I'm not sure how to pass the learning rate to TensorBoard directly, but you can track it with Python.
from keras.callbacks import Callback


class LossHistory(Callback):
    """Keras callback that records the training loss and the computed
    learning rate after every epoch.

    Histories are exposed as ``self.losses`` and ``self.lr`` (plain
    Python lists, one entry per completed epoch).
    """

    def on_train_begin(self, logs=None):
        # Reset histories at the start of each training run so the
        # callback can be reused across multiple fit() calls.
        self.losses = []
        self.lr = []

    def on_epoch_end(self, epoch, logs=None):
        # `epoch` is the 0-based epoch index supplied by Keras
        # (the original misnamed it `batch`).
        logs = logs or {}
        self.losses.append(logs.get('loss'))
        # NOTE(review): `initial_lr` must be defined in the enclosing
        # scope before training starts — TODO confirm. The schedule
        # assumed here is exponential decay: lr = initial_lr * 0.95**n.
        self.lr.append(initial_lr * 0.95 ** len(self.losses))


loss_hist = LossHistory()
Then just add `loss_hist` to your list of callbacks.
Update:
Based on this answer:
class LRTensorBoard(TensorBoard):
    """TensorBoard callback that additionally logs the learning rate
    as a scalar summary (tag ``'lr'``) after every epoch.

    Uses the TF1 ``tf.summary.FileWriter`` API; a second writer is
    opened on the same log directory as the base TensorBoard callback.
    """

    def __init__(self, log_dir='./logs', **kwargs):
        super(LRTensorBoard, self).__init__(log_dir, **kwargs)
        self.lr_log_dir = log_dir
        # Created lazily in set_model(); keep None until then so
        # on_train_end() can tell whether a writer exists.
        self.lr_writer = None

    def set_model(self, model):
        self.lr_writer = tf.summary.FileWriter(self.lr_log_dir)
        super(LRTensorBoard, self).set_model(model)

    def on_epoch_end(self, epoch, logs=None):
        # NOTE(review): `initial_lr` must be defined in the enclosing
        # scope — TODO confirm. Schedule assumed: exponential decay,
        # lr = initial_lr * 0.95**epoch.
        lr = initial_lr * 0.95 ** epoch
        summary = tf.Summary(
            value=[tf.Summary.Value(tag='lr', simple_value=lr)])
        self.lr_writer.add_summary(summary, epoch)
        self.lr_writer.flush()
        super(LRTensorBoard, self).on_epoch_end(epoch, logs)

    def on_train_end(self, logs=None):
        super(LRTensorBoard, self).on_train_end(logs)
        # Guard: set_model() may never have been called (e.g. the
        # callback was constructed but training aborted early), in
        # which case there is no writer to close.
        if self.lr_writer is not None:
            self.lr_writer.close()
Just use it like the regular `TensorBoard` callback.
source share