Or you can just use the updated TensorFlow r1.1 Estimator API.
The API for defining a model is very similar, with only some small changes in parameters, return types, and function names. Here is an example that I used:
def model_fn():
def _build_model(features, labels, mode, params):
y = tf.contrib.layers.fully_connected(features, num_outputs=64, activation_fn=tf.nn.relu,
weights_initializer=tf.contrib.layers.xavier_initializer())
y = tf.contrib.layers.fully_connected(y, num_outputs=64, activation_fn=tf.nn.relu,
weights_initializer=tf.contrib.layers.xavier_initializer())
y = tf.contrib.layers.fully_connected(y, num_outputs=1, activation_fn=tf.nn.sigmoid,
weights_initializer=tf.contrib.layers.xavier_initializer())
predictions = y
if mode == tf.estimator.ModeKeys.TRAIN or mode == tf.estimator.ModeKeys.EVAL:
loss = tf.reduce_mean((predictions - labels) ** 2)
else:
loss = None
if mode != tf.estimator.ModeKeys.PREDICT:
eval_metric_ops = {
"rmse": tf.metrics.root_mean_squared_error(tf.cast(labels, tf.float32), predictions),
"accuracy": tf.metrics.accuracy(tf.cast(labels, tf.float32), predictions),
"precision": tf.metrics.precision(tf.cast(labels, tf.float32), predictions)
}
else:
eval_metric_ops = None
if mode == tf.estimator.ModeKeys.TRAIN:
train_op = tf.contrib.layers.optimize_loss(
loss=loss,
global_step=tf.contrib.framework.get_global_step(),
learning_rate=0.001,
optimizer="Adam")
else:
train_op = None
if mode == tf.estimator.ModeKeys.PREDICT:
predictions_dict = {"pred": predictions}
else:
predictions_dict = None
return tf.estimator.EstimatorSpec(mode=mode,
predictions=predictions_dict,
loss=loss,
train_op=train_op,
eval_metric_ops=eval_metric_ops)
return _build_model
Then, create and train the estimator:
e = tf.estimator.Estimator(model_fn=model_fn(), params=None)
e.train(input_fn=input_fn(), steps=1000)
This works with TensorFlow r1.1.