In the code below, l2 unexpectedly comes back with the same value as l1. Since the optimizer is listed before loss in the fetch list, I expected l2 to be the loss after the training step had been applied. Can I not request several values from the graph in a single sess.run call and expect them to be evaluated in a consistent order?
import tensorflow as tf
import numpy as np

x = tf.placeholder(tf.float32, shape=[None, 10])
y = tf.placeholder(tf.float32, shape=[None, 2])
weight = tf.Variable(tf.random_uniform((10, 2), dtype=tf.float32))

loss = tf.nn.sigmoid_cross_entropy_with_logits(tf.matmul(x, weight), y)
optimizer = tf.train.AdamOptimizer(0.1).minimize(loss)

with tf.Session() as sess:
    tf.initialize_all_variables().run()
    X = np.random.rand(1, 10)
    Y = np.array([[0, 1]])

    # Evaluate loss before running training step
    l1 = sess.run([loss], feed_dict={x: X, y: Y})[0][0][0]
    print(l1)  # 3.32393

    # Running the training step
    _, l2 = sess.run([optimizer, loss], feed_dict={x: X, y: Y})
    print(l2[0][0])  # 3.32393 -- didn't change?

    # Evaluate loss again after training step as sanity check
    l3 = sess.run([loss], feed_dict={x: X, y: Y})[0][0][0]
    print(l3)  # 2.71041
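If the fetches in one sess.run call really do share a single execution of the graph, then I assume the only way to see the post-update loss in the same call is to build a second loss computation that is explicitly ordered after the training op. Below is a rough sketch of what I have in mind; it is my own guess, not something from the docs. The loss_after name is mine, and I am assuming that use_resource=True together with tf.control_dependencies forces the variable reads in the second computation to happen after the update:

import tensorflow as tf
import numpy as np

x = tf.placeholder(tf.float32, shape=[None, 10])
y = tf.placeholder(tf.float32, shape=[None, 2])
# Resource variable, so reads created under the control dependency below
# should be ordered after the optimizer's update (my assumption).
weight = tf.Variable(tf.random_uniform((10, 2), dtype=tf.float32), use_resource=True)

loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=y, logits=tf.matmul(x, weight))
train_op = tf.train.AdamOptimizer(0.1).minimize(loss)

# Recompute the loss under a control dependency so that it reads the weights
# only after the training step has run.
with tf.control_dependencies([train_op]):
    loss_after = tf.nn.sigmoid_cross_entropy_with_logits(labels=y, logits=tf.matmul(x, weight))

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    X = np.random.rand(1, 10)
    Y = np.array([[0, 1]])
    before, after = sess.run([loss, loss_after], feed_dict={x: X, y: Y})
    print(before, after)  # I would expect 'after' to be smaller than 'before'

Is something like this necessary, or is the fetch order in sess.run supposed to be respected?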