TensorFlow - A convolutional neural network with a custom dataset doesn't train

I am trying to create a convolutional neural network for a custom dataset. The classifier has only two classes. I read the input images correctly and also assign batch labels for the two corresponding classes. The code runs without errors, but the output is abnormal: for some reason, the accuracy is always 50%.

image=inputs()

image_batch=tf.train.batch([image],batch_size=150)
label_batch_pos=tf.train.batch([tf.constant([0,1])],batch_size=75) # label_batch for first class
label_batch_neg=tf.train.batch([tf.constant([1,0])],batch_size=75) # label_batch for second class
label_batch=tf.concat(0,[label_batch_pos,label_batch_neg])

W_conv1 = weight_variable([5, 5, 3, 32])
b_conv1 = bias_variable([32])

image_4d = tf.reshape(image, [-1,32,32,3])

h_conv1 = tf.nn.relu(conv2d(image_4d, W_conv1) + b_conv1)
h_pool1 = max_pool_2x2(h_conv1)

W_conv2 = weight_variable([5, 5, 32, 64])
b_conv2 = bias_variable([64])

h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
h_pool2 = max_pool_2x2(h_conv2)

W_fc1 = weight_variable([8 * 8 * 64, 1024])
b_fc1 = bias_variable([1024])

h_pool2_flat = tf.reshape(h_pool2, [-1, 8*8*64])
h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)
h_fc1_drop = tf.nn.dropout(h_fc1, 0.5)

W_fc2 = weight_variable([1024, 2])
b_fc2 = bias_variable([2])

y_conv=tf.nn.softmax(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)
cross_entropy = -tf.reduce_sum(tf.cast(label_batch,tf.float32)*tf.log(y_conv+1e-9))
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)

init = tf.initialize_all_variables()
sess = tf.Session()
sess.run(init)


tf.train.start_queue_runners(sess=sess)
correct_prediction=tf.equal(tf.argmax(y_conv,1), tf.argmax(label_batch,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

for i in range(100):
 train_step.run(session=sess)
 print(sess.run(accuracy))

print(sess.run(correct_prediction))

When I print the tensor correct_prediction, I get the following output no matter what.

[ True  True  True  True  True  True  True  True  True  True  True  True
  True  True  True  True  True  True  True  True  True  True  True  True
  True  True  True  True  True  True  True  True  True  True  True  True
  True  True  True  True  True  True  True  True  True  True  True  True
  True  True  True  True  True  True  True  True  True  True  True  True
  True  True  True  True  True  True  True  True  True  True  True  True
  True  True  True False False False False False False False False False
 False False False False False False False False False False False False
 False False False False False False False False False False False False
 False False False False False False False False False False False False
 False False False False False False False False False False False False
 False False False False False False False False False False False False
 False False False False False False]

The accuracy is exactly 0.5 because the network predicts the same class for every image, no matter what the input is. My batch contains 150 images, 75 from each class, so exactly half of the predictions match. What could be wrong - why does the network collapse to a single class?

For completeness, here are the helper functions used above:

def weight_variable(shape, name=None):
  initial = tf.truncated_normal(shape, stddev=0.5)
  return tf.Variable(initial, name=name)

def bias_variable(shape, name=None):
  initial = tf.constant(1.0, shape=shape)
  return tf.Variable(initial, name=name)

The first problem is the loss: you apply softmax and compute the cross-entropy by hand, which is numerically unstable. More importantly, TensorFlow already has a fused op for exactly this: tf.nn.softmax_cross_entropy_with_logits.

Why does that matter? With the handwritten version, the softmax can saturate, tf.log can underflow (your +1e-9 only partially guards against this), the gradients turn into NaN, and from that point on the network stops learning and predicts a single class. Have you checked whether your loss becomes NaN during training?
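A minimal sketch of the replacement, reusing the tensor names from the question (depending on your TensorFlow version, the arguments may need to be passed positionally as (logits, labels) instead of by keyword):

# Keep the raw logits; do not apply softmax before the loss op.
logits = tf.matmul(h_fc1_drop, W_fc2) + b_fc2
y_conv = tf.nn.softmax(logits)  # still useful for predictions/accuracy

# Fused, numerically stable softmax + cross-entropy.
cross_entropy = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(
        logits=logits, labels=tf.cast(label_batch, tf.float32)))
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)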

As for the accuracy being stuck at 0.5: you apply dropout with a hard-coded keep probability of 0.5 in both the training and the evaluation pass (which is wrong - dropout should only be active during training). Make keep_prob a placeholder instead and feed the desired value through sess.run.

At evaluation time it looks like this:

h_fc_drop = tf.nn.dropout(h_fc, keep_prob)

(...)

accu, top1, top3, top5 = sess.run([accuracy, te_top1, te_top3, te_top5],
                            feed_dict={
                                x: teX[i: i + batch_size],
                                y: teY[i: i + batch_size],
                                keep_prob: 1.0
                            }
                         )

Here TensorFlow evaluates the accuracy and the top-X metrics on the test data teX and teY, with keep_prob fed as 1.0 so that dropout is disabled during evaluation.
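On the training side the same placeholder gets a value below 1.0. A sketch, assuming the graph from the question (keep_prob is a name I'm introducing here):

keep_prob = tf.placeholder(tf.float32)
h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)  # replaces the hard-coded 0.5

# Dropout active while training ...
sess.run(train_step, feed_dict={keep_prob: 0.5})
# ... and disabled when measuring accuracy.
print(sess.run(accuracy, feed_dict={keep_prob: 1.0}))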

Your initialization is also questionable. A truncated normal with stddev 0.5 produces large initial weights (which can saturate the activations), and biases initialized to 1.0 instead of 0 don't help either. If the network starts out saturated, the gradients are tiny and it never begins to learn. A better choice is Xavier initialization; TensorFlow ships an implementation of Xavier.
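A sketch of Xavier-initialized helpers, assuming the tf.contrib.layers module that shipped with TensorFlow 0.x/1.x (tf.get_variable requires a name, so the helpers take one explicitly):

def weight_variable(shape, name):
  # Xavier scales the initial weights by the layer's fan-in/fan-out.
  return tf.get_variable(name, shape,
                         initializer=tf.contrib.layers.xavier_initializer())

def bias_variable(shape, name):
  # Biases can simply start at zero.
  return tf.get_variable(name, shape,
                         initializer=tf.constant_initializer(0.0))

W_conv1 = weight_variable([5, 5, 3, 32], 'W_conv1')
b_conv1 = bias_variable([32], 'b_conv1')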

Beyond that, check the usual suspects: learning rate, shuffling of the input data, etc., and verify that the input pipeline really delivers what you expect.
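One more pitfall worth spelling out: the question builds the labels positionally (first 75 images positive, last 75 negative), so images and labels must be shuffled together or the pairing breaks. A sketch with tf.train.shuffle_batch, assuming inputs() can be extended to return a (image, label) pair per example (a hypothetical change to the reader's function):

image, label = inputs()  # hypothetical: one label per image
image_batch, label_batch = tf.train.shuffle_batch(
    [image, label], batch_size=150,
    capacity=2000, min_after_dequeue=1000)  # shuffles both tensors in lockstep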


Source: https://habr.com/ru/post/1629850/

