import tensorflow as tf
import numpy as np
import os
from PIL import Image
cur_dir = os.getcwd()
def modify_image(image):
    resized = tf.image.resize_images(image, 180, 180, 1)
    resized.set_shape([180, 180, 3])
    flipped_images = tf.image.flip_up_down(resized)
    return flipped_images
def read_image(filename_queue):
    reader = tf.WholeFileReader()
    key, value = reader.read(filename_queue)
    image = tf.image.decode_jpeg(value)
    return key, image
def inputs():
    filenames = ['standard_1.jpg', 'standard_2.jpg']
    filename_queue = tf.train.string_input_producer(filenames)
    filename, read_input = read_image(filename_queue)
    reshaped_image = modify_image(read_input)
    reshaped_image = tf.cast(reshaped_image, tf.float32)
    label = tf.constant([1])
    return reshaped_image, label
def weight_variable(shape):
    initial = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(initial)
def bias_variable(shape):
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial)
def conv2d(x, W):
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
def max_pool_2x2(x):
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],
                          strides=[1, 2, 2, 1], padding='SAME')
image,label = inputs()
W_conv1=weight_variable([5,5,3,32])
b_conv1 = bias_variable([32])
h_conv1 = tf.nn.relu(conv2d(image, W_conv1) + b_conv1)
h_pool1 = max_pool_2x2(h_conv1)
W_conv2 = weight_variable([5, 5, 32, 64])
b_conv2 = bias_variable([64])
h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
h_pool2 = max_pool_2x2(h_conv2)
# after two 2x2 max-poolings the 180x180 image is 45x45 with 64 channels
W_fc1 = weight_variable([45 * 45 * 64, 512])
b_fc1 = bias_variable([512])
h_pool2_flat = tf.reshape(h_pool2, [-1, 45 * 45 * 64])
h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)
W_fc2 = weight_variable([512, 10])
b_fc2 = bias_variable([10])
keep_prob = tf.placeholder(tf.float32)             # dropout keep probability
h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)
y_conv = tf.nn.softmax(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)
y_ = tf.placeholder(tf.float32, shape=[None, 10])  # one-hot target labels
cross_entropy = -tf.reduce_sum(y_ * tf.log(y_conv))
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
correct_prediction = tf.equal(tf.argmax(y_conv,1), tf.argmax(y_,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
init = tf.initialize_all_variables()
sess = tf.Session()
sess.run(init)
tf.train.start_queue_runners(sess=sess)
for i in range(100):
    img, lbl = sess.run([image, label])
    print(lbl)
    # the image tensor is wired into the graph from the queue, so only the
    # one-hot label and the dropout keep_prob are fed here
    y_onehot = np.zeros((1, 10), dtype=np.float32)
    y_onehot[0, lbl[0]] = 1.0
    sess.run(train_step, feed_dict={y_: y_onehot, keep_prob: 0.5})
When I run the code, I get an error:
"ValueError: Shapes TensorShape([Dimension(180), Dimension(180), Dimension(3)]) and TensorShape([Dimension(None), Dimension(None), Dimension(None), Dimension(None)]) must have the same rank"
But the weights were initialized, and even then they are shown as empty tensors. The files and labels are read and passed through correctly. The first convolutional layer has a 5x5 window with a depth of 3, and I want 32 such 5x5 filters; hence the shape [5,5,3,32] for W_conv1.
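My guess (and it is only a guess) is that the rank complaint comes from tf.nn.conv2d expecting a 4-D input of shape [batch, height, width, channels], while the image returned by inputs() is only 3-D ([180, 180, 3]). A rough, untested sketch of what I mean; the expand_dims / tf.train.batch calls are just how I imagine the missing batch dimension would be added, not something from my working code:

# image (the tensor returned by inputs()) has shape [180, 180, 3] (rank 3)
# conv2d seems to want [batch, height, width, channels] (rank 4)
image_batch = tf.expand_dims(image, 0)  # -> [1, 180, 180, 3]
# or, to build real mini-batches from the queue:
# images, labels = tf.train.batch([image, label], batch_size=16)

Is that the right way to read the error, or is the problem somewhere else?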