
I can't find the error in my TensorFlow LeNet model below. I get the following error:

ValueError: Tried to convert 'input' to a tensor and failed. Error: Shapes must be equal rank, but are 2 and 1. From merging shape 22 with other shapes. for 'Print_4/packed' (op: 'Pack') with input shapes: [5,5,1,20], [20], [5,5,20,50], [50], [2450,200], [200], [200,10], [10], [5,5,1,20], [20], [5,5,20,50], [50], [2450,200], [200], [200,10], [10], [5,5,1,20], [20], [5,5,20,50], [50], [2450,200], [200], [200,10], [10]

It looks like my architecture's dimensions are not right somewhere, but I can't figure out where the problem is. Here is my code (TensorFlow LeNet model on MNIST):

def weight_variable(shape): 
    initial = tf.truncated_normal(shape, stddev=0.1) 
    return tf.Variable(initial) 

def bias_variable(shape): 
    initial = tf.constant(0.1, shape=shape) 
    return tf.Variable(initial) 



def conv2d(x, W): 
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='VALID') 

def max_pool_2x2(x): 
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], 
         strides=[1, 2, 2, 1], padding='SAME') 
# Input layer 
x = tf.placeholder(tf.float32, [None, 784], name='x') 
y_ = tf.placeholder(tf.float32, [None, 10], name='y_') 
x_image = tf.reshape(x, [-1, 28, 28, 1]) 
# Convolutional layer 1 
W_conv1 = weight_variable([5, 5, 1, 20]) 
b_conv1 = bias_variable([20]) 

h_conv1 = conv2d(x_image, W_conv1) + b_conv1 
h_pool1 = max_pool_2x2(h_conv1) 


W_conv2 = weight_variable([5, 5, 20, 50]) 

b_conv2 = bias_variable([50]) 

h_conv2 = conv2d(h_pool1, W_conv2) + b_conv2 
h_pool2 = max_pool_2x2(h_conv2) 

h_pool2_flat = tf.reshape(h_pool2, [-1, 8*8*50]) 

W_fc1 = weight_variable([8 * 8* 50, 500]) 

b_fc1 = bias_variable([500]) 

h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1) 


W_fc2 = weight_variable([500, 10]) 

b_fc2 = bias_variable([10]) 

y = tf.nn.softmax(tf.matmul(h_fc1, W_fc2) + b_fc2, name='y') 


cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y), 
                                              reduction_indices=[1])) 

correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1)) 
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32), 
                          name='accuracy') 

# Training algorithm 
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy) 


with tf.Session() as sess: 
    sess.run(tf.global_variables_initializer()) 
    max_steps = 20000 
    for step in range(max_steps): 
        a = tf.Print(v, [v], message="This is a: ") 
        #print(a.eval()) 
        batch_xs, batch_ys = mnist.train.next_batch(50) 


        sess.run([train_step], feed_dict={x: batch_xs, y_: batch_ys, 
                                          keep_prob: 0.5}) 
    print(max_steps, sess.run(accuracy, feed_dict={x: mnist.test.images, 
                                                   y_: mnist.test.labels, keep_prob: 1.0})) 

Answer


The shape of h_pool2 is (?, 4, 4, 50). With 'VALID' padding each 5x5 convolution shrinks the side length by 4 and each 2x2 pool halves it, so the 28x28 input becomes 24x24 after conv1, 12x12 after pool1, 8x8 after conv2, and 4x4 after pool2. These two lines of your code are therefore wrong:

h_pool2_flat = tf.reshape(h_pool2, [-1, 8*8*50]) 

W_fc1 = weight_variable([8 * 8* 50, 500]) 

Change them to 4 * 4 * 50 and it should work.
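
That is, the two lines become:

h_pool2_flat = tf.reshape(h_pool2, [-1, 4 * 4 * 50]) 
W_fc1 = weight_variable([4 * 4 * 50, 500]) 

For reference, here is a minimal, TensorFlow-free sketch of the size arithmetic (assuming, as in your code, 5x5 convolutions with 'VALID' padding and 2x2 max pooling with stride 2 and 'SAME' padding; the helper names are just for illustration). It confirms that the flattened size is 4 * 4 * 50 = 800 rather than 8 * 8 * 50:

import math 

def valid_conv(side, kernel=5): 
    # 'VALID' padding: output side = input side - kernel + 1 
    return side - kernel + 1 

def pool_2x2(side): 
    # 2x2 max pool, stride 2, 'SAME' padding: output side = ceil(side / 2) 
    return math.ceil(side / 2) 

side = 28                # MNIST images are 28x28 
side = valid_conv(side)  # conv1: 28 -> 24 
side = pool_2x2(side)    # pool1: 24 -> 12 
side = valid_conv(side)  # conv2: 12 -> 8 
side = pool_2x2(side)    # pool2:  8 -> 4 

print(side, side * side * 50)  # prints: 4 800 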