I am following Sentdex's example on YouTube, and this is my code:
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("/tmp/data/", one_hot = True)
n_nodes_hl1 = 500
n_nodes_hl2 = 500
n_nodes_hl3 = 500
n_classes = 10
batch_size = 100
x = tf.placeholder('float', [None, 784])
y = tf.placeholder('float')
def neural_network_model(data):
    hidden_1_layer = {'weights': tf.Variable(tf.random_normal([784, n_nodes_hl1])),
                      'biases': tf.Variable(tf.random_normal([n_nodes_hl1]))}
    hidden_2_layer = {'weights': tf.Variable(tf.random_normal([n_nodes_hl1, n_nodes_hl2])),
                      'biases': tf.Variable(tf.random_normal([n_nodes_hl2]))}
    hidden_3_layer = {'weights': tf.Variable(tf.random_normal([n_nodes_hl2, n_nodes_hl3])),
                      'biases': tf.Variable(tf.random_normal([n_nodes_hl3]))}
    output_layer = {'weights': tf.Variable(tf.random_normal([n_nodes_hl3, n_classes])),
                    'biases': tf.Variable(tf.random_normal([n_classes]))}

    l1 = tf.add(tf.matmul(data, hidden_1_layer['weights']), hidden_1_layer['biases'])
    l1 = tf.nn.relu(l1)
    l2 = tf.add(tf.matmul(l1, hidden_2_layer['weights']), hidden_2_layer['biases'])
    l2 = tf.nn.relu(l2)
    l3 = tf.add(tf.matmul(l2, hidden_3_layer['weights']), hidden_3_layer['biases'])
    l3 = tf.nn.relu(l3)
    output = tf.matmul(l3, output_layer['weights']) + output_layer['biases']
    return output

def train_neural_network(x):
    prediction = neural_network_model(x)
    # OLD VERSION:
    # cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(prediction, y))
    # NEW:
    cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=prediction, labels=y))
    optimizer = tf.train.AdamOptimizer().minimize(cost)
    hm_epochs = 10

    with tf.Session() as sess:
        # OLD:
        # sess.run(tf.initialize_all_variables())
        # NEW:
        sess.run(tf.global_variables_initializer())
        for epoch in range(hm_epochs):
            epoch_loss = 0
            for _ in range(int(mnist.train.num_examples / batch_size)):
                epoch_x, epoch_y = mnist.train.next_batch(batch_size)
                _, c = sess.run([optimizer, cost], feed_dict={x: epoch_x, y: epoch_y})
                epoch_loss += c
            print('Epoch', epoch, 'completed out of', hm_epochs, 'loss:', epoch_loss)

        correct = tf.equal(tf.argmax(prediction, 1), tf.argmax(y, 1))
        accuracy = tf.reduce_mean(tf.cast(correct, 'float'))
        print('Accuracy:', accuracy.eval({x: mnist.test.images, y: mnist.test.labels}))

train_neural_network(x)
It raises this error:
ValueError: Shapes must be equal rank, but are 2 and 1
From merging shape 0 with other shapes. for 'SparseSoftmaxCrossEntropyWithLogits/packed' (op: 'Pack') with input shapes: [?,10], [10].
on this line:
cost = tf.reduce_mean( tf.nn.softmax_cross_entropy_with_logits(logits=prediction, labels=y) )
I think it is the shape of y that causes the error, so I tried using
cost = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(
    prediction, tf.squeeze(y)))
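From what I can tell from the docs, the sparse version wants integer class indices rather than one-hot rows, so squeezing y probably isn't enough. Here is a small sketch I put together of what each loss seems to expect (my own test code, assuming TF 1.x and one-hot labels; the placeholder names are just for illustration):

import tensorflow as tf

logits = tf.placeholder('float', [None, 10])     # network output, shape [batch, n_classes]
one_hot_y = tf.placeholder('float', [None, 10])  # one-hot labels, same shape as logits

# Dense version: labels are one-hot, same rank as logits.
dense_cost = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=one_hot_y))

# Sparse version: labels are integer class indices, one rank lower than logits.
sparse_cost = tf.reduce_mean(
    tf.nn.sparse_softmax_cross_entropy_with_logits(
        logits=logits, labels=tf.argmax(one_hot_y, 1)))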
I'm fairly sure this means that the cost function raises the error (as shown above) because prediction and y have different shapes, but I don't understand TensorFlow well enough to know how to fix it. I don't even really understand where y is set; I got most of this code from the tutorial and adapted it to apply it to a different dataset. How can I fix this error?
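While debugging I also tried giving y an explicit shape, so that TensorFlow complains at graph-construction time instead of at run time (just a guess on my part, assuming one-hot labels of width n_classes):

# Declare the label placeholder with an explicit shape instead of leaving it open:
y = tf.placeholder('float', [None, n_classes])  # [batch, 10] for one-hot MNIST labels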
P.S. I tried printing prediction, and it gave me two outputs, which is where I guess the error comes from:
prediction
(<tf.Tensor 'MatMul_39:0' shape=(?, 10) dtype=float32>,
<tf.Variable 'Variable_79:0' shape=(10,) dtype=float32_ref>)
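Looking at that output again, it is a Python tuple of (tensor, variable), which is what Python builds when a comma slips in where a + was meant on the output line. A minimal sketch of the difference (my guess at the cause, since the code as pasted above uses +):

# Comma instead of +: builds a tuple (matmul_result, biases); when TF converts
# the tuple to a tensor it tries to pack the mismatched shapes [?, 10] and [10],
# which raises exactly the "Shapes must be equal rank, but are 2 and 1" error.
output = tf.matmul(l3, output_layer['weights']), output_layer['biases']

# Intended version: adds the biases to the matmul result; shape stays [?, 10].
output = tf.matmul(l3, output_layer['weights']) + output_layer['biases']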
Answer by 小智 (scored -5):
# WORKING CODE
# I had the same problem as you (not counting the comma), and I'm sorry I don't
# remember the things I changed, but hopefully this will work.

import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)

# 10 classes, 0-9
n_nodes_hl1 = 500
n_nodes_hl2 = 500
n_nodes_hl3 = 500

n_classes = 10
batch_size = 100
x = tf.placeholder('float', [None, 784])
y = tf.placeholder('float')

def neural(data):
    hidden_1_layer = {'weights': tf.Variable(tf.random_normal([784, n_nodes_hl1])),
                      'biases': tf.Variable(tf.random_normal([n_nodes_hl1]))}
    hidden_2_layer = {'weights': tf.Variable(tf.random_normal([n_nodes_hl1, n_nodes_hl2])),
                      'biases': tf.Variable(tf.random_normal([n_nodes_hl2]))}
    hidden_3_layer = {'weights': tf.Variable(tf.random_normal([n_nodes_hl2, n_nodes_hl3])),
                      'biases': tf.Variable(tf.random_normal([n_nodes_hl3]))}
    output_layer = {'weights': tf.Variable(tf.random_normal([n_nodes_hl3, n_classes])),
                    'biases': tf.Variable(tf.random_normal([n_classes]))}

    l1 = tf.add(tf.matmul(data, hidden_1_layer['weights']), hidden_1_layer['biases'])
    l1 = tf.nn.relu(l1)  # originally "li = tf.nn.relu(l1)", a typo that skipped the activation
    l2 = tf.add(tf.matmul(l1, hidden_2_layer['weights']), hidden_2_layer['biases'])
    l2 = tf.nn.relu(l2)
    l3 = tf.add(tf.matmul(l2, hidden_3_layer['weights']), hidden_3_layer['biases'])
    l3 = tf.nn.relu(l3)
    output = tf.matmul(l3, output_layer['weights']) + output_layer['biases']
    return output

def train(x):
    prediction = neural(x)
    cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=prediction, labels=y))
    optimizer = tf.train.AdamOptimizer().minimize(cost)
    hm_epochs = 20

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())

        for epoch in range(hm_epochs):
            epoch_loss = 0
            for _ in range(int(mnist.train.num_examples / batch_size)):
                epoch_x, epoch_y = mnist.train.next_batch(batch_size)
                _, c = sess.run([optimizer, cost], feed_dict={x: epoch_x, y: epoch_y})
                epoch_loss += c
            print('Epoch', epoch, 'completed out of', hm_epochs, 'loss:', epoch_loss)

        correct = tf.equal(tf.argmax(prediction, 1), tf.argmax(y, 1))
        accuracy = tf.reduce_mean(tf.cast(correct, 'float'))
        print('Accuracy:', accuracy.eval({x: mnist.test.images, y: mnist.test.labels}))

train(x)