batch normalization tensorflow
我尝试在Mnist数据集上使用函数tf.contrib.layers.batch_norm实现CNN.
当我训练和检查模型时,我发现损失正在减少(好!)但是测试数据集的准确性保持随机(~10%)(BAD !!!)
如果我在没有批量标准化的情况下使用相同的模型,我会发现测试精度正在按预期增加.
你可以在下面的代码中看到我是如何使用批量标准化函数的。如果我在测试数据集上也设置 is_training = True,就能得到很好的结果,所以问题出在批量标准化函数的 is_training = False 模式上。
请帮我解决一下这个.提前感谢所有人.
# BLOCK2 — three maxout layers; each branch is conv2d + batch norm.
# NOTE(review): `output`, `is_training`, `batch_norm_decay` and the
# block2_layer*_weights tensors are defined elsewhere in the graph code.

def _conv_bn(inputs, weights):
    """Stride-1 SAME convolution followed by batch norm tied to the
    is_training placeholder (updates_collections=None forces the moving
    averages to be updated in place on every training step)."""
    conv = tf.nn.conv2d(inputs, weights, [1, 1, 1, 1], padding='SAME')
    return tf.contrib.layers.batch_norm(
        conv, scale=True, decay=batch_norm_decay, center=True,
        is_training=is_training, updates_collections=None)

def _maxout(inputs, weight_list):
    """Maxout over parallel conv+BN branches: element-wise max across them."""
    branches = [_conv_bn(inputs, w) for w in weight_list]
    return tf.reduce_max(tf.stack(branches), 0)

# BLOCK2 - Layer 1 (4 parallel branches)
after_maxout = _maxout(output, [block2_layer1_1_weights, block2_layer1_2_weights,
                                block2_layer1_3_weights, block2_layer1_4_weights])
# BLOCK2 - Layer 2 (2 parallel branches)
after_maxout = _maxout(after_maxout, [block2_layer2_1_weights, block2_layer2_2_weights])
# BLOCK2 - Layer 3 (2 parallel branches)
after_maxout = _maxout(after_maxout, [block2_layer3_1_weights, block2_layer3_2_weights])

pooled = tf.nn.max_pool(after_maxout, [1, 3, 3, 1], [1, 3, 3, 1], 'SAME')
# BUG FIX: tf.nn.dropout(pooled, 0.5) was applied unconditionally, so half the
# activations were also dropped at evaluation time (is_training=False), which
# by itself degrades test accuracy. Gate dropout on the is_training placeholder.
output = tf.cond(is_training,
                 lambda: tf.nn.dropout(pooled, 0.5),
                 lambda: pooled)
# Training computation.
logits = model(tf_train_dataset)
loss = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(labels=tf_train_labels, logits=logits))
# L2-regularize every trainable variable EXCEPT the batch-norm beta/gamma
# parameters (their variables are scoped under 'BatchNorm').
l2_loss = tf.add_n([tf.nn.l2_loss(v)
                    for v in tf.trainable_variables() if 'BatchNorm' not in v.name])
loss += LAMBDA * l2_loss

# Optimizer.
# BUG FIX: the minimize op was built but never assigned, yet the training
# loop runs session.run([optimizer, ...]) — bind it to the expected name.
optimizer = tf.train.GradientDescentOptimizer(LEARNING_RATE).minimize(loss)

# Predictions for the training, validation, and test data.
train_prediction = tf.nn.softmax(logits)
valid_prediction = tf.nn.softmax(model(tf_valid_dataset))
test_prediction = tf.nn.softmax(model(tf_test_dataset))
num_steps = 6000

with tf.Session(graph=graph) as session:
    tf.global_variables_initializer().run()
    print('Initialized')
    for step in range(num_steps):
        # Cycle through the training set in minibatches.
        # (Removed unused `test_offset`, which was computed but never read.)
        offset = (step * batch_size) % (train_labels.shape[0] - batch_size)
        batch_data = train_dataset[offset:(offset + batch_size), :, :, :]
        batch_labels = train_labels[offset:(offset + batch_size), :]
        feed_dict = {tf_train_dataset: batch_data,
                     tf_train_labels: batch_labels,
                     is_training: True}
        _, l, predictions = session.run(
            [optimizer, loss, train_prediction], feed_dict=feed_dict)
        if step % 50 == 0:
            print('Minibatch loss at step %d: %f' % (step, l))
            print('Minibatch accuracy: %.1f%%' % accuracy(predictions, batch_labels))

    # Evaluate in batches with is_training=False so batch norm uses the
    # population statistics accumulated during training.
    # NOTE(review): the pasted original lost its indentation; evaluation is
    # placed after the training loop here — confirm that matches the intent.
    batch_predictions = []
    for i in range(1, 10001):
        test_batch = test_dataset[((i - 1) * test_batch_size):(i * test_batch_size), :, :, :]
        pred = test_prediction.eval(
            feed_dict={tf_test_dataset: test_batch, is_training: False})
        batch_predictions.append(pred)
    # Stack once at the end — repeated np.vstack in the loop was quadratic.
    # (Also removed the stray trailing backtick that made the paste a syntax error.)
    stacked_pred = np.vstack(batch_predictions)
    print(np.argmax(stacked_pred, 1))
    print('test accuracy: %.1f%%' % accuracy(stacked_pred, test_labels))
Run Code Online (Sandbox Code Playgroud)
在训练过程中,batch-norm 使用基于当前批次的统计数据。在评估/测试期间(即 is_training 为 False 时),它使用的是整体(population)统计数据。
在内部,整体统计数据通过隐式创建的更新操作进行更新,这些操作被添加到 tf.GraphKeys.UPDATE_OPS 集合中——但你必须强制 TensorFlow 运行这些操作。一个简单的做法是为你的优化操作引入 control_dependencies。
# Collect the moving-average update ops registered by batch_norm and force
# them to run before each optimisation step; otherwise the population
# statistics used at is_training=False are never refreshed.
# NOTE(review): this is only required when updates_collections is left at its
# default (tf.GraphKeys.UPDATE_OPS); with updates_collections=None (as in the
# question's code) the updates already run in place — confirm which variant
# the model actually uses.
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
    train_op = optimizer.minimize(loss, step)
Run Code Online (Sandbox Code Playgroud)
| 归档时间: |
|
| 查看次数: |
1648 次 |
| 最近记录: |