TensorBoard data visualization for a CNN
1.CNN_my_test.py
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

mnist = input_data.read_data_sets('data/', one_hot=True)
print('Data loaded')
print(mnist.train.images[0].shape)

def weight_initializer(shape):
    initializer = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(initializer)

def biases_initializer(shape):
    initializer = tf.constant(0.1, shape=shape)
    return tf.Variable(initializer)

x = tf.placeholder(tf.float32, shape=[None, 784])
y = tf.placeholder(tf.float32, shape=[None, 10])
x_image = tf.reshape(x, [-1, 28, 28, 1])
tf.summary.image('input', x_image, 1)

# conv layer 1: 5x5 kernels, 1 -> 32 channels, then 2x2 max pooling
wc1 = weight_initializer([5, 5, 1, 32])
bc1 = biases_initializer([32])
hc1 = tf.nn.relu(tf.nn.conv2d(x_image, wc1, strides=[1, 1, 1, 1], padding='SAME') + bc1)
pool_hc1 = tf.nn.max_pool(hc1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')

# conv layer 2: 5x5 kernels, 32 -> 64 channels, then 2x2 max pooling
wc2 = weight_initializer([5, 5, 32, 64])
bc2 = biases_initializer([64])
hc2 = tf.nn.relu(tf.nn.conv2d(pool_hc1, wc2, strides=[1, 1, 1, 1], padding='SAME') + bc2)
pool_hc2 = tf.nn.max_pool(hc2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')

# fully connected layer: 7*7*64 -> 1024, with a fixed-keep_prob dropout
wd1 = weight_initializer([7 * 7 * 64, 1024])
bd1 = biases_initializer([1024])
hc2_flat = tf.reshape(pool_hc2, [-1, 7 * 7 * 64])
hd1 = tf.nn.relu(tf.matmul(hc2_flat, wd1) + bd1)
hd1_dp = tf.nn.dropout(hd1, keep_prob=0.7)

# output layer: softmax_cross_entropy_with_logits expects raw logits,
# so keep the pre-softmax values separate from the softmax probabilities
wd2 = weight_initializer([1024, 10])
bd2 = biases_initializer([10])
logits = tf.matmul(hd1_dp, wd2) + bd2
y_conv = tf.nn.softmax(logits)
cross_entropy = tf.reduce_sum(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=y))
tf.summary.scalar('cross_entropy', cross_entropy)

train_step = tf.train.AdamOptimizer(learning_rate=1e-4).minimize(cross_entropy)
corr = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y, 1))
acc = tf.reduce_mean(tf.cast(corr, tf.float32))

sess = tf.Session()
sess.run(tf.global_variables_initializer())

merged = tf.summary.merge_all()
log_dir = './log'
train_writer = tf.summary.FileWriter(log_dir + '/train', sess.graph)

for i in range(2000):
    batch = mnist.train.next_batch(50)
    if i % 100 != 0:
        # one training step; evaluate the merged summaries in the same run
        summary, _ = sess.run([merged, train_step], feed_dict={x: batch[0], y: batch[1]})
        train_writer.add_summary(summary, i)
    else:
        # every 100 steps: report accuracy, then train with full run metadata traced
        train_accuracy = acc.eval(session=sess, feed_dict={x: batch[0], y: batch[1]})
        test_accuracy = acc.eval(session=sess, feed_dict={x: mnist.test.images[0:50], y: mnist.test.labels[0:50]})
        print('train_acc: %.5f, test_acc: %.5f' % (train_accuracy, test_accuracy))
        run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
        run_metadata = tf.RunMetadata()
        summary, _ = sess.run([merged, train_step], feed_dict={x: batch[0], y: batch[1]},
                              options=run_options, run_metadata=run_metadata)
        train_writer.add_run_metadata(run_metadata, 'step%03d' % i)
        train_writer.add_summary(summary, i)

print('Training finished!')
train_writer.close()
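Once the script has written event files under ./log/train, the logged images, scalars, and the graph can be viewed by starting TensorBoard (the CLI that ships with TensorFlow) and opening the address it prints, by default http://localhost:6006:

tensorboard --logdir=./log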
The TensorBoard workflow breaks down into 3 parts.

1. Record the variables you want to log with the corresponding summary functions.
Image:
tf.summary.image('input', x_image, 1)
Scalar (displayed as a curve over training steps):
tf.summary.scalar('cross_entropy', cross_entropy)
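Besides images and scalars, weight and activation distributions can also be logged. A minimal sketch, not part of the original script, using tf.summary.histogram on tensors already defined above:

tf.summary.histogram('wc1', wc1)   # distribution of the first conv kernel's weights
tf.summary.histogram('hd1', hd1)   # distribution of the fully connected activations

These summaries appear under the DISTRIBUTIONS and HISTOGRAMS tabs once they are picked up by merge_all.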
2. Create the op that merges all summaries, plus a FileWriter bound to the log file path.
merged = tf.summary.merge_all()
log_dir = './log'
train_writer = tf.summary.FileWriter(log_dir + '/train', sess.graph)
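If test-set curves are wanted as well, the usual pattern is a second FileWriter pointing at a sibling directory; test_writer below is an illustrative name, not something defined in the original script:

test_writer = tf.summary.FileWriter(log_dir + '/test')

Each subdirectory under ./log shows up as a separate run in TensorBoard, so the train and test curves can be overlaid on the same chart.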
3. During training, run the merged op and write its result to the log at the current step.
summary, _ = sess.run([merged, train_step], feed_dict={x: batch[0], y: batch[1]})
train_writer.add_summary(summary, i)
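To record a test curve too, the same merged op can be evaluated on a test batch at the reporting steps and written with the test_writer sketched above (again an assumption, not in the original code):

test_summary = sess.run(merged, feed_dict={x: mnist.test.images[0:50], y: mnist.test.labels[0:50]})
test_writer.add_summary(test_summary, i)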
Reposted from: https://www.cnblogs.com/CK85/p/10289164.html