
Understanding the Convolutional Neural Network Section of the Official Tutorial



import tensorflow.examples.tutorials.mnist.input_data as input_data 
import tensorflow as tf
import numpy as np

def weight_variable(in_shape):
    # Truncated normal initialization: values more than two standard deviations
    # from the mean are re-drawn, so the initial weights stay close to 0
    initial = tf.truncated_normal(in_shape,stddev = 0.15)
    return tf.Variable(initial)
def bias_variable(in_shape):
    # A slightly positive constant bias helps avoid dead relu units at the start
    initial = tf.constant(0.1,shape = in_shape)
    return tf.Variable(initial)
def conv2d(x,W):
    # The data format defaults to NHWC, i.e. [Number,Height,Width,Channel],
    # so strides is usually [1,dh,dw,1]: we want to step through N and Channel one by one
    # padding = 'SAME' pads the input so edge information is not thrown away
    # padding = 'VALID' adds no padding and may throw away edge pixels
    return tf.nn.conv2d(x,W,strides = [1,1,1,1],padding = 'SAME')
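# A quick size check for the two modes with this 5*5 kernel, stride 1 and a
# 28*28 input: 'SAME' keeps the output at ceil(28/1) = 28, while 'VALID'
# would shrink it to 28 - 5 + 1 = 24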
def max_pool_2x2(x):
    # NHWC format again
    return tf.nn.max_pool(x,ksize = [1,2,2,1],strides = [1,2,2,1],padding = 'SAME')
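# With ksize and strides both [1,2,2,1], each pooling step halves height and
# width: 28x28 becomes 14x14 after the first pool and 7x7 after the second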

mnist = input_data.read_data_sets('MNIST_data/',one_hot = True)

x = tf.placeholder(tf.float32,[None,784])
y = tf.placeholder(tf.float32,[None,10])
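# x takes batches of flattened 28*28 = 784 pixel images; y takes the
# matching one-hot labels over the 10 digit classes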
# The kernel argument of conv2d is laid out as [height,width,in_channel,out_channel]
# 32 kernels, each 5*5, on a single-channel input
W_conv1 = weight_variable([5,5,1,32])
b_conv1 = bias_variable([32])
# -1 means this dimension is inferred from the total size; with the batch
# size of 50 used below, it comes out as 50 at run time
x_image = tf.reshape(x,[-1,28,28,1])
# z1 then has shape [50,28,28,32]
z1 = conv2d(x_image,W_conv1) + b_conv1
# My graduation-project code probably ran poorly because it was missing this
# line. Why apply relu after extracting features? Because it supplies the
# nonlinearity: without it, stacked convolutions collapse into a single linear map
h_conv1 = tf.nn.relu(z1)
# Pooling; h_pool1 has shape [50,14,14,32]
h_pool1 = max_pool_2x2(h_conv1)
# 64 kernels, each 5*5 in shape and 32 channels deep
# Think of a single-channel kernel as one playing card; a 32-channel kernel
# is then a stack of 32 cards
# Each 32-channel kernel is convolved with each [1,14,14,32] feature map
W_conv2 = weight_variable([5,5,32,64])
b_conv2 = bias_variable([64])
# conv2d(h_pool1,W_conv2) yields [50,14,14,64]: 14*14 feature maps, 64 channels
# As before, picture it as a stack of 64 cards, each a 14*14 feature map
z2 = conv2d(h_pool1,W_conv2) + b_conv2
# The same relu nonlinearity as after the first convolution
h_conv2 = tf.nn.relu(z2)
h_pool2 = max_pool_2x2(h_conv2)
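# h_pool2 has shape [50,7,7,64], which is why the next layer flattens 7*7*64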
# Fully connected part
W_fc1 = weight_variable([7*7*64,1024])
b_fc1 = bias_variable([1024])
h_pool2_flat = tf.reshape(h_pool2,[-1,7*7*64])
h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat,W_fc1) + b_fc1)
# dropout, which I don't fully understand yet (a short note follows below)
keep_prob = tf.placeholder(tf.float32)
h_fc1_drop = tf.nn.dropout(h_fc1,keep_prob)
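# A short note on dropout (my reading of the TF1 docs, not part of the
# original tutorial text): tf.nn.dropout zeroes each activation with
# probability 1-keep_prob and scales the survivors by 1/keep_prob, so the
# expected total activation is unchanged. Randomly dropping units this way
# discourages co-adaptation of features and acts as regularization. The
# official tutorial trains with keep_prob 0.5 and tests with 1.0; the loop
# below feeds 1.0 during training, which makes dropout a no-op there.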
# softmax classification
W_fc2 = weight_variable([1024,10])
b_fc2 = bias_variable([10])
y_conv = tf.nn.softmax(tf.matmul(h_fc1_drop,W_fc2) + b_fc2)

loss = -tf.reduce_mean(y*tf.log(y_conv))
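# For reference, the official tutorial sums the cross-entropy over classes
# before averaging over the batch:
# cross_entropy = tf.reduce_mean(-tf.reduce_sum(y*tf.log(y_conv),reduction_indices = [1]))
# The reduce_mean variant above differs from it only by a constant factor of
# 10. tf.nn.softmax_cross_entropy_with_logits on the pre-softmax values is
# the numerically safer choice, since log(y_conv) can underflow to log(0).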
train_step = tf.train.AdamOptimizer(0.0001).minimize(loss)

sess = tf.Session()
sess.run(tf.global_variables_initializer())
correct_prediction = tf.equal(tf.argmax(y_conv,1), tf.argmax(y,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
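# In the two lines above, argmax over axis 1 picks the predicted and true
# class per example; casting the boolean matches to float and averaging
# them gives the accuracy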

for i in range(20000):
    batch = mnist.train.next_batch(50)
    sess.run(train_step,feed_dict = {x:batch[0],y:batch[1],keep_prob:1.0})
    # Two very handy debug lines for inspecting intermediate shapes and values:
    #print(np.array(sess.run(z2,feed_dict = {x:batch[0],y:batch[1],keep_prob:1.0})).shape)
    #print(sess.run(conv2d(x_image,W_conv1)[0][0][0],feed_dict = {x:batch[0],y:batch[1],keep_prob:1.0}))
    if i%100 == 0:
        print('loss = ',sess.run(loss,feed_dict = {x:batch[0],y:batch[1],keep_prob:1.0}))
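
The listing defines an accuracy op but never evaluates it. A minimal check of the trained model (this snippet is my addition, mirroring what the official tutorial does after training) is to run it on the held-out test set, with keep_prob at 1.0 so dropout is disabled:

print('test accuracy = ',sess.run(accuracy,
    feed_dict = {x:mnist.test.images,y:mnist.test.labels,keep_prob:1.0}))

If memory is tight, evaluating the test set in smaller batches and averaging the per-batch accuracies gives the same number.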


Original post: https://www.cnblogs.com/liuzhan709/p/9348134.html
