[Tensorflow] Cookbook - Neural Network

In this chapter, we'll cover the following recipes:

  • Implementing Operational Gates
  • Working with Gates and Activation Functions
  • Implementing a One-Hidden-Layer Neural Network
  • Implementing Different Layers
  • Using Multilayer Networks
  • Improving Predictions of Linear Models
  • Learning to Play Tic Tac Toe

 

一层隐藏层的全连接神经网络

-- 与MLP相似,但solver, loss的策略不同

 

加载Iris数据。

# Implementing a one-layer Neural Network
#---------------------------------------
#
# We will illustrate how to create a one hidden layer NN
#
# We will use the iris data for this exercise
#
# We will build a one-hidden layer neural network
#  to predict the fourth attribute, Petal Width from
#  the other three (Sepal length, Sepal width, Petal length).

import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from sklearn import datasets
from tensorflow.python.framework import ops
ops.reset_default_graph()

# Load Iris: sepal length, sepal width, petal length (cols 0-2) are the
# features; petal width (col 3) is the regression target.
iris   = datasets.load_iris()
x_vals = np.array(iris.data[:, 0:3])
y_vals = np.array(iris.data[:, 3])
Load data

加了种子。

# Create a session to run the graph.
sess = tf.Session()

# Fix both TF and NumPy RNG seeds so weight init and the train/test
# split below are reproducible.
seed = 3
tf.set_random_seed(seed)
np.random.seed(seed)

随机分组:training and testing;并通过"min-max norm"做成单位向量,也就是normalize。

# Split data into train/test = 80%/20% (sampling without replacement, so
# the two index sets are disjoint).
train_indices = np.random.choice(len(x_vals), round(len(x_vals)*0.8), replace=False)
# Test set = every index not chosen for training.
test_indices  = np.array(list(set(range(len(x_vals))) - set(train_indices)))
x_vals_train  = x_vals[train_indices]
x_vals_test   = x_vals[test_indices]
y_vals_train  = y_vals[train_indices]
y_vals_test   = y_vals[test_indices]
 

# Min-max normalize each column into the [0, 1] range.
def normalize_cols(m):
    """Scale every column of m to [0, 1] via min-max normalization."""
    col_min = m.min(axis=0)
    col_max = m.max(axis=0)
    return (m - col_min) / (col_max - col_min)

# nan_to_num guards against 0/0 from constant columns (col_max == col_min).
# NOTE(review): the test set is normalized with its own min/max rather than
# the training set's statistics — consider reusing the train min/max.
x_vals_train = np.nan_to_num(normalize_cols(x_vals_train))
x_vals_test  = np.nan_to_num(normalize_cols(x_vals_test))

构建Graph。

# Declare batch size
batch_size = 50
# Initialize placeholders: 3 input features -> 1 regression target (petal width).
x_data   = tf.placeholder(shape=[None, 3], dtype=tf.float32)
y_target = tf.placeholder(shape=[None, 1], dtype=tf.float32)

# Create variables for both Neural Network Layers.
# All weights/biases start from a standard normal distribution.
hidden_layer_nodes = 5
A1 = tf.Variable(tf.random_normal(shape=[3, hidden_layer_nodes])) # inputs -> hidden nodes
b1 = tf.Variable(tf.random_normal(shape=[hidden_layer_nodes   ])) # one bias per hidden node
A2 = tf.Variable(tf.random_normal(shape=[hidden_layer_nodes, 1])) # hidden nodes -> 1 output
b2 = tf.Variable(tf.random_normal(shape=[1]))   # 1 bias for the output

如上可见,作为 bias b1 and b2 只需考虑下层服务的node个数即可。

然后是activation, solver的设置。

# Declare model operations:
# hidden = ReLU(x * A1 + b1), output = ReLU(hidden * A2 + b2).
hidden_output = tf.nn.relu(tf.add(tf.matmul(x_data, A1), b1))
# NOTE(review): ReLU on the regression output clamps predictions to >= 0;
# petal width is non-negative so this looks intentional — confirm.
final_output  = tf.nn.relu(tf.add(tf.matmul(hidden_output, A2), b2))

# Declare loss function: mean squared error over the mini-batch.
loss = tf.reduce_mean(tf.square(y_target - final_output))

# Declare optimizer: plain gradient descent, learning rate 0.005.
my_opt = tf.train.GradientDescentOptimizer(0.005)
train_step = my_opt.minimize(loss)

# Initialize variables
init = tf.initialize_all_variables()
sess.run(init)

训练,启动计算流程。

# Training loop: 500 generations of mini-batch gradient descent.
loss_vec  = []   # RMSE on each training batch (plotted below)
test_loss = []   # RMSE on the held-out test set (plotted below)
for i in range(500):
    # Mini-batch: sample a basket of rows at once, not one sample at a time.
    rand_index = np.random.choice(len(x_vals_train), size=batch_size)
    rand_x = x_vals_train[rand_index]
    rand_y = np.transpose([y_vals_train[rand_index]])  # column vector [batch, 1]
    sess.run(train_step, feed_dict={x_data: rand_x, y_target: rand_y})

    # Record train-batch loss (sqrt of MSE -> RMSE).
    temp_loss = sess.run(loss, feed_dict={x_data: rand_x, y_target: rand_y})
    loss_vec.append(np.sqrt(temp_loss))

    # Record test loss too — it is what the red curve in the plot shows,
    # so this part is not useless.
    test_temp_loss = sess.run(loss, feed_dict={x_data: x_vals_test,
                                               y_target: np.transpose([y_vals_test])})
    test_loss.append(np.sqrt(test_temp_loss))

    if (i+1) % 50 == 0:
        print('Generation: ' + str(i+1) + '. Loss = ' + str(temp_loss))

数据展示。 

# Plot loss (MSE) over time: per-batch train RMSE (black solid) against
# test-set RMSE (red dashed), one point per generation.
plt.plot(loss_vec,  'k-', label='Train Loss')
plt.plot(test_loss, 'r--', label='Test Loss')
plt.title('Loss (MSE) per Generation')
plt.xlabel('Generation')
plt.ylabel('Loss')
plt.legend(loc='upper right')
plt.show()
View Code

Result: (红线表示的test部分,感觉没啥意义在这里)

 

Implementing Different Layers

We will expand our knowledge of various layers in this recipe: convolutional layers and maxpool layers

一维输入 --> 卷积 --> 激活 --> 池化 --> 全连接

 

样本初始化:

# Implementing Different Layers
#---------------------------------------
#
# We will illustrate how to use different types
# of layers in Tensorflow
#
# The layers of interest are:
#  (1) Convolutional Layer
#  (2) Activation Layer
#  (3) Max-Pool Layer
#  (4) Fully Connected Layer
#
# We will generate two different data sets for this
#  script, a 1-D data set (row of data) and
#  a 2-D data set (similar to picture)

import tensorflow as tf
import numpy as np
from tensorflow.python.framework import ops
ops.reset_default_graph()

#---------------------------------------------------|
#-------------------1D-data-------------------------|
#---------------------------------------------------|
print('\n----------1D Arrays----------')

# Create graph session 
sess = tf.Session()

# Generate 1D data: 25 samples from a standard normal distribution.
data_size = 25
data_1d = np.random.normal(size=data_size)
View Code

构建卷积:

# Placeholder for the raw 1-D input signal (length data_size).
x_input_1d = tf.placeholder(dtype=tf.float32, shape=[data_size])

#--------Convolution--------
def conv_layer_1d(input_1d, my_filter):
    """Convolve a 1-D tensor with my_filter and return a 1-D result.

    Tensorflow's conv2d() only works with 4D arrays
    [batch, height, width, channels], so the 1-D input is expanded to
    [1, 1, length, 1] first and squeezed back afterwards.
    """
    input_2d = tf.expand_dims(input_1d, 0)
    input_3d = tf.expand_dims(input_2d, 0)
    input_4d = tf.expand_dims(input_3d, 3)

    # Stride = 1; to stride by 2 on the data axis use strides=[1, 1, 2, 1].
    convolution_output = tf.nn.conv2d(input_4d, filter=my_filter, strides=[1, 1, 1, 1], padding="VALID")
    # Get rid of the size-1 extra dimensions.
    conv_output_1d = tf.squeeze(convolution_output)
    return conv_output_1d

# Filter shape [1, 5, 1, 1]: a length-5 window sliding along the 1-D data.
my_filter = tf.Variable(tf.random_normal(shape=[1, 5, 1, 1]))

# Create convolution layer
my_convolution_output = conv_layer_1d(x_input_1d, my_filter)

 

tf.nn.conv2d

Ref: http://blog.csdn.net/mao_xiao_feng/article/details/53444333

tf.nn.conv2d(input,                   # 要求是一个Tensor,具有[batch, in_height, in_width, in_channels]这样的shape,四维Tensor
        filter,                   # 要求是一个Tensor,具有[filter_height, filter_width, in_channels, out_channels]这样的shape,具体含义是[卷积核的高度,卷积核的宽度,图像通道数,卷积核个数]
        strides,                  # 卷积步长
        padding,                  # 只能是"SAME","VALID"其中之一,这个值决定了不同的卷积方式
        use_cudnn_on_gpu=None,    # 是否使用cudnn加速,默认为true
        name=None)

结果返回一个Tensor,这个输出,就是我们常说的Feature Map。

 

tf.expand_dims 与 tf.reshape

Ref: http://blog.csdn.net/jasonzzj/article/details/60811035

TensorFlow中,想要维度增加一维,可以使用tf.expand_dims(input, dim, name=None)函数。当然,我们常用tf.reshape(input, shape=[])也可以达到相同效果,

但是有些时候在构建图的过程中,placeholder没有被feed具体的值,这时就会报下面的错误:TypeError: Expected binary or unicode string, got 1 

在这种情况下,我们就可以考虑使用expand_dims来将维度加1。

比如我自己代码中遇到的情况,在对图像维度降到二维做特定操作后,要还原成四维[batch, height, width, channels],前后各增加一维。如果用reshape,则因为上述原因报错

one_img2 = tf.reshape(one_img, shape=[1, one_img.get_shape()[0].value, one_img.get_shape()[1].value, 1])

用下面的方法可以实现:

one_img = tf.expand_dims(one_img, 0)  # 0表示第一维
one_img = tf.expand_dims(one_img, -1) #-1表示最后一维

在最后,给出官方的例子和说明:(各三种不同的expand法,没有串行操作关系)

# 't' is a tensor of shape [2]
shape(expand_dims(t,  0))  ==> [1, 2]
shape(expand_dims(t,  1))  ==> [2, 1]
shape(expand_dims(t, -1))  ==> [2, 1]

# 't2' is a tensor of shape [2, 3, 5]
shape(expand_dims(t2, 0)) ==> [1, 2, 3, 5]
shape(expand_dims(t2, 2)) ==> [2, 3, 1, 5]
shape(expand_dims(t2, 3)) ==> [2, 3, 5, 1]

 

构建池化:

#--------Activation--------
def activation(input_1d):
    """Element-wise ReLU."""
    return tf.nn.relu(input_1d)

# Create activation layer on the convolution output.
my_activation_output = activation(my_convolution_output)

#--------Max Pool--------
def max_pool(input_1d, width):
    """Max-pool a 1-D tensor with a window of the given width, stride 1.

    Just like conv2d() above, max_pool() works with 4D arrays
    [batch_size=1, height=1, width=num_input, channels=1].
    """
    input_2d = tf.expand_dims(input_1d, 0)
    input_3d = tf.expand_dims(input_2d, 0)
    input_4d = tf.expand_dims(input_3d, 3)
    # To stride by 2 on the data axis use strides=[1, 1, 2, 1].
    pool_output = tf.nn.max_pool(input_4d, ksize=[1, 1, width, 1],
                                 strides=[1, 1, 1, 1], padding='VALID')
    # Get rid of the size-1 extra dimensions.
    pool_output_1d = tf.squeeze(pool_output)
    return pool_output_1d

# Activate first, then pool.
my_maxpool_output = max_pool(my_activation_output, width=5)

 

构建全连接层:

#--------Fully Connected--------
def fully_connected(input_layer, num_outputs):
    """Dense layer: project the 1-D input onto num_outputs units.

    NOTE(review): weight/bias are tf.random_normal tensors, not tf.Variables,
    so they are re-sampled on every session run — fine for this demo, but
    they could not be trained as written.
    """
    # Weight shape = [length of input, num_outputs].
    # tf.pack was renamed tf.stack in TensorFlow 1.0; use tf.pack on older TF.
    weight_shape = tf.squeeze(tf.stack([tf.shape(input_layer), [num_outputs]]))

    # Initialize such weight
    weight = tf.random_normal(weight_shape, stddev=0.1)

    # Initialize the bias
    bias = tf.random_normal(shape=[num_outputs])

    # Make the 1D input array into a 2D array for matrix multiplication
    input_layer_2d = tf.expand_dims(input_layer, 0)

    # Perform the matrix multiplication and add the bias
    full_output = tf.add(tf.matmul(input_layer_2d, weight), bias)

    # Get rid of extra dimensions
    full_output_1d = tf.squeeze(full_output)
    return full_output_1d

my_full_output = fully_connected(my_maxpool_output, 5)

 

变量初始化:

# Run graph
# Initialize Variables
# NOTE(review): initialize_all_variables() is deprecated in later TF 1.x in
# favor of tf.global_variables_initializer() — kept as-is for this TF version.
init = tf.initialize_all_variables()
sess.run(init)

feed_dict = {x_input_1d: data_1d}

结果展示:

# Convolution Output: VALID padding, so 25 - 5 + 1 = 21 values.
print('Input = array of length 25')
print('Convolution w/filter, length = 5, stride size = 1, results in an array of length 21:')
print(sess.run(my_convolution_output, feed_dict=feed_dict))

# Activation Output: ReLU is element-wise, length unchanged.
print('\nInput = the above array of length 21')
print('ReLU element wise returns the array of length 21:')
print(sess.run(my_activation_output, feed_dict=feed_dict))

# Max Pool Output: window 5, stride 1, VALID -> 21 - 5 + 1 = 17 values.
print('\nInput = the above array of length 21')
print('MaxPool, window length = 5, stride size = 1, results in the array of length 17:')
print(sess.run(my_maxpool_output, feed_dict=feed_dict))

# Fully Connected Output: 17 inputs projected onto 5 outputs.
print('\nInput = the above array of length 17')
print('Fully connected layer on all four rows with five outputs:')
print(sess.run(my_full_output, feed_dict=feed_dict))
View Code

 

 

二维输入 --> 卷积 --> 激活 --> 池化 --> 全连接

样本初始化:

#---------------------------------------------------|
#-------------------2D-data-------------------------|
#---------------------------------------------------|
print('\n----------2D Arrays----------')


# Reset Graph and start a fresh session for the 2-D example.
ops.reset_default_graph()
sess = tf.Session()

#Generate 2D data: a 10x10 matrix of standard-normal samples.
data_size = [10,10]
data_2d = np.random.normal(size=data_size)

#--------Placeholder-------- (shape [10, 10])
x_input_2d = tf.placeholder(dtype=tf.float32, shape=data_size)
Init

样本数据显示如下:【shape is 10x10】

array([[ 0.08045739,  0.90452849, -1.86565117, ...,  0.61657788,
        -0.83763145,  1.83915189],
       [ 2.3158586 , -0.20828342, -0.01497319, ..., -0.61510856,
        -1.06215612, -1.11278115],
       [-1.63929625,  0.36280349, -1.15903647, ..., -0.61238442,
         1.3999655 , -0.84960736],
       ..., 
       [-1.51521566,  0.31919618, -2.9839702 , ...,  0.13801466,
         0.93950285,  0.12730852],
       [ 0.23502701, -1.94507226, -1.15972295, ..., -0.87015919,
        -0.23963207,  0.25508069],
       [-0.23149741,  0.4955804 , -0.57056282, ...,  1.49152235,
        -1.39811601,  0.51679755]])

 

构造卷积: 

# Convolution
def conv_layer_2d(input_2d, my_filter):
    """Convolve a 2-D tensor with my_filter using a 2x2 spatial stride.

    Tensorflow's conv2d() only works with 4D arrays
    [batch, height, width, channels]: we have 1 batch and 1 channel, so a
    batch dimension is added in front and a channel dimension at the end.
    """
    input_3d = tf.expand_dims(input_2d, 0)
    input_4d = tf.expand_dims(input_3d, 3)
    # Note the stride difference: [1, 2, 2, 1] moves the window two steps in
    # both spatial directions (for images strides are [1, stride, stride, 1]).
    convolution_output = tf.nn.conv2d(input_4d, filter=my_filter, strides=[1, 2, 2, 1], padding="VALID")
    # Get rid of unnecessary dimensions
    conv_output_2d = tf.squeeze(convolution_output)
    return conv_output_2d

# Create Convolutional Filter
# Filter shape = [filter_height, filter_width, in_channels, out_channels].
my_filter = tf.Variable(tf.random_normal(shape=[2, 2, 1, 1]))

# Create Convolutional Layer
my_convolution_output = conv_layer_2d(x_input_2d, my_filter)

 

构建池化:

#--------Activation--------
def activation(input_2d):
    """Element-wise ReLU."""
    return tf.nn.relu(input_2d)

# Create Activation Layer
my_activation_output = activation(my_convolution_output)

#--------Max Pool--------
def max_pool(input_2d, width, height):
    """Max-pool a 2-D tensor with a (height x width) window, stride 1.

    Just like conv2d() above, max_pool() works with 4D arrays
    [batch_size=1, height=given, width=given, channels=1].
    """
    input_3d = tf.expand_dims(input_2d, 0)
    input_4d = tf.expand_dims(input_3d, 3)
    # To stride by 2 in both spatial dims use strides=[1, 2, 2, 1].
    pool_output = tf.nn.max_pool(input_4d, ksize=[1, height, width, 1],
                                 strides=[1, 1, 1, 1], padding='VALID')
    # Get rid of unnecessary dimensions
    pool_output_2d = tf.squeeze(pool_output)
    return pool_output_2d

# Create Max-Pool Layer
my_maxpool_output = max_pool(my_activation_output, width=2, height=2)

 

构建全连接层:

#--------Fully Connected--------
def fully_connected(input_layer, num_outputs):
    """Dense layer over a 2-D input: flatten to W*H, project to num_outputs.

    NOTE(review): weight/bias are tf.random_normal tensors, not tf.Variables,
    so they are re-sampled on every session run.
    """
    # Flatten the W-by-H 2-D input into a 1-D array of length W*H.
    flat_input = tf.reshape(input_layer, [-1])
    # Weight shape = [W*H, num_outputs].
    # tf.pack was renamed tf.stack in TensorFlow 1.0; use tf.pack on older TF.
    weight_shape = tf.squeeze(tf.stack([tf.shape(flat_input), [num_outputs]]))
    # Initialize the weight
    weight = tf.random_normal(weight_shape, stddev=0.1)
    # Initialize the bias
    bias = tf.random_normal(shape=[num_outputs])
    # matmul needs 2-D operands: [1, W*H] x [W*H, num_outputs].
    input_2d = tf.expand_dims(flat_input, 0)
    # Multiply and add the bias (the final layer's computation).
    full_output = tf.add(tf.matmul(input_2d, weight), bias)
    # Squeeze back to a 1-D tensor of shape (num_outputs,).
    full_output_2d = tf.squeeze(full_output)
    return full_output_2d

# Create Fully Connected Layer
my_full_output = fully_connected(my_maxpool_output, 5)

# Run graph
# Initialize Variables
init = tf.initialize_all_variables()
sess.run(init)

feed_dict = {x_input_2d: data_2d}

 

shape理解:

Tensor("Reshape:0", shape=(16,), dtype=float32)
Tensor("ExpandDims_4:0", shape=(1, 16), dtype=float32) # 第一维只有一个元素,这个元素中也就是下一维度有16个元素

  

# Convolution Output: 2x2 filter, stride 2, VALID -> floor((10-2)/2)+1 = 5 per dim.
print('Input = [10 X 10] array')
print('2x2 Convolution, stride size = [2x2], results in the [5x5] array:')
print(sess.run(my_convolution_output, feed_dict=feed_dict))

# Activation Output: ReLU is element-wise, shape unchanged.
print('\nInput = the above [5x5] array')
print('ReLU element wise returns the [5x5] array:')
print(sess.run(my_activation_output, feed_dict=feed_dict))

# Max Pool Output: 2x2 window, stride 1, VALID -> 5 - 2 + 1 = 4 per dim.
print('\nInput = the above [5x5] array')
print('MaxPool, stride size = [1x1], results in the [4x4] array:')
print(sess.run(my_maxpool_output, feed_dict=feed_dict))

# Fully Connected Output: 4x4 = 16 inputs projected onto 5 outputs.
print('\nInput = the above [4x4] array')
print('Fully connected layer on all four rows with five outputs:')
print(sess.run(my_full_output, feed_dict=feed_dict))
Log输出

 

posted @ 2017-07-19 07:22  郝壹贰叁  阅读(970)  评论(0编辑  收藏  举报