test

test1.py

import tensorflow.compat.v1 as tf  # TF1-style API; on TF 2.x, go through the v1 compat module
import numpy as np
tf.disable_v2_behavior()

def add_layer(inputs, in_size, out_size, activation_function=None):  # layer 1: inputs [None,1], in_size=1, out_size=10 | layer 2: inputs [None,10], in_size=10, out_size=1
    with tf.name_scope('layer'):
        with tf.name_scope('weights'):
            Weights = tf.Variable(tf.random_normal([in_size, out_size]), name='w')  # [1,10] | [10,1]
        with tf.name_scope('bias'):
            biases = tf.Variable(tf.zeros([1, out_size]) + 0.1, name='b')  # [1,10]; this leading 1 is a broadcast dim, not the same '1' as in_size | [1,1]
        with tf.name_scope('wx_plus_b'):
            Wx_plus_b = tf.matmul(inputs, Weights) + biases  # note the order: inputs comes first. [None,1]x[1,10]+[1,10] (broadcasting) = [None,10] | [None,10]x[10,1]+[1,1] (broadcasting) = [None,1]
        if activation_function is None:
            outputs = Wx_plus_b
        else:
            outputs = activation_function(Wx_plus_b)
        return outputs

# training-set data and labels
train_data_x = np.linspace(-1,1,300, dtype=np.float32)[:, np.newaxis]  # shape=(300,1)
noise = np.random.normal(0, 0.05, train_data_x.shape).astype(np.float32)
label_y = np.square(train_data_x) - 0.5 + noise
with tf.name_scope('inputs'):
    inputer_x = tf.placeholder(tf.float32, [None, 1], name='inputer_x')
    inputer_y = tf.placeholder(tf.float32, [None, 1], name='inputer_y')

# the network architecture is 1-10-1
l1 = add_layer(inputer_x, 1, 10, activation_function=tf.nn.relu)
prediction = add_layer(l1, 10, 1, activation_function=None)
with tf.name_scope('loss'):
    loss = tf.reduce_mean(tf.reduce_sum(tf.square(inputer_y - prediction), axis=1))  # with [None,1] tensors this is plain MSE; axis replaces the deprecated reduction_indices
with tf.name_scope('train_scope'):
    train_step = tf.train.GradientDescentOptimizer(0.1).minimize(loss)

sess = tf.Session()

init = tf.global_variables_initializer()
sess.run(init)
writer = tf.summary.FileWriter("logs", sess.graph)  # event files go into the ./logs dir next to this .py file; view with: tensorboard --logdir=logs

for i in range(1000):
    # training
    sess.run(train_step, feed_dict={inputer_x: train_data_x, inputer_y: label_y})
    if i % 50 == 0:
        # to see the step improvement
        print(sess.run(loss, feed_dict={inputer_x: train_data_x, inputer_y: label_y}))
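
To sanity-check the fit after training, the trained network can be evaluated on the training inputs and compared against the labels (a minimal sketch reusing the session, placeholders, and arrays defined above; not part of the original script):

# after the training loop: compare a few predictions against the noisy labels
pred = sess.run(prediction, feed_dict={inputer_x: train_data_x})
for p, t in zip(pred[:5], label_y[:5]):
    print('prediction: %.4f  label: %.4f' % (p[0], t[0]))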

test2.py

import tensorflow.compat.v1 as tf  # use the public compat path, not the private tensorflow._api module
import numpy as np  # numpy is the scientific-computing module
tf.disable_v2_behavior()
# create data
x_data = np.random.rand(100).astype(np.float32)  # 100 random numbers in [0, 1)
y_data = x_data*0.1 + 0.3  # target line: w=0.1, b=0.3


#### create tensorflow structure start ###
Weights = tf.Variable(tf.random_uniform((1,), -1.0, 1.0))  # 1-D, initialized uniformly in [-1, 1]
biases = tf.Variable(tf.zeros((1,)))  # 1-D, initialized to zero

y = Weights*x_data + biases

loss = tf.reduce_mean(tf.square(y-y_data))  # mean squared error
optimizer = tf.train.GradientDescentOptimizer(0.5)  # 0.5 is the learning rate
train = optimizer.minimize(loss)

init = tf.global_variables_initializer()  # initialization; initialize_all_variables() is deprecated
#### create tensorflow structure end ###


# create a session and run the ops
sess = tf.Session()
sess.run(init)  # the session acts like a pointer; running it executes init
for step in range(201):  # train for 201 steps
    sess.run(train)
    if step % 20 == 0:  # print the training progress every 20 steps
        print(step, sess.run(Weights), sess.run(biases), sess.run(loss))
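
Since the data come exactly from y = 0.1x + 0.3 with no noise, Weights and biases should converge to roughly 0.1 and 0.3. As an independent cross-check, the same line can be recovered in closed form with numpy least squares (a sketch, not part of the original script):

A = np.stack([x_data, np.ones_like(x_data)], axis=1)  # design matrix [x, 1]
w_hat, b_hat = np.linalg.lstsq(A, y_data, rcond=None)[0]
print('closed-form fit:', w_hat, b_hat)  # expect roughly 0.1 and 0.3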