TensorFlow学习笔记1 会话Session与张量

import tensorflow as tf
import numpy as np

# Run in graph mode: placeholders require the TF1-style execution model.
tf.compat.v1.disable_eager_execution()

# Placeholder: carries no initial value; it is fed when the graph is executed.
x = tf.compat.v1.placeholder(shape=[1, 10], dtype=tf.float32, name='x')

# Variables must be initialized before use; random_uniform samples
# uniformly between minval and maxval.
W = tf.compat.v1.Variable(
    tf.compat.v1.random_uniform(shape=[10, 5], minval=-0.1, maxval=0.1,
                                dtype=tf.float32),
    name='W')
b = tf.compat.v1.Variable(tf.zeros(shape=[5], dtype=tf.float32), name='b')

# One dense layer with a sigmoid activation: h = sigmoid(x @ W + b).
h = tf.nn.sigmoid(tf.matmul(x, W) + b)

with tf.compat.v1.Session() as sess:
    sess.run(tf.compat.v1.global_variables_initializer())
    print(sess.run(h, feed_dict={x: np.random.rand(1, 10)}))
[[0.52614367 0.5181997  0.5162186  0.48906413 0.46755365]]

import tensorflow as tf
import numpy as np
# Run in graph mode: placeholders require the TF1-style execution model.
tf.compat.v1.disable_eager_execution()

graph = tf.Graph()  # create an explicit graph
# InteractiveSession installs itself as the default session on construction,
# so Tensor.eval() and Operation.run() work without passing the session.
session = tf.compat.v1.InteractiveSession(graph=graph)

# Placeholder: carries no initial value; it is fed when the graph is executed.
x = tf.compat.v1.placeholder(shape=[1, 10], dtype=tf.float32, name='x')

# Variables must be initialized before use; random_uniform samples
# uniformly between minval and maxval.
W = tf.compat.v1.Variable(
    tf.compat.v1.random_uniform(shape=[10, 5], minval=-0.1, maxval=0.1,
                                dtype=tf.float32),
    name='W')
b = tf.compat.v1.Variable(tf.zeros(shape=[5], dtype=tf.float32), name='b')

# One dense layer with a sigmoid activation: h = sigmoid(x @ W + b).
h = tf.nn.sigmoid(tf.matmul(x, W) + b)

# Initialize all variables in the graph
# (equivalent to W.initializer.run(); b.initializer.run()).
tf.compat.v1.global_variables_initializer().run()
h_eval = session.run(h, feed_dict={x: np.random.rand(1, 10)})
print(h_eval)
session.close()  # release the session's resources
[[0.54421955 0.5040116  0.5199564  0.5172269  0.54251057]]

tf.Session() 与 tf.InteractiveSession()的区别

tf.InteractiveSession 在构造时就把自己设为默认会话，因此 tf.Tensor.eval、tf.Operation.run 方法都可以在当前会话中直接进行操作运算；而 tf.Session 只有在作为上下文管理器（with 语句）进入时才成为默认会话，否则必须显式地通过 sess.run(...) 或 eval(session=sess) 指定会话。

举个栗子

# NOTE: tf.InteractiveSession was removed in TF2; use tf.compat.v1, which is
# what every other snippet in this file already uses.
# InteractiveSession installs itself as the default session on construction,
# so eval()/run() need no explicit session argument.
sess = tf.compat.v1.InteractiveSession()
a = tf.constant(5.0)
b = tf.constant(6.0)
c = a * b
# We can just use 'c.eval()' without passing 'sess'
print(c.eval())
sess.close()

等价于

a = tf.constant(5.0)
b = tf.constant(6.0)
c = a * b
# NOTE: tf.Session was removed in TF2; use tf.compat.v1 for consistency with
# the other snippets in this file. A plain Session does not become the default
# session on construction — only while entered as a context manager, which is
# why eval() works inside the `with` block.
with tf.compat.v1.Session():
  # We can also use 'c.eval()' here.
  print(c.eval())
posted @ 2020-05-19 17:25  Jessyswing  阅读(562)  评论(0编辑  收藏  举报