Getting to Know ❤ TensorFlow | [Love at First Sight]

Notes

🔎 Code-reproduction notes
📘 Reference book: 【Hands-On Machine Learning with Scikit-Learn & TensorFlow】
📝 Chapter 9: 【Up and Running with TensorFlow】
Preparation 💦

🔗 Set up the deep-learning environment
Import the required packages
import tensorflow as tf
import os
import numpy as np
from sklearn.datasets import fetch_california_housing

os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'  # suppress the AVX2 warning
Four Ways to Use a Session 🔢

Creating Your First Graph and Running It in a Session
def session_usage():
    print("tensorflow version:", tf.__version__)
    # The following code does not actually perform any computation;
    # it just builds a computation graph.
    x = tf.Variable(3, name="x")
    y = tf.Variable(4, name="y")
    f = x*x*y + y + 2

    # 1. Explicit session: run each initializer, then evaluate f.
    sess = tf.Session()
    sess.run(x.initializer)
    sess.run(y.initializer)
    result = sess.run(f)
    print("Result of method 1:", result)
    sess.close()

    # 2. "with" block: the session becomes the default session.
    with tf.Session() as sess_2:
        x.initializer.run()
        y.initializer.run()
        result = f.eval()  # equivalent to result = tf.get_default_session().run(f)
        print("Result of method 2:", result)

    # 3. Initialize all variables at once with an init node.
    init = tf.global_variables_initializer()  # prepare an init node
    with tf.Session() as sess_3:
        init.run()
        result = f.eval()
        print("Result of method 3:", result)

    # 4. InteractiveSession installs itself as the default session
    #    (handy inside Jupyter or a Python shell).
    sess_4 = tf.InteractiveSession()
    init.run()
    result = f.eval()
    print("Result of method 4:", result)
    sess_4.close()


session_usage()
Lifecycle of Node Values 🕑

In single-process TensorFlow, multiple sessions do not share any state, even if they reuse the same graph (each session would have its own copy of every variable). In distributed TensorFlow, variable state is stored on the servers, not in the sessions, so multiple sessions can share the same variables.
def lifecycle_of_node_value():
    w = tf.constant(3)
    x = w + 2
    y = x + 5
    z = x * 3

    # Evaluating y and z separately makes TensorFlow evaluate w and x twice.
    with tf.Session() as sess_1:
        print(y.eval())
        print(z.eval())

    # A variable starts its life when its initializer is run,
    # and it ends when the session is closed.
    # Evaluate both y and z in just one graph run:
    with tf.Session() as sess_2:
        y_val, z_val = sess_2.run([y, z])
        print(y_val)
        print(z_val)


lifecycle_of_node_value()
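
As a quick check of the note above that sessions do not share state, here is a minimal sketch (my addition, assuming TensorFlow 1.x): each session keeps its own copy of the variable, so an update made in one session is invisible to the other.
def session_state_sketch():
    tf.reset_default_graph()
    v = tf.Variable(10, name="v")
    init = tf.global_variables_initializer()

    sess_a = tf.Session()
    sess_b = tf.Session()
    sess_a.run(init)
    sess_b.run(init)
    sess_a.run(v.assign_add(5))  # only changes sess_a's copy of v
    print(sess_a.run(v))         # 15
    print(sess_b.run(v))         # 10
    sess_a.close()
    sess_b.close()


# session_state_sketch()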
Linear Regression | Normal Equation 📌

Using the Normal Equation 🔎
Note that housing.target is a 1D array, but we need to reshape it to a column vector to compute theta. Recall that NumPy's reshape() function accepts -1 (meaning "unspecified") for one of the dimensions: that dimension will be computed based on the array's length and the remaining dimensions.
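A quick NumPy illustration of reshape(-1, 1) (a small sketch I added for reference):
a = np.array([1.0, 2.0, 3.0])  # shape (3,)
col = a.reshape(-1, 1)         # -1 is inferred from the length, so the shape becomes (3, 1)
print(col.shape)               # (3, 1)
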
housing = fetch_california_housing()
m, n = housing.data.shape  # 20640 * 8
housing_data_plus_bias = np.c_[np.ones((m, 1)), housing.data]  # 20640 * 9


def normal_equation_linear_regression():
    X = tf.constant(housing_data_plus_bias, dtype=tf.float32, name="X")
    y = tf.constant(housing.target.reshape(-1, 1), dtype=tf.float32, name="y")
    XT = tf.transpose(X)
    # the Normal Equation: theta = (X^T · X)^(-1) · X^T · y
    theta = tf.matmul(tf.matmul(tf.matrix_inverse(tf.matmul(XT, X)), XT), y)

    with tf.Session() as sess:
        theta_value = theta.eval()
        print(theta_value)


normal_equation_linear_regression()
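
As a sanity check (my addition, not part of the original notes), the same normal equation computed with plain NumPy should give a very similar theta:
def normal_equation_numpy_check():
    X_np = housing_data_plus_bias
    y_np = housing.target.reshape(-1, 1)
    # theta = (X^T · X)^(-1) · X^T · y, computed with NumPy only
    theta_np = np.linalg.inv(X_np.T.dot(X_np)).dot(X_np.T).dot(y_np)
    print(theta_np)


# normal_equation_numpy_check()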
Linear Regression | Batch Gradient Descent 📌

Implementing Gradient Descent Using Batch Gradient Descent 🔎
When using Gradient Descent, remember that it is important to first normalize the input feature vectors, or else training may be much slower. You can do this using TensorFlow, NumPy, Scikit-Learn’s StandardScaler, or any other solution you prefer.
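Before the full training function, here is a small sketch (my addition) of what StandardScaler does to the housing features: every column ends up with roughly zero mean and unit variance.
def scaling_sketch():
    from sklearn.preprocessing import StandardScaler
    scaler = StandardScaler()
    demo = scaler.fit_transform(housing.data)
    print(demo.mean(axis=0).round(3))  # roughly 0 for every feature
    print(demo.std(axis=0).round(3))   # roughly 1 for every feature


# scaling_sketch()
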
def batch_gradient_descent_linear_regression():
    from sklearn.preprocessing import StandardScaler
    n_epochs = 1000
    learning_rate = 0.01
    scaler = StandardScaler()
    scaled_housing_data = scaler.fit_transform(housing.data)
    scaled_housing_data_plus_bias = np.c_[np.ones((m, 1)), scaled_housing_data]

    X = tf.constant(scaled_housing_data_plus_bias, dtype=tf.float32, name="X")
    y = tf.constant(housing.target.reshape(-1, 1), dtype=tf.float32, name="y")
    theta = tf.Variable(tf.random_uniform([n + 1, 1], -1.0, 1.0), name="theta")
    y_pred = tf.matmul(X, theta, name="predictions")
    error = y_pred - y
    mse = tf.reduce_mean(tf.square(error), name="mse")
    # gradients = 2/m * tf.matmul(tf.transpose(X), error)  # derived from the cost function
    gradients = tf.gradients(mse, [theta])[0]  # use autodiff to compute the gradients automatically
    # implement the Batch Gradient Descent step: theta(next step) = theta - eta * ∇θ MSE(theta)
    training_op = tf.assign(theta, theta - learning_rate * gradients)

    init = tf.global_variables_initializer()

    with tf.Session() as sess:
        sess.run(init)
        for epoch in range(n_epochs):
            if epoch % 100 == 0:
                print("Epoch", epoch, "MSE =", mse.eval())
            sess.run(training_op)
        best_theta = theta.eval()
        print("Best theta: {:}".format([theta[0] for theta in best_theta]))


# batch_gradient_descent_linear_regression()
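
To see what tf.gradients() does in isolation, here is a tiny autodiff sketch (my addition, TF 1.x): it builds nodes that compute the derivative of f with respect to x.
def autodiff_sketch():
    tf.reset_default_graph()
    x = tf.Variable(3.0, name="x")
    f = x ** 2 + 2 * x              # df/dx = 2x + 2
    grad = tf.gradients(f, [x])[0]  # symbolic gradient node
    with tf.Session() as sess:
        sess.run(x.initializer)
        print(sess.run(grad))       # 8.0 at x = 3


# autodiff_sketch()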
Linear Regression | Using an Optimizer 📌

Using an Optimizer with Batch Gradient Descent
def optimizer_gradient_descent_linear_regression():
    from sklearn.preprocessing import StandardScaler
    n_epochs = 1000
    learning_rate = 0.01
    scaler = StandardScaler()
    scaled_housing_data = scaler.fit_transform(housing.data)
    scaled_housing_data_plus_bias = np.c_[np.ones((m, 1)), scaled_housing_data]

    X = tf.constant(scaled_housing_data_plus_bias, dtype=tf.float32, name="X")
    y = tf.constant(housing.target.reshape(-1, 1), dtype=tf.float32, name="y")
    theta = tf.Variable(tf.random_uniform([n + 1, 1], -1.0, 1.0), name="theta")
    y_pred = tf.matmul(X, theta, name="predictions")
    error = y_pred - y
    mse = tf.reduce_mean(tf.square(error), name="mse")
    optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
    # using another optimizer
    # optimizer = tf.train.MomentumOptimizer(learning_rate=learning_rate, momentum=0.9)
    training_op = optimizer.minimize(mse)

    init = tf.global_variables_initializer()

    with tf.Session() as sess:
        sess.run(init)
        for epoch in range(n_epochs):
            if epoch % 100 == 0:
                print("Epoch", epoch, "MSE =", mse.eval())
            sess.run(training_op)
        best_theta = theta.eval()
        print("Best theta: {:}".format([theta[0] for theta in best_theta]))


optimizer_gradient_descent_linear_regression()
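
A minimal, self-contained sketch (my addition) of what optimizer.minimize() does: it computes the gradients of the loss and applies a gradient-descent update to the variable.
def minimize_sketch():
    tf.reset_default_graph()
    w = tf.Variable(5.0, name="w")
    loss = tf.square(w - 3.0)
    optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.1)
    training_op = optimizer.minimize(loss)  # builds gradient + update nodes for w
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        for _ in range(50):
            sess.run(training_op)
        print(sess.run(w))                  # close to 3.0


# minimize_sketch()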
Linear Regression | Mini-batch Gradient Descent 📌

Using an Optimizer with Mini-batch Gradient Descent
We need a way to replace X and y at every iteration with the next mini-batch. The simplest way to do this is to use placeholder nodes. These nodes are special because they don't actually perform any computation, they just output the data you tell them to output at runtime. They are typically used to pass the training data to TensorFlow during training. If you don't specify a value at runtime for a placeholder, you get an exception.
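A tiny placeholder sketch first (my addition, TF 1.x): the node simply outputs whatever is fed through feed_dict at run time.
def placeholder_sketch():
    tf.reset_default_graph()
    A = tf.placeholder(tf.float32, shape=(None, 3), name="A")
    B = A + 5
    with tf.Session() as sess:
        print(sess.run(B, feed_dict={A: [[1, 2, 3]]}))             # [[6. 7. 8.]]
        print(sess.run(B, feed_dict={A: [[4, 5, 6], [7, 8, 9]]}))  # two rows this time


# placeholder_sketch()
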
def mini_batch_gradient_descent_linear_regression():
    from sklearn.preprocessing import StandardScaler
    n_epochs = 1000
    learning_rate = 0.01
    scaler = StandardScaler()
    scaled_housing_data = scaler.fit_transform(housing.data)
    scaled_housing_data_plus_bias = np.c_[np.ones((m, 1)), scaled_housing_data]

    X = tf.placeholder(tf.float32, shape=(None, n+1), name="X")
    y = tf.placeholder(tf.float32, shape=(None, 1))

    theta = tf.Variable(tf.random_uniform([n + 1, 1], -1.0, 1.0, seed=42), name="theta")
    y_pred = tf.matmul(X, theta, name="predictions")
    error = y_pred - y
    mse = tf.reduce_mean(tf.square(error), name="mse")
    optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
    # using another optimizer
    # optimizer = tf.train.MomentumOptimizer(learning_rate=learning_rate, momentum=0.9)
    training_op = optimizer.minimize(mse)

    init = tf.global_variables_initializer()

    n_epochs = 10  # smaller for mini-batch
    batch_size = 100
    n_batchs = int(np.ceil(m / batch_size))

    def fetch_batch(epoch, batch_index, batch_size):
        np.random.seed(epoch * n_batchs + batch_index)
        indices = np.random.randint(m, size=batch_size)
        X_batch = scaled_housing_data_plus_bias[indices]
        y_batch = housing.target.reshape(-1, 1)[indices]
        return X_batch, y_batch

    with tf.Session() as sess:
        sess.run(init)
        for epoch in range(n_epochs):
            for batch_index in range(n_batchs):
                X_batch, y_batch = fetch_batch(epoch, batch_index, batch_size)
                sess.run(training_op, feed_dict={X: X_batch, y: y_batch})

        best_theta = theta.eval()
        print("Best theta: {:}".format([theta[0] for theta in best_theta]))


mini_batch_gradient_descent_linear_regression()
Saving and Restoring Models 📌

Saving and Restoring Models
def save_and_restore_model():
    tf.reset_default_graph()
    from sklearn.preprocessing import StandardScaler
    n_epochs = 1000
    learning_rate = 0.01
    scaler = StandardScaler()
    scaled_housing_data = scaler.fit_transform(housing.data)
    scaled_housing_data_plus_bias = np.c_[np.ones((m, 1)), scaled_housing_data]

    X = tf.constant(scaled_housing_data_plus_bias, dtype=tf.float32, name="X")
    y = tf.constant(housing.target.reshape(-1, 1), dtype=tf.float32, name="y")
    theta = tf.Variable(tf.random_uniform([n + 1, 1], -1.0, 1.0, seed=42), name="theta")
    y_pred = tf.matmul(X, theta, name="predictions")
    error = y_pred - y
    mse = tf.reduce_mean(tf.square(error), name="mse")
    optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
    training_op = optimizer.minimize(mse)

    # save the model
    init = tf.global_variables_initializer()
    saver = tf.train.Saver()
    # to restore theta under a different name, such as "weights":
    # saver = tf.train.Saver({"weights": theta})

    with tf.Session() as sess:
        sess.run(init)
        for epoch in range(n_epochs):
            if epoch % 100 == 0:
                print("Epoch", epoch, "MSE =", mse.eval())
                saver.save(sess, save_path="model_saved/my_model.ckpt")
            sess.run(training_op)

        best_theta = theta.eval()
        print("Best theta: {:}".format([theta[0] for theta in best_theta]))
        saver.save(sess, save_path="model_saved/my_model_final.ckpt")

    # restore the model
    tf.reset_default_graph()  # notice that we start with an empty graph
    # load the graph structure
    saver = tf.train.import_meta_graph("model_saved/my_model_final.ckpt.meta")
    theta = tf.get_default_graph().get_tensor_by_name("theta:0")

    with tf.Session() as sess:
        saver.restore(sess, "model_saved/my_model_final.ckpt")
        best_theta_restored = theta.eval()
        print("Are the two theta values close?", np.allclose(best_theta, best_theta_restored))


# save_and_restore_model()
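
If the graph is rebuilt in code instead of loading the .meta file, restoring is even simpler. A minimal sketch (my addition, assuming the final checkpoint above has already been written):
def restore_without_meta_graph_sketch():
    tf.reset_default_graph()
    theta = tf.Variable(tf.zeros([n + 1, 1]), name="theta")  # same name and shape as when it was saved
    saver = tf.train.Saver()
    with tf.Session() as sess:
        saver.restore(sess, "model_saved/my_model_final.ckpt")  # no initializer needed
        print(theta.eval())


# restore_without_meta_graph_sketch()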
Visualization 📌

Visualizing the Graph and Training Curves
Using TensorBoard
def log_mse_for_visualization():
    from sklearn.preprocessing import StandardScaler
    from datetime import datetime

    tf.reset_default_graph()
    n_epochs = 1000
    learning_rate = 0.01
    scaler = StandardScaler()
    scaled_housing_data = scaler.fit_transform(housing.data)
    scaled_housing_data_plus_bias = np.c_[np.ones((m, 1)), scaled_housing_data]

    X = tf.placeholder(tf.float32, shape=(None, n+1), name="X")
    y = tf.placeholder(tf.float32, shape=(None, 1))

    theta = tf.Variable(tf.random_uniform([n + 1, 1], -1.0, 1.0, seed=42), name="theta")
    y_pred = tf.matmul(X, theta, name="predictions")
    error = y_pred - y
    mse = tf.reduce_mean(tf.square(error), name="mse")
    optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
    # using another optimizer
    # optimizer = tf.train.MomentumOptimizer(learning_rate=learning_rate, momentum=0.9)
    training_op = optimizer.minimize(mse)

    init = tf.global_variables_initializer()

    # added for log
    now = datetime.utcnow().strftime("%Y%m%d%H%M%S")
    root_logdir = "tf_logs"
    logdir = "{}/run-{}".format(root_logdir, now)
    mse_summary = tf.summary.scalar("MSE", mse)
    file_writer = tf.summary.FileWriter(logdir, tf.get_default_graph())

    n_epochs = 10  # smaller for mini-batch
    batch_size = 100
    n_batchs = int(np.ceil(m / batch_size))

    def fetch_batch(epoch, batch_index, batch_size):
        np.random.seed(epoch * n_batchs + batch_index)
        indices = np.random.randint(m, size=batch_size)
        X_batch = scaled_housing_data_plus_bias[indices]
        y_batch = housing.target.reshape(-1, 1)[indices]
        return X_batch, y_batch

    with tf.Session() as sess:
        sess.run(init)
        for epoch in range(n_epochs):
            for batch_index in range(n_batchs):
                X_batch, y_batch = fetch_batch(epoch, batch_index, batch_size)
                # added for log
                if batch_index % 10 == 0:
                    summary_str = mse_summary.eval(feed_dict={X: X_batch, y: y_batch})
                    step = epoch * n_batchs + batch_index
                    file_writer.add_summary(summary_str, step)
                sess.run(training_op, feed_dict={X: X_batch, y: y_batch})

        file_writer.close()
        best_theta = theta.eval()
        print("Best theta: {:}".format([theta[0] for theta in best_theta]))


log_mse_for_visualization()
"""
# cmd中输入以下命令,查看可视化结果
activate py36_tf
# cd 到tf_logs的上级目录
cd xxxxx/
tensorboard --logdir tf_logs/
# 复制出现的网址,用Chrome浏览器打开
"""
Tips | Name Scopes 🔍

Using Name Scopes
def name_scope_for_tensorflow_skills():
    from sklearn.preprocessing import StandardScaler
    from datetime import datetime

    tf.reset_default_graph()
    n_epochs = 1000
    learning_rate = 0.01
    scaler = StandardScaler()
    scaled_housing_data = scaler.fit_transform(housing.data)
    scaled_housing_data_plus_bias = np.c_[np.ones((m, 1)), scaled_housing_data]

    X = tf.placeholder(tf.float32, shape=(None, n+1), name="X")
    y = tf.placeholder(tf.float32, shape=(None, 1))

    theta = tf.Variable(tf.random_uniform([n + 1, 1], -1.0, 1.0, seed=42), name="theta")
    y_pred = tf.matmul(X, theta, name="predictions")
    # added for name scope skill
    with tf.name_scope("loss") as scope:
        error = y_pred - y
        mse = tf.reduce_mean(tf.square(error), name="mse")

    optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
    training_op = optimizer.minimize(mse)

    init = tf.global_variables_initializer()

    # added for log
    now = datetime.utcnow().strftime("%Y%m%d%H%M%S")
    root_logdir = "tf_logs"
    logdir = "{}/run-{}".format(root_logdir, now)

    mse_summary = tf.summary.scalar("MSE", mse)
    file_writer = tf.summary.FileWriter(logdir, tf.get_default_graph())

    n_epochs = 10  # smaller for mini-batch
    batch_size = 100
    n_batchs = int(np.ceil(m / batch_size))

    def fetch_batch(epoch, batch_index, batch_size):
        np.random.seed(epoch * n_batchs + batch_index)
        indices = np.random.randint(m, size=batch_size)
        X_batch = scaled_housing_data_plus_bias[indices]
        y_batch = housing.target.reshape(-1, 1)[indices]
        return X_batch, y_batch

    with tf.Session() as sess:
        sess.run(init)
        for epoch in range(n_epochs):
            for batch_index in range(n_batchs):
                X_batch, y_batch = fetch_batch(epoch, batch_index, batch_size)
                # added for log
                if batch_index % 10 == 0:
                    summary_str = mse_summary.eval(feed_dict={X: X_batch, y: y_batch})
                    step = epoch * n_batchs + batch_index
                    file_writer.add_summary(summary_str, step)
                sess.run(training_op, feed_dict={X: X_batch, y: y_batch})

        file_writer.flush()
        file_writer.close()
        best_theta = theta.eval()
        print("Best theta: {:}".format([theta[0] for theta in best_theta]))


# name_scope_for_tensorflow_skills()
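
The effect of a name scope on op names, in isolation (a tiny sketch I added): ops created inside the scope get a "loss/" prefix.
def name_scope_sketch():
    tf.reset_default_graph()
    a = tf.constant(1.0, name="a")
    with tf.name_scope("loss"):
        b = tf.add(a, 2.0, name="b")
    print(a.op.name)  # a
    print(b.op.name)  # loss/b


# name_scope_sketch()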
Tips | Basic Modularity 🔍

Using Common Modularity to build multiple ReLUs
def common_modularity_for_tensorflow_skills():
    tf.reset_default_graph()

    def relu(X):
        w_shape = (int(X.get_shape()[1]), 1)
        w = tf.Variable(tf.random_normal(w_shape), name="weights")
        b = tf.Variable(0.0, name="bias")
        z = tf.add(tf.matmul(X, w), b, name="z")
        return tf.maximum(z, 0., name="relu")

    n_feature = 3
    X = tf.placeholder(tf.float32, shape=(None, n_feature), name="X")
    relus = [relu(X) for i in range(5)]

    output = tf.add_n(relus, name="output")
    print("common modularity coding test: {:}".format([relu.op.name for relu in relus]))


common_modularity_for_tensorflow_skills()
Tips | Better Modularity 🔍

Using Better Modularity to build multiple ReLUs
Note that when you create a node, TensorFlow checks whether its name already exists, and if it does it appends an underscore followed by an index to make the name unique.
def better_modularity_for_tensorflow_skills():
    tf.reset_default_graph()

    def relu(X):
        # added for better modularity
        with tf.name_scope("relu"):
            w_shape = (int(X.get_shape()[1]), 1)
            w = tf.Variable(tf.random_normal(w_shape), name="weights")
            b = tf.Variable(0.0, name="bias")
            z = tf.add(tf.matmul(X, w), b, name="z")
            return tf.maximum(z, 0., name="max")

    n_feature = 3
    X = tf.placeholder(tf.float32, shape=(None, n_feature), name="X")
    relus = [relu(X) for i in range(5)]

    output = tf.add_n(relus, name="output")
    print("better modularity coding test: {:}".format([relu.op.name for relu in relus]))


better_modularity_for_tensorflow_skills()
Tips | Sharing Variables via a Parameter 🔍

Sharing Variables: Using a Parameter
def using_parameter_sharing_variables_for_tensorflow_skills():
    tf.reset_default_graph()

    def relu(X, threshold):
        with tf.name_scope("relu"):
            w_shape = (int(X.get_shape()[1]), 1)
            w = tf.Variable(tf.random_normal(w_shape), name="weights")
            b = tf.Variable(0.0, name="bias")
            z = tf.add(tf.matmul(X, w), b, name="z")
            return tf.maximum(z, threshold, name="max")

    threshold = tf.Variable(0.0, name="threshold")
    n_feature = 3
    X = tf.placeholder(tf.float32, shape=(None, n_feature), name="X")
    relus = [relu(X, threshold) for i in range(5)]
    output = tf.add_n(relus, name="output")
    print("classic sharing variables using parameter coding test: {:}".format(output))


using_parameter_sharing_variables_for_tensorflow_skills()
Tips | Sharing Variables via a Function Attribute 🔍

Sharing Variables: Using a Function Attribute
def using_attribute_sharing_variables_for_tensorflow_skills():
    tf.reset_default_graph()

    def relu(X):
        with tf.name_scope("relu"):
            if not hasattr(relu, "threshold"):
                relu.threshold = tf.Variable(0.0, name="threshold")
            w_shape = (int(X.get_shape()[1]), 1)
            w = tf.Variable(tf.random_normal(w_shape), name="weights")
            b = tf.Variable(0.0, name="bias")
            z = tf.add(tf.matmul(X, w), b, name="z")
            return tf.maximum(z, relu.threshold, name="max")

    n_feature = 3
    X = tf.placeholder(tf.float32, shape=(None, n_feature), name="X")
    relus = [relu(X) for i in range(5)]
    output = tf.add_n(relus, name="output")
    print("sharing variables using function attribute coding test: {:}".format(output))


using_attribute_sharing_variables_for_tensorflow_skills()
Tips | Sharing Variables with get_variable() 🔍

Sharing Variables: Using Functions Provided by TensorFlow
The idea is to use the get_variable() function to create the shared variable if it does not exist yet, or reuse it if it already exists. The desired behavior (creating or reusing) is controlled by an attribute of the current variable_scope().
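In isolation, the create-then-reuse behaviour looks like this (a small sketch I added): the first scope creates the variable and the reusing scope returns the same object.
def get_variable_reuse_sketch():
    tf.reset_default_graph()
    with tf.variable_scope("demo"):
        t1 = tf.get_variable("t", shape=(), initializer=tf.constant_initializer(0.0))
    with tf.variable_scope("demo", reuse=True):
        t2 = tf.get_variable("t")
    print(t1 is t2)  # True: both names refer to the same variable object


# get_variable_reuse_sketch()
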
def using_function_sharing_variables_for_tensorflow_skills():
    tf.reset_default_graph()

    def relu(X):
        with tf.variable_scope("relu", reuse=True):  # reuse the threshold variable created by get_variable()
            threshold = tf.get_variable("threshold")
            w_shape = (int(X.get_shape()[1]), 1)
            w = tf.Variable(tf.random_normal(w_shape), name="weights")
            b = tf.Variable(0.0, name="bias")
            z = tf.add(tf.matmul(X, w), b, name="z")
            return tf.maximum(z, threshold, name="max")

    n_feature = 3
    X = tf.placeholder(tf.float32, shape=(None, n_feature), name="X")
    with tf.variable_scope("relu"):  # define the threshold variable for the first time
        threshold = tf.get_variable("threshold", shape=(),
                                    initializer=tf.constant_initializer(0.0))
    relus = [relu(X) for relu_index in range(5)]
    output = tf.add_n(relus, name="output")

    file_writer = tf.summary.FileWriter("logs/relu9", tf.get_default_graph())
    file_writer.close()


using_function_sharing_variables_for_tensorflow_skills()
Tips | Sharing Variables with get_variable() (Improved) 🔍

Sharing Variables: Using Functions Provided by TensorFlow
Solving the first-call problem
The resulting graph is slightly different than before, since the shared variable lives within the first ReLU.
def using_better_function_sharing_variables_for_tensorflow_skills():
    tf.reset_default_graph()

    def relu(X):
        threshold = tf.get_variable("threshold", shape=(),
                                    initializer=tf.constant_initializer(0.0))
        w_shape = (int(X.get_shape()[1]), 1)
        w = tf.Variable(tf.random_normal(w_shape), name="weights")
        b = tf.Variable(0.0, name="bias")
        z = tf.add(tf.matmul(X, w), b, name="z")
        return tf.maximum(z, threshold, name="max")

    n_feature = 3
    X = tf.placeholder(tf.float32, shape=(None, n_feature), name="X")
    relus = []

    for relu_index in range(5):
        with tf.variable_scope("relu", reuse=(relu_index >= 1)) as scope:
            relus.append(relu(X))
    output = tf.add_n(relus, name="output")

    file_writer = tf.summary.FileWriter("logs/relu9_modified", tf.get_default_graph())
    file_writer.close()


using_better_function_sharing_variables_for_tensorflow_skills()
TensorFlow | Exercise ✒

TensorFlow Skills Exercise
Variable and name scope exercise
def variable_scope_exercise():
    tf.reset_default_graph()

    with tf.variable_scope("my_scope"):
        x0 = tf.get_variable("x", shape=(), initializer=tf.constant_initializer(0.))
        x1 = tf.Variable(0., name="x")
        x2 = tf.Variable(0., name="x")

    with tf.variable_scope("my_scope", reuse=True):
        x3 = tf.get_variable("x")
        x4 = tf.Variable(0., name="x")

    with tf.variable_scope("", default_name="", reuse=True):
        x5 = tf.get_variable("my_scope/x")

    print("x0:", x0.op.name)  # x0: my_scope/x
    print("x1:", x1.op.name)  # x1: my_scope/x_1
    print("x2:", x2.op.name)  # x2: my_scope/x_2
    print("x3:", x3.op.name)  # x3: my_scope/x
    print("x4:", x4.op.name)  # x4: my_scope_1/x
    print("x5:", x5.op.name)  # x5: my_scope/x
    print(x0 is x3 and x3 is x5)  # True


variable_scope_exercise()
The End 🏁🏁

Congratulations🎈