TensorFlow Basics: Variables
# Optimizing a multiplication op
# coding:utf-8
__author__ = 'similarface'
import tensorflow as tf

sess = tf.Session()
# Create a variable initialized from a constant tensor
a = tf.Variable(tf.constant(4.))
x_val = 5.
x_data = tf.placeholder(dtype=tf.float32)
# Add the multiplication op to the computation graph
multiplication = tf.multiply(a, x_data)
# Declare the loss function as the L2 distance between the output and the target value 100
loss = tf.square(tf.subtract(multiplication, 100.))
# Initialize the model variable
init = tf.global_variables_initializer()
sess.run(init)
# Declare standard gradient descent as the optimization algorithm
my_opt = tf.train.GradientDescentOptimizer(0.01)
train_step = my_opt.minimize(loss)
print('Optimizing multiplication output to 100.')
for i in range(10):
    sess.run(train_step, feed_dict={x_data: x_val})
    a_val = sess.run(a)
    mult_output = sess.run(multiplication, feed_dict={x_data: x_val})
    print(str(a_val) + ' * ' + str(x_val) + ' = ' + str(mult_output))
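To see what GradientDescentOptimizer is doing here, the same update can be written by hand: with loss L(a) = (a*x - 100)^2, the gradient is dL/da = 2*(a*x - 100)*x, and each step moves a by -0.01 * dL/da. A minimal hand-rolled sketch of that update (plain Python, not TensorFlow API; it should reproduce the loop above step for step):

# Hand-rolled version of the loop above: gradient descent on loss = (a * x - 100)^2,
# starting from a = 4.0 with learning rate 0.01, matching the TensorFlow example
a, x, lr = 4.0, 5.0, 0.01
for i in range(10):
    grad = 2.0 * (a * x - 100.0) * x  # dL/da
    a = a - lr * grad
    print(str(a) + ' * ' + str(x) + ' = ' + str(a * x))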
__author__ = 'similarface'
from tensorflow.python.framework import ops
import tensorflow as tf
'''
y = a * x + b
'''
ops.reset_default_graph()
sess = tf.Session()
a = tf.Variable(tf.constant(1.))
b = tf.Variable(tf.constant(1.))
x_val = 5.
x_data = tf.placeholder(dtype=tf.float32)
two_gate = tf.add(tf.multiply(a, x_data), b)
# Loss is the L2 distance between the gate output and the target value 100
loss = tf.square(tf.subtract(two_gate, 100.))
my_opt = tf.train.GradientDescentOptimizer(0.01)
train_step = my_opt.minimize(loss)
init = tf.global_variables_initializer()
sess.run(init)
print('\nOptimizing Two Gate Output to 100.')
for i in range(10):
    # Run the train step
    sess.run(train_step, feed_dict={x_data: x_val})
    # Get the a and b values
    a_val, b_val = (sess.run(a), sess.run(b))
    # Run the two-gate graph output
    two_gate_output = sess.run(two_gate, feed_dict={x_data: x_val})
    print(str(a_val) + ' * ' + str(x_val) + ' + ' + str(b_val) + '= ' + str(two_gate_output))
'''
result:
10.4 * 5.0 + 2.88= 54.88
14.912 * 5.0 + 3.7824= 78.3424
17.0778 * 5.0 + 4.21555= 89.6043
18.1173 * 5.0 + 4.42347= 95.0101
18.6163 * 5.0 + 4.52326= 97.6048
18.8558 * 5.0 + 4.57117= 98.8503
18.9708 * 5.0 + 4.59416= 99.4482
19.026 * 5.0 + 4.6052= 99.7351
19.0525 * 5.0 + 4.61049= 99.8729
19.0652 * 5.0 + 4.61304= 99.939
'''
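One pattern visible in these numbers: the error e = a*x + b - 100 drives both gradients, dL/da = 2*e*x and dL/db = 2*e, so each update to a is exactly x = 5 times the update to b, and a - 1 = 5*(b - 1) holds at every step (e.g. 10.4 - 1 = 5 * (2.88 - 1)). A quick check against the printed values above (a throwaway verification snippet, not part of the original script):

# Verify a - a0 == 5 * (b - b0) for the first few printed (a, b) pairs,
# since dL/da = x * dL/db with x = 5 and both variables start at 1.0
pairs = [(10.4, 2.88), (14.912, 3.7824), (17.0778, 4.21555), (18.1173, 4.42347)]
for a_val, b_val in pairs:
    # tolerance absorbs rounding in the printed values
    assert abs((a_val - 1.0) - 5.0 * (b_val - 1.0)) < 1e-3
print('a - 1 == 5 * (b - 1) holds for all checked steps')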
# coding:utf-8
__author__ = 'similarface'
'''
Using Placeholders and Variables

Variables are values that TensorFlow tracks and optimizes during training.
Placeholders reserve a slot of a given type and shape for data that will be
fed into the graph.
'''
import tensorflow as tf
import numpy as np

l_var = tf.Variable(tf.zeros([2, 3]))
sess = tf.Session()
init_all = tf.global_variables_initializer()
sess.run(init_all)
print(sess.run(l_var))

x = tf.placeholder(tf.float32, shape=[2, 2])
# identity: y = x
y = tf.identity(x)
x_vals = np.random.rand(2, 2)
# Placeholders must be fed data through feed_dict
print(sess.run(y, feed_dict={x: x_vals}))

# Return a diagonal tensor with the given diagonal values:
'''
1.0 0.0 0.0
0.0 1.0 0.0
0.0 0.0 1.0
'''
id_matrix = tf.diag([1.0, 1.0, 1.0])
# tf.truncated_normal(shape, mean, stddev): shape gives the dimensions of the
# generated tensor; mean and stddev set the mean and standard deviation of the
# truncated normal distribution it samples from
A = tf.truncated_normal([2, 3])
# Fill a matrix with a given value
B = tf.fill([2, 3], 5.0)
# Uniform distribution
C = tf.random_uniform([3, 2])
# Convert a NumPy array into a tensor
D = tf.convert_to_tensor(np.array([[1., 2., 3.], [-3., -7., -1.], [0., 5., -2.]]))
print("tf.diag:\n", sess.run(id_matrix))
print("truncated_normal 2x3:\n", sess.run(A))
print('fill:\n', sess.run(B))
print('random_uniform:\n', sess.run(C))
print('convert_to_tensor:\n', sess.run(D))
print("A+B:\n", sess.run(A + B))
print('C:\n', sess.run(C))
# Transpose of C
print("C^T:\n", sess.run(tf.transpose(C)))
# Determinant
print(sess.run(tf.matrix_determinant(D)))
# Matrix inverse
print(sess.run(tf.matrix_inverse(D)))
# Cholesky decomposition (input must be symmetric positive-definite)
print(sess.run(tf.cholesky(id_matrix)))
# Eigenvalues and eigenvectors
print(sess.run(D))
print(sess.run(tf.self_adjoint_eig(D)))
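Note that tf.self_adjoint_eig assumes its input is self-adjoint (for real values, symmetric), and D above is not symmetric, so its output for D is not a true eigendecomposition of D. A small sketch of the symmetric case, assuming a TF 1.x build where tf.self_adjoint_eig returns an (eigenvalues, eigenvectors) pair: symmetrize D as (D + D^T)/2 and decompose that instead.

import tensorflow as tf
import numpy as np

sess = tf.Session()
D = tf.convert_to_tensor(np.array([[1., 2., 3.], [-3., -7., -1.], [0., 5., -2.]]))
# Symmetrize so that the self-adjoint assumption actually holds
D_sym = 0.5 * (D + tf.transpose(D))
eig_vals, eig_vecs = sess.run(tf.self_adjoint_eig(D_sym))
print('eigenvalues:\n', eig_vals)
print('eigenvectors (columns):\n', eig_vecs)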