Deep Learning -- Common TensorFlow APIs for Tensor Operations (88)
1. Creating Tensors
import tensorflow as tf
tf.constant([1, 2, 3]).numpy()
array([1, 2, 3])
tf.zeros((2, 3)).numpy()
array([[0., 0., 0.],
[0., 0., 0.]], dtype=float32)
tf.ones((3, 4)).numpy()
array([[1., 1., 1., 1.],
[1., 1., 1., 1.],
[1., 1., 1., 1.]], dtype=float32)
tf.fill((4, 3), 1.1).numpy()
array([[1.1, 1.1, 1.1],
[1.1, 1.1, 1.1],
[1.1, 1.1, 1.1],
[1.1, 1.1, 1.1]], dtype=float32)
tf.random.normal((3, 4), mean=5, stddev=1).numpy()  # normal distribution with mean 5 and standard deviation 1
array([[5.407701 , 5.475788 , 5.5228395, 4.250098 ],
[5.755986 , 4.6296687, 6.3727207, 5.3540034],
[4.6932573, 4.623758 , 3.7468662, 4.5256224]], dtype=float32)
tf.random.uniform((4, 5), minval=0, maxval=10).numpy()  # uniform distribution over [0, 10)
array([[3.9198053, 7.8100157, 9.427258 , 5.3632746, 4.598016 ],
[2.2394156, 6.405282 , 7.2577057, 2.4150395, 6.204544 ],
[3.7058866, 2.9686987, 7.873453 , 7.5488734, 9.617502 ],
[5.40668 , 1.1854482, 5.420072 , 8.667919 , 1.9604456]],
dtype=float32)
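A small addition (not in the original post): tf.random.set_seed makes the random creation ops above reproducible; resetting the global seed replays the same sequence of random tensors.
tf.random.set_seed(42)          # global seed: the script now produces the same random values on every run
a = tf.random.normal((2, 2))
tf.random.set_seed(42)          # resetting the seed replays the sequence
b = tf.random.normal((2, 2))
print(tf.reduce_all(tf.equal(a, b)).numpy())   # True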
2. Shape Operations
2.1 tf.reshape
t1 = tf.random.uniform((3, 4), minval=0, maxval=3)
t1
<tf.Tensor: shape=(3, 4), dtype=float32, numpy=
array([[1.5252786 , 2.7575176 , 0.5540403 , 1.6556203 ],
[2.6404145 , 0.27731216, 1.5243076 , 2.2232752 ],
[1.6754533 , 2.4828534 , 1.5633917 , 0.41992092]], dtype=float32)>
tf.reshape(t1, shape=(4, 3))
<tf.Tensor: shape=(4, 3), dtype=float32, numpy=
array([[1.5252786 , 2.7575176 , 0.5540403 ],
[1.6556203 , 2.6404145 , 0.27731216],
[1.5243076 , 2.2232752 , 1.6754533 ],
[2.4828534 , 1.5633917 , 0.41992092]], dtype=float32)>
tf.constant(1).shape  # a scalar tensor has an empty shape
TensorShape([])
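A hedged extra example (not in the original): one entry of the target shape can be -1, and tf.reshape infers it from the total number of elements.
t = tf.range(12)                       # shape (12,)
print(tf.reshape(t, (3, -1)).shape)    # (3, 4) -- the -1 is inferred as 4
print(tf.reshape(t, (-1, 6)).shape)    # (2, 6)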
2.2 tf.expand_dims
Given a tensor input, axis specifies where to insert the new dimension: axis=0 inserts a new first dimension, axis=1 inserts a new second dimension, and axis=-1 inserts a new last dimension.
t2 = tf.constant([1, 2, 3, 4, 5])
print(t2.shape)  # the shape of t2 is (5,)
(5,)
tf.expand_dims(t2, axis=0)  # axis=0: (5,) becomes (1, 5), equivalent to adding a batch dimension
<tf.Tensor: shape=(1, 5), dtype=int32, numpy=array([[1, 2, 3, 4, 5]])>
tf.expand_dims(t2, axis=1)  # axis=1: (5,) becomes (5, 1), turning the vector into a column
<tf.Tensor: shape=(5, 1), dtype=int32, numpy=
array([[1],
[2],
[3],
[4],
[5]])>
# tf.expand_dims(t2, axis=2) raises an error: axis=2 is out of range for a rank-1 tensor
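As a side note (my addition): indexing with tf.newaxis is an equivalent way to add a size-1 dimension.
print(t2[tf.newaxis, :].shape)   # (1, 5), same as tf.expand_dims(t2, axis=0)
print(t2[:, tf.newaxis].shape)   # (5, 1), same as tf.expand_dims(t2, axis=1)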
2.3 tf.squeeze
This function returns a tensor with all dimensions of size 1 removed from the original input.
axis can be used to specify which size-1 dimension to remove; note that the specified dimension must actually have size 1, or an error is raised.
expand_dims and squeeze are complementary operations: one adds a dimension, the other removes one.
t1 = tf.constant([1, 2, 3, 4, 5])
print(t1)
tf.Tensor([1 2 3 4 5], shape=(5,), dtype=int32)
t2 = tf.expand_dims(t1, axis=0)  # add a dimension
print(t2)
tf.Tensor([[1 2 3 4 5]], shape=(1, 5), dtype=int32)
t3 = tf.squeeze(t2, axis=0)  # remove the dimension again
print(t3)
tf.Tensor([1 2 3 4 5], shape=(5,), dtype=int32)
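A minimal sketch (not from the original) of the claim above that, without axis, tf.squeeze removes every size-1 dimension at once:
t4 = tf.zeros((1, 3, 1, 2))
print(tf.squeeze(t4).shape)           # (3, 2) -- both size-1 dims removed
print(tf.squeeze(t4, axis=0).shape)   # (3, 1, 2) -- only the specified dim removed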
2.4 tf.transpose (transpose)
t1 = tf.random.normal((3,4))
t1
<tf.Tensor: shape=(3, 4), dtype=float32, numpy=
array([[ 1.0217291 , 0.80566406, -1.3235798 , 0.00846866],
[ 0.544451 , -0.5384374 , -0.71138096, 2.317901 ],
[-0.8243301 , -1.2006387 , -0.10023449, 0.34265777]],
dtype=float32)>
tf.transpose(t1)
<tf.Tensor: shape=(4, 3), dtype=float32, numpy=
array([[ 1.0217291 , 0.544451 , -0.8243301 ],
[ 0.80566406, -0.5384374 , -1.2006387 ],
[-1.3235798 , -0.71138096, -0.10023449],
[ 0.00846866, 2.317901 , 0.34265777]], dtype=float32)>
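A hedged extra: for tensors of rank greater than 2, the perm argument of tf.transpose controls the new axis order.
t = tf.zeros((2, 3, 4))
print(tf.transpose(t, perm=[0, 2, 1]).shape)   # (2, 4, 3) -- swap the last two axes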
3. Math Operations
x = tf.random.normal((3, 4))
print(x)
y = tf.ones((3, 4))
print(y)
tf.Tensor(
[[-0.64711064 -0.8264138 0.34565753 -0.17993973]
[ 1.9282252 -0.99265015 0.7095741 0.50488204]
[-1.34313 0.13999788 -0.49323723 -0.3143978 ]], shape=(3, 4), dtype=float32)
tf.Tensor(
[[1. 1. 1. 1.]
[1. 1. 1. 1.]
[1. 1. 1. 1.]], shape=(3, 4), dtype=float32)
z = tf.add(x, y)
print(z)
tf.Tensor(
[[ 0.35288936 0.17358619 1.3456576 0.82006025]
[ 2.928225 0.00734985 1.7095741 1.5048821 ]
[-0.34313 1.1399978 0.50676274 0.6856022 ]], shape=(3, 4), dtype=float32)
x1 = tf.subtract(z, y)
print(x1)
tf.Tensor(
[[-0.64711064 -0.8264138 0.3456576 -0.17993975]
[ 1.928225 -0.99265015 0.7095741 0.5048821 ]
[-1.34313 0.13999784 -0.49323726 -0.3143978 ]], shape=(3, 4), dtype=float32)
z1 = tf.multiply(x, y)  # element-wise multiplication; the shapes must match (or be broadcastable)
print(z1)
tf.Tensor(
[[-0.64711064 -0.8264138 0.34565753 -0.17993973]
[ 1.9282252 -0.99265015 0.7095741 0.50488204]
[-1.34313 0.13999788 -0.49323723 -0.3143978 ]], shape=(3, 4), dtype=float32)
z2 = tf.divide(x, y)  # element-wise division
print(z2)
tf.Tensor(
[[-0.64711064 -0.8264138 0.34565753 -0.17993973]
[ 1.9282252 -0.99265015 0.7095741 0.50488204]
[-1.34313 0.13999788 -0.49323723 -0.3143978 ]], shape=(3, 4), dtype=float32)
z3 = tf.matmul(x, tf.transpose(y))  # matrix multiplication from linear algebra: (3, 4) x (4, 3) --> (3, 3)
print(z3)
tf.Tensor(
[[-1.3078066 -1.3078066 -1.3078066]
[ 2.150031 2.150031 2.150031 ]
[-2.0107672 -2.0107672 -2.0107672]], shape=(3, 3), dtype=float32)
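A small addition (not in the original): the Python operators +, -, *, / and @ are overloaded on tensors and map to the same ops used above.
print(tf.reduce_all(tf.equal(x + y, tf.add(x, y))).numpy())         # True
print(tf.reduce_all(tf.equal(x * y, tf.multiply(x, y))).numpy())    # True
print(tf.reduce_all(tf.equal(x @ tf.transpose(y), tf.matmul(x, tf.transpose(y)))).numpy())   # True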
reduce_sum / reduce_mean / reduce_max / reduce_min: aggregation (reduction) operations
t1 = tf.random.normal((3,4))
print(t1)
tf.reduce_sum(t1, axis=0)
tf.Tensor(
[[ 1.1250675 0.05182844 -0.5701071 0.7476569 ]
[-0.01252978 0.02883564 0.55116016 0.00195847]
[ 0.61022764 -0.26732844 0.518448 0.25869775]], shape=(3, 4), dtype=float32)
<tf.Tensor: shape=(4,), dtype=float32, numpy=array([ 1.7227654 , -0.18666437, 0.49950105, 1.0083132 ], dtype=float32)>
tf.reduce_sum(t1, axis=1)
<tf.Tensor: shape=(3,), dtype=float32, numpy=array([1.3544457, 0.5694245, 1.120045 ], dtype=float32)>
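A brief sketch of the other reductions mentioned above, reusing the same t1; omitting axis reduces over all elements to a scalar.
print(tf.reduce_mean(t1, axis=0))   # column-wise mean, shape (4,)
print(tf.reduce_max(t1, axis=1))    # row-wise max, shape (3,)
print(tf.reduce_min(t1))            # scalar: minimum over the whole tensor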
4. Comparison (Logical) Operations
x = tf.constant([[1, 2, 3], [4, 5, 6]])
y = tf.constant([[1, 2, 3], [4, 5, 6]])
tf.equal(x, y)
<tf.Tensor: shape=(2, 3), dtype=bool, numpy=
array([[ True, True, True],
[ True, True, True]])>
tf.greater_equal(x, y)
<tf.Tensor: shape=(2, 3), dtype=bool, numpy=
array([[ True, True, True],
[ True, True, True]])>
t2 = tf.less_equal(x, y)
print(t2)
tf.Tensor(
[[ True True True]
[ True True True]], shape=(2, 3), dtype=bool)
tf.cast(t2, dtype=tf.float32)
<tf.Tensor: shape=(2, 3), dtype=float32, numpy=
array([[1., 1., 1.],
[1., 1., 1.]], dtype=float32)>
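A common follow-up pattern (my addition, not in the original): averaging the cast comparison gives the fraction of matching elements, e.g. a classification accuracy.
acc = tf.reduce_mean(tf.cast(tf.equal(x, y), tf.float32))
print(acc.numpy())   # 1.0 here, since x and y are identical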
5. Operations Between Tensors
t1 = tf.random.normal((3, 4))
tf.concat([t1, t1], axis=0)  # (3, 4) --> (6, 4)
<tf.Tensor: shape=(6, 4), dtype=float32, numpy=
array([[ 0.45750073, 1.3054565 , 0.77839047, 0.7122914 ],
[ 0.29276186, -0.32228792, -0.3649346 , 0.46718833],
[ 1.0173426 , 1.9283781 , 0.11059741, -0.61148983],
[ 0.45750073, 1.3054565 , 0.77839047, 0.7122914 ],
[ 0.29276186, -0.32228792, -0.3649346 , 0.46718833],
[ 1.0173426 , 1.9283781 , 0.11059741, -0.61148983]],
dtype=float32)>
tf.concat([t1, t1], axis=1)  # (3, 4) --> (3, 8)
<tf.Tensor: shape=(3, 8), dtype=float32, numpy=
array([[ 0.45750073, 1.3054565 , 0.77839047, 0.7122914 , 0.45750073,
1.3054565 , 0.77839047, 0.7122914 ],
[ 0.29276186, -0.32228792, -0.3649346 , 0.46718833, 0.29276186,
-0.32228792, -0.3649346 , 0.46718833],
[ 1.0173426 , 1.9283781 , 0.11059741, -0.61148983, 1.0173426 ,
1.9283781 , 0.11059741, -0.61148983]], dtype=float32)>
st1 = tf.stack([t1, t1])  # (3, 4) --> (2, 3, 4)
st1
<tf.Tensor: shape=(2, 3, 4), dtype=float32, numpy=
array([[[ 0.45750073, 1.3054565 , 0.77839047, 0.7122914 ],
[ 0.29276186, -0.32228792, -0.3649346 , 0.46718833],
[ 1.0173426 , 1.9283781 , 0.11059741, -0.61148983]],
[[ 0.45750073, 1.3054565 , 0.77839047, 0.7122914 ],
[ 0.29276186, -0.32228792, -0.3649346 , 0.46718833],
[ 1.0173426 , 1.9283781 , 0.11059741, -0.61148983]]],
dtype=float32)>
a, b = tf.unstack(st1)
a, b
(<tf.Tensor: shape=(3, 4), dtype=float32, numpy=
array([[ 0.45750073, 1.3054565 , 0.77839047, 0.7122914 ],
[ 0.29276186, -0.32228792, -0.3649346 , 0.46718833],
[ 1.0173426 , 1.9283781 , 0.11059741, -0.61148983]],
dtype=float32)>,
<tf.Tensor: shape=(3, 4), dtype=float32, numpy=
array([[ 0.45750073, 1.3054565 , 0.77839047, 0.7122914 ],
[ 0.29276186, -0.32228792, -0.3649346 , 0.46718833],
[ 1.0173426 , 1.9283781 , 0.11059741, -0.61148983]],
dtype=float32)>)
tf.split(t1, 1)  # split into 1 piece along axis 0: the whole tensor, returned in a list
[<tf.Tensor: shape=(3, 4), dtype=float32, numpy=
array([[ 0.45750073, 1.3054565 , 0.77839047, 0.7122914 ],
[ 0.29276186, -0.32228792, -0.3649346 , 0.46718833],
[ 1.0173426 , 1.9283781 , 0.11059741, -0.61148983]],
dtype=float32)>]
t1 = tf.random.normal((4, 3))
tf.split(t1, 2)  # split the (4, 3) tensor into 2 pieces of shape (2, 3) along axis 0
[<tf.Tensor: shape=(2, 3), dtype=float32, numpy=
array([[-0.74238527, -0.39685684, 0.52447265],
[-0.3410778 , -1.1399179 , 1.4882557 ]], dtype=float32)>,
<tf.Tensor: shape=(2, 3), dtype=float32, numpy=
array([[ 0.5945266 , 1.3282952 , -0.8548996 ],
[ 0.22969016, -0.35583028, 0.54077435]], dtype=float32)>]
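A hedged extra: num_or_size_splits can also be a list of sizes, and axis picks the dimension to split along.
a, b = tf.split(t1, [1, 3], axis=0)   # (4, 3) --> shapes (1, 3) and (3, 3)
print(a.shape, b.shape)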
6. tf.argmax
import numpy as np
softmax_output = np.array([[0.1, 0.2, 0.7], [0.8, 0.1, 0.1], [0.4, 0.3, 0.3]])
predict_labels = tf.argmax(softmax_output, axis=1)  # index of the largest probability in each row
labels = ['cat', 'dog', 'pig']
predict = tf.gather(labels, predict_labels)  # map the indices back to label strings
predict.numpy()
array([b'pig', b'cat', b'cat'], dtype=object)
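A small follow-up (my addition, not in the original): the gathered labels come back as byte strings, so decode them if you need Python str.
print([p.decode('utf-8') for p in predict.numpy()])   # ['pig', 'cat', 'cat']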