PyTorch Quick Reference Manual 02: Tensor Operations

import os
import tqdm
import torch
import random
import shutil
import numpy as np

2 Tensor Operations

2.1 Basic information of a tensor

tensor = torch.rand([2, 5])
print(tensor)
print("type ", tensor.type())   # Data type
print("size ", tensor.size())   # Shape of the tensor. It is a subclass of Python tuple
print("dim ", tensor.dim())    # Number of dimensions.

tensor([[0.7604, 0.6443, 0.2335, 0.0522, 0.5894],
        [0.9144, 0.5356, 0.1220, 0.9488, 0.4529]])
type torch.FloatTensor
size torch.Size([2, 5])
dim 2

2.2 Data type conversion

# Set default tensor type. Float in PyTorch is much faster than double.
torch.set_default_tensor_type(torch.FloatTensor)

# Type conversions.
tensor = tensor.cuda()  # Requires a CUDA-capable GPU.
print(tensor.type())

tensor = tensor.cpu()
print(tensor.type())

tensor = tensor.float()
print(tensor.type())

tensor = tensor.long()
print(tensor.type())

torch.cuda.FloatTensor
torch.FloatTensor
torch.FloatTensor
torch.LongTensor
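
In newer PyTorch versions, a single .to() call handles both device and dtype moves; a minimal sketch (the device check avoids failing on CPU-only machines):

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
tensor = tensor.to(device)                 # Device move.
tensor = tensor.to(torch.float32)          # Dtype conversion.
tensor = tensor.to(device, torch.float64)  # Both at once.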

2.3 Conversion between torch.Tensor and np.ndarray

tensor = torch.empty((2,5)).normal_(mean=0., std=0.6)

np_val = tensor.cpu().numpy()  # torch.Tensor -> np.ndarray.
print(type(np_val))
print(np_val)
print()

tensor = torch.Tensor(np_val).float()  # np.ndarray -> torch.Tensor.
print(tensor)

# tensor = torch.from_numpy(np_val.copy()).float()  # If ndarray has negative stride

<class 'numpy.ndarray'>
[[ 0.47877645  0.4708376  -0.02325169 -0.18679902  0.30069816]
 [-0.98023766 -0.51492953  0.35119155 -1.1812955  -0.01345202]]

tensor([[ 0.4788,  0.4708, -0.0233, -0.1868,  0.3007],
        [-0.9802, -0.5149,  0.3512, -1.1813, -0.0135]])
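
Note that torch.from_numpy shares memory with the source ndarray, while torch.tensor always copies; a small sketch to make the difference visible:

arr = np.zeros(3, dtype=np.float32)
shared = torch.from_numpy(arr)  # Shares memory with arr.
copied = torch.tensor(arr)      # Independent copy.
arr[0] = 5.0
print(shared)  # tensor([5., 0., 0.]) -- the write is visible.
print(copied)  # tensor([0., 0., 0.]) -- unaffected.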

2.4 Tensor Reshape

tensor = torch.rand((2,5))
print(tensor.size())
print(tensor)
print()

tensor = torch.reshape(tensor, (5,2))
print(tensor.size())
print(tensor)
print()

torch.Size([2, 5])
tensor([[0.6753, 0.8400, 0.5969, 0.0085, 0.6854],
        [0.5195, 0.9284, 0.1016, 0.5699, 0.7604]])

torch.Size([5, 2])
tensor([[0.6753, 0.8400],
        [0.5969, 0.0085],
        [0.6854, 0.5195],
        [0.9284, 0.1016],
        [0.5699, 0.7604]])
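
reshape works even on non-contiguous tensors (copying when necessary), whereas view requires contiguous memory; a quick sketch:

t = torch.rand(2, 5)
v = t.view(5, 2)   # Fine: t is contiguous.
p = t.t()          # Transpose makes the tensor non-contiguous.
# p.view(10) would raise a RuntimeError here.
r = p.reshape(10)  # reshape copies when a view is impossible.
print(v.size(), r.size())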

2.5 Tensor permute

a = torch.randn(16, 16, 3) 
print(a.size())
print()

b = a.permute(2,0,1)
print(b.size())
print()

torch.Size([16, 16, 3])

torch.Size([3, 16, 16])
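
permute only rearranges strides, so the result is usually non-contiguous; call .contiguous() before .view():

print(b.is_contiguous())        # False after permute.
c = b.contiguous().view(3, -1)  # Copy to contiguous memory first.
print(c.size())                 # torch.Size([3, 256])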

2.6 Tensor Copy

# Operation                 |  New/Shared memory | Still in computation graph |
tensor.clone()            # |        New         |          Yes               |
tensor.detach()           # |      Shared        |          No                |
tensor.contiguous()       # |      Shared*       |          Yes               |
# * contiguous() returns self (shared) only if the tensor is already
#   contiguous; otherwise it allocates a new contiguous copy.
    
a = torch.rand(2,2)
print(id(a))
b = a.clone()
print(id(b))
print()

a = torch.rand(2,2)
print(id(a))
b = a.detach()
print(id(b))
print()

a = torch.rand(2,2)
print(id(a))
b = a.contiguous()
print(id(b))
print()

2691879448152
2691879420136

2691879370008
2691879448152

2691879420136
2691879420136
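
Note that id() compares Python objects, not storage, which is why detach() printed two different ids above even though it shares memory. Comparing data pointers checks actual storage sharing:

a = torch.rand(2, 2)
print(a.data_ptr() == a.clone().data_ptr())       # False: clone copies.
print(a.data_ptr() == a.detach().data_ptr())      # True: detach shares storage.
print(a.data_ptr() == a.contiguous().data_ptr())  # True: a is already contiguous.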

2.7 Tensor concatenation

lt_in = [torch.rand([1, 5]), torch.rand([2, 5]), torch.rand([3, 5])]
tensor = torch.cat(lt_in, dim=0)
for i, e in enumerate(lt_in):
    print(i, e.shape)
print("cat done(given dim)", tensor.shape)
print()

lt_in = [torch.rand([1, 5]), torch.rand([1, 5]), torch.rand([1, 5])]
tensor = torch.stack(lt_in, dim=0)
for i, e in enumerate(lt_in):
    print(i, e.shape)
print("stack done(new dim)", tensor.shape)

0 torch.Size([1, 5])
1 torch.Size([2, 5])
2 torch.Size([3, 5])
cat done (given dim) torch.Size([6, 5])

0 torch.Size([1, 5])
1 torch.Size([1, 5])
2 torch.Size([1, 5])
stack done (new dim) torch.Size([3, 1, 5])
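
stack is equivalent to unsqueezing each tensor along the new dim and concatenating; a quick check:

s1 = torch.stack(lt_in, dim=0)
s2 = torch.cat([t.unsqueeze(0) for t in lt_in], dim=0)
print(torch.equal(s1, s2))  # True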

2.8 Tensor squeeze/unsqueeze

A = torch.rand(1,2,2)
print(A.size())
print()

B = A.squeeze(0)
print(B.size())

C = A.unsqueeze(0)
print(C.size())

torch.Size([1, 2, 2])

torch.Size([2, 2])
torch.Size([1, 1, 2, 2])
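
Called without an argument, squeeze() removes every dimension of size 1:

D = torch.rand(1, 2, 1, 2)
print(D.squeeze().size())   # torch.Size([2, 2]) -- all size-1 dims removed.
print(D.squeeze(2).size())  # torch.Size([1, 2, 2]) -- only dim 2 removed.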

2.9 Get non-zero/zero elements

tensor = torch.rand(3,3)
print(tensor)
print(torch.nonzero(tensor))               # Index of non-zero elements
print(torch.nonzero(tensor == 0))          # Index of zero elements
print(torch.nonzero(tensor).size(0))       # Number of non-zero elements
print(torch.nonzero(tensor == 0).size(0))  # Number of zero elements

tensor([[0.8941, 0.2039, 0.8508],
        [0.3449, 0.3553, 0.2724],
        [0.5092, 0.2380, 0.3142]])
tensor([[0, 0],
        [0, 1],
        [0, 2],
        [1, 0],
        [1, 1],
        [1, 2],
        [2, 0],
        [2, 1],
        [2, 2]])
tensor([], size=(0, 2), dtype=torch.int64)
9
0
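
With as_tuple=True, torch.nonzero returns one index tensor per dimension, which can index the tensor directly:

rows, cols = torch.nonzero(tensor, as_tuple=True)
print(tensor[rows, cols])  # The non-zero values themselves.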

2.10 Tensor A == Tensor B ?

t1 = torch.Tensor([1., 2.])
t2 = torch.Tensor([1., 2.])
# print(torch.allclose(t1, t2))  # Approximate equality with tolerance, for float tensors.
print(torch.equal(t1, t2))       # Exact equality of shape and values.

True
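
For float tensors, torch.allclose is usually the right choice because it allows a small tolerance:

t3 = t1 + 1e-7
print(torch.equal(t1, t3))     # False: bitwise-exact comparison fails.
print(torch.allclose(t1, t3))  # True: within the default rtol/atol.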

2.11 Tensor Expand

# Expand a tensor of shape m*n to m*n*h*w (e.g. 64*512 -> 64*512*7*7); here 3*3 -> 3*3*2*2.
t = torch.rand((3,3))
print(t.size())
print(t)
print()

out = torch.reshape(t, (3, 3, 1, 1)).expand(3, 3, 2, 2)
print(out.size())
print(out)

torch.Size([3, 3])
tensor([[0.2546, 0.7513, 0.1227],
        [0.8105, 0.8989, 0.4692],
        [0.9552, 0.5418, 0.2136]])

torch.Size([3, 3, 2, 2])
tensor([[[[0.2546, 0.2546],
          [0.2546, 0.2546]],

         [[0.7513, 0.7513],
          [0.7513, 0.7513]],

         [[0.1227, 0.1227],
          [0.1227, 0.1227]]],


        [[[0.8105, 0.8105],
          [0.8105, 0.8105]],

         [[0.8989, 0.8989],
          [0.8989, 0.8989]],

         [[0.4692, 0.4692],
          [0.4692, 0.4692]]],


        [[[0.9552, 0.9552],
          [0.9552, 0.9552]],

         [[0.5418, 0.5418],
          [0.5418, 0.5418]],

         [[0.2136, 0.2136],
          [0.2136, 0.2136]]]])
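
expand allocates no new memory (the expanded dims get stride 0), so the result should not be written to; use repeat when a real copy is needed:

base = torch.rand(3, 1)
e = base.expand(3, 4)      # No copy; all columns view the same storage.
r = base.repeat(1, 4)      # Actual copy of the data.
print(e.size(), r.size())  # torch.Size([3, 4]) torch.Size([3, 4])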

2.12 Matrix multiplication

# Matrix multiplication: (m*n) * (n*p) -> (m*p).
t1 = torch.rand((2,3))
t2 = torch.rand((3,2))
r = t1 @ t2
print(r.size())
print(r)
print()

# Batch matrix multiplication: (b*m*n) * (b*n*p) -> (b*m*p).
t1 = torch.rand((6, 2, 3))
t2 = torch.rand((6, 3, 2))
r = t1 @ t2
print(r.size())
print(r)
print()

# Element-wise multiplication.
t1 = torch.rand((3,3))
t2 = torch.rand((3,3))
r = t1 * t2
print(r.size())
print(r)
print()

torch.Size([2, 2])
tensor([[0.2805, 0.4128],
        [0.3273, 0.5588]])

torch.Size([6, 2, 2])
tensor([[[0.9034, 1.1809],
         [1.0052, 0.5672]],

        [[1.0503, 0.7826],
         [0.5958, 0.3320]],

        [[0.3317, 0.9927],
         [0.1044, 0.1776]],

        [[0.3701, 0.3560],
         [0.5593, 0.8541]],

        [[0.8256, 0.6950],
         [1.3882, 0.8211]],

        [[0.4006, 0.6250],
         [1.0060, 1.1441]]])

torch.Size([3, 3])
tensor([[0.0139, 0.4317, 0.0394],
        [0.0608, 0.4654, 0.3051],
        [0.1981, 0.2008, 0.2868]])
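
The @ operator is shorthand for torch.matmul, which handles the batched case for 3-D inputs; torch.bmm is the explicit batched form:

t1 = torch.rand((6, 2, 3))
t2 = torch.rand((6, 3, 2))
print(torch.allclose(t1 @ t2, torch.matmul(t1, t2)))  # True
print(torch.allclose(t1 @ t2, torch.bmm(t1, t2)))     # True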

2.13 Get the value of a single-element tensor

tensor = torch.rand(1)
val = tensor.item()
print(type(val), val)

<class 'float'> 0.6443968415260315

2.14 Convert integer labels to one-hot vectors

n_batch, n_class_num = 6, 3
t_label = torch.Tensor([random.randint(0, n_class_num-1) for i in range(n_batch)]).long().reshape((n_batch,1))
print("t_label ")
print(t_label.size())
print(t_label)
print()

one_hot = torch.zeros(t_label.shape[0], n_class_num).scatter_(1, t_label, 1).long()  # (dim, index, value)
print("one_hot ")
print(one_hot.size())
print(one_hot)

t_label
torch.Size([6, 1])
tensor([[0],
        [1],
        [1],
        [0],
        [1],
        [2]])

one_hot
torch.Size([6, 3])
tensor([[1, 0, 0],
        [0, 1, 0],
        [0, 1, 0],
        [1, 0, 0],
        [0, 1, 0],
        [0, 0, 1]])
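
A newer alternative (assuming PyTorch >= 1.1) is torch.nn.functional.one_hot, which expects a 1-D label tensor:

import torch.nn.functional as F
labels = t_label.squeeze(1)              # one_hot expects shape (N,), not (N, 1).
print(F.one_hot(labels, num_classes=3))  # Same result as the scatter_ version.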

2.15 Compute the Euclidean distance between two arrays

# X1 is of shape m*d, X2 is of shape n*d.
X1 = torch.rand((2, 3))
X2 = torch.rand((6, 3))
D = torch.sqrt(torch.sum((X1[:,None,:] - X2) ** 2, dim=2))
print(D.size())
print(D)

torch.Size([2, 6])
tensor([[1.1364, 1.1124, 0.5864, 0.8961, 0.6905, 0.5420],
        [0.6629, 0.8406, 0.5869, 0.3428, 0.5778, 0.6701]])
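
The built-in torch.cdist computes the same pairwise distances directly:

D2 = torch.cdist(X1, X2, p=2)
print(torch.allclose(D, D2))  # True, up to floating-point error.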

2.16 Limit tensor values to a range (torch.clamp)

Output elements are clamped to the interval [min, max].

A = torch.rand(3,3)
print(A)
B = torch.clamp(A, 0.2, 0.8)
print(B)

tensor([[0.4451, 0.8816, 0.3674],
        [0.7309, 0.2765, 0.0899],
        [0.7772, 0.4225, 0.6008]])
tensor([[0.4451, 0.8000, 0.3674],
        [0.7309, 0.2765, 0.2000],
        [0.7772, 0.4225, 0.6008]])
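
clamp also has an in-place variant and one-sided forms:

A.clamp_(min=0.2)              # In-place, lower bound only.
print(bool((A >= 0.2).all()))  # True
B = torch.clamp(A, max=0.8)    # Upper bound only.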

2.17 Gather data with index (torch.gather)

a = torch.tensor([[1, 2, 3], [4, 5, 6], [7, 12, 9]])
print(a)
print()

b = torch.gather(input=a, dim=0, index=torch.tensor([[0,1,2], [1,2,0]]))
print(b)

tensor([[ 1,  2,  3],
        [ 4,  5,  6],
        [ 7, 12,  9]])

tensor([[ 1,  5,  9],
        [ 4, 12,  3]])

With dim=0, out[i][j] = a[index[i][j]][j]; the index chooses the row for each column:

1  -> out[0][0] = a[index[0][0]=0][0]
5  -> out[0][1] = a[index[0][1]=1][1]
9  -> out[0][2] = a[index[0][2]=2][2]

4  -> out[1][0] = a[index[1][0]=1][0]
12 -> out[1][1] = a[index[1][1]=2][1]
3  -> out[1][2] = a[index[1][2]=0][2]
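
With dim=1 the roles swap: out[i][j] = a[i][index[i][j]], i.e. the index picks a column within each row:

c = torch.gather(input=a, dim=1, index=torch.tensor([[0, 2], [1, 0], [2, 2]]))
print(c)  # tensor([[ 1,  3],
          #         [ 5,  4],
          #         [ 9,  9]])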
