Getting Started with PyTorch: Tensor Statistics

Other related operations: https://blog.csdn.net/qq_43923588/article/details/108007534

This post demonstrates PyTorch's tensor statistics operations, covering:

  • norm (vector/matrix norm)
  • prod (product of elements)
  • max/min/argmax/argmin
  • dim/keepdim
  • kthvalue/topk
  • comparison operators

Usage and meaning are explained in the comments inside the code. Because there is a lot of output, each print() call prints, as its first value, the line number of the line it sits on (via sys._getframe().f_lineno), so the output can be matched back to the code.

Tensor statistics

import torch
import numpy as np
import sys
loc = sys._getframe()
_ = '\n'


'''
norm: the p-norm of a tensor
note that this is the norm, not normalize (normalization)
'''
a = torch.full([8], 2.)
b = a.view(2, 4)
c = a.view(2, 2, 2)
print(loc.f_lineno, _, a, _, b, _, c)
# L1 norm
print(loc.f_lineno, _, a.norm(1), _, b.norm(1), _, c.norm(1))
# L2 norm
print(loc.f_lineno, _, a.norm(2), _, b.norm(2), _, c.norm(2))

# norm along a given dimension: the first argument is p (which norm), the second is the dimension
# the dimension the norm is taken over is reduced away in the result
print(loc.f_lineno, _, b.norm(1, dim=1))
print(loc.f_lineno, _, c.norm(2, dim=1))
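# A quick sanity check (a sketch added here, not part of the original run, so it
# produces no output below): the p-norm is just abs/sum/sqrt written out by hand.
assert torch.allclose(b.norm(1, dim=1), b.abs().sum(dim=1))
assert torch.allclose(c.norm(2, dim=1), c.pow(2).sum(dim=1).sqrt())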


'''prod (product of all elements), max/min/argmax/argmin'''
d = torch.rand(2, 3)*10
print(loc.f_lineno, _, d, _, d.prod())
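# prod also reduces along a dimension (added sketch, not in the original run):
# d.prod(dim=1) multiplies within each row; reducing the remaining dimension
# again gives back the product of all elements.
assert d.prod(dim=1).shape == (2,)
assert torch.allclose(d.prod(), d.prod(dim=1).prod())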
'''maximum'''
print(loc.f_lineno, _, d, _, d.max())
'''minimum'''
print(loc.f_lineno, _, d, _, d.min())
'''sum'''
print(loc.f_lineno, _, d, _, d.sum())
'''when no dim is given, argmax/argmin are computed on the flattened tensor, so the returned index refers to the flattened tensor'''
'''index of the maximum'''
print(loc.f_lineno, _, d, _, d.argmax())
print(loc.f_lineno, _, d, _, d.argmax(dim=1))
'''index of the minimum'''
print(loc.f_lineno, _, d, _, d.argmin())
print(loc.f_lineno, _, d, _, d.argmin(dim=1))
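# Sanity check (added sketch, not in the original run): without dim, argmax/argmin
# work on the flattened tensor, so the returned index points into d.flatten().
assert d.argmax() == d.flatten().argmax()
assert d.flatten()[d.argmin()] == d.min()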


'''dim/keepdim'''
e = torch.rand(2, 3)*10
print(loc.f_lineno, _, e, _, e.max(dim=1))
print(loc.f_lineno, _, e, _, e.min(dim=1))
# keepdim=True keeps the reduced dimension (with size 1), so the result has the same number of dimensions as the input
print(loc.f_lineno, _, e, _, e.max(dim=1, keepdim=True))
print(loc.f_lineno, _, e, _, e.min(dim=1, keepdim=True))
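# Shape check (added sketch, not in the original run): without keepdim the reduced
# dimension disappears; with keepdim=True it is kept with size 1.
assert e.max(dim=1).values.shape == (2,)
assert e.max(dim=1, keepdim=True).values.shape == (2, 1)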


'''
kthvalue/topk
kthvalue returns the k-th smallest value (and its index) along a dimension
topk returns the k largest (or smallest) values and their indices; it extends max by returning the top k values instead of just one
'''
f = torch.rand(2, 10)*10
# the 3rd smallest value
print(loc.f_lineno, _, f, _, f.kthvalue(3, dim=1))

# take the k largest values; by default topk returns the largest
print(loc.f_lineno, _, f, _, f.topk(3, dim=1))
# take the k smallest values by setting largest=False
print(loc.f_lineno, _, f, _, f.topk(3, dim=1, largest=False))
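# Relation between the two (added sketch, not in the original run): with
# largest=False topk returns the k smallest values in ascending order, so its
# last column is exactly the k-th smallest value that kthvalue reports.
small3 = f.topk(3, dim=1, largest=False).values
assert torch.allclose(f.kthvalue(3, dim=1).values, small3[:, -1])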


'''
comparison operators
the results are boolean tensors of True/False values
'''
g = torch.randn(3, 4)
print(loc.f_lineno, _, g, _, g > 0)
print(loc.f_lineno, _, g, _, g >= 0)
print(loc.f_lineno, _, g, _, g < 0)
print(loc.f_lineno, _, g, _, g <= 0)
# similar to the .eq() and .equal() methods
print(loc.f_lineno, _, g, _, g == 0)
print(loc.f_lineno, _, g, _, g != 0)

# .eq() compares element-wise and returns the result at each position
x = torch.randn(3, 4)
y = torch.randn(3, 4)
print(loc.f_lineno, _, x, _, y, _, x.eq(y))
# .equal() compares the tensors as a whole and returns True only if they are completely identical
print(loc.f_lineno, _, x, _, y, _, x.equal(y))
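# Contrast (added sketch, not in the original run): .eq() is element-wise and
# returns a bool tensor, while .equal() returns a single Python bool.
assert x.eq(x).all()
assert x.equal(x) is True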

Output

15 
 tensor([2., 2., 2., 2., 2., 2., 2., 2.]) 
 tensor([[2., 2., 2., 2.],
        [2., 2., 2., 2.]]) 
 tensor([[[2., 2.],
         [2., 2.]],

        [[2., 2.],
         [2., 2.]]])
17 
 tensor(16.) 
 tensor(16.) 
 tensor(16.)
19 
 tensor(5.6569) 
 tensor(5.6569) 
 tensor(5.6569)
23 
 tensor([8., 8.])
24 
 tensor([[2.8284, 2.8284],
        [2.8284, 2.8284]])
29 
 tensor([[3.6823, 6.2348, 0.2703],
        [7.5783, 7.7679, 5.9583]]) 
 tensor(2176.8455)
31 
 tensor([[3.6823, 6.2348, 0.2703],
        [7.5783, 7.7679, 5.9583]]) 
 tensor(7.7679)
33 
 tensor([[3.6823, 6.2348, 0.2703],
        [7.5783, 7.7679, 5.9583]]) 
 tensor(0.2703)
35 
 tensor([[3.6823, 6.2348, 0.2703],
        [7.5783, 7.7679, 5.9583]]) 
 tensor(31.4919)
38 
 tensor([[3.6823, 6.2348, 0.2703],
        [7.5783, 7.7679, 5.9583]]) 
 tensor(4)
39 
 tensor([[3.6823, 6.2348, 0.2703],
        [7.5783, 7.7679, 5.9583]]) 
 tensor([1, 1])
41 
 tensor([[3.6823, 6.2348, 0.2703],
        [7.5783, 7.7679, 5.9583]]) 
 tensor(2)
42 
 tensor([[3.6823, 6.2348, 0.2703],
        [7.5783, 7.7679, 5.9583]]) 
 tensor([2, 2])
47 
 tensor([[1.7239, 4.4074, 4.3381],
        [4.2586, 1.3631, 4.8033]]) 
 torch.return_types.max(
values=tensor([4.4074, 4.8033]),
indices=tensor([1, 2]))
48 
 tensor([[1.7239, 4.4074, 4.3381],
        [4.2586, 1.3631, 4.8033]]) 
 torch.return_types.min(
values=tensor([1.7239, 1.3631]),
indices=tensor([0, 1]))
50 
 tensor([[1.7239, 4.4074, 4.3381],
        [4.2586, 1.3631, 4.8033]]) 
 torch.return_types.max(
values=tensor([[4.4074],
        [4.8033]]),
indices=tensor([[1],
        [2]]))
51 
 tensor([[1.7239, 4.4074, 4.3381],
        [4.2586, 1.3631, 4.8033]]) 
 torch.return_types.min(
values=tensor([[1.7239],
        [1.3631]]),
indices=tensor([[0],
        [1]]))
61 
 tensor([[7.3982, 2.2860, 7.0574, 6.9474, 3.5272, 6.1280, 7.1340, 8.2738, 3.3975,
         2.0385],
        [9.6731, 7.9593, 4.5918, 6.2519, 6.0082, 3.4219, 7.0018, 6.3874, 1.9820,
         2.7148]]) 
 torch.return_types.kthvalue(
values=tensor([3.3975, 3.4219]),
indices=tensor([8, 5]))
64 
 tensor([[7.3982, 2.2860, 7.0574, 6.9474, 3.5272, 6.1280, 7.1340, 8.2738, 3.3975,
         2.0385],
        [9.6731, 7.9593, 4.5918, 6.2519, 6.0082, 3.4219, 7.0018, 6.3874, 1.9820,
         2.7148]]) 
 torch.return_types.topk(
values=tensor([[8.2738, 7.3982, 7.1340],
        [9.6731, 7.9593, 7.0018]]),
indices=tensor([[7, 0, 6],
        [0, 1, 6]]))
66 
 tensor([[7.3982, 2.2860, 7.0574, 6.9474, 3.5272, 6.1280, 7.1340, 8.2738, 3.3975,
         2.0385],
        [9.6731, 7.9593, 4.5918, 6.2519, 6.0082, 3.4219, 7.0018, 6.3874, 1.9820,
         2.7148]]) 
 torch.return_types.topk(
values=tensor([[2.0385, 2.2860, 3.3975],
        [1.9820, 2.7148, 3.4219]]),
indices=tensor([[9, 1, 8],
        [8, 9, 5]]))
74 
 tensor([[ 0.9232,  1.4068, -1.7394,  0.7218],
        [-0.4559, -0.3780, -0.0922, -0.5623],
        [ 0.1175, -0.3747, -1.2555, -1.1089]]) 
 tensor([[ True,  True, False,  True],
        [False, False, False, False],
        [ True, False, False, False]])
75 
 tensor([[ 0.9232,  1.4068, -1.7394,  0.7218],
        [-0.4559, -0.3780, -0.0922, -0.5623],
        [ 0.1175, -0.3747, -1.2555, -1.1089]]) 
 tensor([[ True,  True, False,  True],
        [False, False, False, False],
        [ True, False, False, False]])
76 
 tensor([[ 0.9232,  1.4068, -1.7394,  0.7218],
        [-0.4559, -0.3780, -0.0922, -0.5623],
        [ 0.1175, -0.3747, -1.2555, -1.1089]]) 
 tensor([[False, False,  True, False],
        [ True,  True,  True,  True],
        [False,  True,  True,  True]])
77 
 tensor([[ 0.9232,  1.4068, -1.7394,  0.7218],
        [-0.4559, -0.3780, -0.0922, -0.5623],
        [ 0.1175, -0.3747, -1.2555, -1.1089]]) 
 tensor([[False, False,  True, False],
        [ True,  True,  True,  True],
        [False,  True,  True,  True]])
79 
 tensor([[ 0.9232,  1.4068, -1.7394,  0.7218],
        [-0.4559, -0.3780, -0.0922, -0.5623],
        [ 0.1175, -0.3747, -1.2555, -1.1089]]) 
 tensor([[False, False, False, False],
        [False, False, False, False],
        [False, False, False, False]])
80 
 tensor([[ 0.9232,  1.4068, -1.7394,  0.7218],
        [-0.4559, -0.3780, -0.0922, -0.5623],
        [ 0.1175, -0.3747, -1.2555, -1.1089]]) 
 tensor([[True, True, True, True],
        [True, True, True, True],
        [True, True, True, True]])
85 
 tensor([[ 1.9690,  1.6532, -0.2326, -0.8481],
        [ 0.0636, -0.3111, -0.0929, -0.9803],
        [-0.1041,  0.6831,  1.4762,  0.0402]]) 
 tensor([[ 1.0883, -1.0709, -0.0672,  1.0233],
        [ 0.4177, -0.2675,  0.3996,  0.4233],
        [-0.2287,  0.1887,  0.0367,  0.2743]]) 
 tensor([[False, False, False, False],
        [False, False, False, False],
        [False, False, False, False]])
87 
 tensor([[ 1.9690,  1.6532, -0.2326, -0.8481],
        [ 0.0636, -0.3111, -0.0929, -0.9803],
        [-0.1041,  0.6831,  1.4762,  0.0402]]) 
 tensor([[ 1.0883, -1.0709, -0.0672,  1.0233],
        [ 0.4177, -0.2675,  0.3996,  0.4233],
        [-0.2287,  0.1887,  0.0367,  0.2743]]) 
 False

Process finished with exit code 0
