from __future__ import print_function
import torch as t
x = t.Tensor(5, 3)
print(x)
tensor([[1.0194e-38, 8.4490e-39, 1.0469e-38],
[9.3674e-39, 9.9184e-39, 8.7245e-39],
[9.2755e-39, 8.9082e-39, 9.9184e-39],
[8.4490e-39, 9.6429e-39, 1.0653e-38],
[1.0469e-38, 4.2246e-39, 1.0378e-38]])
x = t.rand(5, 3)
print(x)
tensor([[0.8813, 0.9889, 0.6149],
[0.8595, 0.5743, 0.0719],
[0.8653, 0.6924, 0.2119],
[0.1017, 0.0243, 0.4142],
[0.9373, 0.1185, 0.6594]])
x.size()
torch.Size([5, 3])
y = t.rand(5, 3)
x + y
tensor([[1.4673, 1.1781, 1.6074],
[1.8543, 1.0660, 0.6873],
[1.6326, 1.3947, 0.5899],
[0.1402, 0.1022, 0.8971],
[1.9228, 0.4745, 1.2459]])
t.add(x, y)
tensor([[1.4673, 1.1781, 1.6074],
[1.8543, 1.0660, 0.6873],
[1.6326, 1.3947, 0.5899],
[0.1402, 0.1022, 0.8971],
[1.9228, 0.4745, 1.2459]])
result = t.Tensor(5, 3)
print(result)
t.add(x, y, out=result)
result
tensor([[8.4490e-39, 9.6428e-39, 8.4490e-39],
[9.6429e-39, 9.2755e-39, 1.0286e-38],
[9.0919e-39, 8.9082e-39, 9.2755e-39],
[8.4490e-39, 9.6429e-39, 4.6838e-39],
[8.4489e-39, 1.1112e-38, 4.1328e-39]])
tensor([[1.4673, 1.1781, 1.6074],
[1.8543, 1.0660, 0.6873],
[1.6326, 1.3947, 0.5899],
[0.1402, 0.1022, 0.8971],
[1.9228, 0.4745, 1.2459]])
print("最初的y:")
print(y)
print("第一次相加后的y:")
y.add(x)
print(y)
print("第二次加,y:")
y.add_(x)
print(y)
Original y:
tensor([[0.5859, 0.1892, 0.9925],
[0.9948, 0.4917, 0.6154],
[0.7673, 0.7023, 0.3780],
[0.0385, 0.0779, 0.4829],
[0.9855, 0.3560, 0.5865]])
y after the first add (y.add(x), returns a new tensor):
tensor([[0.5859, 0.1892, 0.9925],
[0.9948, 0.4917, 0.6154],
[0.7673, 0.7023, 0.3780],
[0.0385, 0.0779, 0.4829],
[0.9855, 0.3560, 0.5865]])
y after the second add (y.add_(x), in-place):
tensor([[1.4673, 1.1781, 1.6074],
[1.8543, 1.0660, 0.6873],
[1.6326, 1.3947, 0.5899],
[0.1402, 0.1022, 0.8971],
[1.9228, 0.4745, 1.2459]])
Note: functions whose names end with an underscore (e.g. add_) modify the Tensor in place; functions without the underscore return a new Tensor and leave the input unchanged.
Slicing
print(x)
x[:, 1]
tensor([[0.8813, 0.9889, 0.6149],
[0.8595, 0.5743, 0.0719],
[0.8653, 0.6924, 0.2119],
[0.1017, 0.0243, 0.4142],
[0.9373, 0.1185, 0.6594]])
tensor([0.9889, 0.5743, 0.6924, 0.0243, 0.1185])
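Slicing follows NumPy conventions: the first index selects rows, the second selects columns, and ranges work as usual. A small sketch reusing the x above (the variable names are only for illustration):
row = x[0]          # first row, shape torch.Size([3])
col = x[:, 1]       # second column, shape torch.Size([5])
elem = x[2, 0]      # a single element, returned as a 0-dim tensor
sub = x[1:4, :2]    # rows 1..3 and the first two columns, shape torch.Size([3, 2])
print(row.size(), col.size(), sub.size())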
a = t.ones(5)
a
tensor([1., 1., 1., 1., 1.])
b = a.numpy()
b
array([1., 1., 1., 1., 1.], dtype=float32)
import numpy as np
a = np.ones(5)
print(a)
b = t.from_numpy(a)
print(b)
[1. 1. 1. 1. 1.]
tensor([1., 1., 1., 1., 1.], dtype=torch.float64)
b.add_(1)
print(a)
print(b)
[2. 2. 2. 2. 2.]
tensor([2., 2., 2., 2., 2.], dtype=torch.float64)
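The sharing goes both ways: t.from_numpy() and .numpy() reuse the same underlying memory for CPU tensors, so in-place changes on either side are visible on the other. A small sketch continuing with the a and b from above:
np.add(a, 1, out=a)   # in-place update on the NumPy side
print(a)              # a has been incremented ...
print(b)              # ... and the tensor b shows the same values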
autograd.Variable is the core class of the autograd package. Calling .backward() runs backpropagation and automatically computes all the gradients.
from torch.autograd import Variable
x = Variable(t.ones(2, 2), requires_grad=True)
print(x)
tensor([[1., 1.],
[1., 1.]], requires_grad=True)
y = x.sum()
y
tensor(4., grad_fn=<SumBackward0>)
y.grad_fn
<SumBackward0 at 0x20fe8155390>
y.backward()
x.grad
tensor([[1., 1.],
[1., 1.]])
y.backward()
x.grad
tensor([[2., 2.],
[2., 2.]])
Gradients accumulate during backpropagation: every call to backward adds the new gradients onto the ones already stored, so the gradients have to be zeroed before each backward pass.
x.grad.zero_()
tensor([[0., 0.],
[0., 0.]])
x.grad
tensor([[0., 0.],
[0., 0.]])
y.backward()
x.grad
tensor([[1., 1.],
[1., 1.]])
x = Variable(t.ones(4, 5))
y = t.cos(x)
x_tensor_cos = t.cos(x.data)
print(y)
print(x_tensor_cos)
tensor([[0.5403, 0.5403, 0.5403, 0.5403, 0.5403],
[0.5403, 0.5403, 0.5403, 0.5403, 0.5403],
[0.5403, 0.5403, 0.5403, 0.5403, 0.5403],
[0.5403, 0.5403, 0.5403, 0.5403, 0.5403]])
tensor([[0.5403, 0.5403, 0.5403, 0.5403, 0.5403],
[0.5403, 0.5403, 0.5403, 0.5403, 0.5403],
[0.5403, 0.5403, 0.5403, 0.5403, 0.5403],
[0.5403, 0.5403, 0.5403, 0.5403, 0.5403]])
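x.data unwraps the Variable and returns the underlying tensor, so computations on it are invisible to autograd; the values are identical, only the graph bookkeeping differs. In current PyTorch the usual spelling for this is detach(). A minimal sketch, assuming a leaf that does require gradients:
x = Variable(t.ones(4, 5), requires_grad=True)
y = t.cos(x)               # tracked: y.grad_fn is set
y_data = t.cos(x.data)     # untracked: y_data.grad_fn is None
y_det = t.cos(x.detach())  # modern equivalent, also untracked
print(y.grad_fn, y_data.grad_fn, y_det.grad_fn)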
Defining a neural network
import torch.nn as nn
import torch.nn.functional as F
class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        # convolution layers: 1 input channel -> 6 channels -> 16 channels, 5x5 kernels
        self.conv1 = nn.Conv2d(1, 6, 5)
        self.conv2 = nn.Conv2d(6, 16, 5)
        # fully connected layers; 16*5*5 is the flattened size of the conv output
        self.fc1 = nn.Linear(16*5*5, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)

    def forward(self, x):
        x = F.max_pool2d(F.relu(self.conv1(x)), (2, 2))
        x = F.max_pool2d(F.relu(self.conv2(x)), 2)
        x = x.view(x.size()[0], -1)   # flatten all dimensions except batch
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)
        return x
net = Net()
print(net)
Net(
(conv1): Conv2d(1, 6, kernel_size=(5, 5), stride=(1, 1))
(conv2): Conv2d(6, 16, kernel_size=(5, 5), stride=(1, 1))
(fc1): Linear(in_features=400, out_features=120, bias=True)
(fc2): Linear(in_features=120, out_features=84, bias=True)
(fc3): Linear(in_features=84, out_features=10, bias=True)
)
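The 16*5*5 in_features of fc1 comes from tracing a 32x32 input through the layers: each 5x5 convolution (no padding) shrinks the spatial size by 4, and each 2x2 max-pool halves it. A quick sanity check of that arithmetic (the probe name is just for illustration):
# 32x32 -> conv1(5x5) -> 28x28 -> pool/2 -> 14x14
# 14x14 -> conv2(5x5) -> 10x10 -> pool/2 -> 5x5, with 16 channels
# flattened: 16 * 5 * 5 = 400, which matches fc1's in_features above
probe = Variable(t.randn(1, 1, 32, 32))
feat = F.max_pool2d(F.relu(net.conv1(probe)), 2)
feat = F.max_pool2d(F.relu(net.conv2(feat)), 2)
print(feat.size())  # torch.Size([1, 16, 5, 5])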
params = list(net.parameters())
print(len(params))
10
for name, parameters in net.named_parameters():
    print(name, ":", parameters.size())
conv1.weight : torch.Size([6, 1, 5, 5])
conv1.bias : torch.Size([6])
conv2.weight : torch.Size([16, 6, 5, 5])
conv2.bias : torch.Size([16])
fc1.weight : torch.Size([120, 400])
fc1.bias : torch.Size([120])
fc2.weight : torch.Size([84, 120])
fc2.bias : torch.Size([84])
fc3.weight : torch.Size([10, 84])
fc3.bias : torch.Size([10])
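The 10 entries are simply a weight plus a bias for each of the five layers (conv1, conv2, fc1, fc2, fc3). The total number of scalar parameters can be checked the same way (a quick sketch):
total = sum(p.numel() for p in net.parameters())
print(total)  # 61706 = 156 + 2416 + 48120 + 10164 + 850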
The inputs and outputs of forward are Variables; only Variables support automatic differentiation (plain Tensors did not), so input Tensors need to be wrapped in Variables before being fed to the network.
input = Variable(t.randn(1, 1, 32, 32))
out = net(input)
out.size()
torch.Size([1, 10])
net.zero_grad()
out.backward(Variable(t.ones(1, 10)))
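out has shape (1, 10), and calling backward() on a non-scalar requires an explicit gradient tensor of the same shape (here all ones); only a scalar, such as a loss value, can call backward() with no argument. A small sketch of the two cases, re-running the forward pass each time so the graph is fresh:
net.zero_grad()
out = net(input)
out.backward(t.ones(1, 10))  # non-scalar output: pass the upstream gradient
net.zero_grad()
out = net(input)
out.sum().backward()         # scalar output: no argument needed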
Loss function
output = net(input)
print(output)
target = Variable(t.arange(0, 10)).float()
target.unsqueeze_(0)
print(target)
criterion = nn.MSELoss()
loss = criterion(output, target)
loss
tensor([[-0.0576, 0.0641, -0.0303, -0.0565, -0.0330, 0.0690, 0.0637, 0.1712,
0.0882, -0.1025]], grad_fn=<AddmmBackward0>)
tensor([[0., 1., 2., 3., 4., 5., 6., 7., 8., 9.]])
tensor(28.2247, grad_fn=<MseLossBackward0>)
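With its default reduction, nn.MSELoss averages the squared differences over all elements, so the value above can be reproduced by hand (a quick check, reusing output and target from above):
manual = ((output - target) ** 2).mean()
print(manual)  # same value as criterion(output, target)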
loss.grad_fn
<MseLossBackward0 at 0x20ff0715180>
net.zero_grad()
print("反向传播之前的conv1.bias的梯度:")
print(net.conv1.bias.grad)
print("执行反向传播...")
loss.backward()
print("反向传播之后的conv1.bias的梯度:")
print(net.conv1.bias.grad)
conv1.bias gradient before backprop:
None
running backprop...
conv1.bias gradient after backprop:
tensor([ 0.0441, -0.0690, 0.0346, -0.0465, 0.0828, -0.1356])
Optimizer
learning_rate = 0.01
# manual SGD step: p <- p - lr * grad
for f in net.parameters():
    f.data.sub_(f.grad.data*learning_rate)
import torch.optim as optim
optimizer = optim.SGD(net.parameters(), lr=0.01)
optimizer.zero_grad()
output = net(input)
loss = criterion(output, target)
loss.backward()
optimizer.step()
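Putting it all together, one training iteration always follows the same pattern: zero the gradients, forward pass, compute the loss, backward pass, update the parameters. A minimal loop sketch under the assumptions above (data_loader is a hypothetical iterable that yields input/target batches):
optimizer = optim.SGD(net.parameters(), lr=0.01)
for epoch in range(2):                      # epoch count is arbitrary here
    for inputs, targets in data_loader:     # hypothetical batch iterable
        optimizer.zero_grad()               # clear accumulated gradients
        outputs = net(inputs)               # forward pass
        loss = criterion(outputs, targets)  # compute the loss
        loss.backward()                     # backpropagate
        optimizer.step()                    # update parameters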