Linear Regression Model Implementation (PyTorch Version)
import random
import torch
from d2l import torch as d2l
def synthetic_data(w, b, num_examples):
    """Generate y = Xw + b + noise."""
    # Standard normal features: num_examples rows, len(w) columns.
    x = torch.normal(0, 1, (num_examples, len(w)))
    print('len(w) is: ' + str(len(w)))
    # Generate y.
    y = torch.matmul(x, w) + b
    # Add Gaussian noise with mean 0, std 0.01, and the same shape as y.
    y += torch.normal(0, 0.01, y.shape)
    # -1 lets reshape infer the number of rows; 1 forces a single column.
    return x, y.reshape((-1, 1))
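# With num_examples=1000 and len(w)=2 as used below, synthetic_data returns
# features of shape (1000, 2) and labels of shape (1000, 1).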
true_w = torch.tensor([2, -3.4])
true_b = 4.2
features, labels = synthetic_data(true_w, true_b, 1000)
print('features:', features[0], '\nlabel:', labels[0])
# d2l.set_figsize()
# Scatter-plot the second feature column (index 1) against the labels.
# d2l.plt.scatter(features[:, 1].detach().numpy(), labels.detach().numpy(), 1)
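# A side note on detach() above: it drops any autograd history before the
# tensors are converted to NumPy; features and labels do not require grad
# here, so in this particular script it is only a harmless safety measure.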
def data_iter(batch_size, features, labels):
    # Total number of samples.
    num_examples = len(features)
    # Build the list of sample indices...
    indices = list(range(num_examples))
    # ...and shuffle it so batches are drawn in random order.
    random.shuffle(indices)
    for i in range(0, num_examples, batch_size):
        batch_indices = torch.tensor(
            # min() keeps the last batch from reading past the end.
            indices[i:min(i + batch_size, num_examples)]
        )
        # Features and labels for this randomly ordered batch.
        yield features[batch_indices], labels[batch_indices]
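# One full iteration over data_iter visits every sample exactly once, in a
# freshly shuffled order, so each pass through the generator is one epoch.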
batch_size = 10
for x, y in data_iter(batch_size, features, labels):
    # x is a (10, 2) tensor of features; y is a (10, 1) tensor of labels.
    print(x, '\n', y)
    break
w = torch.normal(0, 0.01, size=(2, 1), requires_grad=True)
b = torch.zeros(1, requires_grad=True)
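# Note: both parameters carry requires_grad=True, so autograd will accumulate
# gradients into w.grad and b.grad whenever backward() is called.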
def linreg(x, w, b):
    """The linear regression model."""
    return torch.matmul(x, w) + b
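# torch.matmul(x, w) yields a (batch_size, 1) tensor; adding b broadcasts the
# single bias value across every row of the batch.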
def squared_loss(y_hat, y):
    """Squared loss."""
    # Reshape y to match y_hat so the elementwise difference lines up.
    return (y_hat - y.reshape(y_hat.shape)) ** 2 / 2
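# The returned loss keeps the (batch_size, 1) shape, one value per example;
# the training loop below sums it into a scalar before calling backward().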
# Define the optimization algorithm.
def sgd(params, lr, batch_size):
    """Minibatch stochastic gradient descent."""
    with torch.no_grad():  # parameter updates must not be tracked by autograd
        for param in params:
            # Gradient-descent step for each parameter.
            param -= lr * param.grad / batch_size
            # Reset the gradient so the next backward() starts from zero.
            param.grad.zero_()
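# Dividing by batch_size turns the summed-loss gradient into an average over
# the minibatch, so the effective step size does not depend on batch_size.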
# Learning rate.
lr = 0.03
# Number of full passes (epochs) over the data.
num_epochs = 3
net = linreg
loss = squared_loss
for epoch in range(num_epochs):
    for x, y in data_iter(batch_size, features, labels):
        # Minibatch loss on x and y.
        l = loss(net(x, w, b), y)
        # l has shape (batch_size, 1) rather than being a scalar, so sum its
        # elements into one value before computing gradients w.r.t. [w, b].
        l.sum().backward()
        # Update the parameters using their gradients.
        sgd([w, b], lr, batch_size)
    with torch.no_grad():
        train_l = loss(net(features, w, b), labels)
        print(f'epoch{epoch+1},loss{float(train_l.mean()):f}')
print(f'error in estimating w: {true_w - w.reshape(true_w.shape)}')
print(f'error in estimating b: {true_b - b}')
Output:
len(w) is: 2
features: tensor([-0.2004, -2.0049])
label: tensor([10.6152])
tensor([[ 1.0884, -0.9585],
        [-0.1686,  0.4229],
        [-0.3642, -0.7151],
        [ 1.2426, -1.3948],
        [ 0.7473, -0.0862],
        [ 0.6056, -1.4912],
        [ 0.8237,  1.2756],
        [-0.0991, -1.0443],
        [-1.0252,  0.6845],
        [ 0.1991, -0.4367]])
 tensor([[ 9.6328],
        [ 2.4175],
        [ 5.9022],
        [11.4402],
        [ 5.9788],
        [10.4812],
        [ 1.5190],
        [ 7.5556],
        [-0.1732],
        [ 6.0907]])
epoch1,loss0.032279
epoch2,loss0.000108
epoch3,loss0.000048
error in estimating w: tensor([-0.0004, 0.0002], grad_fn=<SubBackward0>)
error in estimating b: tensor([0.0003], grad_fn=<RsubBackward1>)
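For comparison, the same model can be trained with PyTorch's built-in utilities in place of the hand-written pieces above. The sketch below is an illustrative addition, not part of the original listing: it assumes the features and labels tensors generated earlier, and swaps in torch.utils.data.DataLoader for data_iter, nn.Linear for linreg, nn.MSELoss for squared_loss, and torch.optim.SGD for the manual sgd (the names dataset, loader, net2, mse and trainer are my own).

from torch import nn
from torch.utils import data

# Wrap the synthetic tensors in a dataset that is reshuffled every epoch.
dataset = data.TensorDataset(features, labels)
loader = data.DataLoader(dataset, batch_size=10, shuffle=True)

# A single fully connected layer computes y = Xw + b directly.
net2 = nn.Linear(2, 1)
nn.init.normal_(net2.weight, mean=0.0, std=0.01)
nn.init.zeros_(net2.bias)

mse = nn.MSELoss()  # averages over the batch, so no division by batch_size
trainer = torch.optim.SGD(net2.parameters(), lr=0.03)

for epoch in range(3):
    for x, y in loader:
        trainer.zero_grad()
        mse(net2(x), y).backward()
        trainer.step()
    with torch.no_grad():
        print(f'epoch{epoch+1},loss{float(mse(net2(features), labels)):f}')

Note that nn.MSELoss omits the factor of 1/2 used in squared_loss, so the printed loss values should come out roughly twice those of the scratch version.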
Author: Jace Jin
GitHub: https://github.com/buxianghua
Copyright of this original article belongs to the author.
Reposting is welcome; when reposting, please include a clearly visible link to the original article, otherwise the author reserves the right to pursue legal liability.