Deep Learning 06 - Implementing Linear Regression with PyTorch

Today we will use PyTorch to implement both simple (one-variable) and multiple linear regression. Let's first look at how linear regression is done in PyTorch.
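
For reference, simple linear regression fits y = wx + b for a single input x, while multiple linear regression with n features fits y = w1*x1 + w2*x2 + ... + wn*xn + b.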

The code for simple linear regression is as follows:

import torch
import pandas as pd

# Load the training data and reshape x and y into (N, 1) column vectors
df = pd.read_csv('archive/train.csv')

x_data = torch.Tensor(df['x'].values).view(-1, 1)
y_data = torch.Tensor(df['y'].values).view(-1, 1)


# nn is short for Neural Network
class LinearModel(torch.nn.Module):
    # Constructor
    def __init__(self):
        # Call the parent class constructor
        super(LinearModel, self).__init__()
        # torch.nn.Linear applies a linear transformation y = wx + b, holding the weight and bias
        # https://pytorch.org/docs/stable/generated/torch.nn.Linear.html#torch.nn.Linear
        # Signature: Linear(in_features, out_features, bias=True); in_features is the size of each
        # input sample and out_features the size of each output sample.
        # For y = wx both are 1, i.e. Linear(1, 1); a model with two input features such as
        # y = w1*x + w2*x^2 would use Linear(2, 1)
        self.linear = torch.nn.Linear(1, 1)

    # Override forward; Module.__call__() invokes forward(), so model(x) runs this method
    def forward(self, x):
        # self.linear is itself callable and applies the linear layer to x
        y_pred = self.linear(x)
        return y_pred


# Instantiate the model (this calls __init__ above); the instance itself is callable
model = LinearModel()

# criterion computes the MSE loss from y_hat and y
# (size_average is deprecated; reduction='mean' is the equivalent, and default, setting)
criterion = torch.nn.MSELoss(reduction='mean')
# Optimizer: stochastic gradient descent over the model's parameters
optimizer = torch.optim.SGD(model.parameters(), lr=0.00001)

# Train for 1000 epochs
for epoch in range(1000):
    # Forward pass: compute y_hat
    y_pred = model(x_data)
    # Compute the loss
    loss = criterion(y_pred, y_data)

    # Zero the gradients, otherwise they accumulate across iterations
    optimizer.zero_grad()
    # Backpropagation
    loss.backward()
    # Update the weights
    optimizer.step()

print('w =', model.linear.weight.item())
print('b =', model.linear.bias.item())

# Evaluate on the test set
df = pd.read_csv('archive/test.csv')

x_data = torch.Tensor(df['x'].values).view(-1, 1)
y_data = torch.Tensor(df['y'].values).view(-1, 1)

criterion = torch.nn.MSELoss(reduction='mean')

# Evaluate once over the whole test set; no_grad() disables gradient tracking
with torch.no_grad():
    y_pred = model(x_data)
    loss = criterion(y_pred, y_data)
print('loss = ', loss.item())

Output:

w = 0.9982311725616455
b = 0.049427345395088196
loss = 9.469813346862793
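
As a quick sanity check, the trained model can be used to predict a new point; a minimal sketch (the value x = 10 is just an illustrative input, not from the dataset). Since the fitted line is roughly y ≈ x, the prediction should come out close to w*10 + b ≈ 10.03:

# Sketch: predict y for a new x with the trained simple-regression model
with torch.no_grad():
    x_new = torch.Tensor([[10.0]])   # shape (1, 1) to match Linear(1, 1)
    print(model(x_new).item())       # roughly 0.998 * 10 + 0.049 ≈ 10.03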

The code for multiple linear regression is as follows:

import torch
import numpy as np

# Load the real-estate dataset, skipping the header row
xy = np.loadtxt('Real estate.csv', delimiter=',', dtype=np.float32, skiprows=1)

# Drop the first column (row index), keep the six feature columns,
# and use the last column (house price) as the target
x_data = torch.from_numpy(xy[:, 1:-1])
y_data = torch.from_numpy(xy[:, [-1]])


class LinearModel(torch.nn.Module):
    def __init__(self):
        super(LinearModel, self).__init__()
        # Six input features, one output value
        self.linear = torch.nn.Linear(6, 1)

    def forward(self, x):
        y_pred = self.linear(x)
        return y_pred


model = LinearModel()
criterion = torch.nn.MSELoss(reduction='mean')
# The raw features differ widely in scale, so SGD needs a tiny learning rate to stay stable
optimizer = torch.optim.SGD(model.parameters(), lr=0.0000001)

for epoch in range(1000):
    y_pred = model(x_data)
    loss = criterion(y_pred, y_data)
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()

print('w =', model.linear.weight.data)
print('b =', model.linear.bias.item())

Output:
w = tensor([[ 0.0335, 0.1162, -0.0070, 0.2405, 0.2382, -0.2520]])
b = -0.29595470428466797
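
The learning rate had to be as small as 1e-7 above because the raw features differ in scale by several orders of magnitude. A common remedy is to standardize each feature before training, after which a much larger learning rate works; here is a minimal sketch under that assumption (lr=0.01 is only an illustrative choice):

# Sketch: standardize the features to zero mean / unit variance, then retrain
x_mean, x_std = x_data.mean(dim=0), x_data.std(dim=0)
x_norm = (x_data - x_mean) / x_std

model = LinearModel()
criterion = torch.nn.MSELoss(reduction='mean')
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)  # a much larger lr is now stable

for epoch in range(1000):
    y_pred = model(x_norm)
    loss = criterion(y_pred, y_data)
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()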

posted @ 2021-05-14 15:24  RabbitKeeper