方法一
网络模型、数据(输入、标注)以及损失函数.cuda()
点击查看代码
import torch
import torchvision
from torch import nn
from torch.nn import Conv2d, MaxPool2d, Flatten, Linear, Sequential
from torch.utils.tensorboard import SummaryWriter
from model import *
from torch.utils.data import DataLoader
# Load the CIFAR10 train/test splits as tensors and wrap them in batched loaders.
_to_tensor = torchvision.transforms.ToTensor()
train_data = torchvision.datasets.CIFAR10(
    "./dataset1", train=True, download=True, transform=_to_tensor)
test_data = torchvision.datasets.CIFAR10(
    "./dataset1", train=False, download=True, transform=_to_tensor)

# Dataset sizes, reported once and reused for the accuracy denominator later.
train_data_size = len(train_data)
test_data_size = len(test_data)
print(f"训练集长度:{train_data_size}")
print(f"测试集长度:{test_data_size}")

_BATCH_SIZE = 64
train_data_loader = DataLoader(train_data, batch_size=_BATCH_SIZE)
test_data_loader = DataLoader(test_data, batch_size=_BATCH_SIZE)
class Test(nn.Module):
    """CIFAR10 classifier: three conv+pool stages feeding two linear layers.

    Input:  float tensor of shape (N, 3, 32, 32).
    Output: logits tensor of shape (N, 10).
    """

    def __init__(self):
        super().__init__()
        # Spatial size: 32 -> 16 -> 8 -> 4; final feature count 64 * 4 * 4 = 1024.
        self.model1 = Sequential(
            Conv2d(3, 32, kernel_size=5, stride=1, padding=2),
            MaxPool2d(2),
            Conv2d(32, 32, kernel_size=5, padding=2),
            MaxPool2d(2),
            Conv2d(32, 64, kernel_size=5, padding=2),
            MaxPool2d(2),
            Flatten(),
            Linear(1024, 64),
            Linear(64, 10),
        )

    def forward(self, x):
        """Return class logits for a batch of images."""
        return self.model1(x)
# ---- Training setup -------------------------------------------------------
# Query CUDA availability ONCE: the original re-checked torch.cuda.is_available()
# on every batch, which is pointless work — the answer cannot change mid-run.
use_cuda = torch.cuda.is_available()

test = Test()
if use_cuda:
    test = test.cuda()

loss_fn = nn.CrossEntropyLoss()
if use_cuda:
    loss_fn = loss_fn.cuda()

learning_rate = 1e-2
optimizer = torch.optim.SGD(test.parameters(), lr=learning_rate)

total_train_step = 0   # optimizer steps taken so far (x-axis for train_loss)
total_test_step = 0    # completed evaluation passes (x-axis for test metrics)
epoch = 10

writer = SummaryWriter("./log_train")

for i in range(epoch):
    print("----------第{}轮训练开始----------".format(i+1))

    # ---- training pass ----
    test.train()  # was missing: set training-mode behavior before optimizing
    for data in train_data_loader:
        imgs, targets = data
        if use_cuda:
            imgs = imgs.cuda()
            targets = targets.cuda()
        output = test(imgs)
        loss = loss_fn(output, targets)

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        total_train_step = total_train_step + 1
        if total_train_step % 100 == 0:
            print("训练次数: {},loss: {}".format(total_train_step, loss.item()))
            writer.add_scalar("train_loss", loss.item(), total_train_step)

    # ---- evaluation pass ----
    test.eval()  # was missing: switch to inference-mode behavior
    total_test_loss = 0
    total_accuracy = 0
    with torch.no_grad():  # gradients are not needed when only measuring
        for data in test_data_loader:
            imgs, targets = data
            if use_cuda:
                imgs = imgs.cuda()
                targets = targets.cuda()
            output = test(imgs)
            loss = loss_fn(output, targets)
            total_test_loss = total_test_loss + loss.item()
            # count of correct top-1 predictions in this batch
            accuracy = (output.argmax(1) == targets).sum()
            total_accuracy = total_accuracy + accuracy

    print("整体测试集的loss: {}".format(total_test_loss))
    print("整体测试集的正确率: {}".format(total_accuracy/test_data_size))
    writer.add_scalar("test_loss", total_test_loss, total_test_step)
    writer.add_scalar("test_accuracy", total_accuracy/test_data_size, total_test_step)
    total_test_step = total_test_step + 1

    # NOTE(review): this pickles the entire module object; saving
    # test.state_dict() is the more portable convention, but the on-disk
    # format is kept as-is so existing loading code keeps working.
    torch.save(test, "test_{}.pth".format(i))
    print("模型已保存")

writer.close()
方法二
device = torch.device("cuda")
网络模型、数据(输入、标注)以及损失函数.to(device)
点击查看代码
import torch
import torchvision
from torch import nn
from torch.nn import Conv2d, MaxPool2d, Flatten, Linear, Sequential
from torch.utils.tensorboard import SummaryWriter
from torch.utils.data import DataLoader
from model import *
# Pick the compute device once. The original hard-coded torch.device("cuda"),
# which makes every subsequent .to(device) raise on machines without a GPU;
# falling back to CPU keeps the script runnable everywhere and is identical
# in behavior when CUDA is available.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# CIFAR10 train/test splits as tensors, wrapped in batched loaders.
train_data = torchvision.datasets.CIFAR10("./dataset1", train=True, download=True,
                                          transform=torchvision.transforms.ToTensor())
test_data = torchvision.datasets.CIFAR10("./dataset1", train=False, download=True,
                                         transform=torchvision.transforms.ToTensor())

train_data_size = len(train_data)
test_data_size = len(test_data)
print("训练集长度:{}".format(train_data_size))
print("测试集长度:{}".format(test_data_size))

train_data_loader = DataLoader(train_data, batch_size=64)
test_data_loader = DataLoader(test_data, batch_size=64)
class Test(nn.Module):
    """Small CNN for CIFAR10 (10-way classification).

    forward maps an (N, 3, 32, 32) batch to (N, 10) logits.
    """

    def __init__(self):
        super().__init__()
        # Three 5x5 conv layers (padding 2 keeps spatial size), each followed
        # by 2x2 max-pooling, then a flatten and a 1024->64->10 head.
        layers = [
            Conv2d(3, 32, 5, 1, 2),
            MaxPool2d(2),
            Conv2d(32, 32, 5, padding=2),
            MaxPool2d(2),
            Conv2d(32, 64, 5, padding=2),
            MaxPool2d(2),
            Flatten(),
            Linear(1024, 64),
            Linear(64, 10),
        ]
        self.model1 = Sequential(*layers)

    def forward(self, x):
        """Compute and return the logits for batch *x*."""
        x = self.model1(x)
        return x
# ---- Training setup -------------------------------------------------------
# Move model and loss to the chosen device once, up front.
test = Test()
test = test.to(device)

loss_fn = nn.CrossEntropyLoss()
loss_fn = loss_fn.to(device)

learning_rate = 1e-2
optimizer = torch.optim.SGD(test.parameters(), lr=learning_rate)

total_train_step = 0   # optimizer steps taken so far (x-axis for train_loss)
total_test_step = 0    # completed evaluation passes (x-axis for test metrics)
epoch = 10

writer = SummaryWriter("./log_train")

for i in range(epoch):
    print("----------第{}轮训练开始----------".format(i+1))

    # ---- training pass ----
    test.train()  # was missing: set training-mode behavior before optimizing
    for data in train_data_loader:
        imgs, targets = data
        imgs = imgs.to(device)
        targets = targets.to(device)
        output = test(imgs)
        loss = loss_fn(output, targets)

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        total_train_step = total_train_step + 1
        if total_train_step % 100 == 0:
            print("训练次数: {},loss: {}".format(total_train_step, loss.item()))
            writer.add_scalar("train_loss", loss.item(), total_train_step)

    # ---- evaluation pass ----
    test.eval()  # was missing: switch to inference-mode behavior
    total_test_loss = 0
    total_accuracy = 0
    with torch.no_grad():  # gradients are not needed when only measuring
        for data in test_data_loader:
            imgs, targets = data
            imgs = imgs.to(device)
            targets = targets.to(device)
            output = test(imgs)
            loss = loss_fn(output, targets)
            total_test_loss = total_test_loss + loss.item()
            # count of correct top-1 predictions in this batch
            accuracy = (output.argmax(1) == targets).sum()
            total_accuracy = total_accuracy + accuracy

    print("整体测试集的loss: {}".format(total_test_loss))
    print("整体测试集的正确率: {}".format(total_accuracy/test_data_size))
    writer.add_scalar("test_loss", total_test_loss, total_test_step)
    writer.add_scalar("test_accuracy", total_accuracy/test_data_size, total_test_step)
    total_test_step = total_test_step + 1

    # NOTE(review): pickles the whole module; test.state_dict() is the more
    # portable convention, but the file format is kept as-is for compatibility.
    torch.save(test, "test_{}.pth".format(i))
    print("模型已保存")

writer.close()
【推荐】国内首个AI IDE,深度理解中文开发场景,立即下载体验Trae
【推荐】编程新体验,更懂你的AI,立即体验豆包MarsCode编程助手
【推荐】抖音旗下AI助手豆包,你的智能百科全书,全免费不限次数
【推荐】轻量又高性能的 SSH 工具 IShell:AI 加持,快人一步
· 地球OL攻略 —— 某应届生求职总结
· 周边上新:园子的第一款马克杯温暖上架
· Open-Sora 2.0 重磅开源!
· 提示词工程——AI应用必不可少的技术
· .NET周刊【3月第1期 2025-03-02】