PyTorch usage example

This post records a CNN model example written with PyTorch, covering the full code structure for training, the model, and prediction. The data comes from a public modulation-recognition dataset, and the code is kept deliberately simple as a PyTorch learning exercise.

train.py

import os
import numpy as np

import torch
import torch.nn as nn
import torch.optim as optim
from tqdm import tqdm
from sklearn.model_selection import train_test_split
from multi_scale_module import GoogLeNet
from center_loss import center_loss


# Torch couples popular neural-network building blocks with an easy-to-use optimization library.
# It keeps maximum flexibility when implementing complex network topologies,
# and can drive both CPU and GPU in parallel for more efficient computation.

# tqdm displays a progress bar

def main():
    # Check whether a GPU is available
    device = torch.device("cuda:2" if torch.cuda.is_available() else "cpu")
    print("using {} device.".format(device))

    # data_root = r'/home/wangchao/location/core/model/multi_scale_2/'
    data_root = r'/home/wc/'

    # Load the training set
    train_dataset = np.load(os.path.join(data_root, 'train.npy'))

    labels = np.load(os.path.join(data_root, 'train_label.npy'))

    # Hold out 10% of the training data for validation
    x_train, x_test, y_train, y_test = train_test_split(train_dataset, labels, test_size=0.1, random_state=0)

    # Convert formats: train = training split, val = validation split
    train_labels = [int(i[0]) for i in y_train]
    train_set = list(x_train)

    val_labels = [int(j[0]) for j in y_test]
    val_set = list(x_test)

    # Batch size
    batch_size = 128
    nw = min([os.cpu_count(), batch_size if batch_size > 1 else 0, 8])  # number of workers
    print('Using {} dataloader workers per process'.format(nw))

    # Build tensors for the training split
    x = torch.tensor(np.array(train_set), dtype=torch.float)
    y = torch.tensor(np.array(train_labels), dtype=torch.long)

    train_dataset = torch.utils.data.TensorDataset(x, y)

    # Build tensors for the validation split
    x_val1 = torch.tensor(np.array(val_set), dtype=torch.float)
    y_val1 = torch.tensor(np.array(val_labels), dtype=torch.long)

    val_dataset = torch.utils.data.TensorDataset(x_val1, y_val1)

    # Wrap the tensors in DataLoaders
    train_num = len(train_dataset)
    train_loader = torch.utils.data.DataLoader(train_dataset,
                                               batch_size=batch_size, shuffle=True,
                                               num_workers=nw, drop_last=True)

    val_num = len(val_dataset)
    validate_loader = torch.utils.data.DataLoader(val_dataset,
                                                  batch_size=batch_size, shuffle=False,
                                                  num_workers=nw, drop_last=True)

    print("using {} samples for training, {} samples for validation.".format(train_num, val_num))

    # Build the network
    net = GoogLeNet(num_classes=11, aux_logits=True, init_weights=True)
    net.to(device)
    # Loss function
    loss_function = nn.CrossEntropyLoss()
    # Optimizer
    optimizer = optim.SGD(net.parameters(), lr=0.003, momentum=0.9)

    epochs = 500    # number of training epochs
    best_acc = 0.0  # best validation accuracy seen so far

    # Path for saving the best model weights
    save_path = './multiScaleNet.pth'

    train_steps = len(train_loader)
    for epoch in range(epochs):
        net.train()
        running_loss = 0.0
        train_bar = tqdm(train_loader)
        for step, data in enumerate(train_bar):
            images, labels = data
            images = images.reshape(-1, 1024, 2, 1)  # (batch, channels, height, width)

            optimizer.zero_grad()

            logits, aux_logits = net(images.to(device))
            aux_logits = torch.squeeze(aux_logits)
            # Combine cross-entropy on the logits with a center-loss term on the fc1 features
            loss0 = loss_function(logits, labels.to(device))
            loss_center = center_loss(aux_logits, labels.to(device), 0.5)
            loss = loss0 + loss_center * 0.5

            loss.backward()
            optimizer.step()

            running_loss += loss.item()

            train_bar.desc = "train epoch[{}/{}] loss:{:.3f}".format(epoch + 1,
                                                                     epochs,
                                                                     loss)

        # validate
        net.eval()
        acc = 0.0
        with torch.no_grad():
            val_bar = tqdm(validate_loader)
            for val_data in val_bar:
                val_images, val_labels = val_data
                val_images = val_images.reshape(-1, 1024, 2, 1)
                outputs = net(val_images.to(device))
                predict_y = torch.max(outputs, dim=1)[1]
                acc += torch.eq(predict_y, val_labels.to(device)).sum().item()

        val_accurate = acc / val_num
        print('[epoch %d] train_loss: %.3f  val_accuracy: %.3f' %
              (epoch + 1, running_loss / train_steps, val_accurate))

        if val_accurate > best_acc:
            best_acc = val_accurate
            torch.save(net.state_dict(), save_path)

    print('Finished Training')


if __name__ == '__main__':
    main()
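
The center_loss module imported above is not included in the post. Below is a minimal, stateless sketch that matches the call center_loss(aux_logits, labels, 0.5); it approximates each class center with the in-batch class mean (the standard center loss of Wen et al. 2016 keeps learnable running centers, so treat this as an assumption, not the author's original module).

center_loss.py

import torch


def center_loss(features, labels, alpha):
    """features: (N, D) tensor; labels: (N,) long tensor; alpha: loss scale."""
    loss = features.new_zeros(())
    for c in labels.unique():
        mask = labels == c
        # In-batch class center; detached so only the features are pulled toward it
        center = features[mask].mean(dim=0).detach()
        loss = loss + ((features[mask] - center) ** 2).sum()
    return alpha * loss / features.size(0)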


predict.py

import os
import json
import numpy as np
import torch
from tqdm import tqdm

from multi_scale_module import GoogLeNet


def main(validate_loader, val_num):
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    # read the class-index -> class-name mapping
    json_path = './class_indices.json'
    assert os.path.exists(json_path), "file: '{}' does not exist.".format(json_path)

    with open(json_path, "r") as json_file:
        class_indict = json.load(json_file)

    # create model
    model = GoogLeNet(num_classes=11, aux_logits=False).to(device)

    # load model weights
    weights_path = r"E:\python\modulation_identification\core\model\multi_scale_2\multiScaleNet.pth"
    assert os.path.exists(weights_path), "file: '{}' does not exist.".format(weights_path)
    model.load_state_dict(torch.load(weights_path, map_location=device))

    model.eval()
    acc = 0.0
    with torch.no_grad():
        # predict class
        val_bar = tqdm(validate_loader)
        for val_data in val_bar:
            val_images, val_labels = val_data
            val_images = val_images.reshape(-1, 1024, 2, 1)
            outputs = torch.squeeze(model(val_images.to(device))).cpu()
            predicts = torch.max(outputs, dim=1)[1]
            acc += torch.eq(predicts, val_labels).sum().item()  # predicts is already on the CPU

    val_accurate = acc / val_num
    print('val_accuracy: %.3f' % (val_accurate))

if __name__ == '__main__':
    data_root = r'E:\python\modulation_identification\data'
    test_dataset = np.load(os.path.join(data_root, 'test1.npy'))
    labels = np.load(os.path.join(data_root, 'test1_label.npy'))

    test_labels = [int(i[0]) for i in labels]
    test_labels = torch.tensor(np.array(test_labels), dtype=torch.long)

    # test_dataset is already an ndarray; convert it to a float tensor directly
    test_set = torch.tensor(test_dataset, dtype=torch.float)

    dataset = torch.utils.data.TensorDataset(test_set, test_labels)

    batch_size = 32
    nw = min([os.cpu_count(), batch_size if batch_size > 1 else 0, 8])
    val_num = len(dataset)
    validate_loader = torch.utils.data.DataLoader(dataset,
                                                  batch_size=batch_size, shuffle=False,
                                                  num_workers=nw, drop_last=True)

    main(validate_loader, val_num)
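
predict.py reads a class_indices.json that maps class indices to modulation names, but the post does not show how that file is created. A hypothetical way to generate it is sketched below; the mod_0 ... mod_10 names are placeholders, not the dataset's real labels.

import json

# Placeholder mapping -- substitute the actual modulation names of your dataset
class_indict = {str(i): 'mod_{}'.format(i) for i in range(11)}
with open('class_indices.json', 'w') as f:
    json.dump(class_indict, f, indent=4)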


multi_scale_module.py

import torch.nn as nn
import torch
import torch.nn.functional as F


class GoogLeNet(nn.Module):
    def __init__(self, num_classes=1000, aux_logits=True, init_weights=False):
        super(GoogLeNet, self).__init__()
        self.aux_logits = aux_logits

        self.conv4 = BasicConv2d(1024, 512, kernel_size=(3, 1), stride=2, padding=(1, 0))
        self.inception3a = Inception(512, 256, 256, 128, 128, 64, 64, 32)
        self.conv5 = BasicConv2d(480, 256, kernel_size=(3, 1), stride=2, padding=(1, 0))
        self.inception3b = Inception(256, 64, 128, 32, 64, 32, 32, 16)

        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.fc1 = nn.Linear(144, 72)
        self.fc2 = nn.Linear(72, num_classes)

        if init_weights:
            self._initialize_weights()

    def forward(self, x):

        x = self.conv4(x)
        x = self.inception3a(x)
        x = self.conv5(x)
        x = self.inception3b(x)

        x = self.avgpool(x)
        # Flatten the pooled feature map to shape (N, C)
        x = torch.flatten(x, 1)
        x = F.dropout(x, 0.5, training=self.training)
        x1 = self.fc1(x)
        # x = F.dropout(x1, 0.5, training=self.training)
        x = self.fc2(x1)
        if self.training:
            # Also return the fc1 features; train.py feeds them to the center loss
            return x, x1
        return x

    def _initialize_weights(self):
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                nn.init.normal_(m.weight, 0, 0.01)
                nn.init.constant_(m.bias, 0)


class Inception(nn.Module):
    def __init__(self, in_channels, ch1x1, ch3x3red, ch3x3, ch5x5red, ch5x5, ch7x7red, ch7x7):
        super(Inception, self).__init__()

        self.branch1 = BasicConv2d(in_channels, ch1x1, kernel_size=1)

        self.branch2 = nn.Sequential(
            BasicConv2d(in_channels, ch3x3red, kernel_size=1),
            BasicConv2d(ch3x3red, ch3x3, kernel_size=(3, 1), padding=(1, 0))  # padding keeps output H/W equal to input H/W
        )

        self.branch3 = nn.Sequential(
            BasicConv2d(in_channels, ch5x5red, kernel_size=1),
            BasicConv2d(ch5x5red, ch5x5, kernel_size=(5, 1), padding=(2, 0))  # padding keeps output H/W equal to input H/W
        )

        self.branch4 = nn.Sequential(
            BasicConv2d(in_channels, ch7x7red, kernel_size=1),
            BasicConv2d(ch7x7red, ch7x7, kernel_size=(7, 1), padding=(3, 0))
        )

    def _forward(self, x):
        branch1 = self.branch1(x)
        branch2 = self.branch2(x)
        branch3 = self.branch3(x)
        branch4 = self.branch4(x)

        outputs = [branch1, branch2, branch3, branch4]
        return outputs

    def forward(self, x):
        outputs = self._forward(x)
        return torch.cat(outputs, 1)


# Auxiliary classifier (defined here but not instantiated by GoogLeNet above)
class InceptionAux(nn.Module):

    def __init__(self, in_channels, num_classes):
        super(InceptionAux, self).__init__()
        self.averagePool = nn.AvgPool2d(kernel_size=1, stride=1)
        self.conv = BasicConv2d(in_channels, 34, kernel_size=1)
        self.fc = nn.Linear(70176, num_classes)

    def forward(self, x):
        x = self.averagePool(x)
        x = torch.flatten(x, 1)
        x = F.dropout(x, 0.5, training=self.training)
        x = F.relu(self.fc(x), inplace=True)
        return x


# Basic convolution block (note: ReLU is applied before BatchNorm here)
class BasicConv2d(nn.Module):
    def __init__(self, in_channels, out_channels, **kwargs):
        super(BasicConv2d, self).__init__()
        self.conv = nn.Conv2d(in_channels, out_channels, **kwargs)
        self.relu = nn.ReLU(inplace=True)
        self.bn = nn.BatchNorm2d(out_channels)

    def forward(self, x):
        x = self.conv(x)
        x = self.relu(x)
        x = self.bn(x)
        return x
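
A quick sanity check (not part of the original post) is to push a dummy batch shaped like the (batch, 1024, 2, 1) input used in train.py through the model and confirm the layer dimensions line up:

import torch
from multi_scale_module import GoogLeNet

net = GoogLeNet(num_classes=11, aux_logits=True, init_weights=True)
net.eval()                          # eval mode: forward returns only the logits
dummy = torch.randn(4, 1024, 2, 1)  # (batch, channels, height, width)
with torch.no_grad():
    out = net(dummy)
print(out.shape)                    # expected: torch.Size([4, 11])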

