First Steps with a VGG Neural Network and a PyTorch Neural Network (VGG神经网络与PyTorch神经网络初试)
Part 1: VGG neural network (TensorFlow/Keras implementation)
# -*- coding: utf-8 -*-
"""
Created on Mon May 9 13:12:54 2022

@author: 12234

Train a VGG16-style classifier on a small cats-vs-dogs image set
(100 training images, 100 test images, read from ``path``).
"""
import os

import cv2
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers, regularizers

# Select the second GPU; set before TensorFlow initializes CUDA.
os.environ["CUDA_VISIBLE_DEVICES"] = "1"

resize = 224  # images are resized to (resize, resize) for the network input
path = "D:\\MyPython\\MyExp\\ExpWork\\train"


def load_data():
    """Load 100 training and 100 test images from ``path``.

    Images are named ``cat.<i>.jpg`` / ``dog.<i>.jpg`` (Kaggle dogs-vs-cats
    layout).  Indices 0-99 form the training set, 100-199 the test set;
    odd indices are dogs (label 1), even indices are cats (label 0).

    Returns:
        (train_data, train_label, test_data, test_label) as int32 ndarrays;
        the data arrays have shape (100, resize, resize, 3).

    Raises:
        FileNotFoundError: if an expected image file is missing or unreadable.
    """
    train_data = np.empty((100, resize, resize, 3), dtype="int32")
    train_label = np.empty((100,), dtype="int32")
    test_data = np.empty((100, resize, resize, 3), dtype="int32")
    test_label = np.empty((100,), dtype="int32")

    def _read(index):
        # Odd index -> dog (label 1), even index -> cat (label 0).
        label = 1 if index % 2 else 0
        name = ("dog." if label else "cat.") + str(index) + ".jpg"
        img = cv2.imread(os.path.join(path, name))
        # cv2.imread returns None on a missing/corrupt file; fail fast with a
        # clear message instead of crashing inside cv2.resize.
        if img is None:
            raise FileNotFoundError(os.path.join(path, name))
        return cv2.resize(img, (resize, resize)), label

    for i in range(100):
        train_data[i], train_label[i] = _read(i)
    for i in range(100, 200):
        test_data[i - 100], test_label[i - 100] = _read(i)
    return train_data, train_label, test_data, test_label


def vgg16():
    """Build a VGG16-style Sequential model for 2-class softmax output.

    Every conv layer is 3x3 / same-padding with L2 weight decay, followed by
    ReLU and BatchNorm; Dropout and 2x2 max-pooling are interleaved exactly as
    in the common VGG16+BN recipe.

    Returns:
        An uncompiled ``keras.Sequential`` model expecting (224, 224, 3) input.
    """
    weight_decay = 0.0005
    model = keras.Sequential()

    def _conv(filters, dropout=None, pool=False, **kwargs):
        # One conv "unit": Conv2D -> ReLU -> BatchNorm, then optional
        # Dropout and/or MaxPooling.  Factored out to avoid 13 copies.
        model.add(layers.Conv2D(filters, (3, 3), padding='same',
                                kernel_regularizer=regularizers.l2(weight_decay),
                                **kwargs))
        model.add(layers.Activation('relu'))
        model.add(layers.BatchNormalization())
        if dropout is not None:
            model.add(layers.Dropout(dropout))
        if pool:
            model.add(layers.MaxPooling2D(pool_size=(2, 2)))

    # Block 1 (layers 1-2)
    _conv(64, dropout=0.3, input_shape=(224, 224, 3))
    _conv(64, pool=True)
    # Block 2 (layers 3-4)
    _conv(128, dropout=0.4)
    _conv(128, pool=True)
    # Block 3 (layers 5-7)
    _conv(256, dropout=0.4)
    _conv(256, dropout=0.4)
    _conv(256, pool=True)
    # Block 4 (layers 8-10)
    _conv(512, dropout=0.4)
    _conv(512, dropout=0.4)
    _conv(512, pool=True)
    # Block 5 (layers 11-13)
    _conv(512, dropout=0.4)
    _conv(512, dropout=0.4)
    _conv(512, pool=True)
    model.add(layers.Dropout(0.5))

    # Classifier head (layers 14-16)
    model.add(layers.Flatten())
    model.add(layers.Dense(512, kernel_regularizer=regularizers.l2(weight_decay)))
    model.add(layers.Activation('relu'))
    model.add(layers.BatchNormalization())
    model.add(layers.Dense(512, kernel_regularizer=regularizers.l2(weight_decay)))
    model.add(layers.Activation('relu'))
    model.add(layers.BatchNormalization())
    model.add(layers.Dropout(0.5))
    model.add(layers.Dense(2))
    model.add(layers.Activation('softmax'))
    return model


def main():
    """Train, evaluate, save the model, and plot accuracy curves."""
    train_data, train_label, test_data, test_label = load_data()
    # NOTE(review): pixel values are kept in [0, 255]; dividing by 255.0 here
    # would likely help training -- left unchanged to preserve behavior.
    train_data = train_data.astype('float32')
    test_data = test_data.astype('float32')
    train_label = keras.utils.to_categorical(train_label, 2)
    test_label = keras.utils.to_categorical(test_label, 2)

    # Training configuration / hyperparameters: SGD with momentum.
    # ('lr' is deprecated in TF2 -- 'learning_rate' is the supported kwarg.)
    model = vgg16()
    sgd = tf.keras.optimizers.SGD(learning_rate=0.01, decay=1e-6,
                                  momentum=0.9, nesterov=True)
    model.compile(loss='categorical_crossentropy', optimizer=sgd,
                  metrics=['accuracy'])

    history = model.fit(train_data, train_label,
                        batch_size=20,
                        epochs=10,
                        validation_split=0.2,  # one fifth of the training set is used for validation
                        shuffle=True)

    scores = model.evaluate(test_data, test_label, verbose=1)
    print(scores)

    # Make sure the output directory exists before saving.
    os.makedirs('model', exist_ok=True)
    model.save('model/vgg16dogcat.h5')

    acc = history.history['accuracy']          # training-set accuracy per epoch
    val_acc = history.history['val_accuracy']  # validation-set accuracy per epoch
    epochs = range(1, len(acc) + 1)
    plt.plot(epochs, acc, 'bo', label='Trainning acc')
    plt.plot(epochs, val_acc, 'b', label='Vaildation acc')
    plt.legend()
    plt.show()


if __name__ == '__main__':
    main()
Part 2: PyTorch neural network (basic LeNet-style example)
# -*- coding: utf-8 -*-
"""
Created on Sun May 15 20:30:36 2022

@author: 12234
"""
import torch
import torch.nn as nn
import torch.nn.functional as F


class Net(nn.Module):
    """LeNet-style CNN: two conv+pool stages, then three fully connected layers.

    Expects a single-channel image; with a 32x32 input the conv stack yields
    a 16x5x5 feature map, which the classifier head maps down to 10 scores.
    """

    def __init__(self):
        super(Net, self).__init__()
        # 1 input channel -> 6 feature maps -> 16 feature maps, 5x5 kernels.
        self.conv1 = nn.Conv2d(1, 6, 5)
        self.conv2 = nn.Conv2d(6, 16, 5)
        # Affine layers (y = Wx + b) for the classifier head.
        self.fc1 = nn.Linear(16 * 5 * 5, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)

    def forward(self, x):
        # Stage 1: convolution -> ReLU -> 2x2 max-pool (window as a tuple).
        out = F.relu(self.conv1(x))
        out = F.max_pool2d(out, (2, 2))
        # Stage 2: same, with the square window given as a single int.
        out = F.relu(self.conv2(out))
        out = F.max_pool2d(out, 2)
        # Collapse every non-batch dimension before the linear layers.
        out = out.view(-1, self.num_flat_features(out))
        out = F.relu(self.fc1(out))
        out = F.relu(self.fc2(out))
        return self.fc3(out)

    def num_flat_features(self, x):
        """Return the number of elements per sample (all dims except batch)."""
        total = 1
        for dim in x.size()[1:]:
            total *= dim
        return total


net = Net()
print(net)
【推荐】国内首个AI IDE,深度理解中文开发场景,立即下载体验Trae
【推荐】编程新体验,更懂你的AI,立即体验豆包MarsCode编程助手
【推荐】抖音旗下AI助手豆包,你的智能百科全书,全免费不限次数
【推荐】轻量又高性能的 SSH 工具 IShell:AI 加持,快人一步
· 无需6万激活码!GitHub神秘组织3小时极速复刻Manus,手把手教你使用OpenManus搭建本
· C#/.NET/.NET Core优秀项目和框架2025年2月简报
· 葡萄城 AI 搜索升级:DeepSeek 加持,客户体验更智能
· 什么是nginx的强缓存和协商缓存
· 一文读懂知识蒸馏