Concise TensorFlow: CNN
```python
import tensorflow as tf

# CNN model definition
class CNN(tf.keras.Model):
    def __init__(self):
        super().__init__()
        self.conv1 = tf.keras.layers.Conv2D(  # convolutional layer
            filters=32,              # number of convolution kernels
            kernel_size=[5, 5],      # receptive field size
            padding='same',          # padding strategy ('valid' or 'same')
            activation=tf.nn.relu    # activation function
        )
        self.pool1 = tf.keras.layers.MaxPool2D(pool_size=[2, 2], strides=2)  # pooling layer
        self.conv2 = tf.keras.layers.Conv2D(
            filters=64,
            kernel_size=[5, 5],
            padding='same',
            activation=tf.nn.relu
        )
        self.pool2 = tf.keras.layers.MaxPool2D(pool_size=[2, 2], strides=2)
        self.flatten = tf.keras.layers.Reshape(target_shape=(7 * 7 * 64,))
        self.dense1 = tf.keras.layers.Dense(units=1024, activation=tf.nn.relu)
        self.dense2 = tf.keras.layers.Dense(units=10)

    def call(self, inputs):
        x = self.conv1(inputs)   # [batch_size, 28, 28, 32]
        x = self.pool1(x)        # [batch_size, 14, 14, 32]
        x = self.conv2(x)        # [batch_size, 14, 14, 64]
        x = self.pool2(x)        # [batch_size, 7, 7, 64]
        x = self.flatten(x)      # [batch_size, 7 * 7 * 64]
        x = self.dense1(x)       # [batch_size, 1024]
        x = self.dense2(x)       # [batch_size, 10]
        output = tf.nn.softmax(x)  # convert logits to a probability distribution
        return output
```
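To see the model in action, here is a minimal training sketch for the CNN above on MNIST. It is an illustration, not part of the original post: the hyperparameters are assumed values, and the data pipeline uses `tf.keras.datasets.mnist` for self-containedness.

```python
import tensorflow as tf

# Illustrative hyperparameters (assumptions, not from the original post)
num_epochs, batch_size, learning_rate = 5, 50, 0.001

# Load MNIST and reshape to [N, 28, 28, 1], the shape call() expects
(x_train, y_train), _ = tf.keras.datasets.mnist.load_data()
x_train = x_train[..., tf.newaxis].astype("float32") / 255.0

train_ds = tf.data.Dataset.from_tensor_slices((x_train, y_train)) \
    .shuffle(10000).batch(batch_size)

model = CNN()  # the class defined above
optimizer = tf.keras.optimizers.Adam(learning_rate=learning_rate)

for epoch in range(num_epochs):
    for images, labels in train_ds:
        with tf.GradientTape() as tape:
            y_pred = model(images)  # softmax probabilities, [batch_size, 10]
            loss = tf.reduce_mean(
                tf.keras.losses.sparse_categorical_crossentropy(
                    y_true=labels, y_pred=y_pred))
        grads = tape.gradient(loss, model.trainable_variables)
        optimizer.apply_gradients(zip(grads, model.trainable_variables))
    print("epoch %d, loss %f" % (epoch, loss.numpy()))
```

Note that because `call()` already applies softmax, the loss is computed on probabilities rather than logits, matching the training loop in the next block.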
```python
# Classic CNN architectures: VGG16, VGG19, ResNet, MobileNet
import tensorflow as tf
import tensorflow_datasets as tfds

num_epoch = 5
batch_size = 50
learning_rate = 0.001

dataset = tfds.load("tf_flowers", split=tfds.Split.TRAIN, as_supervised=True)
dataset = dataset.map(
    lambda img, label: (tf.image.resize(img, (224, 224)) / 255.0, label)) \
    .shuffle(1024).batch(batch_size)
model = tf.keras.applications.MobileNetV2(weights=None, classes=5)
optimizer = tf.keras.optimizers.Adam(learning_rate=learning_rate)
for e in range(num_epoch):
    for images, labels in dataset:
        with tf.GradientTape() as tape:
            labels_pred = model(images, training=True)
            loss = tf.keras.losses.sparse_categorical_crossentropy(
                y_true=labels, y_pred=labels_pred)
            loss = tf.reduce_mean(loss)
            print("loss %f" % loss.numpy())
        grads = tape.gradient(loss, model.trainable_variables)
        optimizer.apply_gradients(grads_and_vars=zip(grads, model.trainable_variables))
print(labels_pred)
```
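After training, accuracy can be measured with a Keras metric. The sketch below is my addition: since TFDS ships only a "train" split for tf_flowers, it reuses a 10% slice of that split as a stand-in held-out set, which is an assumption for illustration rather than a proper train/test separation.

```python
# Evaluation sketch (assumption: "train[:10%]" as a stand-in held-out set)
eval_ds = tfds.load("tf_flowers", split="train[:10%]", as_supervised=True)
eval_ds = eval_ds.map(
    lambda img, label: (tf.image.resize(img, (224, 224)) / 255.0, label)) \
    .batch(batch_size)

accuracy = tf.keras.metrics.SparseCategoricalAccuracy()
for images, labels in eval_ds:
    labels_pred = model(images, training=False)  # inference mode: uses frozen BatchNorm statistics
    accuracy.update_state(y_true=labels, y_pred=labels_pred)
print("accuracy: %f" % accuracy.result().numpy())
```

Passing `training=False` matters for MobileNetV2, which contains BatchNorm and Dropout layers that behave differently at inference time.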
Diligence pays off; progress step by step; let skill speak for itself.