Two TensorFlow program structure template drafts

A simple one

#!/usr/bin/env python3
# -*- coding: UTF-8 -*-
# @Time : 2018/8/3 10:47
# @File : tensorflow_template.py
# @Author : yusisc (yusisc@gmail.com)
import numpy as np
import tensorflow as tf


# model
def model(xin):
    x = xin
    x = tf.layers.dense(x, 32)  # a single fully connected layer as a stand-in
    yout = x
    return yout


# hyper-parameters
batch_size = 20  # small enough that the 100-row dummy dataset below yields a few iterations
learning_rate = 0.001
epoch_N = 1000

# placeholder
xx = tf.placeholder(tf.float32, shape=[None, 10])  # 10 feature columns in the dummy data
yy = tf.placeholder(tf.int64, shape=[None])        # integer class labels

# loss (the sparse variant matches the [None]-shaped integer labels; reduce to a scalar)
loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(
    labels=yy,
    logits=model(xx)
))

# optimizer, train_op
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
train_op = optimizer.minimize(loss)
init_op = tf.global_variables_initializer()

# writer
writer = tf.summary.FileWriter(logdir='logs')

with tf.Session() as sess:
    sess.run(init_op)
    writer.add_graph(graph=sess.graph)
    writer.flush()

    # dummy data: column 0 is the label, columns 1: are the features
    data = np.random.rand(100, 11)
    iter_let_N = data.shape[0] // batch_size
    for epoch_idx in range(epoch_N):
        for iter_let_idx in range(iter_let_N):
            batch_x = data[iter_let_idx * batch_size:(iter_let_idx + 1) * batch_size, 1:]
            batch_y = data[iter_let_idx * batch_size:(iter_let_idx + 1) * batch_size, 0].astype(np.int64)
            _, loss_val = sess.run([train_op, loss],
                                   feed_dict={xx: batch_x,
                                              yy: batch_y})
            print(f'loss at epoch:iter ({epoch_idx:5d}:{iter_let_idx:5d}) is {loss_val}')
"""
1. `tf.placeholder` for store epoch_idx.
2. `tf.placeholder` for store `dropout_prob`?
3. Built-in epoch_idx in TensorFlow?
"""

Make use of multiple GPUs

#!/usr/bin/env python3
# -*- coding: UTF-8 -*-
# @Time : 2018/8/3 11:57
# @File : tensorflow_template_multiGPUs.py
# @Author : yusisc (yusisc@gmail.com)
import numpy as np
import tensorflow as tf
from tensorflow.python.client import device_lib


def count_available_gpus():
    local_devices = device_lib.list_local_devices()
    gpu_names = [x.name for x in local_devices if x.device_type == 'GPU']
    gpu_N = len(gpu_names)
    print(f'{gpu_N} GPUs are detected: {gpu_names}')
    return gpu_N


# model
def model(xin, keep_prob=1.0, reuse=False):
    x = xin
    with tf.variable_scope('dense1', reuse=reuse):
        x = tf.layers.dense(x, 32)  # the scope's reuse flag shares weights across towers
    yout = x
    return yout


# hyper-parameters
batch_size = 20  # must be divisible by gpu_N for the tf.split below
learning_rate = 0.001
epoch_N = 1000
gpu_N = count_available_gpus()
assert gpu_N > 0, 'this template expects at least one visible GPU'

# placeholder
xx = tf.placeholder(tf.float32, shape=[None, 10])
yy = tf.placeholder(tf.int64, shape=[None])

# loss with multiple GPUs: shard the batch, build one tower per device,
# and reuse variables so all towers share the same weights
losses = []
xx_split = tf.split(xx, gpu_N)
yy_split = tf.split(yy, gpu_N)
for gpu_id in range(gpu_N):
    with tf.device(tf.DeviceSpec(device_type='GPU', device_index=gpu_id)):
        with tf.variable_scope(tf.get_variable_scope(), reuse=(gpu_id > 0)):
            loss_temp = tf.nn.sparse_softmax_cross_entropy_with_logits(
                labels=yy_split[gpu_id],
                logits=model(xx_split[gpu_id], reuse=gpu_id > 0)
            )
            losses.append(loss_temp)
loss = tf.reduce_mean(tf.concat(losses, axis=0))

# optimizer, train_op; colocating gradients keeps each tower's backward pass on its own GPU
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
train_op = optimizer.minimize(loss, colocate_gradients_with_ops=True)
init_op = tf.global_variables_initializer()

# writer
writer = tf.summary.FileWriter(logdir='logs')

# soft placement lets ops without a GPU kernel fall back to the CPU
with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:
    sess.run(init_op)
    writer.add_graph(graph=sess.graph)
    writer.flush()

    # dummy data: column 0 is the label, columns 1: are the features
    data = np.random.rand(100, 11)
    iter_let_N = data.shape[0] // batch_size
    for epoch_idx in range(epoch_N):
        for iter_let_idx in range(iter_let_N):
            batch_x = data[iter_let_idx * batch_size:(iter_let_idx + 1) * batch_size, 1:]
            batch_y = data[iter_let_idx * batch_size:(iter_let_idx + 1) * batch_size, 0].astype(np.int64)
            _, loss_val = sess.run([train_op, loss],
                                   feed_dict={xx: batch_x,
                                              yy: batch_y})
            print(f'loss at epoch:iter ({epoch_idx:5d}:{iter_let_idx:5d}) is {loss_val}')
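Minimizing the concatenated loss lets TensorFlow differentiate through all towers in one shot. The other common pattern (the style used in the repo referenced below) is to compute gradients per tower and average them explicitly before a single apply step. A minimal sketch of that variant, reusing `model`, `optimizer`, `gpu_N`, `xx_split`, and `yy_split` from the template above as a replacement for its loss/train_op section:

# one list of (grad, var) pairs per GPU tower
tower_grads = []
for gpu_id in range(gpu_N):
    with tf.device(tf.DeviceSpec(device_type='GPU', device_index=gpu_id)):
        with tf.variable_scope(tf.get_variable_scope(), reuse=(gpu_id > 0)):
            tower_loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(
                labels=yy_split[gpu_id],
                logits=model(xx_split[gpu_id], reuse=gpu_id > 0)
            ))
            tower_grads.append(optimizer.compute_gradients(tower_loss))

# average gradients variable-by-variable; variable reuse guarantees the same
# variable order in every tower, so zip(*) lines the pairs up correctly
avg_grads = []
for grads_and_vars in zip(*tower_grads):
    grads = [g for g, _ in grads_and_vars]
    shared_var = grads_and_vars[0][1]
    avg_grads.append((tf.reduce_mean(tf.stack(grads), axis=0), shared_var))
train_op = optimizer.apply_gradients(avg_grads)

In a real model some variables may receive `None` gradients (ones a tower never touches); those pairs need filtering before `tf.stack`.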

ref:

TensorFlow-Multi-GPUs/many-GPUs-MNIST.py at master · golbin/TensorFlow-Multi-GPUs
https://github.com/golbin/TensorFlow-Multi-GPUs/blob/master/many-GPUs-MNIST.py
