Two TensorFlow program structure template drafts

Simple one

#!/usr/bin/env python3
# -*- coding: UTF-8 -*-
# @Time    : 2018/8/3 10:47
# @File    : tensorflow_template.py
# @Author  : yusisc (yusisc@gmail.com)

import numpy as np
import tensorflow as tf


# model
def model(xin):
    x = xin
    x = tf.layers.dense(x, 10)  # 10-class logits, matching the one-hot labels below
    yout = x
    return yout


# hyper-parameters
batch_size = 20          # small, so the toy 100-sample dataset below yields several batches
learning_rate = 0.001
epoch_N = 1000

# placeholder
xx = tf.placeholder(tf.float32, shape=[None, 784])
yy = tf.placeholder(tf.float32, shape=[None, 10])  # one-hot labels, same shape as the logits

# loss (reduced to a scalar so it can be minimized and printed directly)
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(
    labels=yy,
    logits=model(xx)
))

# optimizer, train_op
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
train_op = optimizer.minimize(loss)
init_op = tf.global_variables_initializer()

# writer
writer = tf.summary.FileWriter(logdir='logs')

with tf.Session() as sess:
    sess.run(init_op)
    writer.add_graph(graph=sess.graph)
    writer.flush()

    # toy data standing in for a real dataset: random features and one-hot labels
    data_x = np.random.rand(100, 784)
    data_y = np.eye(10)[np.random.randint(0, 10, size=100)]

    iter_let_N = data_x.shape[0] // batch_size

    for epoch_idx in range(epoch_N):
        for iter_let_idx in range(iter_let_N):
            batch_x = data_x[iter_let_idx * batch_size:(iter_let_idx + 1) * batch_size]
            batch_y = data_y[iter_let_idx * batch_size:(iter_let_idx + 1) * batch_size]
            _, loss_val = sess.run([train_op, loss],
                                   feed_dict={xx: batch_x,
                                              yy: batch_y})
            print(f'epoch {epoch_idx:5d}, iter {iter_let_idx:5d}: loss = {loss_val}')

"""
1. `tf.placeholder` for store epoch_idx.
2. `tf.placeholder` for store `dropout_prob`?
3. Built-in epoch_idx in TensorFlow?
"""

Make use of multiple GPUs (data parallelism: each batch is split across towers that share variables)

#!/usr/bin/env python3
# -*- coding: UTF-8 -*-
# @Time    : 2018/8/3 11:57
# @File    : tensorflow_template_multiGPUs.py
# @Author  : yusisc (yusisc@gmail.com)

import numpy as np
import tensorflow as tf
from tensorflow.python.client import device_lib


def count_available_gpus():
    local_devices = device_lib.list_local_devices()
    gpu_names = [x.name for x in local_devices if x.device_type == 'GPU']
    gpu_N = len(gpu_names)
    print(f'{gpu_N} GPUs are detected : {gpu_names}')
    return gpu_N

# model
def model(xin, keep_prob=1.0, reuse=False):
    # keep_prob is reserved for a future dropout layer (see the open questions above)
    x = xin
    with tf.variable_scope('dense1', reuse=reuse):
        x = tf.layers.dense(x, 10, reuse=reuse)  # 10-class logits, matching the one-hot labels below

    yout = x
    return yout


# hyper-parameters
batch_size = 20          # small, so the toy 100-sample dataset below yields several batches
learning_rate = 0.001
epoch_N = 1000
gpu_N = count_available_gpus()
assert gpu_N > 0, 'this template needs at least one GPU'

# placeholder
xx = tf.placeholder(tf.float32, shape=[None, 784])
yy = tf.placeholder(tf.float32, shape=[None, 10])  # one-hot labels, same shape as the logits

# loss with multiple GPUs: one tower per device, all sharing the same variables
losses = []
# the batch is split evenly, so batch_size must be divisible by gpu_N
xx_split = tf.split(xx, gpu_N)
yy_split = tf.split(yy, gpu_N)
for gpu_id in range(gpu_N):
    with tf.device(tf.DeviceSpec(device_type='GPU', device_index=gpu_id)):
        with tf.variable_scope(tf.get_variable_scope(), reuse=(gpu_id > 0)):
            loss_temp = tf.nn.softmax_cross_entropy_with_logits(
                labels=yy_split[gpu_id],
                logits=model(xx_split[gpu_id], reuse=gpu_id > 0)
            )
            losses.append(loss_temp)
loss = tf.reduce_mean(tf.concat(losses, axis=0))

# optimizer, train_op
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
# colocate_gradients_with_ops keeps each tower's backward pass on the same GPU as its forward pass
train_op = optimizer.minimize(loss, colocate_gradients_with_ops=True)
init_op = tf.global_variables_initializer()

# writer
writer = tf.summary.FileWriter(logdir='logs')

with tf.Session() as sess:
    sess.run(init_op)
    writer.add_graph(graph=sess.graph)
    writer.flush()

    # toy data standing in for a real dataset: random features and one-hot labels
    data_x = np.random.rand(100, 784)
    data_y = np.eye(10)[np.random.randint(0, 10, size=100)]

    iter_let_N = data_x.shape[0] // batch_size

    for epoch_idx in range(epoch_N):
        for iter_let_idx in range(iter_let_N):
            batch_x = data_x[iter_let_idx * batch_size:(iter_let_idx + 1) * batch_size]
            batch_y = data_y[iter_let_idx * batch_size:(iter_let_idx + 1) * batch_size]
            _, loss_val = sess.run([train_op, loss],
                                   feed_dict={xx: batch_x,
                                              yy: batch_y})
            print(f'epoch {epoch_idx:5d}, iter {iter_let_idx:5d}: loss = {loss_val}')
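
The single `minimize(loss, colocate_gradients_with_ops=True)` call above lets TensorFlow place each tower's backward pass on the tower's own GPU. The more explicit classic pattern computes and averages per-tower gradients by hand, as in TensorFlow's multi-GPU CIFAR-10 tutorial. A sketch under the assumptions that the `losses` list and `optimizer` from above are in scope and that all gradients are dense:

# explicit per-tower gradient averaging; a sketch, not a drop-in replacement
tower_grads = []
for gpu_id in range(gpu_N):
    with tf.device(tf.DeviceSpec(device_type='GPU', device_index=gpu_id)):
        tower_loss = tf.reduce_mean(losses[gpu_id])
        # compute_gradients returns a list of (gradient, variable) pairs
        tower_grads.append(optimizer.compute_gradients(tower_loss))

avg_grads = []
for grad_and_vars in zip(*tower_grads):   # one tuple per shared variable
    grads = [g for g, _ in grad_and_vars if g is not None]
    var = grad_and_vars[0][1]             # the same variable in every tower
    avg_grads.append((tf.reduce_mean(tf.stack(grads), axis=0), var))
train_op = optimizer.apply_gradients(avg_grads)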

ref:

TensorFlow-Multi-GPUs/many-GPUs-MNIST.py at master · golbin/TensorFlow-Multi-GPUs
https://github.com/golbin/TensorFlow-Multi-GPUs/blob/master/many-GPUs-MNIST.py

posted on 2018-08-03 12:19 by yusisc