Testing GPU matrix computation: TensorFlow 2 | PyTorch

TensorFlow

import tensorflow as tf
import timeit

physical_gpus = tf.config.list_physical_devices("GPU")  # list of local GPUs
physical_cpus = tf.config.list_physical_devices("CPU")  # list of local CPUs
print(f"GPU:{physical_gpus}")
print(f"CPU:{physical_cpus}")
print("Number of GPUs:", len(physical_gpus))
print("Number of CPUs:", len(physical_cpus))
tf.config.set_visible_devices(devices=physical_gpus[0], device_type="GPU")  # use only the first GPU
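
# Optional sketch (not in the original post): enable on-demand GPU memory allocation
# so TensorFlow does not reserve the whole card before the benchmark starts.
# Must run before any GPU op initializes the device; skip if not needed.
for gpu in physical_gpus:
    tf.config.experimental.set_memory_growth(gpu, True)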

print(tf.config.list_physical_devices('GPU'))
print(tf.test.is_gpu_available())  # deprecated in TF2; prefer tf.config.list_physical_devices('GPU')

def cpu_gpu_compare(n):
    # Create the operands on each device so the matmuls below run where we expect.
    with tf.device('/cpu:0'):
        cpu_a = tf.random.normal([10, n])
        cpu_b = tf.random.normal([n, 10])
    with tf.device('/gpu:0'):
        gpu_a = tf.random.normal([10, n])
        gpu_b = tf.random.normal([n, 10])
    print(gpu_a.device, gpu_b.device)
    def cpu_run():
        with tf.device('/cpu:0'):   # matrix multiplication on the CPU
            c = tf.matmul(cpu_a, cpu_b)
        return c
    def gpu_run():
        with tf.device('/gpu:0'):   # matrix multiplication on the GPU
            c = tf.matmul(gpu_a, gpu_b)
        return c
    # Warm-up pass so one-time initialization is not counted in the measurement.
    cpu_time = timeit.timeit(cpu_run, number=10)
    gpu_time = timeit.timeit(gpu_run, number=10)
    print('warmup:', cpu_time, gpu_time)
    # Actual measurement: total time of 10 runs on each device.
    cpu_time = timeit.timeit(cpu_run, number=10)
    gpu_time = timeit.timeit(gpu_run, number=10)
    print('run_time:', cpu_time, gpu_time)
    return cpu_time, gpu_time
n_list1 = range(1,2000,5)
n_list2 = range(2001,10000,100)
n_list = list(n_list1)+list(n_list2)
results = []
for n in n_list:
    results.append(cpu_gpu_compare(n))  # collect (cpu_time, gpu_time) for each size
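
The raw timings are easier to read as a curve than as console output. Below is a minimal plotting sketch; it assumes matplotlib is installed (it is not used anywhere in the original snippets) and that results holds the (cpu_time, gpu_time) pairs collected in the loop above.

import matplotlib.pyplot as plt

cpu_times = [r[0] for r in results]
gpu_times = [r[1] for r in results]

plt.plot(n_list, cpu_times, label='CPU, 10 matmuls')  # [10, n] x [n, 10]
plt.plot(n_list, gpu_times, label='GPU, 10 matmuls')
plt.xlabel('inner dimension n')
plt.ylabel('total time (s)')
plt.legend()
plt.show()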

PyTorch

import torch
import time

print('CUDA device count:', torch.cuda.device_count())
print('CUDA available:', torch.cuda.is_available())

in_row, in_f, out_f = 2, 2, 3
device = 'cuda:0'
s = time.time()
A = torch.randn(in_row, in_f).to(device)
linear = torch.nn.Linear(in_f, out_f).to(device)
for _ in range(10000):
    linear(A)
torch.cuda.synchronize()  # wait for queued GPU work to finish before reading the clock
print('CUDA take time:', time.time() - s)
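
For symmetry with the TensorFlow comparison, the same layer can also be timed on the CPU. A minimal sketch, reusing the shapes defined above; the *_cpu names are introduced here purely for illustration.

# CPU baseline for comparison (sketch, not part of the original snippet)
A_cpu = torch.randn(in_row, in_f)
linear_cpu = torch.nn.Linear(in_f, out_f)
s = time.time()
for _ in range(10000):
    linear_cpu(A_cpu)
print('CPU take time:', time.time() - s)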
posted @ 2023-07-12 23:45  ho_ho