GPU Programming Examples
The GPU is one of the representatives of multi-core technology: many relatively low-power cores are integrated on a single chip while the clock frequency of an individual core stays roughly unchanged (generally 1-3 GHz), so the design focus shifts to integrating many cores. In this sense the GPU is a special kind of multi-core processor. The experiments in this article were run on the Lenovo DeepComp 7000G GPU cluster, which has 100 nodes. Each node contains two quad-core Intel Xeon CPUs and 16 GB of memory; 16 of the nodes are equipped with one GPU card and 18 of them with two GPU cards.
Compiling the GPU program: nvcc -o vectorAdd vectorAdd.cu
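Since the batch queue used below is named c2050 (suggesting Fermi-class Tesla C2050 cards, compute capability 2.0), the code could also be compiled for that architecture explicitly, for example with nvcc -arch=sm_20 -o vectorAdd vectorAdd.cu. Whether this flag is useful, or still accepted, depends on the CUDA toolkit version installed on the cluster; treat it as an optional refinement rather than part of the original recipe.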
Running:
For convenience, I wrote a simple shell script to submit jobs; its contents are as follows:
#!/bin/bash
# Job-submission helper: pass the name of the compiled program as the single argument.
# Output files left over from a previous run are removed before resubmitting.
if [ -f $@.log ]; then
    rm $@.log
fi
if [ -f $@.err ]; then
    rm $@.err
fi
# Submit the program to the c2050 queue; stdout goes to <name>.log, stderr to <name>.err.
bsub -q c2050 -o $@.log -e $@.err ./$@
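Assuming the script is saved under some name, say sub.sh (the name here is only for illustration), and made executable, running ./sub.sh vectorAdd first deletes any stale vectorAdd.log and vectorAdd.err, then submits ./vectorAdd to the c2050 queue through LSF's bsub, with standard output redirected to vectorAdd.log and standard error to vectorAdd.err.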
Examples:
1. Vector addition
#include <stdio.h>

#define N 200000   // number of vector elements
#define M 500      // threads per block

// Each thread adds one pair of elements.
__global__ void kernelvectorAdd(int *dev_a, int *dev_b, int *dev_c)
{
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid < N)
    {
        dev_c[tid] = dev_a[tid] + dev_b[tid];
    }
}

int main(void)
{
    int a[N], b[N], c[N];
    int *dev_a, *dev_b, *dev_c;

    // Allocate device buffers.
    cudaMalloc((void**)&dev_a, N * sizeof(int));
    cudaMalloc((void**)&dev_b, N * sizeof(int));
    cudaMalloc((void**)&dev_c, N * sizeof(int));

    // Initialize the input vectors on the host.
    for (int i = 0; i < N; i++)
    {
        a[i] = i + 1;
        b[i] = i + 1;
    }

    // Copy the inputs to the device.
    cudaMemcpy(dev_a, a, N * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_b, b, N * sizeof(int), cudaMemcpyHostToDevice);

    // (N + M - 1) / M blocks of M threads: rounds up so every element is covered.
    kernelvectorAdd<<<(N + M - 1) / M, M>>>(dev_a, dev_b, dev_c);

    // Copy the result back; this implicitly waits for the kernel to finish.
    cudaMemcpy(c, dev_c, N * sizeof(int), cudaMemcpyDeviceToHost);

    cudaFree(dev_a);
    cudaFree(dev_b);
    cudaFree(dev_c);

    for (int i = 0; i < N; i++)
    {
        printf("a[%d] is %d, b[%d] is %d, c[%d] is %d\n", i, a[i], i, b[i], i, c[i]);
    }
    return 0;
}
This one is fairly simple; the program should be self-explanatory.
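One thing the program leaves out is error handling: every CUDA runtime call and the kernel launch itself can fail silently. A minimal sketch of how that could be added follows; the CHECK macro is not part of the original program, just one common way of wrapping the runtime calls.

#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>

// Hypothetical helper: abort with a readable message if a CUDA runtime call fails.
#define CHECK(call)                                                      \
    do {                                                                 \
        cudaError_t err = (call);                                        \
        if (err != cudaSuccess) {                                        \
            fprintf(stderr, "CUDA error \"%s\" at %s:%d\n",              \
                    cudaGetErrorString(err), __FILE__, __LINE__);        \
            exit(EXIT_FAILURE);                                          \
        }                                                                \
    } while (0)

// Usage around the calls from the vector-addition example:
//   CHECK(cudaMalloc((void**)&dev_a, N * sizeof(int)));
//   CHECK(cudaMemcpy(dev_a, a, N * sizeof(int), cudaMemcpyHostToDevice));
//   kernelvectorAdd<<<(N + M - 1) / M, M>>>(dev_a, dev_b, dev_c);
//   CHECK(cudaGetLastError());        // catches launch-configuration errors
//   CHECK(cudaDeviceSynchronize());   // catches errors raised while the kernel ran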
2. Matrix multiplication
#include <stdio.h>
#include <stdlib.h>

#define N 1000
#define TILE_WIDTH 16

// CPU reference implementation (kept for checking the GPU result).
void MatrixMul(int *A, int *B, int *C, int Width) {
    int i, j, k;
    for (i = 0; i < Width; i++)
        for (j = 0; j < Width; j++) {
            int s = 0;
            for (k = 0; k < Width; k++)
                s += A[i * Width + k] * B[k * Width + j];
            C[i * Width + j] = s;
        }
}

// Each thread computes one element Pd[y][x] of the product.
__global__ void KernelMatrixMul(int* Md, int* Nd, int* Pd, int Width)
{
    int x = threadIdx.x + blockIdx.x * blockDim.x;
    int y = threadIdx.y + blockIdx.y * blockDim.y;
    if (x < Width && y < Width) {   // guard: Width need not be a multiple of TILE_WIDTH
        int Pvalue = 0;
        for (int k = 0; k < Width; ++k)
            Pvalue += Md[y * Width + k] * Nd[k * Width + x];
        Pd[y * Width + x] = Pvalue;
    }
}

int main() {
    int *A = (int*)malloc(N * N * sizeof(int));
    int *B = (int*)malloc(N * N * sizeof(int));
    int *C = (int*)malloc(N * N * sizeof(int));
    int i;
    for (i = 0; i < N * N; i++) {
        A[i] = 1;
        B[i] = 2;
    }
    //MatrixMul(A, B, C, N);

    int *dev_A, *dev_B, *dev_C;
    // Round the grid size up so the whole N x N matrix is covered.
    // N / TILE_WIDTH alone would leave the last rows and columns uncomputed
    // when N is not a multiple of TILE_WIDTH, as is the case for N = 1000.
    dim3 dimGrid((N + TILE_WIDTH - 1) / TILE_WIDTH, (N + TILE_WIDTH - 1) / TILE_WIDTH);
    dim3 dimBlock(TILE_WIDTH, TILE_WIDTH);

    cudaMalloc((void**)&dev_A, N * N * sizeof(int));
    cudaMalloc((void**)&dev_B, N * N * sizeof(int));
    cudaMalloc((void**)&dev_C, N * N * sizeof(int));
    cudaMemcpy(dev_A, A, N * N * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_B, B, N * N * sizeof(int), cudaMemcpyHostToDevice);

    KernelMatrixMul<<<dimGrid, dimBlock>>>(dev_A, dev_B, dev_C, N);
    cudaDeviceSynchronize();   // cudaThreadSynchronize() is deprecated

    cudaMemcpy(C, dev_C, N * N * sizeof(int), cudaMemcpyDeviceToHost);
    cudaFree(dev_A);
    cudaFree(dev_B);
    cudaFree(dev_C);

    int m, n;
    for (m = 0; m < N; m++) {
        for (n = 0; n < N; n++)
            printf("C[%d][%d] = %d\n", m, n, C[m * N + n]);
    }

    free(A);
    free(B);
    free(C);
    return 0;
}
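As a side note, TILE_WIDTH in the program above only shapes the launch configuration; the kernel itself still reads every operand straight from global memory. A common refinement, not used in the original code and sketched here under the same assumptions (square int matrices, the same grid and block dimensions), is to stage TILE_WIDTH x TILE_WIDTH sub-blocks in shared memory so each input element is fetched from global memory far fewer times:

// Hypothetical tiled variant: each block cooperatively loads one tile of Md
// and one tile of Nd into shared memory per iteration, then accumulates the
// partial dot products from the tiles.
__global__ void KernelMatrixMulTiled(int* Md, int* Nd, int* Pd, int Width)
{
    __shared__ int Ms[TILE_WIDTH][TILE_WIDTH];
    __shared__ int Ns[TILE_WIDTH][TILE_WIDTH];

    int x = threadIdx.x + blockIdx.x * TILE_WIDTH;   // column of Pd
    int y = threadIdx.y + blockIdx.y * TILE_WIDTH;   // row of Pd
    int Pvalue = 0;

    // Walk over the tiles that contribute to Pd[y][x].
    for (int t = 0; t < (Width + TILE_WIDTH - 1) / TILE_WIDTH; ++t) {
        int mCol = t * TILE_WIDTH + threadIdx.x;     // column read from Md
        int nRow = t * TILE_WIDTH + threadIdx.y;     // row read from Nd

        // Zero-pad partial tiles when Width is not a multiple of TILE_WIDTH.
        Ms[threadIdx.y][threadIdx.x] =
            (y < Width && mCol < Width) ? Md[y * Width + mCol] : 0;
        Ns[threadIdx.y][threadIdx.x] =
            (nRow < Width && x < Width) ? Nd[nRow * Width + x] : 0;
        __syncthreads();

        for (int k = 0; k < TILE_WIDTH; ++k)
            Pvalue += Ms[threadIdx.y][k] * Ns[k][threadIdx.x];
        __syncthreads();
    }

    if (y < Width && x < Width)
        Pd[y * Width + x] = Pvalue;
}

It can be launched with the same dimGrid and dimBlock as KernelMatrixMul; the zero-padding of partial tiles keeps the result correct when Width is not a multiple of TILE_WIDTH.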
3. Experimental results:
The final output is saved in *.log; if an error occurs during execution, the error messages are saved in *.err. Below is a screenshot of the results: