TVM中的调度原语

TVM中的调度原语

TVM是一种用于高效内核构造的领域专用语言。             

本文将展示如何使用TVM提供的各种原语来调度计算。

from __future__ import absolute_import, print_function

 

import tvm

from tvm import te

import numpy as np

通常存在多种方法来计算相同的结果,但不同的方法会导致不同的局部性和性能。因此TVM要求用户提供一份描述,说明如何执行计算,这份描述称为调度(Schedule)。

调度是一组计算转换,转换程序中的计算循环。

# declare some variables for use later

n = te.var("n")

m = te.var("m")

可以从操作列表中创建调度。默认情况下,调度以行主序(row-major)、串行的方式计算张量。

# declare a matrix element-wise multiply

A = te.placeholder((m, n), name="A")

B = te.placeholder((m, n), name="B")

C = te.compute((m, n), lambda i, j: A[i, j] * B[i, j], name="C")

 

s = te.create_schedule([C.op])

# lower will transform the computation from definition to the real

# callable function. With argument `simple_mode=True`, it will

# return you a readable C like statement, we use it here to print the

# schedule result.

print(tvm.lower(s, [A, B, C], simple_mode=True))

Out:

primfn(A_1: handle, B_1: handle, C_1: handle) -> ()

  attr = {"global_symbol": "main", "tir.noalias": True}

  buffers = {B: Buffer(B_2: Pointer(float32), float32, [m: int32, n: int32], [stride: int32, stride_1: int32], type="auto"),

             C: Buffer(C_2: Pointer(float32), float32, [m, n], [stride_2: int32, stride_3: int32], type="auto"),

             A: Buffer(A_2: Pointer(float32), float32, [m, n], [stride_4: int32, stride_5: int32], type="auto")}

  buffer_map = {A_1: A, B_1: B, C_1: C} {

  for (i: int32, 0, m) {

    for (j: int32, 0, n) {

      C_2[((i*stride_2) + (j*stride_3))] = ((float32*)A_2[((i*stride_4) + (j*stride_5))]*(float32*)B_2[((i*stride) + (j*stride_1))])

    }

  }

}

一个调度由多个阶段(Stage)组成,一个阶段代表一个算子的调度方式。每个阶段都提供了多种方法来对其进行调度。

split(拆分)

split 可以按指定的因子(factor),将指定的轴拆分为两个轴。

A = te.placeholder((m,), name="A")
B = te.compute((m,), lambda i: A[i] * 2, name="B")
 
s = te.create_schedule(B.op)
xo, xi = s[B].split(B.op.axis[0], factor=32)
print(tvm.lower(s, [A, B], simple_mode=True))

Out:

primfn(A_1: handle, B_1: handle) -> ()

  attr = {"global_symbol": "main", "tir.noalias": True}

  buffers = {B: Buffer(B_2: Pointer(float32), float32, [m: int32], [stride: int32], type="auto"),

             A: Buffer(A_2: Pointer(float32), float32, [m], [stride_1: int32], type="auto")}

  buffer_map = {A_1: A, B_1: B} {

  for (i.outer: int32, 0, floordiv((m + 31), 32)) {

    for (i.inner: int32, 0, 32) {

      if @tir.likely((((i.outer*32) + i.inner) < m), dtype=bool) {

        B_2[(((i.outer*32) + i.inner)*stride)] = ((float32*)A_2[(((i.outer*32) + i.inner)*stride_1)]*2f32)

      }

    }

  }

}

也可以按 nparts 来拆分轴,它与按 factor 拆分的方向相反(nparts 指定外层循环的份数,factor 指定内层循环的长度)。

A = te.placeholder((m,), name="A")
B = te.compute((m,), lambda i: A[i], name="B")
 
s = te.create_schedule(B.op)
bx, tx = s[B].split(B.op.axis[0], nparts=32)
print(tvm.lower(s, [A, B], simple_mode=True))

Out:

primfn(A_1: handle, B_1: handle) -> ()

  attr = {"global_symbol": "main", "tir.noalias": True}

  buffers = {B: Buffer(B_2: Pointer(float32), float32, [m: int32], [stride: int32], type="auto"),

             A: Buffer(A_2: Pointer(float32), float32, [m], [stride_1: int32], type="auto")}

  buffer_map = {A_1: A, B_1: B} {

  for (i.outer: int32, 0, 32) {

    for (i.inner: int32, 0, floordiv((m + 31), 32)) {

      if @tir.likely(((i.inner + (i.outer*floordiv((m + 31), 32))) < m), dtype=bool) {

        B_2[((i.inner + (i.outer*floordiv((m + 31), 32)))*stride)] = (float32*)A_2[((i.inner + (i.outer*floordiv((m + 31), 32)))*stride_1)]

      }

    }

  }

}

tile

tile helps you execute the computation tile by tile over two axes.

A = te.placeholder((m, n), name="A")
B = te.compute((m, n), lambda i, j: A[i, j], name="B")
 
s = te.create_schedule(B.op)
xo, yo, xi, yi = s[B].tile(B.op.axis[0], B.op.axis[1], x_factor=10, y_factor=5)
print(tvm.lower(s, [A, B], simple_mode=True))

Out:

primfn(A_1: handle, B_1: handle) -> ()

  attr = {"global_symbol": "main", "tir.noalias": True}

  buffers = {B: Buffer(B_2: Pointer(float32), float32, [m: int32, n: int32], [stride: int32, stride_1: int32], type="auto"),

             A: Buffer(A_2: Pointer(float32), float32, [m, n], [stride_2: int32, stride_3: int32], type="auto")}

  buffer_map = {A_1: A, B_1: B} {

  for (i.outer: int32, 0, floordiv((m + 9), 10)) {

    for (j.outer: int32, 0, floordiv((n + 4), 5)) {

      for (i.inner: int32, 0, 10) {

        if @tir.likely((((i.outer*10) + i.inner) < m), dtype=bool) {

          for (j.inner: int32, 0, 5) {

            if @tir.likely((((j.outer*5) + j.inner) < n), dtype=bool) {

              B_2[((((i.outer*10) + i.inner)*stride) + (((j.outer*5) + j.inner)*stride_1))] = (float32*)A_2[((((i.outer*10) + i.inner)*stride_2) + (((j.outer*5) + j.inner)*stride_3))]

            }

          }

        }

      }

    }

  }

}

fuse

fuse can fuse two consecutive axes of one computation.

A = te.placeholder((m, n), name="A")
B = te.compute((m, n), lambda i, j: A[i, j], name="B")
 
s = te.create_schedule(B.op)
# tile to four axises first: (i.outer, j.outer, i.inner, j.inner)
xo, yo, xi, yi = s[B].tile(B.op.axis[0], B.op.axis[1], x_factor=10, y_factor=5)
# then fuse (i.inner, j.inner) into one axis: (i.inner.j.inner.fused)
fused = s[B].fuse(xi, yi)
print(tvm.lower(s, [A, B], simple_mode=True))

Out:

primfn(A_1: handle, B_1: handle) -> ()

  attr = {"global_symbol": "main", "tir.noalias": True}

  buffers = {B: Buffer(B_2: Pointer(float32), float32, [m: int32, n: int32], [stride: int32, stride_1: int32], type="auto"),

             A: Buffer(A_2: Pointer(float32), float32, [m, n], [stride_2: int32, stride_3: int32], type="auto")}

  buffer_map = {A_1: A, B_1: B} {

  for (i.outer: int32, 0, floordiv((m + 9), 10)) {

    for (j.outer: int32, 0, floordiv((n + 4), 5)) {

      for (i.inner.j.inner.fused: int32, 0, 50) {

        if @tir.likely((((i.outer*10) + floordiv(i.inner.j.inner.fused, 5)) < m), dtype=bool) {

          if @tir.likely((((j.outer*5) + floormod(i.inner.j.inner.fused, 5)) < n), dtype=bool) {

            B_2[((((i.outer*10) + floordiv(i.inner.j.inner.fused, 5))*stride) + (((j.outer*5) + floormod(i.inner.j.inner.fused, 5))*stride_1))] = (float32*)A_2[((((i.outer*10) + floordiv(i.inner.j.inner.fused, 5))*stride_2) + (((j.outer*5) + floormod(i.inner.j.inner.fused, 5))*stride_3))]

          }

        }

      }

    }

  }

}

reorder

reorder can reorder the axes in the specified order.

A = te.placeholder((m, n), name="A")
B = te.compute((m, n), lambda i, j: A[i, j], name="B")
 
s = te.create_schedule(B.op)
# tile to four axises first: (i.outer, j.outer, i.inner, j.inner)
xo, yo, xi, yi = s[B].tile(B.op.axis[0], B.op.axis[1], x_factor=10, y_factor=5)
# then reorder the axises: (i.inner, j.outer, i.outer, j.inner)
s[B].reorder(xi, yo, xo, yi)
print(tvm.lower(s, [A, B], simple_mode=True))

Out:

primfn(A_1: handle, B_1: handle) -> ()

  attr = {"global_symbol": "main", "tir.noalias": True}

  buffers = {B: Buffer(B_2: Pointer(float32), float32, [m: int32, n: int32], [stride: int32, stride_1: int32], type="auto"),

             A: Buffer(A_2: Pointer(float32), float32, [m, n], [stride_2: int32, stride_3: int32], type="auto")}

  buffer_map = {A_1: A, B_1: B} {

  for (i.inner: int32, 0, 10) {

    for (j.outer: int32, 0, floordiv((n + 4), 5)) {

      for (i.outer: int32, 0, floordiv((m + 9), 10)) {

        if @tir.likely((((i.outer*10) + i.inner) < m), dtype=bool) {

          for (j.inner: int32, 0, 5) {

            if @tir.likely((((j.outer*5) + j.inner) < n), dtype=bool) {

              B_2[((((i.outer*10) + i.inner)*stride) + (((j.outer*5) + j.inner)*stride_1))] = (float32*)A_2[((((i.outer*10) + i.inner)*stride_2) + (((j.outer*5) + j.inner)*stride_3))]

            }

          }

        }

      }

    }

  }

}

bind

bind can bind a specified axis with a thread axis, often used in gpu programming.

A = te.placeholder((n,), name="A")
B = te.compute(A.shape, lambda i: A[i] * 2, name="B")
 
s = te.create_schedule(B.op)
bx, tx = s[B].split(B.op.axis[0], factor=64)
s[B].bind(bx, te.thread_axis("blockIdx.x"))
s[B].bind(tx, te.thread_axis("threadIdx.x"))
print(tvm.lower(s, [A, B], simple_mode=True))

Out:

primfn(A_1: handle, B_1: handle) -> ()
  attr = {"global_symbol": "main", "tir.noalias": True}
  buffers = {B: Buffer(B_2: Pointer(float32), float32, [n: int32], [stride: int32], type="auto"),
             A: Buffer(A_2: Pointer(float32), float32, [n], [stride_1: int32], type="auto")}
  buffer_map = {A_1: A, B_1: B} {
  attr [IterVar(blockIdx.x: int32, (nullptr), "ThreadIndex", "blockIdx.x")] "thread_extent" = floordiv((n + 63), 64);
  attr [IterVar(threadIdx.x: int32, (nullptr), "ThreadIndex", "threadIdx.x")] "thread_extent" = 64;
  if @tir.likely((((blockIdx.x*64) + threadIdx.x) < n), dtype=bool) {
    B_2[(((blockIdx.x*64) + threadIdx.x)*stride)] = ((float32*)A_2[(((blockIdx.x*64) + threadIdx.x)*stride_1)]*2f32)
  }
}

compute_at

For a schedule that consists of multiple operators, TVM will compute tensors at the root separately by default.

A = te.placeholder((m,), name="A")
B = te.compute((m,), lambda i: A[i] + 1, name="B")
C = te.compute((m,), lambda i: B[i] * 2, name="C")
 
s = te.create_schedule(C.op)
print(tvm.lower(s, [A, B, C], simple_mode=True))

Out:

primfn(A_1: handle, B_1: handle, C_1: handle) -> ()

  attr = {"global_symbol": "main", "tir.noalias": True}

  buffers = {C: Buffer(C_2: Pointer(float32), float32, [m: int32], [stride: int32], type="auto"),

             B: Buffer(B_2: Pointer(float32), float32, [m], [stride_1: int32], type="auto"),

             A: Buffer(A_2: Pointer(float32), float32, [m], [stride_2: int32], type="auto")}

  buffer_map = {A_1: A, B_1: B, C_1: C} {

  for (i: int32, 0, m) {

    B_2[(i*stride_1)] = ((float32*)A_2[(i*stride_2)] + 1f32)

  }

  for (i_1: int32, 0, m) {

    C_2[(i_1*stride)] = ((float32*)B_2[(i_1*stride_1)]*2f32)

  }

}

compute_at can move computation of B into the first axis of computation of C.

A = te.placeholder((m,), name="A")

B = te.compute((m,), lambda i: A[i] + 1, name="B")

C = te.compute((m,), lambda i: B[i] * 2, name="C")

 

s = te.create_schedule(C.op)

s[B].compute_at(s[C], C.op.axis[0])

print(tvm.lower(s, [A, B, C], simple_mode=True))

Out:

primfn(A_1: handle, B_1: handle, C_1: handle) -> ()

  attr = {"global_symbol": "main", "tir.noalias": True}

  buffers = {B: Buffer(B_2: Pointer(float32), float32, [m: int32], [stride: int32], type="auto"),

             C: Buffer(C_2: Pointer(float32), float32, [m], [stride_1: int32], type="auto"),

             A: Buffer(A_2: Pointer(float32), float32, [m], [stride_2: int32], type="auto")}

  buffer_map = {A_1: A, B_1: B, C_1: C} {

  for (i: int32, 0, m) {

    B_2[(i*stride)] = ((float32*)A_2[(i*stride_2)] + 1f32)

    C_2[(i*stride_1)] = ((float32*)B_2[(i*stride)]*2f32)

  }

}

compute_inline

compute_inline can mark one stage as inline, then the body of computation will be expanded and inserted at the address where the tensor is required.

A = te.placeholder((m,), name="A")

B = te.compute((m,), lambda i: A[i] + 1, name="B")

C = te.compute((m,), lambda i: B[i] * 2, name="C")

 

s = te.create_schedule(C.op)

s[B].compute_inline()

print(tvm.lower(s, [A, B, C], simple_mode=True))

Out:

primfn(A_1: handle, B_1: handle, C_1: handle) -> ()

  attr = {"global_symbol": "main", "tir.noalias": True}

  buffers = {C: Buffer(C_2: Pointer(float32), float32, [m: int32], [stride: int32], type="auto"),

             B: Buffer(B_2: Pointer(float32), float32, [m], [stride_1: int32], type="auto"),

             A: Buffer(A_2: Pointer(float32), float32, [m], [stride_2: int32], type="auto")}

  buffer_map = {A_1: A, B_1: B, C_1: C} {

  for (i: int32, 0, m) {

    C_2[(i*stride)] = (((float32*)A_2[(i*stride_2)] + 1f32)*2f32)

  }

}

compute_root

compute_root can move computation of one stage to the root.

A = te.placeholder((m,), name="A")

B = te.compute((m,), lambda i: A[i] + 1, name="B")

C = te.compute((m,), lambda i: B[i] * 2, name="C")

 

s = te.create_schedule(C.op)

s[B].compute_at(s[C], C.op.axis[0])

s[B].compute_root()

print(tvm.lower(s, [A, B, C], simple_mode=True))

Out:

primfn(A_1: handle, B_1: handle, C_1: handle) -> ()

  attr = {"global_symbol": "main", "tir.noalias": True}

  buffers = {B: Buffer(B_2: Pointer(float32), float32, [m: int32], [stride: int32], type="auto"),

             C: Buffer(C_2: Pointer(float32), float32, [m], [stride_1: int32], type="auto"),

             A: Buffer(A_2: Pointer(float32), float32, [m], [stride_2: int32], type="auto")}

  buffer_map = {A_1: A, B_1: B, C_1: C} {

  for (i: int32, 0, m) {

    B_2[(i*stride)] = ((float32*)A_2[(i*stride_2)] + 1f32)

  }

  for (i_1: int32, 0, m) {

    C_2[(i_1*stride_1)] = ((float32*)B_2[(i_1*stride)]*2f32)

  }

}

Summary
本文介绍tvm中的调度原语,它允许用户轻松灵活地调度计算。
为了获得性能良好的内核实现,一般的工作流程通常是:
通过一系列的运算来描述计算。
尝试用基元来调度计算。
编译运行,查看性能差异。
根据运行结果调整调度方案。
下载Python源代码:schedule_primitives.py
下载Jupyter笔记本:schedule_primitives.ipynb

posted @ 2020-12-14 10:12  吴建明wujianming  阅读(473)  评论(0编辑  收藏  举报