mxnet系列之-mshadow

mshadow是一个模板库,支持CPU和GPU

目录结构为

├── CHANGES.md
├── cmake
│   ├── Cuda.cmake
│   ├── mshadow.cmake
│   └── Utils.cmake
├── doc
│   ├── Doxyfile
│   ├── mkdoc.sh
│   └── README.md
├── guide
│   ├── basic.cpp
│   ├── basic_stream.cu
│   ├── config.mk
│   ├── defop.cpp
│   ├── exp-template
│   │   ├── exp_lazy.cpp
│   │   ├── exp_template.cpp
│   │   ├── exp_template_op.cpp
│   │   ├── Makefile
│   │   └── README.md
│   ├── Makefile
│   ├── mshadow-ps
│   │   ├── 2-levels.png
│   │   ├── config.mk
│   │   ├── dbstr.h
│   │   ├── dist_async_sum.cpp
│   │   ├── dist_async_sum-inl.h
│   │   ├── local.sh
│   │   ├── local_sum.cpp
│   │   ├── local_sum.cu
│   │   ├── local_sum-inl.h
│   │   ├── Makefile
│   │   └── README.md
│   ├── neuralnet
│   │   ├── config.mk
│   │   ├── convnet.cu
│   │   ├── Makefile
│   │   ├── nnet.cu
│   │   ├── nnet_ps.cu
│   │   ├── README.md
│   │   └── util.h
│   └── README.md
├── LICENSE
├── make
│   ├── mshadow.mk
│   └── README.md
├── mshadow
│   ├── base.h
│   ├── cuda
│   │   ├── reduce.cuh
│   │   └── tensor_gpu-inl.cuh
│   ├── dot_engine-inl.h
│   ├── expr_engine-inl.h
│   ├── expression.h
│   ├── expr_scalar-inl.h
│   ├── extension
│   │   ├── broadcast.h
│   │   ├── broadcast_with_axis.h
│   │   ├── channel_pool.h
│   │   ├── channel_unpool.h
│   │   ├── choose.h
│   │   ├── complex.h
│   │   ├── concat.h
│   │   ├── crop.h
│   │   ├── fill.h
│   │   ├── flip.h
│   │   ├── implicit_gemm.h
│   │   ├── mask.h
│   │   ├── mirror.h
│   │   ├── one_hot.h
│   │   ├── pack_col2patch.h
│   │   ├── pad.h
│   │   ├── range.h
│   │   ├── reduceto1d.h
│   │   ├── reduce_with_axis.h
│   │   ├── reshape.h
│   │   ├── slice_ex.h
│   │   ├── slice.h
│   │   ├── spatial_pool.h
│   │   ├── spatial_unpool.h
│   │   ├── spatial_upsampling_nearest.h
│   │   ├── swapaxis.h
│   │   ├── take_grad.h
│   │   ├── take.h
│   │   ├── transpose.h
│   │   └── unpack_patch2col.h
│   ├── extension.h
│   ├── half.h
│   ├── io.h
│   ├── logging.h
│   ├── packet
│   │   ├── plain-inl.h
│   │   └── sse-inl.h
│   ├── packet-inl.h
│   ├── random.h
│   ├── README.md
│   ├── stream_gpu-inl.h
│   ├── tensor_blob.h
│   ├── tensor_container.h
│   ├── tensor_cpu-inl.h
│   ├── tensor_gpu-inl.h
│   └── tensor.h
├── mshadow-ps
│   ├── mshadow_ps.h
│   ├── ps_dist-inl.h
│   ├── ps_local-inl.h
│   ├── ps_rabit-inl.h
│   ├── README.md
│   ├── thread.h
│   └── thread_util.h
├── README.md
├── scripts
│   └── travis_script.sh
└── test
    ├── Makefile
    ├── pairtest.cu
    ├── pool.cu
    ├── reshape.cu
    ├── test.cu
    ├── test.h
    ├── test_tblob.cc
    └── unpack.cu

所有的加减操作都是element-wise形式的,例如

// SGD update with L2 regularization (CPU version), element-wise:
//   weight[y][x] -= eta * (grad[y][x] + lambda * weight[y][x])
// eta is the learning rate, lambda the weight-decay coefficient.
// The arithmetic operators are expression templates that expand into
// per-element loops (shown explicitly in the translated version below).
void UpdateSGD(Tensor<cpu, 2> weight, Tensor<cpu, 2> grad, float eta, float lambda) {
  weight -= eta * (grad + lambda * weight);
}
将被翻译成

// Hand-expanded form of the SGD update: the expression-template version
// above compiles down to exactly this pair of nested per-element loops.
// eta is the learning rate, lambda the weight-decay coefficient.
void UpdateSGD(Tensor<cpu,2> weight, Tensor<cpu,2> grad, float eta, float lambda) {
  for (index_t row = 0; row < weight.size(0); ++row) {
    for (index_t col = 0; col < weight.size(1); ++col) {
      // weight := weight - eta * (grad + lambda * weight), one element at a time
      const float regularized_grad = grad[row][col] + lambda * weight[row][col];
      weight[row][col] -= eta * regularized_grad;
    }
  }
}
通过mshadow，只需写一份以xpu为模板参数的代码，就不必再分别为GPU和CPU各写一份了

上面的代码可以改写为

// Device-generic SGD update: instantiate with xpu = mshadow::cpu or
// mshadow::gpu, so a single implementation serves both devices.
// eta: learning rate; lambda: L2 (weight-decay) coefficient.
template<typename xpu>
void UpdateSGD(Tensor<xpu, 2> weight, const Tensor<xpu, 2> &grad,
               float eta, float lambda) {
  weight -= eta * (grad + lambda * weight);
}
basic.cpp中有基本的用法

// header file to use mshadow
#include "mshadow/tensor.h"
// this namespace contains all data structures, functions
using namespace mshadow;
// this namespace contains all operator overloads
using namespace mshadow::expr;

int main(void) {
  // initialize the tensor engine before using any tensor operation
  // (needed to set up CuBLAS when running on GPU)
  InitTensorEngine<cpu>();
  // assume we have a float space
  float data[20];
  // create a 2 x 5 x 2 tensor, from existing space
  Tensor<cpu, 3> ts(data, Shape3(2,5,2));
  // take first subscript of the tensor
  Tensor<cpu, 2> mat = ts[0];
  // Tensor object is only a handle, assignment means they have same data content
  // we can specify content type of a Tensor; if not specified, it is float by default
  Tensor<cpu, 2, float> mat2 = mat;

  // shape of matrix, note size order is the same as numpy
  printf("%u X %u matrix\n", mat.size(0), mat.size(1));

  // initialize all elements to zero
  mat = 0.0f;
  // assign some values
  mat[0][1] = 1.0f; mat[1][0] = 2.0f;
  // elementwise operations
  mat += (mat + 10.0f) / 10.0f + 2.0f;

  // print out matrix; note: mat and mat2 are handles (pointers) to the same data,
  // so writing through mat is visible when reading through mat2
  for (index_t i = 0; i < mat.size(0); ++i) {
    for (index_t j = 0; j < mat.size(1); ++j) {
      printf("%.2f ", mat2[i][j]);
    }
    printf("\n");
  }
  // shutdown tensor engine after usage
  ShutdownTensorEngine<cpu>();
  return 0;
}


可以在mxnet/src/operator/mshadow_op.h中增加新的操作(里面已有sign等函数可以参考)

// User-defined element-wise operator: logistic sigmoid, mapping any
// real input into the open interval (0, 1). Used via F<sigmoid>(...).
struct sigmoid {
  MSHADOW_XINLINE static float Map(float a) {
    const float e = expf(-a);  // e^{-a}
    return 1.0f / (1.0f + e);
  }
};

具体使用例子为

// Example of applying a user-defined element-wise operator with F<op>.
// NOTE: the second assignment overwrites the result of the first; the two
// lines are independent usage examples, not a cumulative computation.
template<typename xpu>
void ExampleSigmoid(Tensor<xpu, 2> out, const Tensor<xpu, 2> &in) {
  // out[i][j] = sigmoid(in[i][j] * 2.0f) + 1.0f
  out = F<sigmoid>(in * 2.0f) + 1.0f;
  // out[i][j] = sigmoid(in[i][j])   (replaces the previous result)
  out = F<sigmoid>(in);
}






[1] 源代码

[2] 教程

posted @ 2017-04-20 17:32  开往春天的拖拉机  阅读(139)  评论(0编辑  收藏  举报