Autoware Notes No. 8: ENet Obstacle Recognition (vision segment ENet detect)
I. Preface
All for one, and one for all.
I don't like lecturing about network models copied off the internet. These notes are all practical material meant to get you working hands-on right away; if you would like to discuss anything, feel free to contact me.
Please install Autoware 1.14 following my earlier notes in this series.
II. Installation
(1) Download ENet. It must be cloned under your home directory; otherwise the network_definition_file and pretrained_model_file paths in vision_segment_enet_detect.launch will no longer match (if you do install it elsewhere, see the launch-argument override sketched in step (5)).
$ cd ~
$ git clone --recursive https://github.com/TimoSaemann/ENet.git
$ cd ENet/caffe-enet
(2) Edit Makefile.config. I am using CUDA 10.0; my full Makefile.config is pasted below, and the key changes are summarized right after it.
## Refer to http://caffe.berkeleyvision.org/installation.html
# Contributions simplifying and improving our build system are welcome!

# cuDNN acceleration switch (uncomment to build with cuDNN).
USE_CUDNN := 1

# CPU-only switch (uncomment to build without GPU support).
# CPU_ONLY := 1

# uncomment to disable IO dependencies and corresponding data layers
USE_OPENCV := 1
USE_LEVELDB := 1
USE_LMDB := 1

# uncomment to allow MDB_NOLOCK when reading LMDB files (only if necessary)
# You should not set this flag if you will be reading LMDBs with any
# possibility of simultaneous read and write
# ALLOW_LMDB_NOLOCK := 1

# Uncomment if you're using OpenCV 3
OPENCV_VERSION := 3

# To customize your choice of compiler, uncomment and set the following.
# N.B. the default for Linux is g++ and the default for OSX is clang++
# CUSTOM_CXX := g++

# CUDA directory contains bin/ and lib/ directories that we need.
CUDA_DIR := /usr/local/cuda-10.0
# On Ubuntu 14.04, if cuda tools are installed via
# "sudo apt-get install nvidia-cuda-toolkit" then use this instead:
# CUDA_DIR := /usr

# CUDA architecture setting: going with all of them.
# For CUDA < 6.0, comment the *_50 lines for compatibility.
# For CUDA < 8.0, comment the *_60 and *_61 lines for compatibility.
# For CUDA >= 9.0, comment the *_20 and *_21 lines for compatibility.
CUDA_ARCH := -gencode arch=compute_61,code=compute_61

# BLAS choice:
# atlas for ATLAS (default)
# mkl for MKL
# open for OpenBlas
BLAS := open
# Custom (MKL/ATLAS/OpenBLAS) include and lib directories.
# Leave commented to accept the defaults for your choice of BLAS
# (which should work)!
BLAS_INCLUDE := /usr/local/cuda-10.0/targets/x86_64-linux/include
BLAS_LIB := /usr/local/cuda-10.0/targets/x86_64-linux/lib

# Homebrew puts openblas in a directory that is not on the standard search path
# BLAS_INCLUDE := $(shell brew --prefix openblas)/include
# BLAS_LIB := $(shell brew --prefix openblas)/lib

# This is required only if you will compile the matlab interface.
# MATLAB directory should contain the mex binary in /bin.
# MATLAB_DIR := /usr/local
# MATLAB_DIR := /Applications/MATLAB_R2012b.app

# NOTE: this is required only if you will compile the python interface.
# We need to be able to find Python.h and numpy/arrayobject.h.
PYTHON_INCLUDE := /usr/include/python2.7 \
		/usr/lib/python2.7/dist-packages/numpy/core/include
# Anaconda Python distribution is quite popular. Include path:
# Verify anaconda location, sometimes it's in root.
# ANACONDA_HOME := $(HOME)/anaconda
# PYTHON_INCLUDE := $(ANACONDA_HOME)/include \
#		$(ANACONDA_HOME)/include/python2.7 \
#		$(ANACONDA_HOME)/lib/python2.7/site-packages/numpy/core/include \

# Uncomment to use Python 3 (default is Python 2)
# PYTHON_LIBRARIES := boost_python3 python3.5m
# PYTHON_INCLUDE := /usr/include/python3.5m \
#		/usr/lib/python3.5/dist-packages/numpy/core/include

# We need to be able to find libpythonX.X.so or .dylib.
PYTHON_LIB := /usr/lib
# PYTHON_LIB := $(ANACONDA_HOME)/lib

# Homebrew installs numpy in a non standard path (keg only)
# PYTHON_INCLUDE += $(dir $(shell python -c 'import numpy.core; print(numpy.core.__file__)'))/include
# PYTHON_LIB += $(shell brew --prefix numpy)/lib

# Uncomment to support layers written in Python (will link against Python libs)
# WITH_PYTHON_LAYER := 1

# Whatever else you find you need goes here.
INCLUDE_DIRS := $(PYTHON_INCLUDE) /usr/local/include /usr/include/hdf5/serial/
LIBRARY_DIRS := $(PYTHON_LIB) /usr/local/lib /usr/lib /usr/lib/x86_64-linux-gnu

# If Homebrew is installed at a non standard location (for example your home directory) and you use it for general dependencies
# INCLUDE_DIRS += $(shell brew --prefix)/include
# LIBRARY_DIRS += $(shell brew --prefix)/lib

# Uncomment to use `pkg-config` to specify OpenCV library paths.
# (Usually not necessary -- OpenCV libraries are normally installed in one of the above $LIBRARY_DIRS.)
# USE_PKG_CONFIG := 1

# N.B. both build and distribute dirs are cleared on `make clean`
BUILD_DIR := build
DISTRIBUTE_DIR := distribute

# Uncomment for debugging. Does not work on OSX due to https://github.com/BVLC/caffe/issues/171
# DEBUG := 1

# The ID of the GPU that 'make runtest' will use to run unit tests.
TEST_GPUID := 0

# enable pretty build (comment to see full commands)
Q ?= @
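If you prefer to edit your own Makefile.config instead of copying mine wholesale, these are the settings above that actually matter for this build. Adjust CUDA_DIR to your CUDA install and CUDA_ARCH to your GPU's compute capability (compute_61 targets Pascal cards such as the GTX 10-series); the hdf5/serial entries in INCLUDE_DIRS / LIBRARY_DIRS are needed on recent Ubuntu versions.

USE_CUDNN := 1
USE_OPENCV := 1
OPENCV_VERSION := 3
CUDA_DIR := /usr/local/cuda-10.0
CUDA_ARCH := -gencode arch=compute_61,code=compute_61
BLAS := open
INCLUDE_DIRS := $(PYTHON_INCLUDE) /usr/local/include /usr/include/hdf5/serial/
LIBRARY_DIRS := $(PYTHON_LIB) /usr/local/lib /usr/lib /usr/lib/x86_64-linux-gnu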
(3) Edit the Makefile
Open the Makefile, search for LIBRARIES, and change the line to:
LIBRARIES += glog gflags protobuf boost_system boost_filesystem m hdf5_serial_hl hdf5_serial
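A hedged shortcut, assuming your Makefile still contains the stock upstream line ending in hdf5_hl hdf5, is to let sed make the substitution and then confirm it with grep:

$ sed -i 's/hdf5_hl hdf5/hdf5_serial_hl hdf5_serial/' Makefile
$ grep 'hdf5_serial' Makefile    # the LIBRARIES line should now end with hdf5_serial_hl hdf5_serial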
(4) Build ENet
$ make && make distribute
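A minimal sketch of the same step with a parallel build, plus a quick check that the distribute/ tree (which the Autoware node loads libcaffe from at runtime, see step (5)) was actually produced:

$ cd ~/ENet/caffe-enet
$ make -j$(nproc) && make distribute
$ ls distribute/lib    # should list libcaffe.so.1.0.0-rc3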
If the build fails with an error like the following:
./include/caffe/util/cudnn.hpp:113:70: error: too few arguments to function ‘cudnnStatus_t cudnnSetConvolution2dDescriptor(cudnnConvolutionDescriptor_t, int, int, int, int, int, int, cudnnConvolutionMode_t, cudnnDataType_t)’ pad_h, pad_w, stride_h, stride_w, 1, 1, CUDNN_CROSS_CORRELATION));
This happens because cuDNN 6 added a cudnnDataType_t argument to cudnnSetConvolution2dDescriptor(), while the header bundled with caffe-enet still calls the old signature. Replace /home/xxx/ENet/caffe-enet/include/caffe/util/cudnn.hpp with the code below, which guards the call with CUDNN_VERSION_MIN(6, 0, 0).
#ifndef CAFFE_UTIL_CUDNN_H_
#define CAFFE_UTIL_CUDNN_H_
#ifdef USE_CUDNN

#include <cudnn.h>

#include "caffe/common.hpp"
#include "caffe/proto/caffe.pb.h"

#define CUDNN_VERSION_MIN(major, minor, patch) \
    (CUDNN_VERSION >= (major * 1000 + minor * 100 + patch))

#define CUDNN_CHECK(condition) \
  do { \
    cudnnStatus_t status = condition; \
    CHECK_EQ(status, CUDNN_STATUS_SUCCESS) << " "\
      << cudnnGetErrorString(status); \
  } while (0)

inline const char* cudnnGetErrorString(cudnnStatus_t status) {
  switch (status) {
    case CUDNN_STATUS_SUCCESS:
      return "CUDNN_STATUS_SUCCESS";
    case CUDNN_STATUS_NOT_INITIALIZED:
      return "CUDNN_STATUS_NOT_INITIALIZED";
    case CUDNN_STATUS_ALLOC_FAILED:
      return "CUDNN_STATUS_ALLOC_FAILED";
    case CUDNN_STATUS_BAD_PARAM:
      return "CUDNN_STATUS_BAD_PARAM";
    case CUDNN_STATUS_INTERNAL_ERROR:
      return "CUDNN_STATUS_INTERNAL_ERROR";
    case CUDNN_STATUS_INVALID_VALUE:
      return "CUDNN_STATUS_INVALID_VALUE";
    case CUDNN_STATUS_ARCH_MISMATCH:
      return "CUDNN_STATUS_ARCH_MISMATCH";
    case CUDNN_STATUS_MAPPING_ERROR:
      return "CUDNN_STATUS_MAPPING_ERROR";
    case CUDNN_STATUS_EXECUTION_FAILED:
      return "CUDNN_STATUS_EXECUTION_FAILED";
    case CUDNN_STATUS_NOT_SUPPORTED:
      return "CUDNN_STATUS_NOT_SUPPORTED";
    case CUDNN_STATUS_LICENSE_ERROR:
      return "CUDNN_STATUS_LICENSE_ERROR";
#if CUDNN_VERSION_MIN(6, 0, 0)
    case CUDNN_STATUS_RUNTIME_PREREQUISITE_MISSING:
      return "CUDNN_STATUS_RUNTIME_PREREQUISITE_MISSING";
#endif
#if CUDNN_VERSION_MIN(7, 0, 0)
    case CUDNN_STATUS_RUNTIME_IN_PROGRESS:
      return "CUDNN_STATUS_RUNTIME_IN_PROGRESS";
    case CUDNN_STATUS_RUNTIME_FP_OVERFLOW:
      return "CUDNN_STATUS_RUNTIME_FP_OVERFLOW";
#endif
  }
  return "Unknown cudnn status";
}

namespace caffe {

namespace cudnn {

template <typename Dtype> class dataType;
template<> class dataType<float> {
 public:
  static const cudnnDataType_t type = CUDNN_DATA_FLOAT;
  static float oneval, zeroval;
  static const void *one, *zero;
};
template<> class dataType<double> {
 public:
  static const cudnnDataType_t type = CUDNN_DATA_DOUBLE;
  static double oneval, zeroval;
  static const void *one, *zero;
};

template <typename Dtype>
inline void createTensor4dDesc(cudnnTensorDescriptor_t* desc) {
  CUDNN_CHECK(cudnnCreateTensorDescriptor(desc));
}

template <typename Dtype>
inline void setTensor4dDesc(cudnnTensorDescriptor_t* desc,
    int n, int c, int h, int w,
    int stride_n, int stride_c, int stride_h, int stride_w) {
  CUDNN_CHECK(cudnnSetTensor4dDescriptorEx(*desc, dataType<Dtype>::type,
        n, c, h, w, stride_n, stride_c, stride_h, stride_w));
}

template <typename Dtype>
inline void setTensor4dDesc(cudnnTensorDescriptor_t* desc,
    int n, int c, int h, int w) {
  const int stride_w = 1;
  const int stride_h = w * stride_w;
  const int stride_c = h * stride_h;
  const int stride_n = c * stride_c;
  setTensor4dDesc<Dtype>(desc, n, c, h, w,
                         stride_n, stride_c, stride_h, stride_w);
}

template <typename Dtype>
inline void createFilterDesc(cudnnFilterDescriptor_t* desc,
    int n, int c, int h, int w) {
  CUDNN_CHECK(cudnnCreateFilterDescriptor(desc));
#if CUDNN_VERSION_MIN(5, 0, 0)
  CUDNN_CHECK(cudnnSetFilter4dDescriptor(*desc, dataType<Dtype>::type,
      CUDNN_TENSOR_NCHW, n, c, h, w));
#else
  CUDNN_CHECK(cudnnSetFilter4dDescriptor_v4(*desc, dataType<Dtype>::type,
      CUDNN_TENSOR_NCHW, n, c, h, w));
#endif
}

template <typename Dtype>
inline void createConvolutionDesc(cudnnConvolutionDescriptor_t* conv) {
  CUDNN_CHECK(cudnnCreateConvolutionDescriptor(conv));
}

template <typename Dtype>
inline void setConvolutionDesc(cudnnConvolutionDescriptor_t* conv,
    cudnnTensorDescriptor_t bottom, cudnnFilterDescriptor_t filter,
    int pad_h, int pad_w, int stride_h, int stride_w) {
#if CUDNN_VERSION_MIN(6, 0, 0)
  CUDNN_CHECK(cudnnSetConvolution2dDescriptor(*conv,
      pad_h, pad_w, stride_h, stride_w, 1, 1,
      CUDNN_CROSS_CORRELATION, dataType<Dtype>::type));
#else
  CUDNN_CHECK(cudnnSetConvolution2dDescriptor(*conv,
      pad_h, pad_w, stride_h, stride_w, 1, 1,
      CUDNN_CROSS_CORRELATION));
#endif
}

template <typename Dtype>
inline void createPoolingDesc(cudnnPoolingDescriptor_t* pool_desc,
    PoolingParameter_PoolMethod poolmethod, cudnnPoolingMode_t* mode,
    int h, int w, int pad_h, int pad_w, int stride_h, int stride_w) {
  switch (poolmethod) {
  case PoolingParameter_PoolMethod_MAX:
    *mode = CUDNN_POOLING_MAX;
    break;
  case PoolingParameter_PoolMethod_AVE:
    *mode = CUDNN_POOLING_AVERAGE_COUNT_INCLUDE_PADDING;
    break;
  default:
    LOG(FATAL) << "Unknown pooling method.";
  }
  CUDNN_CHECK(cudnnCreatePoolingDescriptor(pool_desc));
#if CUDNN_VERSION_MIN(5, 0, 0)
  CUDNN_CHECK(cudnnSetPooling2dDescriptor(*pool_desc, *mode,
        CUDNN_PROPAGATE_NAN, h, w, pad_h, pad_w, stride_h, stride_w));
#else
  CUDNN_CHECK(cudnnSetPooling2dDescriptor_v4(*pool_desc, *mode,
        CUDNN_PROPAGATE_NAN, h, w, pad_h, pad_w, stride_h, stride_w));
#endif
}

template <typename Dtype>
inline void createActivationDescriptor(cudnnActivationDescriptor_t* activ_desc,
    cudnnActivationMode_t mode) {
  CUDNN_CHECK(cudnnCreateActivationDescriptor(activ_desc));
  CUDNN_CHECK(cudnnSetActivationDescriptor(*activ_desc, mode,
                                           CUDNN_PROPAGATE_NAN, Dtype(0)));
}

}  // namespace cudnn

}  // namespace caffe

#endif  // USE_CUDNN
#endif  // CAFFE_UTIL_CUDNN_H_
After the build succeeds, go into /home/xxx/ENet/enet_weights_zoo/ and run:
$ sudo chmod a+x cityscapes_weights.sh
$ sh cityscapes_weights.sh
This downloads cityscapes_weights.caffemodel and cityscapes_weights_before_bn_merge.caffemodel.
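As a quick sanity check before launching the node, both weight files should now be present under enet_weights_zoo:

$ ls -lh ~/ENet/enet_weights_zoo/*.caffemodel    # both files from the step above should appear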
(5) Run the vision_segment_enet_detect node
$ cd ~/autoware.ai
$ source install/setup.bash
$ roslaunch vision_segment_enet_detect vision_segment_enet_detect.launch
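If ENet lives somewhere other than ~/ENet (see step (1)), the node should still work as long as it is told where the model files are. The sketch below assumes network_definition_file and pretrained_model_file are exposed as launch arguments; the paths shown are placeholders, except for the weights filename from the previous step:

$ roslaunch vision_segment_enet_detect vision_segment_enet_detect.launch \
    network_definition_file:=/path/to/ENet/your_deploy.prototxt \
    pretrained_model_file:=/path/to/ENet/enet_weights_zoo/cityscapes_weights.caffemodel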
If the image_segmenter_enet.launch file is missing, delete image_segmenter_enet from the build and install directories and build vision_segment_enet_detect on its own:
$ cd ~/autoware.ai
$ AUTOWARE_COMPILE_WITH_CUDA=1 colcon build --cmake-args -DCMAKE_BUILD_TYPE=Release --packages-select vision_segment_enet_detect
Then play back the Moriyama demo dataset to test.
If the error libcaffe.so.1.0.0-rc3: cannot open shared object file: No such file or directory appears, run:
$ export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/cuda/lib64:~/ENet/caffe-enet/distribute/lib
$ roslaunch vision_segment_enet_detect vision_segment_enet_detect.launch
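To avoid re-exporting LD_LIBRARY_PATH in every new terminal, you can optionally append the same line to ~/.bashrc (a convenience only; the paths match the ones used above):

$ echo 'export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/cuda/lib64:~/ENet/caffe-enet/distribute/lib' >> ~/.bashrc
$ source ~/.bashrc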
This is an original post; please credit the source when reposting.