传统弱校HFUT的蒟蒻,真相只有一个

Centos下编译tensorflow1.14 c++ API的姿势

centos下编译tensorflow c++ API坑比较多,最近有幸都踩了一遍

tensorflow版本和bazel版本protobuf版本都必须对应的上,比如

  • tf1.14对应:bazel-0.24.1-installer-linux-x86_64.sh       protobuf-all-3.7.0.zip(3.7.1据说也可以)
  • tf1.13对应:bazel-0.19.2       protobuf-3.6.1.2

其中tf1.13的编译参考 1、https://blog.csdn.net/pursuit_zhangyu/article/details/104473245  这里的步骤是可以用的,tf1.13编译失败过好多次最后还是成功了,主要是从外网下载坑多。

主要讲一下tensorflow1.14编译和无网下的编译姿势。

参考:1、https://blog.csdn.net/sinat_37532065/article/details/83211988

2、http://www.liuxiao.org/2018/08/ubuntu-tensorflow-c-%E4%BB%8E%E8%AE%AD%E7%BB%83%E5%88%B0%E9%A2%84%E6%B5%8B1%EF%BC%9A%E7%8E%AF%E5%A2%83%E6%90%AD%E5%BB%BA/

3、https://webcache.googleusercontent.com/search?q=cache:66ahCJLzX6EJ:https://www.twblogs.net/a/5c35ee3ebd9eee35b3a56e76+&cd=2&hl=zh-CN&ct=clnk&gl=hk

 

一、下载资源

bazel-0.24.1-installer-linux-x86_64.sh       protobuf-all-3.7.0.zip      jdk-8u261-linux-x64.tar.gz      cmake-3.10.2.tar.gz       Anaconda3-5.2.0-Linux-x86_64.sh

其中,bazel和protobuf的版本比较固定。java、cmake、python3.6或3.7环境有就可以,版本根据需要来。

二、安装依赖

# Build/toolchain dependencies (CentOS, yum).
# NOTE(review): "g++" and "zlib1g-dev" are Debian/Ubuntu package names and do
# not exist in yum repos; the CentOS equivalents are gcc-c++ and zlib-devel.
# gcc-6/g++-6 are also not in the base repos — use SCL devtoolset-6 if a
# newer GCC is actually required.
yum install gcc gcc-c++ libstdc++-devel
yum install zip unzip
yum install make
yum install git
yum install zlib zlib-devel
yum install autoconf automake libtool curl patch
# "future" is a Python package, not a yum package — install it with pip.
pip install distributed future

三、源码安装cmake

# Build CMake from source. The version must match the tarball downloaded in
# step 1 (cmake-3.10.2.tar.gz) — the original snippet said 3.6.2.
tar -zxvf cmake-3.10.2.tar.gz
cd cmake-3.10.2
./bootstrap
# "&&" (not "&") so `make install` only runs after a successful build;
# a single "&" would background the build and run install immediately.
make && make install
cmake --version

四、安装java

参考:https://www.cnblogs.com/stulzq/p/9286878.html

注:适用于只写入临时变量,关闭终端即失效的场景。
长久生效的配置见链接 https://www.cnblogs.com/stulzq/p/9286878.html
# Unpack the JDK and export the Java environment for the current shell only.
# NOTE: these exports are temporary — they are lost when the terminal closes;
# add them to ~/.bashrc or /etc/profile for a permanent setup.
tar -zxvf jdk-8u261-linux-x64.tar.gz
export JAVA_HOME=**************/jdk1.8.0_261
export JRE_HOME=${JAVA_HOME}/jre
export CLASSPATH=.:${JAVA_HOME}/lib:${JRE_HOME}/lib
export PATH=${JAVA_HOME}/bin:$PATH

五、安装bazel

# Install bazel 0.24.1 (the version required by TF 1.14) via the
# self-extracting installer script.
chmod +x bazel-0.24.1-installer-linux-x86_64.sh 
./bazel-0.24.1-installer-linux-x86_64.sh 
bazel安装后卸载:删除~/bin文件夹,删除~/.bazel(/root/.cache/bazel)文件夹,修改设置的环境变量
注:bazel: /usr/local/bin/bazel /usr/local/lib/bazel
export PATH="$PATH:$HOME/bin"   # 注:只写入临时变量,关闭终端即失效。

长久生效配置见链接 https://www.cnblogs.com/stulzq/p/9286878.html

六、安装protobuf

# Build and install protobuf 3.7.0 into a local prefix (protobuf_bin).
# NOTE(review): the archive is a .zip, so `tar zxvf` cannot extract it —
# the original listed both; only unzip is needed.
unzip protobuf-all-3.7.0.zip
mkdir protobuf_bin
# unzip creates protobuf-3.7.0/; the build must run from inside it
# (the original snippet was missing this cd).
cd protobuf-3.7.0
./autogen.sh
./configure --prefix=************************/protobuf_bin
make -j4
make install
# Temporary environment setup (lost when the terminal closes):
export PATH=$PATH:*********************/protobuf_bin/bin
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:************/protobuf_bin/lib
export CPLUS_INCLUDE_PATH=*************/protobuf_bin/include
长久生效配置见链接 https://www.cnblogs.com/stulzq/p/9286878.html

 

七、配置编译tensorflow1.14

# Unpack the TensorFlow r1.14 sources and run the interactive configure
# script (answers Y to CUDA support only when building the GPU version).
unzip tensorflow-r1.14.zip
cd tensorflow-r1.14/
./configure

如需编译GPU版本 CUDA support选择Y,其他选项全部N;如需CPU版本全部N

# With a GPU (CUDA) — note: shell comments use "#", not "//" as originally written:
bazel build --config=opt --config=cuda //tensorflow:libtensorflow_cc.so
# CPU only:
bazel build --config=opt //tensorflow:libtensorflow_cc.so

注意这里是要连外网下载依赖包的,最好挂代理,否则可能会一直报错失败。

网络畅通的话请忽略掉下面这些操作

主要讲一下无网情况下的处理方法!

修改目录下的WORKSPACE文件和tensorflow/workspace.bzl文件

http_archive(
    name = "io_bazel_rules_closure",
    sha256 = "e0a111000aeed2051f29fcc7a3f83be3ad8c6c93c186e64beb1ad313f0c7f9f9",
    strip_prefix = "rules_closure-cf1e44edb908e9616030cc83d085989b8e6cd6df",
    urls = [
        # Local mirror first: pre-download each archive and place it under
        # /var/www/html so the build works without internet access.
        "file://///var/www/html/cf1e44edb908e9616030cc83d085989b8e6cd6df.tar.gz",
        # NOTE: many other download URLs in WORKSPACE and
        # tensorflow/workspace.bzl need the same local-mirror treatment —
        # follow the build errors to find each one, and rename the downloaded
        # files so the local file names match the URLs.
        # Copying archives straight into
        # /root/.cache/bazel/_bazel_root/*****/external/... does NOT work:
        # bazel clears that directory on every build.
        "http://mirror.tensorflow.org/github.com/bazelbuild/rules_closure/archive/cf1e44edb908e9616030cc83d085989b8e6cd6df.tar.gz",
        "https://github.com/bazelbuild/rules_closure/archive/cf1e44edb908e9616030cc83d085989b8e6cd6df.tar.gz",
    ],
)

八、编译第三方库

# Build TensorFlow's bundled third-party dependencies (nsync, eigen, ...).
cd ~/tensorflow-r1.14/tensorflow/contrib/makefile/
./build_all_linux.sh

编译Eigen3

# Build and install Eigen3 from the copy that build_all_linux.sh downloaded.
cd tensorflow-r1.14/tensorflow/contrib/makefile/downloads/eigen

mkdir build
cd build
cmake ..
make
sudo make install

/usr/local/include目录下会出现eigen3文件夹,可以移动到需要的地方

 九、整理和打包库环境:

# Collect the headers and shared libraries into /usr/local for consumers.
# Run from the tensorflow-r1.14 source root.
sudo mkdir -p /usr/local/include/tf/tensorflow
sudo cp -r bazel-genfiles/ /usr/local/include/tf
sudo cp -r tensorflow/cc /usr/local/include/tf/tensorflow
sudo cp -r tensorflow/core /usr/local/include/tf/tensorflow
# relative path (the original "/tensorflow/contrib" had a bogus leading slash)
sudo cp -r tensorflow/contrib /usr/local/include/tf/tensorflow
sudo cp -r third_party /usr/local/include/tf
sudo cp bazel-bin/tensorflow/libtensorflow_cc.so /usr/local/lib
sudo cp bazel-bin/tensorflow/libtensorflow_cc.so.1 /usr/local/lib
sudo cp bazel-bin/tensorflow/libtensorflow_framework.so /usr/local/lib
sudo cp bazel-bin/tensorflow/libtensorflow_framework.so.1 /usr/local/lib

期间遇到过一个问题,libtensorflow_framework.so找不到,搜索/root/.cache/bazel里面也没有

解决方法,将libtensorflow_framework.so.1 重命名成 libtensorflow_framework.so 可以用了

放到新系统上跑需要的文件:

/usr/local/include/tf
/usr/local/lib (或者单独拷出来那四个libtensorflow_****)
protobuf_bin
/usr/local/include/eigen3

十、测试一下

编写CMakeLists.txt    链接的位置改成实际的位置

# Minimum CMake version required by this project
cmake_minimum_required(VERSION 3.6.2)
# Project / target name
project(cpptest)

# C++14 is required by the TensorFlow 1.14 headers
set(CMAKE_CXX_STANDARD 14)
set(CMAKE_BUILD_TYPE "Debug")
set(CMAKE_CXX_FLAGS_DEBUG "$ENV{CXXFLAGS} -O0 -Wall -g2 -ggdb")
set(CMAKE_CXX_FLAGS_RELEASE "$ENV{CXXFLAGS} -O3 -Wall")

# TENSORFLOW_DIR: path to the installed TensorFlow tree (/usr/local/include/tf)
set(TENSORFLOW_DIR *******************/tf)

# Header search paths for TensorFlow, absl, nsync, eigen and protobuf.
# (The original list also contained a lib/ directory, which is not a header path.)
include_directories(
    ${TENSORFLOW_DIR}
    ${TENSORFLOW_DIR}/bazel-genfiles
    ${TENSORFLOW_DIR}/tensorflow/contrib/makefile/downloads/absl
    ${TENSORFLOW_DIR}/tensorflow/contrib/makefile/downloads/nsync/public
    ${TENSORFLOW_DIR}/third_party/eigen3/unsupported
    *****************/eigen3
    *****************/protobuf_bin/include)

# Directory containing libtensorflow_cc.so / libtensorflow_framework.so
link_directories(${TENSORFLOW_DIR} ***************/lib)

# NOTE: the source file needs its extension — the original "main" (without
# ".cpp") makes CMake fail with "Cannot find source file: main".
add_executable(cpptest main.cpp)

# Link the TensorFlow C++ shared libraries (PRIVATE: not propagated further)
target_link_libraries(cpptest PRIVATE tensorflow_cc tensorflow_framework)
#include <fstream>
#include <iostream>
#include <map>
#include <tensorflow/core/platform/env.h>
#include "tensorflow/cc/ops/image_ops.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/graph/default_device.h"
#include "tensorflow/core/graph/graph_def_builder.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/public/session.h"

using namespace std ;
using namespace tensorflow;
using tensorflow::Tensor;
using tensorflow::Status;
using tensorflow::string;
using tensorflow::int32;
//GraphDef graph_def;
//graph::SetDefaultDevice("/gpu:1", &graph_def);

//从文件名中读取数据
// Reads an image file (PNG if the name contains ".png", otherwise JPEG),
// decodes it to 3-channel float, adds a batch dimension, resizes it to
// input_height x input_width with bilinear interpolation, swaps the H and W
// axes, and appends the resulting tensor to *out_tensors.
//
// Returns the first non-OK status from graph construction or session
// execution. (The original always returned OK, silently hiding failures
// from ToGraphDef/Create/Run.)
Status ReadTensorFromImageFile(string file_name, const int input_height,
                               const int input_width,
                               vector<Tensor>* out_tensors) {
    auto root = Scope::NewRootScope();
    using namespace ops;

    auto file_reader = ops::ReadFile(root.WithOpName("file_reader"), file_name);
    const int wanted_channels = 3;
    Output image_reader;
    // Pick the decoder from the file extension.
    std::size_t found = file_name.find(".png");
    if (found != std::string::npos) {
        image_reader = DecodePng(root.WithOpName("png_reader"), file_reader,
                                 DecodePng::Channels(wanted_channels));
    } else {
        image_reader = DecodeJpeg(root.WithOpName("jpeg_reader"), file_reader,
                                  DecodeJpeg::Channels(wanted_channels));
    }
    // uint8 -> float, add batch dim (N=1), then resize to the requested size.
    auto float_caster = Cast(root.WithOpName("float_caster"), image_reader, DT_FLOAT);
    auto dims_expander = ExpandDims(root, float_caster, 0);
    auto resized = ResizeBilinear(root, dims_expander,
                                  Const(root.WithOpName("resize"), {input_height, input_width}));
    // Swap axes 1 and 2 (height <-> width) of the NHWC tensor.
    Transpose(root.WithOpName("transpose"), resized, {0, 2, 1, 3});

    GraphDef graph;
    Status status = root.ToGraphDef(&graph);
    if (!status.ok()) return status;

    unique_ptr<Session> session(NewSession(SessionOptions()));
    status = session->Create(graph);
    if (!status.ok()) return status;
    // Run the graph; the "transpose" output lands in *out_tensors.
    status = session->Run({}, {"transpose"}, {}, out_tensors);
    if (!status.ok()) return status;
    return Status::OK();
}

// Loads the frozen graph tf_model.pb, runs it on test.jpg (resized to
// 800x800), and writes channel 0 of the output feature map to out.txt,
// one value per line. Returns 0 on success, -1 on any failure.
int main(int argc, char* argv[]) {
    string graph_path = "tf_model.pb";
    GraphDef graph_def;
    graph::SetDefaultDevice("/gpu:1", &graph_def);
    // Load the frozen model.
    if (!ReadBinaryProto(Env::Default(), graph_path, &graph_def).ok()) {
        cout << "Read model .pb failed"<<endl;
        return -1;
    }
    cout << "Read model .pb AC"<<endl;

    // Create the session; allow_growth avoids grabbing all GPU memory upfront.
    SessionOptions sess_opt;
    sess_opt.config.mutable_gpu_options()->set_allow_growth(true);
    // (The original used the obfuscated "(&session)->reset(...)".)
    unique_ptr<Session> session(NewSession(sess_opt));
    if (!session->Create(graph_def).ok()) {
        cout<<"Create graph failed"<<endl;
        return -1;
    }
    cout<<"Create graph AC"<<endl;

    // Decode and preprocess the input image into a Tensor.
    int input_height = 800;
    int input_width = 800;
    vector<Tensor> inputs;
    // string image_path(argv[1]);
    string image_path("test.jpg");
    if (!ReadTensorFromImageFile(image_path, input_height, input_width, &inputs).ok()
        || inputs.empty()) {
        cout<<"Read image file failed"<<endl;
        return -1;
    }
    cout<<"Read image AC"<<endl;

    vector<Tensor> outputs;
    // Input and output node names must be known in advance from the graph.
    string input = "input_1";
    string output = "segme_out/Sigmoid";

    pair<string,Tensor> img(input, inputs[0]);
    Status status = session->Run({img}, {output}, {}, &outputs);
    if (!status.ok()) {
        cout<<"Running model failed"<<endl;
        cout<<status.ToString()<<endl;
        return -1;
    }
    // Guard before indexing outputs[0] (the original indexed unconditionally).
    if (outputs.empty()) {
        cout<<"Running model produced no output"<<endl;
        return -1;
    }
    cout<<"Running model AC"<<endl;

    // Dump channel 0 of the rank-4 (NHWC) output tensor to out.txt.
    Tensor t = outputs[0];
    auto tmap = t.tensor<float, 4>();
    int output_dim1 = t.shape().dim_size(1);
    int output_dim2 = t.shape().dim_size(2);
    cout<<typeid(tmap).name()<<endl;
    ofstream outfile;
    outfile.open("out.txt");
    // Iterate over the actual output dimensions instead of a hardcoded
    // 800x800 (the original would read out of range if the model's output
    // shape differed).
    for (int i = 0; i < output_dim1; i++) {
        for (int j = 0; j < output_dim2; j++) {
            outfile << tmap(0, i, j, 0) << endl;
        }
    }
    outfile.close();
    return 0;
}
main.cpp

可自行编写调用模型代码,然后

# Build and run the test program (the original was missing `cd build`,
# and used "&" which would background cmake instead of sequencing it).
mkdir build
cd build
cmake .. && make
./cpptest

ok

 

至此终于编译完成,肝了好几天

 

posted @ 2020-08-12 23:59  未名亚柳  阅读(1034)  评论(0编辑  收藏  举报