libtorch 1.7: tensor values change after being returned from a function

This is a strange problem!

torch::Tensor pre_img(const cv::Mat &img, std::vector<float> v_mean, std::vector<float> v_std, int standard_h)
{
    cv::Mat m_resize, m_stand;
    // img is passed by const reference; convert a local copy rather than writing back into it
    cv::Mat m_src = img;
    if (1 == m_src.channels()) { cv::cvtColor(m_src, m_src, cv::COLOR_GRAY2BGR); }

    m_resize = resize_with_specific_height(m_src, standard_h);
    m_stand = normalize_img(m_resize, v_mean, v_std);

    std::vector<int64_t> sizes = {m_stand.rows, m_stand.cols, m_stand.channels()};
    torch::TensorOptions options = torch::TensorOptions().dtype(torch::kFloat32);
    torch::Tensor tensor_image = torch::from_blob(m_stand.data, torch::IntList(sizes), options);
    // HWC -> CHW, then add a batch dim: final shape (1, C, H, W)
    tensor_image = tensor_image.permute({2, 0, 1});
    tensor_image.unsqueeze_(0);

    std::cout<<tensor_image[0][0][0][0]<<std::endl;
    std::cout<<tensor_image[0][0][0][1]<<std::endl;
    std::cout<<tensor_image[0][0][0][2]<<std::endl;
    std::cout<<tensor_image[0][0][0][3]<<std::endl;
    std::cout<<tensor_image[0][0][0][4]<<std::endl;
    std::cout<<"~~~~~~~~~~~~~~~~~~~~~~~~~~~"<<std::endl;

//    std::cout<<tensor_image<<std::endl;
//    while(1);
    return tensor_image;
}

At the call site in main:

    torch::Tensor input = pre_img(img, v_mean, v_std, standard_h);
    std::cout<<input[0][0][0][0]<<std::endl;
    std::cout<<input[0][0][0][1]<<std::endl;
    std::cout<<input[0][0][0][2]<<std::endl;
    std::cout<<input[0][0][0][3]<<std::endl;
    std::cout<<input[0][0][0][4]<<std::endl;

    while(1);

And the printed values turn out to be different!!

0.741177
[ CPUFloatType{} ]
0.803922
[ CPUFloatType{} ]
0.811765
[ CPUFloatType{} ]
0.74902
[ CPUFloatType{} ]
0.458824
[ CPUFloatType{} ]
~~~~~~~~~~~~~~~~~~~~~~~~~~~
18393.3
[ CPUFloatType{} ]
0
[ CPUFloatType{} ]
5.19193e-37
[ CPUFloatType{} ]
0.74902
[ CPUFloatType{} ]
0.458824
[ CPUFloatType{} ]

The first three values differ; from the fourth onward they match. And on every rerun the first three values come out different, as if random.
I was baffled. I had always written it this way, so why did this problem never show up before? All the function does is return a variable, and the values change?
#############################################################################################

The most likely cause is this line inside the function:

torch::Tensor tensor_image = torch::from_blob(m_stand.data, torch::IntList(sizes), options);

torch::from_blob does not copy the data: the tensor merely wraps the buffer owned by the local cv::Mat m_stand, and when the function returns, m_stand is destroyed and its buffer is freed, leaving the returned tensor pointing at dead memory. Reading it is undefined behavior. Why only the first 3 values differ is another question; at this point this was just a guess.
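If that guess is right, the clean fix is to copy the data out of the Mat while it is still alive. A minimal sketch: from_blob never copies, it only wraps the given pointer, while clone() materializes the tensor into storage the tensor itself owns:

    // from_blob() only wraps m_stand's existing buffer; no copy is made.
    // clone() copies into storage owned by the tensor itself, so the
    // result survives m_stand's destruction when the function returns.
    torch::Tensor tensor_image =
        torch::from_blob(m_stand.data, torch::IntList(sizes), options).clone();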

What I tried first, though, was adding a device, and that seemed to fix it:

torch::Tensor pre_img(const cv::Mat &img, std::vector<float> v_mean, std::vector<float> v_std, int standard_h, const torch::Device &device)
{
    cv::Mat m_resize, m_stand;
    // img is passed by const reference; convert a local copy rather than writing back into it
    cv::Mat m_src = img;
    if (1 == m_src.channels()) { cv::cvtColor(m_src, m_src, cv::COLOR_GRAY2BGR); }

    m_resize = resize_with_specific_height(m_src, standard_h);
    m_stand = normalize_img(m_resize, v_mean, v_std);

    std::vector<int64_t> sizes = {m_stand.rows, m_stand.cols, m_stand.channels()};
    torch::TensorOptions options = torch::TensorOptions().dtype(torch::kFloat32);
    torch::Tensor tensor_image = torch::from_blob(m_stand.data, torch::IntList(sizes), options).to(device);
    // HWC -> CHW, then add a batch dim: final shape (1, C, H, W)
    tensor_image = tensor_image.permute({2, 0, 1});
    tensor_image.unsqueeze_(0);

    std::cout<<tensor_image[0][0][0][0]<<std::endl;
    std::cout<<tensor_image[0][0][0][1]<<std::endl;
    std::cout<<tensor_image[0][0][0][2]<<std::endl;
    std::cout<<tensor_image[0][0][0][3]<<std::endl;
    std::cout<<tensor_image[0][0][0][4]<<std::endl;
    std::cout<<"~~~~~~~~~~~~~~~~~~~~~~~~~~~"<<std::endl;


//    std::cout<<tensor_image<<std::endl;
//    while(1);
    return tensor_image;
}




int main()
{
    std::string path_pt = "/data_2/project_2021/crnn/torchocr_libtorch/model.pt";
    std::string path_img_dir = "/data_1/everyday/0524/img_dir";
    std::vector<float> v_mean = {0.5,0.5,0.5};
    std::vector<float> v_std = {0.5,0.5,0.5};
    int standard_h = 32;

    torch::DeviceType device_type;
    if (torch::cuda::is_available() ) {
        device_type = torch::kCUDA;
    } else {
        device_type = torch::kCPU;
    }
    torch::Device device(device_type);

    std::vector<std::string> v_path;
    GetFileInDir(path_img_dir, v_path);
    for (size_t i = 0; i < v_path.size(); i++)
    {
        std::cout<<i<<"  "<<v_path[i]<<std::endl;
    }

    torch::jit::script::Module m_module = torch::jit::load(path_pt);
    // use the device selected above instead of hard-coding CUDA
    m_module.to(device);
    std::cout<<"successfully loaded model"<<std::endl;

    int cnt_all = 0;
    int cnt_right = 0;

    std::string path_img = "/data_2/project_2021/crnn/torchocr_libtorch/chepai20210303_success_122.jpg";
    cv::Mat img = cv::imread(path_img);
    torch::Tensor input = pre_img(img, v_mean, v_std, standard_h, device);
    std::cout<<input[0][0][0][0]<<std::endl;
    std::cout<<input[0][0][0][1]<<std::endl;
    std::cout<<input[0][0][0][2]<<std::endl;
    std::cout<<input[0][0][0][3]<<std::endl;
    std::cout<<input[0][0][0][4]<<std::endl;

    while(1);

Now the values match. Still, it felt a bit strange.

Wait, that's not right!!! I then tried again with

device_type = torch::kCPU;

and it behaved exactly like before: the first three values change randomly. Only when the tensor is moved to CUDA do they stay put. That actually fits the freed-memory theory: moving to a CUDA device forces a real copy of the data into GPU memory, so the returned tensor owns its storage, while .to() with the same CPU device and dtype is a no-op that returns a tensor still aliasing the freed buffer.
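A small sketch of that reading of Tensor::to; the copy flag in its C++ overload (present in libtorch 1.7) forces fresh storage even when the source and destination device match:

    // CPU -> CUDA: the data must leave the cv::Mat buffer, so the result
    // owns its own GPU storage. CPU -> CPU with the same dtype: .to() is
    // a no-op returning the same tensor, still aliasing the freed memory.
    // copy = true forces a real copy in both cases:
    tensor_image = tensor_image.to(device, /*non_blocking=*/false, /*copy=*/true);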

$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
Then, on top of that, I added a static and the values became consistent everywhere. So it really was the freed-local-memory problem: static gives m_stand static storage duration, so its buffer is not released when the function returns, and the tensor keeps pointing at live memory.

torch::Tensor pre_img(const cv::Mat &img, std::vector<float> v_mean, std::vector<float> v_std, int standard_h, const torch::Device &device)
{
    cv::Mat m_resize;
    static cv::Mat m_stand;   // static: the buffer now outlives the call
    // img is passed by const reference; convert a local copy rather than writing back into it
    cv::Mat m_src = img;
    if (1 == m_src.channels()) { cv::cvtColor(m_src, m_src, cv::COLOR_GRAY2BGR); }

    m_resize = resize_with_specific_height(m_src, standard_h);
    m_stand = normalize_img(m_resize, v_mean, v_std);

    std::vector<int64_t> sizes = {m_stand.rows, m_stand.cols, m_stand.channels()};
    torch::TensorOptions options = torch::TensorOptions().dtype(torch::kFloat32);
    torch::Tensor tensor_image = torch::from_blob(m_stand.data, torch::IntList(sizes), options).to(device);
    // HWC -> CHW, then add a batch dim: final shape (1, C, H, W)
    tensor_image = tensor_image.permute({2, 0, 1});
    tensor_image.unsqueeze_(0);

    std::cout<<tensor_image[0][0][0][0]<<std::endl;
    std::cout<<tensor_image[0][0][0][1]<<std::endl;
    std::cout<<tensor_image[0][0][0][2]<<std::endl;
    std::cout<<tensor_image[0][0][0][3]<<std::endl;
    std::cout<<tensor_image[0][0][0][4]<<std::endl;
    std::cout<<"~~~~~~~~~~~~~~~~~~~~~~~~~~~"<<std::endl;


//    std::cout<<tensor_image<<std::endl;
//    while(1);
    return tensor_image;
}
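One caveat about the static workaround: the buffer is now shared across calls, and m_stand is reassigned on every call, so on a CPU device a tensor returned earlier dangles again as soon as the function runs a second time. A hypothetical two-call sketch of the pitfall (img1 and img2 stand for any two images):

    torch::Tensor a = pre_img(img1, v_mean, v_std, standard_h, device); // aliases m_stand's current buffer
    torch::Tensor b = pre_img(img2, v_mean, v_std, standard_h, device); // reassigns m_stand: the old buffer
                                                                        // is released, so a dangles again
    // On a CPU device, reading a after the second call is undefined behavior.
    // clone() avoids this entirely; static only delays the lifetime bug.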

That more or less solves the problem. Two things had still puzzled me: why I never noticed this before even though I had always written it the first way, and why only the first 3 values differ. Both follow from this being undefined behavior: reading freed memory returns whatever happens to be there. Typically the allocator hands the freed block to the next allocation (here most likely something in the printing path), which overwrites the first few floats and leaves the rest of the old data intact. That is why only the leading values change, why the garbage differs from run to run, and why the bug can stay hidden for a long time when nothing happens to reuse the block.

posted @ 2021-05-24 14:32  无左无右