Converting a Caffe model and prototxt to a PyTorch model

# Caffe and PyTorch are not installed side by side here: one lives in the system Python and the other in a conda environment, so they sit in isolated Python environments and generally cannot be imported together.
# numpy therefore serves as the intermediate format: the code below takes a Caffe network dumped to numpy and converts it into the PyTorch model.
# I did not automate the prototxt conversion; there is no need, since I hand-wrote an identical PyTorch network.
def net_from_caffe(n, re):  # n is the PyTorch model, re is the Caffe model dumped as numpy
    i = -1
    for name, l1 in n.named_children():
        try:
            l2 = getattr(n, name)
            l2.weight  # skip ReLU / Dropout
        except Exception:
            continue
        i += 1
        # in the numpy dump only conv and fully-connected layers carry weights,
        # so these are the only two layer types we need to align
        while i < len(re) and len(re[i]['weights']) == 0:
            i += 1
        w = torch.from_numpy(re[i]['weights'][0])
        b = torch.from_numpy(re[i]['weights'][1])
        assert w.size() == l2.weight.size()
        assert b.size() == l2.bias.size()
        l2.weight.data.copy_(w)
        l2.bias.data.copy_(b)
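For reference, the Caffe-side dump that produces the numpy file could look roughly like this, run inside the Caffe environment (a sketch only; the prototxt/caffemodel file names are placeholders):

import caffe
import numpy as np

# dump every layer's parameter blobs (empty list for ReLU/Pooling) in network order
net = caffe.Net('deploy.prototxt', 'weights.caffemodel', caffe.TEST)
layers = [{'type': layer.type,
           'weights': [b.data.copy() for b in layer.blobs]}
          for layer in net.layers]
np.save('./np.npy', layers)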

Gotchas:

1. PIL opens images as RGB, and torchvision's ToTensor puts the values in the 0-1 range. If you want 0-255 you have to multiply back up yourself.

2. One thing to note: at the first conv-to-fully-connected transition, PyTorch needs an explicit flatten; h=h.view(h.size(0),-1) lines the features up one-to-one with Caffe's.

3. RGB to BGR: im=im[[2,1,0],...]
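Putting gotchas 1 and 3 together, the preprocessing amounts to the following (a sketch; the 224x224 size and the 128.0 mean follow the full script below):

from PIL import Image
from torchvision import transforms

tran = transforms.Compose([
    transforms.Resize((224, 224)),
    transforms.ToTensor(),      # RGB, values in [0, 1]
])

im = tran(Image.open('./23.jpg'))
im = im * 255                   # back to the 0-255 range Caffe expects
im -= 128.0                     # mean subtraction
im = im[[2, 1, 0], ...]         # RGB -> BGR
im.unsqueeze_(0)                # add the batch dimension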

Two ways to use torch.save / torch.load:

1. Save the whole model object

But then the model's class is baked into the file: the class definition must be visible (importable) at the call site, otherwise the load fails.

2. Save the state_dict

More flexible: the parameters are assigned directly, with no wrapping class around them, at the cost of a little more boilerplate.
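Concretely, mirroring the main() below (a sketch using this post's paths):

import torch
from trans2yiNet import yiNet

model = yiNet()

# 1. whole-object save: the yiNet class definition must be importable
#    wherever torch.load is called, or unpickling fails
torch.save(model, './yinet.pth')
model = torch.load('./yinet.pth')

# 2. state_dict save: rebuild the module yourself, then copy the parameters in
torch.save(model.state_dict(), './th.state')
model = yiNet()
model.load_state_dict(torch.load('./th.state'))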


Full code:

trans2yiNet.py:

import torch
import numpy
import torch.nn as nn
import torch.nn.functional as F

# def conv_from_caffe(conv,re):
#     assert re['type']=='Convolution'
#     w=torch.from_numpy(re['weights'][0])
#     b=torch.from_numpy(re['weights'][1])
#     assert conv.weight.data.size() == w.size()
#     assert conv.bias.data.size() == b.size()
#     conv.weight.data.copy_(w)
#     conv.bias.data.copy_(b)
# def fc_from_caffe(fc,re):
#     assert re['type']=='InnerProduct'
#     w=torch.from_numpy(re['weights'][0])
#     b=torch.from_numpy(re['weights'][1])
#     assert fc.weight.data.size() == w.size()
#     assert fc.bias.data.size() == b.size()
#     fc.weight.data.copy_(w)
#     fc.bias.data.copy_(b)
def net_from_caffe(n,re):
    i=-1
    for name, l1 in n.named_children():
        try:
            l2 = getattr(n, name)
            l2.weight  # skip ReLU / Dropout
        except Exception:
            continue
        i+=1
        # only conv and fully-connected layers carry weights in the numpy dump;
        # check the bound before indexing to avoid an IndexError at the last layer
        while i < len(re) and len(re[i]['weights']) == 0:
            i+=1
        w=torch.from_numpy(re[i]['weights'][0])
        b=torch.from_numpy(re[i]['weights'][1])
        
        assert w.size() == l2.weight.size()
        assert b.size() == l2.bias.size()
        l2.weight.data.copy_(w)
        l2.bias.data.copy_(b)
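
# For reference, net_from_caffe assumes re is a sequence of per-layer dicts in
# network order (reconstructed from how re is indexed above, not an official format):
#   [{'type': 'Convolution', 'weights': [W, b]},   # W, b: numpy arrays
#    {'type': 'ReLU',        'weights': []},       # parameter-free layer
#    ...]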

class yiNet(nn.Module):
    def __init__(self):
        super(yiNet, self).__init__()
        self.conv1_1 = nn.Conv2d(3, 64, 3,padding=1)
        self.relu1_1 = nn.ReLU(inplace=True)
        self.conv1_2 = nn.Conv2d(64, 64, 3,padding=1)
        self.relu1_2 = nn.ReLU(inplace=True)
        self.pool1 = nn.MaxPool2d(2, stride=2, ceil_mode=True)  # ceil or floor # 5

        # conv2
        self.conv2_1 = nn.Conv2d(64, 128, 3,padding=1)
        self.relu2_1 = nn.ReLU(inplace=True)
        self.conv2_2 = nn.Conv2d(128, 128, 3,padding=1)
        self.relu2_2 = nn.ReLU(inplace=True)
        self.pool2 = nn.MaxPool2d(2, stride=2, ceil_mode=True)  # 1/4

        # conv3
        self.conv3_1 = nn.Conv2d(128, 256, 3,padding=1) # 11
        self.relu3_1 = nn.ReLU(inplace=True)
        self.conv3_2 = nn.Conv2d(256, 256, 3,padding=1)
        self.relu3_2 = nn.ReLU(inplace=True)
        self.conv3_3 = nn.Conv2d(256, 256, 3,padding=1)
        self.relu3_3 = nn.ReLU(inplace=True)
        self.pool3 = nn.MaxPool2d(2, stride=2, ceil_mode=True)  # 1/8

        # conv4
        self.conv4_1 = nn.Conv2d(256, 512, 3,padding=1) # 18
        self.relu4_1 = nn.ReLU(inplace=True)
        self.conv4_2 = nn.Conv2d(512, 512, 3,padding=1)
        self.relu4_2 = nn.ReLU(inplace=True)
        self.conv4_3 = nn.Conv2d(512, 512, 3,padding=1)
        self.relu4_3 = nn.ReLU(inplace=True)
        self.pool4 = nn.MaxPool2d(2, stride=2, ceil_mode=True)  # 1/16

        # conv5
        self.conv5_1 = nn.Conv2d(512, 512, 3,padding=1) # 25
        self.relu5_1 = nn.ReLU(inplace=True)
        self.conv5_2 = nn.Conv2d(512, 512, 3,padding=1)
        self.relu5_2 = nn.ReLU(inplace=True)
        self.conv5_3 = nn.Conv2d(512, 512, 3,padding=1)
        self.relu5_3 = nn.ReLU(inplace=True)
        self.pool5 = nn.MaxPool2d(2, stride=2, ceil_mode=True)  # 1/32

        # pool5 output for a 224x224 input is 512 x 7 x 7 = 25088, flattened for fc6 below

        # fc6
        self.fc6 = nn.Linear(25088, 4096)  # nn.Linear takes (in_features, out_features): the opposite order of Caffe's InnerProduct
        self.relu6 = nn.ReLU(inplace=True)
        self.drop6 = nn.Dropout2d()  # nn.Dropout is the usual choice after fc layers; inactive in eval mode either way  # 34

        # fc7
        self.fc7 = nn.Linear(4096, 4096)
        self.relu7 = nn.ReLU(inplace=True)
        self.drop7 = nn.Dropout2d()

        # here is a fc7_drop7_0_split op # 38
        self.classifier_color=nn.Linear(4096,32)
        self.classifier_elements=nn.Linear(4096,195)
        self.classifier_style=nn.Linear(4096,24)
        self.classifier_types=nn.Linear(4096,224)
        self.classifier_material=nn.Linear(4096,82) # 43
        self.classifier_attributes=nn.Linear(4096,100)
        


    def forward(self, x):
        
        h = x
        h = self.relu1_1(self.conv1_1(h))
        h = self.relu1_2(self.conv1_2(h))
        h = self.pool1(h)


        h = self.relu2_1(self.conv2_1(h))
        h = self.relu2_2(self.conv2_2(h))
        h = self.pool2(h)

        h = self.relu3_1(self.conv3_1(h))
        h = self.relu3_2(self.conv3_2(h))
        h = self.relu3_3(self.conv3_3(h))
        h = self.pool3(h)

        h = self.relu4_1(self.conv4_1(h))
        h = self.relu4_2(self.conv4_2(h))
        h = self.relu4_3(self.conv4_3(h))
        h = self.pool4(h)

        h = self.relu5_1(self.conv5_1(h))
        h = self.relu5_2(self.conv5_2(h))
        h = self.relu5_3(self.conv5_3(h))
        h = self.pool5(h)

        h=h.view(h.size(0),-1)
        h = self.relu6(self.fc6(h))
        h = self.drop6(h)


        h = self.relu7(self.fc7(h))
        h = self.drop7(h)


        color      = self.classifier_color(h)
        elements   = self.classifier_elements(h)
        style      = self.classifier_style(h)
        types      = self.classifier_types(h)
        materials  = self.classifier_material(h)
        attributes = self.classifier_attributes(h)

        
        h=torch.cat((color,elements,style,types,materials,attributes),1)

        return h
def main():
    numpy_model_pth='./np.npy'
    n=yiNet()
    numpy_model=numpy.load(numpy_model_pth,encoding = 'latin1')
    net_from_caffe(n,numpy_model)
    torch.save(n.state_dict(), './th.state')
    torch.save(n,'./yinet.pth')
#    later, to load from the state_dict:
#    the_model = TheModelClass(*args, **kwargs)
#    the_model.load_state_dict(torch.load(PATH))
#    this way the parameters are decoupled from the nn.Module class



if __name__ == '__main__':
    main()
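
A quick sanity check of the 25088 flatten size and the 657-dim concatenated output (a sketch, run separately):

net = yiNet()
net.eval()  # dropout off
with torch.no_grad():
    out = net(torch.zeros(1, 3, 224, 224))
print(out.size())  # torch.Size([1, 657]) = 32+195+24+224+82+100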

yinet.py:

import torch
import numpy
import torch.nn as nn
import torch.nn.functional as F
from PIL import Image
from torchvision import transforms
from trans2yiNet import yiNet # you have to import this class if you want to unpickle it.

tran=transforms.Compose([
    transforms.Resize((224,224)),
    transforms.ToTensor()
])



im='./23.jpg'
im=Image.open(im)
im=tran(im)
im=im*255  # ToTensor scales to [0,1], so multiply by 255 to recover the 0-255 range
im-=128.0  # rough mean subtraction
im=im[[2,1,0],...]  # RGB -> BGR to match the Caffe-trained weights

im.unsqueeze_(0)  # add the batch dimension

nums=[32,195,24,224,82,100]       # sizes of the six attribute groups
start=[0,32,227,251,475,557,657]  # cumulative offsets into the 657-dim output



# print(im.size())
# print(im.mode())
# im.show()
# print(im)

f=open('./ShoppingAttr/tag_indx_list')  # one tag per line, grouped into the six attributes via start
s=[]
i=0
for ff in f:
    if i in start:
        s.append([])
    s[-1].append(ff)
    i+=1





def soft(output):
    # split the concatenated 657-dim output back into the six attribute groups;
    # no softmax is needed since we only take the argmax afterwards
    out=output.data
    anss=[]
    for batch in range(out.size(0)):
        ans=[]
        b=out[batch]
        for j in range(6):
            ans.append(b[start[j]:start[j+1]])
        anss.append(ans)
    return anss


yinet=torch.load('./yinet.pth')
yinet.eval()  # switch dropout to inference mode; without this the outputs are stochastic

out=yinet(im)
ans=soft(out)


for i in range(len(ans)):
    sss=''
    for j in range(len(ans[i])):
        ind=numpy.argmax(ans[i][j])
        sss+=s[j][ind].split()[1]
        print(s[j][ind],end='')
    print(sss)
