
Converting a PyTorch model to a Caffe model

This article introduces how to convert a PyTorch model to a Caffe model. If there are mistakes or omissions, corrections are welcome.

Here is a handy tool for converting a PyTorch model to a Caffe model; the source code is available at:

https://github.com/xxradon/PytorchToCaffe

After cloning the code, go into the example directory to try the sample:

cd example
python resnet_pytorch_2_caffe.py
The sample script resnet_pytorch_2_caffe.py looks like this:

import sys
sys.path.insert(0,'.')
import torch
from torch.autograd import Variable
from torchvision.models import resnet
import pytorch_to_caffe

if __name__=='__main__':
    ## name of the converted model
    name='resnet18'

    ## the PyTorch model to convert
    resnet18=resnet.resnet18()

    ## pretrained PyTorch weights (.pth)
    checkpoint = torch.load("/home/shining/Downloads/resnet18-5c106cde.pth")

    ## load the weights
    resnet18.load_state_dict(checkpoint)
    resnet18.eval()

    ## input tensor
    input=torch.ones([1, 3, 224, 224])

    ## run the conversion
    pytorch_to_caffe.trans_net(resnet18, input,name)
    pytorch_to_caffe.save_prototxt('{}.prototxt'.format(name))
    pytorch_to_caffe.save_caffemodel('{}.caffemodel'.format(name))
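
To sanity-check the conversion, compare the outputs of the two models on the same input. The snippet below is a minimal sketch (not part of the PytorchToCaffe repo), assuming pycaffe is importable and that resnet18.prototxt / resnet18.caffemodel are the files written by the script above; run it in a fresh process so the converter's monkey-patching is not active. 'blob1' is the input blob name the converter assigns by default, so verify it against the generated prototxt:

import numpy as np
import torch
import caffe
from torchvision.models import resnet

# reference output from the original PyTorch model
model = resnet.resnet18()
model.load_state_dict(torch.load("/home/shining/Downloads/resnet18-5c106cde.pth"))
model.eval()
dummy = torch.ones([1, 3, 224, 224])
with torch.no_grad():
    torch_out = model(dummy).numpy()

# output of the converted Caffe model
net = caffe.Net('resnet18.prototxt', 'resnet18.caffemodel', caffe.TEST)
net.blobs['blob1'].data[...] = dummy.numpy()  # input blob name assigned by the converter
caffe_out = list(net.forward().values())[0]

print('max abs diff:', np.abs(torch_out - caffe_out).max())

A maximum absolute difference on the order of 1e-5 or smaller indicates the conversion is numerically faithful.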

To also convert PyTorch's flatten operation to Caffe, the script pytorch_to_caffe.py needs to be modified. The modified script is shown below.

import torch
import torch.nn as nn
import traceback
from Caffe import caffe_net
import torch.nn.functional as F
from torch.autograd import Variable
from Caffe import layer_param
from torch.nn.modules.utils import _pair
import numpy as np


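# Blob_LOG maps id(tensor) -> Caffe blob name, so a tensor produced by one
# wrapped op can later be looked up as the bottom blob of the next op.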
class Blob_LOG():
    def __init__(self):
        self.data={}
    def __setitem__(self, key, value):
        self.data[key]=value
    def __getitem__(self, key):
        return self.data[key]
    def __len__(self):
        return len(self.data)

NET_INITTED=False


class TransLog(object):
    def __init__(self):
        """
        doing init() with inputs Variable before using it
        """
        self.layers={}
        self.detail_layers={}  
        self.detail_blobs={}  
        self._blobs=Blob_LOG()
        self._blobs_data=[]
        self.cnet=caffe_net.Caffemodel('')
        self.debug=True

    def init(self,inputs):
        """
        :param inputs: is a list of input variables
        """
        self.add_blobs(inputs)
    
    def add_layer(self,name='layer'):
        if name in self.layers:
            return self.layers[name]
        if name not in self.detail_layers.keys():
            self.detail_layers[name] =0
        self.detail_layers[name] +=1
        name='{}{}'.format(name,self.detail_layers[name])
        self.layers[name]=name
        if self.debug:
            print("{} was added to layers".format(self.layers[name]))
        return self.layers[name]

    def add_blobs(self, blobs,name='blob',with_num=True):
        rst=[]
        for blob in blobs:
            self._blobs_data.append(blob) # keep a reference so the tensor's id (memory address) is not reused
            blob_id=int(id(blob))
            if name not in self.detail_blobs.keys():
                self.detail_blobs[name] =0
            self.detail_blobs[name] +=1           
            if with_num:
                rst.append('{}{}'.format(name,self.detail_blobs[name]))
            else:
                rst.append('{}'.format(name))
            if self.debug:
                print("{}:{} was added to blobs".format(blob_id,rst[-1]))
            # print('Add blob {} : {}'.format(rst[-1].center(21),blob.size()))
            self._blobs[blob_id]=rst[-1]
        return rst
    def blobs(self, var):
        var=id(var)
        # if self.debug:
        #     print("{}:{} getting".format(var, self._blobs[var]))
        try:
            return self._blobs[var]
        except KeyError:
            print("WARNING: cannot find blob {}".format(var))
            return None

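# module-level singleton: every wrapped op below records its layers and blobs here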
log=TransLog()

layer_names={}
def _conv2d(raw,input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1):
    print('conv: ',log.blobs(input))
    x=raw(input,weight,bias,stride,padding,dilation,groups)
    name=log.add_layer(name='conv')
    log.add_blobs([x],name='conv_blob')
    layer=caffe_net.Layer_param(name=name, type='Convolution',
                                bottom=[log.blobs(input)], top=[log.blobs(x)])
    layer.conv_param(x.size()[1],weight.size()[2:],stride=_pair(stride),
                     pad=_pair(padding),dilation=_pair(dilation),bias_term=bias is not None,groups=groups)
    if bias is not None:
        layer.add_data(weight.cpu().data.numpy(),bias.cpu().data.numpy())
    else:
        layer.param.convolution_param.bias_term=False
        layer.add_data(weight.cpu().data.numpy())
    log.cnet.add_layer(layer)
    return x

def _conv_transpose2d(raw,input, weight, bias=None, stride=1, padding=0, output_padding=0, groups=1, dilation=1):
    x=raw(input, weight, bias, stride, padding, output_padding, groups, dilation)
    name=log.add_layer(name='conv_transpose')
    log.add_blobs([x],name='conv_transpose_blob')
    layer=caffe_net.Layer_param(name=name, type='Deconvolution',
                                bottom=[log.blobs(input)], top=[log.blobs(x)])
    layer.conv_param(x.size()[1],weight.size()[2:],stride=_pair(stride),
                     pad=_pair(padding),dilation=_pair(dilation),bias_term=bias is not None, groups = groups)
    if bias is not None:
        layer.add_data(weight.cpu().data.numpy(),bias.cpu().data.numpy())
    else:
        layer.param.convolution_param.bias_term=False
        layer.add_data(weight.cpu().data.numpy())
    log.cnet.add_layer(layer)
    return x

def _linear(raw,input, weight, bias=None):
    x=raw(input,weight,bias)
    layer_name=log.add_layer(name='fc')
    top_blobs=log.add_blobs([x],name='fc_blob')
    layer=caffe_net.Layer_param(name=layer_name,type='InnerProduct',
                                bottom=[log.blobs(input)],top=top_blobs)
    layer.fc_param(x.size()[1],has_bias=bias is not None)
    if bias is not None:
        layer.add_data(weight.cpu().data.numpy(),bias.cpu().data.numpy())
    else:
        layer.add_data(weight.cpu().data.numpy())
    log.cnet.add_layer(layer)
    return x

def _split(raw,tensor, split_size, dim=0):
    # split in pytorch is slice in caffe
    x=raw(tensor, split_size, dim)
    layer_name=log.add_layer('split')
    top_blobs=log.add_blobs(x,name='split_blob')
    layer=caffe_net.Layer_param(name=layer_name, type='Slice',
                                bottom=[log.blobs(tensor)], top=top_blobs)
    slice_num=int(np.floor(tensor.size()[dim]/split_size))
    slice_param=caffe_net.pb.SliceParameter(axis=dim,slice_point=[split_size*i for i in range(1,slice_num)])
    layer.param.slice_param.CopyFrom(slice_param)
    log.cnet.add_layer(layer)
    return x


def _pool(type,raw,input,x,kernel_size,stride,padding,ceil_mode):
    # TODO dilation,ceil_mode,return indices
    layer_name = log.add_layer(name='{}_pool'.format(type))
    top_blobs = log.add_blobs([x], name='{}_pool_blob'.format(type))
    layer = caffe_net.Layer_param(name=layer_name, type='Pooling',
                                  bottom=[log.blobs(input)], top=top_blobs)
    # TODO w,h different kernel, stride and padding
    # processing ceil mode
    layer.pool_param(kernel_size=kernel_size, stride=kernel_size if stride is None else stride,
                     pad=padding, type=type.upper() , ceil_mode = ceil_mode)
    log.cnet.add_layer(layer)
    if ceil_mode==False and stride is not None:
        oheight = (input.size()[2] - _pair(kernel_size)[0] + 2 * _pair(padding)[0]) % (_pair(stride)[0])
        owidth = (input.size()[3] - _pair(kernel_size)[1] + 2 * _pair(padding)[1]) % (_pair(stride)[1])
        if oheight!=0 or owidth!=0:
            caffe_out=raw(input, kernel_size, stride, padding, ceil_mode=True)
            print("WARNING: the output shapes mismatch at {}: "
                  "input {} output---Pytorch:{}---Caffe:{}\n"
                  "This is caused by Caffe using ceil mode while PyTorch uses floor mode for pooling.\n"
                  "You can add a Clip layer to the Caffe prototxt manually if a shape mismatch error occurs in Caffe.".format(layer_name,input.size(),x.size(),caffe_out.size()))

def _max_pool2d(raw,input, kernel_size, stride=None, padding=0, dilation=1,
               ceil_mode=False, return_indices=False):
    x = raw(input, kernel_size, stride, padding, dilation,ceil_mode, return_indices)
    _pool('max',raw,input, x, kernel_size, stride, padding,ceil_mode)
    return x


def _avg_pool2d(raw, input, kernel_size, stride = None, padding = 0, ceil_mode = False, count_include_pad = True, divisor_override=None): 
    x = raw(input, kernel_size, stride, padding, ceil_mode, count_include_pad) 
    _pool('ave',raw,input, x, kernel_size, stride, padding,ceil_mode) 
    return x

def _adaptive_avg_pool2d(raw, input, output_size):
    x = raw(input, output_size)
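    # Caffe has no adaptive pooling, so a fixed kernel/stride is derived from the
    # current input size; the converted net therefore only matches this input resolution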
    if isinstance(output_size, int):
        out_dim = output_size
    else:
        out_dim = output_size[0]
    tmp = max(input.shape[2], input.shape[3])
    stride = tmp //out_dim
    kernel_size = tmp - (out_dim - 1) * stride
    _pool('ave', raw, input, x, kernel_size, stride, 0, False)
    return x

def _max(raw,*args):
    x=raw(*args)
    if len(args)==1:
        # TODO: max over a single tensor
        raise NotImplementedError("torch.max over a single tensor is not implemented")
    else:
        bottom_blobs=[]
        for arg in args:
            bottom_blobs.append(log.blobs(arg))
        layer_name=log.add_layer(name='max')
        top_blobs=log.add_blobs([x],name='max_blob')
        layer=caffe_net.Layer_param(name=layer_name,type='Eltwise',
                                    bottom=bottom_blobs,top=top_blobs)
        layer.param.eltwise_param.operation =2
        log.cnet.add_layer(layer)
    return x

def _cat(raw, inputs, dimension=0):
    x=raw(inputs, dimension)
    bottom_blobs=[]
    for input in inputs:
        bottom_blobs.append(log.blobs(input))
    layer_name=log.add_layer(name='cat')
    top_blobs=log.add_blobs([x],name='cat_blob')
    layer=caffe_net.Layer_param(name=layer_name,type='Concat',
                                bottom=bottom_blobs,top=top_blobs)
    layer.param.concat_param.axis =dimension
    log.cnet.add_layer(layer)
    return x

def _dropout(raw,input,p=0.5, training=False, inplace=False):
    x=raw(input,p, training, inplace)
    bottom_blobs=[log.blobs(input)]
    layer_name=log.add_layer(name='dropout')
    top_blobs=log.add_blobs([x],name=bottom_blobs[0],with_num=False)
    layer=caffe_net.Layer_param(name=layer_name,type='Dropout',
                                bottom=bottom_blobs,top=top_blobs)
    layer.param.dropout_param.dropout_ratio = p
    layer.param.include.extend([caffe_net.pb.NetStateRule(phase=0)]) # 1 for test, 0 for train
    log.cnet.add_layer(layer)
    return x

def _threshold(raw,input, threshold, value, inplace=False):
    # for threshold or relu
    if threshold==0 and value==0:
        x = raw(input,threshold, value, inplace)
        bottom_blobs=[log.blobs(input)]
        name = log.add_layer(name='relu')
        log.add_blobs([x], name='relu_blob')
        layer = caffe_net.Layer_param(name=name, type='ReLU',
                                      bottom=bottom_blobs, top=[log.blobs(x)])
        log.cnet.add_layer(layer)
        return x
    if value!=0:
        raise NotImplementedError("value != 0 is not implemented in Caffe")
    x=raw(input, threshold, value, inplace)
    bottom_blobs=[log.blobs(input)]
    layer_name=log.add_layer(name='threshold')
    top_blobs=log.add_blobs([x],name='threshold_blob')
    layer=caffe_net.Layer_param(name=layer_name,type='Threshold',
                                bottom=bottom_blobs,top=top_blobs)
    layer.param.threshold_param.threshold = threshold
    log.cnet.add_layer(layer)
    return x

def _relu(raw, input, inplace=False):
    # always run out-of-place so the output tensor gets a fresh id for blob tracking
    x = raw(input, False)
    name = log.add_layer(name='relu')
    log.add_blobs([x], name='relu_blob')
    layer = caffe_net.Layer_param(name=name, type='ReLU',
                                  bottom=[log.blobs(input)], top=[log.blobs(x)])
    log.cnet.add_layer(layer)
    return x
def _prelu(raw, input, weight):
    # for threshold or prelu
    x = raw(input, weight)
    bottom_blobs=[log.blobs(input)]
    name = log.add_layer(name='prelu')
    log.add_blobs([x], name='prelu_blob')
    layer = caffe_net.Layer_param(name=name, type='PReLU',
                                  bottom=bottom_blobs, top=[log.blobs(x)])
    if weight.size()[0]==1:
        layer.param.prelu_param.channel_shared=True
        layer.add_data(weight.cpu().data.numpy()[0])
    else:
        layer.add_data(weight.cpu().data.numpy())
    log.cnet.add_layer(layer)
    return x

def _leaky_relu(raw, input, negative_slope=0.01, inplace=False):
    x = raw(input, negative_slope)
    name = log.add_layer(name='leaky_relu')
    log.add_blobs([x], name='leaky_relu_blob')
    layer = caffe_net.Layer_param(name=name, type='ReLU',
                                  bottom=[log.blobs(input)], top=[log.blobs(x)])
    layer.param.relu_param.negative_slope=negative_slope
    log.cnet.add_layer(layer)
    return x

def _tanh(raw, input):
    # for tanh activation
    x = raw(input)
    name = log.add_layer(name='tanh')
    log.add_blobs([x], name='tanh_blob')
    layer = caffe_net.Layer_param(name=name, type='TanH',
                                  bottom=[log.blobs(input)], top=[log.blobs(x)])
    log.cnet.add_layer(layer)
    return x

def _softmax(raw, input, dim=None, _stacklevel=3):
    # for F.softmax
    x=raw(input, dim=dim)
    if dim is None:
        dim=F._get_softmax_dim('softmax', input.dim(), _stacklevel)
    bottom_blobs=[log.blobs(input)]
    name = log.add_layer(name='softmax')
    log.add_blobs([x], name='softmax_blob')
    layer = caffe_net.Layer_param(name=name, type='Softmax',
                                  bottom=bottom_blobs, top=[log.blobs(x)])
    layer.param.softmax_param.axis=dim
    log.cnet.add_layer(layer)
    return x

def _batch_norm(raw,input, running_mean, running_var, weight=None, bias=None,
               training=False, momentum=0.1, eps=1e-5):
    # running_mean/running_var may be modified by the batch_norm op, so they are
    # cloned before being stored. Caffe has no affine BatchNorm: an affine PyTorch
    # BN is emitted as a BatchNorm layer followed by a Scale layer.

    x = raw(input, running_mean, running_var, weight, bias,
               training, momentum, eps)
    bottom_blobs = [log.blobs(input)]
    layer_name1 = log.add_layer(name='batch_norm')
    top_blobs = log.add_blobs([x], name='batch_norm_blob')
    layer1 = caffe_net.Layer_param(name=layer_name1, type='BatchNorm',
                                   bottom=bottom_blobs, top=top_blobs)
    if running_mean is None or running_var is None:
        # not use global_stats, normalization is performed over the current mini-batch
        layer1.batch_norm_param(use_global_stats=0,eps=eps)
    else:
        layer1.batch_norm_param(use_global_stats=1, eps=eps)
        running_mean_clone = running_mean.clone()
        running_var_clone = running_var.clone()
        layer1.add_data(running_mean_clone.cpu().numpy(), running_var_clone.cpu().numpy(), np.array([1.0]))
    log.cnet.add_layer(layer1)
    if weight is not None and bias is not None:
        layer_name2 = log.add_layer(name='bn_scale')
        layer2 = caffe_net.Layer_param(name=layer_name2, type='Scale',
                                       bottom=top_blobs, top=top_blobs)
        layer2.param.scale_param.bias_term = True
        layer2.add_data(weight.cpu().data.numpy(), bias.cpu().data.numpy())
        log.cnet.add_layer(layer2)
    return x

def _instance_norm(raw, input, running_mean=None, running_var=None, weight=None,
                  bias=None, use_input_stats=True, momentum=0.1, eps=1e-5):
    # TODO: the batch size!=1 view operations
    print("WARNING: The Instance Normalization transfers to Caffe using BatchNorm, so the batch size should be 1")
    if running_var is not None or weight is not None:
        # TODO: the affine=True or track_running_stats=True case
        raise NotImplementedError("not implement the affine=True or track_running_stats=True case InstanceNorm")
    x= torch.batch_norm(
        input, weight, bias, running_mean, running_var,
        use_input_stats, momentum, eps,torch.backends.cudnn.enabled)
    bottom_blobs = [log.blobs(input)]
    layer_name1 = log.add_layer(name='instance_norm')
    top_blobs = log.add_blobs([x], name='instance_norm_blob')
    layer1 = caffe_net.Layer_param(name=layer_name1, type='BatchNorm',
                                   bottom=bottom_blobs, top=top_blobs)
    if running_mean is None or running_var is None:
        # not use global_stats, normalization is performed over the current mini-batch
        layer1.batch_norm_param(use_global_stats=0,eps=eps)
        running_mean=torch.zeros(input.size()[1])
        running_var=torch.ones(input.size()[1])
    else:
        layer1.batch_norm_param(use_global_stats=1, eps=eps)
    running_mean_clone = running_mean.clone()
    running_var_clone = running_var.clone()
    layer1.add_data(running_mean_clone.cpu().numpy(), running_var_clone.cpu().numpy(), np.array([1.0]))
    log.cnet.add_layer(layer1)
    if weight is not None and bias is not None:
        layer_name2 = log.add_layer(name='bn_scale')
        layer2 = caffe_net.Layer_param(name=layer_name2, type='Scale',
                                       bottom=top_blobs, top=top_blobs)
        layer2.param.scale_param.bias_term = True
        layer2.add_data(weight.cpu().data.numpy(), bias.cpu().data.numpy())
        log.cnet.add_layer(layer2)
    return x


#upsample layer
def _interpolate(raw, input,size=None, scale_factor=None, mode='nearest', align_corners=None):
    # The Upsample layer's parameters include: scale (the output/input size ratio,
    # e.g. 2); scale_h / scale_w (per-axis ratios); pad_out_h / pad_out_w (extra
    # output padding, only meaningful when scale is 2); and upsample_h / upsample_w
    # (the exact output size). It is recommended to define the output size only via
    # upsample_h / upsample_w; the remaining parameters are deprecated.
    # only nearest-neighbour interpolation without align_corners is supported
    if mode != "nearest" or align_corners is not None:
        raise NotImplementedError("F.interpolate is only implemented for mode='nearest'")
    x = raw(input,size , scale_factor ,mode)

    layer_name = log.add_layer(name='upsample')
    top_blobs = log.add_blobs([x], name='upsample_blob')
    layer = caffe_net.Layer_param(name=layer_name, type='Upsample',
                                  bottom=[log.blobs(input)], top=top_blobs)

    layer.upsample_param(size =(input.size(2),input.size(3)), scale_factor= scale_factor)
    log.cnet.add_layer(layer)
    return x


# sigmoid layer
def _sigmoid(raw, input):
    # Applies the element-wise function: Sigmoid(x) = 1 / (1 + exp(-x))
    x = raw(input)
    name = log.add_layer(name='sigmoid')
    log.add_blobs([x], name='sigmoid_blob')
    layer = caffe_net.Layer_param(name=name, type='Sigmoid',
                                  bottom=[log.blobs(input)], top=[log.blobs(x)])
    log.cnet.add_layer(layer)
    return x

def _hardtanh(raw, input, min_val, max_val, inplace):
    # hardtanh is mapped to Caffe's ReLU6 (cf. torch.nn.ReLU6)
    print('relu6: ', log.blobs(input))
    x = raw(input, min_val, max_val)
    name = log.add_layer(name='relu6')
    log.add_blobs([x], name='relu6_blob')
    layer = caffe_net.Layer_param(name=name, type='ReLU6',
                                  bottom=[log.blobs(input)], top=[log.blobs(x)])
    log.cnet.add_layer(layer)
    return x

# L2Norm layer
def _l2Norm(raw, input, weight, eps):
    # channel-wise L2 normalization (the L2Norm layer used in vgg_ssd)
    x = raw(input, weight, eps)
    name = log.add_layer(name='normalize')
    log.add_blobs([x], name='normalize_blob')
    layer = caffe_net.Layer_param(name=name, type='Normalize',
                                  bottom=[log.blobs(input)], top=[log.blobs(x)])
    layer.norm_param(eps)

    layer.add_data(weight.cpu().data.numpy())
    log.cnet.add_layer(layer)
    return x

def _div(raw,inputs, inputs2):
    x=raw(inputs, inputs2)
    # no Caffe layer is emitted for div; only the output blob is registered
    log.add_blobs([x],name='div_blob')
    return x


# ----- for Variable operations --------

def _view(input, *args):
    x=raw_view(input, *args)
    if not NET_INITTED:
        return x
    layer_name=log.add_layer(name='view')
    top_blobs=log.add_blobs([x],name='view_blob')
    layer=caffe_net.Layer_param(name=layer_name,type='Reshape',
                                bottom=[log.blobs(input)],top=top_blobs)
    # TODO: reshape added to nn_tools layer
    dims=list(args)
    dims[0]=0 # the first dim should be batch_size
    layer.param.reshape_param.shape.CopyFrom(caffe_net.pb.BlobShape(dim=dims))
    log.cnet.add_layer(layer)
    return x

def _mean(input, *args,**kwargs):
    x=raw_mean(input, *args,**kwargs)
    if not NET_INITTED:
        return x
    layer_name=log.add_layer(name='mean')
    top_blobs=log.add_blobs([x],name='mean_blob')
    layer=caffe_net.Layer_param(name=layer_name,type='Reduction',
                                bottom=[log.blobs(input)],top=top_blobs)
    if len(args)==1:
        dim=args[0]
    elif 'dim' in kwargs:
        dim=kwargs['dim']
    else:
        raise NotImplementedError('mean operation must specify a dim')
    layer.param.reduction_param.operation=4
    layer.param.reduction_param.axis=dim
    log.cnet.add_layer(layer)
    return x

def _add(input, *args):
    x = raw__add__(input, *args)
    if not NET_INITTED:
        return x
    layer_name = log.add_layer(name='add')
    top_blobs = log.add_blobs([x], name='add_blob')
    if log.blobs(args[0]) is None:
        log.add_blobs([args[0]], name='extra_blob')
    else:
        layer = caffe_net.Layer_param(name=layer_name, type='Eltwise',
                                      bottom=[log.blobs(input),log.blobs(args[0])], top=top_blobs)
        layer.param.eltwise_param.operation = 1 # sum is 1
        log.cnet.add_layer(layer)
    return x

def _iadd(input, *args):
    x = raw__iadd__(input, *args)
    if not NET_INITTED:
        return x
    x=x.clone()
    layer_name = log.add_layer(name='add')
    top_blobs = log.add_blobs([x], name='add_blob')
    layer = caffe_net.Layer_param(name=layer_name, type='Eltwise',
                                  bottom=[log.blobs(input),log.blobs(args[0])], top=top_blobs)
    layer.param.eltwise_param.operation = 1 # sum is 1
    log.cnet.add_layer(layer)
    return x

def _sub(input, *args):
    x = raw__sub__(input, *args)
    if not NET_INITTED:
        return x
    layer_name = log.add_layer(name='sub')
    top_blobs = log.add_blobs([x], name='sub_blob')
    layer = caffe_net.Layer_param(name=layer_name, type='Eltwise',
                                  bottom=[log.blobs(input),log.blobs(args[0])], top=top_blobs)
    layer.param.eltwise_param.operation = 1 # sum is 1
    layer.param.eltwise_param.coeff.extend([1.,-1.])
    log.cnet.add_layer(layer)
    return x

def _isub(input, *args):
    x = raw__isub__(input, *args)
    if not NET_INITTED:
        return x
    x=x.clone()
    layer_name = log.add_layer(name='sub')
    top_blobs = log.add_blobs([x], name='sub_blob')
    layer = caffe_net.Layer_param(name=layer_name, type='Eltwise',
                                  bottom=[log.blobs(input),log.blobs(args[0])], top=top_blobs)
    layer.param.eltwise_param.operation = 1 # sum is 1
    log.cnet.add_layer(layer)
    return x

def _mul(input, *args):
    x = raw__mul__(input, *args)
    if not NET_INITTED:
        return x
    layer_name = log.add_layer(name='mul')
    top_blobs = log.add_blobs([x], name='mul_blob')
    layer = caffe_net.Layer_param(name=layer_name, type='Eltwise',
                                  bottom=[log.blobs(input), log.blobs(args[0])], top=top_blobs)
    layer.param.eltwise_param.operation = 0  # product (PROD) is 0
    log.cnet.add_layer(layer)
    return x

def _imul(input, *args):
    x = raw__imul__(input, *args)
    if not NET_INITTED:
        return x
    x = x.clone()
    layer_name = log.add_layer(name='mul')
    top_blobs = log.add_blobs([x], name='mul_blob')
    layer = caffe_net.Layer_param(name=layer_name, type='Eltwise',
                                  bottom=[log.blobs(input), log.blobs(args[0])], top=top_blobs)
    layer.param.eltwise_param.operation = 0  # product (PROD) is 0; Caffe accepts coeff only for SUM
    log.cnet.add_layer(layer)
    return x


#Permute layer
def _permute(input, *args):
    x = raw__permute__(input, *args)
    name = log.add_layer(name='permute')
    log.add_blobs([x], name='permute_blob')
    layer = caffe_net.Layer_param(name=name, type='Permute',
                                  bottom=[log.blobs(input)], top=[log.blobs(x)])
    # assumes a 4-D permutation
    order1, order2, order3, order4 = args

    layer.permute_param(order1, order2, order3, order4)
    log.cnet.add_layer(layer)
    return x

# contiguous: emitted as a placeholder 'NeedRemove' layer, stripped later by save_prototxt()
def _contiguous(input, *args):
    x = raw__contiguous__(input, *args)
    name = log.add_layer(name='contiguous')
    log.add_blobs([x], name='contiguous_blob')
    layer = caffe_net.Layer_param(name=name, type='NeedRemove',
                                  bottom=[log.blobs(input)], top=[log.blobs(x)])
    log.cnet.add_layer(layer)
    return x

# The next few ops (pow, sum, sqrt, unsqueeze) only register their output blobs;
# no Caffe layer is emitted for them.
#pow
def _pow(input, *args):
    x = raw__pow__(input, *args)
    log.add_blobs([x], name='pow_blob')
    return x

#sum
def _sum(input, *args):
    x = raw__sum__(input, *args)
    log.add_blobs([x], name='sum_blob')
    return x

# sqrt
def _sqrt(input, *args):
    x = raw__sqrt__(input, *args)
    log.add_blobs([x], name='sqrt_blob')
    return x

# unsqueeze
def _unsqueeze(input, *args):
    x = raw__unsqueeze__(input, *args)
    log.add_blobs([x], name='unsqueeze_blob')
    return x

def _expand_as(input, *args):
    # only support expand A(1, 1, H, W) to B(1, C, H, W)
    
    x = raw__expand_as__(input, *args)
    layer_name = log.add_layer(name="expand_as")
    log.add_blobs([x], name='expand_as_blob')
    layer = caffe_net.Layer_param(name=layer_name, type='Convolution',
                                  bottom=[log.blobs(input)], top=[log.blobs(x)])

    def constant_weight(shape):
        weights = np.ones(shape, dtype='float32')
        return weights

    channels = args[0].size(1)
    weight = constant_weight([channels, 1, 1, 1])
    layer.conv_param(channels, kernel_size = 1, bias_term=False, weight_filler_type='xavier')
    layer.add_data(weight)
    log.cnet.add_layer(layer)
    return x

## Modified here: map torch.flatten to a Caffe Reshape layer
def _flatten(raw , input, *args):
    x = raw(input, *args)
    if not NET_INITTED:
        return x
    layer_name=log.add_layer(name='flatten')
    top_blobs=log.add_blobs([x],name='flatten_blob')
    layer=caffe_net.Layer_param(name=layer_name,type='Reshape',
                                bottom=[log.blobs(input)],top=top_blobs)
    # x already carries the flattened shape, so the Reshape target is simply
    # x.shape; this also covers torch.flatten's default arguments and negative dims
    dims = list(x.shape)
    layer.param.reshape_param.shape.CopyFrom(caffe_net.pb.BlobShape(dim=dims))
    log.cnet.add_layer(layer)
    return x
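
# Worked example: torch.flatten(x, 1) on a (1, 512, 1, 1) tensor gives x.shape
# (1, 512), so the emitted Reshape layer gets reshape_param { shape { dim: 1 dim: 512 } }.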


class Rp(object):
    def __init__(self,raw,replace,**kwargs):
        # route calls through the replacement, which logs a Caffe layer and then calls the raw function
        self.obj=replace
        self.raw=raw

    def __call__(self,*args,**kwargs):
        if not NET_INITTED:
            return self.raw(*args,**kwargs)
        # walk up the call stack to find the nn.Module whose forward() invoked this
        # op, so the original PyTorch layer name can be recorded
        for stack in traceback.walk_stack(None):
            if 'self' in stack[0].f_locals:
                layer=stack[0].f_locals['self']
                if layer in layer_names:
                    log.pytorch_layer_name=layer_names[layer]
                    print(layer_names[layer])
                    break
        out=self.obj(self.raw,*args,**kwargs)
        # if isinstance(out,Variable):
        #     out=[out]
        return out


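# Monkey-patch the functional API: each torch.nn.functional entry point below is
# replaced by an Rp wrapper, so every call made during net.forward() both computes
# the result and emits the corresponding Caffe layer.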
F.conv2d=Rp(F.conv2d,_conv2d)
F.linear=Rp(F.linear,_linear)
F.relu=Rp(F.relu,_relu)
F.leaky_relu=Rp(F.leaky_relu,_leaky_relu)
F.max_pool2d=Rp(F.max_pool2d,_max_pool2d)
F.avg_pool2d=Rp(F.avg_pool2d,_avg_pool2d)
F.adaptive_avg_pool2d = Rp(F.adaptive_avg_pool2d,_adaptive_avg_pool2d)
F.dropout=Rp(F.dropout,_dropout)
F.threshold=Rp(F.threshold,_threshold)
F.prelu=Rp(F.prelu,_prelu)
F.batch_norm=Rp(F.batch_norm,_batch_norm)
F.instance_norm=Rp(F.instance_norm,_instance_norm)
F.softmax=Rp(F.softmax,_softmax)
F.conv_transpose2d=Rp(F.conv_transpose2d,_conv_transpose2d)
F.interpolate = Rp(F.interpolate,_interpolate)
F.sigmoid = Rp(F.sigmoid,_sigmoid)
F.tanh = Rp(F.tanh,_tanh)
F.hardtanh = Rp(F.hardtanh,_hardtanh)
# F.l2norm = Rp(F.l2norm,_l2Norm)

torch.split=Rp(torch.split,_split)
torch.max=Rp(torch.max,_max)
torch.cat=Rp(torch.cat,_cat)
torch.div=Rp(torch.div,_div)
## This also needs to be modified: register the flatten wrapper
torch.flatten = Rp(torch.flatten,_flatten)

# TODO: other types of the view function
try:
    raw_view=Variable.view
    Variable.view=_view
    raw_mean=Variable.mean
    Variable.mean=_mean
    raw__add__=Variable.__add__
    Variable.__add__=_add
    raw__iadd__=Variable.__iadd__
    Variable.__iadd__=_iadd
    raw__sub__=Variable.__sub__
    Variable.__sub__=_sub
    raw__isub__=Variable.__isub__
    Variable.__isub__=_isub
    raw__mul__ = Variable.__mul__
    Variable.__mul__ = _mul
    raw__imul__ = Variable.__imul__
    Variable.__imul__ = _imul
except:
    # for PyTorch 0.4.0 and later: patch torch.Tensor directly
    for t in [torch.Tensor]:
        raw_view = t.view
        t.view = _view
        raw_mean = t.mean
        t.mean = _mean
        raw__add__ = t.__add__
        t.__add__ = _add
        raw__iadd__ = t.__iadd__
        t.__iadd__ = _iadd
        raw__sub__ = t.__sub__
        t.__sub__ = _sub
        raw__isub__ = t.__isub__
        t.__isub__ = _isub
        raw__mul__ = t.__mul__
        t.__mul__=_mul
        raw__imul__ = t.__imul__
        t.__imul__ = _imul
        raw__permute__ = t.permute
        t.permute = _permute
        raw__contiguous__ = t.contiguous
        t.contiguous = _contiguous
        raw__pow__ = t.pow
        t.pow = _pow
        raw__sum__ = t.sum
        t.sum = _sum
        raw__sqrt__ = t.sqrt
        t.sqrt = _sqrt
        raw__unsqueeze__ = t.unsqueeze
        t.unsqueeze = _unsqueeze
        raw__expand_as__ = t.expand_as
        t.expand_as = _expand_as


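# Conversion entry point: register the input blob, flip NET_INITTED so the wrappers
# start recording, then run a single forward pass to trace the whole network.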
def trans_net(net, input_var, name='TransferedPytorchModel'):
    print('Starting transform; this may take a while')
    log.init([input_var])
    log.cnet.net.name=name
    log.cnet.net.input.extend([log.blobs(input_var)])
    log.cnet.net.input_dim.extend(input_var.size())
    global NET_INITTED
    NET_INITTED=True
    for name,layer in net.named_modules():
        layer_names[layer]=name
    print("torch ops name:", layer_names)
    out = net.forward(input_var)
    print('Transform Completed')

def save_prototxt(save_name):
    log.cnet.remove_layer_by_type("NeedRemove")
    log.cnet.save_prototxt(save_name)

def save_caffemodel(save_name):
    log.cnet.save(save_name)

This concludes the walkthrough of converting a PyTorch model to a Caffe model.
