Merging BN Layers into Convolution Layers: Principle and Experiments


1. Why merge the BN layer

When training a deep network, a BN (Batch Normalization) layer, usually placed right after a convolution layer, speeds up convergence and helps control overfitting. By normalizing the data, BN also effectively mitigates vanishing and exploding gradients. While BN is beneficial during training, at inference time it adds extra layer computations to the forward pass and consumes additional memory or GPU memory. Since many state-of-the-art models (ResNet, MobileNet, Xception, ShuffleNet, and others) use BN, it is worthwhile to fold the BN parameters into the preceding convolution layer to speed up forward inference.

2. The math behind merging a BN layer into a convolution layer

In the convolution layer, let W be the convolution weights and B the convolution bias, so for an input x the layer computes:

    y_conv = W·x + B

In the BN layer, let μ be the mean, σ² the variance, γ the scale factor, β the shift, and ε a small constant that keeps the denominator from being zero. At inference time the BN layer computes:

    y_bn = γ·(y_conv − μ) / sqrt(σ² + ε) + β

After merging the BN layer into the convolution layer, substituting y_conv into the BN formula gives a single convolution with:

    W_merged = γ·W / sqrt(σ² + ε)
    B_merged = γ·(B − μ) / sqrt(σ² + ε) + β

so that y_bn = W_merged·x + B_merged, and the BN layer can be dropped at inference time. (In Caffe, μ and σ² are stored in a BatchNorm layer while γ and β live in a following Scale layer; the script in section 4 folds both into the convolution.)
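As a quick sanity check, the equivalence can be verified numerically. The following sketch uses plain NumPy, with a per-channel linear map standing in for a 1x1 convolution; all names and shapes are illustrative:

import numpy as np

rng = np.random.default_rng(0)
C = 4                                  # number of output channels
x = rng.normal(size=C)                 # input, one value per channel
W = rng.normal(size=C)                 # per-channel conv weight
B = rng.normal(size=C)                 # conv bias
mu = rng.normal(size=C)                # BN mean
var = rng.uniform(0.5, 2.0, size=C)    # BN variance
gamma = rng.normal(size=C)             # scale factor
beta = rng.normal(size=C)              # shift
eps = 1e-5

# Convolution followed by BN (inference form)
y_conv = W * x + B
y_bn = gamma * (y_conv - mu) / np.sqrt(var + eps) + beta

# Single merged convolution
alpha = gamma / np.sqrt(var + eps)
y_merged = (alpha * W) * x + (alpha * (B - mu) + beta)

print(np.allclose(y_bn, y_merged))     # True: the two paths are identical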

3. Experimental results

Test machine: GTX 1080Ti GPU, Intel i7 CPU

This experiment compares ResNet-50 before and after merging the BN layers: classification accuracy is unchanged, while forward speed improves noticeably.

Model                       CPU forward time    GPU forward time
ResNet-50 (before merging)  176.17 ms           11.03 ms
ResNet-50 (after merging)   161.69 ms           7.3 ms
Speedup                     ~10%                ~51%
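For reference, forward times of this kind can be measured with a simple pycaffe loop. The sketch below is one possible way to do it, not necessarily how the numbers above were produced; the model file names are placeholders, and the warm-up plus averaging keeps the measurement stable:

import time
import caffe

caffe.set_mode_gpu()                   # or caffe.set_mode_cpu() for the CPU numbers
net = caffe.Net('resnet50_inference.prototxt', 'resnet50_inference.caffemodel', caffe.TEST)

for _ in range(10):                    # warm-up runs, not timed
    net.forward()

n = 100
start = time.time()
for _ in range(n):
    net.forward()
print('mean forward time: {:.2f} ms'.format((time.time() - start) * 1000.0 / n))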

4. The Python merge script

The script requires Caffe's Python interface (pycaffe).

#!/usr/bin/env python
# -*- coding: UTF-8 -*-
import numpy as np
import os.path as osp
import google.protobuf as pb
import google.protobuf.text_format
from argparse import ArgumentParser
import caffe

caffe.set_mode_cpu()


def load_and_fill_biases(src_model, src_weights, dst_model, dst_weights):
    with open(src_model) as f:
        model = caffe.proto.caffe_pb2.NetParameter()
        pb.text_format.Merge(f.read(), model)

    for i, layer in enumerate(model.layer):
        if layer.type == 'Convolution':  # or layer.type == 'Scale':
            # Add a zero bias if the convolution has none, so the folded BN
            # offset has somewhere to go
            if layer.convolution_param.bias_term == False:
                layer.convolution_param.bias_term = True
                layer.convolution_param.bias_filler.type = 'constant'
                layer.convolution_param.bias_filler.value = 0.0

    with open(dst_model, 'w') as f:
        f.write(pb.text_format.MessageToString(model))

    caffe.set_mode_cpu()
    net_src = caffe.Net(src_model, src_weights, caffe.TEST)
    net_dst = caffe.Net(dst_model, caffe.TEST)
    for key in net_src.params.keys():
        for i in range(len(net_src.params[key])):
            net_dst.params[key][i].data[:] = net_src.params[key][i].data[:]

    if dst_weights is not None:
        # Store params
        pass

    return net_dst


def merge_conv_and_bn(net, i_conv, i_bn, i_scale):
    # This is based on Kyeheyon's work
    assert i_conv is not None
    assert i_bn is not None

    def copy_double(data):
        return np.array(data, copy=True, dtype=np.double)

    key_conv = net._layer_names[i_conv]
    key_bn = net._layer_names[i_bn]
    key_scale = net._layer_names[i_scale] if i_scale is not None else None

    # Copy the BN statistics ...
    bn_mean = copy_double(net.params[key_bn][0].data)
    bn_variance = copy_double(net.params[key_bn][1].data)
    num_bn_samples = copy_double(net.params[key_bn][2].data)

    # ... and invalidate the BN layer
    net.params[key_bn][0].data[:] = 0
    net.params[key_bn][1].data[:] = 1
    net.params[key_bn][2].data[:] = 1
    if num_bn_samples[0] == 0:
        num_bn_samples[0] = 1

    if key_scale is not None and key_scale in net.params:
        print('Combine {:s} + {:s} + {:s}'.format(key_conv, key_bn, key_scale))
        scale_weight = copy_double(net.params[key_scale][0].data)
        scale_bias = copy_double(net.params[key_scale][1].data)
        net.params[key_scale][0].data[:] = 1
        net.params[key_scale][1].data[:] = 0
    else:
        print('Combine {:s} + {:s}'.format(key_conv, key_bn))
        scale_weight = 1
        scale_bias = 0

    weight = copy_double(net.params[key_conv][0].data)
    bias = copy_double(net.params[key_conv][1].data)
    # alpha = gamma / sqrt(var + eps); Caffe stores the statistics scaled by
    # a moving-average factor, hence the division by num_bn_samples
    alpha = scale_weight / np.sqrt(bn_variance / num_bn_samples[0] + 1e-5)
    net.params[key_conv][1].data[:] = bias * alpha + (scale_bias - (bn_mean / num_bn_samples[0]) * alpha)
    for i in range(len(alpha)):
        net.params[key_conv][0].data[i] = weight[i] * alpha[i]


def merge_batchnorms_in_net(net):
    # For each BN layer
    for i, layer in enumerate(net.layers):
        if layer.type != 'BatchNorm':
            continue

        l_name = net._layer_names[i]

        l_bottom = net.bottom_names[l_name]
        assert len(l_bottom) == 1
        l_bottom = l_bottom[0]
        l_top = net.top_names[l_name]
        assert len(l_top) == 1
        l_top = l_top[0]

        can_be_absorbed = True

        # Search all (bottom) layers for the producing convolution
        for j in range(i - 1, -1, -1):
            tops_of_j = net.top_names[net._layer_names[j]]
            if l_bottom in tops_of_j:
                if net.layers[j].type not in ['Convolution', 'InnerProduct']:
                    can_be_absorbed = False
                else:
                    # There must be only one layer
                    conv_ind = j
                    break
        if not can_be_absorbed:
            continue

        # Find the following Scale layer
        scale_ind = None
        for j in range(i + 1, len(net.layers)):
            bottoms_of_j = net.bottom_names[net._layer_names[j]]
            if l_top in bottoms_of_j:
                if scale_ind is not None:
                    # Followed by two or more layers
                    scale_ind = None
                    break

                if net.layers[j].type in ['Scale']:
                    scale_ind = j

                    top_of_j = net.top_names[net._layer_names[j]][0]
                    if top_of_j == bottoms_of_j[0]:
                        # In-place Scale => can be merged
                        break
                else:
                    # Followed by a layer which is not 'Scale'
                    scale_ind = None
                    break

        merge_conv_and_bn(net, conv_ind, i, scale_ind)

    return net


def process_model(net, src_model, dst_model, func_loop, func_finally):
    with open(src_model) as f:
        model = caffe.proto.caffe_pb2.NetParameter()
        pb.text_format.Merge(f.read(), model)

    for i, layer in enumerate(model.layer):
        for func in func_loop:
            func(layer, net, model, i)

    for func in func_finally:
        func(net, model)

    with open(dst_model, 'w') as f:
        f.write(pb.text_format.MessageToString(model))


# Functions to remove (redundant) BN and Scale layers
to_delete_empty = []


def pick_empty_layers(layer, net, model, i):
    if layer.type not in ['BatchNorm', 'Scale']:
        return

    bottom = layer.bottom[0]
    top = layer.top[0]

    if bottom != top:
        # Not supported yet
        return

    if layer.type == 'BatchNorm':
        zero_mean = np.all(net.params[layer.name][0].data == 0)
        one_var = np.all(net.params[layer.name][1].data == 1)
        if zero_mean and one_var:
            print('Delete layer: {}'.format(layer.name))
            to_delete_empty.append(layer)

    if layer.type == 'Scale':
        no_scaling = np.all(net.params[layer.name][0].data == 1)
        zero_bias = np.all(net.params[layer.name][1].data == 0)
        if no_scaling and zero_bias:
            print('Delete layer: {}'.format(layer.name))
            to_delete_empty.append(layer)


def remove_empty_layers(net, model):
    for layer in to_delete_empty:
        model.layer.remove(layer)


# A function to add 'engine: CAFFE' param into 1x1 convolutions
def set_engine_caffe(layer, net, model, i):
    if layer.type == 'Convolution':
        if layer.convolution_param.kernel_size == 1 \
                or (layer.convolution_param.kernel_h == layer.convolution_param.kernel_w == 1):
            layer.convolution_param.engine = dict(layer.convolution_param.Engine.items())['CAFFE']


def main():
    # Set default output file names
    if args.output_model is None:
        file_name = osp.splitext(args.model)[0]
        args.output_model = file_name + '_inference.prototxt'
    if args.output_weights is None:
        file_name = osp.splitext(args.weights)[0]
        args.output_weights = file_name + '_inference.caffemodel'

    net = load_and_fill_biases(args.model, args.weights, args.model + '.temp.pt', None)
    net = merge_batchnorms_in_net(net)

    process_model(net, args.model + '.temp.pt', args.output_model,
                  [pick_empty_layers, set_engine_caffe],
                  [remove_empty_layers])

    # Store params
    net.save(args.output_weights)


if __name__ == '__main__':
    parser = ArgumentParser(description="Generate Batch Normalized model for inference")
    parser.add_argument('--model', default="MobileNetSSD_deploy.prototxt", help="The net definition prototxt")
    parser.add_argument('--weights', default="MobileNetSSD_deploy.caffemodel", help="The weights caffemodel")
    parser.add_argument('--output_model')
    parser.add_argument('--output_weights')
    args = parser.parse_args()
    main()
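Assuming the script is saved as merge_bn.py (the file name is arbitrary, and the ResNet-50 model files below are only examples), a typical invocation looks like this; by default the merged definition and weights are written next to the inputs as *_inference.prototxt and *_inference.caffemodel:

python merge_bn.py --model ResNet-50-deploy.prototxt --weights ResNet-50-model.caffemodel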

Script download link:

https://download.csdn.net/download/kangdi7547/10578152
---------------------
Author: 小麥草
Source: CSDN
Original post: https://blog.csdn.net/kangdi7547/article/details/81348254
Copyright notice: This is the author's original article; please include a link to it when reposting.
