import torch
from torch import nn
from torch.quantization import QuantStub, DeQuantStub


class MobileNetV2(nn.Module):
    def __init__(self, num_classes=1000, width_mult=1.0, inverted_residual_setting=None, round_nearest=8):
        """
        MobileNet V2 main class

        Args:
            num_classes (int): Number of classes
            width_mult (float): Width multiplier - adjusts number of channels in each layer by this amount
            inverted_residual_setting: Network structure
            round_nearest (int): Round the number of channels in each layer to be a multiple of this number
                Set to 1 to turn off rounding
        """
        super(MobileNetV2, self).__init__()
        block = InvertedResidual
        input_channel = 32
        last_channel = 1280

        if inverted_residual_setting is None:
            inverted_residual_setting = [
                # t, c, n, s
                [1, 16, 1, 1],
                [6, 24, 2, 2],
                [6, 32, 3, 2],
                [6, 64, 4, 2],
                [6, 96, 3, 1],
                [6, 160, 3, 2],
                [6, 320, 1, 1],
            ]

        # only check the first element, assuming user knows t,c,n,s are required
        if len(inverted_residual_setting) == 0 or len(inverted_residual_setting[0]) != 4:
            raise ValueError("inverted_residual_setting should be non-empty and each element "
                             "should be a 4-element list, got {}".format(inverted_residual_setting))

        # building first layer
        input_channel = _make_divisible(input_channel * width_mult, round_nearest)
        self.last_channel = _make_divisible(last_channel * max(1.0, width_mult), round_nearest)
        features = [ConvBNReLU(3, input_channel, stride=2)]
        # building inverted residual blocks
        for t, c, n, s in inverted_residual_setting:
            output_channel = _make_divisible(c * width_mult, round_nearest)
            for i in range(n):
                # only the first block in each stage downsamples
                stride = s if i == 0 else 1
                features.append(block(input_channel, output_channel, stride, expand_ratio=t))
                input_channel = output_channel
        # building last several layers
        features.append(ConvBNReLU(input_channel, self.last_channel, kernel_size=1))
        # make it nn.Sequential
        self.features = nn.Sequential(*features)
        self.quant = QuantStub()
        self.dequant = DeQuantStub()
        # building classifier
        self.classifier = nn.Sequential(
            nn.Dropout(0.2),
            nn.Linear(self.last_channel, num_classes),
        )

        # weight initialization
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out')
                if m.bias is not None:
                    nn.init.zeros_(m.bias)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.ones_(m.weight)
                nn.init.zeros_(m.bias)
            elif isinstance(m, nn.Linear):
                nn.init.normal_(m.weight, 0, 0.01)
                nn.init.zeros_(m.bias)

    def forward(self, x):
        x = self.quant(x)
        x = self.features(x)
        x = x.mean([2, 3])  # global average pool over spatial dims
        x = self.classifier(x)
        x = self.dequant(x)
        return x

    # Fuse Conv+BN and Conv+BN+ReLU modules prior to quantization
    # This operation does not change the numerics
    def fuse_model(self):
        for m in self.modules():
            if type(m) == ConvBNReLU:
                torch.quantization.fuse_modules(m, ['0', '1', '2'], inplace=True)
            if type(m) == InvertedResidual:
                for idx in range(len(m.conv)):
                    if type(m.conv[idx]) == nn.Conv2d:
                        torch.quantization.fuse_modules(m.conv, [str(idx), str(idx + 1)], inplace=True)
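For orientation, here is a minimal sketch of how this class plugs into the eager-mode post-training static quantization flow: fuse, attach observers, calibrate, convert. It assumes the helper modules ConvBNReLU, InvertedResidual, and _make_divisible are defined as elsewhere in this tutorial; the random calibration batches are a stand-in for a real calibration loader, not part of the original code.

# Sketch: post-training static quantization of the model above.
# Assumes ConvBNReLU, InvertedResidual, and _make_divisible are in scope.
import torch

model = MobileNetV2(num_classes=1000)
model.eval()  # fusion and static quantization require eval mode

# Fuse Conv+BN(+ReLU) so quantized inference runs single fused kernels
model.fuse_model()

# Attach observers; 'fbgemm' targets x86 server CPUs
model.qconfig = torch.quantization.get_default_qconfig('fbgemm')
torch.quantization.prepare(model, inplace=True)

# Calibrate so observers can choose scales and zero points
# (stand-in random batches; use representative data in practice)
with torch.no_grad():
    for _ in range(10):
        model(torch.randn(1, 3, 224, 224))

# Replace observed modules with true int8 quantized modules
torch.quantization.convert(model, inplace=True)

Because QuantStub and DeQuantStub bracket forward(), the converted model quantizes its input once, runs the entire backbone and classifier in int8, and dequantizes only the final logits.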