DeepLabv3+ Series: The ResNet Backbone Network


Code for the ResNet backbone of DeepLabv3+, covering ResNet18, ResNet34, ResNet50, ResNet101, and ResNet152.

Built with PaddlePaddle 2.0. PaddlePaddle 2.0 was released recently and is quite pleasant to use, so, drawing on code written for earlier versions, I rewrote the DeepLabv3+ResNet network in PaddlePaddle 2.0. This article covers the backbone network; the DeepLabv3+ head is covered in the next article in this series.
Original paper: "Encoder-Decoder with Atrous Separable Convolution for Semantic Image Segmentation" (the DeepLabv3+ paper).
1. Network architecture diagram:
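In outline, as implemented in the code below: a 7x7 stride-2 convolution with batch norm and ReLU, a 3x3 stride-2 max pool, then four residual stages. layer1 and layer2 follow the standard ResNet (output stride 8 after layer2), while layer3 and layer4 keep stride 1 and compensate with dilated convolutions (dilation 2 in layer3, multi-grid rates [1, 2, 4] in layer4). The backbone therefore delivers a denser feature map than the stride-32 classification ResNet, which is what DeepLabv3+ needs from its encoder.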
2. Complete, runnable code:
import numpy as np
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
def SyncBatchNorm(*args, **kwargs):
    """BatchNorm2D for the CPU case, SyncBatchNorm for the GPU case."""
    if paddle.get_device() == 'cpu':
        return nn.BatchNorm2D(*args, **kwargs)
    else:
        return nn.SyncBatchNorm(*args, **kwargs)
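# paddle.get_device() returns 'cpu' on a CPU-only install and e.g. 'gpu:0' on a
# GPU machine, so multi-card training gets synchronized batch-norm statistics
# while CPU debugging silently falls back to plain BatchNorm2D.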
class ConvBNLayer(nn.Layer):
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 groups=1,
                 act=None,
                 dilation=1,
                 padding=None,
                 name=None):
        super(ConvBNLayer, self).__init__(name)
        if padding is None:
            padding = (kernel_size - 1) // 2
        self.conv = nn.Conv2D(in_channels=in_channels,
                              out_channels=out_channels,
                              kernel_size=kernel_size,
                              stride=stride,
                              padding=padding,
                              groups=groups,
                              dilation=dilation,
                              bias_attr=False)  # BN follows, so the conv bias is redundant
        self.bn = SyncBatchNorm(out_channels)
        self.act = act
        self._act_op = nn.ReLU()
    def forward(self, inputs):
        y = self.conv(inputs)
        y = self.bn(y)
        if self.act == 'relu':
            y = self._act_op(y)
        return y
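# Shape example (hypothetical values): ConvBNLayer(3, 64, kernel_size=7, stride=2)
# pads by (7 - 1) // 2 = 3, so [N, 3, 224, 224] -> [N, 64, 112, 112]; with the
# default "same"-style padding, only the stride changes the spatial size.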
class BasicBlock(nn.Layer):
    expansion = 1  # expand ratio for the last conv's output channels in each block

    def __init__(self,
                 in_channels,
                 out_channels,
                 dilation=1,
                 stride=1,
                 padding=None,
                 shortcut=True,
                 name=None):
        super(BasicBlock, self).__init__(name)
        self.conv0 = ConvBNLayer(in_channels=in_channels,
                                 out_channels=out_channels,
                                 kernel_size=3,
                                 stride=stride,
                                 act='relu',
                                 name=name)
        self.conv1 = ConvBNLayer(in_channels=out_channels,
                                 out_channels=out_channels,
                                 kernel_size=3,
                                 act=None,
                                 name=name)
        if not shortcut:
            # projection shortcut: a 1x1 conv matches channels and stride
            self.short = ConvBNLayer(in_channels=in_channels,
                                     out_channels=out_channels,
                                     kernel_size=1,
                                     stride=stride,
                                     act=None,
                                     name=name)
        self.shortcut = shortcut
    def forward(self, inputs):
        conv0 = self.conv0(inputs)
        conv1 = self.conv1(conv0)
        if self.shortcut:
            short = inputs
        else:
            short = self.short(inputs)
        # paddle.add performs an element-wise sum of the shortcut and residual branches
        y = paddle.add(x=short, y=conv1)
        y = F.relu(y)
        return y
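# Example (hypothetical values): BasicBlock(64, 128, stride=2, shortcut=False)
# maps [N, 64, 56, 56] -> [N, 128, 28, 28]; the 1x1 projection branch resizes
# the identity path so paddle.add sees matching shapes.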
class BottleneckBlock(nn.Layer):
    expansion = 4

    def __init__(self,
                 in_channels,
                 out_channels,
                 stride=1,
                 shortcut=True,
                 dilation=1,
                 padding=None,
                 name=None):
        super(BottleneckBlock, self).__init__(name)
        self.conv0 = ConvBNLayer(in_channels=in_channels,
                                 out_channels=out_channels,
                                 kernel_size=1,
                                 act='relu')
        self.conv1 = ConvBNLayer(in_channels=out_channels,
                                 out_channels=out_channels,
                                 kernel_size=3,
                                 stride=stride,
                                 padding=padding,
                                 act='relu',
                                 dilation=dilation)
        self.conv2 = ConvBNLayer(in_channels=out_channels,
                                 out_channels=out_channels * 4,
                                 kernel_size=1,
                                 stride=1)
        if not shortcut:
            self.short = ConvBNLayer(in_channels=in_channels,
                                     out_channels=out_channels * 4,
                                     kernel_size=1,
                                     stride=stride)
        self.shortcut = shortcut
        self.num_channel_out = out_channels * 4
    def forward(self, inputs):
        conv0 = self.conv0(inputs)
        conv1 = self.conv1(conv0)
        conv2 = self.conv2(conv1)
        if self.shortcut:
            short = inputs
        else:
            short = self.short(inputs)
        # paddle.add performs an element-wise sum of the shortcut and residual branches
        y = paddle.add(x=short, y=conv2)
        y = F.relu(y)
        return y
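# The bottleneck is 1x1 reduce -> 3x3 (optionally dilated) -> 1x1 restore (x4):
# e.g. BottleneckBlock(256, 64) outputs 64 * 4 = 256 channels, and only the
# middle 3x3 conv receives dilation/padding, which is how the dilated stages
# below enlarge the receptive field without downsampling.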
class ResNet(nn.Layer):
    def __init__(self, layers=50, num_classes=1000, multi_grid=[1, 2, 4], duplicate_blocks=False):
        super(ResNet, self).__init__()
        self.layers = layers
        supported_layers = [18, 34, 50, 101, 152]
        assert layers in supported_layers
        mgr = [1, 2, 4]  # multi-grid rates for the duplicated blocks
        if layers == 18:
            depth = [2, 2, 2, 2]
        elif layers == 34:
            depth = [3, 4, 6, 3]
        elif layers == 50:
            depth = [3, 4, 6, 3]
        elif layers == 101:
            depth = [3, 4, 23, 3]
        elif layers == 152:
            depth = [3, 8, 36, 3]
        # channel counts entering each stage (BasicBlock vs. 4x-expanded Bottleneck)
        if layers < 50:
            in_channels = [64, 64, 128, 256, 512]
        else:
            in_channels = [64, 256, 512, 1024, 2048]
        self.out_channels = [64, 128, 256, 512]
        self.conv = ConvBNLayer(in_channels=3,
                                out_channels=64,
                                kernel_size=7,
                                stride=2,
                                act='relu')
        self.pool2d_max = nn.MaxPool2D(kernel_size=3,
                                       stride=2,
                                       padding=1)
        if layers < 50:
            block = BasicBlock
            l1_shortcut = True
        else:
            block = BottleneckBlock
            l1_shortcut = False
        self.layer1 = nn.Sequential(
            *self.make_layer(block,
                             in_channels[0],
                             self.out_channels[0],
                             depth[0],
                             stride=1,
                             shortcut=l1_shortcut,
                             name='layer1'))
        self.layer2 = nn.Sequential(
            *self.make_layer(block,
                             in_channels[1],
                             self.out_channels[1],
                             depth[1],
                             stride=2,
                             name='layer2'))
        # layer3: no further downsampling; dilation 2 preserves resolution
        self.layer3 = nn.Sequential(
            *self.make_layer(block,
                             in_channels[2],
                             self.out_channels[2],
                             depth[2],
                             stride=1,
                             dilation=2,
                             name='layer3'))
        # layer4 applies the multi-grid dilations [1, 2, 4]
        self.layer4 = nn.Sequential(
            *self.make_layer(block,
                             in_channels[3],
                             self.out_channels[3],
                             depth[3],
                             stride=1,
                             name='layer4',
                             dilation=multi_grid))
        # cascaded extra stages for DeepLabv3-style backbones; gated on
        # duplicate_blocks so the plain classifier variant skips them
        if duplicate_blocks:
            self.layer5 = nn.Sequential(
                *self.make_layer(block,
                                 in_channels[4],
                                 self.out_channels[3],
                                 depth[3],
                                 stride=1,
                                 name='layer5',
                                 dilation=[x * mgr[0] for x in multi_grid]))
            self.layer6 = nn.Sequential(
                *self.make_layer(block,
                                 in_channels[4],
                                 self.out_channels[3],
                                 depth[3],
                                 stride=1,
                                 name='layer6',
                                 dilation=[x * mgr[1] for x in multi_grid]))
            self.layer7 = nn.Sequential(
                *self.make_layer(block,
                                 in_channels[4],
                                 self.out_channels[3],
                                 depth[3],
                                 stride=1,
                                 name='layer7',
                                 dilation=[x * mgr[2] for x in multi_grid]))
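        # With multi_grid = [1, 2, 4] and mgr = [1, 2, 4], the duplicated stages
        # get dilations [1, 2, 4] (layer5), [2, 4, 8] (layer6) and [4, 8, 16]
        # (layer7): each cascaded stage doubles the rates, mirroring the
        # cascaded blocks of DeepLabv3.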
        self.last_pool = nn.AdaptiveAvgPool2D(output_size=(1, 1))  # adaptive average pool to 1x1 (global average pooling)
        self.fc = nn.Linear(in_features=self.out_channels[-1] * block.expansion,
                            out_features=num_classes)
        self.out_dim = self.out_channels[-1] * block.expansion
    def forward(self, inputs):
        x = self.conv(inputs)
        x = self.pool2d_max(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.last_pool(x)
        x = paddle.reshape(x, shape=[-1, self.out_dim])
        x = self.fc(x)
        return x
    def make_layer(self, block, in_channels, out_channels, depth, stride,
                   dilation=1, shortcut=False, name=None):
        layers = []
        if isinstance(dilation, int):
            dilation = [dilation] * depth
        elif isinstance(dilation, (list, tuple)):
            assert len(dilation) == 3, "Wrong dilation rate for multi-grid | len should be 3"
            assert depth == 3, "multi-grid can only be applied to blocks with depth 3"
        # with di > 1, pad by the dilation rate so the 3x3 conv stays size-preserving;
        # None lets ConvBNLayer fall back to its default "same" padding
        padding = []
        for di in dilation:
            if di > 1:
                padding.append(di)
            else:
                padding.append(None)
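The listing breaks off here in the archived copy: the loop that actually stacks the blocks and the function's return statement are missing. Below is a minimal completion consistent with how make_layer is called above; the loop structure and the name formatting are my reconstruction, not the author's verbatim code.

        # The first block of a stage may downsample and/or project the shortcut;
        # later blocks take block.expansion * out_channels as input and use
        # identity shortcuts (their `shortcut` argument keeps its default True).
        for i in range(depth):
            if i == 0:
                layers.append(block(in_channels,
                                    out_channels,
                                    stride=stride,
                                    shortcut=shortcut,
                                    dilation=dilation[i],
                                    padding=padding[i],
                                    name='{}.{}'.format(name, i)))
            else:
                layers.append(block(out_channels * block.expansion,
                                    out_channels,
                                    stride=1,
                                    dilation=dilation[i],
                                    padding=padding[i],
                                    name='{}.{}'.format(name, i)))
        return layers

With that in place, a quick smoke test (note that the default multi_grid list assumes the last stage has exactly 3 blocks, so it fits the 34/50/101/152 configurations but not ResNet18's [2, 2, 2, 2] depths):

if __name__ == '__main__':
    model = ResNet(layers=50)
    x = paddle.randn([2, 3, 224, 224])
    y = model(x)
    print(y.shape)  # [2, 1000]; the feature map entering the pool sits at stride 8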
