Modules required by PointPillar: the original PointPillars model structure in Pcdet (OpenPCDet)
The network consists of four parts:
(1) PillarVFE            pcdet/models/backbones_3d/vfe/pillar_vfe.py                   # pillar feature encoder (VFE)
(2) PointPillarScatter   pcdet/models/backbones_2d/map_to_bev/pointpillar_scatter.py   # scatters pillar features into a 2D BEV pseudo-image
(3) BaseBEVBackbone      pcdet/models/backbones_2d/base_bev_backbone.py                # 2D convolutional backbone
(4) AnchorHeadSingle     pcdet/models/dense_heads/anchor_head_single.py                # predicts class, box and direction
The four modules are assembled and iterated over in pcdet/models/detectors/pointpillar.py:
for cur_module in self.module_list:  # iterate over the required modules
    print('Traversing required modules:', '\n', str(cur_module))
Traversing required modules:
PillarVFE
(
(pfn_layers): ModuleList(
(0): PFNLayer(
(linear): Linear(in_features=10, out_features=64, bias=False)
(norm): BatchNorm1d(64, eps=0.001, momentum=0.01, affine=True, track_running_stats=True)
)
)
)
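A minimal sketch of what the single PFNLayer above computes: each pillar carries a small set of points with 10 input features; a shared Linear(10, 64) + BatchNorm1d + ReLU is applied per point, then a max over the points yields one 64-dimensional feature per pillar. The class name, shapes and point count below are illustrative assumptions, not pcdet's actual implementation.

import torch
import torch.nn as nn

class TinyPFN(nn.Module):
    def __init__(self, in_ch=10, out_ch=64):
        super().__init__()
        self.linear = nn.Linear(in_ch, out_ch, bias=False)
        self.norm = nn.BatchNorm1d(out_ch, eps=1e-3, momentum=0.01)

    def forward(self, x):                                   # x: (pillars, points, 10)
        x = self.linear(x)                                  # shared MLP applied per point
        x = self.norm(x.permute(0, 2, 1)).permute(0, 2, 1)  # normalize over the channel dim
        x = torch.relu(x)
        return x.max(dim=1)[0]                              # max-pool over points -> (pillars, 64)

pillars = torch.randn(4, 32, 10)   # 4 pillars, 32 points each, 10 features per point
print(TinyPFN()(pillars).shape)    # torch.Size([4, 64])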
Traversing required modules:
PointPillarScatter()
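PointPillarScatter has no learnable parameters, which is why it prints with an empty argument list: it only writes each pillar's 64-dimensional feature into its (y, x) cell of a dense BEV pseudo-image, leaving empty cells at zero. A rough sketch of the idea (the 432 x 496 grid size and the coordinate layout are assumptions, not values read from the printout):

import torch

def scatter_to_bev(pillar_features, coords, nx=432, ny=496, num_ch=64):
    # pillar_features: (P, 64); coords: (P, 2) integer (y, x) grid indices
    canvas = torch.zeros(num_ch, ny * nx)        # empty BEV canvas, flattened
    flat_idx = coords[:, 0] * nx + coords[:, 1]  # row-major cell index of each pillar
    canvas[:, flat_idx] = pillar_features.t()    # drop each pillar feature into its cell
    return canvas.view(num_ch, ny, nx)           # (64, ny, nx) pseudo-image

feats = torch.randn(100, 64)
coords = torch.randint(0, 400, (100, 2))
print(scatter_to_bev(feats, coords).shape)       # torch.Size([64, 496, 432])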
Traversing required modules: the third module is BaseBEVBackbone; before its printout, here is the loop in pcdet/models/backbones_2d/base_bev_backbone.py that builds its convolutional blocks:
for idx in range(num_levels):  # num_levels = 3
    cur_layers = [
        nn.ZeroPad2d(1),
        nn.Conv2d(
            c_in_list[idx], num_filters[idx], kernel_size=3,
            stride=layer_strides[idx], padding=0, bias=False
        ),
        nn.BatchNorm2d(num_filters[idx], eps=1e-3, momentum=0.01),
        nn.ReLU()
    ]
    for k in range(layer_nums[idx]):  # layer_nums = [3, 5, 5]: 0-2, 0-4, 0-4
        cur_layers.extend([
            nn.Conv2d(num_filters[idx], num_filters[idx], kernel_size=3, padding=1, bias=False),
            nn.BatchNorm2d(num_filters[idx], eps=1e-3, momentum=0.01),
            nn.ReLU()
        ])
    self.blocks.append(nn.Sequential(*cur_layers))
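The deblocks shown in the printout below are built by a similar loop of ConvTranspose2d + BatchNorm2d + ReLU that upsamples each block's output back to a common resolution. A sketch based on the printed structure; upsample_strides = [1, 2, 4] and num_upsample_filters = [128, 128, 128] are assumed config values:

import torch.nn as nn

num_filters = [64, 128, 256]           # channels produced by the three blocks
upsample_strides = [1, 2, 4]           # assumed values matching the printed kernel sizes
num_upsample_filters = [128, 128, 128]

deblocks = nn.ModuleList()
for idx, stride in enumerate(upsample_strides):
    deblocks.append(nn.Sequential(
        nn.ConvTranspose2d(num_filters[idx], num_upsample_filters[idx],
                           kernel_size=stride, stride=stride, bias=False),
        nn.BatchNorm2d(num_upsample_filters[idx], eps=1e-3, momentum=0.01),
        nn.ReLU()
    ))
# The three upsampled maps are concatenated along the channel dimension,
# giving 128 * 3 = 384 input channels for the detection head.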
The printed BaseBEVBackbone is as follows:
BaseBEVBackbone(
(blocks): ModuleList(
(0): Sequential(
(0): ZeroPad2d(padding=(1, 1, 1, 1), value=0.0)
(1): Conv2d(64, 64, kernel_size=(3, 3), stride=(2, 2), bias=False)
(2): BatchNorm2d(64, eps=0.001, momentum=0.01, affine=True, track_running_stats=True)
(3): ReLU()
(4): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(5): BatchNorm2d(64, eps=0.001, momentum=0.01, affine=True, track_running_stats=True)
(6): ReLU()
(7): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(8): BatchNorm2d(64, eps=0.001, momentum=0.01, affine=True, track_running_stats=True)
(9): ReLU()
(10): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(11): BatchNorm2d(64, eps=0.001, momentum=0.01, affine=True, track_running_stats=True)
(12): ReLU()
)
(1): Sequential(
(0): ZeroPad2d(padding=(1, 1, 1, 1), value=0.0)
(1): Conv2d(64, 128, kernel_size=(3, 3), stride=(2, 2), bias=False)
(2): BatchNorm2d(128, eps=0.001, momentum=0.01, affine=True, track_running_stats=True)
(3): ReLU()
(4): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(5): BatchNorm2d(128, eps=0.001, momentum=0.01, affine=True, track_running_stats=True)
(6): ReLU()
(7): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(8): BatchNorm2d(128, eps=0.001, momentum=0.01, affine=True, track_running_stats=True)
(9): ReLU()
(10): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(11): BatchNorm2d(128, eps=0.001, momentum=0.01, affine=True, track_running_stats=True)
(12): ReLU()
(13): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(14): BatchNorm2d(128, eps=0.001, momentum=0.01, affine=True, track_running_stats=True)
(15): ReLU()
(16): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(17): BatchNorm2d(128, eps=0.001, momentum=0.01, affine=True, track_running_stats=True)
(18): ReLU()
)
(2): Sequential(
(0): ZeroPad2d(padding=(1, 1, 1, 1), value=0.0)
(1): Conv2d(128, 256, kernel_size=(3, 3), stride=(2, 2), bias=False)
(2): BatchNorm2d(256, eps=0.001, momentum=0.01, affine=True, track_running_stats=True)
(3): ReLU()
(4): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(5): BatchNorm2d(256, eps=0.001, momentum=0.01, affine=True, track_running_stats=True)
(6): ReLU()
(7): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(8): BatchNorm2d(256, eps=0.001, momentum=0.01, affine=True, track_running_stats=True)
(9): ReLU()
(10): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(11): BatchNorm2d(256, eps=0.001, momentum=0.01, affine=True, track_running_stats=True)
(12): ReLU()
(13): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(14): BatchNorm2d(256, eps=0.001, momentum=0.01, affine=True, track_running_stats=True)
(15): ReLU()
(16): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(17): BatchNorm2d(256, eps=0.001, momentum=0.01, affine=True, track_running_stats=True)
(18): ReLU()
)
)
(deblocks): ModuleList(
(0): Sequential(
(0): ConvTranspose2d(64, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)
(1): BatchNorm2d(128, eps=0.001, momentum=0.01, affine=True, track_running_stats=True)
(2): ReLU()
)
(1): Sequential(
(0): ConvTranspose2d(128, 128, kernel_size=(2, 2), stride=(2, 2), bias=False)
(1): BatchNorm2d(128, eps=0.001, momentum=0.01, affine=True, track_running_stats=True)
(2): ReLU()
)
(2): Sequential(
(0): ConvTranspose2d(256, 128, kernel_size=(4, 4), stride=(4, 4), bias=False)
(1): BatchNorm2d(128, eps=0.001, momentum=0.01, affine=True, track_running_stats=True)
(2): ReLU()
)
)
)
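Reading the strides off the printout: the three blocks run at strides 2, 4 and 8 relative to the BEV pseudo-image, and the deblocks bring every branch back to the stride-2 resolution before concatenation. A quick size check, assuming the common 496 x 432 KITTI grid (the grid size is an assumption; it is not part of the printout):

for stride, ch in zip([2, 4, 8], [64, 128, 256]):
    print(f"block output: {ch} x {496 // stride} x {432 // stride}")
# -> 64 x 248 x 216, 128 x 124 x 108, 256 x 62 x 54
# Each deblock maps its branch to 128 x 248 x 216; concatenating the three
# branches yields the 384-channel map consumed by AnchorHeadSingle.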
Traversing required modules:
AnchorHeadSingle(
(cls_loss_func): SigmoidFocalClassificationLoss()
(reg_loss_func): WeightedSmoothL1Loss()
(dir_loss_func): WeightedCrossEntropyLoss()
(conv_cls): Conv2d(384, 18, kernel_size=(1, 1), stride=(1, 1))
(conv_box): Conv2d(384, 42, kernel_size=(1, 1), stride=(1, 1))
(conv_dir_cls): Conv2d(384, 12, kernel_size=(1, 1), stride=(1, 1))
)
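The output widths of the three 1x1 convolutions follow from the anchor layout. A quick check, assuming the standard 3-class KITTI setup with two anchor orientations per class, 7 box regression targets per anchor and 2 direction bins:

num_classes = 3                       # Car, Pedestrian, Cyclist (assumed)
anchors_per_loc = num_classes * 2     # two orientations per class -> 6 anchors per cell
print(anchors_per_loc * num_classes)  # 18 -> conv_cls output channels
print(anchors_per_loc * 7)            # 42 -> conv_box output channels (x, y, z, dx, dy, dz, heading)
print(anchors_per_loc * 2)            # 12 -> conv_dir_cls output channels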
The full PointPillar model assembled from the pointpillar config file:
PointPillar
(
(vfe): PillarVFE(
(pfn_layers): ModuleList(
(0): PFNLayer(
(linear): Linear(in_features=10, out_features=64, bias=False)
(norm): BatchNorm1d(64, eps=0.001, momentum=0.01, affine=True, track_running_stats=True)
)
)
)
(backbone_3d): None # not used: no 3D convolution
(map_to_bev_module): PointPillarScatter()
(pfe): None # not used: no point feature encoder (pcdet/models/backbones_3d/pfe)
(backbone_2d): BaseBEVBackbone(
(blocks): ModuleList(
(0): Sequential(
(0): ZeroPad2d(padding=(1, 1, 1, 1), value=0.0)
(1): Conv2d(64, 64, kernel_size=(3, 3), stride=(2, 2), bias=False)
(2): BatchNorm2d(64, eps=0.001, momentum=0.01, affine=True, track_running_stats=True)
(3): ReLU()
(4): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(5): BatchNorm2d(64, eps=0.001, momentum=0.01, affine=True, track_running_stats=True)
(6): ReLU()
(7): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(8): BatchNorm2d(64, eps=0.001, momentum=0.01, affine=True, track_running_stats=True)
(9): ReLU()
(10): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(11): BatchNorm2d(64, eps=0.001, momentum=0.01, affine=True, track_running_stats=True)
(12): ReLU()
)
(1): Sequential(
(0): ZeroPad2d(padding=(1, 1, 1, 1), value=0.0)
(1): Conv2d(64, 128, kernel_size=(3, 3), stride=(2, 2), bias=False)
(2): BatchNorm2d(128, eps=0.001, momentum=0.01, affine=True, track_running_stats=True)
(3): ReLU()
(4): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(5): BatchNorm2d(128, eps=0.001, momentum=0.01, affine=True, track_running_stats=True)
(6): ReLU()
(7): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(8): BatchNorm2d(128, eps=0.001, momentum=0.01, affine=True, track_running_stats=True)
(9): ReLU()
(10): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(11): BatchNorm2d(128, eps=0.001, momentum=0.01, affine=True, track_running_stats=True)
(12): ReLU()
(13): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(14): BatchNorm2d(128, eps=0.001, momentum=0.01, affine=True, track_running_stats=True)
(15): ReLU()
(16): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(17): BatchNorm2d(128, eps=0.001, momentum=0.01, affine=True, track_running_stats=True)
(18): ReLU()
)
(2): Sequential(
(0): ZeroPad2d(padding=(1, 1, 1, 1), value=0.0)
(1): Conv2d(128, 256, kernel_size=(3, 3), stride=(2, 2), bias=False)
(2): BatchNorm2d(256, eps=0.001, momentum=0.01, affine=True, track_running_stats=True)
(3): ReLU()
(4): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(5): BatchNorm2d(256, eps=0.001, momentum=0.01, affine=True, track_running_stats=True)
(6): ReLU()
(7): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(8): BatchNorm2d(256, eps=0.001, momentum=0.01, affine=True, track_running_stats=True)
(9): ReLU()
(10): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(11): BatchNorm2d(256, eps=0.001, momentum=0.01, affine=True, track_running_stats=True)
(12): ReLU()
(13): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(14): BatchNorm2d(256, eps=0.001, momentum=0.01, affine=True, track_running_stats=True)
(15): ReLU()
(16): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(17): BatchNorm2d(256, eps=0.001, momentum=0.01, affine=True, track_running_stats=True)
(18): ReLU()
)
)
(deblocks): ModuleList(
(0): Sequential(
(0): ConvTranspose2d(64, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)
(1): BatchNorm2d(128, eps=0.001, momentum=0.01, affine=True, track_running_stats=True)
(2): ReLU()
)
(1): Sequential(
(0): ConvTranspose2d(128, 128, kernel_size=(2, 2), stride=(2, 2), bias=False)
(1): BatchNorm2d(128, eps=0.001, momentum=0.01, affine=True, track_running_stats=True)
(2): ReLU()
)
(2): Sequential(
(0): ConvTranspose2d(256, 128, kernel_size=(4, 4), stride=(4, 4), bias=False)
(1): BatchNorm2d(128, eps=0.001, momentum=0.01, affine=True, track_running_stats=True)
(2): ReLU()
)
)
)
(dense_head): AnchorHeadSingle(
(cls_loss_func): SigmoidFocalClassificationLoss()
(reg_loss_func): WeightedSmoothL1Loss()
(dir_loss_func): WeightedCrossEntropyLoss()
(conv_cls): Conv2d(384, 18, kernel_size=(1, 1), stride=(1, 1))
(conv_box): Conv2d(384, 42, kernel_size=(1, 1), stride=(1, 1))
(conv_dir_cls): Conv2d(384, 12, kernel_size=(1, 1), stride=(1, 1))
)
(point_head): None # not used
(roi_head): None # not used
)
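For completeness, the detector's forward pass simply feeds one shared batch_dict through the modules printed above, in order; the None entries are skipped when the module list is built. A simplified sketch of the pattern behind the traversal loop at the top of this section (not pcdet's exact code):

def forward_sketch(module_list, batch_dict):
    # vfe -> map_to_bev -> backbone_2d -> dense_head; each module reads the
    # tensors it needs from batch_dict and writes its outputs back into it
    for cur_module in module_list:
        batch_dict = cur_module(batch_dict)
    return batch_dict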