The Basic Training Workflow for Image Classification, Using MobileNet as an Example


These notes on the basic training workflow for image classification, using MobileNet as an example, are summarized from a Bilibili uploader's tutorial.
1. Decide whether to use the GPU or the CPU
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
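To confirm which device was selected, you can print it; a one-line check (the message format is my own):

print("using {} device.".format(device))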
2. Define the data preprocessing transforms
data_transform = {
    "train": transforms.Compose([transforms.RandomResizedCrop(224),
                                 transforms.RandomHorizontalFlip(),
                                 transforms.ToTensor(),
                                 transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])]),
    "val": transforms.Compose([transforms.Resize(256),
                               transforms.CenterCrop(224),
                               transforms.ToTensor(),
                               transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])}
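To see what the pipeline produces, the training transform can be applied to a single image; a minimal sketch (the image path is hypothetical):

from PIL import Image

img = Image.open("daisy_example.jpg")   # hypothetical sample image
img_tensor = data_transform["train"](img)
print(img_tensor.shape)                 # torch.Size([3, 224, 224])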
3. Define the dataset directories
data_root = os.path.abspath(os.path.join(os.getcwd(), "../.."))
image_path = os.path.join(data_root, "data_set", "flower_data")
train_dataset = datasets.ImageFolder(root=os.path.join(image_path, "train"),
                                     transform=data_transform["train"])
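ImageFolder derives the class labels from the sub-directory names, so the code above assumes a layout roughly like this (the class folders come from the flower_photos dataset; the sketch is illustrative):

flower_data/
    train/
        daisy/
        dandelion/
        roses/
        sunflower/
        tulips/
    val/
        daisy/
        ...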
4. Get the dataset's class_to_idx mapping, swap keys and values, and write the result to a JSON file
train_num = len(train_dataset)

# {'daisy': 0, 'dandelion': 1, 'roses': 2, 'sunflower': 3, 'tulips': 4}
flower_list = train_dataset.class_to_idx
cla_dict = dict((val, key) for key, val in flower_list.items())
# write dict into json file
json_str = json.dumps(cla_dict, indent=4)
with open('class_indices.json', 'w') as json_file:
    json_file.write(json_str)
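At inference time the saved file can be read back to map a predicted index to a class name; a minimal sketch, assuming class_indices.json sits in the working directory:

with open('class_indices.json', 'r') as f:
    class_indict = json.load(f)
print(class_indict["0"])  # e.g. 'daisy' (JSON keys are strings)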
5. Build the data loaders
train_loader = torch.utils.data.DataLoader(train_dataset,
                                           batch_size=batch_size, shuffle=True,
                                           num_workers=0)

validate_dataset = datasets.ImageFolder(root=os.path.join(image_path, "val"),
                                        transform=data_transform["val"])
val_num = len(validate_dataset)
validate_loader = torch.utils.data.DataLoader(validate_dataset,
                                              batch_size=batch_size, shuffle=False,
                                              num_workers=0)
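Before training, it can be worth pulling one batch to verify the shapes; a minimal sketch (batch_size is whatever value was defined earlier):

images, labels = next(iter(train_loader))
print(images.shape)  # torch.Size([batch_size, 3, 224, 224])
print(labels.shape)  # torch.Size([batch_size])

Note that num_workers=0 loads data in the main process; on Linux it is common to raise it, e.g. to min(os.cpu_count(), 8).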
6. Instantiate the network
net = mobilenet_v3_large(num_classes=5)
7. Load the pretrained weights and drop the classifier weights
# load pretrain weights
# download url: https://download.pytorch.org/models/mobilenet_v3_large-8738ca79.pth
model_weight_path = "./mobilenet_v3_large.pth"
assert os.path.exists(model_weight_path), "file {} does not exist.".format(model_weight_path)
pre_weights = torch.load(model_weight_path, map_location=device)

# delete classifier weights
pre_dict = {k: v for k, v in pre_weights.items() if net.state_dict()[k].numel() == v.numel()}
missing_keys, unexpected_keys = net.load_state_dict(pre_dict, strict=False)
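It is worth checking that only the classifier weights were skipped; a quick inspection (the exact key names depend on the torchvision version):

print("missing keys:", missing_keys)        # expected: only classifier.* entries
print("unexpected keys:", unexpected_keys)  # expected: empty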
8. Freeze some or all of the weights
# freeze features weights
for param in net.features.parameters():
    param.requires_grad = False
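One line the excerpt does not show, but which the training loop below relies on, is moving the network itself to the chosen device; placing it right after freezing is my assumption:

net.to(device)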
9. Define the loss function and the optimizer
# define loss function
loss_function = nn.CrossEntropyLoss()

# construct an optimizer over the parameters that still require gradients
params = [p for p in net.parameters() if p.requires_grad]
optimizer = optim.Adam(params, lr=0.0001)
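To verify that freezing worked, you can count the trainable parameters; a minimal sketch:

trainable = sum(p.numel() for p in net.parameters() if p.requires_grad)
print("trainable parameters:", trainable)  # only the classifier head should remain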
10. Start training
best_acc = 0.0
# path for saving the trained weights
save_path = './MobileNetV3.pth'
train_steps = len(train_loader)
for epoch in range(epochs):
    # train
    net.train()
    running_loss = 0.0
    train_bar = tqdm(train_loader)
    for step, data in enumerate(train_bar):
        images, labels = data
        optimizer.zero_grad()
        logits = net(images.to(device))
        loss = loss_function(logits, labels.to(device))
        loss.backward()
        optimizer.step()

        # print statistics
        running_loss += loss.item()

        train_bar.desc = "train epoch[{}/{}] loss:{:.3f}".format(epoch + 1,
                                                                 epochs,
                                                                 loss)
11. Validate and save the trained weights (this code continues inside the epoch loop from step 10)
    # validate
    net.eval()
    acc = 0.0  # accumulate the number of correct predictions per epoch
    with torch.no_grad():
        val_bar = tqdm(validate_loader)
        for val_data in val_bar:
            val_images, val_labels = val_data
            outputs = net(val_images.to(device))
            # loss = loss_function(outputs, test_labels)
            predict_y = torch.max(outputs, dim=1)[1]
            acc += torch.eq(predict_y, val_labels.to(device)).sum().item()

            val_bar.desc = "valid epoch[{}/{}]".format(epoch + 1,
                                                       epochs)
    val_accurate = acc / val_num
    print('[epoch %d] train_loss: %.3f  val_accuracy: %.3f' %
          (epoch + 1, running_loss / train_steps, val_accurate))

    if val_accurate > best_acc:
        best_acc = val_accurate
        torch.save(net.state_dict(), save_path)
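Once training finishes, the saved weights and the class-index file can be combined for single-image prediction; a minimal sketch (the image path is hypothetical, and the preprocessing reuses the "val" transform from step 2):

net = mobilenet_v3_large(num_classes=5)
net.load_state_dict(torch.load('./MobileNetV3.pth', map_location=device))
net.to(device)
net.eval()

with open('class_indices.json', 'r') as f:
    class_indict = json.load(f)

img = data_transform["val"](Image.open("test_flower.jpg"))  # hypothetical image
with torch.no_grad():
    output = net(img.unsqueeze(0).to(device))  # add a batch dimension
    predict_y = torch.max(output, dim=1)[1].item()
print("predicted class:", class_indict[str(predict_y)])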
Appendix: the MobileNetV3 network code
from typing import Callable, List, Optional

import torch
from torch import nn, Tensor
from torch.nn import functional as F
from functools import partial


def _make_divisible(ch, divisor=8, min_ch=None):
    """
    This function is taken from the original tf repo.
    It ensures that all layers have a channel number that is divisible by 8.
    It can be seen here:
    https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py
    """
    if min_ch is None:
        min_ch = divisor
    new_ch = max(min_ch, int(ch + divisor / 2) // divisor * divisor)
    # Make sure that rounding down does not go down by more than 10%.
    if new_ch < 0.9 * ch:
        new_ch += divisor
    return new_ch
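
# A few illustrative values (my own examples, not from the original post):
#   _make_divisible(32) -> 32  (already a multiple of 8)
#   _make_divisible(37) -> 40  (rounded to the nearest multiple of 8)
#   _make_divisible(10) -> 16  (8 would lose more than 10%, so bump up one step)
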
class ConvBNActivation(nn.Sequential):
    def __init__(self,
                 in_planes: int,
                 out_planes: int,
                 kernel_size: int = 3,
                 stride: int = 1,
                 groups: int = 1,
                 norm_layer: Optional[Callable[..., nn.Module]] = None,
                 activation_layer: Optional[Callable[..., nn.Module]] = None):
        padding = (kernel_size - 1) // 2
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        if activation_layer is None:
            activation_layer = nn.ReLU6
        super(ConvBNActivation, self).__init__(nn.Conv2d(in_channels=in_planes,
                                                         out_channels=out_planes,
                                                         kernel_size=kernel_size,
                                                         stride=stride,
                                                         padding=padding,
                                                         groups=groups,
                                                         bias=False),
                                               norm_layer(out_planes),
                                               activation_layer(inplace=True))

# attention mechanism (Squeeze-and-Excitation)
class SqueezeExcitation(nn.Module):
    def __init__(self, input_c: int, squeeze_factor: int = 4):
        super(SqueezeExcitation, self).__init__()
        squeeze_c = _make_divisible(input_c // squeeze_factor, 8)
        self.fc1 = nn.Conv2d(input_c, squeeze_c, 1)
        self.fc2 = nn.Conv2d(squeeze_c, input_c, 1)

    def forward(self, x: Tensor) -> Tensor:
        scale = F.adaptive_avg_pool2d(x, output_size=(1, 1))
        scale = self.fc1(scale)
        scale = F.relu(scale, inplace=True)
        scale = self.fc2(scale)
        scale = F.hardsigmoid(scale, inplace=True)
        return scale * x
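
# Shape walk-through (for reference): x is (N, C, H, W); the pooled scale is
# (N, C, 1, 1), squeezed to roughly C/4 channels by fc1 and restored by fc2,
# then hard-sigmoid turns it into per-channel gates in (0, 1) that broadcast
# over H and W in `scale * x`.
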
class InvertedResidualConfig:
    def __init__(self,
                 input_c: int,
                 kernel: int,
                 expanded_c: int,
                 out_c: int,
                 use_se: bool,
                 activation: str,
                 stride: int,
                 width_multi: float):
        self.input_c = self.adjust_channels(input_c, width_multi)
        self.kernel = kernel
        self.expanded_c = self.adjust_channels(expanded_c, width_multi)
        self.out_c = self.adjust_channels(out_c, width_multi)
        self.use_se = use_se
        self.use_hs = activation == "HS"  # whether to use the h-swish activation
        self.stride = stride

    @staticmethod
    def adjust_channels(channels: int, width_multi: float):
        return _make_divisible(channels * width_multi, 8)

class InvertedResidual(nn.Module):
    def __init__(self,
                 cnf: InvertedResidualConfig,
                 norm_layer: Callable[..., nn.Module]):
        super(InvertedResidual, self).__init__()

        if cnf.stride not in [1, 2]:
            raise ValueError("illegal stride value.")

        self.use_res_connect = (cnf.stride == 1 and cnf.input_c == cnf.out_c)

        layers: List[nn.Module] = []
        activation_layer = nn.Hardswish if cnf.use_hs else nn.ReLU

        # expand
        if cnf.expanded_c != cnf.input_c:
            layers.append(ConvBNActivation(cnf.input_c,
                                           cnf.expanded_c,
                                           kernel_size=1,
                                           norm_layer=norm_layer,
                                           activation_layer=activation_layer))

        # depthwise
        layers.append(ConvBNActivation(cnf.expanded_c,
                                       cnf.expanded_c,
                                       kernel_size=cnf.kernel,
                                       stride=cnf.stride,
                                       groups=cnf.expanded_c,
                                       norm_layer=norm_layer,
                                       activation_layer=activation_layer))

        if cnf.use_se:
            layers.append(SqueezeExcitation(cnf.expanded_c))

        # project
        layers.append(ConvBNActivation(cnf.expanded_c,
                                       cnf.out_c,
                                       kernel_size=1,
                                       norm_layer=norm_layer,
                                       activation_layer=nn.Identity))

        self.block = nn.Sequential(*layers)
        self.out_channels = cnf.out_c
        self.is_strided = cnf.stride > 1

    def forward(self, x: Tensor) -> Tensor:
        result = self.block(x)
        if self.use_res_connect:
            result += x

        return result

class MobileNetV3(nn.Module):
    def __init__(self,
                 inverted_residual_setting: List[InvertedResidualConfig],
                 last_channel: int,
                 num_classes: int = 1000,
                 block: Optional[Callable[..., nn.Module]] = None,
                 norm_layer: Optional[Callable[..., nn.Module]] = None):
        super(MobileNetV3, self).__init__()

        if not inverted_residual_setting:
            raise ValueError("The inverted_residual_setting should not be empty.")
        elif not (isinstance(inverted_residual_setting, List) and
                  all([isinstance(s, InvertedResidualConfig) for s in inverted_residual_setting])):
            raise TypeError("The inverted_residual_setting should be List[InvertedResidualConfig]")

        if block is None:
            block = InvertedResidual

        if norm_layer is None:
            norm_layer = partial(nn.BatchNorm2d, eps=0.001, momentum=0.01)

        layers: List[nn.Module] = []

        # building first layer
        firstconv_output_c = inverted_residual_setting[0].input_c
        layers.append(ConvBNActivation(3,
                                       firstconv_output_c,
                                       kernel_size=3,
                                       stride=2,
                                       norm_layer=norm_layer,
                                       activation_layer=nn.Hardswish))
        # ... the source post is cut off here; the remaining inverted residual
        # blocks, the classifier head, and the forward pass follow the standard
        # torchvision MobileNetV3 implementation.
