深度学习-ResNet18模型分类CIFAR10数据集详解
简介:
首先,ResNet是何恺明大神在2015年提出的,该模型提出后立刻引起轰动。因为在传统卷积神经网络中,当深度越来越深,就会出现梯度消失或者梯度爆炸等问题,从而使准确率降低。
结构理解:
残差块的短路部分被称作Shortcut Connection,单个残差块的期待输出为H(x),H(x)是由传统卷积层的输出F(x)加短路部分携带的初始数据x求得。
特征变换:
为了更直观地显示特征图的大小在执行过程中是怎样变换的,所以在这里详细列了一下。
代码实现:
resnet.py页面:
'''ResNet-18 image classification for CIFAR-10 with PyTorch.
Author 'Sun-qian'.
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
"""每⼀个残差块"""
class ResidualBlock(nn.Module): #继承nn.Module
def __init__(lf, inchannel, outchannel, stride=1): #__init()中必须⾃⼰定义可学习的参数
super(ResidualBlock, lf).__init__() #调⽤nn.Module的构造函数
lf.left = nn.Sequential( #左边,指残差块中按顺序执⾏的普通卷积⽹络
nn.Conv2d(inchannel, outchannel, kernel_size=3, stride=stride, padding=1, bias=Fal),
nn.BatchNorm2d(outchannel), #最常⽤于卷积⽹络中(防⽌梯度消失或爆炸)
nn.ReLU(inplace=True), #implace=True是把输出直接覆盖到输⼊中,节省内存
nn.Conv2d(outchannel, outchannel, kernel_size=3, stride=1, padding=1, bias=Fal),
nn.BatchNorm2d(outchannel)
)
lf.shortcut = nn.Sequential()
if stride != 1 or inchannel != outchannel: #只有步长为1并且输⼊通道和输出通道相等特征图⼤⼩才会⼀样,如果不⼀样,需要在合并之前进⾏统⼀ lf.shortcut = nn.Sequential(
nn.Conv2d(inchannel, outchannel, kernel_size=1, stride=stride, bias=Fal),
nn.BatchNorm2d(outchannel)
)
def forward(lf, x): #实现前向传播过程
out = lf.left(x) #先执⾏普通卷积神经⽹络
out += lf.shortcut(x) #再加上原始x数据
out = F.relu(out)
return out
"""整个卷积⽹络,包含若⼲个残差块"""
class ResNet(nn.Module):
def __init__(lf, ResidualBlock, num_class=10):
super(ResNet, lf).__init__()
lf.inchannel = 64
nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=Fal),
nn.BatchNorm2d(64), #设置参数为卷积的输出通道数
nn.ReLU(),
)
lf.layer1 = lf.make_layer(ResidualBlock, 64, 2, stride=1) #⼀个残差单元,每个单元中国包含2个残差块
lf.layer2 = lf.make_layer(ResidualBlock, 128, 2, stride=2)
lf.layer3 = lf.make_layer(ResidualBlock, 256, 2, stride=2)
lf.layer4 = lf.make_layer(ResidualBlock, 512, 2, stride=2)
lf.fc = nn.Linear(512, num_class) #全连接层(1,512)-->(1,10)
def make_layer(lf, block, channels, num_blocks, stride):
strides = [stride] + [1] * (num_blocks - 1) #将该单元中所有残差块的步数做成⼀个⼀个向量,第⼀个残差块的步数由传⼊参数指定,后边num_blocks-1个残差块 layers = []
for stride in strides: #对每个残差块的步数进⾏迭代
layers.append(block(lf.inchannel, channels, stride)) #执⾏每⼀个残差块,定义向量存储每个残差块的输出值
lf.inchannel = channels
return nn.Sequential(*layers) #如果*加在了实参上,代表的是将向量拆成⼀个⼀个的元素
def forward(lf, x):
out = lf.conv1(x)
out = lf.layer1(out)
out = lf.layer2(out)
out = lf.layer3(out)
out = lf.layer4(out)
out = F.avg_pool2d(out, 4) #平均池化,4*4的局部特征取平均值,最后欸(512,1,1)
out = out.view(out.size(0), -1) #转换为(1,512)的格式
out = lf.fc(out)
return out
def ResNet18():
    """Build a ResNet-18 made of ResidualBlock units (10 classes)."""
    return ResNet(ResidualBlock)
CIFAR_Test.py页面:
import torch
import torch.nn as nn
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms
import argparse

from resnet import ResNet18

# Use the GPU when available, otherwise fall back to the CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Command-line arguments, Linux-style.
parser = argparse.ArgumentParser(description='PyTorch CIFAR10 Training')
parser.add_argument('--outf', default='./model/', help='folder to output images and model checkpoints')  # output folder
parser.add_argument('--net', default='./model/Resnet18.pth', help="path to net (to continue training)")  # resume checkpoint
args = parser.parse_args()

# Hyperparameters.
EPOCH = 135        # total passes over the training set
pre_epoch = 0      # epochs already completed (for resuming)
BATCH_SIZE = 128   # mini-batch size
LR = 0.1           # initial learning rate

# Data preparation: augment the training set, only normalize the test set.
transform_train = transforms.Compose([
    transforms.RandomCrop(32, padding=4),  # pad by 4, then randomly crop back to 32x32
    transforms.RandomHorizontalFlip(),     # flip with probability 0.5
    transforms.ToTensor(),
    transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),  # per-channel R,G,B mean/std
])
transform_test = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])

trainset = torchvision.datasets.CIFAR10(root='./data', train=True, download=False, transform=transform_train)
# Batches are shuffled each epoch.
trainloader = torch.utils.data.DataLoader(trainset, batch_size=BATCH_SIZE, shuffle=True, num_workers=2)
testset = torchvision.datasets.CIFAR10(root='./data', train=False, download=False, transform=transform_test)
testloader = torch.utils.data.DataLoader(testset, batch_size=100, shuffle=False, num_workers=2)

# CIFAR-10 labels.  NOTE: 'class' is a Python keyword and cannot be a variable name.
classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')

# Model definition - ResNet.
net = ResNet18().to(device)

# Loss and optimizer.
criterion = nn.CrossEntropyLoss()  # cross-entropy, standard for multi-class classification
optimizer = optim.SGD(net.parameters(), lr=LR, momentum=0.9, weight_decay=5e-4)  # momentum SGD + L2 weight decay
# 训练
if __name__ == "__main__":
best_acc = 85 #2 初始化best test accuracy
print("Start Training, Resnet-18!") # 定义遍历数据集的次数
with open("", "w") as f:
with open("", "w")as f2:
for epoch in range(pre_epoch, EPOCH):
print('\nEpoch: %d' % (epoch + 1))
sum_loss = 0.0
correct = 0.0
total = 0.0
for i, data in enumerate(trainloader, 0):
# 准备数据
length = len(trainloader) #获取训练数据总长度
inputs, labels = data
inputs, labels = (device), (device)
<_grad()
# forward + backward
outputs = net(inputs)
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
# 每训练1个batch打印⼀次loss和准确率
sum_loss += loss.item() #损失加和(越来越⼩)
_, predicted = torch.max(outputs.data, 1) #输出这⼀批次128的对应分类
total += labels.size(0)
correct += predicted.eq(labels.data).cpu().sum() #判断这⼀批次的正确个数,并进⾏加和
print('[epoch:%d, iter:%d] Loss: %.03f | Acc: %.3f%% '
% (epoch + 1, (i + 1 + epoch * length), sum_loss / (i + 1), 100. * correct / total))
f2.write('%03d %05d |Loss: %.03f | Acc: %.3f%% '
% (epoch + 1, (i + 1 + epoch * length), sum_loss / (i + 1), 100. * correct / total))
f2.write('\n')
f2.flush()
# 每训练完⼀个epoch测试⼀下准确率
print("Waiting Test!")
print("Waiting Test!")
_grad(): #⾥边的数据不需要计算梯度,不需要进⾏反向传播
correct = 0
total = 0
for data in testloader:
net.eval() #测试模型时使⽤该语句,因为模型已经训练完毕,参数不会再更改,所以直接计算训练时所有batch的均值和⽅差
images, labels = data
images, labels = (device), (device)
outputs = net(images)
# 取得分最⾼的那个类 (outputs.data的索引号)
_, predicted = torch.max(outputs.data, 1) # 取得分最⾼的那个类 (outputs.data的索引号)
total += labels.size(0)
correct += (predicted == labels).sum()
print('测试分类准确率为:%.3f%%' % (100 * correct / total))
acc = 100. * correct / total
# 将每次测试结果实时写⼊⽂件中
print('')
torch.save(net.state_dict(), '%s/net_%03d.pth' % (args.outf, epoch + 1))
f.write("EPOCH=%03d,Accuracy= %.3f%%" % (epoch + 1, acc))
f.write('\n')
f.flush()
# 记录最佳测试分类准确率并写⼊⽂件中
if acc > best_acc:
f3 = open("", "w")
f3.write("EPOCH=%d,best_acc= %.3f%%" % (epoch + 1, acc))
f3.clo()
best_acc = acc
print("Training Finished, TotalEPOCH=%d" % EPOCH)
运行结果:
运行结果已删除,因为昨天刚知道自己修改的代码实际上是错误的,所以把自己修改的代码替换成了未修改的,运行结果同学在服务器上跑着大约91%。