PyTorch——从零实现图片分类(附完整代码)

更新时间:2023-07-09 12:05:51 阅读: 评论:0

PyTorch——从零实现图片分类(附完整代码)
参考链接
1. /Dive-into-DL-PyTorch/#/chapter03_DL-basics/3.4_softmax-regression
2. /Dive-into-DL-PyTorch/#/chapter03_DL-basics/
3.5_fashion-mnist
3. /Dive-into-DL-PyTorch/#/chapter03_DL-basics/3.7_softmax-regression-pytorch 分类问题
和线性回归不同,softmax回归的输出单元从一个变成了多个,且引入了softmax运算使输出更适合离散值的预测和训练。交叉熵损失函数
softmax运算将输出变成一个合法的类别预测分布,可以更方便地与离散标签计算误差。实际上,真实标签也可以用类别分布表达:对于样本 $i$,我们构造向量 $\boldsymbol{y}^{(i)} \in \mathbb{R}^q$,使得其第 $y^{(i)}$(样本 $i$ 类别的离散数值)个元素为1,其余为0。这样我们的训练目标就可以设为使预测概率分布 $\hat{\boldsymbol{y}}^{(i)}$ 尽可能接近真实的标签概率分布 $\boldsymbol{y}^{(i)}$。
我们可以像线性回归那样使用平方损失函数 $\|\hat{\boldsymbol{y}}^{(i)}-\boldsymbol{y}^{(i)}\|^2/2$。然而,平方损失过于严格,想要预测分类结果正确,我们其实并不需要预测概率完全等于标签概率。
改善上述问题的一个方法是使用更适合衡量两个概率分布差异的测量函数。其中,交叉熵(cross entropy)是一个常用的衡量方法:
$$H\left(\boldsymbol{y}^{(i)}, \hat{\boldsymbol{y}}^{(i)}\right) = -\sum_{j=1}^{q} y_j^{(i)} \log \hat{y}_j^{(i)},$$
其中带下标 $j$ 的 $y_j^{(i)}$ 是向量 $\boldsymbol{y}^{(i)}$ 中非0即1的元素。在上式中,我们知道向量 $\boldsymbol{y}^{(i)}$ 中只有第 $y^{(i)}$ 个元素为1,其余全为0,于是 $H\left(\boldsymbol{y}^{(i)}, \hat{\boldsymbol{y}}^{(i)}\right) = -\log \hat{y}_{y^{(i)}}^{(i)}$。也就是说,交叉熵只关心对正确类别的预测概率,因为只要其值足够大,就可以确保分类结果正确。当然,遇到一个样本有多个标签时,例如图像里含有不止一个物体时,我们并不能做这一步简化。但即便对于这种情况,交叉熵同样只关心对图像中出现的物体类别的预测概率。
假设训练数据集的样本数为 $n$,交叉熵损失函数定义为
$$\ell(\boldsymbol{\Theta}) = \frac{1}{n} \sum_{i=1}^{n} H\left(\boldsymbol{y}^{(i)}, \hat{\boldsymbol{y}}^{(i)}\right),$$
其中 $\boldsymbol{\Theta}$ 代表模型参数。同样地,如果每个样本只有一个标签,那么交叉熵损失可以简写成 $\ell(\boldsymbol{\Theta}) = -\frac{1}{n}\sum_{i=1}^{n} \log \hat{y}_{y^{(i)}}^{(i)}$。从另一个角度来看,最小化 $\ell(\boldsymbol{\Theta})$ 等价于最大化 $\exp(-n\ell(\boldsymbol{\Theta})) = \prod_{i=1}^{n} \hat{y}_{y^{(i)}}^{(i)}$,即最小化交叉熵损失函数等价于最大化训练数据集所有标签类别的联合预测概率。加载数据
使用的是“Fashion-MNIST”数据集,运行后会自动下载。参数transform = transforms.ToTensor()使所有数据转换为Tensor,如果不进行转换则返回的是PIL图片。transforms.ToTensor()将尺寸为 (H x W x C) 且数据位于[0, 255]的PIL图片或者数据类型为np.uint8的NumPy数组转换为尺寸为(C x H x W)且数据类型为torch.float32且位于[0.0, 1.0]的Tensor。
(抽取时被打乱的公式,重排如下)对于样本 $i$,$\boldsymbol{y}^{(i)} \in \mathbb{R}^q$;平方损失为 $\|\hat{\boldsymbol{y}}^{(i)} - \boldsymbol{y}^{(i)}\|^2/2$;交叉熵为
$$H\left(\boldsymbol{y}^{(i)}, \hat{\boldsymbol{y}}^{(i)}\right) = -\sum_{j=1}^{q} y_j^{(i)} \log \hat{y}_j^{(i)} = -\log \hat{y}_{y^{(i)}}^{(i)};$$
交叉熵损失函数为
$$\ell(\boldsymbol{\Theta}) = \frac{1}{n}\sum_{i=1}^{n} H\left(\boldsymbol{y}^{(i)}, \hat{\boldsymbol{y}}^{(i)}\right) = -\frac{1}{n}\sum_{i=1}^{n} \log \hat{y}_{y^{(i)}}^{(i)},$$
且满足 $\exp(-n\ell(\boldsymbol{\Theta})) = \prod_{i=1}^{n} \hat{y}_{y^{(i)}}^{(i)}$。
# Download (if needed) and load Fashion-MNIST; ToTensor() converts each
# PIL image (H x W x C, uint8 in [0, 255]) to a float32 tensor
# (C x H x W, values in [0.0, 1.0]).
mnist_train = torchvision.datasets.FashionMNIST(
    root='~/Datasets/FashionMNIST', train=True, download=True,
    transform=transforms.ToTensor())
mnist_test = torchvision.datasets.FashionMNIST(
    root='~/Datasets/FashionMNIST', train=False, download=True,
    transform=transforms.ToTensor())
def load_data_fashion_mnist(mnist_train, mnist_test, batch_size):
    """Wrap the train/test datasets in mini-batch DataLoaders.

    Args:
        mnist_train: training dataset (anything torch DataLoader accepts).
        mnist_test: test dataset.
        batch_size: mini-batch size for both loaders.

    Returns:
        (train_iter, test_iter): training loader shuffles each epoch,
        test loader does not.
    """
    if sys.platform.startswith('win'):
        num_workers = 0  # multiprocessing DataLoader workers are fragile on Windows
    else:
        num_workers = 4
    train_iter = torch.utils.data.DataLoader(
        mnist_train, batch_size=batch_size, shuffle=True,
        num_workers=num_workers)
    test_iter = torch.utils.data.DataLoader(
        mnist_test, batch_size=batch_size, shuffle=False,
        num_workers=num_workers)
    return train_iter, test_iter
batch_size = 256
# Build the mini-batch iterators over the datasets loaded above.
train_iter, test_iter = load_data_fashion_mnist(mnist_train, mnist_test, batch_size)
构建模型
简单地使用一层全连接层作为模型,接上softmax得到输出。
使用矩阵方式实现
num_inputs = 784   # 28 * 28 pixels, flattened
num_outputs = 10   # ten Fashion-MNIST classes

# Parameters of the single linear layer; W gets a small Gaussian init so
# initial logits are near zero, b starts at zero. Both tracked by autograd.
W = torch.tensor(np.random.normal(0, 0.01, (num_inputs, num_outputs)),
                 dtype=torch.float, requires_grad=True)
b = torch.zeros(num_outputs, dtype=torch.float, requires_grad=True)

def net(X):
    """Flatten X to (batch, 784), apply the affine map, then softmax."""
    return softmax(torch.mm(X.view((-1, num_inputs)), W) + b)
使用pytorch的全连接层实现
num_inputs = 784   # 28 * 28 pixels, flattened
num_outputs = 10   # ten Fashion-MNIST classes

class LinearNet(nn.Module):
    """Softmax regression as a single fully-connected layer.

    Returns raw logits; the softmax is applied inside
    nn.CrossEntropyLoss during training.
    """

    def __init__(self, num_inputs, num_outputs):
        super(LinearNet, self).__init__()
        self.linear = nn.Linear(num_inputs, num_outputs)

    def forward(self, x):
        # x shape: (batch, 1, 28, 28) -> flatten to (batch, 784)
        y = self.linear(x.view(x.shape[0], -1))
        return y

net = LinearNet(num_inputs, num_outputs)
### softmax实现
def softmax(X):
    """Row-wise softmax: map each row of X to a probability distribution.

    NOTE(review): X.exp() can overflow for large logits; torch.softmax
    subtracts the row max for numerical stability and is preferred in
    production code.
    """
    X_exp = X.exp()
    partition = X_exp.sum(dim=1, keepdim=True)
    return X_exp / partition  # broadcasting: each row divided by its own sum
定义损失函数
自定义实现交叉熵
def cross_entropy(y_hat, y):
    """Per-sample cross entropy for hard labels.

    Args:
        y_hat: (batch, classes) tensor of predicted probabilities.
        y: (batch,) tensor of integer class labels.

    Returns:
        (batch, 1) tensor: -log of the probability assigned to the true class.
    """
    return -torch.log(y_hat.gather(1, y.view(-1, 1)))
pytorch提供的交叉熵
这是⼀个包括softmax运算和交叉熵损失计算的函数
# nn.CrossEntropyLoss fuses softmax and cross-entropy into one
# numerically-stable operation; it expects raw logits, not probabilities.
loss = nn.CrossEntropyLoss()
定义优化方法
自定义随机梯度下降
def sgd(params, lr, batch_size):
    """Mini-batch stochastic gradient descent, updating in place.

    Each parameter is moved by -lr * grad / batch_size. Writing through
    param.data keeps the update itself out of the autograd graph.
    """
    for param in params:
        param.data -= lr * param.grad / batch_size
pytorch提供的优化器
# Built-in SGD over the nn.Module's parameters (the concise alternative
# to the hand-written sgd above).
optimizer = torch.optim.SGD(net.parameters(), lr=0.1)
开始训练
num_epochs, lr = 5, 0.1

def train(net, train_iter, test_iter, loss, num_epochs, batch_size,
          params=None, lr=None, optimizer=None):
    """Generic training loop shared by the scratch and nn/optim versions.

    Pass params + lr to use the hand-written sgd, or pass optimizer to
    use a torch.optim optimizer. Prints average loss, train accuracy and
    test accuracy after each epoch.
    """
    for epoch in range(num_epochs):
        train_l_sum, train_acc_sum, n = 0.0, 0.0, 0
        for X, y in train_iter:
            y_hat = net(X)
            l = loss(y_hat, y).sum()
            # Zero gradients before backward (they accumulate otherwise).
            if optimizer is not None:
                optimizer.zero_grad()
            elif params is not None and params[0].grad is not None:
                for param in params:
                    param.grad.data.zero_()
            l.backward()
            if optimizer is None:
                sgd(params, lr, batch_size)
            else:
                optimizer.step()
            train_l_sum += l.item()
            train_acc_sum += (y_hat.argmax(dim=1) == y).sum().item()
            n += y.shape[0]
        test_acc = evaluate_accuracy(test_iter, net)
        print('epoch %d, loss %.4f, train acc %.3f, test acc %.3f'
              % (epoch + 1, train_l_sum / n, train_acc_sum / n, test_acc))
完整代码
使用autograd实现
import torch
import torchvision
import numpy as np
import torchvision.transforms as transforms
import sys
from matplotlib import pyplot as plt
# Download (if needed) and load Fashion-MNIST as tensors in [0.0, 1.0].
mnist_train = torchvision.datasets.FashionMNIST(
    root='~/Datasets/FashionMNIST', train=True, download=True,
    transform=transforms.ToTensor())
mnist_test = torchvision.datasets.FashionMNIST(
    root='~/Datasets/FashionMNIST', train=False, download=True,
    transform=transforms.ToTensor())
def get_fashion_mnist_labels(labels):
    """Map numeric class indices (0-9) to Fashion-MNIST label strings."""
    text_labels = ['t-shirt', 'trouser', 'pullover', 'dress', 'coat',
                   'sandal', 'shirt', 'sneaker', 'bag', 'ankle boot']
    return [text_labels[int(i)] for i in labels]
def load_data_fashion_mnist(mnist_train, mnist_test, batch_size):
    """Wrap the train/test datasets in mini-batch DataLoaders.

    Training loader shuffles each epoch; the test loader does not.
    """
    if sys.platform.startswith('win'):
        num_workers = 0  # multiprocessing DataLoader workers are fragile on Windows
    else:
        num_workers = 4
    train_iter = torch.utils.data.DataLoader(
        mnist_train, batch_size=batch_size, shuffle=True,
        num_workers=num_workers)
    test_iter = torch.utils.data.DataLoader(
        mnist_test, batch_size=batch_size, shuffle=False,
        num_workers=num_workers)
    return train_iter, test_iter
def show_fashion_mnist(images, labels):
    """Plot a row of 28x28 image tensors with their text labels."""
    _, figs = plt.subplots(1, len(images), figsize=(12, 12))
    for f, img, lbl in zip(figs, images, labels):
        f.imshow(img.view((28, 28)).numpy())
        f.set_title(lbl)
        # Hide axis ticks: they carry no information for image thumbnails.
        f.axes.get_xaxis().set_visible(False)
        f.axes.get_yaxis().set_visible(False)
    plt.show()
batch_size = 256
# Build the mini-batch iterators over the datasets loaded above.
train_iter, test_iter = load_data_fashion_mnist(mnist_train, mnist_test, batch_size)
num_inputs = 784   # 28 * 28 pixels, flattened
num_outputs = 10   # ten Fashion-MNIST classes

# Parameters of the linear layer; small Gaussian init for W, zeros for b.
W = torch.tensor(np.random.normal(0, 0.01, (num_inputs, num_outputs)),
                 dtype=torch.float, requires_grad=True)
b = torch.zeros(num_outputs, dtype=torch.float, requires_grad=True)
def softmax(X):
    """Row-wise softmax: each row of X becomes a probability distribution.

    NOTE(review): no max-subtraction, so very large logits can overflow;
    fine for this tutorial's small-weight setup.
    """
    X_exp = X.exp()
    partition = X_exp.sum(dim=1, keepdim=True)
    return X_exp / partition  # broadcast divide: row / row-sum
def net(X):
    """Flatten X to (batch, 784), apply the affine map W, b, then softmax."""
    return softmax(torch.mm(X.view((-1, num_inputs)), W) + b)
def cross_entropy(y_hat, y):
    """Per-sample cross entropy: -log prob of the true class.

    y_hat is (batch, classes) probabilities, y is (batch,) integer labels;
    returns a (batch, 1) tensor.
    """
    return -torch.log(y_hat.gather(1, y.view(-1, 1)))
def evaluate_accuracy(data_iter, net):
    """Fraction of samples in data_iter that net classifies correctly.

    net(X) is expected to return (batch, classes) scores; the argmax
    over classes is the prediction.
    """
    acc_sum, n = 0.0, 0
    for X, y in data_iter:
        acc_sum += (net(X).argmax(dim=1) == y).float().sum().item()
        n += y.shape[0]
    return acc_sum / n
def sgd(params, lr, batch_size):
    """Mini-batch SGD: param -= lr * grad / batch_size, in place.

    param.data is written directly so the update is not tracked by autograd.
    """
    for param in params:
        param.data -= lr * param.grad / batch_size
num_epochs, lr = 5, 0.1

def train(net, train_iter, test_iter, loss, num_epochs, batch_size,
          params=None, lr=None, optimizer=None):
    """Training loop usable with either the custom sgd (params + lr)
    or a torch.optim optimizer. Prints per-epoch metrics."""
    for epoch in range(num_epochs):
        train_l_sum, train_acc_sum, n = 0.0, 0.0, 0
        for X, y in train_iter:
            y_hat = net(X)
            l = loss(y_hat, y).sum()
            # Zero gradients before backward (they accumulate otherwise).
            if optimizer is not None:
                optimizer.zero_grad()
            elif params is not None and params[0].grad is not None:
                for param in params:
                    param.grad.data.zero_()
            l.backward()
            if optimizer is None:
                sgd(params, lr, batch_size)
            else:
                optimizer.step()
            train_l_sum += l.item()
            train_acc_sum += (y_hat.argmax(dim=1) == y).sum().item()
            n += y.shape[0]
        test_acc = evaluate_accuracy(test_iter, net)
        print('epoch %d, loss %.4f, train acc %.3f, test acc %.3f'
              % (epoch + 1, train_l_sum / n, train_acc_sum / n, test_acc))
# Train with the custom loss and hand-written sgd, then visualize a few
# test predictions against the ground truth.
train(net, train_iter, test_iter, cross_entropy, num_epochs, batch_size, [W, b], lr)

# iterator.next() is Python 2 API (removed from DataLoader iterators);
# use the builtin next().
X, y = next(iter(test_iter))
true_labels = get_fashion_mnist_labels(y.numpy())
pred_labels = get_fashion_mnist_labels(net(X).argmax(dim=1).numpy())
titles = [true + '\n' + pred for true, pred in zip(true_labels, pred_labels)]
show_fashion_mnist(X[0:9], titles[0:9])
简洁实现
import torch
from torch import nn
from torch.nn import init
import torchvision
import numpy as np
import torchvision.transforms as transforms
import sys
from matplotlib import pyplot as plt
# Download (if needed) and load Fashion-MNIST as tensors in [0.0, 1.0].
mnist_train = torchvision.datasets.FashionMNIST(
    root='~/Datasets/FashionMNIST', train=True, download=True,
    transform=transforms.ToTensor())
mnist_test = torchvision.datasets.FashionMNIST(
    root='~/Datasets/FashionMNIST', train=False, download=True,
    transform=transforms.ToTensor())
def get_fashion_mnist_labels(labels):
    """Map numeric class indices (0-9) to Fashion-MNIST label strings."""
    text_labels = ['t-shirt', 'trouser', 'pullover', 'dress', 'coat',
                   'sandal', 'shirt', 'sneaker', 'bag', 'ankle boot']
    return [text_labels[int(i)] for i in labels]
def load_data_fashion_mnist(mnist_train, mnist_test, batch_size):
    """Wrap the train/test datasets in mini-batch DataLoaders.

    Training loader shuffles each epoch; the test loader does not.
    """
    if sys.platform.startswith('win'):
        num_workers = 0  # multiprocessing DataLoader workers are fragile on Windows
    else:
        num_workers = 4
    train_iter = torch.utils.data.DataLoader(
        mnist_train, batch_size=batch_size, shuffle=True,
        num_workers=num_workers)
    test_iter = torch.utils.data.DataLoader(
        mnist_test, batch_size=batch_size, shuffle=False,
        num_workers=num_workers)
    return train_iter, test_iter
def show_fashion_mnist(images, labels):
_, figs = plt.subplots(1,len(images), figsize=(12,12))
for f, img, lbl in zip(figs, images, labels):
f.imshow(im
g.view((28,28)).numpy())
f.t_title(lbl)

本文发布于:2023-07-09 12:05:51,感谢您对本站的认可!

本文链接:https://www.wtabcd.cn/fanwen/fan/90/171946.html

版权声明:本站内容均来自互联网,仅供演示用,请勿用于商业和其他非法用途。如果侵犯了您的权益请与我们联系,我们将在24小时内删除。

标签:交叉   预测   类别   概率   标签   函数   损失   样本
相关文章
留言与评论(共有 0 条评论)
   
验证码:
Copyright ©2019-2022 Comsenz Inc.Powered by © 专利检索| 网站地图