前言
该篇文章记述从零如何实现CNN,以及LeNet对于之前数据集分类的提升效果。
从零实现卷积核
import torch
def conv2d(X, k):
    """2-D cross-correlation of input X with kernel k.

    Slides k over every valid position of X and stores the elementwise
    product-sum, producing a (H-h+1, W-w+1) output.
    """
    h, w = k.shape
    out_h = X.shape[0] - h + 1
    out_w = X.shape[1] - w + 1
    Y = torch.zeros((out_h, out_w))
    for i in range(out_h):
        for j in range(out_w):
            window = X[i:i + h, j:j + w]
            Y[i, j] = (window * k).sum()
    return Y
# Worked example: a 3x3 ramp input against a 2x2 kernel.
X = torch.arange(9, dtype=torch.float32).reshape(3, 3)
k = torch.tensor([[0., 1.],
                  [2., 3.]])
conv2d(X, k)
卷积层
from torch import nn
class Conv2D(nn.Module):
    """Minimal 2-D convolution layer built on the hand-written conv2d helper.

    Holds a learnable kernel of shape ``kernel_size`` plus a scalar bias.
    """

    def __init__(self, kernel_size):
        # BUG FIX: the original called `super.__init__()` (missing
        # parentheses), which raises at instantiation; nn.Module must be
        # initialised before parameters are registered.
        super().__init__()
        self.weight = nn.Parameter(torch.rand(kernel_size))
        self.bias = nn.Parameter(torch.zeros(1))

    def forward(self, x):
        # conv2d is the cross-correlation function defined earlier in the file.
        return conv2d(x, self.weight) + self.bias
验证卷积层对于图像的检测作用
# Build a 6x8 image of ones with a band of zeros in columns 2..5.
# The 1x2 kernel [1, -1] responds only where neighbouring columns differ,
# i.e. at the vertical edges of the band.
x = torch.ones((6, 8))
x[:, 2:6] = 0
x
k = torch.tensor([[1.0, -1.0]])
y = conv2d(x, k)
y
很明显,这个卷积核提取到了垂直方向上的边缘特征。
# On the transposed image the edges run horizontally, so this kernel
# produces all zeros — it only detects vertical edges.
conv2d(x.t(),k)
而对转置后的图像,这个卷积核检测不到水平方向的边缘特征。
学习卷积核
我们可以让卷积核自己学习里面的参数以达到对不同图像提取的作用
# Learn the edge-detection kernel from the (x, y) pair by gradient descent.
# NOTE: this rebinds the name `conv2d` from the helper function to an
# nn.Conv2d layer with a learnable 1x2 kernel and no bias.
conv2d = nn.Conv2d(1, 1, kernel_size=(1, 2), bias=False)

# nn.Conv2d expects (batch, channel, height, width).
x = x.reshape((1, 1, x.shape[0], x.shape[1]))
y = y.reshape((1, 1, 6, 7))

lr = 3e-2
for i in range(10):
    y_hat = conv2d(x)
    l = (y_hat - y) ** 2
    conv2d.zero_grad()
    l.sum().backward()
    # Manual SGD step on the kernel weights.
    conv2d.weight.data[:] -= lr * conv2d.weight.grad
    print(f"第{i}轮,loss为{l.sum()}")

conv2d.weight.data
填充
def comp_conv2d(conv2d, x):
    """Apply layer `conv2d` to a 2-D input, hiding the batch/channel dims.

    A leading (1, 1) is prepended as batch size and channel count, and the
    output is squeezed back down to its (height, width) shape.
    """
    x = x.reshape((1, 1) + x.shape)
    y = conv2d(x)
    return y.reshape(y.shape[2:])
# A 3x3 kernel with padding 1 keeps the 8x8 spatial size.
conv2d = nn.Conv2d(1, 1, kernel_size=3, padding=1)
x = torch.rand(size=(8, 8))
comp_conv2d(conv2d, x).shape

# An asymmetric (5, 3) kernel with matching (2, 1) padding also preserves it.
conv2d = nn.Conv2d(1, 1, kernel_size=(5, 3), padding=(2, 1))
x = torch.rand(size=(8, 8))
comp_conv2d(conv2d, x).shape
步幅
# Stride 2 halves each spatial dimension: 8x8 -> 4x4.
conv2d = nn.Conv2d(1, 1, kernel_size=(3, 3), padding=1, stride=2)
x = torch.rand(size=(8, 8))
comp_conv2d(conv2d, x).shape
多通道
from d2l import torch as d2l
def corr2d_multi_in(X, K):
    """Multi-input-channel cross-correlation.

    Correlates each input channel of X with the matching slice of K and
    sums the per-channel results into a single 2-D output.
    """
    parts = [d2l.corr2d(x_ch, k_ch) for x_ch, k_ch in zip(X, K)]
    return sum(parts)
# Random multi-channel example: four (2, 3) input channels, each paired
# with a (1, 3) kernel slice.
x = torch.randn(size=(4, 2, 3))
k = torch.randn(size=(4, 1, 3))
corr2d_multi_in(x, k)
多输出通道
def corr2d_multi_in_out(X, K):
    """Multi-output-channel cross-correlation.

    K is a stack of per-output-channel kernels; each one runs a full
    multi-input correlation and the results are stacked along dim 0.
    """
    outputs = [corr2d_multi_in(X, kernel) for kernel in K]
    return torch.stack(outputs, 0)
# Build a 3-output-channel kernel bank from shifted copies of k.
K = torch.stack((k, k + 1, k + 2), 0)
K.shape
corr2d_multi_in_out(x, K)
1x1卷积
def corr2d_multi_in_out_1x1(X, K):
    """1x1 multi-channel cross-correlation expressed as a matrix product.

    A 1x1 convolution only mixes channels, so flattening the spatial dims
    reduces it to (c_o, c_i) @ (c_i, h*w), reshaped back to (c_o, h, w).
    """
    c_i, h, w = X.shape
    c_o = K.shape[0]
    flat_in = X.reshape((c_i, h * w))
    mix = K.reshape((c_o, c_i))
    flat_out = torch.matmul(mix, flat_in)
    return flat_out.reshape((c_o, h, w))
# Sanity check: the matmul form agrees with the general routine on 1x1 kernels.
X = torch.normal(0, 1, (3, 3, 3))
K = torch.normal(0, 1, (2, 3, 1, 1))
Y1 = corr2d_multi_in_out_1x1(X, K)
Y2 = corr2d_multi_in_out(X, K)
Y1 == Y2
汇聚层
def pool2d(x, pool_size, mode='max'):
    """Stride-1 2-D pooling over a single-channel input.

    Args:
        x: 2-D tensor to pool.
        pool_size: (p_h, p_w) pooling window size.
        mode: 'max' for max pooling, 'avg' for average pooling.

    Returns:
        Tensor of shape (H - p_h + 1, W - p_w + 1).
    """
    p_h, p_w = pool_size
    # BUG FIX: the original body read the module-level global `X` instead of
    # the parameter `x`, so the argument was silently ignored.
    Y = torch.zeros((x.shape[0] - p_h + 1, x.shape[1] - p_w + 1))
    for i in range(Y.shape[0]):
        for j in range(Y.shape[1]):
            window = x[i:i + p_h, j:j + p_w]
            if mode == 'max':
                Y[i, j] = window.max()
            elif mode == 'avg':
                Y[i, j] = window.mean()
    return Y
# Pooling demo on a 3x3 ramp, in both max and average modes.
X = torch.arange(9, dtype=torch.float32).reshape(3, 3)
pool2d(X, (2, 2))
pool2d(X, (2, 2), 'avg')
LeNet
LeNet是最早的卷积神经网络之一。根据我的测试,这个模型在我的数据集上的效果比MLP提高了1%以上。在这段时间里,我也发现了原有数据集在分类上存在问题,所以重新制作了一份。在这份新数据集上,随着数据量的提升以及模型的修改,准确率达到了99.7%,且无过拟合现象。
原始的LeNet
from torch import nn
# Classic LeNet layout (sigmoid activations, average pooling) with 9 output
# classes. The 16*3*5 flatten size implies 28x20-ish inputs — TODO confirm:
# 28x20 -> conv(pad 2) 28x20 -> pool 14x10 -> conv(k5) 10x6 -> pool 5x3.
net = nn.Sequential(
    nn.Conv2d(1, 6, kernel_size=5, padding=2), nn.Sigmoid(),
    nn.AvgPool2d(kernel_size=2, stride=2),
    nn.Conv2d(6, 16, kernel_size=5), nn.Sigmoid(),
    nn.AvgPool2d(kernel_size=2, stride=2),
    nn.Flatten(),
    nn.Linear(16 * 3 * 5, 120), nn.Sigmoid(),
    nn.Linear(120, 84), nn.Sigmoid(),
    nn.Linear(84, 9),
)


def init_weight(m):
    """Xavier-initialise every Linear/Conv2d layer in the network."""
    if type(m) == nn.Linear or type(m) == nn.Conv2d:
        nn.init.xavier_uniform_(m.weight)


net.apply(init_weight)
测试结果
我忘记截图了,效果达到了99%以上,同样的数据集在MLP上是98%
改进后的LeNet
我将平均池化层改成了最大池化层
# LeNet variant: identical topology to the original above, but with max
# pooling in place of average pooling.
net = nn.Sequential(
    nn.Conv2d(1, 6, kernel_size=5, padding=2), nn.Sigmoid(),
    nn.MaxPool2d(kernel_size=2, stride=2),
    nn.Conv2d(6, 16, kernel_size=5), nn.Sigmoid(),
    nn.MaxPool2d(kernel_size=2, stride=2),
    nn.Flatten(),
    nn.Linear(16 * 3 * 5, 120), nn.Sigmoid(),
    nn.Linear(120, 84), nn.Sigmoid(),
    nn.Linear(84, 9),
)


def init_weight(m):
    """Xavier-initialise every Linear/Conv2d layer in the network."""
    if type(m) == nn.Linear or type(m) == nn.Conv2d:
        nn.init.xavier_uniform_(m.weight)


net.apply(init_weight)
训练修改
我在训练过程中添加了如下逻辑:当test的loss达到新低时,保存pt和onnx模型,用于后续推理。
# Training loop: track per-epoch train loss/accuracy, evaluate on the test
# set, and snapshot the best model (lowest test loss) as both .pt and ONNX.
# NOTE(review): train_iter, test_iter, loss_fn, optimer and best_model_path
# must be defined earlier — they are not visible in this file.
epochs_num = 100
train_len = len(train_iter.dataset)
all_acc = []
all_loss = []
test_all_acc = []
shape = None
# FIX: `best_test_loss` was read below but never initialised in this file;
# start at +inf so the first epoch always saves a checkpoint.
best_test_loss = float('inf')

for epoch in range(epochs_num):
    acc = 0
    loss = 0
    for x, y in train_iter:
        hat_y = net(x)
        l = loss_fn(hat_y, y)
        # FIX: accumulate a detached Python float; the original `loss += l`
        # kept every batch's autograd graph alive for the whole epoch.
        loss += l.item()
        optimer.zero_grad()
        l.backward()
        optimer.step()
        acc += (hat_y.argmax(1) == y).sum().item()
    all_acc.append(acc / train_len)
    all_loss.append(loss)

    # Evaluation pass — no gradients needed.
    test_acc = 0
    test_loss = 0
    test_len = len(test_iter.dataset)
    with torch.no_grad():
        for x, y in test_iter:
            shape = x.shape  # remembered for the ONNX dummy input below
            hat_y = net(x)
            test_loss += loss_fn(hat_y, y).item()
            test_acc += (hat_y.argmax(1) == y).sum().item()
    test_all_acc.append(test_acc / test_len)
    print(f'{epoch}的test的acc{test_acc/test_len}')

    # Save the model whenever the test loss reaches a new minimum.
    if test_loss < best_test_loss:
        best_test_loss = test_loss
        torch.save(net, best_model_path)
        dummy_input = torch.randn(shape)
        torch.onnx.export(net, dummy_input, "./models/LeNet5.onnx", opset_version=11)
        print(f'Saved better model with Test Loss: {best_test_loss:.4f}')
损失函数可视化
# Plot the per-epoch training loss curve.
plt.plot(range(1,epochs_num+1),all_loss,'.-',label='train_loss')
# Annotate the final loss value next to the last point.
plt.text(epochs_num, all_loss[-1], f'{all_loss[-1]:.4f}', fontsize=12, verticalalignment='bottom')
准确率可视化
# Plot training accuracy, annotate its final value, then overlay test accuracy.
plt.plot(range(1,epochs_num+1),all_acc,'-',label='train_acc')
plt.text(epochs_num, all_acc[-1], f'{all_acc[-1]:.4f}', fontsize=12, verticalalignment='bottom')
plt.plot(range(1,epochs_num+1),test_all_acc,'-.',label='test_acc')
plt.legend()
预测结果
import numpy as np
with torch.no_grad():
    # Visualise the first few test images alongside true vs predicted labels.
    all_num = 5
    index = 1
    plt.figure(figsize=(12, 5))
    for i, label in zip(test_data_path, test_labels):
        if index <= all_num:
            img = cv2.imread(i)
            input_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            img = cv2.cvtColor(input_img, cv2.COLOR_BGR2RGB)
            # Add a trailing channel dimension -> (H, W, 1).
            input_img = np.expand_dims(input_img, axis=2)
            input_img = transforms.ToTensor()(input_img)
            # Add a batch dimension -> (1, 1, H, W); presumably (1, 1, 28, 20).
            input_img = input_img.unsqueeze(0)
            print(input_img.shape)
            result = net(input_img).argmax(1)
            plt.subplot(1, all_num, index)
            plt.imshow(img)
            plt.title(f'true{label},predict{result.detach().numpy()}')
            plt.axis("off")
            index += 1
总结
数据集收集过程中遇到了部分麻烦,数据集还不够完整。