Implementing conv (convolution), depthwise conv (channel-wise convolution), and batchnorm (batch normalization) in Python


Neural networks are now used in virtually every industry, yet when deploying them in real engineering projects, or when validating results, it is often necessary to implement the common building blocks yourself: convolution, pooling, batch normalization, and so on. Even in an era where everything is a library call, I have found in my own work that understanding and implementing these basic modules is well worth the effort; it deepens your understanding and makes deployment easier. The basics of neural networks are not covered here; we go straight to the implementations.
Convolution: the input is laid out as [N, chin, dim_inh, dim_inw], the weights as [chout, chin, kernelh, kernelw], and the output as [N, chout, dim_outh, dim_outw]. The code also covers the case where the input, weights, and output are all flattened to two dimensions: x: [N, chin * dim_inh * dim_inw], w: [chout, chin * kernelh * kernelw], result: [N, chout * dim_outh * dim_outw].
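In both variants the output spatial size follows the standard convolution arithmetic, which is exactly what the dim_im_out_y / dim_im_out_x lines in the code below compute:

dim_out = (dim_in - kernel + 2 * padding) // stride + 1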
# 4-D implementation
import numpy as np
import torch
import torch.nn as nn

class predict_cnn(nn.Module):
    def __init__(self, stride=1, padding=0):
        super(predict_cnn, self).__init__()
        self.padding = padding
        self.stride = stride

    def forward(self, x, w, b):
        # convert everything to NumPy; the rest of the routine is pure NumPy
        w = w.detach().numpy()
        b = b.detach().numpy()
        x = x.detach().numpy()
        ch_im_in = w.shape[1]
        ch_im_out = w.shape[0]
        kernel = w.shape[2]
        dim_im_in_y = x.shape[2]
        dim_im_in_x = x.shape[3]
        dim_im_out_y = int((dim_im_in_y - kernel + 2 * self.padding) / self.stride) + 1
        dim_im_out_x = int((dim_im_in_x - kernel + 2 * self.padding) / self.stride) + 1
        result = np.zeros((ch_im_out, dim_im_out_y, dim_im_out_x))
        for co in range(ch_im_out):
            for oy in range(dim_im_out_y):
                for ox in range(dim_im_out_x):
                    conv_out = b[co].copy()  # start from the bias
                    for m in range(kernel):
                        for n in range(kernel):
                            row = self.stride * oy + m - self.padding
                            col = self.stride * ox + n - self.padding
                            # positions inside the zero-padding contribute nothing
                            if 0 <= row < dim_im_in_y and 0 <= col < dim_im_in_x:
                                for ci in range(ch_im_in):
                                    conv_out += x[0, ci, row, col] * w[co, ci, m, n]
                    result[co][oy][ox] = conv_out
        return result
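A quick way to validate the implementation is to compare it against PyTorch's own convolution. A minimal sketch (the tensor sizes and tolerance are arbitrary choices for illustration, not from any fixed recipe):

import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F

torch.manual_seed(0)
x = torch.randn(1, 3, 8, 8)
conv = nn.Conv2d(3, 4, kernel_size=3, stride=1, padding=1)

# PyTorch reference: [1, 4, 8, 8]
ref = F.conv2d(x, conv.weight, conv.bias, stride=1, padding=1)
# hand-rolled version: [4, 8, 8] (batch dimension dropped)
out = predict_cnn(stride=1, padding=1)(x, conv.weight, conv.bias)

print(np.allclose(out, ref.detach().numpy()[0], atol=1e-5))  # expect True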
# 2-D implementation
class predict_cnn_dim2(nn.Module):
    def __init__(self, stride=1, padding=0):
        super(predict_cnn_dim2, self).__init__()
        self.padding = padding
        self.stride = stride

    def forward(self, x, w, b):
        w = w.detach().numpy()
        b = b.detach().numpy()
        x = x.detach().numpy()
        ch_im_in = w.shape[1]
        ch_im_out = w.shape[0]
        kernel = w.shape[2]
        dim_im_in_y = x.shape[2]
        dim_im_in_x = x.shape[3]
        dim_im_out_y = int((dim_im_in_y - kernel + 2 * self.padding) / self.stride) + 1
        dim_im_out_x = int((dim_im_in_x - kernel + 2 * self.padding) / self.stride) + 1
        result = np.zeros((ch_im_out, dim_im_out_y * dim_im_out_x))
        # flatten weights to [chout, chin*kernel*kernel] and input to [chin, dim_inh*dim_inw]
        w = w.reshape(w.shape[0], -1)
        x = x.squeeze(0)
        x = x.reshape(x.shape[0], -1)
        for co in range(ch_im_out):
            for oy in range(dim_im_out_y):
                for ox in range(dim_im_out_x):
                    conv_out = b[co].copy()
                    co_idx = oy * dim_im_out_x + ox  # flattened output index
                    for m in range(kernel):
                        for n in range(kernel):
                            row = self.stride * oy + m - self.padding
                            col = self.stride * ox + n - self.padding
                            if 0 <= row < dim_im_in_y and 0 <= col < dim_im_in_x:
                                ci_idx = row * dim_im_in_x + col  # flattened input index
                                for ci in range(ch_im_in):
                                    w_idx = ci * kernel * kernel + m * kernel + n
                                    conv_out += x[ci, ci_idx] * w[co, w_idx]
                    result[co, co_idx] = conv_out
        return result
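The 2-D variant is essentially a hand-rolled im2col. For comparison, the same result can be produced with one matrix multiply after gathering the input patches into columns — a sketch assuming N = 1, a square kernel, and NumPy inputs (the helper name conv2d_im2col is my own, not from any library):

def conv2d_im2col(x, w, b, stride=1, padding=0):
    # x: [1, chin, H, W], w: [chout, chin, k, k], b: [chout], all NumPy arrays
    x = np.pad(x, ((0, 0), (0, 0), (padding, padding), (padding, padding)))
    _, ci, H, W = x.shape
    co, _, k, _ = w.shape
    oh = (H - k) // stride + 1
    ow = (W - k) // stride + 1
    # gather every k x k patch into one column: [chin*k*k, oh*ow]
    cols = np.empty((ci * k * k, oh * ow))
    for oy in range(oh):
        for ox in range(ow):
            patch = x[0, :, oy * stride:oy * stride + k, ox * stride:ox * stride + k]
            cols[:, oy * ow + ox] = patch.ravel()
    # the convolution collapses to [chout, chin*k*k] @ [chin*k*k, oh*ow] + bias
    return w.reshape(co, -1) @ cols + b[:, None]

The returned array has the same [chout, dim_outh * dim_outw] layout as predict_cnn_dim2's result.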
Depthwise (channel-wise) convolution: this requires the number of input channels to equal the number of output channels. The weights have shape [chout, 1, kernelh, kernelw], the input [N, chin, dim_inh, dim_inw], and the output [N, chout, dim_outh, dim_outw], with chin = chout; during inference N is typically 1. The code below targets inference: when N = 1, swapping dimensions 0 and 1 of the input makes depthwise convolution equivalent to a regular convolution with chin = 1 and N equal to the original channel count. The implementation:
# 4-D implementation
class dpw_cnn(nn.Module):
    def __init__(self, stride=1, padding=0):
        super(dpw_cnn, self).__init__()
        self.padding = padding
        self.stride = stride

    def forward(self, x, w, b):
        w = w.detach().numpy()
        b = b.detach().numpy()
        x = x.detach().numpy()
        ch_im_in = w.shape[1]   # always 1 for depthwise weights [chout, 1, k, k]
        ch_im_out = w.shape[0]
        kernel = w.shape[2]
        dim_im_in_y = x.shape[2]
        dim_im_in_x = x.shape[3]
        dim_im_out_y = int((dim_im_in_y - kernel + 2 * self.padding) / self.stride) + 1
        dim_im_out_x = int((dim_im_in_x - kernel + 2 * self.padding) / self.stride) + 1
        result = np.zeros((ch_im_out, dim_im_out_y, dim_im_out_x))
        for co in range(ch_im_out):
            for oy in range(dim_im_out_y):
                for ox in range(dim_im_out_x):
                    conv_out = b[co].copy()
                    for m in range(kernel):
                        for n in range(kernel):
                            row = self.stride * oy + m - self.padding
                            col = self.stride * ox + n - self.padding
                            if 0 <= row < dim_im_in_y and 0 <= col < dim_im_in_x:
                                for ci in range(ch_im_in):
                                    # each output channel sees only its own input channel
                                    conv_out += x[0, co, row, col] * w[co, ci, m, n]
                    result[co][oy][ox] = conv_out
        return result
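Depthwise convolution corresponds to nn.Conv2d with groups equal to the number of channels, which gives a convenient reference to check against (again a sketch with arbitrarily chosen sizes):

torch.manual_seed(0)
x = torch.randn(1, 4, 8, 8)
dw = nn.Conv2d(4, 4, kernel_size=3, padding=1, groups=4)  # weight: [4, 1, 3, 3]

ref = dw(x)  # [1, 4, 8, 8]
out = dpw_cnn(stride=1, padding=1)(x, dw.weight, dw.bias)  # [4, 8, 8]

print(np.allclose(out, ref.detach().numpy()[0], atol=1e-5))  # expect True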
# 2-D implementation
class dpw_cnn_dim2(nn.Module):
    def __init__(self, stride=1, padding=0):
        super(dpw_cnn_dim2, self).__init__()
        self.padding = padding
        self.stride = stride

    def forward(self, x, w, b):
        w = w.detach().numpy()
        b = b.detach().numpy()
        x = x.detach().numpy()
        ch_im_in = w.shape[1]
        ch_im_out = w.shape[0]
        kernel = w.shape[2]
        dim_im_in_y = x.shape[2]
        dim_im_in_x = x.shape[3]
        dim_im_out_y = int((dim_im_in_y - kernel + 2 * self.padding) / self.stride) + 1
        dim_im_out_x = int((dim_im_in_x - kernel + 2 * self.padding) / self.stride) + 1
        result = np.zeros((ch_im_out, dim_im_out_y * dim_im_out_x))
        w = w.reshape(w.shape[0], -1)   # [chout, kernel*kernel]
        x = x.squeeze()                 # drop the batch dimension (N = 1)
        x = x.reshape(x.shape[0], -1)   # [chin, dim_inh*dim_inw]
        for co in range(ch_im_out):
            for oy in range(dim_im_out_y):
                for ox in range(dim_im_out_x):
                    conv_out = b[co].copy()
                    co_idx = oy * dim_im_out_x + ox
                    for m in range(kernel):
                        for n in range(kernel):
                            row = self.stride * oy + m - self.padding
                            col = self.stride * ox + n - self.padding
                            if 0 <= row < dim_im_in_y and 0 <= col < dim_im_in_x:
                                ci_idx = row * dim_im_in_x + col
                                for ci in range(ch_im_in):
                                    w_idx = ci * kernel * kernel + m * kernel + n
                                    conv_out += x[co, ci_idx] * w[co, w_idx]
                    result[co, co_idx] = conv_out
        return result
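The flattened output of dpw_cnn_dim2 can be reshaped back to [chout, dim_outh, dim_outw] and should match the 4-D version exactly (sizes again arbitrary):

x = torch.randn(1, 4, 8, 8)
dw = nn.Conv2d(4, 4, kernel_size=3, padding=1, groups=4)

out4d = dpw_cnn(stride=1, padding=1)(x, dw.weight, dw.bias)       # [4, 8, 8]
out2d = dpw_cnn_dim2(stride=1, padding=1)(x, dw.weight, dw.bias)  # [4, 64]
print(np.allclose(out4d, out2d.reshape(4, 8, 8)))  # expect True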
Fully connected layer: this is the most straightforward of the three and easy to understand; it is simply a matrix multiplication plus a bias. The implementation:
class predict_fc(nn.Module):
    def __init__(self):
        super(predict_fc, self).__init__()

    def forward(self, x, w, b):
        w = w.detach().numpy()
        b = b.detach().numpy()
        x = x.detach().numpy()
        # one matrix multiply plus the bias
        x = np.matmul(w, x.T).squeeze() + b
        return x
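And a check against nn.Linear (sketch; sizes arbitrary):

fc = nn.Linear(16, 5)
x = torch.randn(1, 16)

ref = fc(x)                                # [1, 5]
out = predict_fc()(x, fc.weight, fc.bias)  # [5]
print(np.allclose(out, ref.detach().numpy().squeeze(), atol=1e-5))  # expect True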
Future posts will walk through more of the basic building blocks of neural networks; this post is meant as groundwork, and the batch-normalization layer will be covered later. I hope this is helpful; corrections are welcome. Here is an example:
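The sketch below chains predict_cnn and predict_fc into a tiny end-to-end forward pass and compares the result with the equivalent PyTorch model (all layer sizes are illustrative choices):

torch.manual_seed(0)
conv = nn.Conv2d(1, 4, kernel_size=3)   # an 8x8 input gives a 6x6 feature map
fc = nn.Linear(4 * 6 * 6, 10)
x = torch.randn(1, 1, 8, 8)

# reference forward pass in PyTorch
ref = fc(conv(x).reshape(1, -1))

# the same pass through the hand-rolled modules
feat = predict_cnn()(x, conv.weight, conv.bias)                 # NumPy, [4, 6, 6]
feat = torch.from_numpy(feat.reshape(1, -1).astype(np.float32))
out = predict_fc()(feat, fc.weight, fc.bias)                    # NumPy, [10]

print(np.allclose(out, ref.detach().numpy().squeeze(), atol=1e-4))  # expect True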
