The Most Detailed PyTorch ConvLSTM Code Walkthrough
# -*- coding:utf-8 -*-
"""
作者:Refrain_ouc
⽇期:2020.10.29
"""
import torch.nn as nn
import torch
class ConvLSTMCell(nn.Module):
    def __init__(self, input_dim, hidden_dim, kernel_size, bias):
        # input_dim is the number of input channels fed to this cell at each time step
        # hidden_dim is the number of hidden channels of each layer, e.g. 64 for the first
        # layer and 128 for the second and third
        # kernel_size is the convolution kernel size
        super(ConvLSTMCell, self).__init__()
        self.input_dim = input_dim
        self.hidden_dim = hidden_dim
        self.kernel_size = kernel_size
        # padding is chosen so the spatial size is unchanged after the convolution
        self.padding = kernel_size[0] // 2, kernel_size[1] // 2
        self.bias = bias
        self.conv = nn.Conv2d(in_channels=self.input_dim + self.hidden_dim,
                              out_channels=4 * self.hidden_dim,  # an LSTM cell has four gates, so four times the hidden channels
                              kernel_size=self.kernel_size,
                              padding=self.padding,
                              bias=self.bias)
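        # To make the channel bookkeeping concrete (illustrative numbers, not from the post):
        # with input_dim=3 and hidden_dim=16, self.conv sees 3 + 16 = 19 input channels
        # (the frame and the previous hidden state concatenated along dim=1) and produces
        # 4 * 16 = 64 output channels, i.e. one 16-channel block per gate (i, f, o, g).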
    def forward(self, input_tensor, cur_state):
        # input_tensor has shape (batch_size, channel, height, width); there is no time_step dimension here
        # cur_state has shape (batch_size, hidden_dim, height, width); it is the state returned by init_hidden
        h_cur, c_cur = cur_state
        combined = torch.cat([input_tensor, h_cur], dim=1)  # concatenate along channel axis
        # unlike a linear layer, the conv accepts multi-dimensional input as long as the channel count matches
        combined_conv = self.conv(combined)
        # split the 4*hidden_dim output channels into the four gates
        cc_i, cc_f, cc_o, cc_g = torch.split(combined_conv, self.hidden_dim, dim=1)
        i = torch.sigmoid(cc_i)
        f = torch.sigmoid(cc_f)
        o = torch.sigmoid(cc_o)
        g = torch.tanh(cc_g)
        c_next = f * c_cur + i * g       # next cell state
        h_next = o * torch.tanh(c_next)  # next hidden state
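        # In equation form (with * denoting 2-D convolution over the concatenated [x_t, h_{t-1}]
        # and ⊙ elementwise multiplication), the lines above compute:
        #   i_t = sigmoid(W_i * [x_t, h_{t-1}] + b_i)
        #   f_t = sigmoid(W_f * [x_t, h_{t-1}] + b_f)
        #   o_t = sigmoid(W_o * [x_t, h_{t-1}] + b_o)
        #   g_t = tanh(W_g * [x_t, h_{t-1}] + b_g)
        #   c_t = f_t ⊙ c_{t-1} + i_t ⊙ g_t
        #   h_t = o_t ⊙ tanh(c_t)
        # where W_i, W_f, W_o, W_g are the four channel blocks of the single self.conv kernel.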
        return h_next, c_next
    def init_hidden(self, batch_size, image_size):
        height, width = image_size
        return (torch.zeros(batch_size, self.hidden_dim, height, width, device=self.conv.weight.device),
                torch.zeros(batch_size, self.hidden_dim, height, width, device=self.conv.weight.device))
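

# A minimal sketch of driving a single ConvLSTMCell by hand, assuming toy shapes
# (8 clips of 5 frames, 3 channels, 32x32); the helper name _demo_convlstm_cell is
# illustrative only. The ConvLSTM class below automates exactly this time loop.
def _demo_convlstm_cell():
    cell = ConvLSTMCell(input_dim=3, hidden_dim=16, kernel_size=(3, 3), bias=True)
    x_seq = torch.rand(8, 5, 3, 32, 32)                  # (batch, time, channel, height, width)
    h, c = cell.init_hidden(batch_size=8, image_size=(32, 32))
    for t in range(x_seq.size(1)):                       # unroll manually over the 5 time steps
        h, c = cell(input_tensor=x_seq[:, t], cur_state=[h, c])
    return h, c                                          # each of shape (8, 16, 32, 32)
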
class ConvLSTM(nn.Module):
"""
Parameters:
input_dim: Number of channels in input
hidden_dim: Number of hidden channels
hidden_dim: Number of hidden channels
kernel_size: Size of kernel in convolutions
num_layers: Number of LSTM layers stacked on each other
batch_first: Whether or not dimension 0 is the batch or not
bias: Bias or no bias in Convolution
return_all_layers: Return the list of computations for all layers
Note: Will do same padding.
Input:
A tensor of size B, T, C, H, W or T, B, C, H, W
Output:
A tuple of two lists of length num_layers (or length 1 if return_all_layers is Fal).
0 - layer_output_list is the list of lists of length T of each output
1 - last_state_list is the list of last statesalteco
each element of the list is a tuple (h, c) for hidden state and memory
Example:
>> x = torch.rand((32, 10, 64, 128, 128))
>> convlstm = ConvLSTM(64, 16, 3, 1, True, True, Fal)
>> _, last_states = convlstm(x)
>> h = last_states[0][0] # 0 for layer index, 0 for h index
"""
    def __init__(self, input_dim, hidden_dim, kernel_size, num_layers,
                 batch_first=False, bias=True, return_all_layers=False):
        super(ConvLSTM, self).__init__()
        # check the kernel size; the check is a static method
        self._check_kernel_size_consistency(kernel_size)
        # Make sure that both `kernel_size` and `hidden_dim` are lists having len == num_layers,
        # since they are indexed once per layer
        kernel_size = self._extend_for_multilayer(kernel_size, num_layers)
        hidden_dim = self._extend_for_multilayer(hidden_dim, num_layers)
        if not len(kernel_size) == len(hidden_dim) == num_layers:
            raise ValueError('Inconsistent list length.')
        self.input_dim = input_dim
        self.hidden_dim = hidden_dim
        self.kernel_size = kernel_size
        self.num_layers = num_layers
        self.batch_first = batch_first
        self.bias = bias
        # if return_all_layers is True, every layer's outputs are returned; if False, only the last layer's
        self.return_all_layers = return_all_layers
        cell_list = []
        for i in range(0, self.num_layers):
            # the first layer takes input_dim channels; every later layer takes the previous
            # layer's hidden_dim channels as its input
            cur_input_dim = self.input_dim if i == 0 else self.hidden_dim[i - 1]
            cell_list.append(ConvLSTMCell(input_dim=cur_input_dim,
                                          hidden_dim=self.hidden_dim[i],
                                          kernel_size=self.kernel_size[i],
                                          bias=self.bias))
        # with num_layers == 3, for example, cell_list is [ConvLSTMCell0, ConvLSTMCell1, ConvLSTMCell2];
        # nn.ModuleList registers the cells as submodules, so forward can index cell_list[0] ... cell_list[num_layers - 1]
        self.cell_list = nn.ModuleList(cell_list)
    def forward(self, input_tensor, hidden_state=None):
        """
        Parameters
        ----------
        input_tensor: todo
            5-D Tensor either of shape (t, b, c, h, w) or (b, t, c, h, w)
        hidden_state: todo
            None. todo implement stateful

        Returns
        -------
        last_state_list, layer_output
        """
        # hidden_state is None on the first call
        # input_tensor has shape (batch_size, time_step, channel, height, width)
        # the model is assembled here in forward: first adjust the dimension order of
        # input_tensor, then initialize the hidden states
        if not self.batch_first:
            # (t, b, c, h, w) -> (b, t, c, h, w)
            input_tensor = input_tensor.permute(1, 0, 2, 3, 4)
        b, _, _, h, w = input_tensor.size()
        # Implement stateful ConvLSTM
        if hidden_state is not None:
            raise NotImplementedError()
        else:
            # Since the init is done in forward. Can send image size here
            # this calls ConvLSTM's own _init_hidden, which in turn calls each cell's init_hidden;
            # the returned hidden_state holds num_layers pairs of (h, c)
            hidden_state = self._init_hidden(batch_size=b,
                                             image_size=(h, w))
        layer_output_list = []
        last_state_list = []
        seq_len = input_tensor.size(1)  # number of time steps
        cur_layer_input = input_tensor
        # with the states initialized, start the forward propagation
        for layer_idx in range(self.num_layers):
            # take this layer's initial (h0, c0) from hidden_state as the first state
            h, c = hidden_state[layer_idx]
            output_inner = []
            # step through the time dimension within this layer
            for t in range(seq_len):
                # cell_list[layer_idx] is this layer's ConvLSTMCell; compute h and c at each time step
                h, c = self.cell_list[layer_idx](input_tensor=cur_layer_input[:, t, :, :, :],
                                                 cur_state=[h, c])
                # collect every h in output_inner
                output_inner.append(h)
            # layer_output is a 5-D tensor, stacked along dim=1 so its layout matches input_tensor
            layer_output = torch.stack(output_inner, dim=1)
            # each layer's 5-D output is fed to the next layer as input, since every layer expects
            # a 5-D tensor without a num_layers dimension
            cur_layer_input = layer_output
            # layer_output_list stores the 5-D output of every layer (first, second, third, ...)
            layer_output_list.append(layer_output)
            # last_state_list stores the final time step's (h, c) of every layer
            last_state_list.append([h, c])
        if not self.return_all_layers:
            # if return_all_layers is False, keep only the last layer's 5-D output and its final (h, c)
            layer_output_list = layer_output_list[-1:]
            last_state_list = last_state_list[-1:]
        return layer_output_list, last_state_list
    def _init_hidden(self, batch_size, image_size):
        init_states = []
        for i in range(self.num_layers):
            # cell_list[i] is a ConvLSTMCell, so its init_hidden method can be called here
            init_states.append(self.cell_list[i].init_hidden(batch_size, image_size))
        # init_states holds num_layers pairs (h, c), each of shape (batch_size, hidden_dim, height, width)
        return init_states
    @staticmethod
    def _check_kernel_size_consistency(kernel_size):
        if not (isinstance(kernel_size, tuple) or
                (isinstance(kernel_size, list) and all([isinstance(elem, tuple) for elem in kernel_size]))):
            raise ValueError('`kernel_size` must be tuple or list of tuples')
    @staticmethod
    def _extend_for_multilayer(param, num_layers):
        if not isinstance(param, list):
            param = [param] * num_layers
        return param
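

# A minimal end-to-end sketch mirroring the docstring example above, with smaller,
# purely illustrative shapes and layer sizes (assumed values, not from the original post).
if __name__ == '__main__':
    x = torch.rand((4, 6, 64, 32, 32))                    # (batch, time, channel, height, width)
    convlstm = ConvLSTM(input_dim=64, hidden_dim=[16, 32], kernel_size=(3, 3),
                        num_layers=2, batch_first=True, bias=True, return_all_layers=False)
    layer_output_list, last_state_list = convlstm(x)
    print(layer_output_list[0].shape)                     # torch.Size([4, 6, 32, 32, 32]): per-step outputs of the last layer
    print(last_state_list[0][0].shape)                    # torch.Size([4, 32, 32, 32]): final hidden state h of the last layer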