Time-Series Prediction with an LSTM Neural Network in PyTorch
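This post walks through a small, self-contained PyTorch script that trains an LSTM on a time series: in each training step the network reads a 10-point window of sin values and learns to predict the corresponding cos values, plotting the prediction against the target as it goes.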
import torch
from torch import nn
import numpy as np
import matplotlib as mat
mat.use("TkAgg")  # pick an interactive backend before importing pyplot
import matplotlib.pyplot as plt
from torch.autograd import Variable
torch.manual_seed(1) # reproducible
LR = 0.02 # learning rate
class LSNN(nn.Module):
    def __init__(self):
        super(LSNN, self).__init__()
        self.lstm = nn.LSTM(
            input_size=1,
            hidden_size=32,
            num_layers=1,
            batch_first=True,  # input & output use batch size as the first dimension, e.g. (batch, time_step, input_size)
        )
        # initial (h_0, c_0), each of shape (num_layers, batch, hidden_size)
        self.hidden = (Variable(torch.zeros(1, 1, 32)), Variable(torch.zeros(1, 1, 32)))
        self.out = nn.Linear(32, 1)
    def forward(self, x):
        # x       (batch, time_step, input_size)
        # h_state (n_layers, batch, hidden_size)
        # r_out   (batch, time_step, output_size)
        r_out, self.hidden = self.lstm(x, self.hidden)  # the hidden state is also an input to the LSTM
        # re-wrap the hidden state around fresh data so the autograd graph does not grow
        # across windows; this line can be removed if loss.backward(retain_graph=True) is
        # used instead (a Variable remembers its history, a plain tensor does not)
        self.hidden = (Variable(self.hidden[0].data), Variable(self.hidden[1].data))
        outs = []  # collect the prediction at every time step
        for time_step in range(r_out.size(1)):  # compute an output for each time step
            outs.append(self.out(r_out[:, time_step, :]))
        return torch.stack(outs, dim=1)
# alternative forward: flatten all time steps and apply the linear layer in one shot instead of looping
'''
    def forward(self, x, h_state):
        r_out, h_state = self.lstm(x, h_state)
        r_out = r_out.view(-1, 32)
        outs = self.out(r_out)
        return outs.view(-1, 10, 1), h_state
'''
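As a quick sanity check, a single forward pass on a dummy window should produce one output per time step (a minimal sketch; _check is a throwaway name, not used below):

# sanity check: one dummy 10-step window through a throwaway model
_check = LSNN()
print(_check(torch.zeros(1, 10, 1)).shape)  # torch.Size([1, 10, 1])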
lstmNN = LSNN()
optimizer = torch.optim.Adam(lstmNN.parameters(), lr=LR)  # optimize all LSTM parameters
loss_func = nn.MSELoss()

# the initial hidden state can also be set to None (see the note after the script)
for step in range(100):
    start, end = step * np.pi, (step + 1) * np.pi  # time range of this window
    # use sin to predict cos
    steps = np.linspace(start, end, 10, dtype=np.float32)
    x_np = np.sin(steps)  # float32 so torch.from_numpy yields a FloatTensor
    y_np = np.cos(steps)
    x = Variable(torch.from_numpy(x_np[np.newaxis, :, np.newaxis]))  # shape (batch, time_step, input_size)
    y = Variable(torch.from_numpy(y_np[np.newaxis, :, np.newaxis]))
    # print(x)  # uncomment to inspect the input tensor
    prediction = lstmNN(x)           # the LSTM's prediction for every time step
    loss = loss_func(prediction, y)  # MSE loss
    optimizer.zero_grad()            # clear gradients for this training step
    loss.backward()                  # backpropagation, compute gradients
    optimizer.step()                 # apply gradients
    plt.ion()
    plt.plot(steps, y_np.flatten(), 'r-')
    plt.plot(steps, prediction.data.numpy().flatten(), 'b-')
    plt.draw()
    plt.pause(0.05)

#plt.ioff()
plt.show()
Result:
[figure: red curve = target cos values, blue curve = the LSTM's prediction]
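A side note on newer PyTorch: since version 0.4 the Variable wrapper is a deprecated no-op, so the re-wrapping trick in forward() is more idiomatically written with .detach(), and the initial hidden state can simply be None (nn.LSTM then creates zero states on the first call). A minimal sketch of the equivalent lines, assuming PyTorch >= 0.4:

# in __init__: let nn.LSTM create zero initial states itself
self.hidden = None

# in forward(): detach cuts the autograd graph between successive windows
r_out, self.hidden = self.lstm(x, self.hidden)
self.hidden = (self.hidden[0].detach(), self.hidden[1].detach())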