PyTorch: Stock Price Prediction with an LSTM
1. Get the data
import tushare as ts
# Fetch the index price data (code 000001)
cons=ts.get_apis()
df=ts.bar('000001', conn=cons, asset='INDEX', start_date='2018-01-01', end_date='')
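Since ts.get_apis() needs a live connection and the old tushare API can be unreliable, it helps to cache the downloaded data locally. The complete code at the end of this post reads from stock.csv, which could be produced like this (a small sketch, assuming the fetch above succeeded):
# cache the downloaded data so later runs can read it offline
df.to_csv("stock.csv")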
2. Sort the data by date in ascending order, because we want to predict the future from the historical data
df=df.sort_index(ascending=True)
print(df.head(5))
3. Take five features (open, close, high, low, volume) and min-max normalize them
df=df[["open","close","high","low","vol"]]
# keep the original close range so predictions can be mapped back to prices later (used in step 8)
close_min=df["close"].min()
close_max=df["close"].max()
df=df.apply(lambda x:(x-min(x))/(max(x)-min(x)))
4. Construct X and Y
Idea: use the previous n days of data to predict that day's closing price (close). For example, use the data from Jan 1, Jan 2 and Jan 3 (5 features each) to predict the closing price on Jan 4 (a single value).
For instance: X=[ [open1,close1,high1,low1,vol1], [open2,close2,high2,low2,vol2], [open3,close3,high3,low3,vol3] ] and Y=[ close4 ].
In this example X has a sequence length of 3 and input_size=5 (the same idea as a word embedding in NLP).
Here I set the sequence length to 5, i.e. the closing price is predicted from the previous 5 days of data.
sequence=5
total_len=df.shape[0]
X=[]
Y=[]
for i in range(df.shape[0]-sequence):
    X.append(np.array(df.iloc[i:(i+sequence),:].values,dtype=np.float32))
    Y.append(np.array(df.iloc[(i+sequence),1],dtype=np.float32))  # column 1 is "close"
print(X[0])
print(Y[0])
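A quick shape sanity check (not in the original post, just for verification): each X[i] should be a (sequence, 5) window of features and each Y[i] the following day's normalized close.
# verify the shapes of the constructed samples
print(np.array(X).shape)   # (total_len - sequence, 5, 5): samples, seq_len, input_size
print(np.array(Y).shape)   # (total_len - sequence,)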
5. Split into training and test sets and build the data loaders (the usual steps)
class Mydataset(Dataset):
    def __init__(self,xx,yy,transform=None):
        self.x=xx
        self.y=yy
        self.transform=transform
    def __getitem__(self,index):
        x1=self.x[index]
        y1=self.y[index]
        if self.transform is not None:
            return self.transform(x1),y1
        return x1,y1
    def __len__(self):
        return len(self.x)
# build the batches
trainx,trainy=X[:int(0.7*total_len)],Y[:int(0.7*total_len)]
testx,testy=X[int(0.7*total_len):],Y[int(0.7*total_len):]
train_loader=DataLoader(dataset=Mydataset(trainx,trainy,transform=transforms.ToTensor()), batch_size=12, shuffle=True)
test_loader=DataLoader(dataset=Mydataset(testx,testy), batch_size=12, shuffle=True)
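As a quick check (not part of the original post), you can peek at one batch: transforms.ToTensor() adds a channel dimension to each (5, 5) window, which is why the training loop below calls squeeze(1).
# inspect one training batch: ToTensor turns each (5, 5) sample into a (1, 5, 5) tensor
data, label = next(iter(train_loader))
print(data.shape)              # e.g. torch.Size([12, 1, 5, 5])
print(data.squeeze(1).shape)   # torch.Size([12, 5, 5]) -> batch, seq_len, input_size
print(label.shape)             # torch.Size([12])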
6. Define the LSTM model
class lstm(nn.Module):
    def __init__(self,input_size=5,hidden_size=32,output_size=1):
        super(lstm, self).__init__()
        # LSTM input shape: batch, seq_len, input_size
        self.hidden_size=hidden_size
        self.input_size=input_size
        self.output_size=output_size
        self.rnn=nn.LSTM(input_size=self.input_size,hidden_size=self.hidden_size,batch_first=True)
        self.linear=nn.Linear(self.hidden_size,self.output_size)
    def forward(self,x):
        # x.shape: batch, seq_len, input_size
        # hidden.shape and cell.shape: num_layers * num_directions, batch, hidden_size
        out,(hidden,cell)=self.rnn(x)
        a,b,c=hidden.shape
        out=self.linear(hidden.reshape(a*b,c))
        return out
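Before training, a dummy forward pass confirms the shapes (a throwaway check, not in the original): a batch of 12 sequences of length 5 with 5 features should map to 12 single-value predictions.
# throwaway instance just for a shape check
net=lstm()
fake_batch=torch.randn(12, 5, 5)   # batch, seq_len, input_size
print(net(fake_batch).shape)       # torch.Size([12, 1])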
7. Train the model
model=lstm()
criterion=nn.MSELoss()
optimizer=optim.Adam(model.parameters(),lr=0.001)
preds=[]
labels=[]
for i in range(100):
    total_loss=0
    for idx,(data,label) in enumerate(train_loader):
        data1=data.squeeze(1)     # (batch, 1, 5, 5) -> (batch, seq_len, input_size)
        pred=model(Variable(data1))
        label=label.unsqueeze(1)  # (batch,) -> (batch, 1) to match pred
        loss=criterion(pred,label)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        total_loss+=loss.item()
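The loop accumulates total_loss but never reports it. If you want to watch convergence, a per-epoch print at the end of the outer for i in range(100) loop is a small optional addition (indented as it would sit inside that loop, after the inner batch loop):
    # optional: report the accumulated training loss once per epoch
    print("epoch %d, total_loss %.6f" % (i, total_loss))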
8. Test the model and plot the predicted closing price against the actual one; red is the predicted close, blue is the actual close
preds=[]
labels=[]
for idx, (x, label) in enumerate(test_loader):
    x = x.squeeze(1)  # batch_size, seq_len, input_size
    pred=model(x)
    preds.extend(pred.data.squeeze(1).tolist())
    labels.extend(label.tolist())
Now plot the results. Since the features were normalized to the [0, 1] range earlier, the closing prices in the figure are scaled back to their original range; plotting every point is too dense, so only the first 50 are compared.
import matplotlib.pyplot as plt
plt.plot([ele*(close_max-close_min)+close_min for ele in preds[0:50]],"r",label="pred")
plt.plot([ele*(close_max-close_min)+close_min for ele in labels[0:50]],"b",label="real")
plt.legend()
plt.show()
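Besides the plot, a single number makes runs easier to compare. A sketch of the test RMSE mapped back to the original price scale, assuming preds and labels were collected in the loop above and close_min/close_max were saved before normalization:
# RMSE on the test set in original price units
preds_real=np.array(preds)*(close_max-close_min)+close_min
labels_real=np.array(labels)*(close_max-close_min)+close_min
rmse=np.sqrt(np.mean((preds_real-labels_real)**2))
print("test RMSE:", rmse)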
Complete code:
import tushare as ts
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader,Dataset
import numpy as np
from torch.autograd import Variable
import os
import pandas as pd
from torchvision import transforms
from torchsummary import summary
# Fetch the index price data (code 000001)
# cons=ts.get_apis()
# df=ts.bar('000001', conn=cons, asset='INDEX', start_date='2018-01-01', end_date='')
#
# df=df.sort_index(ascending=True)
df=pd.read_csv("stock.csv")
df=df.sort_index(ascending=True)
print(df.head(5))
# extract open, close, high, low, vol as features and min-max normalize them
df=df[["open","close","high","low","vol"]]
close_min=df["close"].min()
close_max=df["close"].max()
df=df.apply(lambda x:(x-min(x))/(max(x)-min(x)))
# Define X and Y: predict the day's closing price from the previous n days of data
# e.g. use the prices on Jan 1, Jan 2 and Jan 3 to predict the closing price on Jan 4
# X=[ [open1,close1,high1,low1,vol1], [open2,close2,high2,low2,vol2], [open3,close3,high3,low3,vol3] ]  Y=[ close4 ]
# so X corresponds to sequence=3 and input_size=5 (the same idea as a word embedding in NLP)
total_len=df.shape[0]
sequence=5
X=[]
Y=[]
for i in range(df.shape[0]-sequence):
    X.append(np.array(df.iloc[i:(i+sequence),:].values,dtype=np.float32))
    Y.append(np.array(df.iloc[(i+sequence),1],dtype=np.float32))  # column 1 is "close"
print(X[0])
print(Y[0])
# custom Dataset
class Mydataset(Dataset):
    def __init__(self,xx,yy,transform=None):
        self.x=xx
        self.y=yy
        self.transform=transform
    def __getitem__(self,index):
        x1=self.x[index]
        y1=self.y[index]
        if self.transform is not None:
            return self.transform(x1),y1
        return x1,y1
    def __len__(self):
        return len(self.x)
# build the batches
trainx,trainy=X[:int(0.7*total_len)],Y[:int(0.7*total_len)]
testx,testy=X[int(0.7*total_len):],Y[int(0.7*total_len):]
train_loader=DataLoader(dataset=Mydataset(trainx,trainy,transform=transforms.ToTensor()), batch_size=12, shuffle=True)
test_loader=DataLoader(dataset=Mydataset(testx,testy), batch_size=12, shuffle=True)
class lstm(nn.Module):
    def __init__(self,input_size=5,hidden_size=32,output_size=1):
        super(lstm, self).__init__()
        # LSTM input shape: batch, seq_len, input_size
        self.hidden_size=hidden_size
        self.input_size=input_size
        self.output_size=output_size
        self.rnn=nn.LSTM(input_size=self.input_size,hidden_size=self.hidden_size,batch_first=True)
        self.linear=nn.Linear(self.hidden_size,self.output_size)
    def forward(self,x):
        # x.shape: batch, seq_len, input_size
        # hidden.shape and cell.shape: num_layers * num_directions, batch, hidden_size
        out,(hidden,cell)=self.rnn(x)
        a,b,c=hidden.shape
        out=self.linear(hidden.reshape(a*b,c))
        return out
model=lstm()
criterion=nn.MSELoss()
optimizer=optim.Adam(model.parameters(),lr=0.001)
preds=[]
labels=[]
for i in range(100):
    total_loss=0
    for idx,(data,label) in enumerate(train_loader):
        data1=data.squeeze(1)     # (batch, 1, 5, 5) -> (batch, seq_len, input_size)
        pred=model(Variable(data1))
        label=label.unsqueeze(1)  # (batch,) -> (batch, 1) to match pred
        loss=criterion(pred,label)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        total_loss+=loss.item()
# start testing
preds=[]
labels=[]
for idx, (x, label) in enumerate(test_loader):
    x = x.squeeze(1)  # batch_size, seq_len, input_size
    pred=model(x)
    preds.extend(pred.data.squeeze(1).tolist())
    labels.extend(label.tolist())
# print(len(preds[0:50]))
# print(len(labels[0:50]))
import matplotlib.pyplot as plt
plt.plot([ele*(close_max-close_min)+close_min for ele in preds[0:50]],"r",label="pred")
plt.plot([ele*(close_max-close_min)+close_min for ele in labels[0:50]],"b",label="real")
plt.legend()
plt.show()