LSTM Stock Data Prediction -- Model and Parameters
import tensorflow as tf
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

class NetConfig():
    def __init__(self):
        self.rnn_unit = 10       # number of hidden units in the LSTM cell
        self.input_size = 7      # number of input features per time step
        self.output_size = 1     # one predicted value per time step
        self.lr = 0.0006         # learning rate
        self.time_step = 20      # length of each training window
        self.batch_size = 80
        self.weights = {
            'in': tf.Variable(tf.random_normal([self.input_size, self.rnn_unit])),
            'out': tf.Variable(tf.random_normal([self.rnn_unit, self.output_size]))
        }
        self.bias = {
            'in': tf.Variable(tf.constant(0.1, shape=[self.rnn_unit, ])),
            'out': tf.Variable(tf.constant(0.1, shape=[self.output_size, ]))
        }
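The two projection matrices wrap the LSTM: 'in' lifts each 7-feature time step into the 10-unit hidden space, and 'out' maps each hidden state back to a single prediction. A minimal numpy stand-in for this shape flow (dimensions taken from NetConfig above; this is an illustration, not part of the model):

import numpy as np

x = np.zeros((80 * 20, 7))       # flattened windows: [batch_size * time_step, input_size]
w_in = np.zeros((7, 10))         # 'in' weights: [input_size, rnn_unit]
hidden = x @ w_in                # -> [batch_size * time_step, rnn_unit]
w_out = np.zeros((10, 1))        # 'out' weights: [rnn_unit, output_size]
pred = hidden @ w_out            # -> [batch_size * time_step, output_size]
print(hidden.shape, pred.shape)  # (1600, 10) (1600, 1)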
class MyModel():
    def __init__(self):
        self.sess = tf.Session()
        self.nc = NetConfig()

    def lstm(self, X):
        weights = self.nc.weights
        bias = self.nc.bias
        input_size = self.nc.input_size
        rnn_unit = self.nc.rnn_unit
        # Infer batch size and window length from the input tensor itself.
        batch_size = tf.shape(X)[0]
        time_step = tf.shape(X)[1]
        w_in = weights['in']
        b_in = bias['in']
        # Flatten to 2-D so the hidden-layer projection can be computed;
        # the result becomes the input sequence of the LSTM.
        input_2d = tf.reshape(X, [-1, input_size])
        input_rnn = tf.matmul(input_2d, w_in) + b_in
        # Back to 3-D: the shape expected by the LSTM cell.
        input_rnn = tf.reshape(input_rnn, [-1, time_step, rnn_unit])
        cell = tf.nn.rnn_cell.BasicLSTMCell(rnn_unit)
        init_state = cell.zero_state(batch_size, dtype=tf.float32)
        # output_rnn holds the output of every LSTM step;
        # final_states is the state of the last cell.
        output_rnn, final_states = tf.nn.dynamic_rnn(cell, input_rnn, initial_state=init_state, dtype=tf.float32)
        output = tf.reshape(output_rnn, [-1, rnn_unit])  # input to the output layer
        w_out = weights['out']
        b_out = bias['out']
        pred = tf.matmul(output, w_out) + b_out
        return pred, final_states
    def train_lstm(self, data, save_path, iter_num):
        input_size = self.nc.input_size
        output_size = self.nc.output_size
        time_step = self.nc.time_step
        lr = self.nc.lr
        sess = self.sess
        X = tf.placeholder(tf.float32, shape=[None, time_step, input_size])
        Y = tf.placeholder(tf.float32, shape=[None, time_step, output_size])
        batch_index, train_x, train_y = get_train_data(data)
        pred, _ = self.lstm(X)
        # Loss function: mean squared error between prediction and target.
        loss = tf.reduce_mean(tf.square(tf.reshape(pred, [-1]) - tf.reshape(Y, [-1])))
        train_op = tf.train.AdamOptimizer(lr).minimize(loss)
        saver = tf.train.Saver(tf.global_variables(), max_to_keep=15)
        sess.run(tf.global_variables_initializer())
        # Train for iter_num epochs.
        total_loss = []
        for i in range(iter_num):
            for step in range(len(batch_index) - 1):
                _, loss_ = sess.run([train_op, loss],
                                    feed_dict={X: train_x[batch_index[step]:batch_index[step + 1]],
                                               Y: train_y[batch_index[step]:batch_index[step + 1]]})
                total_loss.append(loss_)
            if i % 10 == 0:
                print("Iteration " + str(i) + ", loss: " + str(loss_))
            if i % 20 == 0:
                print("Saving model:", saver.save(sess, save_path, global_step=i))
        return total_loss
    def predict_lstm(self, data, model_path):
        input_size = self.nc.input_size
        time_step = self.nc.time_step
        sess = self.sess
        X = tf.placeholder(tf.float32, shape=[None, time_step, input_size])
        mean, std, test_x, test_y = get_test_data(data)
        pred, _ = self.lstm(X)
        saver = tf.train.Saver(tf.global_variables())
        # Restore the trained parameters from the latest checkpoint.
        module_file = tf.train.latest_checkpoint(model_path)
        saver.restore(sess, module_file)
        test_predict = []
        for step in range(len(test_x) - 1):
            prob = sess.run(pred, feed_dict={X: [test_x[step]]})
            predict = prob.reshape((-1))
            test_predict.extend(predict)
        # Undo the normalization; column 7 is the label column.
        test_y = np.array(test_y) * std[7] + mean[7]
        test_predict = np.array(test_predict) * std[7] + mean[7]
        # Mean relative deviation between prediction and ground truth;
        # the results are plotted as a line chart via plot_predict below.
        acc = np.average(np.abs(test_predict - test_y[:len(test_predict)]) / test_y[:len(test_predict)])
        return test_y, test_predict, acc
def plot_train_loss(total_loss):
    plt.figure()
    plt.plot(list(range(len(total_loss))), total_loss, color='b')
    plt.show()

def plot_predict(test_y, test_predict):
    plt.figure()
    plt.plot(list(range(len(test_predict))), test_predict, color='b')
    plt.plot(list(range(len(test_y))), test_y, color='r')
    plt.show()
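train_lstm and predict_lstm rely on get_train_data and get_test_data from the data-processing part of this series, which is not shown here. As a reference, here is a minimal sketch of what they are assumed to do: z-score normalization, sliding windows of time_step rows, columns 0-6 as features and column 7 as the label. The split index 5800 is an assumption; use the actual helpers from the data-processing part.

def get_train_data(data, batch_size=80, time_step=20, train_begin=0, train_end=5800):
    # Sketch: assumed behavior; the split index 5800 is a guess.
    data_train = data[train_begin:train_end]
    normalized = (data_train - np.mean(data_train, axis=0)) / np.std(data_train, axis=0)
    batch_index, train_x, train_y = [], [], []
    for i in range(len(normalized) - time_step):
        if i % batch_size == 0:
            batch_index.append(i)
        train_x.append(normalized[i:i + time_step, :7].tolist())
        train_y.append(normalized[i:i + time_step, 7, np.newaxis].tolist())
    batch_index.append(len(normalized) - time_step)
    return batch_index, train_x, train_y

def get_test_data(data, time_step=20, test_begin=5800):
    # Sketch: mean/std are returned so predictions can be de-normalized.
    data_test = data[test_begin:]
    mean, std = np.mean(data_test, axis=0), np.std(data_test, axis=0)
    normalized = (data_test - mean) / std
    size = (len(normalized) + time_step - 1) // time_step  # number of test windows
    test_x, test_y = [], []
    for i in range(size - 1):
        test_x.append(normalized[i * time_step:(i + 1) * time_step, :7].tolist())
        test_y.extend(normalized[i * time_step:(i + 1) * time_step, 7])
    # The last window may be shorter than time_step; predict_lstm skips it.
    test_x.append(normalized[(size - 1) * time_step:, :7].tolist())
    test_y.extend(normalized[(size - 1) * time_step:, 7])
    return mean, std, test_x, test_y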
my_model = MyModel()
f = open('data/dataset.csv')
df = pd.read_csv(f)
data = df.iloc[:, 2:10].values
total_loss_lstm = my_model.train_lstm(data, save_path="./stock_model/model", iter_num=100)
# Rebuild the graph from scratch before prediction.
tf.reset_default_graph()
my_model = MyModel()
test_y, test_predict, acc = my_model.predict_lstm(data, "./stock_model/")
plot_predict(test_y, test_predict)
error_lstm = np.sum((test_y[1:300] - test_predict[1:300])**2)
print(error_lstm)
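For completeness, the training-loss curve and the deviation metric returned by predict_lstm can be inspected with the helpers defined above:

plot_train_loss(total_loss_lstm)        # loss recorded at every training step
print("mean relative deviation:", acc)  # acc as returned by predict_lstm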