Python Deep Learning in Practice: 75 Solutions for Neural Network Modeling, Reinforcement Learning, and Transfer Learning


Chapter 1 Programs
While working through Python Deep Learning in Practice: 75 Solutions for Neural Network Modeling, Reinforcement Learning, and Transfer Learning, the example programs from the book are posted here.
page 8
import tensorflow as tf
import numpy as np

x_input = np.array([[1, 2, 3, 4, 5]])  # provide a dummy dataset
y_input = np.array([[10]])
x = tf.placeholder(tf.float32, [None, 5])  # create placeholders
y = tf.placeholder(tf.float32, [None, 1])
W = tf.Variable(tf.zeros([5, 1]))  # apply some variables to the placeholders
b = tf.Variable(tf.zeros([1]))
y_pred = tf.matmul(x, W) + b
loss = tf.reduce_sum(tf.pow((y - y_pred), 2))  # define a loss function
train = tf.train.GradientDescentOptimizer(0.0001).minimize(loss)  # specify the optimizer and the variable to minimize
init = tf.global_variables_initializer()  # initialize all variables in an op called init
sess = tf.Session()  # create a session and run the training data for 10 epochs
sess.run(init)
for i in range(10):
    feed_dict = {x: x_input, y: y_input}
    # sess.run(train, feed_dict=feed_dict)
    _, loss_value = sess.run([train, loss], feed_dict=feed_dict)
    print(loss_value)
Output:
100.0
97.77255
95.594696
93.46538
91.38347
89.34794
87.357765
85.41191
83.5094
81.64925
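The listing above uses the TensorFlow 1.x API; tf.placeholder and tf.Session no longer exist in TensorFlow 2.x. As a rough sketch (my adaptation, not from the book), the same linear model can be trained eagerly with tf.GradientTape:

# A minimal TF 2.x sketch of the same linear model (my adaptation, not from the book).
import tensorflow as tf
import numpy as np

x_input = np.array([[1, 2, 3, 4, 5]], dtype=np.float32)
y_input = np.array([[10]], dtype=np.float32)

W = tf.Variable(tf.zeros([5, 1]))  # weights
b = tf.Variable(tf.zeros([1]))     # bias
optimizer = tf.keras.optimizers.SGD(learning_rate=0.0001)

for i in range(10):
    with tf.GradientTape() as tape:
        y_pred = tf.matmul(x_input, W) + b
        loss = tf.reduce_sum(tf.square(y_input - y_pred))
    grads = tape.gradient(loss, [W, b])      # compute gradients of the loss
    optimizer.apply_gradients(zip(grads, [W, b]))  # apply the SGD update
    print(loss.numpy())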
page 11
from keras.models import Sequential
from keras.layers import Dense
import numpy as np

x_input = np.array([[1, 2, 3, 4, 5]])  # provide a dummy dataset
y_input = np.array([[10]])
model = Sequential()  # use a hidden layer with 32 units and an output layer with one unit
model.add(Dense(units=32, input_dim=x_input.shape[1]))
model.add(Dense(units=1))
# compile the model, configuring settings such as the loss function, optimizer, and metrics
model.compile(loss='mse', optimizer='sgd', metrics=['accuracy'])
model.summary()  # easily print a summary of the model
history = model.fit(x_input, y_input, epochs=10, batch_size=32)  # train the model directly and store the results
pred = model.predict(x_input, batch_size=128)  # predict can be used after training
Output:
_________________________________________________________________
Layer (type)                 Output Shape              Param #
=================================================================
dense_7 (Dense)              (None, 32)                192
_________________________________________________________________
dense_8 (Dense)              (None, 1)                 33
=================================================================
Total params: 225
Trainable params: 225
Non-trainable params: 0
_________________________________________________________________
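The parameter counts follow from (inputs + 1 bias) × units: the first Dense layer has (5 + 1) × 32 = 192 parameters and the second (32 + 1) × 1 = 33, giving 225 in total.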
Epoch 1/10
1/1 [==============================] - 0s 137ms/step - loss: 128.1939 - acc: 0.0000e+00
Epoch 2/10
1/1 [==============================] - 0s 2ms/step - loss: 1510.0778 - acc: 0.0000e+00
Epoch 3/10
1/1 [==============================] - 0s 2ms/step - loss: 1042477.8750 - acc: 0.0000e+00
Epoch 4/10
1/1 [==============================] - 0s 2ms/step - loss: 559640412684288.0000 - acc: 0.0000e+00
Epoch 5/10
1/1 [==============================] - 0s 2ms/step - loss: inf - acc: 0.0000e+00
Epoch 6/10
1/1 [==============================] - 0s 2ms/step - loss: nan - acc: 0.0000e+00
Epoch 7/10
1/1 [==============================] - 0s 2ms/step - loss: nan - acc: 0.0000e+00
Epoch 8/10
1/1 [==============================] - 0s 1ms/step - loss: nan - acc: 0.0000e+00
Epoch 9/10
1/1 [==============================] - 0s 2ms/step - loss: nan - acc: 0.0000e+00
Epoch 10/10
1/1 [==============================] - 0s 1ms/step - loss: nan - acc: 0.0000e+00
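The loss blows up to inf and then nan because the default SGD learning rate (0.01 in Keras) is too large for this unscaled single-sample toy problem. A minimal sketch of a possible fix, assuming the older keras.optimizers.SGD API with its lr argument: pass an optimizer object with a smaller learning rate instead of the string 'sgd'.

# A possible fix (my assumption, not from the book): a smaller SGD learning rate
# keeps the single-sample toy regression from diverging to inf/nan.
from keras.models import Sequential
from keras.layers import Dense
from keras.optimizers import SGD
import numpy as np

x_input = np.array([[1, 2, 3, 4, 5]])
y_input = np.array([[10]])

model = Sequential()
model.add(Dense(units=32, input_dim=x_input.shape[1]))
model.add(Dense(units=1))
model.compile(loss='mse', optimizer=SGD(lr=0.0001))  # lower than the default lr of 0.01
model.fit(x_input, y_input, epochs=10, batch_size=32)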
page 12
import torch
from torch.autograd import Variable  # use Variable to generate random training data

batch_size = 32  # set the size of the random training data
input_shape = 5
output_shape = 10
# to enable the GPU, set the default tensor type below; this ensures all computations use the attached GPU
torch.set_default_tensor_type('torch.cuda.FloatTensor')
x = Variable(torch.randn(batch_size, input_shape))
y = Variable(torch.randn(batch_size, output_shape), requires_grad=False)
# use a simple neural network with a 32-unit hidden layer and an output layer
# the .cuda() extension makes sure the model runs on the GPU
model = torch.nn.Sequential(
    torch.nn.Linear(input_shape, 32),
    torch.nn.Linear(32, output_shape),
).cuda()
loss_function = torch.nn.MSELoss()  # define the MSE loss function
learning_rate = 0.001  # train the model for 10 iterations
for i in range(10):
    y_pred = model(x)
    loss = loss_function(y_pred, y)
    print(loss.item())
    # zero the gradients
    model.zero_grad()
    loss.backward()
    # update the weights
    for param in model.parameters():
        param.data -= learning_rate * param.grad.data
Output:
1.1000885963439941
1.0996534824371338
1.0992190837860107
1.0987858772277832
1.0983537435531616
1.097922682762146
1.0974926948547363
1.0970635414123535
1.0966354608535767
1.0962083339691162
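The training loop above zeroes gradients and updates the weights by hand. A minimal sketch of the more idiomatic approach (my adaptation, not from the book) delegates the update to torch.optim.SGD; on recent PyTorch versions the Variable wrapper is also unnecessary:

# A sketch using torch.optim instead of the manual parameter update (my adaptation, not from the book).
import torch

model = torch.nn.Sequential(
    torch.nn.Linear(5, 32),
    torch.nn.Linear(32, 10),
)
loss_function = torch.nn.MSELoss()
optimizer = torch.optim.SGD(model.parameters(), lr=0.001)

x = torch.randn(32, 5)
y = torch.randn(32, 10)
for i in range(10):
    optimizer.zero_grad()                   # zero the gradients
    loss = loss_function(model(x), y)
    loss.backward()                         # backpropagate
    optimizer.step()                        # apply the SGD update
    print(loss.item())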
page 16
import mxnet as mx
import numpy as np

# create some simple dummy data assigned to the GPU and CPU
x_input = mx.nd.empty((1, 5), mx.gpu())
x_input[:] = np.array([[1, 2, 3, 4, 5]], np.float32)
y_input = mx.nd.empty((1, 5), mx.cpu())
y_input[:] = np.array([[10, 15, 20, 22.5, 25]], np.float32)
# the data can easily be copied and adjusted
w_input = x_input
z_input = x_input.copyto(mx.cpu())
x_input += 1
w_input /= 2
z_input *= 2
print(x_input.asnumpy())  # print the results
print(w_input.asnumpy())
print(z_input.asnumpy())
batch_size = 1  # create an iterator
train_iter = mx.io.NDArrayIter(x_input, y_input, batch_size,
                               shuffle=True, data_name='input', label_name='target')
X = mx.sym.Variable('input')  # create symbols for the model
Y = mx.symbol.Variable('target')
fc1 = mx.sym.FullyConnected(data=X, name='fc1', num_hidden=5)
lin_reg = mx.sym.LinearRegressionOutput(data=fc1, label=Y, name="lin_reg")
# the model needs to be defined before training starts
model = mx.mod.Module(symbol=lin_reg, data_names=['input'], label_names=['target'])
model.fit(train_iter, optimizer_params={'learning_rate': 0.01, 'momentum': 0.9},
          num_epoch=100,
          batch_end_callback=mx.callback.Speedometer(batch_size, 2))
model.predict(train_iter).asnumpy()
Output:
[[1.  1.5 2.  2.5 3. ]]
[[1.  1.5 2.  2.5 3. ]]
[[ 2.  4.  6.  8. 10.]]
array([[10.046575, 15.069991, 20.093437, 22.605223, 25.116766]],
      dtype=float32)
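The first two rows are identical because w_input = x_input only binds a second name to the same NDArray, so the in-place += 1 and /= 2 act on both names, while copyto produced an independent copy before those modifications. A minimal sketch of the same aliasing-versus-copy behaviour, using plain NumPy so it runs without a GPU:

# Aliasing vs. copying (a minimal sketch; NumPy views/copies behave the same way here).
import numpy as np

a = np.array([[1., 2., 3., 4., 5.]])
b = a          # alias: b and a refer to the same buffer
c = a.copy()   # independent copy (analogous to NDArray.copyto)
a += 1
b /= 2
c *= 2
print(a)  # [[1.  1.5 2.  2.5 3. ]] -- += 1 then /= 2 applied through both names
print(b)  # identical to a
print(c)  # [[ 2.  4.  6.  8. 10.]] -- only *= 2 applied to the copy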
