第一个卷积层
输入的图片大小为:224*224*3,为后续处理方便,普遍改为227*227*3
第五层经过padding=1填充后,每组数据都被尺寸大小为 3*3*128的卷积核进行卷积运算,步长为1,加上ReLU,输出两组13*13*128的像素层
经过3*3池化窗口,步长为2,池化后输出两组6*6*128的像素层(合计6*6*256)
第六层至第八层全连接层
接下来的三层为全连接层,分别为:
6层. 4096 个神经元 + ReLU
7层. 4096 个神经元 + ReLU
8层. 1000 个神经元,最后一层为softmax,输出1000类的概率值.
三.AlexNet的代码实现
该代码是用搭建好的AlexNet网络来实现MNIST手写体数字的识别(采用keras框架)
# Imports: Keras model/layers for the AlexNet-style network, MNIST data,
# one-hot utilities, and matplotlib for plotting the training history.
from keras.datasets import mnist
from keras.utils import np_utils
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPool2D, BatchNormalization
from matplotlib import pyplot as plt
# Load MNIST; keep untouched copies of the raw test set so predictions can
# be visualized later against the original images/labels.
(X_train, Y_train), (X_test, Y_test) = mnist.load_data()
X_test1 = X_test
Y_test1 = Y_test

# Image features: reshape to (N, 28, 28, 1) and scale pixels to [0, 1].
X_train = X_train.reshape(-1, 28, 28, 1).astype("float32") / 255.0
X_test = X_test.reshape(-1, 28, 28, 1).astype("float32") / 255.0

# Labels: one-hot encode the 10 digit classes.
Y_train = np_utils.to_categorical(Y_train, 10)
Y_test = np_utils.to_categorical(Y_test, 10)
# Build the AlexNet-style network.
# Layer 1: convolution, 96 filters of 11x11, stride 4, ReLU.
model = Sequential()
model.add(Conv2D(
    filters=96,
    kernel_size=(11, 11),
    strides=4,
    padding="same",
    input_shape=(28, 28, 1),
    activation="relu"
))
# Batch normalization (modern stand-in for AlexNet's original LRN).
model.add(BatchNormalization())
# Layer 1 overlapping max pooling: 3x3 window, stride 2.
model.add(MaxPool2D(
    pool_size=(3, 3),
    strides=2,
    padding="same"
))
# Layer 2: convolution, 256 filters of 5x5, stride 1, ReLU.
# (The source had a duplicated activation argument — removed.)
model.add(Conv2D(
    filters=256,
    kernel_size=(5, 5),
    strides=1,
    padding="same",
    activation="relu"
))
# Batch normalization after the second convolution.
model.add(BatchNormalization())
# Layer 2 max pooling: 3x3 window, stride 2.
model.add(MaxPool2D(
    pool_size=(3, 3),
    strides=2,
    padding="same",
))
# Layer 3: convolution, 384 filters of 3x3, stride 1, ReLU (no pooling).
model.add(Conv2D(
    filters=384,
    kernel_size=(3, 3),
    strides=1,
    padding="same",
    activation="relu",
))
# Layer 4: convolution, 384 filters of 3x3, stride 1, ReLU (no pooling).
model.add(Conv2D(
    filters=384,
    kernel_size=(3, 3),
    strides=1,
    padding="same",
    activation="relu"
))
# Layer 5: convolution, 256 filters of 3x3, stride 1, ReLU.
model.add(Conv2D(
    filters=256,
    kernel_size=(3, 3),
    strides=1,
    padding="same",
    activation="relu"
))
# Layer 5 max pooling: 3x3 window, stride 2.
model.add(MaxPool2D(
    pool_size=(3, 3),
    strides=2,
    padding="same"
))
# Layers 6-8: fully connected head.
# Flatten() is required before the first Dense layer to collapse the
# spatial feature maps into a vector.
model.add(Flatten())
# Layer 6: 4096 units + ReLU, with dropout for regularization.
model.add(Dense(4096, activation="relu"))
model.add(Dropout(0.5))
# Layer 7: 2048 units + ReLU, with dropout.
model.add(Dense(2048, activation="relu"))
model.add(Dropout(0.5))
# Layer 8 (output): softmax over the 10 digit classes.
model.add(Dense(10, activation="softmax"))
model.summary()
# Compile: cross-entropy loss matches the one-hot labels; SGD optimizer.
model.compile(
    loss="categorical_crossentropy",
    optimizer="sgd",
    metrics=["accuracy"]
)
# Train, holding out 20% of the training data for validation.
n_epoch = 10
n_batch = 128
training = model.fit(
    X_train,
    Y_train,
    epochs=n_epoch,
    batch_size=n_batch,
    verbose=1,
    validation_split=0.20
)
# Plot accuracy over epochs for the training and validation sets.
def show_train(tr, train, validation):
    """Plot two curves from a Keras History object.

    tr: the History returned by model.fit.
    train / validation: keys into tr.history (e.g. "accuracy", "val_accuracy").
    """
    # Use the parameter, not the global, so the helper works for any run.
    plt.plot(tr.history[train], color="b")
    plt.plot(tr.history[validation], color="r")
    plt.title("training_history")
    plt.xlabel("epoch")
    plt.ylabel("accuracy")
    plt.legend(["train", "validation"], loc="lower right")
    plt.show()

show_train(training, "accuracy", "val_accuracy")
# Plot loss over epochs for the training and validation sets.
# NOTE(review): this redefines show_train (as the source did) with
# loss-appropriate axis label and legend placement.
def show_train(tr, train, validation):
    """Plot training/validation loss curves from a Keras History object."""
    plt.plot(tr.history[train], color="b")
    plt.plot(tr.history[validation], color="r")
    plt.title("training_history")
    plt.xlabel("epoch")
    # The y-axis shows loss here, not accuracy.
    plt.ylabel("loss")
    plt.legend(["train", "validation"], loc="upper right")
    plt.show()

show_train(training, "loss", "val_loss")
# Evaluate on the held-out TEST set. (The source evaluated on the training
# data, which overstates generalization performance.)
test = model.evaluate(X_test, Y_test, verbose=1)
print("误差:", test[0])
print("准确率:", test[1])
# Predict on the test set and visualize individual samples.
def image_show(image):
    """Draw one 28x28 grayscale digit at a small fixed figure size."""
    fig = plt.gcf()
    fig.set_size_inches(2, 2)
    plt.imshow(image, cmap="binary")
    plt.show()

# predict_classes returns the argmax class index per sample
# (Sequential API in the Keras version this script targets).
prediction = model.predict_classes(X_test)

def pre_result(i):
    """Show test sample i with its true label and predicted class."""
    image_show(X_test1[i])
    print("Y-test:", Y_test1[i])
    print("预测值:", prediction[i])

pre_result(0)
pre_result(1)