# Implementing Different Layers
# ---------------------------------------
#
# We will illustrate how to use different types
# of layers in TensorFlow
#
# The layers of interest are:
# (1) Convolutional Layer 卷积层
# (2) Activation Layer 激活层
# (3) Max-Pool Layer 池化层
# (4) Fully Connected Layer 全连接层
#
# We will generate two different data sets for this
# script, a 1-D data set (row of data) and
# a 2-D data set (similar to picture)
import tensorflow as tf
import matplotlib.pyplot as plt
import csv
import os
import random
import numpy as np
from tensorflow.python.framework import ops

# Start from a clean default graph so repeated runs are reproducible.
ops.reset_default_graph()
# ---------------------------------------------------|
# -------------------1D-data-------------------------|
# ---------------------------------------------------|
# Create graph session 创建初始图结构
ops.reset_default_graph()
ss = tf.Session()

# Parameters for the run 运行参数
data_size = 25     # length of the 1-D input vector
conv_size = 5      # convolution filter width 卷积核宽度方向的大小
maxpool_size = 5   # max-pool window width 池化层核宽度方向上的大小
stride_size = 1    # stride along the width dimension 卷积核宽度方向上的步长

# Ensure reproducibility 确保复现性
seed = 13
np.random.seed(seed)
tf.set_random_seed(seed)

# Generate 1D data: a random normal vector of length `data_size`. 生成一维数据
data_1d = np.random.normal(size=data_size)

# Placeholder that will be fed with `data_1d` at run time.
x_input_1d = tf.placeholder(dtype=tf.float32, shape=[data_size])
# --------Convolution--------
def conv_layer_1d(input_1d, my_filter, stride):
    """Apply a 'VALID'-padded convolution to a 1-D tensor.

    TensorFlow's conv2d() only operates on 4-D tensors of shape
    [batch, height, width, channels], so the 1-D input is expanded to
    [1, 1, width, 1] (batch=1, height=1, channels=1), convolved, then
    squeezed back down to 1-D.
    关于数据维度的处理十分关键, 因为 TensorFlow 中卷积操作只支持四维的张量,
    所以要人为地把数据补充为 4 维数据 [1, 1, 25, 1].

    Args:
        input_1d: 1-D float tensor (length = data_size).
        my_filter: 4-D filter variable of shape [1, filter_width, 1, 1].
        stride: stride along the width dimension.

    Returns:
        1-D tensor with the convolution output (extra dims squeezed away).
    """
    # Insert the batch, height and channel dimensions: [w] -> [1, 1, w, 1].
    input_2d = tf.expand_dims(input_1d, 0)
    input_3d = tf.expand_dims(input_2d, 0)
    input_4d = tf.expand_dims(input_3d, 3)
    # Convolve with the given stride; to use a stride of 2 along the data
    # dimension the strides argument would be [1, 1, 2, 1].
    convolution_output = tf.nn.conv2d(input_4d, filter=my_filter,
                                      strides=[1, 1, stride, 1],
                                      padding="VALID")
    # Drop the singleton dimensions, keeping only the data. 去掉多余的层数
    conv_output_1d = tf.squeeze(convolution_output)
    return (conv_output_1d)
# Create filter for convolution.
# Trainable 4-D filter: [height=1, width=conv_size, in_channels=1, out_channels=1].
my_filter = tf.Variable(tf.random_normal(shape=[1, conv_size, 1, 1]))
# Create convolution layer
my_convolution_output = conv_layer_1d(x_input_1d, my_filter, stride=stride_size)
# --------Activation--------
def activation(input_1d):
    """Apply an element-wise ReLU activation to `input_1d`."""
    return (tf.nn.relu(input_1d))
# Create activation layer
# ReLU applied to the squeezed 1-D convolution output.
my_activation_output = activation(my_convolution_output)
# --------Max Pool--------
def max_pool(input_1d, width, stride):
    """Max-pool a 1-D tensor over windows of the given width.

    Just like conv2d() above, max_pool() works with 4-D arrays
    [batch=1, height=1, width=len(input_1d), channels=1], so the 1-D
    input is expanded to 4-D, pooled, then squeezed back to 1-D.
    因为在处理卷积层的结果时使用了 squeeze 函数降维,
    所以此处要将最大池化层的输入维度提升为 4 维.

    Args:
        input_1d: 1-D float tensor.
        width: width of the max-pooling window.
        stride: stride along the width dimension.

    Returns:
        1-D tensor with the 'VALID'-padded pooling output.
    """
    input_2d = tf.expand_dims(input_1d, 0)
    input_3d = tf.expand_dims(input_2d, 0)
    input_4d = tf.expand_dims(input_3d, 3)
    # Perform the max pooling with strides = [1, 1, stride, 1].
    # To stride by a factor of 2 on the data dimension, use [1, 1, 2, 1].
    # The window width along the data dimension is given by `width`.
    pool_output = tf.nn.max_pool(input_4d, ksize=[1, 1, width, 1],
                                 strides=[1, 1, stride, 1],
                                 padding='VALID')
    # Get rid of extra dimensions
    pool_output_1d = tf.squeeze(pool_output)
    return (pool_output_1d)
# Create max-pool layer over the activation output.
my_maxpool_output = max_pool(my_activation_output, width=maxpool_size, stride=stride_size)
# --------Fully Connected--------
def fully_connected(input_layer, num_outputs):
    """Apply a fully-connected (dense) layer to a 1-D input tensor.

    Builds a weight matrix of shape (len(input_layer), num_outputs) and a
    bias of shape (num_outputs,), both drawn from a random normal
    distribution, then computes input @ weight + bias.

    NOTE(review): the weights are created with tf.random_normal, not
    tf.Variable, so they are re-sampled on every session run and are not
    trainable — presumably intentional for this layer-shape demo.

    Args:
        input_layer: 1-D float tensor.
        num_outputs: number of output units.

    Returns:
        1-D tensor of length `num_outputs`.
    """
    # Needed shape of the multiplication weight matrix:
    # (length of input) x (num_outputs). squeeze 函数用于去掉维度为 1 的维度.
    weight_shape = tf.squeeze(tf.stack([tf.shape(input_layer), [num_outputs]]))
    # Initialize the weight 初始化 weight
    weight = tf.random_normal(weight_shape, stddev=0.1)
    # Initialize the bias 初始化 bias
    bias = tf.random_normal(shape=[num_outputs])
    # Make the 1-D input into a 2-D row vector for matrix multiplication.
    # 将一维的数组添加一维成为 2 维数组
    input_layer_2d = tf.expand_dims(input_layer, 0)
    # Perform the matrix multiplication and add the bias.
    full_output = tf.add(tf.matmul(input_layer_2d, weight), bias)
    # Get rid of extra dimensions, keeping only the data. 去掉多余的维度
    full_output_1d = tf.squeeze(full_output)
    return (full_output_1d)
# Create fully-connected layer producing 5 outputs.
my_full_output = fully_connected(my_maxpool_output, 5)

# Run graph
# Initialize Variables
init = tf.global_variables_initializer()
ss.run(init)

# Feed dictionary mapping the placeholder to the generated 1-D data.
feed_dict = {x_input_1d: data_1d}