Implementing a Neural Network with TensorFlow

import tensorflow as tf
import pandas as pd
import numpy as np
# generate the dataset
D = 2    # number of features in X
K = 300  # number of samples
C = 2    # number of output classes for Y
X = (np.random.random((K, D)) * 10) - 5  # K points sampled uniformly from [-5, 5)^2
Y_c = [int(x0 * x0 + x1 * x1 < 9) for (x0, x1) in X]
Y = pd.get_dummies(Y_c).values  # one-hot encoding of the labels
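# a point is labeled 1 when it falls inside the circle x0*x0 + x1*x1 < 9
# (radius 3), so the decision boundary the network has to learn is a circle;
# pd.get_dummies turns the 0/1 labels into (K, C) one-hot rows, e.g. 1 -> [0, 1]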

# build the network model
def get_weight(shape, reg):
    w = tf.Variable(tf.random_normal(shape), dtype=tf.float32)
    # collect the L2 regularization term of w so it can be added to the loss later
    tf.add_to_collection('losses', tf.contrib.layers.l2_regularizer(reg)(w))
    return w

def get_bias(shape):
    return tf.Variable(tf.constant(0.0, shape=shape, dtype=tf.float32))
# placeholders for inputs and labels; the number of samples fed in is
# not known in advance, so None is used for the batch dimension
x = tf.placeholder(dtype=tf.float32, shape=(None, D))
y = tf.placeholder(dtype=tf.float32, shape=(None, C))
h = 100  # hidden layer width
# build the network
w1 = get_weight([D, h], 1e-3)
b1 = get_bias([1, h])
# the hidden layer uses the ReLU activation
y1 = tf.nn.relu(tf.matmul(x, w1) + b1)
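# shapes: x is (None, D), w1 is (D, h), and b1 is (1, h), which broadcasts
# across the batch dimension, so y1 comes out as (None, h)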

w2 = get_weight([h, C], 1e-3)
b2 = get_bias([1, C])
y_ = tf.matmul(y1, w2) + b2

# hyperparameters
STEPS = 2000
BATCH_SIZE = 256
learning_rate = 1e-0  # learning rate (1e-0 = 1.0)

# data loss: softmax cross entropy between the one-hot labels and the logits
data_loss = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits_v2(labels=y, logits=y_))
# regularization loss: sum of the L2 terms collected in get_weight
reg_loss = tf.add_n(tf.get_collection('losses'))
total_loss = data_loss + reg_loss
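# note: tf.contrib.layers.l2_regularizer(reg) scales tf.nn.l2_loss (half the
# squared L2 norm) by reg, so the objective being minimized is roughly
# data_loss + reg * (||w1||^2 + ||w2||^2) / 2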

# training op: plain gradient descent on the total loss
train_function = tf.train.GradientDescentOptimizer(learning_rate).minimize(total_loss)
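# minimize() both computes the gradients of total_loss with respect to every
# trainable variable (w1, b1, w2, b2) and applies one gradient-descent update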

# start training
with tf.Session() as sess:
    init_op = tf.global_variables_initializer()
    sess.run(init_op)
    for i in range(STEPS):
        start = (i * BATCH_SIZE) % K
        end = start + BATCH_SIZE
        sess.run(train_function, feed_dict={x: X[start:end], y: Y[start:end]})
        if i % 100 == 0:
            # compute the current data loss on the full dataset
            loss_v = sess.run(data_loss, feed_dict={x: X, y: Y})
            print("this is %dth step, the loss is %f" % (i, loss_v))