with tf.Session() as sess:
    init_op = tf.global_variables_initializer()
    sess.run(init_op)
    STEPS = 3000
    for i in range(STEPS):
        start = (i * BATCH_SIZE) % 32
        end = start + BATCH_SIZE
        sess.run(train_step, feed_dict={x: X[start:end], y_: Y[start:end]})

Example:
#coding:utf-8
import tensorflow as tf
import numpy as np
BATCH_SIZE = 8 #how many samples are fed to the network per training step
seed = 23455 #fixed seed so the random data is reproducible
#create a random-number generator seeded with seed
rng = np.random.RandomState(seed)
#the generator returns a 32x2 matrix: 32 samples of (volume, weight) used as the input dataset
X = rng.rand(32,2)
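#rand() draws uniformly from [0,1), so roughly half of the 32 rows will satisfy x0 + x1 < 1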
#take each row (x0, x1) of the 32x2 matrix X; if the two entries sum to less than 1, label it 1, otherwise 0
#Y holds the labels (ground truth) for the input dataset: a hand-made rule marking each part 1 = pass, 0 = fail
Y = [[int(x0 + x1 < 1)] for (x0,x1) in X]
#the line above is equivalent to:
#Y = []
#for (x0,x1) in X:
#    if x0 + x1 < 1:
#        Y.append([1])
#    else:
#        Y.append([0])
print("X:\n",X)
print("Y:\n",Y)
#1 Define the network's inputs, parameters, and outputs, and describe the forward-propagation pass
x = tf.placeholder(tf.float32, shape=(None, 2)) #each input has two features (volume, weight); the number of samples is left open
y_ = tf.placeholder(tf.float32, shape=(None, 1)) #ground-truth labels, one element per label
w1 = tf.Variable(tf.random_normal([2,3], stddev=1, seed=1)) #in [2,3], 2 matches x and 3 is the number of hidden-layer neurons
w2 = tf.Variable(tf.random_normal([3,1], stddev=1, seed=1)) #in [3,1], 1 matches y and 3 is the number of hidden-layer neurons
#forward-propagation pass
a = tf.matmul(x, w1)
y = tf.matmul(a, w2)
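#shape check: x (N,2) times w1 (2,3) gives a (N,3); a (N,3) times w2 (3,1) gives y (N,1)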
#2 Define the loss function and the back-propagation method.
loss = tf.reduce_mean(tf.square(y-y_)) #mean squared error: average of (y - y_)^2 over the batch
train_step = tf.train.GradientDescentOptimizer(0.001).minimize(loss) #gradient descent with learning rate 0.001
#train_step = tf.train.MomentumOptimizer(0.001,0.9).minimize(loss) #Momentum optimizer
#train_step = tf.train.AdamOptimizer(0.001).minimize(loss) #Adam optimizer
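#Momentum keeps a running velocity (momentum 0.9 here) so successive updates reinforce a consistent direction;
#Adam additionally adapts the step size per parameter; either line is a drop-in replacement for the gradient-descent line above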
#3 Create a session and train for STEPS rounds
with tf.Session() as sess:
    init_op = tf.global_variables_initializer() #initialize all variables
    sess.run(init_op)
    #print the current (untrained) parameter values
    print("w1:\n", sess.run(w1))
    print("w2:\n", sess.run(w2))
    print("\n")
    #train the model
    STEPS = 3000 #train for 3000 rounds
    for i in range(STEPS):
        start = (i*BATCH_SIZE) % 32
        end = start + BATCH_SIZE
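        #with BATCH_SIZE=8: i=0 feeds rows 0:8, i=1 rows 8:16, i=2 rows 16:24, i=3 rows 24:32, then i=4 wraps back to rows 0:8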
        sess.run(train_step, feed_dict={x: X[start:end], y_: Y[start:end]})
        if i % 500 == 0:
            total_loss = sess.run(loss, feed_dict={x: X, y_: Y})
            print("After %d training step(s), loss on all data is %g" % (i, total_loss))
    #print the parameter values after training
    print("\n")
    print("w1:\n", sess.run(w1))
    print("w2:\n", sess.run(w2))