# ---------- Run the OPs ----------
with tf.Session() as sess:
    # An OP only produces (computes) its result when it is passed to sess.run
    res = sess.run(product)
    print(res)
    sess.run(init)
    print(sess.run(state))
    for _ in range(5):
        sess.run(update)
        print(sess.run(state))
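The OPs fetched above (product, state, update, init) are assumed to have been defined earlier in the notes. A minimal sketch of what such a graph could look like in TF1.x; the constants and names here are illustrative, not the original definitions:

import tensorflow as tf

m1 = tf.constant([[3, 3]])                 # 1x2 constant matrix
m2 = tf.constant([[2], [3]])               # 2x1 constant matrix
product = tf.matmul(m1, m2)                # matmul OP; its value is fetched with sess.run(product)

state = tf.Variable(0, name='counter')     # a Variable holds mutable state in the graph
update = tf.assign(state, state + 1)       # OP that writes state + 1 back into state
init = tf.global_variables_initializer()   # must be run before any Variable is read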
# Define the cost function OP
loss = tf.reduce_mean(tf.square(y - prediction))
# Define the optimization OP (one gradient-descent step)
train_step = tf.train.GradientDescentOptimizer(0.01).minimize(loss)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for _ in range(2000):
        # Each iteration updates the Variables behind prediction, lowering loss
        sess.run(train_step, feed_dict={x: x_data, y: y_data})
# At prediction time, only this is needed:
sess.run(prediction, feed_dict={x: x_data})
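The placeholders x and y, the prediction OP, and the data x_data and y_data are not defined in this excerpt. A self-contained linear-regression sketch that fits the code above; the generated data and the variables k and b are illustrative assumptions:

import numpy as np
import tensorflow as tf

# Illustrative data: y = 0.1x + 0.2 plus a little noise
x_data = np.random.rand(100).astype(np.float32)
y_data = 0.1 * x_data + 0.2 + np.random.normal(0, 0.02, 100).astype(np.float32)

x = tf.placeholder(tf.float32, [None])
y = tf.placeholder(tf.float32, [None])

k = tf.Variable(0.0)        # slope, learned
b = tf.Variable(0.0)        # intercept, learned
prediction = k * x + b

loss = tf.reduce_mean(tf.square(y - prediction))
train_step = tf.train.GradientDescentOptimizer(0.01).minimize(loss)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for _ in range(2000):
        sess.run(train_step, feed_dict={x: x_data, y: y_data})
    print(sess.run([k, b]))                              # roughly converges toward 0.1 and 0.2
    print(sess.run(prediction, feed_dict={x: x_data}))   # predictions for x_data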
Mini-batch training
batch_size = 100
n_batch = m // batch_size   # m: total number of training examples
#...
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    # Train for 2000 epochs
    for epoch in range(2000):
        # Each epoch consists of n_batch batches
        for batch in range(n_batch):
            # Fetch a batch of batch_size examples (see the sketch below for one way to write get_next_batch)
            batch_x, batch_y = get_next_batch()
            sess.run(train_step, feed_dict={x: batch_x, y: batch_y})
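get_next_batch() is not defined in the excerpt; in the classic MNIST tutorials this role is played by mnist.train.next_batch(batch_size). For plain in-memory arrays, one possible (hypothetical) helper looks like the sketch below; X_train and Y_train are assumed names:

import numpy as np

def make_batch_fn(X_train, Y_train, batch_size):
    """Return a get_next_batch() closure that cycles through shuffled data."""
    m = X_train.shape[0]
    state = {'perm': np.random.permutation(m), 'cursor': 0}

    def get_next_batch():
        # Reshuffle once the current pass over the data is exhausted
        if state['cursor'] + batch_size > m:
            state['perm'] = np.random.permutation(m)
            state['cursor'] = 0
        idx = state['perm'][state['cursor']:state['cursor'] + batch_size]
        state['cursor'] += batch_size
        return X_train[idx], Y_train[idx]

    return get_next_batch

# Usage: get_next_batch = make_batch_fn(X_train, Y_train, batch_size)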
with tf.Session() as sess:
    writer = tf.summary.FileWriter('dir', sess.graph)
    for epoch in range(2000):
        # ... run train_step for this epoch ...
        summary = sess.run(merged)
        writer.add_summary(summary, epoch)
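merged is not created in the excerpt; it is normally the result of tf.summary.merge_all() over summaries registered with tf.summary.scalar and related OPs. A hedged end-to-end sketch, reusing the illustrative loss, train_step, x, y, x_data, y_data from above and an assumed logs/ directory:

tf.summary.scalar('loss', loss)          # register a scalar summary for the loss
merged = tf.summary.merge_all()          # one OP that evaluates every registered summary

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    writer = tf.summary.FileWriter('logs/', sess.graph)
    for epoch in range(2000):
        sess.run(train_step, feed_dict={x: x_data, y: y_data})
        summary = sess.run(merged, feed_dict={x: x_data, y: y_data})
        writer.add_summary(summary, epoch)
    writer.close()
# Then inspect the curves with: tensorboard --logdir=logs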