For the basics of TensorFlow, see these tutorials:
TensorFlow官方文档中文版 — Basic Usage (the Chinese edition of the official TensorFlow documentation),
and 10 minutes Practical TensorFlow Tutorial for quick learners.
(The second tutorial is somewhat better.)
Because of API changes between TensorFlow versions, some of the code in the original tutorials no longer runs; below are the basic-usage examples with the necessary fixes applied.
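(A side note beyond the original post, which targets TensorFlow 1.x: under TensorFlow 2.x the Session-based code below no longer runs as-is, but it can still be executed through the v1 compatibility module. A minimal sketch, assuming TensorFlow 2.x is installed:)
import tensorflow as tf
# Restore the graph/Session behaviour that the 1.x examples below rely on
tf.compat.v1.disable_eager_execution()
hello = tf.constant('hello, TensorFlow!')
sess = tf.compat.v1.Session()
print(sess.run(hello))   # b'hello, TensorFlow!'
sess.close()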
I. TensorFlow官方文档中文版 — Basic Usage
1. add_operation
import tensorflow as tf
hello=tf.constant('hello, TensorFlow!')
sess=tf.Session()
print(sess.run(hello))
b'hello, TensorFlow!'
a=tf.constant(10)
b=tf.constant(32)
print(sess.run(a+b))
42
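The '+' above is shorthand for the tf.add op; the explicit form gives the same result (a tiny sketch, not in the original tutorial):
print(sess.run(tf.add(a, b)))
# 42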
2. counter
import tensorflow as tf
# Create a Variable, initialized to the scalar value 0
state=tf.Variable(0, name='counter')
# Create an op whose effect is to increment state by 1
one=tf.constant(1)
new_value=tf.add(state, one)
update=tf.assign(state, new_value)
# After the graph is launched, variables must first be initialized by an 'init' op,
# so an 'init' op has to be added to the graph first.
init_op=tf.global_variables_initializer()
# Launch the graph and run the ops
with tf.Session() as sess:
    # Run the 'init' op
    sess.run(init_op)
    # Print the initial value of 'state'
    print(sess.run(state))
    # Run the update op and print 'state' after each step
    for _ in range(3):
        sess.run(update)
        print(sess.run(state))
0
1
2
3
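As a side note (not part of the original tutorial), the tf.add/tf.assign pair can be written more compactly with tf.assign_add, which increments the variable in place:
import tensorflow as tf
state = tf.Variable(0, name='counter')
# One op that adds 1 to 'state' and assigns the result back
update = tf.assign_add(state, tf.constant(1))
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(state))       # 0
    for _ in range(3):
        sess.run(update)
        print(sess.run(state))   # 1, 2, 3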
3. matrix_multi
import tensorflow as tf
# Create a constant op that produces a 1x2 matrix. The op is added as a node to the
# default graph; the constructor's return value represents the output of the constant op.
matrix1=tf.constant([[3, 3]])
# Create another constant op that produces a 2x1 matrix
matrix2=tf.constant([[2], [2]])
# Create a matmul op that takes 'matrix1' and 'matrix2' as inputs.
# Its return value 'product' represents the result of the matrix multiplication.
product = tf.matmul(matrix1, matrix2)
# With the construction phase done, launch the graph. The first step is to create a
# Session object. A Session must be closed after use to release its resources; besides
# calling close() explicitly, a 'with' block can close it automatically (an explicit-close
# variant is sketched after this example).
with tf.Session() as sess:
    result=sess.run([product])
    print(result)
[array([[12]], dtype=int32)]
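For completeness, the same run with an explicit close() instead of the 'with' block, as mentioned in the comment above (a minimal sketch, not code from the original tutorial):
sess = tf.Session()
result = sess.run([product])
print(result)
# Explicitly release the session's resources when done
sess.close()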
4. fetch_feed
import tensorflow as tf
input1=tf.constant(3.0)
input2=tf.constant(2.0)
input3=tf.constant(5.0)
intermed=tf.add(input2, input3)
mul=tf.multiply(input1, intermed)
# Fetch: pass several tensors to run() to retrieve multiple values in one call
with tf.Session() as sess:
    result=sess.run([mul, intermed])
    print(result)
[21.0, 7.0]
# Feed: placeholders receive concrete values at run time via feed_dict
input1=tf.placeholder(tf.float32)
input2=tf.placeholder(tf.float32)
output=tf.multiply(input1, input2)
with tf.Session() as sess:
    print(sess.run([output], feed_dict={input1:[7.], input2:[2.]}))
[array([ 14.], dtype=float32)]
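A placeholder can also be declared with an explicit shape so that TensorFlow checks the values fed in; a short sketch (not from either tutorial):
import tensorflow as tf
# Assumed fixed-shape placeholder: feeding a value of a different shape raises an error
x = tf.placeholder(tf.float32, shape=[2, 3])
row_sum = tf.reduce_sum(x, axis=1)
with tf.Session() as sess:
    print(sess.run(row_sum, feed_dict={x: [[1., 2., 3.], [4., 5., 6.]]}))
    # [ 6. 15.]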
II. 10 minutes Practical TensorFlow Tutorial for quick learners
Basics of TensorFlow
import tensorflow as tf
#you can access this graph by:
graph=tf.get_default_graph()
#you can get the list of all operations by typing this:
#graph.get_operations()
for op in graph.get_operations():
    print(op.name)
Constants
a=tf.constant(1.0)
a
<tf.Tensor 'Const:0' shape=() dtype=float32>
print(a)
Tensor("Const:0", shape=(), dtype=float32)
with tf.Session() as sess:
    print(sess.run(a))
1.0
Variables
b=tf.Variable(2.0, name="test_var")
b
<tensorflow.python.ops.variables.Variable at 0x7fb8b90ce4e0>
init_op=tf.global_variables_initializer()
with tf.Session() as sess:
    sess.run(init_op)
    print(sess.run(b))
2.0
graph=tf.get_default_graph()
for op in graph.get_operations():
    print(op.name)
Const
test_var/initial_value
test_var
test_var/Assign
test_var/read
init
Placeholders
a=tf.placeholder("float")
b=tf.placeholder("float")
y=tf.multiply(a, b)
feed_dict={a:2, b:3}
with tf.Session() as sess:
    print(sess.run(y, feed_dict))
6.0
TensorFlow tutorial with linear regression
#create a random normal distribution:
w=tf.Variable(tf.random_normal([784, 10], stddev=0.01))
# tf.reduce_mean calculates the mean of an array:
b=tf.Variable([10, 20, 30, 40, 50, 60], name='t')
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(tf.reduce_mean(b)))
35
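tf.reduce_mean can also average along a chosen axis; a short sketch (not in the original tutorial):
m = tf.constant([[1., 2.], [3., 4.]])
with tf.Session() as sess:
    print(sess.run(tf.reduce_mean(m)))           # 2.5 -> mean over all elements
    print(sess.run(tf.reduce_mean(m, axis=0)))   # [2. 3.] -> column means
    print(sess.run(tf.reduce_mean(m, axis=1)))   # [1.5 3.5] -> row means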
# ArgMax: very similar to Python's argmax (numpy.argmax)
a=[[0.1, 0.2, 0.3],
   [20, 2, 3]]
b=tf.Variable(a, name='b')
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(tf.argmax(b, 1)))
[2 0]
#(a)Creating training data
import tensorflow as tf
import numpy as np
trainX=np.linspace(-1, 1, 101)
trainY=3*trainX+np.random.randn(*trainX.shape)*0.33
#(b)Placeholders
X=tf.placeholder("float")
Y=tf.placeholder("float")
#(c)Modeling
w=tf.Variable(0.0, name="weights")
y_model = tf.multiply(X, w)
cost=tf.pow(Y-y_model, 2)
train_op=tf.train.GradientDescentOptimizer(0.01).minimize(cost)
#(d)Training
init=tf.global_variables_initializer()
with tf.Session() as sess:
    sess.run(init)
    for i in range(100):
        for (x, y) in zip(trainX, trainY):
            sess.run(train_op, feed_dict={X:x, Y:y})
    print(sess.run(w))
2.96799
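The learned weight comes out close to the true slope of 3 used to generate the data. As a quick check (a sketch, not part of the original tutorial), the fitted model can also be evaluated on the training inputs by re-training in a fresh session and feeding trainX through y_model:
with tf.Session() as sess:
    sess.run(init)
    for i in range(100):
        for (x, y) in zip(trainX, trainY):
            sess.run(train_op, feed_dict={X:x, Y:y})
    # Predictions of the fitted model; they should lie close to 3*trainX
    print(sess.run(y_model, feed_dict={X: trainX})[:5])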
#(e)Exercise
with tf.Session() as sess:
    # Running 'init' again re-assigns the initial value 0.0; the trained value
    # lived only inside the previous session and is gone once it closed.
    sess.run(init)
    print(sess.run(w))
0.0