tensorflow的基本用法(一)

时间:2023-01-31 11:27:17

一.session
session会话:
指定到某个神经网络

# encoding:utf-8
import tensorflow as tf
import numpy as np

# Multiply two constant matrices: matrix1 is 1x2, matrix2 is 2x1,
# so the product is a 1x1 matrix [[12]].
# BUG FIX: the constants were defined as "martrix1/martrix2" but used as
# "matrix1/matrix2" in tf.matmul, which raised a NameError.
matrix1 = tf.constant([[3, 3]])
matrix2 = tf.constant([[2], [2]])
# tf.matmul is the graph-level equivalent of np.dot().
product = tf.matmul(matrix1, matrix2)

# A Session is required to actually execute the graph and fetch the result.
sess = tf.Session()
result = sess.run(product)
print(result)
sess.close()

二.variable
使用变量实现一个简单的计数器

# encoding:utf-8
import tensorflow as tf

# A simple counter built from a TensorFlow variable.
state = tf.Variable(0, name='counter')
print(state.name)  # prints "counter:0"

one = tf.constant(1)  # the increment step (a constant)
new_value = tf.add(state, one)
update = tf.assign(state, new_value)  # assigns new_value back into state

# CONSISTENCY FIX: initialize_all_variables() is deprecated (the pasted log
# below says so); use tf.global_variables_initializer() as the TensorBoard
# example later in this post already does.
init = tf.global_variables_initializer()

with tf.Session() as sess:
    sess.run(init)  # variables must be initialized before first use
    for _ in range(3):
        sess.run(update)
        print(sess.run(state))  # prints 1, 2, 3

结果:

counter:0
WARNING:tensorflow:From /home/sulei/tensorflow-workspace/variable.py:10: initialize_all_variables (from tensorflow.python.ops.variables) is deprecated and will be removed after 2017-03-02.
Instructions for updating:
Use `tf.global_variables_initializer` instead.
2017-06-27 11:23:49.419560: W tensorflow/core/platform/cpu_feature_guard.cc:45] The TensorFlow library wasn't compiled to use SSE4.1 instructions, but these are available on your machine and could speed up CPU computations.
2017-06-27 11:23:49.419613: W tensorflow/core/platform/cpu_feature_guard.cc:45] The TensorFlow library wasn't compiled to use SSE4.2 instructions, but these are available on your machine and could speed up CPU computations.
2017-06-27 11:23:49.419631: W tensorflow/core/platform/cpu_feature_guard.cc:45] The TensorFlow library wasn't compiled to use AVX instructions, but these are available on your machine and could speed up CPU computations.
1
2
3

三.Feeds的传入值:
tf.placeholder() 为这些操作创建占位符
外部对神经网络的传值
出现的错误
AttributeError: module ‘tensorflow’ has no attribute ‘mul’
解决的方法:
将mul改成multiply,则可以使用

import tensorflow as tf

# Placeholders let values be fed into the graph from outside at run time.
input1 = tf.placeholder(tf.float32)
input2 = tf.placeholder(tf.float32)
# tf.mul was removed; tf.multiply is the current element-wise product.
# Also fixed the misspelled local name "ouput" -> "output".
output = tf.multiply(input1, input2)

with tf.Session() as sess:
    # feed_dict maps each placeholder to a concrete value for this run.
    print(sess.run(output, feed_dict={input1: [7.], input2: [2.]}))
/usr/bin/python3.5 /home/sulei/tensorflow-workspace/feeds.py
2017-06-27 13:43:13.138963: W tensorflow/core/platform/cpu_feature_guard.cc:45] The TensorFlow library wasn't compiled to use SSE4.1 instructions, but these are available on your machine and could speed up CPU computations.
2017-06-27 13:43:13.139033: W tensorflow/core/platform/cpu_feature_guard.cc:45] The TensorFlow library wasn't compiled to use SSE4.2 instructions, but these are available on your machine and could speed up CPU computations.
2017-06-27 13:43:13.139056: W tensorflow/core/platform/cpu_feature_guard.cc:45] The TensorFlow library wasn't compiled to use AVX instructions, but these are available on your machine and could speed up CPU computations.
[ 14.]

Process finished with exit code 0

四、激励函数
激励方程:
tensorflow的基本用法(一)
使某一部分的神经元先激励
Activate Function的使用方法:参考链接
五.定义激励函数和添加一个神经层以及建造神经网络

import tensorflow as tf
import numpy as np
def add_layer(inputs, in_size, out_size, activation_function=None):
    """Append one fully connected layer and return its output tensor.

    inputs: tensor feeding the layer.
    in_size / out_size: number of input / output units.
    activation_function: optional nonlinearity; None keeps the layer linear.
    """
    weights = tf.Variable(tf.random_normal([in_size, out_size]))
    # Biases are initialized slightly above zero (0.1) rather than at zero.
    biases = tf.Variable(tf.zeros([1, out_size]) + 0.1)
    linear_out = tf.matmul(inputs, weights) + biases
    if activation_function is None:
        return linear_out
    return activation_function(linear_out)

# Training data: y = x^2 - 0.5 plus Gaussian noise, x in [-1, 1], shape (300, 1).
x_data = np.linspace(-1, 1, 300)[:, np.newaxis]
noise = np.random.normal(0, 0.05, x_data.shape)
y_data = np.square(x_data) - 0.5 + noise

# None in the shape allows an arbitrary batch size.
xs = tf.placeholder(tf.float32, [None, 1])
ys = tf.placeholder(tf.float32, [None, 1])
# input layer -> hidden layer (10 units, ReLU) -> output layer (1 unit, linear)
l1 = add_layer(xs, 1, 10, activation_function=tf.nn.relu)
prediction = add_layer(l1, 10, 1, activation_function=None)

# Mean squared error over the batch.
loss = tf.reduce_mean(tf.reduce_sum(tf.square(ys - prediction),
                                    reduction_indices=[1]))
train_step = tf.train.GradientDescentOptimizer(0.1).minimize(loss)

# FIX: initialize_all_variables() is deprecated; use the current API.
init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)
for i in range(1000):
    sess.run(train_step, feed_dict={xs: x_data, ys: y_data})
    # BUG FIX: "if i % 50:" printed on every step EXCEPT multiples of 50;
    # the intent is to report the loss once every 50 steps.
    if i % 50 == 0:
        print(sess.run(loss, feed_dict={xs: x_data, ys: y_data}))

结果:

loss的值会越来越小

六.可视化

import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
def add_layer(inputs, in_size, out_size, activation_function=None):
    """Append one fully connected layer and return its output tensor.

    inputs: tensor feeding the layer.
    in_size / out_size: number of input / output units.
    activation_function: optional nonlinearity; None keeps the layer linear.
    """
    weights = tf.Variable(tf.random_normal([in_size, out_size]))
    # Biases are initialized slightly above zero (0.1) rather than at zero.
    biases = tf.Variable(tf.zeros([1, out_size]) + 0.1)
    linear_out = tf.matmul(inputs, weights) + biases
    if activation_function is None:
        return linear_out
    return activation_function(linear_out)

# Training data: y = x^2 - 0.5 plus Gaussian noise, x in [-1, 1], shape (300, 1).
x_data = np.linspace(-1, 1, 300)[:, np.newaxis]
noise = np.random.normal(0, 0.05, x_data.shape)
y_data = np.square(x_data) - 0.5 + noise

xs = tf.placeholder(tf.float32, [None, 1])
ys = tf.placeholder(tf.float32, [None, 1])
# input layer -> hidden layer (10 units, ReLU) -> output layer (1 unit, linear)
l1 = add_layer(xs, 1, 10, activation_function=tf.nn.relu)
prediction = add_layer(l1, 10, 1, activation_function=None)

# Mean squared error over the batch.
loss = tf.reduce_mean(tf.reduce_sum(tf.square(ys - prediction),
                                    reduction_indices=[1]))
train_step = tf.train.GradientDescentOptimizer(0.1).minimize(loss)

# FIX: initialize_all_variables() is deprecated; use the current API.
init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)

# Static scatter plot of the raw training data.
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.scatter(x_data, y_data)
plt.show()  # NOTE: blocks until the figure window is closed

for i in range(1000):
    sess.run(train_step, feed_dict={xs: x_data, ys: y_data})
    # BUG FIX: "if i % 50:" fired on every step EXCEPT multiples of 50;
    # the intent is to report the loss once every 50 steps.
    if i % 50 == 0:
        print(sess.run(loss, feed_dict={xs: x_data, ys: y_data}))

tensorflow的基本用法(一)

动态的可视化的模型趋近:

import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
def add_layer(inputs, in_size, out_size, activation_function=None):
    """Append one fully connected layer and return its output tensor.

    inputs: tensor feeding the layer.
    in_size / out_size: number of input / output units.
    activation_function: optional nonlinearity; None keeps the layer linear.
    """
    weights = tf.Variable(tf.random_normal([in_size, out_size]))
    # Biases are initialized slightly above zero (0.1) rather than at zero.
    biases = tf.Variable(tf.zeros([1, out_size]) + 0.1)
    linear_out = tf.matmul(inputs, weights) + biases
    if activation_function is None:
        return linear_out
    return activation_function(linear_out)

# Training data: y = x^2 - 0.5 plus Gaussian noise, x in [-1, 1], shape (300, 1).
x_data = np.linspace(-1, 1, 300)[:, np.newaxis]
noise = np.random.normal(0, 0.05, x_data.shape)
y_data = np.square(x_data) - 0.5 + noise

xs = tf.placeholder(tf.float32, [None, 1])
ys = tf.placeholder(tf.float32, [None, 1])
# input layer -> hidden layer (10 units, ReLU) -> output layer (1 unit, linear)
l1 = add_layer(xs, 1, 10, activation_function=tf.nn.relu)
prediction = add_layer(l1, 10, 1, activation_function=None)

# Mean squared error over the batch.
loss = tf.reduce_mean(tf.reduce_sum(tf.square(ys - prediction),
                                    reduction_indices=[1]))
train_step = tf.train.GradientDescentOptimizer(0.1).minimize(loss)

# FIX: initialize_all_variables() is deprecated; use the current API.
init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)

fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.scatter(x_data, y_data)
plt.ion()   # interactive mode: plt.show() no longer blocks
plt.show()
for i in range(1000):
    sess.run(train_step, feed_dict={xs: x_data, ys: y_data})
    # BUG FIX: "if i % 50:" redrew on every step EXCEPT multiples of 50;
    # the intent is to redraw the fitted curve once every 50 steps.
    if i % 50 == 0:
        try:
            # Erase the previous fitted curve before drawing the new one.
            # On the first pass "lines" is not yet defined (NameError) --
            # deliberately swallowed by this best-effort except.
            ax.lines.remove(lines[0])
        except Exception:
            pass
        prediction_value = sess.run(prediction, feed_dict={xs: x_data})
        lines = ax.plot(x_data, prediction_value, 'r-', lw=5)  # fitted curve
        plt.pause(0.1)  # pause 0.1 s so the redraw is visible

tensorflow的基本用法(一)
Tensorboard 可视化好帮手

import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt #导入可视化模块

def add_layer(inputs, in_size, out_size, activation_function=None):
    # Append one fully connected layer and return its output tensor.
    # activation_function=None means no nonlinearity, i.e. a linear layer.
    # Each name_scope groups the ops so they collapse into one node in the
    # TensorBoard graph view.
    with tf.name_scope('layer'):
        with tf.name_scope('weights'):
            Weights = tf.Variable(tf.random_normal([in_size, out_size]), name = 'W')
        with tf.name_scope('biases'):
            biases = tf.Variable(tf.zeros([1, out_size]) + 0.1, name = 'b')  # biases are best kept nonzero, hence the +0.1
        with tf.name_scope('Wx_plus_b'):
            Wx_plus_b = tf.matmul(inputs, Weights) + biases
        if activation_function is None:
            outputs = Wx_plus_b
        else:
            outputs = activation_function(Wx_plus_b)
        return outputs

# Placeholders for the network inputs, grouped under one "inputs" node
# for the TensorBoard graph view.
with tf.name_scope('inputs'):
    xs = tf.placeholder(tf.float32,[None,1], name = "x_input")
    # None allows any batch size; x_data is a 300x1 matrix, hence [None, 1].
    ys = tf.placeholder(tf.float32,[None,1], name = 'y_input')

# Hidden layer: 1 input unit -> 10 units with ReLU activation.
l1 = add_layer(xs, 1, 10, activation_function = tf.nn.relu)
# Output layer: takes the hidden layer l1, 10 units -> 1 unit (matches y_data).
prediction = add_layer(l1, 10, 1, activation_function = None)

# Mean squared error loss, named so it shows up in TensorBoard.
with tf.name_scope('loss'):
    loss = tf.reduce_mean(tf.reduce_sum(tf.square(ys - prediction),
                        reduction_indices = [1]))
with tf.name_scope('train'):
    train_step = tf.train.GradientDescentOptimizer(0.1).minimize(loss)

init = tf.global_variables_initializer()
# Running tf.initialize_all_variables() warns that the new name is
# tf.global_variables_initializer().
sess = tf.Session()
writer = tf.summary.FileWriter("/home/sulei/tensorflow-workspace/logs/",sess.graph)
#writer = tf.summary.FileWriter("/*****/",sess.graph)
# The graph is first written to a file on disk, then loaded in the browser;
# the quoted string is the directory where that file is stored.
# The tutorial video used tf.train.SummaryWriter(), which has since been
# renamed to tf.summary.FileWriter().
sess.run(init)

之后在events.out.tfevents.1499002324的文件夹之下使用如下的命令

sulei@sulei:~/tensorflow-workspace/logs$ tensorboard --logdir='/home/sulei/tensorflow-workspace/logs/'

展示的结果:

sulei@sulei:~/tensorflow-workspace/logs$ tensorboard --logdir='/home/sulei/tenrflow-workspace/logs/'
Starting TensorBoard 54 at http://sulei:6006
(Press CTRL+C to quit)

打开链接:http://sulei:6006