This article shares a concrete TensorFlow implementation of the AlexNet convolutional neural network, for your reference. The details are as follows.
The construction of the AlexNet network was covered in an earlier post. This time the goal is not to train on data, but to measure the average time each batch spends in the forward pass and in the backward pass. When designing a network, classification accuracy matters, but so does computational speed; in tracking tasks especially, a network that is too deep can hurt real-time performance.
from datetime import datetime
import math
import time
import tensorflow as tf

batch_size = 32
num_batches = 100


def print_activations(t):
    # Print a layer's tensor name and its output shape.
    print(t.op.name, '', t.get_shape().as_list())


def inference(images):
    parameters = []

    # conv1
    with tf.name_scope('conv1') as scope:
        kernel = tf.Variable(tf.truncated_normal([11, 11, 3, 64], dtype=tf.float32, stddev=1e-1), name='weights')
        conv = tf.nn.conv2d(images, kernel, [1, 4, 4, 1], padding='SAME')
        biases = tf.Variable(tf.constant(0.0, shape=[64], dtype=tf.float32), trainable=True, name='biases')
        bias = tf.nn.bias_add(conv, biases)
        conv1 = tf.nn.relu(bias, name=scope)
        print_activations(conv1)
        parameters += [kernel, biases]

    lrn1 = tf.nn.lrn(conv1, 4, bias=1.0, alpha=0.001 / 9, beta=0.75, name='lrn1')
    pool1 = tf.nn.max_pool(lrn1, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='VALID', name='pool1')
    print_activations(pool1)

    # conv2
    with tf.name_scope('conv2') as scope:
        kernel = tf.Variable(tf.truncated_normal([5, 5, 64, 192], dtype=tf.float32, stddev=1e-1), name='weights')
        conv = tf.nn.conv2d(pool1, kernel, [1, 1, 1, 1], padding='SAME')
        biases = tf.Variable(tf.constant(0.0, shape=[192], dtype=tf.float32), trainable=True, name='biases')
        bias = tf.nn.bias_add(conv, biases)
        conv2 = tf.nn.relu(bias, name=scope)
        parameters += [kernel, biases]
        print_activations(conv2)

    lrn2 = tf.nn.lrn(conv2, 4, bias=1.0, alpha=0.001 / 9, beta=0.75, name='lrn2')
    pool2 = tf.nn.max_pool(lrn2, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='VALID', name='pool2')
    print_activations(pool2)

    # conv3
    with tf.name_scope('conv3') as scope:
        kernel = tf.Variable(tf.truncated_normal([3, 3, 192, 384], dtype=tf.float32, stddev=1e-1), name='weights')
        conv = tf.nn.conv2d(pool2, kernel, [1, 1, 1, 1], padding='SAME')
        biases = tf.Variable(tf.constant(0.0, shape=[384], dtype=tf.float32), trainable=True, name='biases')
        bias = tf.nn.bias_add(conv, biases)
        conv3 = tf.nn.relu(bias, name=scope)
        parameters += [kernel, biases]
        print_activations(conv3)

    # conv4
    with tf.name_scope('conv4') as scope:
        kernel = tf.Variable(tf.truncated_normal([3, 3, 384, 256], dtype=tf.float32, stddev=1e-1), name='weights')
        conv = tf.nn.conv2d(conv3, kernel, [1, 1, 1, 1], padding='SAME')
        biases = tf.Variable(tf.constant(0.0, shape=[256], dtype=tf.float32), trainable=True, name='biases')
        bias = tf.nn.bias_add(conv, biases)
        conv4 = tf.nn.relu(bias, name=scope)
        parameters += [kernel, biases]
        print_activations(conv4)

    # conv5
    with tf.name_scope('conv5') as scope:
        kernel = tf.Variable(tf.truncated_normal([3, 3, 256, 256], dtype=tf.float32, stddev=1e-1), name='weights')
        conv = tf.nn.conv2d(conv4, kernel, [1, 1, 1, 1], padding='SAME')
        biases = tf.Variable(tf.constant(0.0, shape=[256], dtype=tf.float32), trainable=True, name='biases')
        bias = tf.nn.bias_add(conv, biases)
        conv5 = tf.nn.relu(bias, name=scope)
        parameters += [kernel, biases]
        print_activations(conv5)

    pool5 = tf.nn.max_pool(conv5, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='VALID', name='pool5')
    print_activations(pool5)
    return pool5, parameters


def time_tensorflow_run(session, target, info_string):
    # Run `target` for num_batches iterations after a warm-up phase and
    # report the mean and standard deviation of the per-batch time.
    num_steps_burn_in = 10
    total_duration = 0.0
    total_duration_squared = 0.0
    for i in range(num_batches + num_steps_burn_in):
        start_time = time.time()
        _ = session.run(target)
        duration = time.time() - start_time
        if i >= num_steps_burn_in:
            if not i % 10:
                print('%s: step %d, duration = %.3f' % (datetime.now(), i - num_steps_burn_in, duration))
            total_duration += duration
            total_duration_squared += duration * duration
    mn = total_duration / num_batches
    vr = total_duration_squared / num_batches - mn * mn
    sd = math.sqrt(vr)
    print('%s: %s across %d steps, %.3f +/- %.3f sec / batch' % (datetime.now(), info_string, num_batches, mn, sd))


def run_benchmark():
    with tf.Graph().as_default():
        # Random dummy images stand in for real data; only speed is measured.
        image_size = 224
        images = tf.Variable(tf.random_normal([batch_size, image_size, image_size, 3], dtype=tf.float32, stddev=1e-1))
        pool5, parameters = inference(images)
        init = tf.global_variables_initializer()
        sess = tf.Session()
        sess.run(init)
        # Forward pass: evaluate pool5 only.
        time_tensorflow_run(sess, pool5, "Forward")
        # Backward pass: gradients of a dummy L2 objective w.r.t. all parameters.
        objective = tf.nn.l2_loss(pool5)
        grad = tf.gradients(objective, parameters)
        time_tensorflow_run(sess, grad, "Forward-backward")


run_benchmark()
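One note on newer TensorFlow versions: the script above targets the 1.x graph API (tf.Session, tf.truncated_normal, tf.gradients, tf.global_variables_initializer), which is not exposed at the top level of TensorFlow 2.x. A minimal sketch for running it under 2.x, assuming the tf.compat.v1 module that ships with 2.x, is to import the compatibility API as tf and disable the 2.x behaviors before the rest of the code:

# Sketch: run the 1.x-style benchmark on TensorFlow 2.x (assumes tf.compat.v1 is available).
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()  # restores graph mode, tf.Session, tf.truncated_normal, etc.

# ... then reuse the inference / time_tensorflow_run / run_benchmark code above
# unchanged and call run_benchmark() as before.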
All of the code here was explained in the earlier post; the only additions are the timing function and the function that prints each layer's name and output shape, so it should be easy to follow and I won't go over it again. On a GTX TITAN X, the forward pass takes roughly 0.024 s per batch and the forward-backward pass roughly 0.079 s. Go ahead and try it yourself.
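For reference, those per-batch times convert to throughput with a quick back-of-the-envelope calculation (batch_size = 32 as in the script; the times are simply the numbers quoted above):

# Rough throughput estimate from the quoted per-batch times (GTX TITAN X).
batch_size = 32
forward_time = 0.024            # sec per batch, forward only
forward_backward_time = 0.079   # sec per batch, forward + backward

print('forward:          ~%.0f images/sec' % (batch_size / forward_time))           # ~1333
print('forward-backward: ~%.0f images/sec' % (batch_size / forward_backward_time))  # ~405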
That is all for this article. I hope it is helpful for your studies, and I also hope you will continue to support 服务器之家.
Original article: https://blog.csdn.net/Felaim/article/details/68923725