After upgrading to 1.0, TensorFlow added several higher-level modules such as tf.layers, tf.metrics, and tf.losses, which make the code a bit more concise.
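To make that simplification concrete, here is a small illustrative comparison (added here, not part of the original post): the cross-entropy loss used later in this script, written the pre-1.0 way and with tf.losses. The placeholders are stand-ins for the label tensor and network output that the full script defines below.

import tensorflow as tf

# stand-ins for the real tensors defined later in the script (5 flower classes)
y_demo = tf.placeholder(tf.int32, shape=[None], name='labels_demo')
logits_demo = tf.placeholder(tf.float32, shape=[None, 5], name='logits_demo')

# pre-1.0 style: assemble the loss by hand
loss_manual = tf.reduce_mean(
    tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y_demo, logits=logits_demo))

# 1.0 style: tf.losses does the same in one call (the averaging is built in)
loss_simple = tf.losses.sparse_softmax_cross_entropy(labels=y_demo, logits=logits_demo)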
Task: flower classification
Version: TensorFlow 1.0
There are five classes of flowers in total, each stored in its own folder.
Without further ado, here is the code; hopefully it is easy to follow :)
# -*- coding: utf-8 -*-
from skimage import io, transform
import glob
import os
import tensorflow as tf
import numpy as np
import time

path = 'e:/flower/'

# resize all images to 100x100
w = 100
h = 100
c = 3

# read the images and build integer labels from the folder index
def read_img(path):
    cate = [path + x for x in os.listdir(path) if os.path.isdir(path + x)]
    imgs = []
    labels = []
    for idx, folder in enumerate(cate):
        for im in glob.glob(folder + '/*.jpg'):
            print('reading the image: %s' % (im))
            img = io.imread(im)
            img = transform.resize(img, (w, h))
            imgs.append(img)
            labels.append(idx)
    return np.asarray(imgs, np.float32), np.asarray(labels, np.int32)
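# Note: skimage's transform.resize converts uint8 images to floats scaled to
# [0, 1] by default, so the pixel values returned above are already normalized
# and no separate normalization step is needed.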
data, label = read_img(path)

# shuffle the samples
num_example = data.shape[0]
arr = np.arange(num_example)
np.random.shuffle(arr)
data = data[arr]
label = label[arr]

# split the data into a training set and a validation set
ratio = 0.8
s = int(num_example * ratio)
x_train = data[:s]
y_train = label[:s]
x_val = data[s:]
y_val = label[s:]
# -----------------build the network----------------------
# placeholders
x = tf.placeholder(tf.float32, shape=[None, w, h, c], name='x')
y_ = tf.placeholder(tf.int32, shape=[None, ], name='y_')

# first convolutional layer (100 -> 50)
conv1 = tf.layers.conv2d(
    inputs=x,
    filters=32,
    kernel_size=[5, 5],
    padding="same",
    activation=tf.nn.relu,
    kernel_initializer=tf.truncated_normal_initializer(stddev=0.01))
pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=[2, 2], strides=2)

# second convolutional layer (50 -> 25)
conv2 = tf.layers.conv2d(
    inputs=pool1,
    filters=64,
    kernel_size=[5, 5],
    padding="same",
    activation=tf.nn.relu,
    kernel_initializer=tf.truncated_normal_initializer(stddev=0.01))
pool2 = tf.layers.max_pooling2d(inputs=conv2, pool_size=[2, 2], strides=2)

# third convolutional layer (25 -> 12)
conv3 = tf.layers.conv2d(
    inputs=pool2,
    filters=128,
    kernel_size=[3, 3],
    padding="same",
    activation=tf.nn.relu,
    kernel_initializer=tf.truncated_normal_initializer(stddev=0.01))
pool3 = tf.layers.max_pooling2d(inputs=conv3, pool_size=[2, 2], strides=2)

# fourth convolutional layer (12 -> 6)
conv4 = tf.layers.conv2d(
    inputs=pool3,
    filters=128,
    kernel_size=[3, 3],
    padding="same",
    activation=tf.nn.relu,
    kernel_initializer=tf.truncated_normal_initializer(stddev=0.01))
pool4 = tf.layers.max_pooling2d(inputs=conv4, pool_size=[2, 2], strides=2)

re1 = tf.reshape(pool4, [-1, 6 * 6 * 128])
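# Note: the flatten size 6*6*128 follows from the four 2x2 poolings:
# 100 -> 50 -> 25 -> 12 -> 6 (each pooling halves and floors the size),
# with 128 feature maps coming out of conv4.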
# fully connected layers
dense1 = tf.layers.dense(inputs=re1,
                         units=1024,
                         activation=tf.nn.relu,
                         kernel_initializer=tf.truncated_normal_initializer(stddev=0.01),
                         kernel_regularizer=tf.contrib.layers.l2_regularizer(0.003))
dense2 = tf.layers.dense(inputs=dense1,
                         units=512,
                         activation=tf.nn.relu,
                         kernel_initializer=tf.truncated_normal_initializer(stddev=0.01),
                         kernel_regularizer=tf.contrib.layers.l2_regularizer(0.003))
logits = tf.layers.dense(inputs=dense2,
                         units=5,
                         activation=None,
                         kernel_initializer=tf.truncated_normal_initializer(stddev=0.01),
                         kernel_regularizer=tf.contrib.layers.l2_regularizer(0.003))
# ---------------------------end of the network---------------------------

# loss, optimizer and accuracy
loss = tf.losses.sparse_softmax_cross_entropy(labels=y_, logits=logits)
train_op = tf.train.AdamOptimizer(learning_rate=0.001).minimize(loss)
correct_prediction = tf.equal(tf.cast(tf.argmax(logits, 1), tf.int32), y_)
acc = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
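# Note: accuracy is computed here with plain ops; tf.metrics.accuracy is the
# streaming alternative, but it creates local variables that would also need
# tf.local_variables_initializer(), so the manual version keeps this script simpler.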
# helper that yields the data in mini-batches
def minibatches(inputs=None, targets=None, batch_size=None, shuffle=False):
    assert len(inputs) == len(targets)
    if shuffle:
        indices = np.arange(len(inputs))
        np.random.shuffle(indices)
    for start_idx in range(0, len(inputs) - batch_size + 1, batch_size):
        if shuffle:
            excerpt = indices[start_idx:start_idx + batch_size]
        else:
            excerpt = slice(start_idx, start_idx + batch_size)
        yield inputs[excerpt], targets[excerpt]
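# Note: because the loop stops at len(inputs) - batch_size + 1, any leftover
# samples that do not fill a whole batch are skipped in each epoch.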
# train and validate; n_epoch can be set larger
n_epoch = 10
batch_size = 64
sess = tf.InteractiveSession()
sess.run(tf.global_variables_initializer())
for epoch in range(n_epoch):
    start_time = time.time()

    # training
    train_loss, train_acc, n_batch = 0, 0, 0
    for x_train_a, y_train_a in minibatches(x_train, y_train, batch_size, shuffle=True):
        _, err, ac = sess.run([train_op, loss, acc], feed_dict={x: x_train_a, y_: y_train_a})
        train_loss += err; train_acc += ac; n_batch += 1
    print("   train loss: %f" % (train_loss / n_batch))
    print("   train acc: %f" % (train_acc / n_batch))

    # validation
    val_loss, val_acc, n_batch = 0, 0, 0
    for x_val_a, y_val_a in minibatches(x_val, y_val, batch_size, shuffle=False):
        err, ac = sess.run([loss, acc], feed_dict={x: x_val_a, y_: y_val_a})
        val_loss += err; val_acc += ac; n_batch += 1
    print("   validation loss: %f" % (val_loss / n_batch))
    print("   validation acc: %f" % (val_acc / n_batch))

sess.close()
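The script stops once training finishes, so the weights are not saved anywhere. If you want to keep the trained model and classify a new picture, a minimal sketch along the following lines should work; it is an addition to the original code, assumes it is placed right after the training loop (before sess.close()), and the checkpoint and image paths are placeholders.

# sketch: save the trained weights and classify one new image
# (place after the training loop, before sess.close(); paths are placeholders)
saver = tf.train.Saver()
saver.save(sess, 'e:/flower/model.ckpt')

new_img = transform.resize(io.imread('e:/flower/test.jpg'), (w, h))
pred = sess.run(tf.argmax(logits, 1), feed_dict={x: [new_img]})
print('predicted class index: %d' % pred[0])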
That's all for this article. I hope it helps with your learning, and thanks for supporting 服务器之家.
Original article: http://www.cnblogs.com/denny402/p/6931338.html