I wrote a small multilayer perceptron, updated its weights with backpropagation (plain gradient descent), and used it to fit a sine curve. The fit is passable.
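For reference, the per-sample updates the code below implements are the standard backprop rules for squared error with sigmoid activations ($\odot$ is elementwise multiplication, $\eta$ the learning rate, $\lambda$ the weight-decay coefficient):

$$\delta^{(L)} = (a^{(L)} - y) \odot a^{(L)} \odot (1 - a^{(L)}), \qquad \delta^{(l)} = \big(W^{(l+1)\top} \delta^{(l+1)}\big) \odot a^{(l)} \odot (1 - a^{(l)})$$

$$W^{(l)} \leftarrow W^{(l)} - \eta\big(\delta^{(l)}\, a^{(l-1)\top} + \lambda W^{(l)}\big), \qquad b^{(l)} \leftarrow b^{(l)} - \eta\, \delta^{(l)}$$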
python" id="highlighter_728048">
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
|
```python
# -*- coding: utf-8 -*-
import numpy as np
import matplotlib.pyplot as plt


def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))


class MLP(object):
    def __init__(self, lr=0.1, lda=0.0, te=1e-5, epoch=100, size=None):
        self.learning_rate = lr      # gradient-descent step size
        self.lambda_ = lda           # L2 weight-decay coefficient
        self.threshold_error = te    # stop once mean error falls below this
        self.max_epoch = epoch
        self.size = size             # layer widths, e.g. [1, 6, 6, 6, 1]
        self.W = []
        self.b = []
        self.init()

    def init(self):
        # Initialize each layer's weights and biases uniformly in [-0.5, 0.5).
        for i in range(len(self.size) - 1):
            self.W.append(np.mat(np.random.uniform(
                -0.5, 0.5, size=(self.size[i + 1], self.size[i]))))
            self.b.append(np.mat(np.random.uniform(
                -0.5, 0.5, size=(self.size[i + 1], 1))))

    def forward_propagation(self, item=None):
        # Return the activations of every layer, input included.
        a = [item]
        for w, b in zip(self.W, self.b):
            a.append(sigmoid(w * a[-1] + b))
        return a

    def back_propagation(self, label=None, a=None):
        # Output-layer delta for squared error with a sigmoid output:
        # delta_L = (a_L - y) * a_L * (1 - a_L), elementwise.
        delta = [np.multiply(a[-1] - label, np.multiply(a[-1], 1.0 - a[-1]))]
        # Propagate the deltas backwards through the hidden layers.
        for i in range(len(self.W) - 1):
            sig_prime = np.multiply(a[-2 - i], 1.0 - a[-2 - i])
            delta.append(np.multiply(self.W[-1 - i].T * delta[-1], sig_prime))
        # Gradient-descent step with L2 regularization on the weights.
        for j in range(len(delta)):
            grad_w = delta[j] * a[-2 - j].T
            self.W[-1 - j] -= self.learning_rate * (grad_w + self.lambda_ * self.W[-1 - j])
            self.b[-1 - j] -= self.learning_rate * delta[j]
        # Return the squared error of this sample (a 1x1 matrix).
        return 0.5 * np.multiply(a[-1] - label, a[-1] - label)

    def train(self, input_=None, target=None, show=10):
        # Plain stochastic gradient descent: one sample at a time.
        for ep in range(self.max_epoch):
            errors = []
            for idx in range(input_.shape[1]):
                a = self.forward_propagation(input_[:, idx])
                e = self.back_propagation(target[:, idx], a)
                errors.append(e[0, 0])
            mean_err = sum(errors) / len(errors)
            if mean_err < self.threshold_error:
                print("finish {0}:".format(ep), mean_err)
                return
            elif ep % show == 0:
                print("epoch {0}:".format(ep), mean_err)

    def sim(self, inp=None):
        # One forward pass; return the network's output.
        return self.forward_propagation(item=inp)[-1]


if __name__ == "__main__":
    tt = np.mat(np.arange(0, 6.28, 0.01))
    # Scale sin(x) from [-1, 1] into [0, 1] so the sigmoid output can fit it.
    labels = np.mat(np.sin(tt) * 0.5 + 0.5)
    model = MLP(lr=0.2, lda=0.0, te=1e-5, epoch=500, size=[1, 6, 6, 6, 1])
    print(tt.shape, labels.shape)
    print(len(model.W), len(model.b))
    model.train(input_=tt, target=labels, show=10)
    sims = [model.sim(tt[:, idx])[0, 0] for idx in range(tt.shape[1])]
    xx = tt.tolist()[0]
    plt.figure()
    plt.plot(xx, labels.tolist()[0], xx, sims, 'r')
    plt.show()
```
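Because the output neuron is a sigmoid, the script fits 0.5*sin(x)+0.5 rather than sin(x) itself. A quick way to sanity-check the fit numerically is to undo that scaling and compute an error against the true sine. This little check is my addition, not part of the original post; it assumes it runs after `model.train(...)`, e.g. appended to the `__main__` block:

```python
# Evaluate the trained model and map its [0, 1] output back to [-1, 1].
preds = np.array([model.sim(tt[:, i])[0, 0] for i in range(tt.shape[1])])
sine_hat = 2.0 * preds - 1.0                 # undo the 0.5*sin(x)+0.5 scaling
sine_true = np.asarray(np.sin(tt)).ravel()

mse = np.mean((sine_hat - sine_true) ** 2)   # hypothetical quality check
print("MSE against sin(x):", mse)
```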
Result plot: the figure produced by `plt.show()` above, with the scaled sine target and the network's fit (in red) overlaid.
That's all for this post; I hope it helps with your study of backpropagation.
Original article: https://blog.csdn.net/u013781175/article/details/48313903