Theano Logistic Regression

Date: 2024-11-11 11:36:14

Principle

For the derivation of logistic regression, see this article: http://blog.****.net/zouxy09/article/details/20319673, which covers the derivation, gradient descent, and Python source code. It is fairly long, so you can skip straight to the core part.
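To recap that core part: binary logistic regression models p(y=1|x) = sigmoid(w·x + b) and minimizes the negative log-likelihood by gradient descent, where the gradient with respect to w is the average of (p - y)·x over the data. A minimal NumPy sketch of that update (function and variable names here are my own, not from the linked article):

import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

def fit_logistic(X, y, lr=0.1, n_iters=1000):
    # X: (n_samples, n_features), y: (n_samples,) with values in {0, 1}
    w = np.zeros(X.shape[1])
    b = 0.0
    for _ in range(n_iters):
        p = sigmoid(X.dot(w) + b)          # predicted probabilities
        grad_w = X.T.dot(p - y) / len(y)   # gradient of the mean NLL w.r.t. w
        grad_b = np.mean(p - y)            # gradient w.r.t. b
        w -= lr * grad_w
        b -= lr * grad_b
    return w, b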

One addition to that article: it lacks any discussion of regularization.

For that, see this answer on Zhihu, which is fairly complete:

https://www.zhihu.com/question/35508851/answer/63093225
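Applied to the Theano code below, L2 regularization amounts to adding a penalty on the squared weights to the cost before taking gradients. A minimal sketch (the l2_reg coefficient and its value are illustrative, not part of the original code):

import theano.tensor as T

# inside sgd_optimization(), after building `classifier` (sketch):
l2_reg = 0.01  # illustrative regularization strength
cost = (classifier.negative_log_likelihood(y)
        + l2_reg * T.sum(classifier.W ** 2))
# g_W and g_b are then taken w.r.t. this regularized cost as before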

Theano Code

#!/usr/bin/env python
# -*- encoding:utf-8 -*-
'''
Written by Vincent.Y,
mainly modified from the deep learning tutorial.
'''
import numpy as np
import theano
import theano.tensor as T
from theano import function
from sklearn.datasets import make_moons
import matplotlib.pyplot as plt


class LogisticRegression():
    def __init__(self, X, n_in, n_out):
        # weight matrix W (n_in x n_out), initialized to zeros
        self.W = theano.shared(
            value=np.zeros(
                (n_in, n_out),
                dtype=theano.config.floatX
            ),
            name='W',
            borrow=True
        )
        # bias vector b (n_out,), initialized to zeros
        self.b = theano.shared(
            value=np.zeros(
                (n_out,),
                dtype=theano.config.floatX
            ),
            name='b',
            borrow=True
        )
        # class-membership probabilities: softmax over the linear scores
        self.p_y_given_x = T.nnet.softmax(T.dot(X, self.W) + self.b)
        # predicted class: the index of the highest probability
        self.y_pred = T.argmax(self.p_y_given_x, axis=1)
        self.params = [self.W, self.b]
        self.X = X

    def negative_log_likelihood(self, y):
        # mean negative log-probability of the correct class
        return -T.mean(T.log(self.p_y_given_x)[T.arange(y.shape[0]), y])

    def errors(self, y):
        if y.ndim != self.y_pred.ndim:
            raise TypeError(
                'y should have the same shape as self.y_pred',
                ('y', y.type, 'y_pred', self.y_pred.type)
            )
        if y.dtype.startswith('int'):
            # fraction of misclassified examples
            return T.mean(T.neq(self.y_pred, y))
        else:
            raise NotImplementedError()


def load_data():
    # we generate data from sklearn
    np.random.seed(0)
    X, y = make_moons(800, noise=0.20)
    # cast features to Theano's float type so this also runs with floatX=float32
    X = X.astype(theano.config.floatX)
    print("generated data, X shape: %s" % (X.shape,))
    # return train and test sets
    return [(X[0:600], y[0:600]), (X[600:800], y[600:800])]


def sgd_optimization(learning_rate=0.12, n_epochs=300):
    datasets = load_data()
    train_set_x, train_set_y = datasets[0]
    test_set_x, test_set_y = datasets[1]

    # unused here; the deep learning tutorial uses it to index minibatches
    index = T.lscalar()
    x = T.matrix('x')
    y = T.lvector('y')

    classifier = LogisticRegression(X=x, n_in=2, n_out=2)
    cost = classifier.negative_log_likelihood(y)

    test_model = function(
        inputs=[x, y],
        outputs=classifier.errors(y)
    )

    # gradients of the cost w.r.t. the parameters
    g_W = T.grad(cost=cost, wrt=classifier.W)
    g_b = T.grad(cost=cost, wrt=classifier.b)
    # plain gradient-descent updates
    updates = [(classifier.W, classifier.W - learning_rate * g_W),
               (classifier.b, classifier.b - learning_rate * g_b)]

    train_model = function(
        inputs=[x, y],
        outputs=classifier.errors(y),
        updates=updates
    )

    epoch = 0
    while epoch < n_epochs:
        epoch = epoch + 1
        train_error = train_model(train_set_x, train_set_y)
        test_error = test_model(test_set_x, test_set_y)
        print("epoch is %d, train error %f, test error %f"
              % (epoch, train_error, test_error))

    predict_model = function(
        inputs=[x],
        outputs=classifier.y_pred
    )
    plot_decision_boundary(lambda x: predict_model(x), train_set_x, train_set_y)


def plot_decision_boundary(pred_func, train_set_x, train_set_y):
    # evaluate the classifier on a dense grid and draw the decision regions
    x_min, x_max = train_set_x[:, 0].min() - .5, train_set_x[:, 0].max() + .5
    y_min, y_max = train_set_x[:, 1].min() - .5, train_set_x[:, 1].max() + .5
    h = 0.01
    xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
    Z = pred_func(np.c_[xx.ravel(), yy.ravel()].astype(theano.config.floatX))
    Z = Z.reshape(xx.shape)
    plt.contourf(xx, yy, Z, cmap=plt.cm.Spectral)
    plt.scatter(train_set_x[:, 0], train_set_x[:, 1], c=train_set_y,
                cmap=plt.cm.Spectral)
    plt.show()


if __name__ == "__main__":
    sgd_optimization()
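Note that the training loop above does full-batch gradient descent: every update sees all 600 training points at once. The deep learning tutorial this is adapted from uses the index variable to train on minibatches instead; a minimal sketch of that variant, reusing the train_model function above (batch_size is an illustrative choice):

batch_size = 20  # illustrative minibatch size
n_batches = train_set_x.shape[0] // batch_size
for epoch in range(n_epochs):
    for i in range(n_batches):
        # each update sees only one slice of the training data
        train_model(train_set_x[i * batch_size:(i + 1) * batch_size],
                    train_set_y[i * batch_size:(i + 1) * batch_size])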

Results

[Figure: decision boundary learned by the Theano logistic regression on the two-moons data]