This post classifies the watermelon 3.0 dataset with logistic regression and linear discriminant analysis (LDA). For logistic regression, the optimal weight vector w is solved three different ways: gradient descent, stochastic gradient descent, and Newton's method.
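For reference, here is a sketch of the formulas the code below implements, written in the code's own notation (each instance vector x_i carries a trailing constant 1, so the bias is absorbed into w):

$$\ell(w) = \sum_{i=1}^{n} \left( -y_i\, w^\top x_i + \ln\!\left(1 + e^{w^\top x_i}\right) \right) \quad \text{(computed by func)}$$

$$\nabla \ell(w) = -\sum_{i=1}^{n} x_i \left( y_i - p_1(x_i; w) \right), \qquad p_1(x; w) = \frac{e^{w^\top x}}{1 + e^{w^\top x}} \quad \text{(dfunc and p1)}$$

$$\nabla^2 \ell(w) = X \,\mathrm{diag}\!\left( p_1 (1 - p_1) \right) X^\top \quad \text{(d2func)}$$

Newton's method iterates $w \leftarrow w - \left(\nabla^2 \ell(w)\right)^{-1} \nabla \ell(w)$, while (stochastic) gradient descent iterates $w \leftarrow w - h\, \nabla \ell(w)$ with a fixed step size $h = 0.1$.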
The code is as follows:
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date : 2017-05-09 15:03:50
# @Author : whb (whb@bupt.edu.cn)
# @Link : ${link}
# @Version : $Id$

import numpy as p
import matplotlib.pyplot as plt
import pandas as pd
import random
from scipy.linalg import solve, inv


def read_data(file_name):
    # .ix is the old pandas indexer; this script targets Python 2 / older pandas
    train_data = pd.read_excel(file_name)  # a DataFrame in which each column is one instance
    return [list(train_data.ix[0]), list(train_data.ix[1]),
            list(train_data.ix[2]), list(train_data.ix[3])]
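# Data layout (inferred from how the rows are used below): rows 0 and 1 hold the
# two real-valued attributes (presumably density and sugar content), row 2 is a
# constant 1 serving as the bias feature, and row 3 is the 0/1 class label.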
# ## Algorithms ##
# 1. Logistic regression
def func(x, y, w):
    # x: samples, y: class labels, w: weights plus bias
    n = len(x[0])  # number of training instances
    m = len(x)     # m - 1 attributes per instance, plus the constant bias feature
    result = 0
    for i in xrange(n):
        s = 0
        for j in xrange(m):
            s += x[j][i] * w[j]
        result += -y[i] * s + p.log(1 + p.exp(s))
    return result


def p1(x, w):
    # posterior probability estimate P(y = 1 | x)
    wx = 0
    for i in xrange(len(x)):
        wx += w[i] * x[i]
    return p.exp(wx) / (1 + p.exp(wx))


def dfunc(x, y, w):
    # first derivative (gradient)
    df = p.zeros(len(x))
    for i in xrange(len(x[0])):
        df += x[:, i] * (y[i] - p1(x[:, i], w))
    return -df


def d2func(x, y, w):
    # second derivative (Hessian)
    n = len(x[0])
    d2f = p.zeros((n, n))
    for i in xrange(len(x[0])):
        d2f[i][i] = (1 - p1(x[:, i], w)) * p1(x[:, i], w)
    return p.mat(x) * p.mat(d2f) * p.mat(x.transpose())
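# Note: d2func() assembles the Hessian as X * diag(p1 * (1 - p1)) * X^T, which is
# positive semi-definite (and typically positive definite here), so the Newton
# step solved for below is a descent direction.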
# Newton's method
def newtown(x, y, w, error, n):
    i = 1
    while i < n:
        d1 = dfunc(x, y, w)
        if p.dot(d1, d1) < error:  # stop once the squared gradient norm is small
            print "Newton's method: converged in " + str(i) + " iterations: w =", w
            return w
        w = w - solve(d2func(x, y, w), dfunc(x, y, w))  # w <- w - H^{-1} * grad
        i += 1
    return w  # fall back to the last iterate if the loop runs out of iterations
# Gradient descent
def gradienet_down(x, y, w, error, n):
    i = 1
    h = 0.1  # fixed step size (learning rate)
    while i < n:
        start1 = func(x, y, w)
        df = dfunc(x, y, w)
        w = w - h * df
        start2 = func(x, y, w)
        if abs(start1 - start2) < error:  # stop once the objective barely changes
            print 'Gradient descent: converged in ' + str(i) + ' iterations: w =', w
            return w
        i += 1
    return w
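# Note: gradienet_down() above and SGD() below share the stopping rule
# |l(w_old) - l(w_new)| < error on the objective; SGD differs in sweeping the
# samples in a random order and updating w after each one, so each of its
# iterations is a full epoch over the 17 training instances.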
# Stochastic gradient descent
def SGD(x, y, w, error, n):
    i = 1
    h = 0.1  # fixed step size (learning rate)
    while i < n:
        start1 = func(x, y, w)
        x_set = range(17)      # indices of the 17 training instances
        random.shuffle(x_set)  # visit the samples in random order
        for k in x_set:        # update the weights using one sample at a time
            df = -x[:, k] * (y[k] - p1(x[:, k], w))
            w = w - h * df
        start2 = func(x, y, w)
        if abs(start1 - start2) < error:
            print 'SGD: converged in ' + str(i) + ' iterations: w =', w
            return w
        i += 1
    return w
# LDA (linear discriminant analysis)
def LDA(x, y):
    x = p.mat(x[:2])  # use only the two real attributes; drop the constant bias row
    u0 = p.zeros((2, 1))
    m0 = 0
    u1 = p.zeros((2, 1))
    m1 = 0
    for j in xrange(len(y)):
        if y[j] == 1:
            u1 += x[:, j]
            m1 += 1
        else:
            u0 += x[:, j]
            m0 += 1
    u0 = u0 / m0  # per-class means
    u1 = u1 / m1
    sum_ = p.zeros((2, 2))  # within-class scatter matrix
    for i in xrange(17):
        if y[i] == 1:
            sum_ += (x[:, i] - u1) * (p.mat(x[:, i] - u1).T)
        else:
            sum_ += (x[:, i] - u0) * (p.mat(x[:, i] - u0).T)
    return inv(sum_) * p.mat(u0 - u1)
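# Note: LDA() returns the Fisher direction w = S_w^{-1} (mu_0 - mu_1), where S_w
# is the within-class scatter matrix accumulated in sum_; projecting the samples
# onto w maximizes between-class separation relative to within-class spread.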
# Visualization
def result_plot(x, y, w_min):
    x1 = p.arange(0, 0.8, 0.01)
    y1 = [-(w_min[2] + w_min[0] * x1[k]) / w_min[1] for k in xrange(len(x1))]
    color = ['r'] * y.count(1.) + ['b'] * y.count(0.)  # assumes positive instances come first in the file
    plt.scatter(x[0], x[1], c=color)
    plt.plot(x1, y1)


if __name__ == '__main__':
    file_name = 'xigua.xls'
    data = read_data(file_name)
    x = data[:3]  # attribute values of each instance (plus the constant bias row)
    x = p.array(x)
    y = data[-1]  # class labels
    w = [1, 1, 1]  # initial weights
    error = 0.0001  # convergence tolerance
    n = 1000  # maximum number of iterations
    w_min = newtown(x, y, w, error, n)
    w_min1 = gradienet_down(x, y, w, error, n)
    w_min11 = SGD(x, y, w, error, n)
    w_min2 = LDA(x, y)
    # LDA yields only a direction, so plot it as a line through the origin (zero bias)
    w_min2 = [w_min2[0, 0], w_min2[1, 0], 0]
    # Visualization
    plt.figure(1)
    plt.subplot(221)
    result_plot(x, y, w_min)
    plt.title(u"Newton's method")
    plt.subplot(222)
    result_plot(x, y, w_min1)
    plt.title(u'Gradient descent')
    plt.subplot(223)
    result_plot(x, y, w_min11)
    plt.title(u'SGD')
    plt.subplot(224)
    result_plot(x, y, w_min2)
    plt.title(u'LDA')
    plt.show()
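As an optional sanity check (not part of the script above), the logistic-regression fit can be compared against scikit-learn. The LogisticRegression API used below is real; the only assumption is that x and y have the layout built in __main__ above (a 3 x 17 array whose last row is all ones, and a list of 0/1 labels):

import numpy as np
from sklearn.linear_model import LogisticRegression

def sklearn_check(x, y):
    # instances as rows; drop the constant bias row, sklearn fits its own intercept
    X = np.asarray(x[:2], dtype=float).T
    clf = LogisticRegression(C=1e6)  # very weak regularization ~ plain maximum likelihood
    clf.fit(X, np.asarray(y, dtype=int))
    # clf.coef_ corresponds to w[0], w[1]; clf.intercept_ to the bias w[2]
    return list(clf.coef_.ravel()) + list(clf.intercept_)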
Results:
Newton's method: converged in 5 iterations: w = [ 3.14453235 12.52792035 -4.42024654]
Gradient descent: converged in 838 iterations: w = [ 2.80637226 11.14036869 -3.95330427]
SGD: converged in 182 iterations: w = [ 1.84669379 6.02658819 -2.31718771]
As expected, second-order Newton steps converge in far fewer iterations (5) than fixed-step gradient descent (838) or SGD (182 epochs).