Random Forest: Principles and Algorithm Implementation
# -*- coding: utf-8 -*-
# Random Forest Algorithm on Sonar Dataset
from random import seed
from random import randrange
from csv import reader
from math import sqrt
import numpy as np
# Random forest algorithm
#---------------------------------------------------------------------------------------------------
#---------------------------------------------------------------------------------------------------
# Random forest prediction
#---------------------------------------------------------------------------------------------------
# Make a prediction with a single decision tree: follow the split tests from
# the root down to a leaf and return the leaf's class label
def predict(node, row):
    if row[node['index']] < node['value']:  # the row belongs to the left branch
        if isinstance(node['left'], dict):  # internal node: recurse further down
            return predict(node['left'], row)
        else:  # leaf reached: return its class label
            return node['left']
    else:  # the row belongs to the right branch
        if isinstance(node['right'], dict):  # internal node: recurse further down
            return predict(node['right'], row)
        else:  # leaf reached: return its class label
            return node['right']
# Make a prediction with a list of bagged trees
def bagging_predict(trees, row):
    predictions = [predict(tree, row) for tree in trees]  # each tree votes for a class
    return max(set(predictions), key=predictions.count)  # majority vote decides the class
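# A minimal sketch of the majority vote in action (this _demo_* helper and its
# toy trees are hypothetical additions, not part of the original script):
def _demo_bagging_predict():
    # two depth-one stub trees splitting on feature 0 at different thresholds
    t1 = {'index': 0, 'value': 0.5, 'left': 'R', 'right': 'M'}
    t2 = {'index': 0, 'value': 0.9, 'left': 'R', 'right': 'M'}
    # for the row below the votes are ['M', 'M', 'R'], so 'M' wins
    print(bagging_predict([t1, t1, t2], [0.7]))  # -> 'M'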
# Growing the random forest
#---------------------------------------------------------------------------------------------------
# Create a terminal node value: force a leaf and label it with the majority class
def to_terminal(group):
    outcomes = [row[-1] for row in group]  # class labels of the rows in the group
    return max(set(outcomes), key=outcomes.count)  # the most common class wins
# Create child splits for a node, or make it terminal
def split(node, max_depth, min_size, n_features, depth):
    left, right = node['groups']  # unpack the left/right subsets of this node
    del(node['groups'])
    # check for a no split: one side is empty, so make a single leaf labelled
    # with the majority class of all rows that reached this node
    if not left or not right:
        node['left'] = node['right'] = to_terminal(left + right)
        return
    # check for max depth: the tree is deep enough, so turn both children into
    # leaves; depth is initialized to 1, meaning the split below the root
    if depth >= max_depth:
        node['left'], node['right'] = to_terminal(left), to_terminal(right)
        return
    # process left child: too few rows -> leaf, otherwise split recursively
    if len(left) <= min_size:
        node['left'] = to_terminal(left)
    else:
        node['left'] = get_split(left, n_features)  # recurse: split the left subset
        split(node['left'], max_depth, min_size, n_features, depth+1)
    # process right child: too few rows -> leaf, otherwise split recursively
    if len(right) <= min_size:
        node['right'] = to_terminal(right)
    else:
        node['right'] = get_split(right, n_features)  # recurse: split the right subset
        split(node['right'], max_depth, min_size, n_features, depth+1)
# Calculate the Gini index for a split dataset: the impurity of the left and
# right subsets is summed over all classes
def gini_index(groups, class_values):
    gini = 0.0
    for class_value in class_values:
        for group in groups:
            size = len(group)
            if size == 0:
                continue
            proportion = [row[-1] for row in group].count(class_value) / float(size)
            gini += (proportion * (1.0 - proportion))  # note: this variant does not weight each group's term by group size
    return gini
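# A quick sanity check for gini_index (a hypothetical helper, not part of the
# original script): a pure split scores 0.0, a perfectly mixed one much higher.
def _demo_gini_index():
    mixed = ([[0], [1]], [[0], [1]])  # each group holds one row per class
    pure = ([[0], [0]], [[1], [1]])   # each group contains a single class
    print(gini_index(mixed, [0, 1]))  # -> 1.0 (four unweighted 0.25 terms)
    print(gini_index(pure, [0, 1]))   # -> 0.0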
# Split a dataset based on an attribute and an attribute value: rows whose
# feature value is below the threshold go left, the rest go right
def test_split(index, value, dataset):
    left, right = list(), list()
    for row in dataset:
        if row[index] < value:
            left.append(row)
        else:
            right.append(row)
    return left, right
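# How test_split partitions rows (hypothetical toy rows with the class label in
# the last column; this helper is not part of the original script):
def _demo_test_split():
    data = [[2.0, 'A'], [5.0, 'B'], [1.0, 'A'], [7.0, 'B']]
    left, right = test_split(0, 5.0, data)
    print(left)   # -> [[2.0, 'A'], [1.0, 'A']]  (feature 0 < 5.0)
    print(right)  # -> [[5.0, 'B'], [7.0, 'B']]  (feature 0 >= 5.0)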
# Select the best split point for a dataset: returns the test attribute, the
# split threshold and the resulting left/right subsets
def get_split(dataset, n_features):
    class_values = list(set(row[-1] for row in dataset))  # the class labels present
    b_index, b_value, b_score, b_groups = 999, 999, 999, None
    features = list()
    while len(features) < n_features:  # draw n_features distinct features at random for this split
        index = randrange(len(dataset[0])-1)
        if index not in features:
            features.append(index)
    for index in features:  # evaluate each sampled feature
        for row in dataset:  # try every row's value as a candidate threshold (an exhaustive and admittedly inefficient search)
            groups = test_split(index, row[index], dataset)  # partition dataset on feature `index` at row's value
            gini = gini_index(groups, class_values)  # impurity of the left/right subsets; lower is better
            if gini < b_score:
                b_index, b_value, b_score, b_groups = index, row[index], gini, groups  # record the best split so far
    return {'index':b_index, 'value':b_value, 'groups':b_groups}  # the node before its children are grown
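# With n_features equal to the full feature count, the random sampling has no
# effect and get_split reduces to CART's exhaustive search (hypothetical toy
# data; this helper is not part of the original script):
def _demo_get_split():
    data = [[1.0, 0], [2.0, 0], [8.0, 1], [9.0, 1]]
    node = get_split(data, 1)
    print(node['index'], node['value'])  # -> 0 8.0, a perfect split (Gini 0.0)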
# Build a single decision tree
def build_tree(train, max_depth, min_size, n_features):
    # pick the root's test attribute, threshold and subsets (n_features random
    # attributes are drawn first, then the best split among them is chosen)
    root = get_split(train, n_features)
    # grow the branches; every split re-draws n_features random attributes
    split(root, max_depth, min_size, n_features, 1)
    return root
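# The shape of a learned tree (hypothetical toy data; this helper is not part
# of the original script): with min_size=2 both children of the root collapse
# straight into leaf labels.
def _demo_build_tree():
    data = [[1.0, 0], [2.0, 0], [8.0, 1], [9.0, 1]]
    print(build_tree(data, max_depth=3, min_size=2, n_features=1))
    # -> {'index': 0, 'value': 8.0, 'left': 0, 'right': 1}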
# Create a random subsample from the dataset with replacement, as the training
# set for a single tree
def subsample(dataset, ratio):
    sample = list()
    n_sample = round(len(dataset) * ratio)  # dataset here is the fold-level train_set passed in by the caller
    while len(sample) < n_sample:
        index = randrange(len(dataset))
        sample.append(dataset[index])  # sampling with replacement: duplicates allowed
    return sample
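# Bootstrap sampling draws with replacement, so duplicates are expected and a
# same-size sample covers only about 1 - 1/e of the distinct rows on average
# for large datasets (this helper is a hypothetical addition, not part of the
# original script):
def _demo_subsample():
    data = [[i] for i in range(10)]
    sample = subsample(data, 1.0)
    print(len(sample), len({row[0] for row in sample}))  # e.g. 10 6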
# Random Forest Algorithm: grow the forest, then classify the test set with it
def random_forest(train, test, max_depth, min_size, sample_size, n_trees, n_features):
    trees = list()
    for i in range(n_trees):
        sample = subsample(train, sample_size)  # bootstrap sample; sample_size is the sampling ratio
        tree = build_tree(sample, max_depth, min_size, n_features)  # grow one tree on randomly drawn features
        trees.append(tree)
    predictions = [bagging_predict(trees, row) for row in test]  # forest-level majority vote per test row
    return predictions
# Cross validation
#---------------------------------------------------------------------------------------------------
#---------------------------------------------------------------------------------------------------
# Calculate accuracy percentage
def accuracy_metric(actual, predicted):
    correct = 0
    for i in range(len(actual)):
        if actual[i] == predicted[i]:
            correct += 1
    return correct / float(len(actual)) * 100.0
# Split a dataset into k folds at random; the last fold may end up smaller than
# the others
def cross_validation_split(dataset, n_folds):
    dataset_split = list()
    dataset_copy = list(dataset)
    fold_size = len(dataset) / n_folds  # float on purpose: the first folds hold ceil(fold_size) rows
    sample_flag = 0  # number of rows already assigned to a fold
    for i in range(n_folds):
        fold = list()
        if (len(dataset) - sample_flag >= fold_size):
            while len(fold) < fold_size:
                index = randrange(len(dataset_copy))  # random index for the next draw
                fold.append(dataset_copy.pop(index))  # sampling without replacement, as cross validation requires (unlike the forest's bootstrap sampling)
                sample_flag = sample_flag + 1
            dataset_split.append(fold)  # one complete fold
        else:
            dataset_split.append(dataset_copy)  # the remaining rows form the last, smaller fold
    return dataset_split
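# Fold sizes for the Sonar dataset's 208 rows (a hypothetical helper, not part
# of the original script): fold_size is 41.6, so the first four folds receive
# 42 rows each and the last fold gets the remaining 40.
def _demo_cross_validation_split():
    data = [[i] for i in range(208)]
    print([len(fold) for fold in cross_validation_split(data, 5)])
    # -> [42, 42, 42, 42, 40]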
# Evaluate an algorithm using a cross-validation split
def evaluate_algorithm(dataset, algorithm, n_folds, *args):
    folds = cross_validation_split(dataset, n_folds)  # the n_folds groups required for cross validation
    scores = list()
    for fold in folds:  # each fold serves once as the test set while the remaining folds form the training set
        train_set = list(folds)  # copy the fold list so the original data is not affected
        train_set.remove(fold)
        train_set = sum(train_set, [])  # flatten the training folds into a single list of rows; the test rows stay in `fold`
        test_set = list()
        for row in fold:  # copy the test rows into test_set
            row_copy = list(row)
            test_set.append(row_copy)
            row_copy[-1] = None  # hide the class label from the algorithm
        predicted = algorithm(train_set, test_set, *args)  # e.g. the random forest's predictions for the test set
        actual = [row[-1] for row in fold]  # the true class labels
        accuracy = accuracy_metric(actual, predicted)  # prediction accuracy on this fold
        scores.append(accuracy)
    return scores
# Data preparation
#---------------------------------------------------------------------------------------------------
#---------------------------------------------------------------------------------------------------
# Load a CSV file row by row into a list of lists
def load_csv(filename):
    dataset = list()
    with open(filename, 'r') as file:
        csv_reader = reader(file)
        for row in csv_reader:
            if not row:  # skip empty rows
                continue
            dataset.append(row)
    return dataset
# Convert a string column to float, stripping surrounding whitespace
def str_column_to_float(dataset, column):
    for row in dataset:
        row[column] = float(row[column].strip())
# Convert a string column to integer labels 0, 1, ... (i.e. encode the class
# labels numerically)
def str_column_to_int(dataset, column):
    class_values = [row[column] for row in dataset]
    unique = set(class_values)
    lookup = dict()
    for i, value in enumerate(unique):
        lookup[value] = i
    for row in dataset:
        row[column] = lookup[row[column]]
    return lookup  # returning the mapping is optional but handy for inspection
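# What the label encoding produces (a hypothetical helper, not part of the
# original script); note the 0/1 assignment depends on set iteration order:
def _demo_str_column_to_int():
    data = [[0.1, 'R'], [0.2, 'M'], [0.3, 'R']]
    print(str_column_to_int(data, 1))  # e.g. {'M': 0, 'R': 1}
    print(data)                        # labels in column 1 are now integers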
# Using the random forest
#---------------------------------------------------------------------------------------------------
#---------------------------------------------------------------------------------------------------
# Test the random forest algorithm
seed(1)
# load and prepare data
filename = ''  # path to the Sonar CSV, e.g. 'sonar.all-data.csv'; set before running
dataset = load_csv(filename)
sample_num, var_num = np.shape(dataset)
print(sample_num, var_num)  # the Sonar dataset: 208 samples, 60 features, with the class label in the last column (R for rock, M for metal)
# convert string attributes to float
for i in range(0, len(dataset[0])-1):  # convert each feature column from string to float
    str_column_to_float(dataset, i)
# convert class column to integers
str_column_to_int(dataset, len(dataset[0])-1)  # encode the class labels numerically
# evaluate algorithm
n_folds = 5  # split the data into 5 folds for cross validation
max_depth = 10  # maximum depth of each decision tree
min_size = 1  # stop splitting once a group holds min_size rows or fewer; 1 imposes no real limit
sample_size = 0.8  # fraction of the training rows drawn for each bootstrap sample
n_features = int(sqrt(len(dataset[0])-1))  # number of features drawn at each split
for n_trees in [5, 10, 15]:
    scores = evaluate_algorithm(dataset, random_forest, n_folds, max_depth, min_size, sample_size, n_trees, n_features)
    print('Trees: %d' % n_trees)
    print('Scores: %s' % scores)
    print('Mean Accuracy: %.3f%%' % (sum(scores)/float(len(scores))))