While sorting through some old files today, I came across this experiment from my time at school. The whole approach leaned too heavily on the classifier, and it was hard to improve the classifier's performance to any meaningful degree, so in the end we did not go with this scheme. I probably won't work on this kind of machine learning again, so I hope it is of some use to people just getting started.
SVM is well suited to binary classification of high-dimensional data. The original plan was to run the speech features directly through an SVM, but there were so many training samples that training ran for two days without converging. So I first used VQ (vector quantization) clustering to extract a representative subset of the speech features and trained the SVM classifier on that reduced training set instead. That worked reasonably well: training converged within an afternoon, and the recognition results were acceptable, with roughly 80% accuracy even in the worse cases.
LibSVM is a toolkit developed by Chih-Jen Lin at National Taiwan University: http://www.csie.ntu.edu.tw/~cjlin/
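For reference, the MATLAB interface of LibSVM (the svm-mat toolbox that the training script below adds to the path) boils down to two calls, svmtrain and svmpredict. The following is a minimal sketch with made-up placeholder data, using the same -t 2 / -g / -c options (RBF kernel, gamma, penalty C) that the real script uses further down:

% Minimal LibSVM (MATLAB interface) usage sketch; the data here are placeholders.
labels   = [ones(50,1); -ones(50,1)];            % column vector of class labels
features = [randn(50,2)+1; randn(50,2)-1];       % one sample per row
opts  = '-t 2 -g 0.5 -c 10';                     % RBF kernel, gamma, penalty C
model = svmtrain(labels, features, opts);        % train the classifier
[pred, acc, dec] = svmpredict(labels, features, model); % predict (here on the training data itself)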
Training feature extraction part:
clc;
clear;
train_feature0=[]; % class A features
train_feature1=[]; % class B features
%%%%% parameter selection %%%%%
N=128; % number of VQ codewords (clusters)
ine = 1*10^(-3); % VQ stopping threshold
%%%%%%%%% class A training data
root0='D:\model_train\train_orign';
list0=dir(root0);
list0(1:2)=[];
N0=length(list0);
for i=1:N0
file=[root0,'\',list0(i).name];
x=wavread(file);
[temp]=mfcc(x);
train_feature0=[train_feature0;temp];
disp(list0(i).name);
end
%%%% VQ clustering for class A
[vector0,cent0]=VQ(train_feature0,N,ine);
%%%%
% for i=1:N
% train_feature=[train_feature;vector0(i).data(1:XQ,:)];
% end
% label0=ones(N*XQ,1); % file label 1
%%%%%%%%% class B training data
root1='D:\model_train\train_tamper';
list1=dir(root1);
list1(1:2)=[];
N1=length(list1);
for j=1:N1
file=[root1,'\',list1(j).name];
x=wavread(file);
[temp]=mfcc(x);
train_feature1=[train_feature1;temp];
disp(list1(j).name);
end
%%%%%%%%%%% VQ clustering for class B
[vector1,cent1]=VQ(train_feature1,N,ine);
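The mfcc and VQ routines called above are not included in the post. mfcc only needs to return one feature vector per frame (one row per frame); VOICEBOX's melcepst, for example, has that shape. For VQ, the sketch below is a rough stand-in: plain k-means iterations rather than the classic LBG splitting algorithm, with the interface (the vector(k).data field and the cent matrix of centroids) guessed from how the script uses the outputs, and ine treated as a relative distortion-improvement threshold. Treat it as an assumption, not the original implementation.

function [vector, cent] = VQ(features, N, ine)
% Rough VQ codebook trainer (plain k-means iterations, not full LBG).
% features: one frame per row; N: codebook size; ine: stop when the
% relative improvement of the average distortion falls below ine.
M = size(features, 1);
perm = randperm(M);
cent = features(perm(1:N), :);                 % initialize centroids from random frames
prev_dist = inf;
for iter = 1:200
    d = pdist2(features, cent);                % M x N frame-to-centroid distances (Statistics Toolbox)
    [mind, idx] = min(d, [], 2);               % nearest centroid for every frame
    dist = mean(mind);                         % average distortion
    for k = 1:N
        if any(idx == k)
            cent(k, :) = mean(features(idx == k, :), 1); % move centroid to the mean of its cell
        end
    end
    if (prev_dist - dist) / dist < ine         % distortion stopped improving
        break;
    end
    prev_dist = dist;
end
[~, idx] = min(pdist2(features, cent), [], 2); % final assignment with the trained codebook
for k = 1:N
    vector(k).data = features(idx == k, :);    % frames that fall in cell k
end
end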
Model training and classification part:
clc;clear;
%--------- use SVM to test the accuracy of the three kinds of features, each detected separately, 2012-3-19 -------
addpath('svm-mat-2.89-3'); % add path with libsvm routines
display(datestr(now));
%------------------------ load the training data ----------------------------------------
load('train_feature.mat'); % speech feature training data
load('train_label.mat'); % corresponding labels
load('test_feature.mat'); % loaded here only so it can be scaled together with the training data
%-------------直接指定参数-----------
G = 1/24; C = 1e8; % RBF kernel gamma and penalty C
cmd=sprintf('-t 2 -g %.4e -c %.4e',G,C);
[train_scale,test_scale] = scaleForSVM_corrected(train_feature,test_feature,-1,1); % scale the training and testing sets to [-1,1]
model1=svmtrain(train_label,train_scale,cmd); % train the svm classifier (frame level)
save model1.mat model1;
save test_scale.mat test_scale;
clear;
load('model1.mat');
load('test_scale.mat');
%------------------------ load the testing data ----------------------------------------
load('test_label.mat'); % file-level labels
load('test_file_frames.mat'); % number of frames in each test file
load('test_label_frame.mat'); % frame-level labels
%--------------------------------------------------------------------------
[predict_label,predict_accuracy_rate]=svmpredict(test_label_frame,test_scale,model1);% classify the testing set
N=length(test_file_frames);
temp1=0;
temp2=0;
for i=1:N
temp2=temp2+test_file_frames(i); % index of the last frame of file i
temp1=temp2-test_file_frames(i)+1; % index of the first frame of file i
% zonghe=score(temp1:temp2);
same=(predict_label(temp1:temp2)==test_label_frame(temp1:temp2)); % per-frame agreement with the true frame labels
if sum(same)/test_file_frames(i)>0.5 % majority vote over the frames of file i
predict_result(i)=test_label(i);
else
predict_result(i)=(~test_label(i));
end
end
predict_result=predict_result';
index=(predict_result==test_label);
num_correct=sum(index);
accuracy=num_correct/N;
fprintf('Classes A and B combined: accuracy = %0.2f (%d/%d) (classification)\n',accuracy,num_correct,N);
display(datestr(now));
%%%%%%%%%%%%%%%%%%% class A accuracy (the first 200 test files are class A)
index=(predict_result(1:200)==test_label(1:200));
num_z=sum(index);
accuracy=num_z/(N/2);
fprintf('Class A accuracy = %0.2f (%d/%d) (classification)\n',accuracy,num_z,N/2);
%%%%%%%%%%%%%%%%%%% class B accuracy (test files 201-400 are class B)
index=(predict_result(201:400)==test_label(201:400));
num_f=sum(index);
accuracy=num_f/(N/2);
fprintf('Class B accuracy = %0.2f (%d/%d) (classification)\n',accuracy,num_f,N/2);
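scaleForSVM_corrected is another helper that is not shown (the name suggests a patched version of the scaleForSVM utility shipped with some LibSVM MATLAB add-on packages). The sketch below is one plausible implementation, under the assumption that it min-max scales each feature column to [lower, upper] using the training set's statistics and applies the same transform to the test set:

function [train_scale, test_scale] = scaleForSVM_corrected(train_data, test_data, lower, upper)
% Assumed behavior: min-max scale every feature column to [lower, upper]
% using the TRAINING set's min/max, then scale the test set with the
% same statistics so both sets live in the same feature space.
cmin  = min(train_data, [], 1);
cmax  = max(train_data, [], 1);
range = cmax - cmin;
range(range == 0) = 1;                                   % guard against constant columns
train_scale = lower + (upper - lower) * bsxfun(@rdivide, bsxfun(@minus, train_data, cmin), range);
test_scale  = lower + (upper - lower) * bsxfun(@rdivide, bsxfun(@minus, test_data,  cmin), range);
end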