图片经过边缘检测及二值化处理后是黑色的底,白色的线。把图片中每个白色像素点设为(Xi,Yi)。定理:对于直角坐标系中的每一个点,在极坐标中有一条曲线ρ=Xicosθ+Yisinθ与其对应。分别使θ等于0,△θ,2△θ……π,便可求出对应的ρ值。将ρ也分成许多小段△ρ,这样极坐标就被分成了许多以(△ρ,△θ)为单元的小块。计算每一条曲线落在各小段中的次数。所有数据点变换完成后,对小单元进行检测,这样,落入次数较多的单元,说明此点为较多曲线的公共点,而这些曲线对应的(X,Y)平面上的点可以认为是共线的。 其实我就是需要极坐标中相交次数最多的交点坐标(ρ,θ)中的θ值……
我自己的理解:原本直线方程是:y=kx+b;现在把它化为在极坐标下的形式:ρ=Xicosθ+Yisinθ。注意其中的x,y是直角坐标下的像素点。然后θ和ρ分段离散化取值,θ取值对应不同的ρ,这样直角坐标系中的一个点(x,y)在极坐标下就对应一条曲线。其实,(ρ,θ)就相当于直角坐标系中的(k,b),即ρ=Xicosθ+Yisinθ,就是利用计算机运算速度快的特点逐一算出ρ,θ。接上面:直角坐标系中的一个点(x,y)在极坐标下对应一条曲线,那么,原本在直角坐标系中同一条直线上的像素点就一一对应极坐标中的一条条曲线,它们会相交于一点,对应的(ρ,θ)就是我们需要的k和b。当然,具体的细节还需要查看源码,比如投票次数、具体怎样分割ρ,θ。投票次数就是在(ρ,θ)平面相交于同一点的次数,当大于某个阈值时就说明检测到一条直线了,取出(ρ,θ)就是对应的直线参数系数。
图中曲线是点(50,30)在(ρ,θ)平面上对应的曲线
图中的曲线是(50,30)、(30,10)两个点对应的曲线相交的情形。当把直角坐标系中同一条直线上的像素点一一映射到(ρ,θ)平面时,它们应该都相交于同一个(ρ,θ);当该点的投票数达到某个阈值时,就说明检测到了一条直线。
下面是论文中的hough检测直线步骤:
下面是书上的代码:
//LineFinder.h #include"opencv2/imgproc/imgproc.hpp" #include"opencv2/highgui/highgui.hpp" #include "opencv2/core/core.hpp" #include<iostream> //#include <math.h> //#include <cmath> using namespace std; using namespace cv; #define PI 3.141592 class LineFinder{ private: Mat img; vector<Vec4i>lines; double deltaRho; double deltaTheta; int minVote; double minLength; double maxGap; public: LineFinder():deltaRho(1),deltaTheta(PI/180),minVote(10),minLength(0.),maxGap(0.){} void setAccResolution(double dRho,double dTheta) { deltaRho=dRho; deltaTheta=dTheta; } void setMinVote (int minv) { minVote=minv; } void setLineLengAndGap(double length,double gap) { minLength=length; maxGap=gap; } vector<Vec4i> findLines(Mat &binary) { lines.clear(); HoughLinesP(binary,lines,deltaRho,deltaTheta,minVote,minLength,maxGap); return lines; } void drawDetectedLines(Mat &image,Scalar color=Scalar(255,255,255)) { vector<Vec4i>::const_iterator it2=lines.begin(); while (it2!=lines.end()) { Point pt1((*it2)[0],(*it2)[1]); Point pt2((*it2)[2],(*it2)[3]); line(image,pt1,pt2,color); ++it2; } } };
//main #include "LineFinder.h" void main() { Mat img=imread("C:\\Users\\Administrator\\Desktop\\工作\\testp\\road.jpg",0); Mat out1; Canny(img,out1,125,350); Mat img1; img1=img.clone(); vector<Vec2f> lines; HoughLines(out1,lines,1,PI/180,60); vector<Vec2f>::const_iterator it=lines.begin(); while (it!=lines.end()) { float rho=(*it)[0]; float theta=(*it)[1]; if (theta<PI/4.||theta>3.*PI/4.) { Point pt1(rho/cos(theta),0); Point pt2((rho-out1.rows*sin(theta))/cos(theta),out1.rows); line(img1,pt1,pt2,Scalar(255),1); } else { Point pt1(0,rho/sin(theta)); Point pt2(out1.cols,(rho-out1.cols*cos(theta))/sin(theta)); line(img1,pt1,pt2,Scalar(255),1); } ++it; } LineFinder finder; finder.setLineLengAndGap(100,20); finder.setMinVote(60); finder.findLines(out1); finder.drawDetectedLines(img); imshow("houghlinep",img); imshow("original",img1); imshow("out",out1); waitKey(0); }
检测圆:
#include "opencv2/core/core.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui/highgui.hpp"
using namespace cv;

// Detect circles with the Hough gradient method and draw them on the
// smoothed image. Returns 0 on success, -1 if the image cannot be loaded.
int main()
{
    // Flag 0 = grayscale; HoughCircles requires a single-channel image.
    Mat img = imread("C:\\Users\\Administrator\\Desktop\\工作\\testp\\chariot.jpg", 0);
    if (img.empty()) {
        // Fail fast instead of crashing inside GaussianBlur on a bad path.
        return -1;
    }
    // Smoothing first reduces spurious gradient votes in the accumulator.
    Mat img1;
    GaussianBlur(img, img1, Size(5, 5), 1.5);

    // Parameters: accumulator at half resolution (2), min 50 px between
    // centers, Canny high threshold 200, 100 center votes, radius 25..100.
    vector<Vec3f> circles;
    HoughCircles(img1, circles, CV_HOUGH_GRADIENT, 2, 50, 200, 100, 25, 100);

    // Each result is (center_x, center_y, radius).
    vector<Vec3f>::const_iterator itc = circles.begin();
    while (itc != circles.end()) {
        circle(img1, Point((*itc)[0], (*itc)[1]), (*itc)[2], Scalar(255), 5);
        ++itc;
    }

    imshow("original", img);
    imshow("img1", img1);
    waitKey(0);
    return 0;
}
点集的直线拟合:
#include "LineFinder.h"

// Refine one Hough-detected line by least-squares fitting: isolate the edge
// pixels lying near the first detected segment, then run cv::fitLine on them.
// Returns 0 on success, -1 if the image fails to load or no line is found.
// NOTE(review): the original paste was missing the closing brace of main()
// and indexed lines111[0] without checking the vector was non-empty.
int main()
{
    Mat img = imread("C:\\Users\\Administrator\\Desktop\\工作\\testp\\road.jpg", 0);
    if (img.empty()) {
        return -1;                    // fail fast on a bad path
    }
    Mat out1;
    Canny(img, out1, 125, 350);
    Mat img1 = img.clone();

    // --- classic Hough pass, drawn on img1 for comparison ---
    vector<Vec2f> lines;
    HoughLines(out1, lines, 1, PI/180, 60);
    vector<Vec2f>::const_iterator it = lines.begin();
    while (it != lines.end()) {
        float rho = (*it)[0];
        float theta = (*it)[1];
        if (theta < PI/4. || theta > 3.*PI/4.) {
            // Mostly-vertical: intersect with top/bottom borders.
            Point pt1(rho/cos(theta), 0);
            Point pt2((rho - out1.rows*sin(theta))/cos(theta), out1.rows);
            line(img1, pt1, pt2, Scalar(255), 1);
        } else {
            // Mostly-horizontal: intersect with left/right borders.
            Point pt1(0, rho/sin(theta));
            Point pt2(out1.cols, (rho - out1.cols*cos(theta))/sin(theta));
            line(img1, pt1, pt2, Scalar(255), 1);
        }
        ++it;
    }

    // --- probabilistic Hough; call findLines once and reuse the result
    //     (the original ran the transform twice) ---
    LineFinder finder;
    finder.setLineLengAndGap(100, 20);
    finder.setMinVote(60);
    vector<Vec4i> lines111 = finder.findLines(out1);
    finder.drawDetectedLines(img);
    if (lines111.empty()) {
        return -1;                    // nothing to fit
    }

    // Rasterize the first segment with a 3-px stroke, then AND with the edge
    // map: only edge pixels within that stroke survive.
    int n = 0;
    Mat oneline(out1.size(), CV_8U, Scalar(0));
    line(oneline, Point(lines111[n][0], lines111[n][1]),
         Point(lines111[n][2], lines111[n][3]), Scalar(255), 3);
    bitwise_and(out1, oneline, oneline);

    // Collect the surviving edge pixels as a point set.
    vector<Point> points;
    for (int y = 0; y < oneline.rows; y++) {
        uchar *rowPtr = oneline.ptr<uchar>(y);
        for (int x = 0; x < oneline.cols; x++) {
            if (rowPtr[x]) {
                points.push_back(Point(x, y));
            }
        }
    }

    // fitLine output: (vx, vy, x0, y0) -- unit direction + a point on the line.
    Vec4f line12;
    fitLine(points, line12, CV_DIST_L2, 0, 0.01, 0.01);
    int x0 = line12[2];
    int y0 = line12[3];
    int x1 = x0 + 100*line12[0];      // step 100 px along the fitted direction
    int y1 = y0 + 100*line12[1];
    line(img, Point(x0, y0), Point(x1, y1), Scalar(0), 3);

    imshow("src", img);
    imshow("oneline", oneline);
    waitKey(0);
    return 0;
}
隔了好久再看 OpenCV3 的时候,我自己理解的稿子如下: