This is the overall OpenCV workflow from reading images through camera calibration, stereo calibration and stereo rectification to finally producing a disparity map. It is of course only one possible approach, but it is a relatively straightforward one. To keep the flow easy to follow, only the key steps and functions are listed (read the diagram from bottom to top; a call-order sketch in code is given right after it), and you can build your own program on this skeleton.
cvFindStereoCorrespondenceBM(final rectified left image, final rectified right image, output disparity matrix, BMState)
||
------- everything below can be regarded as calibration ----------
|
|
cvRemap(original left image, output final rectified image, mapx1, mapy1) //mapx/mapy describe the mapping from the original (distorted) image to the final rectified, row-aligned, undistorted image. Once mapx and mapy are known, producing the disparity map becomes very simple; of course, a good disparity map also depends heavily on the matching algorithm.
|
|
cvInitUndistortRectifyMap(left camera matrix (intrinsics), left distortion coefficients, Rl, Pl, output mapx1, output mapy1);
|
|
cvStereoRectify(left intrinsics, right intrinsics, left distortion, right distortion, image size, R, T, output Rl, output Rr, output Pl, output Pr, output Q, flags)
|
|
cvStereoCalibrate(object points, left image points, right image points, number of points per image, output left intrinsics, output left distortion, output right intrinsics, output right distortion, image size, output R, output T, output E, output F, termination criteria, flags)
|
|
cvFindCornerSubPix() refines the corners (image points) to sub-pixel accuracy
|
|
cvFindChessboardCorners() finds rough corner positions
|
|
Grab images from the cameras or read them from files
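To make the call order concrete (the diagram above reads from bottom to top), here is a minimal sketch of the same chain using the C API. It assumes the chessboard corners have already been collected into CvMat point arrays; all variable names are placeholders, error handling is omitted, and the complete working program follows further below.

#include "cv.h"

/* Sketch: objPts is 1xN CV_32FC3, imgPts1/imgPts2 are 1xN CV_32FC2, npts is 1xM CV_32S
 * (M image pairs, N total corners); left/right are 8-bit grayscale images of size imageSize. */
static void stereoChainSketch(CvMat* objPts, CvMat* imgPts1, CvMat* imgPts2, CvMat* npts,
                              CvSize imageSize, IplImage* left, IplImage* right)
{
    double M1[9], M2[9], D1[5], D2[5], R[9], T[3], E[9], F[9];
    double Rl[9], Rr[9], Pl[12], Pr[12];
    CvMat _M1 = cvMat(3, 3, CV_64F, M1), _M2 = cvMat(3, 3, CV_64F, M2);
    CvMat _D1 = cvMat(1, 5, CV_64F, D1), _D2 = cvMat(1, 5, CV_64F, D2);
    CvMat _R = cvMat(3, 3, CV_64F, R), _T = cvMat(3, 1, CV_64F, T);
    CvMat _E = cvMat(3, 3, CV_64F, E), _F = cvMat(3, 3, CV_64F, F);
    CvMat _Rl = cvMat(3, 3, CV_64F, Rl), _Rr = cvMat(3, 3, CV_64F, Rr);
    CvMat _Pl = cvMat(3, 4, CV_64F, Pl), _Pr = cvMat(3, 4, CV_64F, Pr);
    cvSetIdentity(&_M1); cvSetIdentity(&_M2);
    cvZero(&_D1); cvZero(&_D2);
    // 1. Stereo calibration: intrinsics M1/M2, distortion D1/D2, extrinsics R/T (plus E, F)
    cvStereoCalibrate(objPts, imgPts1, imgPts2, npts, &_M1, &_D1, &_M2, &_D2,
        imageSize, &_R, &_T, &_E, &_F,
        cvTermCriteria(CV_TERMCRIT_ITER + CV_TERMCRIT_EPS, 100, 1e-5),
        CV_CALIB_FIX_ASPECT_RATIO);
    // 2. Rectification rotations Rl/Rr and projection matrices Pl/Pr
    cvStereoRectify(&_M1, &_M2, &_D1, &_D2, imageSize, &_R, &_T,
        &_Rl, &_Rr, &_Pl, &_Pr, 0, 0);
    // 3. Per-camera undistort + rectify lookup maps
    CvMat* mapx1 = cvCreateMat(imageSize.height, imageSize.width, CV_32F);
    CvMat* mapy1 = cvCreateMat(imageSize.height, imageSize.width, CV_32F);
    CvMat* mapx2 = cvCreateMat(imageSize.height, imageSize.width, CV_32F);
    CvMat* mapy2 = cvCreateMat(imageSize.height, imageSize.width, CV_32F);
    cvInitUndistortRectifyMap(&_M1, &_D1, &_Rl, &_Pl, mapx1, mapy1);
    cvInitUndistortRectifyMap(&_M2, &_D2, &_Rr, &_Pr, mapx2, mapy2);
    // 4. Rectify one image pair and compute its disparity map
    CvMat* leftRect = cvCreateMat(imageSize.height, imageSize.width, CV_8U);
    CvMat* rightRect = cvCreateMat(imageSize.height, imageSize.width, CV_8U);
    CvMat* disp = cvCreateMat(imageSize.height, imageSize.width, CV_16S);
    cvRemap(left, leftRect, mapx1, mapy1);
    cvRemap(right, rightRect, mapx2, mapy2);
    CvStereoBMState* bm = cvCreateStereoBMState(CV_STEREO_BM_BASIC, 64);
    cvFindStereoCorrespondenceBM(leftRect, rightRect, disp, bm);
    cvReleaseStereoBMState(&bm);
    cvReleaseMat(&mapx1); cvReleaseMat(&mapy1); cvReleaseMat(&mapx2); cvReleaseMat(&mapy2);
    cvReleaseMat(&leftRect); cvReleaseMat(&rightRect); cvReleaseMat(&disp);
}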
The OpenCV sample program is attached below (with my own annotations and modifications).
#pragma warning( disable: 4996 )
/* *************** License:**************************
Oct. 3, 2008
Right to use this code in any way you want without warranty, support or any guarantee of it working.
BOOK: It would be nice if you cited it:
Learning OpenCV: Computer Vision with the OpenCV Library
by Gary Bradski and Adrian Kaehler
Published by O'Reilly Media, October 3, 2008
AVAILABLE AT:
http://www.amazon.com/Learning-OpenCV-Computer-Vision-Library/dp/0596516134
Or: http://oreilly.com/catalog/9780596516130/
ISBN-10: 0596516134 or: ISBN-13: 978-0596516130
OTHER OPENCV SITES:
* The source code is on sourceforge at:
http://sourceforge.net/projects/opencvlibrary/
* The OpenCV wiki page (As of Oct 1, 2008 this is down for changing over servers, but should come back):
http://opencvlibrary.sourceforge.net/
* An active user group is at:
http://tech.groups.yahoo.com/group/OpenCV/
* The minutes of weekly OpenCV development meetings are at:
http://pr.willowgarage.com/wiki/OpenCV
************************************************** */
#include "cv.h"
#include "cxmisc.h"
#include "highgui.h"
//#include "cvaux.h"
#include <vector>
#include <string>
#include <algorithm>
#include <stdio.h>
#include <ctype.h>
#include <iostream>
#include <fstream>
using namespace std;
//
// Given a list of chessboard images, the number of corners (nx, ny)
// on the chessboards, and a flag useUncalibrated: 0 = calibrated
// (Bouguet) rectification, 1 = uncalibrated (Hartley) rectification
// using the fundamental matrix from cvStereoCalibrate(), 2 = uncalibrated
// rectification with the fundamental matrix recomputed from the point
// correspondences. Calibrate the cameras and display the rectified
// results along with the computed disparity images.
//
static void
StereoCalib(const char* imageList, int nx, int ny, int useUncalibrated)
{
//Part one: the data we ultimately need are the remap matrices mapx/mapy, i.e. the mapping from the original distorted images to the rectified, row-aligned, undistorted images.
//The maps are saved to files; as long as the camera rig is not physically changed, the mapping does not change, so after the first calibration the maps can be treated as fixed parameters.
int displayCorners = 0;
int showUndistorted = 1;
bool isVerticalStereo = false;//OpenCV can handle left-right
//or up-down camera arrangements
const int maxScale = 1;
const float squareSize = 1.f; //Set this to your actual square size
FILE* f = fopen(imageList, "rt");
int i, j, lr, nframes, n = nx*ny, N = 0;
vector<string> imageNames[2];
vector<CvPoint3D32f> objectPoints;
vector<CvPoint2D32f> points[2];
vector<int> npoints;
vector<uchar> active[2];
vector<CvPoint2D32f> temp(n);
CvSize imageSize = { 0,0 };
// ARRAY AND VECTOR STORAGE:
double M1[3][3], M2[3][3], D1[5], D2[5];
double R[3][3], T[3], E[3][3], F[3][3];
CvMat _M1 = cvMat(3, 3, CV_64F, M1);
CvMat _M2 = cvMat(3, 3, CV_64F, M2);
CvMat _D1 = cvMat(1, 5, CV_64F, D1);
CvMat _D2 = cvMat(1, 5, CV_64F, D2);
CvMat _R = cvMat(3, 3, CV_64F, R);
CvMat _T = cvMat(3, 1, CV_64F, T);
CvMat _E = cvMat(3, 3, CV_64F, E);
CvMat _F = cvMat(3, 3, CV_64F, F);
if (displayCorners)
cvNamedWindow("corners", 1);
// READ IN THE LIST OF CHESSBOARDS:
if (!f)
{
fprintf(stderr, "can not open file %s\n", imageList);
return;
}
for (i = 0;;i++)
{
char buf[1024];
int count = 0, result = 0;
lr = i % 2;
vector<CvPoint2D32f>& pts = points[lr]; //-------------------ptr-->points
if (!fgets(buf, sizeof(buf) - 3, f))
break;
size_t len = strlen(buf);
while (len > 0 && isspace(buf[len - 1]))
buf[--len] = '\0';
if (buf[0] == '#')
continue;
IplImage* img = cvLoadImage(buf, 0);
if (!img)
break;
imageSize = cvGetSize(img);
imageNames[lr].push_back(buf);
//FIND CHESSBOARDS AND CORNERS THEREIN:
for (int s = 1; s <= maxScale; s++)
{
IplImage* timg = img;
if (s > 1)
{
timg = cvCreateImage(cvSize(img->width*s, img->height*s),
img->depth, img->nChannels);
cvResize(img, timg, CV_INTER_CUBIC);
}
result = cvFindChessboardCorners(timg, cvSize(nx, ny), //----------------------------------------------
&temp[0], &count, //&temp-->&ptr-->points[0]-->_imagePoints--(cvStereoCalibrate)->&_M,&_D
CV_CALIB_CB_ADAPTIVE_THRESH |
CV_CALIB_CB_NORMALIZE_IMAGE);
if (timg != img)
cvReleaseImage(&timg);
if (result || s == maxScale)
for (j = 0; j < count; j++)
{
temp[j].x /= s;
temp[j].y /= s;
}
if (result)
break;
}
if (displayCorners)
{
printf("%s\n", buf);
IplImage* cimg = cvCreateImage(imageSize, 8, 3);
cvCvtColor(img, cimg, CV_GRAY2BGR);
cvDrawChessboardCorners(cimg, cvSize(nx, ny), &temp[0],
count, result);
cvShowImage("corners", cimg);
cvReleaseImage(&cimg);
if (cvWaitKey(0) == 27) //Allow ESC to quit
exit(-1);
}
else
putchar('.');
N = pts.size(); //N is the number of points already stored for this camera
pts.resize(N + n, cvPoint2D32f(0, 0));
active[lr].push_back((uchar)result);
//assert( result != 0 );
if (result)
{
//Calibration will suffer without subpixel interpolation
cvFindCornerSubPix(img, &temp[0], count, //----------------------------sub-pixel corner refinement
cvSize(11, 11), cvSize(-1, -1),
cvTermCriteria(CV_TERMCRIT_ITER + CV_TERMCRIT_EPS,
30, 0.01));
copy(temp.begin(), temp.end(), pts.begin() + N); //-------------------------temp-->ptr
}
cvReleaseImage(&img);
}
fclose(f);
printf("\n");
// HARVEST CHESSBOARD 3D OBJECT POINT LIST:
nframes = active[0].size();//Number of good chessboards found
objectPoints.resize(nframes*n);
for (i = 0; i < ny; i++)
for (j = 0; j < nx; j++)
objectPoints[i*nx + j] =
cvPoint3D32f(i*squareSize, j*squareSize, 0);
for (i = 1; i < nframes; i++)
copy(objectPoints.begin(), objectPoints.begin() + n,
objectPoints.begin() + i*n);
npoints.resize(nframes, n);
N = nframes*n;
CvMat _objectPoints = cvMat(1, N, CV_32FC3, &objectPoints[0]);
CvMat _imagePoints1 = cvMat(1, N, CV_32FC2, &points[0][0]); //---------------- points-->_imagePoints
CvMat _imagePoints2 = cvMat(1, N, CV_32FC2, &points[1][0]);
CvMat _npoints = cvMat(1, npoints.size(), CV_32S, &npoints[0]);
cvSetIdentity(&_M1);
cvSetIdentity(&_M2);
cvZero(&_D1);
cvZero(&_D2);
// CALIBRATE THE STEREO CAMERAS
printf("Running stereo calibration ...");
fflush(stdout);
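// CV_CALIB_FIX_ASPECT_RATIO keeps the fx/fy ratio fixed, CV_CALIB_ZERO_TANGENT_DIST forces
// the tangential distortion coefficients to zero, and CV_CALIB_SAME_FOCAL_LENGTH constrains
// both cameras to the same focal length; reasonable assumptions for two identical cameras.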
cvStereoCalibrate(&_objectPoints, &_imagePoints1, //-------------------------------------
&_imagePoints2, &_npoints, // input: world-coordinate points and the matching left/right pixel points; output: intrinsics and distortion of both cameras plus the stereo parameters R, T, E, F
&_M1, &_D1, &_M2, &_D2,
imageSize, &_R, &_T, &_E, &_F,
cvTermCriteria(CV_TERMCRIT_ITER +
CV_TERMCRIT_EPS, 100, 1e-5),
CV_CALIB_FIX_ASPECT_RATIO +
CV_CALIB_ZERO_TANGENT_DIST +
CV_CALIB_SAME_FOCAL_LENGTH);
printf(" done\n");
/*-------------------------------- print the camera parameters ----------------------------------*/
printf("CameraMatrix1=\n");
for (int i = 0;i<3;i++)
{
for (int j = 0;j<3;j++)
printf("%f\t", cvmGet(&_M1, i, j));
cout << endl;
}
printf("CameraMatrix2=\n");
for (int i = 0;i<3;i++)
{
for (int j = 0;j<3;j++)
printf("%f\t", cvmGet(&_M2, i, j));
cout << endl;
}
printf("CameraDistort1=\n");
for (int i = 0;i<1;i++)
{
for (int j = 0;j<5;j++)
printf("%f\t", cvmGet(&_D1, i, j));
cout << endl;
}
printf("CameraDistort2=\n");
for (int i = 0;i<1;i++)
{
for (int j = 0;j<5;j++)
printf("%f\t", cvmGet(&_D2, i, j));
cout << endl;
}
/*------------------------------------------------------------------------------*/
// CALIBRATION QUALITY CHECK: estimate the error of the parameters returned by cvStereoCalibrate()---------------------------------
// because the output fundamental matrix implicitly
// includes all the output information,
// we can check the quality of calibration using the
// epipolar geometry constraint: m2^t*F*m1=0
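// For each point, the loop below adds up its distance (in pixels) to the epipolar line
// generated from the matching point in the other image; cvComputeCorrespondEpilines()
// returns normalized line coefficients (a^2 + b^2 = 1), so |a*x + b*y + c| is that distance.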
vector<CvPoint3D32f> lines[2];
points[0].resize(N);
points[1].resize(N);
_imagePoints1 = cvMat(1, N, CV_32FC2, &points[0][0]);
_imagePoints2 = cvMat(1, N, CV_32FC2, &points[1][0]);
lines[0].resize(N);
lines[1].resize(N);
CvMat _L1 = cvMat(1, N, CV_32FC3, &lines[0][0]);
CvMat _L2 = cvMat(1, N, CV_32FC3, &lines[1][0]);
//Always work in undistorted space
cvUndistortPoints(&_imagePoints1, &_imagePoints1,
&_M1, &_D1, 0, &_M1);
cvUndistortPoints(&_imagePoints2, &_imagePoints2,
&_M2, &_D2, 0, &_M2);
cvComputeCorrespondEpilines(&_imagePoints1, 1, &_F, &_L1);
cvComputeCorrespondEpilines(&_imagePoints2, 2, &_F, &_L2);
double avgErr = 0;
for (i = 0; i < N; i++)
{
double err = fabs(points[0][i].x*lines[1][i].x + //-------------------------------误差
points[0][i].y*lines[1][i].y + lines[1][i].z)
+ fabs(points[1][i].x*lines[0][i].x +
points[1][i].y*lines[0][i].y + lines[0][i].z);
avgErr += err;
}
printf("avg err = %g\n", avgErr / (nframes*n));
//COMPUTE AND DISPLAY RECTIFICATION
if (showUndistorted)
{
CvMat* mx1 = cvCreateMat(imageSize.height,
imageSize.width, CV_32F);
CvMat* my1 = cvCreateMat(imageSize.height,
imageSize.width, CV_32F);
CvMat* mx2 = cvCreateMat(imageSize.height,
imageSize.width, CV_32F);
CvMat* my2 = cvCreateMat(imageSize.height,
imageSize.width, CV_32F);
CvMat* img1r = cvCreateMat(imageSize.height,
imageSize.width, CV_8U);
CvMat* img2r = cvCreateMat(imageSize.height,
imageSize.width, CV_8U);
CvMat* disp = cvCreateMat(imageSize.height,
imageSize.width, CV_16S);
CvMat* vdisp = cvCreateMat(imageSize.height,
imageSize.width, CV_8U);
CvMat* pair;
double R1[3][3], R2[3][3], P1[3][4], P2[3][4];
CvMat _R1 = cvMat(3, 3, CV_64F, R1);
CvMat _R2 = cvMat(3, 3, CV_64F, R2);
// IF BY CALIBRATED (BOUGUET'S METHOD)
if (useUncalibrated == 0)
{
CvMat _P1 = cvMat(3, 4, CV_64F, P1);
CvMat _P2 = cvMat(3, 4, CV_64F, P2);
cvStereoRectify(&_M1, &_M2, &_D1, &_D2, imageSize, //-------------------------------------------
&_R, &_T,
&_R1, &_R2, &_P1, &_P2, 0,
0/*CV_CALIB_ZERO_DISPARITY*/);
isVerticalStereo = fabs(P2[1][3]) > fabs(P2[0][3]);
//Precompute maps for cvRemap()
cvInitUndistortRectifyMap(&_M1, &_D1, &_R1, &_P1, mx1, my1); //--------------------------------------------
cvInitUndistortRectifyMap(&_M2, &_D2, &_R2, &_P2, mx2, my2);
}
//OR ELSE HARTLEY'S METHOD
else if (useUncalibrated == 1 || useUncalibrated == 2)
// use intrinsic parameters of each camera, but
// compute the rectification transformation directly
// from the fundamental matrix
{
double H1[3][3], H2[3][3], iM[3][3];
CvMat _H1 = cvMat(3, 3, CV_64F, H1);
CvMat _H2 = cvMat(3, 3, CV_64F, H2);
CvMat _iM = cvMat(3, 3, CV_64F, iM);
printf( "-------------------" );
//Just to show you could have independently used F
if (useUncalibrated == 2)
cvFindFundamentalMat(&_imagePoints1,
&_imagePoints2, &_F);
cvStereoRectifyUncalibrated(&_imagePoints1,
&_imagePoints2, &_F,
imageSize,
&_H1, &_H2, 3);
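// cvStereoRectifyUncalibrated() returns pixel-space homographies H1, H2; the lines below
// convert them into rectification rotations R = M^-1 * H * M so that the same
// cvInitUndistortRectifyMap() path as in the calibrated branch can be reused.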
cvInvert(&_M1, &_iM);
cvMatMul(&_H1, &_M1, &_R1);
cvMatMul(&_iM, &_R1, &_R1);
cvInvert(&_M2, &_iM);
cvMatMul(&_H2, &_M2, &_R2);
cvMatMul(&_iM, &_R2, &_R2);
//Precompute map for cvRemap()
cvInitUndistortRectifyMap(&_M1, &_D1, &_R1, &_M1, mx1, my1);
cvInitUndistortRectifyMap(&_M2, &_D2, &_R2, &_M2, mx2, my2); //use the right camera's distortion coefficients here
}
else
assert(0);
/*--------------------------------- print the stereo parameters --------------------------------*/
printf("R=\n");
for (int i = 0;i<3;i++)
{
for (int j = 0;j<3;j++)
printf("%f\t", cvmGet(&_R, i, j));
cout << endl;
}
printf("T=\n");
for (int i = 0;i<1;i++)
{
for (int j = 0;j<3;j++)
printf("%f\t", cvmGet(&_T, i, j));
cout << endl;
}
printf("R1=\n");
for (int i = 0;i<3;i++)
{
for (int j = 0;j<3;j++)
printf("%f\t", cvmGet(&_R1, i, j));
cout << endl;
}
printf("R2=\n");
for (int i = 0;i<3;i++)
{
for (int j = 0;j<3;j++)
printf("%f\t", cvmGet(&_R2, i, j));
cout << endl;
}
/*------------------------------- save the rectification maps to files -----------------*/
cvSave("mapx1.xml", mx1);
cvSave("mapy1.xml", my1);
cvSave("mapx2.xml", mx2);
cvSave("mapy2.xml", my2);
//Part two: compute the disparity. This step can be separated from part one; if the map matrices already exist, they can simply be loaded from the files above.
cvNamedWindow("rectified", 1);
// RECTIFY THE IMAGES AND FIND DISPARITY MAPS
if (!isVerticalStereo)
pair = cvCreateMat(imageSize.height, imageSize.width * 2,
CV_8UC3);
else
pair = cvCreateMat(imageSize.height * 2, imageSize.width,
CV_8UC3);
//Setup for finding stereo correspondences
CvStereoBMState *BMState = cvCreateStereoBMState(); //--------------------------------------
assert(BMState != 0);
BMState->preFilterSize = 41;
BMState->preFilterCap = 31;
BMState->SADWindowSize = 41;
BMState->minDisparity = -64;
BMState->numberOfDisparities = 128;
BMState->textureThreshold = 10;
BMState->uniquenessRatio = 15;
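// Note: cvFindStereoCorrespondenceBM() requires preFilterSize and SADWindowSize to be odd,
// preFilterCap to lie in 1..63, and numberOfDisparities to be a positive multiple of 16;
// the values above satisfy these constraints but usually need tuning for a particular rig.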
for (i = 0; i < nframes; i++)
{
IplImage* img1 = cvLoadImage(imageNames[0][i].c_str(), 0);
IplImage* img2 = cvLoadImage(imageNames[1][i].c_str(), 0);
if (img1 && img2)
{
CvMat part;
cvRemap(img1, img1r, mx1, my1); //mapx/mapy map the original (distorted) images to the final row-aligned rectified images
cvRemap(img2, img2r, mx2, my2); //------------------------------------------------------------
if (!isVerticalStereo || useUncalibrated != 0)
{
// When the stereo camera is oriented vertically,
// useUncalibrated==0 does not transpose the
// image, so the epipolar lines in the rectified
// images are vertical. Stereo correspondence
// function does not support such a case.
cvFindStereoCorrespondenceBM(img1r, img2r, disp, //------------------------------------------------
BMState);
cvNormalize(disp, vdisp, 0, 256, CV_MINMAX);
cvNamedWindow("disparity");
cvShowImage("disparity", vdisp);
}
if (!isVerticalStereo)
{
cvGetCols(pair, &part, 0, imageSize.width);
cvCvtColor(img1r, &part, CV_GRAY2BGR);
cvGetCols(pair, &part, imageSize.width,
imageSize.width * 2);
cvCvtColor(img2r, &part, CV_GRAY2BGR);
for (j = 0; j < imageSize.height; j += 16)
cvLine(pair, cvPoint(0, j),
cvPoint(imageSize.width * 2, j),
CV_RGB(0, 255, 0));
}
else
{
cvGetRows(pair, &part, 0, imageSize.height);
cvCvtColor(img1r, &part, CV_GRAY2BGR);
cvGetRows(pair, &part, imageSize.height,
imageSize.height * 2);
cvCvtColor(img2r, &part, CV_GRAY2BGR);
for (j = 0; j < imageSize.width; j += 16)
cvLine(pair, cvPoint(j, 0),
cvPoint(j, imageSize.height * 2),
CV_RGB(0, 255, 0));
}
//cvShowImage("rectified", pair);
if (cvWaitKey() == 27)
break;
}
cvReleaseImage(&img1);
cvReleaseImage(&img2);
}
cvReleaseStereoBMState(&BMState);
cvReleaseMat(&mx1);
cvReleaseMat(&my1);
cvReleaseMat(&mx2);
cvReleaseMat(&my2);
cvReleaseMat(&img1r);
cvReleaseMat(&img2r);
cvReleaseMat(&disp);
cvReleaseMat(&vdisp);
cvReleaseMat(&pair);
}
}
int main(void)
{
StereoCalib("ch12_list.txt", 9, 6, 0);
return 0;
}
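The image-list file passed to StereoCalib() ("ch12_list.txt" above) is read one file name per line, alternating left and right views of the same chessboard pose; lines starting with '#' are skipped. A hypothetical example (the file names are placeholders):

left01.jpg
right01.jpg
left02.jpg
right02.jpg
...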