OpenCV Stereo Camera Calibration/Image Rectification
Original question: http://stackoverflow.com/questions/24130884/
Note: this content is provided under the CC BY-SA 4.0 license. You are free to use and share it, but you must attribute it to the original authors (not me): StackOverflow
Asked by Khaled
I'm trying to calibrate my two Point Grey (Blackfly) cameras for stereo vision. I'm using the tutorial stereo_calib.cpp that comes with OpenCV (code below). For some reason, I'm getting really bad results (RMS error=4.49756 and average reprojection err = 8.06533) and all my rectified images come out grey. I think my problem is that I'm not picking the right flags for the stereoCalibrate() function, but I've tried many different combinations and at best the rectified images would be warped.
Here's a link to the images I used and a sample rectified pair: https://www.dropbox.com/sh/5wp31o8xcn6vmjl/AAADAfGiaT_NyXEB3zMpcEvVa#/
Any help would be appreciated!!
static void
StereoCalib(const vector<string>& imagelist, Size boardSize, bool useCalibrated=true, bool showRectified=true)
{
if( imagelist.size() % 2 != 0 )
{
cout << "Error: the image list contains odd (non-even) number of elements\n";
return;
}
bool displayCorners = true;//false;//true;
const int maxScale = 1;//2;
const float squareSize = 1.8;
//const float squareSize = 1.f; // Set this to your actual square size
// ARRAY AND VECTOR STORAGE:
vector<vector<Point2f> > imagePoints[2];
vector<vector<Point3f> > objectPoints;
Size imageSize;
//int i, j, k, nimages = (int)imagelist.size()/2;
int i, j, k, nimages = (int)imagelist.size();
cout << "nimages: " << nimages << "\n";
imagePoints[0].resize(nimages);
imagePoints[1].resize(nimages);
vector<string> goodImageList;
for( i = j = 0; i < nimages; i++ )
{
for( k = 0; k < 2; k++ )
{
const string& filename = imagelist[i*2+k];
Mat img = imread(filename, 0);
if(img.empty()) {
break;
}
if( imageSize == Size() ) {
imageSize = img.size();
} else if( img.size() != imageSize )
{
cout << "The image " << filename << " has the size different from the first image size. Skipping the pair\n";
break;
}
bool found = false;
vector<Point2f>& corners = imagePoints[k][j];
for( int scale = 1; scale <= maxScale; scale++ )
{
Mat timg;
if( scale == 1 )
timg = img;
else
resize(img, timg, Size(), scale, scale);
found = findChessboardCorners(timg, boardSize, corners,
CV_CALIB_CB_ADAPTIVE_THRESH | CV_CALIB_CB_NORMALIZE_IMAGE);
if( found )
{
if( scale > 1 )
{
Mat cornersMat(corners);
cornersMat *= 1./scale;
}
break;
}
}
if( displayCorners )
{
cout << filename << endl;
Mat cimg, cimg1;
cvtColor(img, cimg, COLOR_GRAY2BGR);
drawChessboardCorners(cimg, boardSize, corners, found);
double sf = 1280./MAX(img.rows, img.cols);
resize(cimg, cimg1, Size(), sf, sf);
imshow("corners", cimg1);
char c = (char)waitKey(500);
if( c == 27 || c == 'q' || c == 'Q' ) //Allow ESC to quit
exit(-1);
}
else
putchar('.');
if( !found ) {
cout << "!found\n";
break;
}
cornerSubPix(img, corners, Size(11,11), Size(-1,-1),
TermCriteria(CV_TERMCRIT_ITER+CV_TERMCRIT_EPS,
30, 0.01));
}
if( k == 2 )
{
goodImageList.push_back(imagelist[i*2]);
goodImageList.push_back(imagelist[i*2+1]);
j++;
}
}
cout << j << " pairs have been successfully detected.\n";
nimages = j;
if( nimages < 2 )
{
cout << "Error: too little pairs to run the calibration\n";
return;
}
imagePoints[0].resize(nimages);
imagePoints[1].resize(nimages);
objectPoints.resize(nimages);
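// Fill in the 3D chessboard corner coordinates (a planar grid at Z = 0),
// one copy of the pattern per accepted image pair.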
for( i = 0; i < nimages; i++ )
{
for( j = 0; j < boardSize.height; j++ )
for( k = 0; k < boardSize.width; k++ )
objectPoints[i].push_back(Point3f(j*squareSize, k*squareSize, 0));
}
cout << "Running stereo calibration ...\n";
Mat cameraMatrix[2], distCoeffs[2];
cameraMatrix[0] = Mat::eye(3, 3, CV_64F);
cameraMatrix[1] = Mat::eye(3, 3, CV_64F);
Mat R, T, E, F;
double rms = stereoCalibrate(objectPoints, imagePoints[0], imagePoints[1],
cameraMatrix[0], distCoeffs[0],
cameraMatrix[1], distCoeffs[1],
imageSize, R, T, E, F,
//TermCriteria(CV_TERMCRIT_ITER+CV_TERMCRIT_EPS, 100, 1e-5));
TermCriteria(CV_TERMCRIT_ITER+CV_TERMCRIT_EPS, 100, 1e-5),
CV_CALIB_FIX_ASPECT_RATIO +
//CV_CALIB_ZERO_TANGENT_DIST +
CV_CALIB_SAME_FOCAL_LENGTH +
CV_CALIB_RATIONAL_MODEL +
//CV_CALIB_FIX_K3);
//CV_CALIB_FIX_K2);
CV_CALIB_FIX_K3 + CV_CALIB_FIX_K4 + CV_CALIB_FIX_K5);
//CV_CALIB_FIX_K1 + CV_CALIB_FIX_K2 + CV_CALIB_FIX_K3 + CV_CALIB_FIX_K4 + CV_CALIB_FIX_K5);
cout << "done with RMS error=" << rms << endl;
double err = 0;
int npoints = 0;
vector<Vec3f> lines[2];
for( i = 0; i < nimages; i++ )
{
int npt = (int)imagePoints[0][i].size();
Mat imgpt[2];
for( k = 0; k < 2; k++ )
{
imgpt[k] = Mat(imagePoints[k][i]);
undistortPoints(imgpt[k], imgpt[k], cameraMatrix[k], distCoeffs[k], Mat(), cameraMatrix[k]);
computeCorrespondEpilines(imgpt[k], k+1, F, lines[k]);
}
for( j = 0; j < npt; j++ )
{
double errij = fabs(imagePoints[0][i][j].x*lines[1][j][0] +
imagePoints[0][i][j].y*lines[1][j][1] + lines[1][j][2]) +
fabs(imagePoints[1][i][j].x*lines[0][j][0] +
imagePoints[1][i][j].y*lines[0][j][1] + lines[0][j][2]);
err += errij;
}
npoints += npt;
}
cout << "average reprojection err = " << err/npoints << endl;
// save intrinsic parameters
FileStorage fs("intrinsics.yml", CV_STORAGE_WRITE);
if( fs.isOpened() )
{
fs << "M1" << cameraMatrix[0] << "D1" << distCoeffs[0] <<
"M2" << cameraMatrix[1] << "D2" << distCoeffs[1];
fs.release();
}
else
cout << "Error: can not save the intrinsic parameters\n";
Mat R1, R2, P1, P2, Q;
Rect validRoi[2];
stereoRectify(cameraMatrix[0], distCoeffs[0],
cameraMatrix[1], distCoeffs[1],
imageSize, R, T, R1, R2, P1, P2, Q,
//CALIB_ZERO_DISPARITY, 1, imageSize, &validRoi[0], &validRoi[1]);
CALIB_ZERO_DISPARITY, 0, imageSize, &validRoi[0], &validRoi[1]);
fs.open("extrinsics.yml", CV_STORAGE_WRITE);
if( fs.isOpened() )
{
fs << "R" << R << "T" << T << "R1" << R1 << "R2" << R2 << "P1" << P1 << "P2" << P2 << "Q" << Q;
fs.release();
}
else
cout << "Error: can not save the intrinsic parameters\n";
// OpenCV can handle left-right
// or up-down camera arrangements
//bool isVerticalStereo = fabs(P2.at<double>(1, 3)) > fabs(P2.at<double>(0, 3));
bool isVerticalStereo = false;
// COMPUTE AND DISPLAY RECTIFICATION
if( !showRectified )
return;
Mat rmap[2][2];
// IF BY CALIBRATED (BOUGUET'S METHOD)
if( useCalibrated )
{
// we already computed everything
}
// OR ELSE HARTLEY'S METHOD
else
// use intrinsic parameters of each camera, but
// compute the rectification transformation directly
// from the fundamental matrix
{
vector<Point2f> allimgpt[2];
for( k = 0; k < 2; k++ )
{
for( i = 0; i < nimages; i++ )
std::copy(imagePoints[k][i].begin(), imagePoints[k][i].end(), back_inserter(allimgpt[k]));
}
F = findFundamentalMat(Mat(allimgpt[0]), Mat(allimgpt[1]), FM_8POINT, 0, 0);
Mat H1, H2;
stereoRectifyUncalibrated(Mat(allimgpt[0]), Mat(allimgpt[1]), F, imageSize, H1, H2, 3);
R1 = cameraMatrix[0].inv()*H1*cameraMatrix[0];
R2 = cameraMatrix[1].inv()*H2*cameraMatrix[1];
P1 = cameraMatrix[0];
P2 = cameraMatrix[1];
}
//Precompute maps for cv::remap()
initUndistortRectifyMap(cameraMatrix[0], distCoeffs[0], R1, P1, imageSize, CV_16SC2, rmap[0][0], rmap[0][1]);
initUndistortRectifyMap(cameraMatrix[1], distCoeffs[1], R2, P2, imageSize, CV_16SC2, rmap[1][0], rmap[1][1]);
Mat canvas;
double sf;
int w, h;
if( !isVerticalStereo )
{
sf = 600./MAX(imageSize.width, imageSize.height);
w = cvRound(imageSize.width*sf);
h = cvRound(imageSize.height*sf);
canvas.create(h, w*2, CV_8UC3);
}
else
{
sf = 600./MAX(imageSize.width, imageSize.height);
w = cvRound(imageSize.width*sf);
h = cvRound(imageSize.height*sf);
canvas.create(h*2, w, CV_8UC3);
}
for( i = 0; i < nimages; i++ )
{
for( k = 0; k < 2; k++ )
{
Mat img = imread(goodImageList[i*2+k], 0), rimg, cimg;
remap(img, rimg, rmap[k][0], rmap[k][1], CV_INTER_LINEAR);
cvtColor(rimg, cimg, COLOR_GRAY2BGR);
Mat canvasPart = !isVerticalStereo ? canvas(Rect(w*k, 0, w, h)) : canvas(Rect(0, h*k, w, h));
resize(cimg, canvasPart, canvasPart.size(), 0, 0, CV_INTER_AREA);
if( useCalibrated )
{
Rect vroi(cvRound(validRoi[k].x*sf), cvRound(validRoi[k].y*sf),
cvRound(validRoi[k].width*sf), cvRound(validRoi[k].height*sf));
rectangle(canvasPart, vroi, Scalar(0,0,255), 3, 8);
}
}
if( !isVerticalStereo )
for( j = 0; j < canvas.rows; j += 16 )
line(canvas, Point(0, j), Point(canvas.cols, j), Scalar(0, 255, 0), 1, 8);
else
for( j = 0; j < canvas.cols; j += 16 )
line(canvas, Point(j, 0), Point(j, canvas.rows), Scalar(0, 255, 0), 1, 8);
imshow("rectified", canvas);
char c = (char)waitKey();
if( c == 27 || c == 'q' || c == 'Q' )
break;
}
}
Answered by Ben
First of all, about your calibration images. I see a few points that could lead to a better calibration:
- Use more stable images. Most of your images are slightly blurred, which hurts the accuracy of the corner detection.
- Vary the scale. Most of your images show the checkerboard at approximately the same distance from the cameras.
- Be careful with the checkerboard itself. It appears to be quite badly attached to its support. If you want to achieve a good calibration, you must ensure that the checkerboard is attached tightly to a flat surface.
There is much more detailed advice about how to achieve a good calibration in this SO answer.
Now, about the stereo calibration itself. The best way I have found to achieve a good calibration is to calibrate each camera's intrinsics separately (using the calibrateCamera function) and then the extrinsics (using stereoCalibrate), passing the intrinsics in as an initial guess. Have a look at the stereoCalibrate flags for how to do this.
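To make the per-camera stage concrete, here is a minimal sketch (not the asker's code): it assumes objectPoints and the per-camera imagePoints are filled exactly as in the sample above, and it uses the same OpenCV 2.4-era C++ API.

#include <opencv2/core/core.hpp>
#include <opencv2/calib3d/calib3d.hpp>
#include <iostream>
#include <vector>

// First stage: calibrate one camera on its own and return its RMS reprojection error.
static double calibrateSingleCamera(
    const std::vector<std::vector<cv::Point3f> >& objectPoints,
    const std::vector<std::vector<cv::Point2f> >& imagePoints,
    cv::Size imageSize,
    cv::Mat& cameraMatrix, cv::Mat& distCoeffs)
{
    std::vector<cv::Mat> rvecs, tvecs; // per-view poses, not needed afterwards
    double rms = cv::calibrateCamera(objectPoints, imagePoints, imageSize,
                                     cameraMatrix, distCoeffs, rvecs, tvecs);
    std::cout << "single-camera RMS reprojection error = " << rms << std::endl;
    return rms;
}

Call it once with the left-camera corners and once with the right-camera corners; the resulting cameraMatrix/distCoeffs pairs are what you then feed to stereoCalibrate.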
Apart from that, the flags you currently pass to stereoCalibrate do the following:
- CV_CALIB_FIX_ASPECT_RATIO: forces the aspect ratio fx/fy to stay fixed
- CV_CALIB_SAME_FOCAL_LENGTH: seems OK since you have two identical cameras. You can check whether it actually holds by calibrating each camera independently
- CV_CALIB_RATIONAL_MODEL: enables the higher-order distortion coefficients of the rational model (k4, k5 and k6)
- CV_CALIB_FIX_K3 + CV_CALIB_FIX_K4 + CV_CALIB_FIX_K5: fixes those three coefficients. Since you do not provide any initial guess, they are effectively kept at 0, so CV_CALIB_RATIONAL_MODEL buys you very little in combination with these flags
Note that if you calibrate each camera independently and then reuse those intrinsics, there are different levels at which this data can be used (see the sketch after this list):
- With the flag CV_CALIB_FIX_INTRINSIC, the intrinsics are used as-is and only the extrinsic parameters are optimized
- With CV_CALIB_USE_INTRINSIC_GUESS, the intrinsics are used as an initial guess but are optimized again
- With a combination of CV_CALIB_FIX_PRINCIPAL_POINT, CV_CALIB_FIX_FOCAL_LENGTH and CV_CALIB_FIX_K1,...,CV_CALIB_FIX_K6 you get finer control over which parameters are kept fixed and which are re-optimized
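As a minimal sketch of that second stage (again assuming cameraMatrix and distCoeffs come from calibrateCamera as above, keeping the question's variable names and the OpenCV 2.4 argument order with the TermCriteria before the flags):

#include <opencv2/core/core.hpp>
#include <opencv2/calib3d/calib3d.hpp>
#include <vector>

// Second stage: estimate only the extrinsics (R, T) between the two cameras,
// keeping the independently calibrated intrinsics fixed.
static double stereoCalibrateFixedIntrinsics(
    const std::vector<std::vector<cv::Point3f> >& objectPoints,
    const std::vector<std::vector<cv::Point2f> >& leftPoints,
    const std::vector<std::vector<cv::Point2f> >& rightPoints,
    cv::Mat cameraMatrix[2], cv::Mat distCoeffs[2], cv::Size imageSize,
    cv::Mat& R, cv::Mat& T, cv::Mat& E, cv::Mat& F)
{
    // Swap CV_CALIB_FIX_INTRINSIC for CV_CALIB_USE_INTRINSIC_GUESS to refine the
    // intrinsics instead of freezing them, optionally combined with
    // CV_CALIB_FIX_PRINCIPAL_POINT, CV_CALIB_FIX_FOCAL_LENGTH or CV_CALIB_FIX_K1..K6.
    return cv::stereoCalibrate(objectPoints, leftPoints, rightPoints,
                               cameraMatrix[0], distCoeffs[0],
                               cameraMatrix[1], distCoeffs[1],
                               imageSize, R, T, E, F,
                               cv::TermCriteria(CV_TERMCRIT_ITER + CV_TERMCRIT_EPS, 100, 1e-5),
                               CV_CALIB_FIX_INTRINSIC);
}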
Answered by Upperwal
I had the same problem a few weeks back: I tried almost 50 stereo image samples, but the rectified images were all blank. So I decided to write my own implementation of the stereo calibration code, but running in real time.
This code detects the chessboard corners in real time, saves only those frames where the corners are found in both the left and right images, and then runs the stereo calibration code on them.
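The idea, roughly, looks like the following minimal sketch (this is not the linked code; it assumes two cameras on device indices 0 and 1, a 9x6 board, and simply saves matching frames to disk):

#include <opencv2/core/core.hpp>
#include <opencv2/calib3d/calib3d.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <cstdio>
#include <vector>

int main()
{
    cv::VideoCapture capL(0), capR(1);      // assumed device indices
    const cv::Size boardSize(9, 6);         // assumed inner-corner count
    int saved = 0;

    while (saved < 30)                      // stop after 30 good pairs
    {
        cv::Mat left, right;
        capL >> left;
        capR >> right;
        if (left.empty() || right.empty())
            break;

        std::vector<cv::Point2f> cornersL, cornersR;
        bool okL = cv::findChessboardCorners(left, boardSize, cornersL,
                       CV_CALIB_CB_ADAPTIVE_THRESH | CV_CALIB_CB_FAST_CHECK);
        bool okR = cv::findChessboardCorners(right, boardSize, cornersR,
                       CV_CALIB_CB_ADAPTIVE_THRESH | CV_CALIB_CB_FAST_CHECK);

        // Keep the frame pair only if the full board is visible in both views.
        if (okL && okR)
        {
            char nameL[64], nameR[64];
            std::sprintf(nameL, "left%02d.jpg", saved);
            std::sprintf(nameR, "right%02d.jpg", saved);
            cv::imwrite(nameL, left);
            cv::imwrite(nameR, right);
            ++saved;
        }

        cv::imshow("left", left);
        cv::imshow("right", right);
        if ((char)cv::waitKey(30) == 27)    // ESC to quit early
            break;
    }
    return 0;
}

Feeding the saved pairs into a stereo calibration routine like the one in the question is then the same as before.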
I hope this helps you and others in the future.
Source Code: https://github.com/upperwal/opencv/blob/master/samples/cpp/stereo_calib.cpp
Demo Video: https://www.youtube.com/watch?v=kizO_s-YUDU
All the best
Answered by Nikita Chopra
I had a similar problem and then re-edited the code. Please make sure you have a sufficient number of images taken at different depths from the camera and at different orientations; that will lead to a lower reprojection error.
#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/calib3d/calib3d.hpp>
#include <opencv2/highgui/highgui.hpp>
#include<iostream>
int main()
{
const int CHESSBOARD_WIDTH = 9; //input width of chessboard
const int CHESSBOARD_HEIGHT = 6; //input height of chessboard
const float squareSize = 3.96; //input size of a side of a single square in chessboard
cv::Size corner=cv::Size(CHESSBOARD_WIDTH,CHESSBOARD_HEIGHT);
int counter =30;
int nimages=24;
cv::Size imageSize;
enum{capturing=0,calibrated=1};
int mode=capturing;
char leftfilename[100];
char rightfilename[100];
std::vector<cv::Mat> imagePoints1;
std::vector<cv::Mat> imagePoints2;
std::vector<std::vector<cv::Point3f>> objectPoints;
bool found1=false;
bool found2=false;
int counter2=0;
cv::Mat pointBuf1=cv::Mat::zeros(54,2,CV_32FC1);
cv::Mat pointBuf2=cv::Mat::zeros(54,2,CV_32FC1);
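// Loop over the saved image pairs and try to detect the chessboard corners in both views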
for(int i=1;i<=counter;i++)
{ sprintf(leftfilename,"newleftcheck%d.jpg",i);
sprintf(rightfilename,"newrightcheck%d.jpg",i);
// const int CHESSBOARD_INTERSECTION_COUNT = CHESSBOARD_WIDTH * CHESSBOARD_HEIGHT;
cv::Mat imgleft_frame=cv::imread(leftfilename);
cv::Mat imgright_frame=cv::imread(rightfilename);
//cv::Mat imgleft_frame =cv::Mat(480,640,CV_8UC4,s.leftBuffer,4*imgWidth*sizeof(unsigned char));
//cv::Mat imgright_frame =cv::Mat(480,640,CV_8UC4,s.rightBuffer,4*imgWidth*sizeof(unsigned char));
imageSize=imgleft_frame.size();
found1 = findChessboardCorners(imgleft_frame, corner,pointBuf1,cv::CALIB_CB_ADAPTIVE_THRESH | cv::CALIB_CB_FAST_CHECK | cv::CALIB_CB_NORMALIZE_IMAGE);
found2 = findChessboardCorners(imgright_frame, cv::Size(CHESSBOARD_WIDTH,CHESSBOARD_HEIGHT),pointBuf2,cv::CALIB_CB_ADAPTIVE_THRESH | cv::CALIB_CB_FAST_CHECK | cv::CALIB_CB_NORMALIZE_IMAGE);
if(found1)
{ cv::Mat gray_image1;
cvtColor(imgleft_frame,gray_image1,cv::COLOR_BGRA2GRAY);
cornerSubPix( gray_image1, pointBuf1, cv::Size(11,11),cv::Size(-1,-1), cv::TermCriteria(cv::TermCriteria::EPS+cv::TermCriteria::MAX_ITER, 30, 0.1 ));
drawChessboardCorners( imgleft_frame,cv::Size(CHESSBOARD_WIDTH,CHESSBOARD_HEIGHT), pointBuf1, found1 );
}
if(found2)
{ cv::Mat gray_image2;
cvtColor(imgright_frame,gray_image2,cv::COLOR_BGRA2GRAY);
cornerSubPix( gray_image2, pointBuf2, cv::Size(11,11),cv::Size(-1,-1), cv::TermCriteria(cv::TermCriteria::EPS+cv::TermCriteria::MAX_ITER, 30, 0.1 ));
drawChessboardCorners( imgright_frame,cv::Size(CHESSBOARD_WIDTH,CHESSBOARD_HEIGHT), pointBuf2, found2 );
}
if(found1&&found2)
{ imagePoints1.push_back(pointBuf1);
imagePoints2.push_back(pointBuf2);
//sprintf(leftfilename,"newleftcheck%d.jpg",s.counter);
//sprintf(rightfilename,"newrightcheck%d.jpg",s.counter);
//cv::imwrite(leftfilename,imgleft_frame);
//cv::imwrite(rightfilename,imgright_frame);
counter2=counter2+1;
std::cout<<counter2<<std::endl;
}
nimages=counter2;
objectPoints.resize(nimages);
std::cout<<"countervalue"<<i<<std::endl;
}
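// Fill in the planar 3D chessboard coordinates (Z = 0) for every accepted pair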
for(int i = 0; i <nimages; i++ )
{
for( int j = 0; j < CHESSBOARD_HEIGHT; j++ )
for( int k = 0; k < CHESSBOARD_WIDTH; k++ )
objectPoints[i].push_back(cv::Point3f(j*squareSize, k*squareSize, 0));
}
std::cout<<"check1"<<std::endl;
cv::Mat cameraMatrix[2], distCoeffs[2];
cameraMatrix[0] = cv::Mat::eye(3, 3, CV_64F);
cameraMatrix[1] = cv::Mat::eye(3, 3, CV_64F);
cv::Mat R, T, E, F;
std::cout<<objectPoints.size()<<std::endl;
std::cout<<imagePoints1.size()<<std::endl;
if(imagePoints1.size()==imagePoints2.size())
std::cout<<"samesize"<<std::endl;
if(imagePoints1.size()>=nimages )
{ std::cout<<"check2"<<std::endl;
double rms = stereoCalibrate( objectPoints, imagePoints1, imagePoints2,cameraMatrix[0], distCoeffs[0],
cameraMatrix[1], distCoeffs[1],imageSize, R, T, E, F,
cv::CALIB_FIX_ASPECT_RATIO +cv::CALIB_ZERO_TANGENT_DIST +
cv::CALIB_SAME_FOCAL_LENGTH +cv::CALIB_RATIONAL_MODEL +
cv::CALIB_FIX_K3 +cv::CALIB_FIX_K4 + cv::CALIB_FIX_K5,
cv::TermCriteria(cv::TermCriteria::COUNT+cv::TermCriteria::EPS, 100, 1e-5) );
std::cout<<"check3"<<std::endl;
std::cout << "done with RMS error=" << rms << std::endl;
mode=calibrated;
std::cout<<"calibrated"<<std::endl;
}
if(mode==calibrated)
{
double err = 0;
int npoints = 0;
std::vector<cv::Vec3f> lines[2];
for(int i = 0; i < nimages; i++ )
{
int npt = (int)imagePoints1[i].rows;
std::cout<<npt<<std::endl;
cv:: Mat imgpt1;
cv::Mat imgpt2;
// for(int k = 0; k < 2; k++ )
imgpt1 = cv::Mat(imagePoints1[i]);
undistortPoints(imgpt1, imgpt1, cameraMatrix[0], distCoeffs[0], cv::Mat(), cameraMatrix[0]);
computeCorrespondEpilines(imgpt1, 1, F, lines[0]);
imgpt2 = cv::Mat(imagePoints2[i]);
undistortPoints(imgpt2, imgpt2, cameraMatrix[1], distCoeffs[1], cv::Mat(), cameraMatrix[1]);
computeCorrespondEpilines(imgpt2, 2, F, lines[1]);
std::cout<<"checksdcdufb"<<std::endl;
//std::cout<<"imagepoint"<<imagePoints1[1].at<unsigned int>(1,1)<<std::endl;
/* for(int j = 0; j < npt; j++ )
{
double errij = fabs(imagePoints1[i].at<double>(j,0) *lines[1][j][0] +imagePoints1[i].at<double>(j,1)*lines[1][j][1] + lines[1][j][2]) +fabs(imagePoints2[i].at<double>(j,0)*lines[0][j][0] +
imagePoints2[i].at<double>(j,1)*lines[0][j][1] + lines[0][j][2]);
err += errij;
}
npoints += npt;
}*/
std::cout<<"check8"<<std::endl;
cv::FileStorage fs("intrinsics.xml", cv::FileStorage::WRITE);
if( fs.isOpened() )
{
fs << "M1" << cameraMatrix[0] << "D1" << distCoeffs[0] <<
"M2" << cameraMatrix[1] << "D2" << distCoeffs[1];
fs.release();
}
else
std:: cout << "Error: can not save the intrinsic parameters\n";
cv::Mat R1, R2, P1, P2, Q;
cv::Rect validRoi[2];
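// Compute the rectification rotations (R1, R2), projection matrices (P1, P2)
// and the disparity-to-depth mapping Q for the calibrated pair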
stereoRectify(cameraMatrix[0], distCoeffs[0],
cameraMatrix[1], distCoeffs[1],
imageSize, R, T, R1, R2, P1, P2, Q,
cv::CALIB_ZERO_DISPARITY, 1, imageSize, &validRoi[0], &validRoi[1]);
fs.open("extrinsics.xml", cv::FileStorage::WRITE);
if( fs.isOpened() )
{
fs << "R" << R << "T" << T << "R1" << R1 << "R2" << R2 << "P1" << P1 << "P2" << P2 << "Q" << Q;
fs.release();
}
else
std::cout << "Error: can not save the intrinsic parameters\n";
}
//std::cout << "average reprojection err = " << err/npoints <<std::endl;
}
return 0;
}