Bundle adjustment explained
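Bundle adjustment jointly refines all camera poses and all 3-D points at once by minimizing the total reprojection error: the sum of squared distances between each observed image point and the projection of its estimated 3-D point through its estimated camera. A minimal sketch with the Ceres solver, assuming fixed, known intrinsics and an angle-axis + translation camera parameterization; the observations, initial guesses, and names below are made-up illustrations:

#include <ceres/ceres.h>
#include <ceres/rotation.h>
#include <iostream>

// Reprojection error of one 3-D point observed by one camera.
// camera = [angle-axis rotation (3), translation (3)]
struct ReprojectionError
{
    ReprojectionError(double u, double v, double fx, double fy, double cx, double cy)
        : u(u), v(v), fx(fx), fy(fy), cx(cx), cy(cy) {}

    template <typename T>
    bool operator()(const T* const camera, const T* const point, T* residuals) const
    {
        T p[3];
        ceres::AngleAxisRotatePoint(camera, point, p);            // p = R*X
        p[0] += camera[3]; p[1] += camera[4]; p[2] += camera[5];  // p = R*X + t
        residuals[0] = fx * p[0] / p[2] + cx - u;                 // predicted - observed
        residuals[1] = fy * p[1] / p[2] + cy - v;
        return true;
    }

    double u, v, fx, fy, cx, cy;
};

int main()
{
    // initial guesses for two cameras and two points (illustrative numbers)
    double cameras[2][6] = {{0, 0, 0, 0, 0, 0}, {0, 0.1, 0, -1.0, 0, 0}};
    double points[2][3] = {{0.1, 0.2, 3.0}, {-0.3, 0.1, 4.0}};
    // observations: camera index, point index, measured u, measured v
    double observations[4][4] = {{0, 0, 310, 250}, {0, 1, 280, 245},
                                 {1, 0, 420, 250}, {1, 1, 395, 246}};

    ceres::Problem problem;
    for (const auto &o : observations)
    {
        ceres::CostFunction *cost =
            new ceres::AutoDiffCostFunction<ReprojectionError, 2, 6, 3>(
                new ReprojectionError(o[2], o[3], 90.0, 90.0, 300.0, 300.0));
        problem.AddResidualBlock(cost, nullptr, cameras[int(o[0])], points[int(o[1])]);
    }

    ceres::Solver::Options options;
    options.linear_solver_type = ceres::DENSE_SCHUR; // SPARSE_SCHUR for large problems
    ceres::Solver::Summary summary;
    ceres::Solve(options, &problem, &summary);
    std::cout << summary.BriefReport() << std::endl;
}

The Schur-based solvers exploit the structure of bundle adjustment (points interact with each other only through the cameras), which is what makes problems with thousands of cameras and millions of points tractable.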
#include <opencv/cv.hpp>
#include <opencv2/core.hpp>
#include <opencv2/calib3d.hpp>
#include <iostream>
#include "transformation.hpp"

// Householder QR decomposition, after https://www.cnblogs.com/shengguang/p/5932522.html
void HouseHolderQR(const cv::Mat &A, cv::Mat &Q, cv::Mat &R)
{
    assert(A.channels() == 1);
    assert(A.rows >= A.cols);
    auto sign = [](double value) { return value >= 0 ? 1 : -1; };
    const auto totalRows = A.rows;
    const auto totalCols = A.cols;
    R = A.clone();
    Q = cv::Mat::eye(totalRows, totalRows, A.type());
    for (int col = 0; col < A.cols; ++col)
    {
        cv::Mat matAROI = cv::Mat(R, cv::Range(col, totalRows), cv::Range(col, totalCols));
        cv::Mat y = matAROI.col(0);
        auto yNorm = cv::norm(y);
        cv::Mat e1 = cv::Mat::eye(y.rows, 1, A.type());
        cv::Mat w = y + sign(y.at<double>(0, 0)) * yNorm * e1;
        cv::Mat v = w / cv::norm(w);
        cv::Mat vT;
        cv::transpose(v, vT);
        cv::Mat I = cv::Mat::eye(matAROI.rows, matAROI.rows, A.type());
        cv::Mat I_2VVT = I - 2 * v * vT;
        cv::Mat matH = cv::Mat::eye(totalRows, totalRows, A.type());
        cv::Mat matHROI = cv::Mat(matH, cv::Range(col, totalRows), cv::Range(col, totalRows));
        I_2VVT.copyTo(matHROI);
        R = matH * R;
        Q = Q * matH;
    }
}

int main()
{
    /*
    There are two notations for the projection matrix:

    1) P = K[R|t]
       The world point is first moved into the camera frame, X_cam = R*X + t,
       and then projected:

           x = K[I|0] * | R t | * | X |
                        | 0 1 |   | 1 |     =>  P = K[R|t]

    2) P = KR[I|-C], where C is the camera centre in world coordinates and
       X_cam = R*(X_w - C):

           x = K[I|0] * | R -RC | * | X |
                        | 0  1  |   | 1 |   =>  P = K[R|-RC] = KR[I|-C]

    Comparing (1) and (2): t = -R*C
    */
    int numberOfPixelInHeight, numberOfPixelInWidth;
    double heightOfSensor, widthOfSensor;
    double focalLength = 1.5;
    double mx, my, U0, V0;
    numberOfPixelInHeight = 600;
    numberOfPixelInWidth = 800;
    heightOfSensor = 10;
    widthOfSensor = 10;
    my = numberOfPixelInHeight / heightOfSensor;
    U0 = numberOfPixelInHeight / 2;
    mx = numberOfPixelInWidth / widthOfSensor;
    V0 = numberOfPixelInWidth / 2;

    cv::Mat cameraMatrix = (cv::Mat_<double>(3, 3) << focalLength * mx, 0, V0,
                                                      0, focalLength * my, U0,
                                                      0, 0, 1);

    double tx, ty, tz, roll, pitch, yaw;
    tx = 1.0;
    ty = 2.1;
    tz = -1.4;
    cv::Mat translation = (cv::Mat_<double>(3, 1) << tx, ty, tz);

    cv::Vec3d theta;
    roll = +M_PI / 4;
    pitch = +M_PI / 10;
    yaw = -M_PI / 6;
    theta[0] = roll;
    theta[1] = pitch;
    theta[2] = yaw;
    cv::Mat rotation = eulerAnglesToRotationMatrix(theta);

    // 1) P = K[R|t]
    cv::Mat R_T;
    cv::hconcat(rotation, translation, R_T);
    cv::Mat projectionMatrix = cameraMatrix * R_T;

    cv::Mat calculatedCameraMatrix, calculatedRotation, calculatedTranslation;
    cv::decomposeProjectionMatrix(projectionMatrix, calculatedCameraMatrix, calculatedRotation, calculatedTranslation);

    std::cout << "==============================Ground Truth==============================" << std::endl;
    std::cout << "Rotation Matrix (Ground Truth)" << std::endl;
    std::cout << rotation << std::endl;
    std::cout << "Translation Matrix (Ground Truth)" << std::endl;
    std::cout << translation << std::endl;
    std::cout << "Camera Matrix (Ground Truth)" << std::endl;
    std::cout << cameraMatrix << std::endl;

    std::cout << "==============================Decomposing Using OpenCV==============================" << std::endl;
    std::cout << "Computed Rotation Matrix (OpenCV)" << std::endl;
    std::cout << calculatedRotation << std::endl;
    std::cout << "Computed Translation Matrix (OpenCV)" << std::endl;
    // decomposeProjectionMatrix returns the camera centre C in homogeneous
    // coordinates, so dehomogenize it and convert back with t = -R*C
    cv::Mat tempT = (cv::Mat_<double>(3, 1) <<
        calculatedTranslation.at<double>(0, 0) / calculatedTranslation.at<double>(3, 0),
        calculatedTranslation.at<double>(1, 0) / calculatedTranslation.at<double>(3, 0),
        calculatedTranslation.at<double>(2, 0) / calculatedTranslation.at<double>(3, 0));
    std::cout << -rotation * tempT << std::endl;
    std::cout << "Computed Camera Matrix (OpenCV)" << std::endl;
    std::cout << calculatedCameraMatrix << std::endl;

    //////////////////////////// decomposing the projection matrix ////////////////////////////
    /*
    P = KR[I|-C] = [H_inf3x3 | h3x1], so H_inf3x3 = KR

    1) Camera centre:
       -KR*C = h3x1  =>  C = -(KR)^-1 * h3x1 = -(H_inf3x3)^-1 * h3x1

    2) K and R:
       (KR)^-1 = R^-1 * K^-1 is the product of an orthogonal matrix and an
       upper-triangular matrix, so QR-decomposing H_inf3x3^-1 = Q*R gives the
       camera rotation as Q^-1 and the intrinsic matrix as R^-1
    */
    cv::Mat H_inf3x3 = (cv::Mat_<double>(3, 3) <<
        projectionMatrix.at<double>(0, 0), projectionMatrix.at<double>(0, 1), projectionMatrix.at<double>(0, 2),
        projectionMatrix.at<double>(1, 0), projectionMatrix.at<double>(1, 1), projectionMatrix.at<double>(1, 2),
        projectionMatrix.at<double>(2, 0), projectionMatrix.at<double>(2, 1), projectionMatrix.at<double>(2, 2));

    cv::Mat h3x1 = (cv::Mat_<double>(3, 1) <<
        projectionMatrix.at<double>(0, 3), projectionMatrix.at<double>(1, 3), projectionMatrix.at<double>(2, 3));

    cv::Mat Q, R;
    cv::Mat H_inf3x3_inv = H_inf3x3.inv();
    HouseHolderQR(H_inf3x3_inv, Q, R);
    cv::Mat K = R.inv();

    std::cout << "==============================Decomposing Using My Code==============================" << std::endl;
    // K is homogeneous, so divide it by its last element
    std::cout << "Estimated Camera Matrix\n" << K / K.at<double>(2, 2) << std::endl;
    cv::Mat rotationMatrix = Q.inv();
    // the QR decomposition recovers K and R only up to a common sign, hence the -1
    std::cout << "Estimated Camera Rotation\n" << rotationMatrix * -1 << std::endl;
    std::cout << "Estimated Camera Translation" << std::endl;
    // t = -R*C with C = -H_inf3x3^-1 * h3x1 and R = -Q^-1
    std::cout << -1 * (-Q.inv() * (-H_inf3x3.inv() * h3x1)) << std::endl;
}
And the output is:
==============================Ground Truth==============================
Rotation Matrix (Ground Truth)
[0.823639103546332, 0.5427868801100539, -0.1643199010764935;
 -0.4755282581475767, 0.5031184295835893, -0.7216264418079997;
 -0.3090169943749474, 0.6724985119639573, 0.6724985119639574]
Translation Matrix (Ground Truth)
[1;
 2.1;
 -1.4]
Camera Matrix (Ground Truth)
[120, 0, 400;
 0, 90, 300;
 0, 0, 1]
==============================Decomposing Using OpenCV==============================
Computed Rotation Matrix (OpenCV)
[0.823639103546332, 0.5427868801100539, -0.1643199010764936;
 -0.4755282581475769, 0.5031184295835893, -0.7216264418079997;
 -0.3090169943749474, 0.6724985119639573, 0.6724985119639574]
Computed Translation Matrix (OpenCV)
[0.9999999999999988;
 2.1;
 -1.4]
Computed Camera Matrix (OpenCV)
[120, -1.4210854715202e-14, 400;
 0, 90.00000000000001, 300;
 0, 0, 1]
==============================Decomposing Using My Code==============================
Estimated Camera Matrix
[120, -5.776629175002766e-14, 399.9999999999999;
 -5.608036291626306e-15, 89.99999999999994, 299.9999999999999;
 -1.49192016182457e-17, -1.561251128379126e-16, 1]
Estimated Camera Rotation
[0.8236391035463322, 0.5427868801100542, -0.1643199010764936;
 -0.4755282581475767, 0.5031184295835892, -0.7216264418079998;
 -0.3090169943749475, 0.6724985119639573, 0.6724985119639575]
Estimated Camera Translation
[1;
 2.099999999999999;
 -1.4]
Decomposing Projection Using OpenCV and C++
#include <Eigen/Dense>
#include <opencv2/core.hpp>
#include <opencv2/core/eigen.hpp>
#include <opencv2/calib3d.hpp>
#include <opencv2/imgcodecs.hpp>
#include <iostream>
#include <vector>

void projectFromCameraCoordinateToCameraPlane(Eigen::Matrix3Xd &pointsInCameraCoordinate,
                                              Eigen::Matrix3d &cameraIntrinsicMatrix,
                                              Eigen::Matrix2Xd &pointsInCameraPlane)
{
    /*
    The intrinsic camera matrix is built from:

        focalLength f                                 (unit: mm)
        mx = numberOfPixelInWidth / widthOfSensor     (unit: pixel/mm)
        my = numberOfPixelInHeight / heightOfSensor   (unit: pixel/mm)
        U0 = numberOfPixelInHeight / 2                (unit: pixel)
        V0 = numberOfPixelInWidth / 2                 (unit: pixel)
        Gamma (skew) = 0

    With X, Y, Z, W the homogeneous coordinates of a point in the camera frame:

        |v'|   |f*mx  Gamma  V0  0| |X|
        |u'| = |0     f*my   U0  0| |Y|
        |w'|   |0     0      1   0| |Z|
                                    |W|

    V is the horizontal pixel axis (x direction) and U the vertical one
    (y direction), both starting at the top-left corner of the image.

    To project points from camera coordinates onto the camera plane:

        v' = f*mx*X + V0*Z
        u' = f*my*Y + U0*Z
        w' = Z

        V = v'/w' = f*mx*X/Z + V0
        U = u'/w' = f*my*Y/Z + U0

    (U, V) is the index of the corresponding point in the image.
    */
    Eigen::Matrix3Xd pointsInCameraPlaneHomogeneous(pointsInCameraCoordinate.rows(), pointsInCameraCoordinate.cols());
    pointsInCameraPlaneHomogeneous = cameraIntrinsicMatrix * pointsInCameraCoordinate;
    std::cout << "pointsInCameraPlaneHomogeneous" << std::endl;
    std::cout << pointsInCameraPlaneHomogeneous << std::endl;
    for (int i = 0; i < pointsInCameraPlaneHomogeneous.cols(); i++)
    {
        pointsInCameraPlaneHomogeneous(0, i) = pointsInCameraPlaneHomogeneous(0, i) / pointsInCameraPlaneHomogeneous(2, i);
        pointsInCameraPlaneHomogeneous(1, i) = pointsInCameraPlaneHomogeneous(1, i) / pointsInCameraPlaneHomogeneous(2, i);
    }
    pointsInCameraPlane = pointsInCameraPlaneHomogeneous.block(0, 0, pointsInCameraPlaneHomogeneous.rows() - 1, pointsInCameraPlaneHomogeneous.cols());
}

void transformPoints(Eigen::Matrix3Xd &points, Eigen::Matrix3d &rotationMatrix,
                     Eigen::Vector3d &translationVector, Eigen::Matrix3Xd &transformedPoints)
{
    transformedPoints = (rotationMatrix * points).colwise() + translationVector;
}

void projectFromCameraCoordinateToCameraPlane_Example()
{
    // OpenCV coordinate system: Z points forward, X to the right, Y downward.
    int numberOfPixelInHeight, numberOfPixelInWidth;
    double heightOfSensor, widthOfSensor;
    double focalLength = 1.5;
    double mx, my, U0, V0;
    numberOfPixelInHeight = 600;
    numberOfPixelInWidth = 600;
    heightOfSensor = 10;
    widthOfSensor = 10;
    my = numberOfPixelInHeight / heightOfSensor;
    U0 = numberOfPixelInHeight / 2;
    mx = numberOfPixelInWidth / widthOfSensor;
    V0 = numberOfPixelInWidth / 2;

    double L = 0.2;
    Eigen::Matrix3Xd controlPointsInWorldCoordinate(3, 6);
    Eigen::Matrix3Xd controlPointsInCameraCoordinate(3, 6);
    Eigen::Matrix3d cameraIntrinsicMatrix;
    cameraIntrinsicMatrix << focalLength * mx, 0, V0,
                             0, focalLength * my, U0,
                             0, 0, 1;

    controlPointsInWorldCoordinate.col(0) = Eigen::Vector3d(-L, -L, 0);
    controlPointsInWorldCoordinate.col(1) = Eigen::Vector3d(2 * L, -L, 0.2);
    controlPointsInWorldCoordinate.col(2) = Eigen::Vector3d(L, L, 0.2);
    controlPointsInWorldCoordinate.col(3) = Eigen::Vector3d(-L, L, 0);
    controlPointsInWorldCoordinate.col(4) = Eigen::Vector3d(-2 * L, L, 0);
    controlPointsInWorldCoordinate.col(5) = Eigen::Vector3d(0, 0, 0.5);

    Eigen::Matrix3d rotationMatrix;
    rotationMatrix << 1, 0, 0,
                      0, 1, 0,
                      0, 0, 1;
    Eigen::Vector3d translationVector(-0.1, 0.1, 1.2);

    transformPoints(controlPointsInWorldCoordinate, rotationMatrix, translationVector, controlPointsInCameraCoordinate);

    Eigen::Matrix2Xd pointsInCameraPlane(2, controlPointsInCameraCoordinate.cols());
    projectFromCameraCoordinateToCameraPlane(controlPointsInCameraCoordinate, cameraIntrinsicMatrix, pointsInCameraPlane);

    // the same projection with OpenCV: the points are given in world
    // coordinates and rvec/tvec move them into the camera frame
    std::vector<cv::Point3d> cvPointsInWorldCoordinate;
    std::vector<cv::Point2d> cvPointsInCameraPlane;

    cvPointsInWorldCoordinate.push_back(cv::Point3d(-L, -L, 0));
    cvPointsInWorldCoordinate.push_back(cv::Point3d(2 * L, -L, 0.2));
    cvPointsInWorldCoordinate.push_back(cv::Point3d(L, L, 0.2));
    cvPointsInWorldCoordinate.push_back(cv::Point3d(-L, L, 0));
    cvPointsInWorldCoordinate.push_back(cv::Point3d(-2 * L, L, 0));
    cvPointsInWorldCoordinate.push_back(cv::Point3d(0, 0, 0.5));

    cv::Mat cameraMatrix = (cv::Mat_<double>(3, 3) << focalLength * mx, 0, V0,
                                                      0, focalLength * my, U0,
                                                      0, 0, 1);
    cv::Mat rvec = cv::Mat::eye(3, 3, CV_64F); // identity rotation
    cv::Mat tvec = (cv::Mat_<double>(3, 1) << -0.1, 0.1, 1.2);
    cv::projectPoints(cvPointsInWorldCoordinate, rvec, tvec, cameraMatrix, cv::noArray(), cvPointsInCameraPlane);

    std::cout << "cameraIntrinsicMatrix" << std::endl;
    std::cout << cameraIntrinsicMatrix << std::endl;
    std::cout << cameraMatrix << std::endl;

    Eigen::MatrixXd image(numberOfPixelInHeight, numberOfPixelInWidth);
    image = Eigen::MatrixXd::Zero(numberOfPixelInHeight, numberOfPixelInWidth);
    int U, V;
    for (int i = 0; i < pointsInCameraPlane.cols(); i++)
    {
        // row 0 of pointsInCameraPlane is the horizontal coordinate V and
        // row 1 the vertical coordinate U; the image is indexed (row, column)
        V = int(pointsInCameraPlane(0, i));
        U = int(pointsInCameraPlane(1, i));
        std::cout << U << "," << V << std::endl;
        image(U, V) = 255;
        std::cout << cvPointsInCameraPlane.at(i) << std::endl;
    }
    cv::Mat dst;
    cv::eigen2cv(image, dst);
    dst.convertTo(dst, CV_8UC1); // imwrite expects an 8-bit image
    std::string fileName = std::string("eigen_camera_projection_result_focal_") + std::to_string(focalLength) + std::string("_.jpg");
    cv::imwrite(fileName, dst);
}
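With the numbers above, f*mx = f*my = 1.5 * 60 = 90 and U0 = V0 = 300, so the projection can be checked by hand. The sixth control point (0, 0, 0.5) lands at (-0.1, 0.1, 1.7) in camera coordinates after the translation (-0.1, 0.1, 1.2), and projects to

    x = 90 * (-0.1) / 1.7 + 300 ≈ 294.7
    y = 90 * ( 0.1) / 1.7 + 300 ≈ 305.3

so cv::projectPoints reports roughly [294.7, 305.3] and the point is drawn at image row U = 305, column V = 294.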
Camera Projection Matrix with Eigen
Following my other post, you can extract the equation of the epipolar lines: by choosing a point in one image and multiplying it by the fundamental matrix, we get the line in the other image on which the corresponding point must lie.
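For a left-image point in homogeneous pixel coordinates x = (u, v, 1)^T, the epipolar line in the right image is

    l' = F*x = (a, b, c)^T, i.e. the line a*u' + b*v' + c = 0,

and any correct right-image correspondence x' = (u', v', 1)^T satisfies x'^T * F * x = 0. The code below computes the line both ways, with cv::computeCorrespondEpilines and manually as F*x, and prints a*u' + b*v' + c as the error of each correspondence: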
/*
Methods for computing the fundamental matrix:
FM_7POINT 7-point algorithm
FM_8POINT 8-point algorithm
FM_LMEDS  least-median algorithm
FM_RANSAC RANSAC algorithm
*/
cv::Mat fundamentalMatrix = cv::findFundamentalMat(imagePointsLeftCamera, imagePointsRightCamera, cv::FM_8POINT);

std::vector<cv::Vec3d> leftLines, rightLines;
cv::computeCorrespondEpilines(imagePointsLeftCamera, 1, fundamentalMatrix, rightLines);
cv::computeCorrespondEpilines(imagePointsRightCamera, 2, fundamentalMatrix, leftLines);

cv::Mat leftImageRGB(leftImage.size(), CV_8UC3);
cv::cvtColor(leftImage, leftImageRGB, cv::COLOR_GRAY2RGB);

cv::Mat rightImageRGB(rightImage.size(), CV_8UC3);
cv::cvtColor(rightImage, rightImageRGB, cv::COLOR_GRAY2RGB);

cv::Mat imagePointLeftCameraMatrix = cv::Mat_<double>(3, 1);
for (std::size_t i = 0; i < rightLines.size(); i++)
{
    cv::Vec3d l = rightLines.at(i);
    double a = l.val[0];
    double b = l.val[1];
    double c = l.val[2];
    std::cout << "------------------------a,b,c Using OpenCV (ax+by+c=0)------------------------------" << std::endl;
    std::cout << a << ", " << b << ", " << c << std::endl;

    std::cout << "------------------------calculating a,b,c (ax+by+c=0) ------------------------------" << std::endl;
    imagePointLeftCameraMatrix.at<double>(0, 0) = imagePointsLeftCamera[i].x;
    imagePointLeftCameraMatrix.at<double>(1, 0) = imagePointsLeftCamera[i].y;
    imagePointLeftCameraMatrix.at<double>(2, 0) = 1;
    cv::Mat rightLineMatrix = fundamentalMatrix * imagePointLeftCameraMatrix;
    // rightLineMatrix is a 3x1 column vector (a, b, c)^T
    std::cout << rightLineMatrix.at<double>(0, 0) << ", " << rightLineMatrix.at<double>(1, 0) << ", " << rightLineMatrix.at<double>(2, 0) << std::endl;

    // drawing the line ax+by+c=0 across the full width of the image
    double x0, y0, x1, y1;
    x0 = 0;
    y0 = (-c - a * x0) / b;
    x1 = rightImageRGB.cols;
    y1 = (-c - a * x1) / b;

    // the corresponding right-image point should satisfy the line equation
    std::cout << "error: " << a * imagePointsRightCamera.at(i).x + b * imagePointsRightCamera.at(i).y + c << std::endl;
    cv::line(rightImageRGB, cv::Point(int(x0), int(y0)), cv::Point(int(x1), int(y1)), cv::Scalar(0, 255, 0), 1);
}
cv::imwrite("rightImageEpipolarLine.jpg", rightImageRGB);
In this tutorial, I create an ellipsoid in 3D space, place two cameras to its left and right, and capture an image with each of them:
cv::Mat leftCameraRotation, rightCameraRotation;
double rollLeft, pitchLeft, yawLeft, rollRight, pitchRight, yawRight;
double txLeft, tyLeft, tzLeft, txRight, tyRight, tzRight;
cv::Vec3d thetaLeft, thetaRight;

rollLeft = 0;
pitchLeft = +M_PI / 10;
yawLeft = 0;
thetaLeft[0] = rollLeft;
thetaLeft[1] = pitchLeft;
thetaLeft[2] = yawLeft;

rollRight = 0;
pitchRight = -M_PI / 10;
yawRight = 0;
thetaRight[0] = rollRight;
thetaRight[1] = pitchRight;
thetaRight[2] = yawRight;

txLeft = -1;
tyLeft = 0.0;
tzLeft = -4.0;
txRight = 1.0;
tyRight = 0.0;
tzRight = -4.0;

leftCameraRotation = eulerAnglesToRotationMatrix(thetaLeft);
rightCameraRotation = eulerAnglesToRotationMatrix(thetaRight);
cv::Mat leftCameraTranslation = (cv::Mat_<double>(3, 1) << txLeft, tyLeft, tzLeft);
cv::Mat rightCameraTranslation = (cv::Mat_<double>(3, 1) << txRight, tyRight, tzRight);

std::vector<cv::Point3d> objectPointsInWorldCoordinate;
double X, Y, Z;

//////////////////////// creating an ellipsoid in the world coordinate ////////////////////////
double phiStepSize, thetaStepSize;
phiStepSize = 0.7;
thetaStepSize = 0.6;
double a, b, c;
a = 2;
b = 3;
c = 1.6;
for (double phi = -M_PI; phi < M_PI; phi = phi + phiStepSize)
{
    for (double theta = -M_PI / 2; theta < M_PI / 2; theta = theta + thetaStepSize)
    {
        X = a * cos(theta) * cos(phi);
        Y = b * cos(theta) * sin(phi);
        Z = c * sin(theta);
        objectPointsInWorldCoordinate.push_back(cv::Point3d(X, Y, Z));
    }
}

int numberOfPixelInHeight, numberOfPixelInWidth;
double heightOfSensor, widthOfSensor;
double focalLength = 2.0;
double mx, my, U0, V0;
numberOfPixelInHeight = 600;
numberOfPixelInWidth = 600;
heightOfSensor = 10;
widthOfSensor = 10;
my = numberOfPixelInHeight / heightOfSensor;
U0 = numberOfPixelInHeight / 2;
mx = numberOfPixelInWidth / widthOfSensor;
V0 = numberOfPixelInWidth / 2;

cv::Mat K = (cv::Mat_<double>(3, 3) << focalLength * mx, 0, V0,
                                       0, focalLength * my, U0,
                                       0, 0, 1);

std::vector<cv::Point2d> imagePointsLeftCamera, imagePointsRightCamera;
cv::projectPoints(objectPointsInWorldCoordinate, leftCameraRotation, leftCameraTranslation, K, cv::noArray(), imagePointsLeftCamera);
cv::projectPoints(objectPointsInWorldCoordinate, rightCameraRotation, rightCameraTranslation, K, cv::noArray(), imagePointsRightCamera);

//////////////////////// storing the images from the right and left cameras ////////////////////////
std::string fileName;
cv::Mat rightImage, leftImage;
int U, V;

leftImage = cv::Mat::zeros(numberOfPixelInHeight, numberOfPixelInWidth, CV_8UC1);
for (std::size_t i = 0; i < imagePointsLeftCamera.size(); i++)
{
    V = int(imagePointsLeftCamera.at(i).x); // horizontal pixel index (column)
    U = int(imagePointsLeftCamera.at(i).y); // vertical pixel index (row)
    leftImage.at<uchar>(U, V) = 255;
}
fileName = std::string("imagePointsLeftCamera") + std::to_string(focalLength) + std::string("_.jpg");
cv::imwrite(fileName, leftImage);

rightImage = cv::Mat::zeros(numberOfPixelInHeight, numberOfPixelInWidth, CV_8UC1);
for (std::size_t i = 0; i < imagePointsRightCamera.size(); i++)
{
    V = int(imagePointsRightCamera.at(i).x);
    U = int(imagePointsRightCamera.at(i).y);
    rightImage.at<uchar>(U, V) = 255;
}
fileName = std::string("imagePointsRightCamera") + std::to_string(focalLength) + std::string("_.jpg");
cv::imwrite(fileName, rightImage);
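Because the two camera poses and the shared intrinsic matrix K are known in this simulation, the fundamental matrix estimated above by cv::findFundamentalMat can be checked against its ground-truth value. A short sketch reusing the variables defined above (the relative pose maps left-camera coordinates into the right camera frame):

// relative pose between the two cameras: X_right = R*X_left + t
cv::Mat R = rightCameraRotation * leftCameraRotation.t();
cv::Mat t = rightCameraTranslation - R * leftCameraTranslation;

// skew-symmetric cross-product matrix [t]x
cv::Mat t_x = (cv::Mat_<double>(3, 3) <<
                0, -t.at<double>(2), t.at<double>(1),
                t.at<double>(2), 0, -t.at<double>(0),
               -t.at<double>(1), t.at<double>(0), 0);

cv::Mat E = t_x * R;                   // essential matrix E = [t]x * R
cv::Mat F = K.inv().t() * E * K.inv(); // fundamental matrix F = K^-T * E * K^-1
F = F / F.at<double>(2, 2);            // F is defined only up to scale
std::cout << "ground-truth F:\n" << F << std::endl;

Since findFundamentalMat also returns F only up to scale, normalize both matrices (for instance by their bottom-right entry) before comparing them.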
Creating a Simulated Stereo Vision Cameras With OpenCV and C++
camera_info_publisher.cpp:
#include <ros/ros.h>
#include <sensor_msgs/CameraInfo.h>
#include <camera_info_manager/camera_info_manager.h>

int main(int argc, char** argv)
{
    ros::init(argc, argv, "camera_info_publisher");
    ros::NodeHandle nh;

    const std::string cname = "prosilica";
    const std::string url = "file://${ROS_HOME}/camera_info/prosilica.yaml";

    ros::Publisher pubCameraInfo = nh.advertise<sensor_msgs::CameraInfo>("camera/camera_info", 1);
    camera_info_manager::CameraInfoManager cinfo(nh, cname, url);
    sensor_msgs::CameraInfo msgCameraInfo;

    ros::Rate loop_rate(5);
    while (nh.ok())
    {
        msgCameraInfo = cinfo.getCameraInfo();
        pubCameraInfo.publish(msgCameraInfo);
        ros::spinOnce();
        loop_rate.sleep();
    }
}
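camera_info_manager resolves the ${ROS_HOME} variable in the URL and loads the calibration from that YAML file, which uses the standard format written by the ROS calibration tools. A sketch of such a file with made-up values for a 640x480 camera (camera_name should match the cname passed to CameraInfoManager):

image_width: 640
image_height: 480
camera_name: prosilica
camera_matrix:
  rows: 3
  cols: 3
  data: [430.2, 0.0, 320.0, 0.0, 430.2, 240.0, 0.0, 0.0, 1.0]
distortion_model: plumb_bob
distortion_coefficients:
  rows: 1
  cols: 5
  data: [0.0, 0.0, 0.0, 0.0, 0.0]
rectification_matrix:
  rows: 3
  cols: 3
  data: [1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0]
projection_matrix:
  rows: 3
  cols: 4
  data: [430.2, 0.0, 320.0, 0.0, 0.0, 430.2, 240.0, 0.0, 0.0, 0.0, 1.0, 0.0]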
image_geometry_demo.cpp:
#include <ros/ros.h>
#include <sensor_msgs/CameraInfo.h>
#include <image_geometry/pinhole_camera_model.h>
#include <iostream>

void cameraInfoCallback(const sensor_msgs::CameraInfoConstPtr& info_msg)
{
    std::cout << "===============================cameraInfoCallback===============================" << std::endl;
    image_geometry::PinholeCameraModel cam_model_;
    cam_model_.fromCameraInfo(info_msg);
    // projection_matrix is the matrix to use if you don't want to call
    // project3dToPixel() and prefer the OpenCV API directly
    cv::Matx34d projection_matrix = cam_model_.fullProjectionMatrix();
    std::cout << cam_model_.project3dToPixel(cv::Point3d(-0.1392072, -0.02571392, 2.50376511)) << std::endl;
}

int main(int argc, char** argv)
{
    ros::init(argc, argv, "image_geometry_demo");
    ros::NodeHandle nh;
    ros::Subscriber sub = nh.subscribe("camera/camera_info", 1000, cameraInfoCallback);
    ros::spin();
}
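project3dToPixel() is just the homogeneous projection by that matrix followed by division by depth; a short sketch of the equivalent manual computation (it would go inside cameraInfoCallback, after fromCameraInfo):

// manually project the same 3-D point with the full projection matrix
cv::Matx34d P = cam_model_.fullProjectionMatrix();
cv::Matx41d X(-0.1392072, -0.02571392, 2.50376511, 1.0); // homogeneous 3-D point
cv::Matx31d x = P * X;                                   // x' = P * X
cv::Point2d uv(x(0, 0) / x(2, 0), x(1, 0) / x(2, 0));    // divide by depth
std::cout << uv << std::endl;                            // should match project3dToPixel()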
CMakeLists.txt
find_package(catkin REQUIRED COMPONENTS image_geometry camera_info_manager roscpp)
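The find_package line above is the key part; a minimal complete CMakeLists.txt around it might look like the following (project, target, and source file names are assumptions):

cmake_minimum_required(VERSION 2.8.3)
project(camera_info_demo)

find_package(catkin REQUIRED COMPONENTS image_geometry camera_info_manager roscpp)
catkin_package()

include_directories(${catkin_INCLUDE_DIRS})

# one executable per node defined above
add_executable(camera_info_publisher src/camera_info_publisher.cpp)
target_link_libraries(camera_info_publisher ${catkin_LIBRARIES})

add_executable(image_geometry_demo src/image_geometry_demo.cpp)
target_link_libraries(image_geometry_demo ${catkin_LIBRARIES})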
How to use image_geometry and camera_info_manager in ROS