OpenCV: Image Stitching Based on ORB Feature Points
This article demonstrates image stitching based on ORB feature points in OpenCV.
Original images: the left and right inputs (flowerL.jpg and flowerR.jpg).
#include <iostream>
#include <opencv2/opencv.hpp>

using namespace cv;
using namespace std;

void OptimizeSeam(Mat& img1, Mat& trans, Mat& dst);

typedef struct
{
    Point2f left_top;
    Point2f left_bottom;
    Point2f right_top;
    Point2f right_bottom;
} four_corners_t;

four_corners_t corners;

// Project the four corners of src through the homography H and store the
// results in the global `corners`. The product H * V2 is written back into
// the v1 array through the Mat header wrapping it.
void CalcCorners(const Mat& H, const Mat& src)
{
    double v2[] = { 0, 0, 1 };         // source corner in homogeneous coordinates
    double v1[3];                      // transformed coordinates
    Mat V2 = Mat(3, 1, CV_64FC1, v2);  // column vector over v2
    Mat V1 = Mat(3, 1, CV_64FC1, v1);  // column vector over v1

    // top-left (0, 0, 1)
    V1 = H * V2;
    cout << "V2: " << V2 << endl;
    cout << "V1: " << V1 << endl;
    corners.left_top.x = v1[0] / v1[2];
    corners.left_top.y = v1[1] / v1[2];

    // bottom-left (0, src.rows, 1)
    v2[0] = 0;
    v2[1] = src.rows;
    v2[2] = 1;
    V2 = Mat(3, 1, CV_64FC1, v2);
    V1 = Mat(3, 1, CV_64FC1, v1);
    V1 = H * V2;
    corners.left_bottom.x = v1[0] / v1[2];
    corners.left_bottom.y = v1[1] / v1[2];

    // top-right (src.cols, 0, 1)
    v2[0] = src.cols;
    v2[1] = 0;
    v2[2] = 1;
    V2 = Mat(3, 1, CV_64FC1, v2);
    V1 = Mat(3, 1, CV_64FC1, v1);
    V1 = H * V2;
    corners.right_top.x = v1[0] / v1[2];
    corners.right_top.y = v1[1] / v1[2];

    // bottom-right (src.cols, src.rows, 1)
    v2[0] = src.cols;
    v2[1] = src.rows;
    v2[2] = 1;
    V2 = Mat(3, 1, CV_64FC1, v2);
    V1 = Mat(3, 1, CV_64FC1, v1);
    V1 = H * V2;
    corners.right_bottom.x = v1[0] / v1[2];
    corners.right_bottom.y = v1[1] / v1[2];
}

int main(int argc, char* argv[])
{
    //Mat imageRight = imread("images/imgR.jpg", 1);  // right image
    //Mat imageLeft  = imread("images/imgL.jpg", 1);  // left image
    //Mat imageRight = imread("images/SR.jpg", 1);    // right image
    //Mat imageLeft  = imread("images/SL.jpg", 1);    // left image
    Mat imageRight = imread("images/flowerR.jpg", 1); // right image
    Mat imageLeft  = imread("images/flowerL.jpg", 1); // left image
    if (imageRight.empty() || imageLeft.empty())
    {
        cout << "Failed to load input images" << endl;
        return -1;
    }

    // convert to grayscale
    Mat image_r, image_l;
    cvtColor(imageRight, image_r, COLOR_BGR2GRAY);
    cvtColor(imageLeft, image_l, COLOR_BGR2GRAY);

    // Extract feature points only from the regions likely to overlap; here the
    // left and right images share roughly 1/3 of their width at the seam.
    Rect rect_right = Rect(0, 0, imageRight.cols / 3, imageRight.rows);
    Rect rect_left  = Rect(2 * imageLeft.cols / 3, 0, (imageLeft.cols / 3) - 1, imageLeft.rows);
    Mat image_r_rect = imageRight(rect_right);
    Mat image_l_rect = imageLeft(rect_left);

    double start = getTickCount();

    // detect feature points
    Ptr<FeatureDetector> ORBDetector = ORB::create(10000);
    vector<KeyPoint> keyPoints_r, keyPoints_l;
    ORBDetector->detect(image_r_rect, keyPoints_r);
    ORBDetector->detect(image_l_rect, keyPoints_l);

    // compute descriptors to prepare for the matching below
    Ptr<DescriptorExtractor> ORBDescriptor = ORB::create(10000);
    Mat imageDesc_r, imageDesc_l;
    ORBDescriptor->compute(image_r_rect, keyPoints_r, imageDesc_r);
    ORBDescriptor->compute(image_l_rect, keyPoints_l, imageDesc_l);

    // LSH index over the binary ORB descriptors, searched with Hamming distance
    flann::Index flannIndex(imageDesc_r, flann::LshIndexParams(12, 20, 2), cvflann::FLANN_DIST_HAMMING);

    vector<DMatch> GoodMatchePoints;
    Mat matchIndex(imageDesc_l.rows, 2, CV_32SC1), matchDistance(imageDesc_l.rows, 2, CV_32FC1);
    flannIndex.knnSearch(imageDesc_l, matchIndex, matchDistance, 2, flann::SearchParams());

    // Lowe's ratio test: keep only distinctive matches
    for (int i = 0; i < matchDistance.rows; i++)
    {
        if (matchDistance.at<float>(i, 0) < 0.4 * matchDistance.at<float>(i, 1))
        {
            DMatch dmatches(i, matchIndex.at<int>(i, 0), matchDistance.at<float>(i, 0));
            GoodMatchePoints.push_back(dmatches);
        }
    }

    Mat first_match;
    //drawMatches(imageLeft, keyPoints_l, imageRight, keyPoints_r, GoodMatchePoints, first_match);
    drawMatches(image_l_rect, keyPoints_l, image_r_rect, keyPoints_r, GoodMatchePoints, first_match);
    //namedWindow("first_match", 2);
    //imshow("first_match", first_match);
    //waitKey();

    vector<Point2f> imagePoints1, imagePoints2;
    for (int i = 0; i < GoodMatchePoints.size(); i++)
    {
        imagePoints2.push_back(keyPoints_l[GoodMatchePoints[i].queryIdx].pt);
        imagePoints1.push_back(keyPoints_r[GoodMatchePoints[i].trainIdx].pt);
    }
    if (imagePoints1.size() <= 10 || imagePoints2.size() <= 10)
    {
        printf("There are too few keypoints\n");
        getchar();
        return -1;
    }

    // Shift the left-image points back to full-image coordinates; otherwise
    // the x translation of the estimated transform would be wrong.
    for (auto iter = imagePoints2.begin(); iter != imagePoints2.end(); iter++)
    {
        (*iter).x += 2 * imageLeft.cols / 3;
    }

    // Estimate the 3x3 projective mapping from image 1 (right) to image 2 (left).
    Mat homo = findHomography(imagePoints1, imagePoints2, RANSAC);
    // getPerspectiveTransform could also produce a perspective matrix, but it
    // accepts exactly 4 point pairs and gives slightly worse results:
    //Mat homo = getPerspectiveTransform(imagePoints1, imagePoints2);
    cout << "Homography:\n" << homo << endl << endl;

    // compute the four corner coordinates of the registered image
    CalcCorners(homo, imageRight);
    //cout << "left_top:" << corners.left_top << endl;
    //cout << "left_bottom:" << corners.left_bottom << endl;
    //cout << "right_top:" << corners.right_top << endl;
    //cout << "right_bottom:" << corners.right_bottom << endl;

    // image registration
    Mat imageTransform1;
    warpPerspective(imageRight, imageTransform1, homo,
                    Size(MAX(corners.right_top.x, corners.right_bottom.x), imageLeft.rows));
    rectangle(imageRight,
              Rect(imageRight.cols - MAX(corners.right_top.x, corners.right_bottom.x), 0,
                   MAX(corners.right_top.x, corners.right_bottom.x), 500),
              Scalar(0, 0, 255));
    imshow("rectangle", imageRight);
    //imshow("warped right image", imageTransform1);
    //imwrite("trans1.jpg", imageTransform1);
    //waitKey();

    // Create the stitched canvas; its size must be computed in advance.
    int dst_width  = imageTransform1.cols;  // the rightmost warped corner sets the width
    int dst_height = imageLeft.rows;
    Mat dst(dst_height, dst_width, CV_8UC3);
    dst.setTo(0);
    imageTransform1.copyTo(dst(Rect(0, 0, imageTransform1.cols, imageTransform1.rows)));
    imshow("transform1", dst);
    imageLeft.copyTo(dst(Rect(0, 0, imageLeft.cols, imageLeft.rows)));
    imshow("b_dst", dst);
    waitKey();

    OptimizeSeam(imageLeft, imageTransform1, dst);

    double end = getTickCount();
    double useTime = (end - start) / getTickFrequency();
    cout << "use-time : " << useTime << "s" << endl;

    imshow("dstOptimize", dst);
    //imwrite("dst.jpg", dst);
    waitKey();
    return 0;
}

// Blend the two images across the seam so the stitch looks natural.
void OptimizeSeam(Mat& img1, Mat& trans, Mat& dst)
{
    int start = MIN(corners.left_top.x, corners.left_bottom.x);  // left boundary of the overlap
    double processWidth = img1.cols - start;                     // width of the overlap
    int rows = dst.rows;
    int cols = img1.cols;  // columns of img1; each pixel spans 3 bytes (BGR) below
    double alpha = 1;      // weight of the img1 pixel
    for (int i = 0; i < rows; i++)
    {
        uchar* p = img1.ptr<uchar>(i);  // pointer to row i
        uchar* t = trans.ptr<uchar>(i);
        uchar* d = dst.ptr<uchar>(i);
        for (int j = start; j < cols; j++)
        {
            // Where trans has no pixel (pure black), copy img1 unchanged.
            if (t[j * 3] == 0 && t[j * 3 + 1] == 0 && t[j * 3 + 2] == 0)
            {
                alpha = 1;
            }
            else
            {
                // img1's weight falls linearly from 1 at the overlap's left
                // boundary to 0 at the right edge of img1; experience shows
                // this simple ramp blends well.
                alpha = (processWidth - (j - start)) / processWidth;
            }
            d[j * 3]     = p[j * 3] * alpha + t[j * 3] * (1 - alpha);
            d[j * 3 + 1] = p[j * 3 + 1] * alpha + t[j * 3 + 1] * (1 - alpha);
            d[j * 3 + 2] = p[j * 3 + 2] * alpha + t[j * 3 + 2] * (1 - alpha);
        }
    }
}

Feature point matching
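The FLANN-LSH search above works, but a brute-force Hamming matcher is the simpler and more common route for binary ORB descriptors. Here is a minimal sketch of a drop-in replacement for the index-and-ratio-test block, reusing the imageDesc_l / imageDesc_r names from the listing (this alternative is not part of the original article):

// Hypothetical alternative to the flann::Index block above.
BFMatcher matcher(NORM_HAMMING);
vector<vector<DMatch>> knnMatches;
matcher.knnMatch(imageDesc_l, imageDesc_r, knnMatches, 2);  // two nearest neighbours per query

vector<DMatch> GoodMatchePoints;
for (const auto& m : knnMatches)
{
    // same 0.4 Lowe ratio threshold as in the listing
    if (m.size() == 2 && m[0].distance < 0.4f * m[1].distance)
        GoodMatchePoints.push_back(m[0]);
}

Similarly, CalcCorners multiplies each homogeneous corner through H by hand; cv::perspectiveTransform performs the same projection, including the division by the homogeneous coordinate. A self-contained sketch, with a made-up homography and a hypothetical 640x480 image size standing in for values the article would compute:

#include <iostream>
#include <opencv2/opencv.hpp>
using namespace cv;
using namespace std;

int main()
{
    // placeholder homography standing in for the output of findHomography
    Mat H = (Mat_<double>(3, 3) << 1.0, 0.02, 300.0,
                                   0.01, 1.0,   5.0,
                                   0.0,  0.0,   1.0);
    // corners of a hypothetical 640x480 right image
    vector<Point2f> srcCorners = { {0, 0}, {0, 480}, {640, 0}, {640, 480} };
    vector<Point2f> dstCorners;
    perspectiveTransform(srcCorners, dstCorners, H);  // divides by w internally
    for (size_t i = 0; i < srcCorners.size(); i++)
        cout << srcCorners[i] << " -> " << dstCorners[i] << endl;
    return 0;
}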
Stitching result
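For comparison, OpenCV also ships a high-level cv::Stitcher that bundles feature matching, homography estimation, warping, and seam blending. A minimal sketch using the same input files (API as in OpenCV 3.4+/4.x; older releases use Stitcher::createDefault() instead — this is an alternative, not the article's method):

#include <iostream>
#include <vector>
#include <opencv2/opencv.hpp>
using namespace cv;
using namespace std;

int main()
{
    vector<Mat> imgs = { imread("images/flowerL.jpg"),
                         imread("images/flowerR.jpg") };
    Mat pano;
    Ptr<Stitcher> stitcher = Stitcher::create(Stitcher::PANORAMA);
    Stitcher::Status status = stitcher->stitch(imgs, pano);
    if (status != Stitcher::OK)
    {
        cerr << "stitching failed, status = " << int(status) << endl;
        return -1;
    }
    imwrite("pano_stitcher.jpg", pano);
    return 0;
}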
Summary
The pipeline above restricts ORB detection to the expected overlap (roughly the adjoining thirds of each image), matches the binary descriptors with a FLANN LSH index plus Lowe's ratio test, estimates a homography with RANSAC, warps the right image into the left image's coordinate frame, and finally feathers the seam with a distance-weighted blend.
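For reference, the seam blend in OptimizeSeam can be written compactly. With $s$ the left boundary of the overlap, $W = \texttt{img1.cols} - s$ the overlap width, and $p_j$, $t_j$ the pixels of the left and warped right images at column $j$, the output pixel is

$$d_j = \alpha_j\, p_j + (1 - \alpha_j)\, t_j, \qquad \alpha_j = \frac{W - (j - s)}{W},$$

so the left image's weight falls linearly from 1 to 0 across the overlap, with $\alpha_j = 1$ forced wherever the warped image is black.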