(OpenCV) ORB Matching Algorithm

Published: 2023/11/27
This article presents an ORB feature-matching example in OpenCV: Shi-Tomasi corners are detected with goodFeaturesToTrack and refined to sub-pixel accuracy, ORB descriptors are computed at those corners, the descriptors are matched by Hamming distance, the matches are filtered, and a homography is estimated from the surviving pairs. It is shared here for reference.
#include <opencv2/opencv.hpp>
#include <iostream>
#include <vector>
#include <algorithm>

using namespace std;
using namespace cv;

Mat img1 = imread("C:\\Users\\Administrator\\Desktop\\水面\\figa1.png"); // load image 1
Mat img1_gray;
Mat img2 = imread("C:\\Users\\Administrator\\Desktop\\水面\\figa2.png"); // load image 2
Mat img2_gray;
int thresh = 300;   // maximum number of corners, controlled by the trackbar
RNG rng(12345);     // random colors for drawing corners

// Wrap Point2f corners as KeyPoints so ORB can describe them
void PointsToKeyPoints(const vector<Point2f>& corners, vector<KeyPoint>& keypoints)
{
    for (size_t i = 0; i < corners.size(); i++) {
        keypoints.push_back(KeyPoint(corners[i], 1.f));
    }
}

// Trackbar callback
void on_harris(int, void*)
{
    // Step 1: detect Shi-Tomasi corners in both grayscale images
    vector<Point2f> corner1;
    goodFeaturesToTrack(img1_gray, corner1, thresh, 0.01, 0.1, Mat(), 3, false, 0.04);
    vector<Point2f> corner2;
    goodFeaturesToTrack(img2_gray, corner2, thresh, 0.01, 0.1, Mat(), 3, false, 0.04);

    // Draw the corners in random colors on display copies, so repeated
    // callback runs do not accumulate circles on the source images
    Mat show1 = img1.clone();
    for (size_t i = 0; i < corner1.size(); i++) {
        circle(show1, corner1[i], 4, Scalar(rng.uniform(0, 255), rng.uniform(0, 255), rng.uniform(0, 255)), 2, 8, 0);
    }
    imshow("Source Image 1", show1);
    Mat show2 = img2.clone();
    for (size_t i = 0; i < corner2.size(); i++) {
        circle(show2, corner2[i], 4, Scalar(rng.uniform(0, 255), rng.uniform(0, 255), rng.uniform(0, 255)), 2, 8, 0);
    }
    imshow("Source Image 2", show2);

    // Refine the corner positions to sub-pixel accuracy
    TermCriteria criteria = TermCriteria(TermCriteria::EPS + TermCriteria::MAX_ITER, 40, 0.001);
    cornerSubPix(img1_gray, corner1, Size(5, 5), Size(-1, -1), criteria);
    cornerSubPix(img2_gray, corner2, Size(5, 5), Size(-1, -1), criteria);
    // Print the corner counts
    cout << corner1.size() << endl;
    cout << corner2.size() << endl;

    vector<KeyPoint> keypoints_1, keypoints_2; // keypoints
    Mat descriptors_1, descriptors_2;          // descriptors
    // Only an extractor and a matcher are needed here: the keypoints come
    // from goodFeaturesToTrack rather than from ORB's own FAST detector
    Ptr<DescriptorExtractor> descriptor = ORB::create();
    Ptr<DescriptorMatcher> matcher = DescriptorMatcher::create("BruteForce-Hamming"); // brute-force matching
    PointsToKeyPoints(corner1, keypoints_1);
    PointsToKeyPoints(corner2, keypoints_2);

    // Step 2: compute rotated-BRIEF descriptors at the corner positions
    // (on the grayscale images, so the drawn circles cannot corrupt them)
    descriptor->compute(img1_gray, keypoints_1, descriptors_1);
    descriptor->compute(img2_gray, keypoints_2, descriptors_2);

    // Step 3: match the descriptors of the two images by Hamming distance
    vector<DMatch> matches;
    matcher->match(descriptors_1, descriptors_2, matches);

    // Step 4: filter the matched pairs
    auto min_max = minmax_element(matches.begin(), matches.end(),
        [](const DMatch& m1, const DMatch& m2) { return m1.distance < m2.distance; });
    double min_dist = min_max.first->distance;
    double max_dist = min_max.second->distance;
    printf("-- Max dist : %f \n", max_dist);
    printf("-- Min dist : %f \n", min_dist);
    // A match is considered wrong when its distance exceeds twice the minimum
    // distance; because the minimum can be very small, an empirical value of
    // 30 is used as the lower bound
    vector<DMatch> good_matches;
    for (size_t i = 0; i < matches.size(); i++) {
        if (matches[i].distance <= max(2 * min_dist, 30.0)) {
            good_matches.push_back(matches[i]);
        }
    }

    // Step 5: draw the match results
    Mat img_match;
    Mat img_goodmatch;
    drawMatches(img1, keypoints_1, img2, keypoints_2, matches, img_match);
    drawMatches(img1, keypoints_1, img2, keypoints_2, good_matches, img_goodmatch);
    namedWindow("all matches", WINDOW_FREERATIO);
    namedWindow("good matches", WINDOW_FREERATIO);
    imshow("all matches", img_match);
    imshow("good matches", img_goodmatch);

    // findHomography needs at least 4 point pairs
    if (good_matches.size() < 4) return;
    vector<Point2f> obj;
    vector<Point2f> scene;
    for (size_t i = 0; i < good_matches.size(); i++) {
        // Get the keypoints from the good matches
        obj.push_back(keypoints_1[good_matches[i].queryIdx].pt);
        scene.push_back(keypoints_2[good_matches[i].trainIdx].pt);
    }
    Mat H = findHomography(obj, scene, RANSAC);

    // Get the corners of image 1 (the object to be "detected")
    vector<Point2f> obj_corners(4);
    obj_corners[0] = Point2f(0, 0);
    obj_corners[1] = Point2f((float)img1.cols, 0);
    obj_corners[2] = Point2f((float)img1.cols, (float)img1.rows);
    obj_corners[3] = Point2f(0, (float)img1.rows);
    vector<Point2f> scene_corners(4);
    perspectiveTransform(obj_corners, scene_corners, H);

    // Draw lines between the mapped corners (the object located in image 2);
    // the img1.cols offset shifts them into the right half of the canvas
    line(img_goodmatch, scene_corners[0] + Point2f((float)img1.cols, 0), scene_corners[1] + Point2f((float)img1.cols, 0), Scalar(0, 255, 0), 8);
    line(img_goodmatch, scene_corners[1] + Point2f((float)img1.cols, 0), scene_corners[2] + Point2f((float)img1.cols, 0), Scalar(0, 255, 0), 8);
    line(img_goodmatch, scene_corners[2] + Point2f((float)img1.cols, 0), scene_corners[3] + Point2f((float)img1.cols, 0), Scalar(0, 255, 0), 8);
    line(img_goodmatch, scene_corners[3] + Point2f((float)img1.cols, 0), scene_corners[0] + Point2f((float)img1.cols, 0), Scalar(0, 255, 0), 8);

    // Show the detected matches together with the located object
    namedWindow("Good Matches & Object detection", WINDOW_FREERATIO);
    imshow("Good Matches & Object detection", img_goodmatch);
}

int main()
{
    /*img1 = img1(Rect(1000, 0, 400, 400));
    img2 = img2(Rect(1000, 0, 400, 400));*/
    img1_gray = img1.clone();
    cvtColor(img1_gray, img1_gray, COLOR_BGR2GRAY); // imread loads BGR, not RGB
    namedWindow("Source Image 1", WINDOW_FREERATIO);
    img2_gray = img2.clone();
    cvtColor(img2_gray, img2_gray, COLOR_BGR2GRAY);
    namedWindow("Source Image 2", WINDOW_FREERATIO);
    createTrackbar("max corners", "Source Image 1", &thresh, 500, on_harris);
    on_harris(0, 0);
    waitKey(0);
    return 0;
}
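
For comparison, the more common ORB pipeline lets ORB detect its own oriented-FAST keypoints instead of seeding it with Shi-Tomasi corners, which is what the example above does. Below is a minimal sketch of that standard pipeline; the file names figa1.png and figa2.png are placeholders for any image pair.

#include <opencv2/opencv.hpp>
#include <vector>

using namespace cv;

int main()
{
    // Placeholder file names; substitute your own image pair
    Mat a = imread("figa1.png", IMREAD_GRAYSCALE);
    Mat b = imread("figa2.png", IMREAD_GRAYSCALE);
    if (a.empty() || b.empty()) return -1;

    // ORB detects up to nfeatures oriented-FAST keypoints and describes
    // them with rotated BRIEF in a single call
    Ptr<ORB> orb = ORB::create(500);
    std::vector<KeyPoint> ka, kb;
    Mat da, db;
    orb->detectAndCompute(a, noArray(), ka, da);
    orb->detectAndCompute(b, noArray(), kb, db);

    // Cross-checked brute-force Hamming matcher: a pair is kept only if
    // the two descriptors pick each other as nearest neighbour
    BFMatcher matcher(NORM_HAMMING, true);
    std::vector<DMatch> matches;
    matcher.match(da, db, matches);

    Mat vis;
    drawMatches(a, ka, b, kb, matches, vis);
    imshow("ORB detectAndCompute matches", vis);
    waitKey(0);
    return 0;
}

Passing true as the second BFMatcher argument enables cross-checking, which discards one-directional matches and can stand in for the distance-threshold filtering used in the main example.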

Summary

The above is the complete (OpenCV) ORB matching example; hopefully it helps you solve the problem you ran into.
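
One refinement worth knowing: the filter in the code keeps a match only when its Hamming distance is at most max(2 * min_dist, 30), which is a simple heuristic. A common alternative is Lowe's ratio test, which requests the two nearest neighbours for each query descriptor and keeps the best one only when it clearly beats the runner-up. A minimal sketch, reusing descriptors_1, descriptors_2, and matcher from the code above:

// Lowe's ratio test as an alternative match filter
std::vector<std::vector<DMatch>> knn;
matcher->knnMatch(descriptors_1, descriptors_2, knn, 2); // 2 nearest neighbours
std::vector<DMatch> ratio_matches;
for (const auto& nn : knn) {
    // Keep the best match only when it is clearly better than the second best
    if (nn.size() == 2 && nn[0].distance < 0.75f * nn[1].distance)
        ratio_matches.push_back(nn[0]);
}

The 0.75 ratio is a commonly used starting value; lowering it trades recall for precision.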
