Please credit the source when reposting: 小鋒學長生活大爆炸 [xfxuezhang.cn]
This code can serve as a drop-in alternative to the built-in images.findImage function. Unlike template matching, feature matching tolerates some scaling and rotation, but it can also produce false positives, so use it with caution if you need highly reliable match results.
runtime.images.initOpenCvIfNeeded();
importClass(java.util.ArrayList);
importClass(java.util.List);
importClass(java.util.LinkedList);
importClass(org.opencv.imgproc.Imgproc);
importClass(org.opencv.imgcodecs.Imgcodecs);
importClass(org.opencv.core.Core);
importClass(org.opencv.core.Mat);
importClass(org.opencv.core.MatOfDMatch);
importClass(org.opencv.core.MatOfKeyPoint);
importClass(org.opencv.core.MatOfRect);
importClass(org.opencv.core.Size);
importClass(org.opencv.features2d.DescriptorMatcher);
importClass(org.opencv.features2d.Features2d);
importClass(org.opencv.features2d.SIFT);
importClass(org.opencv.features2d.ORB);
importClass(org.opencv.features2d.BRISK);
importClass(org.opencv.features2d.AKAZE);
importClass(org.opencv.features2d.BFMatcher);
importClass(org.opencv.core.MatOfPoint2f);
importClass(org.opencv.calib3d.Calib3d);
importClass(org.opencv.core.CvType);
importClass(org.opencv.core.Point);
importClass(org.opencv.core.Scalar);
importClass(org.opencv.core.MatOfByte);
/*
 * Usage example:
 * var image1 = captureScreen();      // big (scene) image
 * var image2 = images.read('xxxx');  // small (template) image
 * match(image1, image2);             // defaults to AKAZE; pass 'sift'/'orb'/'brisk' to choose
 */
function match(img1, img2, method) {
    console.time("match time");
    // Select the feature detection algorithm; anything other than
    // 'sift'/'orb'/'brisk' falls back to AKAZE
    var match_alg = null;
    if (method == 'sift') {
        match_alg = SIFT.create();
    } else if (method == 'orb') {
        match_alg = ORB.create();
    } else if (method == 'brisk') {
        match_alg = BRISK.create();
    } else {
        match_alg = AKAZE.create();
    }
    // Decode as 3-channel BGR so the COLOR_BGR2GRAY conversion below is always valid
    // (IMREAD_UNCHANGED can yield 4-channel RGBA mats from screenshots)
    var bigTrainImage = Imgcodecs.imdecode(new MatOfByte(images.toBytes(img1)), Imgcodecs.IMREAD_COLOR);
    var smallTrainImage = Imgcodecs.imdecode(new MatOfByte(images.toBytes(img2)), Imgcodecs.IMREAD_COLOR);
    // Convert both images to grayscale for feature detection
    var big_trainImage_gray = new Mat(bigTrainImage.rows(), bigTrainImage.cols(), CvType.CV_8UC1);
    var small_trainImage_gray = new Mat(smallTrainImage.rows(), smallTrainImage.cols(), CvType.CV_8UC1);
    Imgproc.cvtColor(bigTrainImage, big_trainImage_gray, Imgproc.COLOR_BGR2GRAY);
    Imgproc.cvtColor(smallTrainImage, small_trainImage_gray, Imgproc.COLOR_BGR2GRAY);
    // Detect keypoints in both grayscale images (the same mats the descriptors
    // are computed from below)
    var big_keyPoints = new MatOfKeyPoint();
    var small_keyPoints = new MatOfKeyPoint();
    match_alg.detect(big_trainImage_gray, big_keyPoints);
    match_alg.detect(small_trainImage_gray, small_keyPoints);
    // Compute descriptors for the detected keypoints; compute() allocates the output
    // Mat itself (float descriptors for SIFT, binary for ORB/BRISK/AKAZE)
    var big_trainDescription = new Mat();
    var small_trainDescription = new Mat();
    match_alg.compute(big_trainImage_gray, big_keyPoints, big_trainDescription);
    match_alg.compute(small_trainImage_gray, small_keyPoints, small_trainDescription);
    // Bail out early if either image yielded no features
    if (big_trainDescription.empty() || small_trainDescription.empty()) {
        console.timeEnd("match time");
        log("No features detected!");
        return null;
    }
    // Build the brute-force matcher and index the scene descriptors.
    // Binary descriptors (ORB/BRISK/AKAZE) must be compared with Hamming distance,
    // while SIFT's float descriptors use the L2 norm.
    var normType = small_trainDescription.type() == CvType.CV_32F ? Core.NORM_L2 : Core.NORM_HAMMING;
    var matcher = new BFMatcher(normType, false);
    var train_desc_collection = new ArrayList();
    train_desc_collection.add(big_trainDescription);
    matcher.add(train_desc_collection);
    matcher.train();
    // For each template descriptor, find its 2 nearest neighbours among the scene descriptors
    var matches = new ArrayList();
    matcher.knnMatch(small_trainDescription, matches, 2);
    // Filter matches with Lowe's ratio test: keep a match only when its best
    // neighbour is clearly closer than the second-best
    var goodMatches = new ArrayList();
    var nndrRatio = 0.8;
    var len = matches.size();
    for (var i = 0; i < len; i++) {
        var dmatcharray = matches.get(i).toArray();
        if (dmatcharray.length < 2) continue; // knnMatch can return fewer than 2 neighbours
        var m1 = dmatcharray[0];
        var m2 = dmatcharray[1];
        if (m1.distance <= m2.distance * nndrRatio) {
            goodMatches.add(m1);
        }
    }
    var matchesPointCount = goodMatches.size();
    // With at least 4 good matches, consider the template present in the scene.
    // 4 is the minimum findHomography needs; raise the threshold for stricter matching.
    if (matchesPointCount >= 4) {
        log("Template matched in the source image!");
        var templateKeyPointList = small_keyPoints.toList();
        var originalKeyPointList = big_keyPoints.toList();
        var objectPoints = new LinkedList();
        var scenePoints = new LinkedList();
        var len = goodMatches.size();
        for (var i = 0; i < len; i++) {
            var goodMatch = goodMatches.get(i);
            // queryIdx indexes the template (query) keypoints, trainIdx the scene (train) keypoints
            objectPoints.addLast(templateKeyPointList.get(goodMatch.queryIdx).pt);
            scenePoints.addLast(originalKeyPointList.get(goodMatch.trainIdx).pt);
        }
        var objMatOfPoint2f = new MatOfPoint2f();
        objMatOfPoint2f.fromList(objectPoints);
        var scnMatOfPoint2f = new MatOfPoint2f();
        scnMatOfPoint2f.fromList(scenePoints);
        // Estimate the homography from template points to scene points with RANSAC
        // (reprojection threshold: 3 px)
        var homography = Calib3d.findHomography(objMatOfPoint2f, scnMatOfPoint2f, Calib3d.RANSAC, 3);
        /**
         * A perspective transformation (also called projective mapping) projects an
         * image onto a new viewing plane.
         */
        var templateCorners = new Mat(4, 1, CvType.CV_32FC2);
        var templateTransformResult = new Mat(4, 1, CvType.CV_32FC2);
        var templateImage = smallTrainImage;
        var doubleArr = util.java.array("double", 2);
        // The template's four corners, in order: top-left, top-right, bottom-right, bottom-left
        doubleArr[0] = 0;
        doubleArr[1] = 0;
        templateCorners.put(0, 0, doubleArr);
        doubleArr[0] = templateImage.cols();
        doubleArr[1] = 0;
        templateCorners.put(1, 0, doubleArr);
        doubleArr[0] = templateImage.cols();
        doubleArr[1] = templateImage.rows();
        templateCorners.put(2, 0, doubleArr);
        doubleArr[0] = 0;
        doubleArr[1] = templateImage.rows();
        templateCorners.put(3, 0, doubleArr);
        // Project the template's corners into the scene image with the homography
        Core.perspectiveTransform(templateCorners, templateTransformResult, homography);
        // The four projected corners of the matched region
        var pointA = templateTransformResult.get(0, 0); // top-left
        var pointB = templateTransformResult.get(1, 0); // top-right
        var pointC = templateTransformResult.get(2, 0); // bottom-right
        var pointD = templateTransformResult.get(3, 0); // bottom-left
        // Clamp to non-negative screen coordinates: (x0, y0) approximates the
        // top-left of the match and (x1, y1) the bottom-right
        var y0 = Math.max(Math.round(pointA[1]), 0);
        var y1 = Math.max(Math.round(pointC[1]), 0);
        var x0 = Math.max(Math.round(pointD[0]), 0);
        var x1 = Math.max(Math.round(pointB[0]), 0);
        console.timeEnd("match time");
        // Return the top-left corner; use (x1, y1) as well if you need the full bounding box
        return {x: x0, y: y0};
    } else {
        console.timeEnd("match time");
        log("Template not found in the source image!");
        return null;
    }
}