vpKeyPoint.h
/*
 * ViSP, open source Visual Servoing Platform software.
 * Copyright (C) 2005 - 2023 by Inria. All rights reserved.
 *
 * This software is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 * See the file LICENSE.txt at the root directory of this source
 * distribution for additional information about the GNU GPL.
 *
 * For using ViSP with software that can not be combined with the GNU
 * GPL, please contact Inria about acquiring a ViSP Professional
 * Edition License.
 *
 * See https://visp.inria.fr for more information.
 *
 * This software was developed at:
 * Inria Rennes - Bretagne Atlantique
 * Campus Universitaire de Beaulieu
 * 35042 Rennes Cedex
 * France
 *
 * If you have questions regarding the use of this file, please contact
 * Inria at visp@inria.fr
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 *
 * Description:
 * Key point functionalities.
 */
#ifndef _vpKeyPoint_h_
#define _vpKeyPoint_h_

#include <algorithm> // std::transform
#include <float.h>   // DBL_MAX
#include <fstream>   // std::ofstream
#include <limits>
#include <map>       // std::map
#include <numeric>   // std::accumulate
#include <stdlib.h>  // srand, rand
#include <time.h>    // time
#include <vector>    // std::vector

#include <visp3/core/vpConfig.h>
#include <visp3/core/vpDisplay.h>
#include <visp3/core/vpImageConvert.h>
#include <visp3/core/vpPixelMeterConversion.h>
#include <visp3/core/vpPlane.h>
#include <visp3/core/vpPoint.h>
#include <visp3/vision/vpBasicKeyPoint.h>
#include <visp3/vision/vpPose.h>
#ifdef VISP_HAVE_MODULE_IO
#include <visp3/io/vpImageIo.h>
#endif
#include <visp3/core/vpConvert.h>
#include <visp3/core/vpCylinder.h>
#include <visp3/core/vpMeterPixelConversion.h>
#include <visp3/core/vpPolygon.h>
#include <visp3/vision/vpXmlConfigParserKeyPoint.h>

// Require at least OpenCV >= 2.1.1
#if defined(VISP_HAVE_OPENCV) && defined(HAVE_OPENCV_IMGPROC) && defined(HAVE_OPENCV_FEATURES2D)
#include <opencv2/features2d/features2d.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/imgproc/imgproc_c.h>

#if defined(VISP_HAVE_OPENCV_XFEATURES2D) // OpenCV >= 3.0.0
#include <opencv2/xfeatures2d.hpp>
#elif defined(VISP_HAVE_OPENCV_NONFREE) && (VISP_HAVE_OPENCV_VERSION >= 0x020400) && \
    (VISP_HAVE_OPENCV_VERSION < 0x030000)
#include <opencv2/nonfree/nonfree.hpp>
#endif

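/*
 * The snippet below is a minimal usage sketch (it is not taken verbatim from the ViSP
 * documentation): it illustrates the typical detection/matching workflow of vpKeyPoint,
 * assuming two grayscale images can be loaded from disk (the file names are hypothetical).
 *
 * \code
 * #include <iostream>
 * #include <visp3/io/vpImageIo.h>
 * #include <visp3/vision/vpKeyPoint.h>
 *
 * int main()
 * {
 *   vpImage<unsigned char> Iref, Icur;
 *   vpImageIo::read(Iref, "reference.pgm"); // hypothetical file names
 *   vpImageIo::read(Icur, "current.pgm");
 *
 *   // ORB detector/descriptor with brute-force Hamming matching (the class defaults)
 *   vpKeyPoint keypoint("ORB", "ORB", "BruteForce-Hamming");
 *   keypoint.buildReference(Iref);                      // detect/extract on the reference image
 *   unsigned int nbMatches = keypoint.matchPoint(Icur); // match the current image against the reference
 *   std::cout << "Number of matches: " << nbMatches << std::endl;
 *   return 0;
 * }
 * \endcode
 */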
class VISP_EXPORT vpKeyPoint : public vpBasicKeyPoint
{

public:
  /*! Predefined filtering method identifier. */
  enum vpFilterMatchingType {
    constantFactorDistanceThreshold, /*!< Keep matches whose descriptor distance is below a constant factor times the minimum distance. */
    stdDistanceThreshold,            /*!< Keep matches whose descriptor distance is below the minimum distance plus the standard deviation. */
    ratioDistanceThreshold,          /*!< Keep matches that are sufficiently discriminated (ratio between the two best matches below a threshold). */
    stdAndRatioDistanceThreshold,    /*!< Keep matches that satisfy both the standard deviation and the ratio conditions. */
    noFilterMatching                 /*!< No filtering. */
  };

  /*! Predefined detection method identifier. */
  enum vpDetectionMethodType {
    detectionThreshold, /*!< The object is declared present if the average descriptor distance is below a threshold. */
    detectionScore      /*!< The object is declared present according to a score computed from the descriptor distances. */
  };

  /*! Predefined image format used when saving the training images. */
  typedef enum {
    jpgImageFormat, /*!< Save images in JPG format. */
    pngImageFormat, /*!< Save images in PNG format. */
    ppmImageFormat, /*!< Save images in PPM format. */
    pgmImageFormat  /*!< Save images in PGM format. */
  } vpImageFormatType;

  /*! Predefined feature detector identifier. */
  enum vpFeatureDetectorType {
#if (VISP_HAVE_OPENCV_VERSION >= 0x020403)
    DETECTOR_FAST,       /*!< FAST detector */
    DETECTOR_MSER,       /*!< MSER detector */
    DETECTOR_ORB,        /*!< ORB detector */
    DETECTOR_BRISK,      /*!< BRISK detector */
    DETECTOR_GFTT,       /*!< GFTT detector */
    DETECTOR_SimpleBlob, /*!< SimpleBlob detector */
#if (VISP_HAVE_OPENCV_VERSION < 0x030000) || (defined(VISP_HAVE_OPENCV_XFEATURES2D))
    DETECTOR_STAR, /*!< STAR detector */
#endif
#if defined(VISP_HAVE_OPENCV_NONFREE) || defined(VISP_HAVE_OPENCV_XFEATURES2D) || \
    (VISP_HAVE_OPENCV_VERSION >= 0x030411 && CV_MAJOR_VERSION < 4) || (VISP_HAVE_OPENCV_VERSION >= 0x040400)
    DETECTOR_SIFT, /*!< SIFT detector */
#endif
#if defined(VISP_HAVE_OPENCV_NONFREE) || defined(VISP_HAVE_OPENCV_XFEATURES2D)
    DETECTOR_SURF, /*!< SURF detector */
#endif
#if (VISP_HAVE_OPENCV_VERSION >= 0x030000)
    DETECTOR_KAZE,  /*!< KAZE detector */
    DETECTOR_AKAZE, /*!< AKAZE detector */
    DETECTOR_AGAST, /*!< AGAST detector */
#endif
#if (VISP_HAVE_OPENCV_VERSION >= 0x030100) && defined(VISP_HAVE_OPENCV_XFEATURES2D)
    DETECTOR_MSD, /*!< MSD detector */
#endif
#endif
    DETECTOR_TYPE_SIZE /*!< Number of available detector types. */
  };

  /*! Predefined descriptor extractor identifier. */
  enum vpFeatureDescriptorType {
#if (VISP_HAVE_OPENCV_VERSION >= 0x020403)
    DESCRIPTOR_ORB,   /*!< ORB descriptor */
    DESCRIPTOR_BRISK, /*!< BRISK descriptor */
#if (VISP_HAVE_OPENCV_VERSION < 0x030000) || (defined(VISP_HAVE_OPENCV_XFEATURES2D))
    DESCRIPTOR_FREAK, /*!< FREAK descriptor */
    DESCRIPTOR_BRIEF, /*!< BRIEF descriptor */
#endif
#if defined(VISP_HAVE_OPENCV_NONFREE) || defined(VISP_HAVE_OPENCV_XFEATURES2D) || \
    (VISP_HAVE_OPENCV_VERSION >= 0x030411 && CV_MAJOR_VERSION < 4) || (VISP_HAVE_OPENCV_VERSION >= 0x040400)
    DESCRIPTOR_SIFT, /*!< SIFT descriptor */
#endif
#if defined(VISP_HAVE_OPENCV_NONFREE) || defined(VISP_HAVE_OPENCV_XFEATURES2D)
    DESCRIPTOR_SURF, /*!< SURF descriptor */
#endif
#if (VISP_HAVE_OPENCV_VERSION >= 0x030000)
    DESCRIPTOR_KAZE,  /*!< KAZE descriptor */
    DESCRIPTOR_AKAZE, /*!< AKAZE descriptor */
#if defined(VISP_HAVE_OPENCV_XFEATURES2D)
    DESCRIPTOR_DAISY, /*!< DAISY descriptor */
    DESCRIPTOR_LATCH, /*!< LATCH descriptor */
#endif
#endif
#if (VISP_HAVE_OPENCV_VERSION >= 0x030200) && defined(VISP_HAVE_OPENCV_XFEATURES2D)
    DESCRIPTOR_VGG,       /*!< VGG descriptor */
    DESCRIPTOR_BoostDesc, /*!< BoostDesc descriptor */
#endif
#endif
    DESCRIPTOR_TYPE_SIZE /*!< Number of available descriptor types. */
  };

  vpKeyPoint(const vpFeatureDetectorType &detectorType, const vpFeatureDescriptorType &descriptorType,
             const std::string &matcherName, const vpFilterMatchingType &filterType = ratioDistanceThreshold);
  vpKeyPoint(const std::string &detectorName = "ORB", const std::string &extractorName = "ORB",
             const std::string &matcherName = "BruteForce-Hamming",
             const vpFilterMatchingType &filterType = ratioDistanceThreshold);
  vpKeyPoint(const std::vector<std::string> &detectorNames, const std::vector<std::string> &extractorNames,
             const std::string &matcherName = "BruteForce",
             const vpFilterMatchingType &filterType = ratioDistanceThreshold);

  unsigned int buildReference(const vpImage<unsigned char> &I);
  unsigned int buildReference(const vpImage<unsigned char> &I, const vpImagePoint &iP, unsigned int height,
                              unsigned int width);
  unsigned int buildReference(const vpImage<unsigned char> &I, const vpRect &rectangle);

  unsigned int buildReference(const vpImage<unsigned char> &I, std::vector<cv::KeyPoint> &trainKeyPoints,
                              std::vector<cv::Point3f> &points3f, bool append = false, int class_id = -1);
  unsigned int buildReference(const vpImage<unsigned char> &I, const std::vector<cv::KeyPoint> &trainKeyPoints,
                              const cv::Mat &trainDescriptors, const std::vector<cv::Point3f> &points3f,
                              bool append = false, int class_id = -1);

  unsigned int buildReference(const vpImage<vpRGBa> &I_color);
  unsigned int buildReference(const vpImage<vpRGBa> &I_color, const vpImagePoint &iP, unsigned int height,
                              unsigned int width);
  unsigned int buildReference(const vpImage<vpRGBa> &I_color, const vpRect &rectangle);

  unsigned int buildReference(const vpImage<vpRGBa> &I_color, std::vector<cv::KeyPoint> &trainKeyPoints,
                              std::vector<cv::Point3f> &points3f, bool append = false, int class_id = -1);
  unsigned int buildReference(const vpImage<vpRGBa> &I, const std::vector<cv::KeyPoint> &trainKeyPoints,
                              const cv::Mat &trainDescriptors, const std::vector<cv::Point3f> &points3f,
                              bool append = false, int class_id = -1);

  static void compute3D(const cv::KeyPoint &candidate, const std::vector<vpPoint> &roi, const vpCameraParameters &cam,
                        const vpHomogeneousMatrix &cMo, cv::Point3f &point);

  static void compute3D(const vpImagePoint &candidate, const std::vector<vpPoint> &roi, const vpCameraParameters &cam,
                        const vpHomogeneousMatrix &cMo, vpPoint &point);

  static void compute3DForPointsInPolygons(const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam,
                                           std::vector<cv::KeyPoint> &candidates,
                                           const std::vector<vpPolygon> &polygons,
                                           const std::vector<std::vector<vpPoint> > &roisPt,
                                           std::vector<cv::Point3f> &points, cv::Mat *descriptors = NULL);

  static void compute3DForPointsInPolygons(const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam,
                                           std::vector<vpImagePoint> &candidates,
                                           const std::vector<vpPolygon> &polygons,
                                           const std::vector<std::vector<vpPoint> > &roisPt,
                                           std::vector<vpPoint> &points, cv::Mat *descriptors = NULL);

  static void
  compute3DForPointsOnCylinders(const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam,
                                std::vector<cv::KeyPoint> &candidates, const std::vector<vpCylinder> &cylinders,
                                const std::vector<std::vector<std::vector<vpImagePoint> > > &vectorOfCylinderRois,
                                std::vector<cv::Point3f> &points, cv::Mat *descriptors = NULL);

  static void
  compute3DForPointsOnCylinders(const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam,
                                std::vector<vpImagePoint> &candidates, const std::vector<vpCylinder> &cylinders,
                                const std::vector<std::vector<std::vector<vpImagePoint> > > &vectorOfCylinderRois,
                                std::vector<vpPoint> &points, cv::Mat *descriptors = NULL);

  bool computePose(const std::vector<cv::Point2f> &imagePoints, const std::vector<cv::Point3f> &objectPoints,
                   const vpCameraParameters &cam, vpHomogeneousMatrix &cMo, std::vector<int> &inlierIndex,
                   double &elapsedTime, bool (*func)(const vpHomogeneousMatrix &) = NULL);

  bool computePose(const std::vector<vpPoint> &objectVpPoints, vpHomogeneousMatrix &cMo, std::vector<vpPoint> &inliers,
                   double &elapsedTime, bool (*func)(const vpHomogeneousMatrix &) = NULL);

  bool computePose(const std::vector<vpPoint> &objectVpPoints, vpHomogeneousMatrix &cMo, std::vector<vpPoint> &inliers,
                   std::vector<unsigned int> &inlierIndex, double &elapsedTime,
                   bool (*func)(const vpHomogeneousMatrix &) = NULL);

  void createImageMatching(vpImage<unsigned char> &IRef, vpImage<unsigned char> &ICurrent,
                           vpImage<unsigned char> &IMatching);
  void createImageMatching(vpImage<unsigned char> &ICurrent, vpImage<unsigned char> &IMatching);

  void createImageMatching(vpImage<unsigned char> &IRef, vpImage<vpRGBa> &ICurrent, vpImage<vpRGBa> &IMatching);
  void createImageMatching(vpImage<vpRGBa> &ICurrent, vpImage<vpRGBa> &IMatching);

  void detect(const vpImage<unsigned char> &I, std::vector<cv::KeyPoint> &keyPoints,
              const vpRect &rectangle = vpRect());
  void detect(const vpImage<vpRGBa> &I_color, std::vector<cv::KeyPoint> &keyPoints, const vpRect &rectangle = vpRect());
  void detect(const cv::Mat &matImg, std::vector<cv::KeyPoint> &keyPoints, const cv::Mat &mask = cv::Mat());
  void detect(const vpImage<unsigned char> &I, std::vector<cv::KeyPoint> &keyPoints, double &elapsedTime,
              const vpRect &rectangle = vpRect());
  void detect(const vpImage<vpRGBa> &I_color, std::vector<cv::KeyPoint> &keyPoints, double &elapsedTime,
              const vpRect &rectangle = vpRect());
  void detect(const cv::Mat &matImg, std::vector<cv::KeyPoint> &keyPoints, double &elapsedTime,
              const cv::Mat &mask = cv::Mat());

  void detectExtractAffine(const vpImage<unsigned char> &I, std::vector<std::vector<cv::KeyPoint> > &listOfKeypoints,
                           std::vector<cv::Mat> &listOfDescriptors,
                           std::vector<vpImage<unsigned char> > *listOfAffineI = NULL);

  void display(const vpImage<unsigned char> &IRef, const vpImage<unsigned char> &ICurrent, unsigned int size = 3);
  void display(const vpImage<unsigned char> &ICurrent, unsigned int size = 3, const vpColor &color = vpColor::green);
  void display(const vpImage<vpRGBa> &IRef, const vpImage<vpRGBa> &ICurrent, unsigned int size = 3);
  void display(const vpImage<vpRGBa> &ICurrent, unsigned int size = 3, const vpColor &color = vpColor::green);

  void displayMatching(const vpImage<unsigned char> &IRef, vpImage<unsigned char> &IMatching, unsigned int crossSize,
                       unsigned int lineThickness = 1, const vpColor &color = vpColor::green);
  void displayMatching(const vpImage<unsigned char> &ICurrent, vpImage<unsigned char> &IMatching,
                       const std::vector<vpImagePoint> &ransacInliers = std::vector<vpImagePoint>(),
                       unsigned int crossSize = 3, unsigned int lineThickness = 1);
  void displayMatching(const vpImage<unsigned char> &IRef, vpImage<vpRGBa> &IMatching, unsigned int crossSize,
                       unsigned int lineThickness = 1, const vpColor &color = vpColor::green);
  void displayMatching(const vpImage<vpRGBa> &IRef, vpImage<vpRGBa> &IMatching, unsigned int crossSize,
                       unsigned int lineThickness = 1, const vpColor &color = vpColor::green);
  void displayMatching(const vpImage<vpRGBa> &ICurrent, vpImage<vpRGBa> &IMatching,
                       const std::vector<vpImagePoint> &ransacInliers = std::vector<vpImagePoint>(),
                       unsigned int crossSize = 3, unsigned int lineThickness = 1);

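  /*
   * Minimal sketch of the side-by-side visualization helpers declared above (an assumed
   * workflow, not taken from the ViSP documentation; Iref and Icur are previously loaded
   * grayscale images and a display class such as vpDisplayX is assumed to be available):
   *
   * \code
   * vpImage<unsigned char> Iref, Icur, Imatch;
   * vpKeyPoint keypoint;                              // default ORB / BruteForce-Hamming
   * keypoint.buildReference(Iref);
   * keypoint.createImageMatching(Iref, Icur, Imatch); // allocate the side-by-side image
   * keypoint.matchPoint(Icur);
   * keypoint.insertImageMatching(Iref, Icur, Imatch); // copy both images into Imatch
   * // With a display attached to Imatch (e.g. vpDisplayX d(Imatch);):
   * // vpDisplay::display(Imatch);
   * // keypoint.displayMatching(Iref, Imatch, 3);     // draw the matched keypoints
   * // vpDisplay::flush(Imatch);
   * \endcode
   */
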
  void extract(const vpImage<unsigned char> &I, std::vector<cv::KeyPoint> &keyPoints, cv::Mat &descriptors,
               std::vector<cv::Point3f> *trainPoints = NULL);
  void extract(const vpImage<vpRGBa> &I_color, std::vector<cv::KeyPoint> &keyPoints, cv::Mat &descriptors,
               std::vector<cv::Point3f> *trainPoints = NULL);
  void extract(const cv::Mat &matImg, std::vector<cv::KeyPoint> &keyPoints, cv::Mat &descriptors,
               std::vector<cv::Point3f> *trainPoints = NULL);
  void extract(const vpImage<unsigned char> &I, std::vector<cv::KeyPoint> &keyPoints, cv::Mat &descriptors,
               double &elapsedTime, std::vector<cv::Point3f> *trainPoints = NULL);
  void extract(const vpImage<vpRGBa> &I_color, std::vector<cv::KeyPoint> &keyPoints, cv::Mat &descriptors,
               double &elapsedTime, std::vector<cv::Point3f> *trainPoints = NULL);
  void extract(const cv::Mat &matImg, std::vector<cv::KeyPoint> &keyPoints, cv::Mat &descriptors, double &elapsedTime,
               std::vector<cv::Point3f> *trainPoints = NULL);

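  /*
   * Minimal sketch of the lower-level pipeline built from detect(), extract() and match()
   * (an illustration of the declarations above, not taken from the ViSP documentation):
   *
   * \code
   * vpImage<unsigned char> Iref, Icur;  // assumed to be already loaded
   * vpKeyPoint keypoint;
   * std::vector<cv::KeyPoint> trainKeyPoints, queryKeyPoints;
   * cv::Mat trainDescriptors, queryDescriptors;
   *
   * keypoint.detect(Iref, trainKeyPoints);                    // detect keypoints in the reference image
   * keypoint.extract(Iref, trainKeyPoints, trainDescriptors); // compute their descriptors
   * keypoint.detect(Icur, queryKeyPoints);                    // detect keypoints in the current image
   * keypoint.extract(Icur, queryKeyPoints, queryDescriptors);
   *
   * std::vector<cv::DMatch> matches;
   * double elapsedTime = 0.;
   * keypoint.match(trainDescriptors, queryDescriptors, matches, elapsedTime);
   * \endcode
   */
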
  inline vpMatrix getCovarianceMatrix() const
  {
    if (!m_computeCovariance) {
      std::cout << "Warning: the covariance matrix has not been computed. "
                   "See setCovarianceComputation() to do it."
                << std::endl;
      return vpMatrix();
    }

    if (m_computeCovariance && !m_useRansacVVS) {
      std::cout << "Warning: the covariance matrix can only be computed "
                   "with a Virtual Visual Servoing approach."
                << std::endl
                << "Use setUseRansacVVS(true) to choose a pose "
                   "estimation method based on a Virtual Visual Servoing "
                   "approach."
                << std::endl;
      return vpMatrix();
    }

    return m_covarianceMatrix;
  }

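  /*
   * Sketch of how the pose covariance is expected to be retrieved, based on the warnings
   * above (an assumption, not taken from the ViSP documentation): both the RANSAC Virtual
   * Visual Servoing pose estimation and the covariance computation must be enabled first.
   *
   * \code
   * vpKeyPoint keypoint;
   * keypoint.setUseRansacVVS(true);          // covariance is only available with the VVS pose estimation
   * keypoint.setCovarianceComputation(true);
   * // ... buildReference() / matchPoint(I, cam, cMo) ...
   * vpMatrix cov = keypoint.getCovarianceMatrix();
   * \endcode
   */
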
  inline double getDetectionTime() const { return m_detectionTime; }

  inline cv::Ptr<cv::FeatureDetector> getDetector(const vpFeatureDetectorType &type) const
  {
    std::map<vpFeatureDetectorType, std::string>::const_iterator it_name = m_mapOfDetectorNames.find(type);
    if (it_name == m_mapOfDetectorNames.end()) {
      std::cerr << "Internal problem with the feature type and the "
                   "corresponding name!"
                << std::endl;
      return cv::Ptr<cv::FeatureDetector>();
    }

    std::map<std::string, cv::Ptr<cv::FeatureDetector> >::const_iterator findDetector =
        m_detectors.find(it_name->second);
    if (findDetector != m_detectors.end()) {
      return findDetector->second;
    }

    std::cerr << "Cannot find: " << it_name->second << std::endl;
    return cv::Ptr<cv::FeatureDetector>();
  }

  inline cv::Ptr<cv::FeatureDetector> getDetector(const std::string &name) const
  {
    std::map<std::string, cv::Ptr<cv::FeatureDetector> >::const_iterator findDetector = m_detectors.find(name);
    if (findDetector != m_detectors.end()) {
      return findDetector->second;
    }

    std::cerr << "Cannot find: " << name << std::endl;
    return cv::Ptr<cv::FeatureDetector>();
  }

  inline std::map<vpFeatureDetectorType, std::string> getDetectorNames() const { return m_mapOfDetectorNames; }

  inline double getExtractionTime() const { return m_extractionTime; }

  inline cv::Ptr<cv::DescriptorExtractor> getExtractor(const vpFeatureDescriptorType &type) const
  {
    std::map<vpFeatureDescriptorType, std::string>::const_iterator it_name = m_mapOfDescriptorNames.find(type);
    if (it_name == m_mapOfDescriptorNames.end()) {
      std::cerr << "Internal problem with the feature type and the "
                   "corresponding name!"
                << std::endl;
      return cv::Ptr<cv::DescriptorExtractor>();
    }

    std::map<std::string, cv::Ptr<cv::DescriptorExtractor> >::const_iterator findExtractor =
        m_extractors.find(it_name->second);
    if (findExtractor != m_extractors.end()) {
      return findExtractor->second;
    }

    std::cerr << "Cannot find: " << it_name->second << std::endl;
    return cv::Ptr<cv::DescriptorExtractor>();
  }

  inline cv::Ptr<cv::DescriptorExtractor> getExtractor(const std::string &name) const
  {
    std::map<std::string, cv::Ptr<cv::DescriptorExtractor> >::const_iterator findExtractor = m_extractors.find(name);
    if (findExtractor != m_extractors.end()) {
      return findExtractor->second;
    }

    std::cerr << "Cannot find: " << name << std::endl;
    return cv::Ptr<cv::DescriptorExtractor>();
  }

  inline std::map<vpFeatureDescriptorType, std::string> getExtractorNames() const { return m_mapOfDescriptorNames; }

  inline vpImageFormatType getImageFormat() const { return m_imageFormat; }

  inline double getMatchingTime() const { return m_matchingTime; }

  inline cv::Ptr<cv::DescriptorMatcher> getMatcher() const { return m_matcher; }

  inline std::vector<cv::DMatch> getMatches() const { return m_filteredMatches; }

  inline std::vector<std::pair<cv::KeyPoint, cv::KeyPoint> > getMatchQueryToTrainKeyPoints() const
  {
    std::vector<std::pair<cv::KeyPoint, cv::KeyPoint> > matchQueryToTrainKeyPoints;
    matchQueryToTrainKeyPoints.reserve(m_filteredMatches.size());
    for (size_t i = 0; i < m_filteredMatches.size(); i++) {
      matchQueryToTrainKeyPoints.push_back(
          std::pair<cv::KeyPoint, cv::KeyPoint>(m_queryFilteredKeyPoints[(size_t)m_filteredMatches[i].queryIdx],
                                                m_trainKeyPoints[(size_t)m_filteredMatches[i].trainIdx]));
    }
    return matchQueryToTrainKeyPoints;
  }

  inline unsigned int getNbImages() const { return static_cast<unsigned int>(m_mapOfImages.size()); }

  void getObjectPoints(std::vector<cv::Point3f> &objectPoints) const;
  void getObjectPoints(std::vector<vpPoint> &objectPoints) const;

  inline double getPoseTime() const { return m_poseTime; }

  inline cv::Mat getQueryDescriptors() const { return m_queryDescriptors; }

  void getQueryKeyPoints(std::vector<cv::KeyPoint> &keyPoints, bool matches = true) const;
  void getQueryKeyPoints(std::vector<vpImagePoint> &keyPoints, bool matches = true) const;

  inline std::vector<vpImagePoint> getRansacInliers() const { return m_ransacInliers; }

  inline std::vector<vpImagePoint> getRansacOutliers() const { return m_ransacOutliers; }

  inline cv::Mat getTrainDescriptors() const { return m_trainDescriptors; }

  void getTrainKeyPoints(std::vector<cv::KeyPoint> &keyPoints) const;
  void getTrainKeyPoints(std::vector<vpImagePoint> &keyPoints) const;

  void getTrainPoints(std::vector<cv::Point3f> &points) const;
  void getTrainPoints(std::vector<vpPoint> &points) const;

  void initMatcher(const std::string &matcherName);

  void insertImageMatching(const vpImage<unsigned char> &IRef, const vpImage<unsigned char> &ICurrent,
                           vpImage<unsigned char> &IMatching);
  void insertImageMatching(const vpImage<unsigned char> &ICurrent, vpImage<unsigned char> &IMatching);

  void insertImageMatching(const vpImage<vpRGBa> &IRef, const vpImage<vpRGBa> &ICurrent, vpImage<vpRGBa> &IMatching);
  void insertImageMatching(const vpImage<vpRGBa> &ICurrent, vpImage<vpRGBa> &IMatching);

  void loadConfigFile(const std::string &configFile);

  void loadLearningData(const std::string &filename, bool binaryMode = false, bool append = false);

  void match(const cv::Mat &trainDescriptors, const cv::Mat &queryDescriptors, std::vector<cv::DMatch> &matches,
             double &elapsedTime);

  unsigned int matchPoint(const vpImage<unsigned char> &I);
  unsigned int matchPoint(const vpImage<unsigned char> &I, const vpImagePoint &iP, unsigned int height,
                          unsigned int width);
  unsigned int matchPoint(const vpImage<unsigned char> &I, const vpRect &rectangle);

  unsigned int matchPoint(const std::vector<cv::KeyPoint> &queryKeyPoints, const cv::Mat &queryDescriptors);
  bool matchPoint(const vpImage<unsigned char> &I, const vpCameraParameters &cam, vpHomogeneousMatrix &cMo,
                  bool (*func)(const vpHomogeneousMatrix &) = NULL, const vpRect &rectangle = vpRect());
  bool matchPoint(const vpImage<unsigned char> &I, const vpCameraParameters &cam, vpHomogeneousMatrix &cMo,
                  double &error, double &elapsedTime, bool (*func)(const vpHomogeneousMatrix &) = NULL,
                  const vpRect &rectangle = vpRect());

  bool matchPointAndDetect(const vpImage<unsigned char> &I, vpRect &boundingBox, vpImagePoint &centerOfGravity,
                           const bool isPlanarObject = true, std::vector<vpImagePoint> *imPts1 = NULL,
                           std::vector<vpImagePoint> *imPts2 = NULL, double *meanDescriptorDistance = NULL,
                           double *detectionScore = NULL, const vpRect &rectangle = vpRect());

  bool matchPointAndDetect(const vpImage<unsigned char> &I, const vpCameraParameters &cam, vpHomogeneousMatrix &cMo,
                           double &error, double &elapsedTime, vpRect &boundingBox, vpImagePoint &centerOfGravity,
                           bool (*func)(const vpHomogeneousMatrix &) = NULL, const vpRect &rectangle = vpRect());

  unsigned int matchPoint(const vpImage<vpRGBa> &I_color);
  unsigned int matchPoint(const vpImage<vpRGBa> &I_color, const vpImagePoint &iP, unsigned int height,
                          unsigned int width);
  unsigned int matchPoint(const vpImage<vpRGBa> &I_color, const vpRect &rectangle);

  bool matchPoint(const vpImage<vpRGBa> &I_color, const vpCameraParameters &cam, vpHomogeneousMatrix &cMo,
                  bool (*func)(const vpHomogeneousMatrix &) = NULL, const vpRect &rectangle = vpRect());
  bool matchPoint(const vpImage<vpRGBa> &I_color, const vpCameraParameters &cam, vpHomogeneousMatrix &cMo,
                  double &error, double &elapsedTime, bool (*func)(const vpHomogeneousMatrix &) = NULL,
                  const vpRect &rectangle = vpRect());

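  /*
   * Sketch of pose estimation from keypoint matching (an illustration of the overloads above,
   * not taken from the ViSP documentation; it assumes the reference was built with keypoints
   * associated to 3D coordinates, e.g. through buildReference(I, trainKeyPoints, points3f) or
   * loadLearningData(), and that keypoint and Icur are defined as in the earlier sketches):
   *
   * \code
   * vpCameraParameters cam;   // assumed to come from a camera calibration
   * vpHomogeneousMatrix cMo;  // estimated pose of the object frame in the camera frame
   * double error = 0., elapsedTime = 0.;
   * if (keypoint.matchPoint(Icur, cam, cMo, error, elapsedTime)) {
   *   std::cout << "Estimated pose:\n" << cMo << std::endl;
   * }
   * \endcode
   */
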
  void reset();

  void saveLearningData(const std::string &filename, bool binaryMode = false, bool saveTrainingImages = true);

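  /*
   * Sketch of saving and reloading the learning data (an illustration of saveLearningData()
   * above and loadLearningData() declared earlier; the file name is hypothetical):
   *
   * \code
   * // Offline: build the reference once and save it in binary mode
   * vpKeyPoint keypoint;
   * keypoint.buildReference(Iref);
   * keypoint.saveLearningData("learning_data.bin", true);
   *
   * // Online: restore the reference without recomputing it
   * vpKeyPoint keypoint2;
   * keypoint2.loadLearningData("learning_data.bin", true);
   * keypoint2.matchPoint(Icur);
   * \endcode
   */
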
  inline void setCovarianceComputation(const bool &flag)
  {
    m_computeCovariance = flag;
    if (!m_useRansacVVS) {
      std::cout << "Warning: the covariance matrix can only be computed "
                   "with a Virtual Visual Servoing approach."
                << std::endl
                << "Use setUseRansacVVS(true) to choose a pose "
                   "estimation method based on a Virtual "
                   "Visual Servoing approach."
                << std::endl;
    }
  }

  inline void setDetectionMethod(const vpDetectionMethodType &method) { m_detectionMethod = method; }

  inline void setDetector(const vpFeatureDetectorType &detectorType)
  {
    m_detectorNames.clear();
    m_detectorNames.push_back(m_mapOfDetectorNames[detectorType]);
    m_detectors.clear();
    initDetector(m_mapOfDetectorNames[detectorType]);
  }

  inline void setDetector(const std::string &detectorName)
  {
    m_detectorNames.clear();
    m_detectorNames.push_back(detectorName);
    m_detectors.clear();
    initDetector(detectorName);
  }

#if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
  template <typename T1, typename T2, typename T3>
  inline void setDetectorParameter(const T1 detectorName, const T2 parameterName, const T3 value)
  {
    if (m_detectors.find(detectorName) != m_detectors.end()) {
      m_detectors[detectorName]->set(parameterName, value);
    }
  }
#endif

  inline void setDetectors(const std::vector<std::string> &detectorNames)
  {
    m_detectorNames.clear();
    m_detectors.clear();
    m_detectorNames = detectorNames;
    initDetectors(m_detectorNames);
  }

  inline void setExtractor(const vpFeatureDescriptorType &extractorType)
  {
    m_extractorNames.clear();
    m_extractorNames.push_back(m_mapOfDescriptorNames[extractorType]);
    m_extractors.clear();
    initExtractor(m_mapOfDescriptorNames[extractorType]);
  }

  inline void setExtractor(const std::string &extractorName)
  {
    m_extractorNames.clear();
    m_extractorNames.push_back(extractorName);
    m_extractors.clear();
    initExtractor(extractorName);
  }

#if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
  template <typename T1, typename T2, typename T3>
  inline void setExtractorParameter(const T1 extractorName, const T2 parameterName, const T3 value)
  {
    if (m_extractors.find(extractorName) != m_extractors.end()) {
      m_extractors[extractorName]->set(parameterName, value);
    }
  }
#endif

  inline void setExtractors(const std::vector<std::string> &extractorNames)
  {
    m_extractorNames.clear();
    m_extractorNames = extractorNames;
    m_extractors.clear();
    initExtractors(m_extractorNames);
  }

  inline void setImageFormat(const vpImageFormatType &imageFormat) { m_imageFormat = imageFormat; }

  inline void setMatcher(const std::string &matcherName)
  {
    m_matcherName = matcherName;
    initMatcher(m_matcherName);
  }

  void setMaxFeatures(int maxFeatures) { m_maxFeatures = maxFeatures; }

  inline void setFilterMatchingType(const vpFilterMatchingType &filterType)
  {
    m_filterType = filterType;

    // Use k-nearest neighbors (knn) to retrieve the two best matches for a
    // keypoint, so this is useful only for the ratioDistanceThreshold method.
    if (filterType == ratioDistanceThreshold || filterType == stdAndRatioDistanceThreshold) {
      m_useKnn = true;

#if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
      if (m_matcher != NULL && m_matcherName == "BruteForce") {
        // If a matcher is already initialized, disable the crossCheck
        // because it will not work with knnMatch.
        m_matcher->set("crossCheck", false);
      }
#endif
    } else {
      m_useKnn = false;

#if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
      if (m_matcher != NULL && m_matcherName == "BruteForce") {
        // If a matcher is already initialized, set the crossCheck mode if
        // necessary.
        m_matcher->set("crossCheck", m_useBruteForceCrossCheck);
      }
#endif
    }
  }

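  /*
   * Sketch of selecting the matching filter (an illustration of setFilterMatchingType()
   * above; the threshold value is an arbitrary example):
   *
   * \code
   * vpKeyPoint keypoint;
   * keypoint.setFilterMatchingType(vpKeyPoint::ratioDistanceThreshold); // enables the knn ratio test
   * keypoint.setMatchingRatioThreshold(0.85);                           // ratio in ]0 ; 1]
   * \endcode
   */
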
  inline void setMatchingFactorThreshold(const double factor)
  {
    if (factor > 0.0) {
      m_matchingFactorThreshold = factor;
    } else {
      throw vpException(vpException::badValue, "The factor must be positive.");
    }
  }

  inline void setMatchingRatioThreshold(double ratio)
  {
    if (ratio > 0.0 && (ratio < 1.0 || std::fabs(ratio - 1.0) < std::numeric_limits<double>::epsilon())) {
      m_matchingRatioThreshold = ratio;
    } else {
      throw vpException(vpException::badValue, "The ratio must be in the interval ]0 ; 1].");
    }
  }

  inline void setRansacConsensusPercentage(double percentage)
  {
    if (percentage > 0.0 &&
        (percentage < 100.0 || std::fabs(percentage - 100.0) < std::numeric_limits<double>::epsilon())) {
      m_ransacConsensusPercentage = percentage;
    } else {
      throw vpException(vpException::badValue, "The percentage must be in the interval ]0 ; 100].");
    }
  }

  inline void setRansacFilterFlag(const vpPose::RANSAC_FILTER_FLAGS &flag) { m_ransacFilterFlag = flag; }

  inline void setRansacIteration(int nbIter)
  {
    if (nbIter > 0) {
      m_nbRansacIterations = nbIter;
    } else {
      throw vpException(vpException::badValue, "The number of iterations must be greater than zero.");
    }
  }

  inline void setRansacParallel(bool parallel) { m_ransacParallel = parallel; }

  inline void setRansacParallelNbThreads(unsigned int nthreads) { m_ransacParallelNbThreads = nthreads; }

  inline void setRansacReprojectionError(double reprojectionError)
  {
    if (reprojectionError > 0.0) {
      m_ransacReprojectionError = reprojectionError;
    } else {
      throw vpException(vpException::badValue, "The Ransac reprojection "
                                               "threshold must be positive "
                                               "as we deal with distance.");
    }
  }

  inline void setRansacMinInlierCount(int minCount)
  {
    if (minCount > 0) {
      m_nbRansacMinInlierCount = minCount;
    } else {
      throw vpException(vpException::badValue, "The minimum number of inliers must be greater than zero.");
    }
  }

  inline void setRansacThreshold(double threshold)
  {
    if (threshold > 0.0) {
      m_ransacThreshold = threshold;
    } else {
      throw vpException(vpException::badValue, "The Ransac threshold must be positive as we deal with distance.");
    }
  }

  inline void setUseAffineDetection(bool useAffine) { m_useAffineDetection = useAffine; }

#if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
  inline void setUseBruteForceCrossCheck(bool useCrossCheck)
  {
    // Only available with BruteForce and with k=1 (i.e. not used with a
    // ratioDistanceThreshold method).
    if (m_matcher != NULL && !m_useKnn && m_matcherName == "BruteForce") {
      m_matcher->set("crossCheck", useCrossCheck);
    } else if (m_matcher != NULL && m_useKnn && m_matcherName == "BruteForce") {
      std::cout << "Warning, you try to set the crossCheck parameter with a "
                   "BruteForce matcher but knn is enabled";
      std::cout << " (the filtering method uses a ratio constraint)" << std::endl;
    }
  }
#endif

  inline void setUseMatchTrainToQuery(bool useMatchTrainToQuery) { m_useMatchTrainToQuery = useMatchTrainToQuery; }

  inline void setUseRansacConsensusPercentage(bool usePercentage) { m_useConsensusPercentage = usePercentage; }

  inline void setUseRansacVVS(bool ransacVVS) { m_useRansacVVS = ransacVVS; }

  inline void setUseSingleMatchFilter(bool singleMatchFilter) { m_useSingleMatchFilter = singleMatchFilter; }

private:
  bool m_computeCovariance;
  vpMatrix m_covarianceMatrix;
  int m_currentImageId;
  vpDetectionMethodType m_detectionMethod;
  double m_detectionScore;
  double m_detectionThreshold;
  double m_detectionTime;
  std::vector<std::string> m_detectorNames;
  //! Map of feature detectors, with a key based upon the detector name.
  std::map<std::string, cv::Ptr<cv::FeatureDetector> > m_detectors;
  double m_extractionTime;
  std::vector<std::string> m_extractorNames;
  //! Map of descriptor extractors, with a key based upon the extractor name.
  std::map<std::string, cv::Ptr<cv::DescriptorExtractor> > m_extractors;
  std::vector<cv::DMatch> m_filteredMatches;
  vpFilterMatchingType m_filterType;
  vpImageFormatType m_imageFormat;
  std::vector<std::vector<cv::DMatch> > m_knnMatches;
  std::map<vpFeatureDescriptorType, std::string> m_mapOfDescriptorNames;
  std::map<vpFeatureDetectorType, std::string> m_mapOfDetectorNames;
  std::map<int, int> m_mapOfImageId;
  std::map<int, vpImage<unsigned char> > m_mapOfImages;
  cv::Ptr<cv::DescriptorMatcher> m_matcher;
  std::string m_matcherName;
  std::vector<cv::DMatch> m_matches;
  double m_matchingFactorThreshold;
  double m_matchingRatioThreshold;
  double m_matchingTime;
  std::vector<std::pair<cv::KeyPoint, cv::Point3f> > m_matchRansacKeyPointsToPoints;
  int m_nbRansacIterations;
  int m_nbRansacMinInlierCount;
  std::vector<cv::Point3f> m_objectFilteredPoints;
  double m_poseTime;
  cv::Mat m_queryDescriptors;
  std::vector<cv::KeyPoint> m_queryFilteredKeyPoints;
  std::vector<cv::KeyPoint> m_queryKeyPoints;
  double m_ransacConsensusPercentage;
  vpPose::RANSAC_FILTER_FLAGS m_ransacFilterFlag;
  std::vector<vpImagePoint> m_ransacInliers;
  std::vector<vpImagePoint> m_ransacOutliers;
  bool m_ransacParallel;
  unsigned int m_ransacParallelNbThreads;
  double m_ransacReprojectionError;
  double m_ransacThreshold;
  //! Train descriptors (descriptors computed on the keypoints detected in the train images).
  cv::Mat m_trainDescriptors;
  std::vector<cv::KeyPoint> m_trainKeyPoints;
  std::vector<cv::Point3f> m_trainPoints;
  std::vector<vpPoint> m_trainVpPoints;
  bool m_useAffineDetection;
#if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
  bool m_useBruteForceCrossCheck;
#endif
  bool m_useConsensusPercentage;
  bool m_useKnn;
  bool m_useMatchTrainToQuery;
  bool m_useRansacVVS;
  bool m_useSingleMatchFilter;
  int m_maxFeatures;

  void affineSkew(double tilt, double phi, cv::Mat &img, cv::Mat &mask, cv::Mat &Ai);

  double computePoseEstimationError(const std::vector<std::pair<cv::KeyPoint, cv::Point3f> > &matchKeyPoints,
                                    const vpCameraParameters &cam, const vpHomogeneousMatrix &cMo_est);

  void filterMatches();

  void init();
  void initDetector(const std::string &detectorNames);
  void initDetectors(const std::vector<std::string> &detectorNames);

  void initExtractor(const std::string &extractorName);
  void initExtractors(const std::vector<std::string> &extractorNames);

  void initFeatureNames();

  inline size_t myKeypointHash(const cv::KeyPoint &kp)
  {
    size_t _Val = 2166136261U, scale = 16777619U;
    Cv32suf u;
    u.f = kp.pt.x;
    _Val = (scale * _Val) ^ u.u;
    u.f = kp.pt.y;
    _Val = (scale * _Val) ^ u.u;
    u.f = kp.size;
    _Val = (scale * _Val) ^ u.u;
    // As the keypoint angle can be computed only for certain keypoint types,
    // and only when extracting the corresponding descriptor, the angle field
    // is not taken into account for the hash:
    // u.f = kp.angle; _Val = (scale * _Val) ^ u.u;
    u.f = kp.response;
    _Val = (scale * _Val) ^ u.u;
    _Val = (scale * _Val) ^ ((size_t)kp.octave);
    _Val = (scale * _Val) ^ ((size_t)kp.class_id);
    return _Val;
  }

#if (VISP_HAVE_OPENCV_VERSION >= 0x030000)
  /*
   * Adapts a detector to detect points over multiple levels of a Gaussian
   * pyramid. Useful for detectors that are not inherently scaled.
   * From OpenCV 2.4.11 source code.
   */
  class PyramidAdaptedFeatureDetector : public cv::FeatureDetector
  {
  public:
    // maxLevel - The 0-based index of the last pyramid layer
    PyramidAdaptedFeatureDetector(const cv::Ptr<cv::FeatureDetector> &detector, int maxLevel = 2);

    // TODO implement read/write
    virtual bool empty() const;

  protected:
    virtual void detect(cv::InputArray image, CV_OUT std::vector<cv::KeyPoint> &keypoints,
                        cv::InputArray mask = cv::noArray());
    virtual void detectImpl(const cv::Mat &image, std::vector<cv::KeyPoint> &keypoints,
                            const cv::Mat &mask = cv::Mat()) const;

    cv::Ptr<cv::FeatureDetector> detector;
    int maxLevel;
  };

  /*
   * A class to filter a vector of keypoints.
   * Because it is currently difficult to provide a convenient interface for
   * all usage scenarios of a keypoints filter class, it only contains the
   * static methods that are needed for now.
   */
  class KeyPointsFilter
  {
  public:
    KeyPointsFilter() {}

    /*
     * Remove keypoints within borderPixels of an image edge.
     */
    static void runByImageBorder(std::vector<cv::KeyPoint> &keypoints, cv::Size imageSize, int borderSize);
    /*
     * Remove keypoints of sizes out of range.
     */
    static void runByKeypointSize(std::vector<cv::KeyPoint> &keypoints, float minSize, float maxSize = FLT_MAX);
    /*
     * Remove keypoints from an image by a mask applied to the pixels of this image.
     */
    static void runByPixelsMask(std::vector<cv::KeyPoint> &keypoints, const cv::Mat &mask);
    /*
     * Remove duplicated keypoints.
     */
    static void removeDuplicated(std::vector<cv::KeyPoint> &keypoints);

    /*
     * Retain the specified number of the best keypoints (according to the
     * response).
     */
    static void retainBest(std::vector<cv::KeyPoint> &keypoints, int npoints);
  };

#endif
};

#endif
#endif