#include <visp3/core/vpConfig.h>

#if defined(VISP_HAVE_OPENCV) && defined(HAVE_OPENCV_IMGPROC) && defined(HAVE_OPENCV_FEATURES2D)

#include <visp3/core/vpHomogeneousMatrix.h>
#include <visp3/core/vpImage.h>
#include <visp3/core/vpIoTools.h>
#include <visp3/gui/vpDisplayGDI.h>
#include <visp3/gui/vpDisplayGTK.h>
#include <visp3/gui/vpDisplayOpenCV.h>
#include <visp3/gui/vpDisplayX.h>
#include <visp3/io/vpImageIo.h>
#include <visp3/io/vpParseArgv.h>
#include <visp3/io/vpVideoReader.h>
#include <visp3/mbt/vpMbEdgeTracker.h>
#include <visp3/vision/vpKeyPoint.h>
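
// This test detects ORB keypoints on a reference image of an object whose CAD model is
// tracked with vpMbEdgeTracker, attaches a 3D model point to each training keypoint, then
// matches the keypoints detected in the following frames of the sequence and estimates the
// camera pose from the resulting 2D/3D correspondences.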
#define GETOPTARGS "cdh"
void usage(const char *name, const char *badparam);
bool getOptions(int argc, const char **argv, bool &click_allowed, bool &display);
void usage(const char *name, const char *badparam)
{
  fprintf(stdout, "\n\
Test keypoints matching.\n\
\n\
SYNOPSIS\n\
  %s [-c] [-d] [-h]\n\
\n\
OPTIONS:\n\
  -c   Disable the mouse click. Useful to automate the\n\
       execution of this program without human intervention.\n\
  -d   Turn off the display.\n\
  -h   Print this help.\n", name);

  if (badparam)
    fprintf(stdout, "\nERROR: Bad parameter [%s]\n", badparam);
}
bool getOptions(int argc, const char **argv, bool &click_allowed, bool &display)
{
  const char *optarg_;
  int c;
  while ((c = vpParseArgv::parse(argc, argv, GETOPTARGS, &optarg_)) > 1) {
    switch (c) {
    case 'c':
      click_allowed = false;
      break;
    case 'd':
      display = false;
      break;
    case 'h':
      usage(argv[0], NULL);
      return false;
    default:
      usage(argv[0], optarg_);
      return false;
    }
  }

  if ((c == 1) || (c == -1)) {
    usage(argv[0], NULL);
    std::cerr << "ERROR: " << std::endl;
    std::cerr << "  Bad argument " << optarg_ << std::endl << std::endl;
    return false;
  }

  return true;
}
template <typename Type>
void run_test(const std::string &env_ipath, bool opt_click_allowed, bool opt_display, vpImage<Type> &I,
              vpImage<Type> &Imatch, vpImage<Type> &Iref)
{
#if VISP_HAVE_DATASET_VERSION >= 0x030600
  std::string ext("png");
#else
  std::string ext("pgm");
#endif

  // ... the video reader g, the model-based tracker and the reference image Iref are
  // initialized here from the dataset located in env_ipath
#if defined(VISP_HAVE_X11)
  vpDisplayX display;
#elif defined(VISP_HAVE_GTK)
  vpDisplayGTK display;
#elif defined(VISP_HAVE_GDI)
  vpDisplayGDI display;
#elif defined(HAVE_OPENCV_HIGHGUI)
  vpDisplayOpenCV display;
#endif

  // ...
  display.init(I, 0, 0, "ORB keypoints matching");
  if (opt_display && opt_click_allowed) {
    // ... wait for a user click before starting the matching
  }
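
  // Initial pose of the object in the camera frame: a translation in meters followed by a
  // theta-u rotation in radians.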
  vpHomogeneousMatrix cMoi(0.02044769891, 0.1101505452, 0.5078963719, 2.063603907, 1.110231561, -0.4392789872);
  cv::Ptr<cv::FeatureDetector> detector;
  cv::Ptr<cv::DescriptorExtractor> extractor;
  cv::Ptr<cv::DescriptorMatcher> matcher;
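
  // ORB is used both as detector and extractor (at most 500 keypoints, scale factor 1.2,
  // a single pyramid level); the binary ORB descriptors are compared with a brute-force
  // Hamming matcher.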
#if (VISP_HAVE_OPENCV_VERSION >= 0x030000)
  detector = cv::ORB::create(500, 1.2f, 1);
  extractor = cv::ORB::create(500, 1.2f, 1);
#elif (VISP_HAVE_OPENCV_VERSION >= 0x020301)
  detector = cv::FeatureDetector::create("ORB");
  extractor = cv::DescriptorExtractor::create("ORB");
#endif

  matcher = cv::DescriptorMatcher::create("BruteForce-Hamming");

#if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
  detector->set("nLevels", 1);
#endif
  std::vector<cv::KeyPoint> trainKeyPoints;
  // ... matImg is the reference image converted to a cv::Mat (conversion elided)
  detector->detect(matImg, trainKeyPoints);
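
  // The visible faces of the CAD model are retrieved from the tracker; together with the
  // known pose and the camera parameters they let vpKeyPoint::compute3DForPointsInPolygons
  // (call elided in this excerpt) keep only the keypoints lying on a visible face and
  // compute the corresponding 3D model point for each of them.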
  std::vector<vpPolygon> polygons;
  std::vector<std::vector<vpPoint> > roisPt;
  std::pair<std::vector<vpPolygon>, std::vector<std::vector<vpPoint> > > pair = tracker.getPolygonFaces(false);
  polygons = pair.first;
  roisPt = pair.second;
  std::vector<cv::Point3f> points3f;
  // ... points3f is filled with the 3D coordinates of the keypoints lying on the visible faces
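
  // Descriptors are then computed for the training keypoints; every keypoint must end up
  // with exactly one descriptor row and one 3D point, otherwise the training data are
  // inconsistent: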
  cv::Mat trainDescriptors;
  extractor->compute(matImg, trainKeyPoints, trainDescriptors);

  if (trainKeyPoints.size() != (size_t)trainDescriptors.rows || trainKeyPoints.size() != points3f.size()) {
    // ... report the error and abort the test
  }
  bool opt_click = false;
  while ((opt_display && !g.end()) || (!opt_display && g.getFrameIndex() < 30)) {
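    // The next frame is acquired and converted into matImg (elided), then ORB keypoints and
    // descriptors are computed for it and matched against the training descriptors with a
    // 2-nearest-neighbour search; Lowe's ratio test (best distance / second-best distance)
    // discards ambiguous matches.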
    std::vector<cv::KeyPoint> queryKeyPoints;
    detector->detect(matImg, queryKeyPoints);

    cv::Mat queryDescriptors;
    extractor->compute(matImg, queryKeyPoints, queryDescriptors);

    std::vector<std::vector<cv::DMatch> > knn_matches;
    std::vector<cv::DMatch> matches;
    matcher->knnMatch(queryDescriptors, trainDescriptors, knn_matches, 2);
    for (std::vector<std::vector<cv::DMatch> >::const_iterator it = knn_matches.begin(); it != knn_matches.end();
         ++it) {
      if (it->size() > 1) {
        double ratio = (*it)[0].distance / (*it)[1].distance;
        // ... the match is kept only if the ratio is below the test threshold
        matches.push_back((*it)[0]);
      }
    }
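
    // Every retained match pairs the 3D model point attached to the training keypoint with
    // the 2D position of the query keypoint; the pixel coordinates are converted to
    // normalized image coordinates before being given to the pose estimator.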
    for (std::vector<cv::DMatch>::const_iterator it = matches.begin(); it != matches.end(); ++it) {
      vpPoint pt(points3f[(size_t)(it->trainIdx)].x, points3f[(size_t)(it->trainIdx)].y,
                 points3f[(size_t)(it->trainIdx)].z);

      double x = 0.0, y = 0.0;
      vpPixelMeterConversion::convertPoint(cam, queryKeyPoints[(size_t)(it->queryIdx)].pt.x,
                                           queryKeyPoints[(size_t)(it->queryIdx)].pt.y, x, y);
      pt.set_x(x);
      pt.set_y(y);
      estimated_pose.addPoint(pt);
    }
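
    // At least 4 correspondences are required to compute a pose; RANSAC is configured to
    // reach a consensus containing at least 60% of the points before the estimate is accepted.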
    bool is_pose_estimated = false;
    if (estimated_pose.npt >= 4) {
      try {
        unsigned int nb_inliers = (unsigned int)(0.6 * estimated_pose.npt);
        estimated_pose.setRansacNbInliersToReachConsensus(nb_inliers);
        // ... the RANSAC threshold and maximum number of trials are set, then the pose is computed
        is_pose_estimated = true;
      }
      catch (...) {
        is_pose_estimated = false;
      }
    }
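
    // Imatch shows the reference image and the current frame side by side, so a matched pair
    // is drawn as a line whose right endpoint is shifted by the reference image width.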
    for (std::vector<cv::DMatch>::const_iterator it = matches.begin(); it != matches.end(); ++it) {
      vpImagePoint leftPt(trainKeyPoints[(size_t)it->trainIdx].pt.y, trainKeyPoints[(size_t)it->trainIdx].pt.x);
      vpImagePoint rightPt(queryKeyPoints[(size_t)it->queryIdx].pt.y,
                           queryKeyPoints[(size_t)it->queryIdx].pt.x + Iref.getWidth());
      vpDisplay::displayLine(Imatch, leftPt, rightPt, vpColor::green);
    }
    if (is_pose_estimated) {
      // ... the estimated pose is given to the tracker and the model is displayed on the current frame
    }
    if (opt_click_allowed && opt_display) {
      // ... a mouse click lets the user pause or stop the sequence
    }
    // ...
  }
}
int main(int argc, const char **argv)
{
  try {
    std::string env_ipath;
    bool opt_click_allowed = true;
    bool opt_display = true;

    // Read the command line options
    if (getOptions(argc, argv, opt_click_allowed, opt_display) == false) {
      return EXIT_FAILURE;
    }

    // Get the path to the ViSP image dataset
    env_ipath = vpIoTools::getViSPImagesDataPath();
    if (env_ipath.empty()) {
      std::cerr << "Please set the VISP_INPUT_IMAGE_PATH environment "
                   "variable value." << std::endl;
      return EXIT_FAILURE;
    }

    {
      vpImage<unsigned char> I, Imatch, Iref;
      std::cout << "-- Test on gray level images" << std::endl;
      run_test(env_ipath, opt_click_allowed, opt_display, I, Imatch, Iref);
    }

    {
      vpImage<vpRGBa> I, Imatch, Iref;
      std::cout << "-- Test on color images" << std::endl;
      run_test(env_ipath, opt_click_allowed, opt_display, I, Imatch, Iref);
    }
  }
  catch (const vpException &e) {
    std::cerr << e.what() << std::endl;
    return EXIT_FAILURE;
  }

  std::cout << "testKeyPoint-4 is ok !" << std::endl;
  return EXIT_SUCCESS;
}

#else
int main()
{
  std::cerr << "You need OpenCV library." << std::endl;
  return EXIT_SUCCESS;
}
#endif