Visual Servoing Platform (ViSP) version 3.6.0
Source listing: tutorial-mb-generic-tracker-apriltag-rs2.cpp
1
#include <fstream>
#include <ios>
#include <iostream>
#include <map>
#include <sstream>
#include <string>
#include <vector>

#include <visp3/core/vpXmlParserCamera.h>
#include <visp3/detection/vpDetectorAprilTag.h>
#include <visp3/gui/vpDisplayGDI.h>
#include <visp3/gui/vpDisplayOpenCV.h>
#include <visp3/gui/vpDisplayX.h>
#include <visp3/mbt/vpMbGenericTracker.h>
#include <visp3/sensor/vpRealSense2.h>
13
// States of the application's finite-state machine: wait for an AprilTag
// detection, track the cube with the MBT tracker, or quit on user click.
typedef enum { state_detection, state_tracking, state_quit } state_t;
15
// Creates a cube.cao file in your current directory.
// The cube's front face lies in the z = 0 plane; the model lists the 8
// corners followed by the 6 quad faces (CAO V1 format).
// cubeEdgeSize : size of cube edges in meters
void createCaoFile(double cubeEdgeSize)
{
  const double h = cubeEdgeSize / 2; // half edge, corners are at +/- h in x and y
  const double z = -cubeEdgeSize;    // back face depth
  // Corner table: points 0-3 on the front face, 4-7 on the back face.
  const double corners[8][3] = {
    {h, h, 0}, {h, -h, 0}, {-h, -h, 0}, {-h, h, 0},
    {-h, h, z}, {-h, -h, z}, {h, -h, z}, {h, h, z},
  };

  std::ofstream fileStream;
  fileStream.open("cube.cao", std::ofstream::out | std::ofstream::trunc);
  fileStream << "V1\n";
  fileStream << "# 3D Points\n";
  fileStream << "8 # Number of points\n";
  for (int i = 0; i < 8; ++i) {
    fileStream << corners[i][0] << " " << corners[i][1] << " " << corners[i][2];
    if (i == 0) {
      fileStream << " # Point 0: (X, Y, Z)\n";
    }
    else {
      fileStream << " # Point " << i << "\n";
    }
  }
  fileStream << "# 3D Lines\n";
  fileStream << "0 # Number of lines\n";
  fileStream << "# Faces from 3D lines\n";
  fileStream << "0 # Number of faces\n";
  fileStream << "# Faces from 3D points\n";
  fileStream << "6 # Number of faces\n";
  fileStream << "4 0 3 2 1 # Face 0: [number of points] [index of the 3D points]...\n";
  fileStream << "4 1 2 5 6\n";
  fileStream << "4 4 7 6 5\n";
  fileStream << "4 0 7 4 3\n";
  fileStream << "4 5 2 3 4\n";
  fileStream << "4 0 1 6 7 # Face 5\n";
  fileStream << "# 3D cylinders\n";
  fileStream << "0 # Number of cylinders\n";
  fileStream << "# 3D circles\n";
  fileStream << "0 # Number of circles\n";
  fileStream.close();
}
51
#if defined(VISP_HAVE_APRILTAG)
/*!
 * Run AprilTag detection on a grayscale image and, when at least one tag is
 * found, return the pose of the first detected tag.
 * \param I : Grayscale input image.
 * \param detector : AprilTag detector (family, decimation, threads already set).
 * \param tagSize : Tag side length in meters, needed for pose computation.
 * \param cam : Camera intrinsic parameters used for pose computation.
 * \param[out] cMo : Pose of the first detected tag w.r.t. the camera;
 *                   written only when a tag is detected.
 * \return state_tracking when a tag is detected, state_detection otherwise.
 */
state_t detectAprilTag(const vpImage<unsigned char> &I, vpDetectorAprilTag &detector, double tagSize,
                       const vpCameraParameters &cam, vpHomogeneousMatrix &cMo)
{
  std::vector<vpHomogeneousMatrix> cMo_vec;

  // Detection: computes one pose candidate per detected tag
  bool ret = detector.detect(I, tagSize, cam, cMo_vec);

  // Display the frame of every detected tag
  for (size_t i = 0; i < cMo_vec.size(); i++) {
    vpDisplay::displayFrame(I, cMo_vec[i], cam, tagSize / 2, vpColor::none, 3);
  }

  vpDisplay::displayText(I, 40, 20, "State: waiting tag detection", vpColor::red);

  if (ret && detector.getNbObjects() > 0) { // if tag detected, we pick the first one
    cMo = cMo_vec[0];
    return state_tracking;
  }

  return state_detection;
}
#endif // #if defined(VISP_HAVE_APRILTAG)
76
77state_t track(const vpImage<unsigned char> &I, vpMbGenericTracker &tracker, double projection_error_threshold,
79{
81 tracker.getCameraParameters(cam);
82
83 // Track the object
84 try {
85 tracker.track(I);
86 }
87 catch (...) {
88 return state_detection;
89 }
90
91 tracker.getPose(cMo);
92
93 // Detect tracking error
94 double projection_error = tracker.computeCurrentProjectionError(I, cMo, cam);
95 if (projection_error > projection_error_threshold) {
96 return state_detection;
97 }
98
99 // Display
100 tracker.display(I, cMo, cam, vpColor::red, 2);
101 vpDisplay::displayFrame(I, cMo, cam, 0.025, vpColor::none, 3);
102 vpDisplay::displayText(I, 40, 20, "State: tracking in progress", vpColor::red);
103 {
104 std::stringstream ss;
105 ss << "Features: edges " << tracker.getNbFeaturesEdge() << ", klt " << tracker.getNbFeaturesKlt();
106 vpDisplay::displayText(I, 60, 20, ss.str(), vpColor::red);
107 }
108
109 return state_tracking;
110}
111
112state_t track(std::map<std::string, const vpImage<unsigned char> *> mapOfImages,
113#ifdef VISP_HAVE_PCL
114 std::map<std::string, pcl::PointCloud<pcl::PointXYZ>::ConstPtr> mapOfPointclouds,
115#else
116 std::map<std::string, const std::vector<vpColVector> *> mapOfPointclouds,
117 std::map<std::string, unsigned int> mapOfWidths, std::map<std::string, unsigned int> mapOfHeights,
118#endif
119 const vpImage<unsigned char> &I_gray, const vpImage<unsigned char> &I_depth,
120 const vpHomogeneousMatrix &depth_M_color, vpMbGenericTracker &tracker, double projection_error_threshold,
122{
123 vpCameraParameters cam_color, cam_depth;
124 tracker.getCameraParameters(cam_color, cam_depth);
125
126 // Track the object
127 try {
128#ifdef VISP_HAVE_PCL
129 tracker.track(mapOfImages, mapOfPointclouds);
130#else
131 tracker.track(mapOfImages, mapOfPointclouds, mapOfWidths, mapOfHeights);
132#endif
133 }
134 catch (...) {
135 return state_detection;
136 }
137
138 tracker.getPose(cMo);
139
140 // Detect tracking error
141 double projection_error = tracker.computeCurrentProjectionError(I_gray, cMo, cam_color);
142 if (projection_error > projection_error_threshold) {
143 return state_detection;
144 }
145
146 // Display
147 tracker.display(I_gray, I_depth, cMo, depth_M_color * cMo, cam_color, cam_depth, vpColor::red, 3);
148 vpDisplay::displayFrame(I_gray, cMo, cam_color, 0.05, vpColor::none, 3);
149 vpDisplay::displayFrame(I_depth, depth_M_color * cMo, cam_depth, 0.05, vpColor::none, 3);
150
151 return state_tracking;
152}
153
/*!
 * Track a cube carrying an AprilTag on one face with a RealSense camera:
 * generate the cube CAD model, detect the tag to initialize the pose, then
 * run edge (optionally KLT and depth) model-based tracking until the user
 * clicks in a display window.
 */
int main(int argc, const char **argv)
{
#if defined(VISP_HAVE_APRILTAG) && defined(VISP_HAVE_REALSENSE2) && defined(VISP_HAVE_MODULE_MBT)
  // Default options, overridable from the command line below
  vpDetectorAprilTag::vpAprilTagFamily opt_tag_family = vpDetectorAprilTag::TAG_36h11;
  double opt_tag_size = 0.08;
  float opt_quad_decimate = 1.0;
  int opt_nthreads = 1;
  double opt_cube_size = 0.125; // 12.5cm by default
#ifdef VISP_HAVE_OPENCV
  bool opt_use_texture = false;
#endif
  bool opt_use_depth = false;
  double opt_projection_error_threshold = 40.;

#if !(defined(VISP_HAVE_X11) || defined(VISP_HAVE_GDI) || defined(VISP_HAVE_OPENCV))
  bool display_off = true;
#else
  bool display_off = false;
#endif

  // Parse command-line options
  for (int i = 1; i < argc; i++) {
    if (std::string(argv[i]) == "--tag_size" && i + 1 < argc) {
      opt_tag_size = atof(argv[i + 1]);
    }
    else if (std::string(argv[i]) == "--quad_decimate" && i + 1 < argc) {
      opt_quad_decimate = (float)atof(argv[i + 1]);
    }
    else if (std::string(argv[i]) == "--nthreads" && i + 1 < argc) {
      opt_nthreads = atoi(argv[i + 1]);
    }
    else if (std::string(argv[i]) == "--display_off") {
      display_off = true;
    }
    else if (std::string(argv[i]) == "--tag_family" && i + 1 < argc) {
      opt_tag_family = (vpDetectorAprilTag::vpAprilTagFamily)atoi(argv[i + 1]);
    }
    else if (std::string(argv[i]) == "--cube_size" && i + 1 < argc) {
      opt_cube_size = atof(argv[i + 1]);
#ifdef VISP_HAVE_OPENCV
    }
    else if (std::string(argv[i]) == "--texture") {
      opt_use_texture = true;
#endif
    }
    else if (std::string(argv[i]) == "--depth") {
      opt_use_depth = true;
    }
    else if (std::string(argv[i]) == "--projection_error" && i + 1 < argc) {
      opt_projection_error_threshold = atof(argv[i + 1]);
    }
    else if (std::string(argv[i]) == "--help" || std::string(argv[i]) == "-h") {
      std::cout << "Usage: " << argv[0]
                << " [--cube_size <size in m>] [--tag_size <size in m>]"
                   " [--quad_decimate <decimation>] [--nthreads <nb>]"
                   " [--tag_family <0: TAG_36h11, 1: TAG_36h10, 2: TAG_36ARTOOLKIT, "
                   " 3: TAG_25h9, 4: TAG_25h7, 5: TAG_16h5>]";
#if (defined(VISP_HAVE_X11) || defined(VISP_HAVE_GDI) || defined(VISP_HAVE_OPENCV))
      std::cout << " [--display_off]";
#endif
      std::cout << " [--texture] [--depth] [--projection_error <30 - 100>] [--help]" << std::endl;
      return EXIT_SUCCESS;
    }
  }

  // Write the cube CAD model used by the tracker into the current directory
  createCaoFile(opt_cube_size);

  try {
    // Open the RealSense grabber: RGBA color + Z16 depth, infrared disabled
    vpRealSense2 realsense;
    rs2::config config;
    int width = 640, height = 480, stream_fps = 30;
    config.enable_stream(RS2_STREAM_COLOR, width, height, RS2_FORMAT_RGBA8, stream_fps);
    config.enable_stream(RS2_STREAM_DEPTH, width, height, RS2_FORMAT_Z16, stream_fps);
    config.disable_stream(RS2_STREAM_INFRARED);
    realsense.open(config);

    // Intrinsics of both streams and the color-to-depth extrinsic transform
    vpCameraParameters cam_color, cam_depth;
    vpHomogeneousMatrix depth_M_color;
    cam_color = realsense.getCameraParameters(RS2_STREAM_COLOR, vpCameraParameters::perspectiveProjWithoutDistortion);
    if (opt_use_depth) {
      cam_depth = realsense.getCameraParameters(RS2_STREAM_DEPTH, vpCameraParameters::perspectiveProjWithoutDistortion);
      depth_M_color = realsense.getTransformation(RS2_STREAM_COLOR, RS2_STREAM_DEPTH);
    }

    vpImage<vpRGBa> I_color(height, width);
    vpImage<unsigned char> I_gray(height, width);
    vpImage<unsigned char> I_depth(height, width);
    vpImage<uint16_t> I_depth_raw(height, width);
    // Multi-camera containers: "Camera1" = color stream, "Camera2" = depth stream
    std::map<std::string, vpHomogeneousMatrix> mapOfCameraTransformations;
    std::map<std::string, const vpImage<unsigned char> *> mapOfImages;
#ifdef VISP_HAVE_PCL
    std::map<std::string, pcl::PointCloud<pcl::PointXYZ>::ConstPtr> mapOfPointclouds;
    pcl::PointCloud<pcl::PointXYZ>::Ptr pointcloud(new pcl::PointCloud<pcl::PointXYZ>());
#else
    std::map<std::string, const std::vector<vpColVector> *> mapOfPointclouds;
    std::map<std::string, unsigned int> mapOfWidths, mapOfHeights;
    std::vector<vpColVector> pointcloud;
#endif

    std::map<std::string, vpHomogeneousMatrix> mapOfCameraPoses;

    // Summarize the configuration on the console
    std::cout << "Cube size: " << opt_cube_size << std::endl;
    std::cout << "AprilTag size: " << opt_tag_size << std::endl;
    std::cout << "AprilTag family: " << opt_tag_family << std::endl;
    std::cout << "Camera parameters:" << std::endl;
    std::cout << "  Color:\n" << cam_color << std::endl;
    if (opt_use_depth)
      std::cout << "  Depth:\n" << cam_depth << std::endl;
    std::cout << "Detection: " << std::endl;
    std::cout << "  Quad decimate: " << opt_quad_decimate << std::endl;
    std::cout << "  Threads number: " << opt_nthreads << std::endl;
    std::cout << "Tracker: " << std::endl;
    std::cout << "  Use edges  : 1" << std::endl;
    std::cout << "  Use texture: "
#ifdef VISP_HAVE_OPENCV
              << opt_use_texture << std::endl;
#else
              << " na" << std::endl;
#endif
    std::cout << "  Use depth  : " << opt_use_depth << std::endl;
    std::cout << "  Projection error: " << opt_projection_error_threshold << std::endl;

    // Construct displays (one per enabled stream); owned here, deleted at exit
    vpDisplay *d_gray = NULL;
    vpDisplay *d_depth = NULL;

    if (!display_off) {
#ifdef VISP_HAVE_X11
      d_gray = new vpDisplayX(I_gray, 50, 50, "Color stream");
      if (opt_use_depth)
        d_depth = new vpDisplayX(I_depth, 80 + I_gray.getWidth(), 50, "Depth stream");
#elif defined(VISP_HAVE_GDI)
      d_gray = new vpDisplayGDI(I_gray);
      if (opt_use_depth)
        d_depth = new vpDisplayGDI(I_depth);
#elif defined(HAVE_OPENCV_HIGHGUI)
      d_gray = new vpDisplayOpenCV(I_gray);
      if (opt_use_depth)
        d_depth = new vpDisplayOpenCV(I_depth);
#endif
    }

    // Initialize AprilTag detector
    vpDetectorAprilTag detector(opt_tag_family);
    detector.setAprilTagQuadDecimate(opt_quad_decimate);
    detector.setAprilTagNbThreads(opt_nthreads);

    // Prepare MBT: choose tracker types according to the enabled features
    std::vector<int> trackerTypes;
#if defined(VISP_HAVE_OPENCV) && defined(HAVE_OPENCV_IMGPROC) && defined(HAVE_OPENCV_VIDEO)
    if (opt_use_texture)
      trackerTypes.push_back(vpMbGenericTracker::EDGE_TRACKER | vpMbGenericTracker::KLT_TRACKER);
    else
#endif
      trackerTypes.push_back(vpMbGenericTracker::EDGE_TRACKER);

    if (opt_use_depth)
      trackerTypes.push_back(vpMbGenericTracker::DEPTH_DENSE_TRACKER);

    vpMbGenericTracker tracker(trackerTypes);
    // Moving-edges settings
    vpMe me;
    me.setMaskSize(5);
    me.setMaskNumber(180);
    me.setRange(12);
    me.setLikelihoodThresholdType(vpMe::NORMALIZED_THRESHOLD);
    me.setThreshold(20);
    me.setMu1(0.5);
    me.setMu2(0.5);
    me.setSampleStep(4);
    tracker.setMovingEdge(me);

#if defined(VISP_HAVE_OPENCV) && defined(HAVE_OPENCV_IMGPROC) && defined(HAVE_OPENCV_VIDEO)
    // KLT settings (texture-based features)
    if (opt_use_texture) {
      vpKltOpencv klt_settings;
      klt_settings.setMaxFeatures(300);
      klt_settings.setWindowSize(5);
      klt_settings.setQuality(0.015);
      klt_settings.setMinDistance(8);
      klt_settings.setHarrisFreeParameter(0.01);
      klt_settings.setBlockSize(3);
      klt_settings.setPyramidLevels(3);
      tracker.setKltOpencv(klt_settings);
      tracker.setKltMaskBorder(5);
    }
#endif

    if (opt_use_depth) {
      // camera calibration params
      tracker.setCameraParameters(cam_color, cam_depth);
      // model definition: same cube model for both the color and depth trackers
      tracker.loadModel("cube.cao", "cube.cao");
      mapOfCameraTransformations["Camera2"] = depth_M_color;
      tracker.setCameraTransformationMatrix(mapOfCameraTransformations);
      tracker.setAngleAppear(vpMath::rad(70), vpMath::rad(70));
      tracker.setAngleDisappear(vpMath::rad(80), vpMath::rad(80));
    }
    else {
      // camera calibration params
      tracker.setCameraParameters(cam_color);
      // model definition
      tracker.loadModel("cube.cao");
      tracker.setAngleAppear(vpMath::rad(70));
      tracker.setAngleDisappear(vpMath::rad(80));
    }
    tracker.setDisplayFeatures(true);

    // Pose of the cube w.r.t. the color camera, shared by detection and tracking
    vpHomogeneousMatrix cMo;
    state_t state = state_detection;

    // Main loop: wait for a tag detection, then track until lost or user click
    while (state != state_quit) {
      if (opt_use_depth) {
#ifdef VISP_HAVE_PCL
        realsense.acquire((unsigned char *)I_color.bitmap, (unsigned char *)I_depth_raw.bitmap, NULL, pointcloud, NULL);
#else
        realsense.acquire((unsigned char *)I_color.bitmap, (unsigned char *)I_depth_raw.bitmap, &pointcloud, NULL,
                          NULL);
#endif
        vpImageConvert::convert(I_color, I_gray);
        vpImageConvert::createDepthHistogram(I_depth_raw, I_depth);
        vpDisplay::display(I_gray);
        vpDisplay::display(I_depth);

        mapOfImages["Camera1"] = &I_gray;
        mapOfImages["Camera2"] = &I_depth;
#ifdef VISP_HAVE_PCL
        mapOfPointclouds["Camera2"] = pointcloud;
#else
        mapOfPointclouds["Camera2"] = &pointcloud;
        mapOfWidths["Camera2"] = width;
        mapOfHeights["Camera2"] = height;
#endif
      }
      else {
        realsense.acquire(I_gray);
        vpDisplay::display(I_gray);
      }

      if (state == state_detection) {
        state = detectAprilTag(I_gray, detector, opt_tag_size, cam_color, cMo);

        // Initialize the tracker with the result of the detection
        if (state == state_tracking) {
          if (opt_use_depth) {
            mapOfCameraPoses["Camera1"] = cMo;
            mapOfCameraPoses["Camera2"] = depth_M_color * cMo;
            tracker.initFromPose(mapOfImages, mapOfCameraPoses);
          }
          else {
            tracker.initFromPose(I_gray, cMo);
          }
        }
      }

      if (state == state_tracking) {
        if (opt_use_depth) {
#ifdef VISP_HAVE_PCL
          state = track(mapOfImages, mapOfPointclouds, I_gray, I_depth, depth_M_color, tracker,
                        opt_projection_error_threshold, cMo);
#else
          state = track(mapOfImages, mapOfPointclouds, mapOfWidths, mapOfHeights, I_gray, I_depth, depth_M_color,
                        tracker, opt_projection_error_threshold, cMo);
#endif
        }
        else {
          state = track(I_gray, tracker, opt_projection_error_threshold, cMo);
        }
        {
          std::stringstream ss;
          ss << "Features: edges " << tracker.getNbFeaturesEdge() << ", klt " << tracker.getNbFeaturesKlt()
             << ", depth " << tracker.getNbFeaturesDepthDense();
          vpDisplay::displayText(I_gray, I_gray.getHeight() - 30, 20, ss.str(), vpColor::red);
        }
      }

      vpDisplay::displayText(I_gray, 20, 20, "Click to quit...", vpColor::red);
      if (vpDisplay::getClick(I_gray, false)) { // exit
        state = state_quit;
      }
      vpDisplay::flush(I_gray);

      if (opt_use_depth) {
        vpDisplay::displayText(I_depth, 20, 20, "Click to quit...", vpColor::red);
        if (vpDisplay::getClick(I_depth, false)) { // exit
          state = state_quit;
        }
        vpDisplay::flush(I_depth);
      }
    }

    if (!display_off) {
      delete d_gray;
      if (opt_use_depth)
        delete d_depth;
    }
  }
  catch (const vpException &e) {
    std::cerr << "Catch an exception: " << e.getMessage() << std::endl;
  }

  return EXIT_SUCCESS;
#else
  (void)argc;
  (void)argv;
#ifndef VISP_HAVE_APRILTAG
  std::cout << "ViSP is not build with Apriltag support" << std::endl;
#endif
#ifndef VISP_HAVE_REALSENSE2
  std::cout << "ViSP is not build with librealsense2 support" << std::endl;
#endif
  std::cout << "Install missing 3rd parties, configure and build ViSP to run this tutorial" << std::endl;
#endif
  return EXIT_SUCCESS;
}
Generic class defining intrinsic camera parameters.
@ perspectiveProjWithoutDistortion
Perspective projection without distortion model.
static const vpColor red
Definition vpColor.h:211
static const vpColor none
Definition vpColor.h:223
void setAprilTagQuadDecimate(float quadDecimate)
@ TAG_36h11
AprilTag 36h11 pattern (recommended)
void setAprilTagNbThreads(int nThreads)
bool detect(const vpImage< unsigned char > &I)
size_t getNbObjects() const
Display for windows using GDI (available on any windows 32 platform).
The vpDisplayOpenCV allows to display image using the OpenCV library. Thus to enable this class OpenC...
Use the X11 console to display images on unix-like OS. Thus to enable this class X11 should be instal...
Definition vpDisplayX.h:132
Class that defines generic functionalities for display.
Definition vpDisplay.h:173
static bool getClick(const vpImage< unsigned char > &I, bool blocking=true)
static void display(const vpImage< unsigned char > &I)
static void displayFrame(const vpImage< unsigned char > &I, const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam, double size, const vpColor &color=vpColor::none, unsigned int thickness=1, const vpImagePoint &offset=vpImagePoint(0, 0), const std::string &frameName="", const vpColor &textColor=vpColor::black, const vpImagePoint &textOffset=vpImagePoint(15, 15))
static void flush(const vpImage< unsigned char > &I)
static void displayText(const vpImage< unsigned char > &I, const vpImagePoint &ip, const std::string &s, const vpColor &color)
error that can be emitted by ViSP classes.
Definition vpException.h:59
const char * getMessage() const
Implementation of an homogeneous matrix and operations on such kind of matrices.
static void createDepthHistogram(const vpImage< uint16_t > &src_depth, vpImage< vpRGBa > &dest_rgba)
static void convert(const vpImage< unsigned char > &src, vpImage< vpRGBa > &dest)
Definition of the vpImage class member functions.
Definition vpImage.h:135
Wrapper for the KLT (Kanade-Lucas-Tomasi) feature tracker implemented in OpenCV. Thus to enable this ...
Definition vpKltOpencv.h:73
void setBlockSize(int blockSize)
void setQuality(double qualityLevel)
void setHarrisFreeParameter(double harris_k)
void setMaxFeatures(int maxCount)
void setMinDistance(double minDistance)
void setWindowSize(int winSize)
void setPyramidLevels(int pyrMaxLevel)
static double rad(double deg)
Definition vpMath.h:116
Real-time 6D object pose tracking using its CAD model.
virtual void setCameraParameters(const vpCameraParameters &camera)
virtual void getPose(vpHomogeneousMatrix &cMo) const
virtual void setDisplayFeatures(bool displayF)
virtual void setKltMaskBorder(const unsigned int &e)
virtual unsigned int getNbFeaturesEdge() const
virtual void setAngleAppear(const double &a)
virtual void initFromPose(const vpImage< unsigned char > &I, const vpHomogeneousMatrix &cMo)
virtual unsigned int getNbFeaturesKlt() const
virtual void getCameraParameters(vpCameraParameters &camera) const
virtual void setAngleDisappear(const double &a)
virtual void setMovingEdge(const vpMe &me)
virtual unsigned int getNbFeaturesDepthDense() const
virtual void setKltOpencv(const vpKltOpencv &t)
virtual double computeCurrentProjectionError(const vpImage< unsigned char > &I, const vpHomogeneousMatrix &_cMo, const vpCameraParameters &_cam)
virtual void setCameraTransformationMatrix(const std::string &cameraName, const vpHomogeneousMatrix &cameraTransformationMatrix)
virtual void loadModel(const std::string &modelFile, bool verbose=false, const vpHomogeneousMatrix &T=vpHomogeneousMatrix())
virtual void display(const vpImage< unsigned char > &I, const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam, const vpColor &col, unsigned int thickness=1, bool displayFullModel=false)
virtual void track(const vpImage< unsigned char > &I)
Definition vpMe.h:122
void setMu1(const double &mu_1)
Definition vpMe.h:353
void setSampleStep(const double &s)
Definition vpMe.h:390
void setRange(const unsigned int &r)
Definition vpMe.h:383
void setLikelihoodThresholdType(const vpLikelihoodThresholdType likelihood_threshold_type)
Definition vpMe.h:445
void setMaskSize(const unsigned int &a)
Definition vpMe.cpp:452
void setMu2(const double &mu_2)
Definition vpMe.h:360
@ NORMALIZED_THRESHOLD
Easy-to-use normalized likelihood threshold corresponding to the minimal luminance contrast to consid...
Definition vpMe.h:132
void setMaskNumber(const unsigned int &a)
Definition vpMe.cpp:445
void setThreshold(const double &t)
Definition vpMe.h:435
void acquire(vpImage< unsigned char > &grey, double *ts=NULL)
vpCameraParameters getCameraParameters(const rs2_stream &stream, vpCameraParameters::vpCameraParametersProjType type=vpCameraParameters::perspectiveProjWithDistortion, int index=-1) const
bool open(const rs2::config &cfg=rs2::config())
vpHomogeneousMatrix getTransformation(const rs2_stream &from, const rs2_stream &to, int from_index=-1) const