Visual Servoing Platform version 3.6.0
Loading...
Searching...
No Matches
testGenericTracker.cpp
/****************************************************************************
 *
 * ViSP, open source Visual Servoing Platform software.
 * Copyright (C) 2005 - 2023 by Inria. All rights reserved.
 *
 * This software is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 * See the file LICENSE.txt at the root directory of this source
 * distribution for additional information about the GNU GPL.
 *
 * For using ViSP with software that can not be combined with the GNU
 * GPL, please contact Inria about acquiring a ViSP Professional
 * Edition License.
 *
 * See https://visp.inria.fr for more information.
 *
 * This software was developed at:
 * Inria Rennes - Bretagne Atlantique
 * Campus Universitaire de Beaulieu
 * 35042 Rennes Cedex
 * France
 *
 * If you have questions regarding the use of this file, please contact
 * Inria at visp@inria.fr
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 *
 * Description:
 * Regression test for MBT.
 *
*****************************************************************************/
35
42#include <cstdlib>
43#include <iostream>
44#include <visp3/core/vpConfig.h>
45
46#if defined(VISP_HAVE_MODULE_MBT) && \
47 (defined(VISP_HAVE_LAPACK) || defined(VISP_HAVE_EIGEN3) || defined(VISP_HAVE_OPENCV))
48
49#if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11)
50#include <type_traits>
51#endif
52
53#include <visp3/core/vpFont.h>
54#include <visp3/core/vpImageDraw.h>
55#include <visp3/core/vpIoTools.h>
56#include <visp3/gui/vpDisplayD3D.h>
57#include <visp3/gui/vpDisplayGDI.h>
58#include <visp3/gui/vpDisplayGTK.h>
59#include <visp3/gui/vpDisplayOpenCV.h>
60#include <visp3/gui/vpDisplayX.h>
61#include <visp3/io/vpImageIo.h>
62#include <visp3/io/vpParseArgv.h>
63#include <visp3/mbt/vpMbGenericTracker.h>
64
65#define GETOPTARGS "i:dsclt:e:DmCh"
66
67namespace
68{
/*!
  Print the command-line usage of this regression test on stdout.

  \param name : Program name (argv[0]).
  \param badparam : Faulty command-line parameter to report, or NULL when
  there is nothing to report.
*/
void usage(const char *name, const char *badparam)
{
  fprintf(stdout, "\n\
Regression test for vpGenericTracker.\n\
\n\
SYNOPSIS\n\
  %s [-i <test image path>] [-c] [-d] [-s] [-h] [-l] \n\
 [-t <tracker type>] [-e <last frame index>] [-D] [-m] [-C]\n",
          name);

  fprintf(stdout, "\n\
OPTIONS: \n\
  -i <input image path> \n\
     Set image input path.\n\
     These images come from ViSP-images-x.y.z.tar.gz available \n\
     on the ViSP website.\n\
     Setting the VISP_INPUT_IMAGE_PATH environment\n\
     variable produces the same behavior than using\n\
     this option.\n\
\n\
  -d \n\
     Turn off the display.\n\
\n\
  -s \n\
     If display is turn off, tracking results are saved in a video folder.\n\
\n\
  -c\n\
     Disable the mouse click. Useful to automate the \n\
     execution of this program without human intervention.\n\
\n\
  -t <tracker type>\n\
     Set tracker type (<1 (Edge)>, <2 (KLT)>, <3 (both)>) for color sensor.\n\
\n\
  -l\n\
     Use the scanline for visibility tests.\n\
\n\
  -e <last frame index>\n\
     Specify the index of the last frame. Once reached, the tracking is stopped.\n\
\n\
  -D \n\
     Use depth.\n\
\n\
  -m \n\
     Set a tracking mask.\n\
\n\
  -C \n\
     Use color images.\n\
\n\
  -h \n\
     Print the help.\n\n");

  if (badparam)
    fprintf(stdout, "\nERROR: Bad parameter [%s]\n", badparam);
}
123
124bool getOptions(int argc, const char **argv, std::string &ipath, bool &click_allowed, bool &display, bool &save,
125 bool &useScanline, int &trackerType, int &lastFrame, bool &use_depth, bool &use_mask,
126 bool &use_color_image)
127{
128 const char *optarg_;
129 int c;
130 while ((c = vpParseArgv::parse(argc, argv, GETOPTARGS, &optarg_)) > 1) {
131
132 switch (c) {
133 case 'i':
134 ipath = optarg_;
135 break;
136 case 'c':
137 click_allowed = false;
138 break;
139 case 'd':
140 display = false;
141 break;
142 case 's':
143 save = true;
144 break;
145 case 'l':
146 useScanline = true;
147 break;
148 case 't':
149 trackerType = atoi(optarg_);
150 break;
151 case 'e':
152 lastFrame = atoi(optarg_);
153 break;
154 case 'D':
155 use_depth = true;
156 break;
157 case 'm':
158 use_mask = true;
159 break;
160 case 'C':
161 use_color_image = true;
162 break;
163 case 'h':
164 usage(argv[0], NULL);
165 return false;
166 break;
167
168 default:
169 usage(argv[0], optarg_);
170 return false;
171 break;
172 }
173 }
174
175 if ((c == 1) || (c == -1)) {
176 // standalone param or error
177 usage(argv[0], NULL);
178 std::cerr << "ERROR: " << std::endl;
179 std::cerr << " Bad argument " << optarg_ << std::endl << std::endl;
180 return false;
181 }
182
183 return true;
184}
185
186template <typename Type>
187bool read_data(const std::string &input_directory, int cpt, const vpCameraParameters &cam_depth, vpImage<Type> &I,
188 vpImage<uint16_t> &I_depth, std::vector<vpColVector> &pointcloud, vpHomogeneousMatrix &cMo)
189{
190#if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11)
191 static_assert(std::is_same<Type, unsigned char>::value || std::is_same<Type, vpRGBa>::value,
192 "Template function supports only unsigned char and vpRGBa images!");
193#endif
194#if VISP_HAVE_DATASET_VERSION >= 0x030600
195 std::string ext("png");
196#else
197 std::string ext("pgm");
198#endif
199 char buffer[FILENAME_MAX];
200 snprintf(buffer, FILENAME_MAX, std::string(input_directory + "/Images/Image_%04d." + ext).c_str(), cpt);
201 std::string image_filename = buffer;
202
203 snprintf(buffer, FILENAME_MAX, std::string(input_directory + "/Depth/Depth_%04d.bin").c_str(), cpt);
204 std::string depth_filename = buffer;
205
206 snprintf(buffer, FILENAME_MAX, std::string(input_directory + "/CameraPose/Camera_%03d.txt").c_str(), cpt);
207 std::string pose_filename = buffer;
208
209 if (!vpIoTools::checkFilename(image_filename) || !vpIoTools::checkFilename(depth_filename) ||
210 !vpIoTools::checkFilename(pose_filename))
211 return false;
212
213 vpImageIo::read(I, image_filename);
214
215 unsigned int depth_width = 0, depth_height = 0;
216 std::ifstream file_depth(depth_filename.c_str(), std::ios::in | std::ios::binary);
217 if (!file_depth.is_open())
218 return false;
219
220 vpIoTools::readBinaryValueLE(file_depth, depth_height);
221 vpIoTools::readBinaryValueLE(file_depth, depth_width);
222 I_depth.resize(depth_height, depth_width);
223 pointcloud.resize(depth_height * depth_width);
224
225 const float depth_scale = 0.000030518f;
226 for (unsigned int i = 0; i < I_depth.getHeight(); i++) {
227 for (unsigned int j = 0; j < I_depth.getWidth(); j++) {
228 vpIoTools::readBinaryValueLE(file_depth, I_depth[i][j]);
229 double x = 0.0, y = 0.0, Z = I_depth[i][j] * depth_scale;
230 vpPixelMeterConversion::convertPoint(cam_depth, j, i, x, y);
231 vpColVector pt3d(4, 1.0);
232 pt3d[0] = x * Z;
233 pt3d[1] = y * Z;
234 pt3d[2] = Z;
235 pointcloud[i * I_depth.getWidth() + j] = pt3d;
236 }
237 }
238
239 std::ifstream file_pose(pose_filename.c_str());
240 if (!file_pose.is_open()) {
241 return false;
242 }
243
244 for (unsigned int i = 0; i < 4; i++) {
245 for (unsigned int j = 0; j < 4; j++) {
246 file_pose >> cMo[i][j];
247 }
248 }
249
250 return true;
251}
252
253void convert(const vpImage<vpRGBa> &src, vpImage<vpRGBa> &dst) { dst = src; }
254
255void convert(const vpImage<unsigned char> &src, vpImage<vpRGBa> &dst) { vpImageConvert::convert(src, dst); }
256
/*!
  Run the model-based tracking regression test on the mbt-depth/Castle-simu
  sequence: track the castle model frame by frame (color/grayscale camera plus
  optional depth camera) and compare the estimated pose against the stored
  ground truth.

  NOTE(review): this extract is missing several original source lines
  (Doxygen extraction artifact); each gap is flagged inline below and must be
  restored from the upstream file before compiling.

  \tparam Type : Image pixel type; only unsigned char and vpRGBa are supported.
  \return Although declared bool, the function returns EXIT_SUCCESS /
  EXIT_FAILURE integers (implicitly converted to false/true); main() converts
  them back to an int exit code, so success maps to 0 as intended.
*/
template <typename Type>
bool run(const std::string &input_directory, bool opt_click_allowed, bool opt_display, bool useScanline,
         int trackerType_image, int opt_lastFrame, bool use_depth, bool use_mask, bool save)
{
#if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11)
  static_assert(std::is_same<Type, unsigned char>::value || std::is_same<Type, vpRGBa>::value,
                "Template function supports only unsigned char and vpRGBa images!");
#endif
  // Initialise a display
#if defined(VISP_HAVE_X11)
  vpDisplayX display1, display2;
#elif defined(VISP_HAVE_GDI)
  vpDisplayGDI display1, display2;
#elif defined(HAVE_OPENCV_HIGHGUI)
  vpDisplayOpenCV display1, display2;
#elif defined(VISP_HAVE_D3D9)
  vpDisplayD3D display1, display2;
#elif defined(VISP_HAVE_GTK)
  vpDisplayGTK display1, display2;
#else
  // No display backend built in: force the headless code path.
  opt_display = false;
#endif

  // Two-camera tracker: slot 0 = image tracker, slot 1 = depth tracker.
  std::vector<int> tracker_type(2);
  tracker_type[0] = trackerType_image;
  // NOTE(review): the line initializing tracker_type[1] (orig. line 282) is
  // missing from this extract — presumably
  // tracker_type[1] = vpMbGenericTracker::DEPTH_DENSE_TRACKER; confirm
  // against the upstream file.
  vpMbGenericTracker tracker(tracker_type);
  std::string configFileCam1 = input_directory + std::string("/Config/chateau.xml");
  std::string configFileCam2 = input_directory + std::string("/Config/chateau_depth.xml");
  std::cout << "Load config file for camera 1: " << configFileCam1 << std::endl;
  std::cout << "Load config file for camera 2: " << configFileCam2 << std::endl;
  tracker.loadConfigFile(configFileCam1, configFileCam2);
#if 0
  // Corresponding parameters manually set to have an example code
  {
    vpCameraParameters cam_color, cam_depth;
    cam_color.initPersProjWithoutDistortion(700.0, 700.0, 320.0, 240.0);
    cam_depth.initPersProjWithoutDistortion(700.0, 700.0, 320.0, 240.0);
    tracker.setCameraParameters(cam_color, cam_depth);
  }

  // Edge
  vpMe me;
  me.setMaskSize(5);
  me.setMaskNumber(180);
  me.setRange(8);
  // NOTE(review): orig. line 303 is missing here (likely a
  // me.setLikelihoodThresholdType(...) call) — confirm upstream.
  me.setThreshold(5);
  me.setMu1(0.5);
  me.setMu2(0.5);
  me.setSampleStep(5);
  tracker.setMovingEdge(me);

  // Klt
#if defined(VISP_HAVE_MODULE_KLT) && defined(VISP_HAVE_OPENCV) && defined(HAVE_OPENCV_IMGPROC) && defined(HAVE_OPENCV_VIDEO)
  vpKltOpencv klt;
  tracker.setKltMaskBorder(5);
  klt.setMaxFeatures(10000);
  klt.setWindowSize(5);
  klt.setQuality(0.01);
  klt.setMinDistance(5);
  klt.setHarrisFreeParameter(0.02);
  klt.setBlockSize(3);
  klt.setPyramidLevels(3);

  tracker.setKltOpencv(klt);
#endif

  // Depth
  tracker.setDepthNormalFeatureEstimationMethod(vpMbtFaceDepthNormal::ROBUST_FEATURE_ESTIMATION);
  tracker.setDepthNormalPclPlaneEstimationMethod(2);
  tracker.setDepthNormalPclPlaneEstimationRansacMaxIter(200);
  tracker.setDepthNormalPclPlaneEstimationRansacThreshold(0.001);
  tracker.setDepthNormalSamplingStep(2, 2);

  tracker.setDepthDenseSamplingStep(4, 4);

  tracker.setAngleAppear(vpMath::rad(85.0));
  tracker.setAngleDisappear(vpMath::rad(89.0));
  tracker.setNearClippingDistance(0.01);
  tracker.setFarClippingDistance(2.0);
  tracker.setClipping(tracker.getClipping() | vpMbtPolygon::FOV_CLIPPING);
#endif

#ifdef VISP_HAVE_COIN3D
  tracker.loadModel(input_directory + "/Models/chateau.wrl", input_directory + "/Models/chateau.cao");
#else
  tracker.loadModel(input_directory + "/Models/chateau.cao", input_directory + "/Models/chateau.cao");
#endif
  // NOTE(review): the declaration of T (orig. line 346, presumably
  // vpHomogeneousMatrix T;) is missing from this extract.
  // The matrix below places the additional cube model in the scene.
  T[0][0] = -1;
  T[0][3] = -0.2;
  T[1][1] = 0;
  T[1][2] = 1;
  T[1][3] = 0.12;
  T[2][1] = 1;
  T[2][2] = 0;
  T[2][3] = -0.15;
  tracker.loadModel(input_directory + "/Models/cube.cao", false, T);
  vpCameraParameters cam_color, cam_depth;
  tracker.getCameraParameters(cam_color, cam_depth);
  tracker.setDisplayFeatures(true);
  tracker.setScanLineVisibilityTest(useScanline);

  // Per-configuration accuracy thresholds: key = tracker-type bitmask,
  // value = (max translation error [m], max rotation error [deg]).
  std::map<int, std::pair<double, double> > map_thresh;
  // Take the highest thresholds between all CI machines
  // NOTE(review): throughout this section the left-hand sides of the
  // map_thresh assignments (map_thresh[<tracker type combination>] =, orig.
  // lines 364-396) are missing from this extract; only the right-hand-side
  // conditional expressions remain. Restore them from the upstream file.
#ifdef VISP_HAVE_COIN3D
    useScanline ? std::pair<double, double>(0.005, 3.9) : std::pair<double, double>(0.007, 3.9);
#if defined(VISP_HAVE_MODULE_KLT) && defined(VISP_HAVE_OPENCV) && defined(HAVE_OPENCV_IMGPROC) && defined(HAVE_OPENCV_VIDEO)
    useScanline ? std::pair<double, double>(0.007, 1.9) : std::pair<double, double>(0.007, 1.8);
    useScanline ? std::pair<double, double>(0.005, 3.7) : std::pair<double, double>(0.006, 3.4);
#endif
    useScanline ? std::pair<double, double>(0.003, 1.7) : std::pair<double, double>(0.002, 0.8);
#if defined(VISP_HAVE_MODULE_KLT) && defined(VISP_HAVE_OPENCV) && defined(HAVE_OPENCV_IMGPROC) && defined(HAVE_OPENCV_VIDEO)
    std::pair<double, double>(0.002, 0.3);
    useScanline ? std::pair<double, double>(0.002, 1.8) : std::pair<double, double>(0.002, 0.7);
#endif
#else
    useScanline ? std::pair<double, double>(0.008, 2.3) : std::pair<double, double>(0.007, 2.1);
#if defined(VISP_HAVE_MODULE_KLT) && defined(VISP_HAVE_OPENCV) && defined(HAVE_OPENCV_IMGPROC) && defined(HAVE_OPENCV_VIDEO)
    useScanline ? std::pair<double, double>(0.006, 1.7) : std::pair<double, double>(0.005, 1.4);
    useScanline ? std::pair<double, double>(0.004, 1.2) : std::pair<double, double>(0.004, 1.2);
#endif
    useScanline ? std::pair<double, double>(0.002, 0.7) : std::pair<double, double>(0.001, 0.4);
#if defined(VISP_HAVE_MODULE_KLT) && defined(VISP_HAVE_OPENCV) && defined(HAVE_OPENCV_IMGPROC) && defined(HAVE_OPENCV_VIDEO)
    std::pair<double, double>(0.002, 0.3);
    useScanline ? std::pair<double, double>(0.001, 0.5) : std::pair<double, double>(0.001, 0.4);
#endif
#endif

  vpImage<Type> I, I_depth;
  vpImage<uint16_t> I_depth_raw;
  vpHomogeneousMatrix cMo_truth;
  std::vector<vpColVector> pointcloud;
  int cpt_frame = 1;
  if (!read_data(input_directory, cpt_frame, cam_depth, I, I_depth_raw, pointcloud, cMo_truth)) {
    std::cerr << "Cannot read first frame!" << std::endl;
    // NOTE(review): EXIT_FAILURE (1) converts to true here; main() converts
    // it back to 1, so the exit code is correct despite the bool return type.
    return EXIT_FAILURE;
  }

  // Optional rectangular tracking mask covering roughly the central
  // [1/7, 6/7] band of the image in both directions.
  vpImage<bool> mask(I.getHeight(), I.getWidth());
  const double roi_step = 7.0;
  const double roi_step2 = 6.0;
  if (use_mask) {
    mask = false;
    for (unsigned int i = (unsigned int)(I.getRows() / roi_step);
         i < (unsigned int)(I.getRows() * roi_step2 / roi_step); i++) {
      for (unsigned int j = (unsigned int)(I.getCols() / roi_step);
           j < (unsigned int)(I.getCols() * roi_step2 / roi_step); j++) {
        mask[i][j] = true;
      }
    }
    tracker.setMask(mask);
  }

  vpImageConvert::createDepthHistogram(I_depth_raw, I_depth);

  // Off-screen buffers used when saving results instead of displaying them:
  // color image on the left, depth visualization on the right.
  vpImage<vpRGBa> results(I.getHeight(), I.getWidth() + I_depth.getWidth());
  vpImage<vpRGBa> resultsColor(I.getHeight(), I.getWidth());
  vpImage<vpRGBa> resultsDepth(I_depth.getHeight(), I_depth.getWidth());
  if (save) {
    vpIoTools::makeDirectory("results");
  }
  if (opt_display) {
#ifdef VISP_HAVE_DISPLAY
    display1.init(I, 0, 0, "Image");
    display2.init(I_depth, (int)I.getWidth(), 0, "Depth");
#endif
  }

  // Fixed extrinsic transform between the color and depth cameras.
  vpHomogeneousMatrix depth_M_color;
  depth_M_color[0][3] = -0.05;
  tracker.setCameraTransformationMatrix("Camera2", depth_M_color);
  tracker.initFromPose(I, cMo_truth);

  vpFont font(24);
  bool click = false, quit = false, correct_accuracy = true;
  std::vector<double> vec_err_t, vec_err_tu;
  std::vector<double> time_vec;
  // Main tracking loop: one iteration per dataset frame until the data runs
  // out, the user quits, or opt_lastFrame is reached.
  while (read_data(input_directory, cpt_frame, cam_depth, I, I_depth_raw, pointcloud, cMo_truth) && !quit &&
         (opt_lastFrame > 0 ? (int)cpt_frame <= opt_lastFrame : true)) {
    vpImageConvert::createDepthHistogram(I_depth_raw, I_depth);

    if (opt_display) {
      // NOTE(review): orig. line 455 is missing here (presumably
      // vpDisplay::display(I);).
      vpDisplay::display(I_depth);
    }
    else if (save) {
      convert(I, resultsColor);
      convert(I_depth, resultsDepth);
    }

    double t = vpTime::measureTimeMs();
    // Feed the tracker through its multi-sensor map interface; setting the
    // depth width/height to 0 disables the depth sensor contribution.
    std::map<std::string, const vpImage<Type> *> mapOfImages;
    mapOfImages["Camera1"] = &I;
    std::map<std::string, const std::vector<vpColVector> *> mapOfPointclouds;
    mapOfPointclouds["Camera2"] = &pointcloud;
    std::map<std::string, unsigned int> mapOfWidths, mapOfHeights;
    if (!use_depth) {
      mapOfWidths["Camera2"] = 0;
      mapOfHeights["Camera2"] = 0;
    }
    else {
      mapOfWidths["Camera2"] = I_depth.getWidth();
      mapOfHeights["Camera2"] = I_depth.getHeight();
    }

    tracker.track(mapOfImages, mapOfPointclouds, mapOfWidths, mapOfHeights);
    vpHomogeneousMatrix cMo = tracker.getPose();
    t = vpTime::measureTimeMs() - t;
    time_vec.push_back(t);

    if (opt_display) {
      tracker.display(I, I_depth, cMo, depth_M_color * cMo, cam_color, cam_depth, vpColor::red, 3);
      vpDisplay::displayFrame(I, cMo, cam_depth, 0.05, vpColor::none, 3);
      vpDisplay::displayFrame(I_depth, depth_M_color * cMo, cam_depth, 0.05, vpColor::none, 3);

      std::stringstream ss;
      ss << "Frame: " << cpt_frame;
      vpDisplay::displayText(I_depth, 20, 20, ss.str(), vpColor::red);
      ss.str("");
      ss << "Nb features: " << tracker.getError().getRows();
      vpDisplay::displayText(I_depth, 40, 20, ss.str(), vpColor::red);
    }
    else if (save) {
      // Headless rendering: draw model edges and features into the result
      // buffers instead of a display window.
      // NOTE(review): orig. line 496 is missing here (extraction gap).
      std::map<std::string, std::vector<std::vector<double> > > mapOfModels;
      std::map<std::string, unsigned int> mapOfW;
      mapOfW["Camera1"] = I.getWidth();
      // NOTE(review): the two lines below mix widths and heights
      // (mapOfW["Camera2"] gets a height, mapOfH gets widths) — looks
      // suspicious; confirm against the upstream file before "fixing".
      mapOfW["Camera2"] = I.getHeight();
      std::map<std::string, unsigned int> mapOfH;
      mapOfH["Camera1"] = I_depth.getWidth();
      mapOfH["Camera2"] = I_depth.getHeight();
      std::map<std::string, vpHomogeneousMatrix> mapOfcMos;
      mapOfcMos["Camera1"] = cMo;
      mapOfcMos["Camera2"] = depth_M_color * cMo;
      std::map<std::string, vpCameraParameters> mapOfCams;
      mapOfCams["Camera1"] = cam_color;
      mapOfCams["Camera2"] = cam_depth;
      tracker.getModelForDisplay(mapOfModels, mapOfW, mapOfH, mapOfcMos, mapOfCams);
      for (std::map<std::string, std::vector<std::vector<double> > >::const_iterator it = mapOfModels.begin();
           it != mapOfModels.end(); ++it) {
        for (size_t i = 0; i < it->second.size(); i++) {
          // test if it->second[i][0] = 0
          if (std::fabs(it->second[i][0]) <= std::numeric_limits<double>::epsilon()) {
            vpImageDraw::drawLine(it->first == "Camera1" ? resultsColor : resultsDepth,
                                  vpImagePoint(it->second[i][1], it->second[i][2]),
                                  vpImagePoint(it->second[i][3], it->second[i][4]), vpColor::red, 3);
          }
        }
      }
      // NOTE(review): orig. lines 522 and 524 are missing around here
      // (extraction gap — likely a blank line and a comment).

      std::map<std::string, std::vector<std::vector<double> > > mapOfFeatures;
      tracker.getFeaturesForDisplay(mapOfFeatures);
      for (std::map<std::string, std::vector<std::vector<double> > >::const_iterator it = mapOfFeatures.begin();
           it != mapOfFeatures.end(); ++it) {
        for (size_t i = 0; i < it->second.size(); i++) {
          if (std::fabs(it->second[i][0]) <=
              std::numeric_limits<double>::epsilon()) { // test it->second[i][0] = 0 for ME
            // Moving-edge feature: color encodes the ME site state stored
            // in field [3].
            vpColor color = vpColor::yellow;
            if (std::fabs(it->second[i][3]) <= std::numeric_limits<double>::epsilon()) { // test it->second[i][3] = 0
              color = vpColor::green;
            }
            else if (std::fabs(it->second[i][3] - 1) <=
                     std::numeric_limits<double>::epsilon()) { // test it->second[i][3] = 1
              color = vpColor::blue;
            }
            else if (std::fabs(it->second[i][3] - 2) <=
                     std::numeric_limits<double>::epsilon()) { // test it->second[i][3] = 2
              color = vpColor::purple;
            }
            else if (std::fabs(it->second[i][3] - 3) <=
                     std::numeric_limits<double>::epsilon()) { // test it->second[i][3] = 3
              color = vpColor::red;
            }
            else if (std::fabs(it->second[i][3] - 4) <=
                     std::numeric_limits<double>::epsilon()) { // test it->second[i][3] = 4
              color = vpColor::cyan;
            }
            vpImageDraw::drawCross(it->first == "Camera1" ? resultsColor : resultsDepth,
                                   vpImagePoint(it->second[i][1], it->second[i][2]), 3, color, 1);
          }
          else if (std::fabs(it->second[i][0] - 1) <=
                   std::numeric_limits<double>::epsilon()) { // test it->second[i][0] = 1 for KLT
            vpImageDraw::drawCross(it->first == "Camera1" ? resultsColor : resultsDepth,
                                   vpImagePoint(it->second[i][1], it->second[i][2]), 10, vpColor::red, 1);
          }
        }
      }
      // NOTE(review): orig. line 562 is missing here (extraction gap).

      // Computation time
      std::ostringstream oss;
      oss << "Tracking time: " << t << " ms";
      font.drawText(resultsColor, oss.str(), vpImagePoint(20, 20), vpColor::red);
    }

    // Compare the estimated pose against the ground truth: split both poses
    // into translation (t) and axis-angle rotation (tu) components.
    vpPoseVector pose_est(cMo);
    vpPoseVector pose_truth(cMo_truth);
    vpColVector t_est(3), t_truth(3);
    vpColVector tu_est(3), tu_truth(3);
    for (unsigned int i = 0; i < 3; i++) {
      t_est[i] = pose_est[i];
      t_truth[i] = pose_truth[i];
      tu_est[i] = pose_est[i + 3];
      tu_truth[i] = pose_truth[i + 3];
    }

    vpColVector t_err = t_truth - t_est, tu_err = tu_truth - tu_est;
    const double t_thresh =
      map_thresh[!use_depth ? trackerType_image : trackerType_image | vpMbGenericTracker::DEPTH_DENSE_TRACKER].first;
    const double tu_thresh =
      map_thresh[!use_depth ? trackerType_image : trackerType_image | vpMbGenericTracker::DEPTH_DENSE_TRACKER].second;
    double t_err2 = sqrt(t_err.sumSquare()), tu_err2 = vpMath::deg(sqrt(tu_err.sumSquare()));
    vec_err_t.push_back(t_err2);
    vec_err_tu.push_back(tu_err2);
    if (!use_mask && (t_err2 > t_thresh || tu_err2 > tu_thresh)) { // no accuracy test with mask
      std::cerr << "Pose estimated exceeds the threshold (t_thresh = " << t_thresh << " ; tu_thresh = " << tu_thresh
                << ")!" << std::endl;
      std::cout << "t_err: " << t_err2 << " ; tu_err: " << tu_err2 << std::endl;
      correct_accuracy = false;
    }

    if (opt_display) {
      if (use_mask) {
        vpRect roi(vpImagePoint(I.getRows() / roi_step, I.getCols() / roi_step),
                   vpImagePoint(I.getRows() * roi_step2 / roi_step, I.getCols() * roi_step2 / roi_step));
        // NOTE(review): orig. lines 600-601 are missing here (likely
        // vpDisplay::displayRectangle calls drawing the ROI).
      }

      // NOTE(review): orig. line 604 is missing here (presumably
      // vpDisplay::flush(I);).
      vpDisplay::flush(I_depth);
    }
    else if (save) {
      // NOTE(review): orig. line 608 is missing here (extraction gap).
      char buffer[FILENAME_MAX];
      std::ostringstream oss;
      oss << "results/image_%04d.png";
      snprintf(buffer, FILENAME_MAX, oss.str().c_str(), cpt_frame);

      // Side-by-side composite: color result on the left, depth on the right.
      results.insert(resultsColor, vpImagePoint());
      results.insert(resultsDepth, vpImagePoint(0, resultsColor.getWidth()));

      vpImageIo::write(results, buffer);
      // NOTE(review): orig. line 618 is missing here (extraction gap).
    }

    if (opt_display && opt_click_allowed) {
      // NOTE(review): orig. line 622 is missing here (presumably the
      // declaration vpMouseButton::vpMouseButtonType button;).
      if (vpDisplay::getClick(I, button, click)) {
        switch (button) {
          // NOTE(review): orig. line 625 (a case label, presumably
          // vpMouseButton::button1) is missing here.
          quit = !click;
          break;

          // NOTE(review): orig. line 629 (a case label, presumably
          // vpMouseButton::button3) is missing here.
          click = !click;
          break;

        default:
          break;
        }
      }
    }

    cpt_frame++;
  }

  // Final statistics over the whole sequence.
  if (!time_vec.empty())
    std::cout << "Computation time, Mean: " << vpMath::getMean(time_vec)
              << " ms ; Median: " << vpMath::getMedian(time_vec) << " ms ; Std: " << vpMath::getStdev(time_vec) << " ms"
              << std::endl;

  if (!vec_err_t.empty())
    std::cout << "Max translation error: " << *std::max_element(vec_err_t.begin(), vec_err_t.end()) << std::endl;

  if (!vec_err_tu.empty())
    std::cout << "Max thetau error: " << *std::max_element(vec_err_tu.begin(), vec_err_tu.end()) << std::endl;

  std::cout << "Test result: " << (correct_accuracy ? "success" : "failure") << std::endl;
  // NOTE(review): EXIT_SUCCESS/EXIT_FAILURE through a bool return — works
  // because main() forwards the converted value as its int exit code.
  return correct_accuracy ? EXIT_SUCCESS : EXIT_FAILURE;
}
656} // namespace
657
/*!
  Test entry point: parse the command line, check that the requested tracker
  features are available in this build, locate the mbt-depth/Castle-simu
  dataset and run the regression test with grayscale or color images.
*/
int main(int argc, const char *argv [])
{
  try {
    std::string env_ipath;
    std::string opt_ipath = "";
    bool opt_click_allowed = true;
    bool opt_display = true;
    bool opt_save = false;
    bool useScanline = false;
    int trackerType_image = vpMbGenericTracker::EDGE_TRACKER;
#if defined(__mips__) || defined(__mips) || defined(mips) || defined(__MIPS__)
    // To avoid Debian test timeout
    int opt_lastFrame = 5;
#else
    int opt_lastFrame = -1;
#endif
    bool use_depth = false;
    bool use_mask = false;
    bool use_color_image = false;

    // Get the visp-images-data package path or VISP_INPUT_IMAGE_PATH
    // environment variable value
    // NOTE(review): the statement initializing env_ipath (orig. line 680,
    // presumably env_ipath = vpIoTools::getViSPImagesDataPath();) is missing
    // from this extract — restore it from the upstream file.

    // Read the command line options
    if (!getOptions(argc, argv, opt_ipath, opt_click_allowed, opt_display, opt_save, useScanline, trackerType_image,
                    opt_lastFrame, use_depth, use_mask, use_color_image)) {
      return EXIT_FAILURE;
    }

#if ! (defined(VISP_HAVE_MODULE_KLT) && defined(VISP_HAVE_OPENCV) && defined(HAVE_OPENCV_IMGPROC) && defined(HAVE_OPENCV_VIDEO))
    // KLT-based tracker types (2 and 3) need the OpenCV imgproc and video
    // modules: skip the test (exit success) rather than fail without them.
    if (trackerType_image == 2 || trackerType_image == 3) {
      std::cout << "Using klt tracker is not possible without OpenCV imgproc and video modules." << std::endl;
      std::cout << "Use rather command line option -t 1 to use edges." << std::endl;
      return EXIT_SUCCESS;
    }
#endif
    std::cout << "trackerType_image: " << trackerType_image << std::endl;
    std::cout << "useScanline: " << useScanline << std::endl;
    std::cout << "use_depth: " << use_depth << std::endl;
    std::cout << "use_mask: " << use_mask << std::endl;
    std::cout << "use_color_image: " << use_color_image << std::endl;
#ifdef VISP_HAVE_COIN3D
    std::cout << "COIN3D available." << std::endl;
#endif

#if !defined(VISP_HAVE_MODULE_KLT) || (!defined(VISP_HAVE_OPENCV) || (VISP_HAVE_OPENCV_VERSION < 0x020100))
    // Second, partially redundant KLT availability guard (bitmask test on
    // the KLT bit of the tracker type).
    if (trackerType_image & 2) {
      std::cout << "KLT features cannot be used: ViSP is not built with "
                   "KLT module or OpenCV is not available.\nTest is not run."
                << std::endl;
      return EXIT_SUCCESS;
    }
#endif

    // Test if an input path is set
    if (opt_ipath.empty() && env_ipath.empty()) {
      usage(argv[0], NULL);
      std::cerr << std::endl << "ERROR:" << std::endl;
      std::cerr << " Use -i <visp image path> option or set VISP_INPUT_IMAGE_PATH " << std::endl
                << " environment variable to specify the location of the " << std::endl
                << " image path where test images are located." << std::endl
                << std::endl;

      return EXIT_FAILURE;
    }

    std::string input_directory =
      vpIoTools::createFilePath(!opt_ipath.empty() ? opt_ipath : env_ipath, "mbt-depth/Castle-simu");
    if (!vpIoTools::checkDirectory(input_directory)) {
      // Missing dataset is treated as "skip", not "fail".
      std::cerr << "ViSP-images does not contain the folder: " << input_directory << "!" << std::endl;
      return EXIT_SUCCESS;
    }

    if (use_color_image) {
      return run<vpRGBa>(input_directory, opt_click_allowed, opt_display, useScanline, trackerType_image, opt_lastFrame,
                         use_depth, use_mask, opt_save);
    }
    else {
      return run<unsigned char>(input_directory, opt_click_allowed, opt_display, useScanline, trackerType_image,
                                opt_lastFrame, use_depth, use_mask, opt_save);
    }

    // NOTE(review): unreachable — both branches above return.
    std::cout << "Test succeed" << std::endl;
    return EXIT_SUCCESS;
  }
  catch (const vpException &e) {
    std::cout << "Catch an exception: " << e << std::endl;
    return EXIT_FAILURE;
  }
}
#elif !(defined(VISP_HAVE_LAPACK) || defined(VISP_HAVE_EIGEN3) || defined(VISP_HAVE_OPENCV))
// Fallback build: no linear-algebra backend (Lapack/Eigen3/OpenCV) is
// available, so the tracker cannot run; exit with success so CI treats this
// configuration as "skipped".
int main()
{
  std::cout << "Cannot run this example: install Lapack, Eigen3 or OpenCV" << std::endl;
  return EXIT_SUCCESS;
}
#else
// Fallback build: the MBT module itself is disabled in this build.
int main()
{
  std::cout << "Enable MBT module (VISP_HAVE_MODULE_MBT) to launch this test." << std::endl;
  return EXIT_SUCCESS;
}
#endif
Generic class defining intrinsic camera parameters.
void initPersProjWithoutDistortion(double px, double py, double u0, double v0)
Implementation of column vector and the associated operations.
double sumSquare() const
Class to define RGB colors available for display functionalities.
Definition vpColor.h:152
static const vpColor red
Definition vpColor.h:211
static const vpColor cyan
Definition vpColor.h:220
static const vpColor none
Definition vpColor.h:223
static const vpColor blue
Definition vpColor.h:217
static const vpColor purple
Definition vpColor.h:222
static const vpColor yellow
Definition vpColor.h:219
static const vpColor green
Definition vpColor.h:214
Display for windows using Direct3D 3rd party. Thus to enable this class Direct3D should be installed....
Display for windows using GDI (available on any windows 32 platform).
The vpDisplayGTK allows to display image using the GTK 3rd party library. Thus to enable this class G...
The vpDisplayOpenCV allows to display image using the OpenCV library. Thus to enable this class OpenC...
Use the X11 console to display images on unix-like OS. Thus to enable this class X11 should be instal...
Definition vpDisplayX.h:132
void init(vpImage< unsigned char > &I, int win_x=-1, int win_y=-1, const std::string &win_title="")
static bool getClick(const vpImage< unsigned char > &I, bool blocking=true)
static void display(const vpImage< unsigned char > &I)
static void displayFrame(const vpImage< unsigned char > &I, const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam, double size, const vpColor &color=vpColor::none, unsigned int thickness=1, const vpImagePoint &offset=vpImagePoint(0, 0), const std::string &frameName="", const vpColor &textColor=vpColor::black, const vpImagePoint &textOffset=vpImagePoint(15, 15))
static void flush(const vpImage< unsigned char > &I)
static void displayRectangle(const vpImage< unsigned char > &I, const vpImagePoint &topLeft, unsigned int width, unsigned int height, const vpColor &color, bool fill=false, unsigned int thickness=1)
static void displayText(const vpImage< unsigned char > &I, const vpImagePoint &ip, const std::string &s, const vpColor &color)
error that can be emitted by ViSP classes.
Definition vpException.h:59
Font drawing functions for image.
Definition vpFont.h:54
Implementation of an homogeneous matrix and operations on such kind of matrices.
static void createDepthHistogram(const vpImage< uint16_t > &src_depth, vpImage< vpRGBa > &dest_rgba)
static void convert(const vpImage< unsigned char > &src, vpImage< vpRGBa > &dest)
static void drawLine(vpImage< unsigned char > &I, const vpImagePoint &ip1, const vpImagePoint &ip2, unsigned char color, unsigned int thickness=1)
static void drawCross(vpImage< unsigned char > &I, const vpImagePoint &ip, unsigned int size, unsigned char color, unsigned int thickness=1)
static void read(vpImage< unsigned char > &I, const std::string &filename, int backend=IO_DEFAULT_BACKEND)
static void write(const vpImage< unsigned char > &I, const std::string &filename, int backend=IO_DEFAULT_BACKEND)
Class that defines a 2D point in an image. This class is useful for image processing and stores only ...
Definition of the vpImage class member functions.
Definition vpImage.h:135
unsigned int getWidth() const
Definition vpImage.h:242
void resize(unsigned int h, unsigned int w)
resize the image : Image initialization
Definition vpImage.h:795
unsigned int getCols() const
Definition vpImage.h:175
unsigned int getHeight() const
Definition vpImage.h:184
unsigned int getRows() const
Definition vpImage.h:214
static std::string getViSPImagesDataPath()
static bool checkFilename(const std::string &filename)
static void readBinaryValueLE(std::ifstream &file, int16_t &short_value)
static bool checkDirectory(const std::string &dirname)
static std::string createFilePath(const std::string &parent, const std::string &child)
static void makeDirectory(const std::string &dirname)
Wrapper for the KLT (Kanade-Lucas-Tomasi) feature tracker implemented in OpenCV. Thus to enable this ...
Definition vpKltOpencv.h:73
void setBlockSize(int blockSize)
void setQuality(double qualityLevel)
void setHarrisFreeParameter(double harris_k)
void setMaxFeatures(int maxCount)
void setMinDistance(double minDistance)
void setWindowSize(int winSize)
void setPyramidLevels(int pyrMaxLevel)
static double rad(double deg)
Definition vpMath.h:116
static double getMedian(const std::vector< double > &v)
Definition vpMath.cpp:314
static double getStdev(const std::vector< double > &v, bool useBesselCorrection=false)
Definition vpMath.cpp:345
static double getMean(const std::vector< double > &v)
Definition vpMath.cpp:294
static double deg(double rad)
Definition vpMath.h:106
Real-time 6D object pose tracking using its CAD model.
Definition vpMe.h:122
void setMu1(const double &mu_1)
Definition vpMe.h:353
void setSampleStep(const double &s)
Definition vpMe.h:390
void setRange(const unsigned int &r)
Definition vpMe.h:383
void setLikelihoodThresholdType(const vpLikelihoodThresholdType likelihood_threshold_type)
Definition vpMe.h:445
void setMaskSize(const unsigned int &a)
Definition vpMe.cpp:452
void setMu2(const double &mu_2)
Definition vpMe.h:360
@ NORMALIZED_THRESHOLD
Easy-to-use normalized likelihood threshold corresponding to the minimal luminance contrast to consid...
Definition vpMe.h:132
void setMaskNumber(const unsigned int &a)
Definition vpMe.cpp:445
void setThreshold(const double &t)
Definition vpMe.h:435
static bool parse(int *argcPtr, const char **argv, vpArgvInfo *argTable, int flags)
static void convertPoint(const vpCameraParameters &cam, const double &u, const double &v, double &x, double &y)
Implementation of a pose vector and operations on poses.
Defines a rectangle in the plane.
Definition vpRect.h:76
VISP_EXPORT double measureTimeMs()