Visual Servoing Platform version 3.7.0
Loading...
Searching...
No Matches
testGenericTracker.cpp
1/*
2 * ViSP, open source Visual Servoing Platform software.
3 * Copyright (C) 2005 - 2024 by Inria. All rights reserved.
4 *
5 * This software is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 * See the file LICENSE.txt at the root directory of this source
10 * distribution for additional information about the GNU GPL.
11 *
12 * For using ViSP with software that can not be combined with the GNU
13 * GPL, please contact Inria about acquiring a ViSP Professional
14 * Edition License.
15 *
16 * See https://visp.inria.fr for more information.
17 *
18 * This software was developed at:
19 * Inria Rennes - Bretagne Atlantique
20 * Campus Universitaire de Beaulieu
21 * 35042 Rennes Cedex
22 * France
23 *
24 * If you have questions regarding the use of this file, please contact
25 * Inria at visp@inria.fr
26 *
27 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
28 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
29 *
30 * Description:
31 * Regression test for MBT.
32 */
33
39
40#include <cstdlib>
41#include <iostream>
42#include <visp3/core/vpConfig.h>
43
44#if defined(VISP_HAVE_MODULE_MBT) && \
45 (defined(VISP_HAVE_LAPACK) || defined(VISP_HAVE_EIGEN3) || defined(VISP_HAVE_OPENCV))
46
47#if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11)
48#include <type_traits>
49#endif
50
51#include <visp3/core/vpFont.h>
52#include <visp3/core/vpImageDraw.h>
53#include <visp3/core/vpIoTools.h>
54#include <visp3/gui/vpDisplayD3D.h>
55#include <visp3/gui/vpDisplayGDI.h>
56#include <visp3/gui/vpDisplayGTK.h>
57#include <visp3/gui/vpDisplayOpenCV.h>
58#include <visp3/gui/vpDisplayX.h>
59#include <visp3/io/vpImageIo.h>
60#include <visp3/io/vpParseArgv.h>
61#include <visp3/mbt/vpMbGenericTracker.h>
62
63#define GETOPTARGS "i:dsclt:e:DmCh"
64
65#ifdef ENABLE_VISP_NAMESPACE
66using namespace VISP_NAMESPACE_NAME;
67#endif
68
69namespace
70{
/*!
 * Print the command-line help of this regression test on stdout.
 *
 * \param name     Program name (argv[0]), substituted in the SYNOPSIS line.
 * \param badparam Offending option string, or nullptr; when non-null an
 *                 extra ERROR line naming it is printed after the help.
 */
void usage(const char *name, const char *badparam)
{
  fprintf(stdout, "\n\
Regression test for vpGenericTracker.\n\
\n\
SYNOPSIS\n\
  %s [-i <test image path>] [-c] [-d] [-s] [-h] [-l] \n\
 [-t <tracker type>] [-e <last frame index>] [-D] [-m] [-C]\n",
          name);

  fprintf(stdout, "\n\
OPTIONS: \n\
  -i <input image path> \n\
     Set image input path.\n\
     These images come from ViSP-images-x.y.z.tar.gz available \n\
     on the ViSP website.\n\
     Setting the VISP_INPUT_IMAGE_PATH environment\n\
     variable produces the same behavior than using\n\
     this option.\n\
\n\
  -d \n\
     Turn off the display.\n\
\n\
  -s \n\
     If display is turn off, tracking results are saved in a video folder.\n\
\n\
  -c\n\
     Disable the mouse click. Useful to automate the \n\
     execution of this program without human intervention.\n\
\n\
  -t <tracker type>\n\
     Set tracker type (<1 (Edge)>, <2 (KLT)>, <3 (both)>) for color sensor.\n\
\n\
  -l\n\
     Use the scanline for visibility tests.\n\
\n\
  -e <last frame index>\n\
     Specify the index of the last frame. Once reached, the tracking is stopped.\n\
\n\
  -D \n\
     Use depth.\n\
\n\
  -m \n\
     Set a tracking mask.\n\
\n\
  -C \n\
     Use color images.\n\
\n\
  -h \n\
     Print the help.\n\n");

  // Only report a bad parameter when one was actually passed in.
  if (badparam)
    fprintf(stdout, "\nERROR: Bad parameter [%s]\n", badparam);
}
125
126bool getOptions(int argc, const char **argv, std::string &ipath, bool &click_allowed, bool &display, bool &save,
127 bool &useScanline, int &trackerType, int &lastFrame, bool &use_depth, bool &use_mask,
128 bool &use_color_image)
129{
130 const char *optarg_;
131 int c;
132 while ((c = vpParseArgv::parse(argc, argv, GETOPTARGS, &optarg_)) > 1) {
133
134 switch (c) {
135 case 'i':
136 ipath = optarg_;
137 break;
138 case 'c':
139 click_allowed = false;
140 break;
141 case 'd':
142 display = false;
143 break;
144 case 's':
145 save = true;
146 break;
147 case 'l':
148 useScanline = true;
149 break;
150 case 't':
151 trackerType = atoi(optarg_);
152 break;
153 case 'e':
154 lastFrame = atoi(optarg_);
155 break;
156 case 'D':
157 use_depth = true;
158 break;
159 case 'm':
160 use_mask = true;
161 break;
162 case 'C':
163 use_color_image = true;
164 break;
165 case 'h':
166 usage(argv[0], nullptr);
167 return false;
168
169 default:
170 usage(argv[0], optarg_);
171 return false;
172 }
173 }
174
175 if ((c == 1) || (c == -1)) {
176 // standalone param or error
177 usage(argv[0], nullptr);
178 std::cerr << "ERROR: " << std::endl;
179 std::cerr << " Bad argument " << optarg_ << std::endl << std::endl;
180 return false;
181 }
182
183 return true;
184}
185
186template <typename Type>
187bool read_data(const std::string &input_directory, int cpt, const vpCameraParameters &cam_depth, vpImage<Type> &I,
188 vpImage<uint16_t> &I_depth, std::vector<vpColVector> &pointcloud, vpHomogeneousMatrix &cMo)
189{
190#if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11)
191 static_assert(std::is_same<Type, unsigned char>::value || std::is_same<Type, vpRGBa>::value,
192 "Template function supports only unsigned char and vpRGBa images!");
193#endif
194#if defined(VISP_HAVE_DATASET)
195#if VISP_HAVE_DATASET_VERSION >= 0x030600
196 std::string ext("png");
197#else
198 std::string ext("pgm");
199#endif
200#else
201 // We suppose that the user will download a recent dataset
202 std::string ext("png");
203#endif
204 std::string image_filename = vpIoTools::formatString(input_directory + "/Images/Image_%04d." + ext, cpt);
205 std::string depth_filename = vpIoTools::formatString(input_directory + "/Depth/Depth_%04d.bin", cpt);
206 std::string pose_filename = vpIoTools::formatString(input_directory + "/CameraPose/Camera_%03d.txt", cpt);
207
208 if (!vpIoTools::checkFilename(image_filename) || !vpIoTools::checkFilename(depth_filename) ||
209 !vpIoTools::checkFilename(pose_filename))
210 return false;
211
212 vpImageIo::read(I, image_filename);
213
214 unsigned int depth_width = 0, depth_height = 0;
215 std::ifstream file_depth(depth_filename.c_str(), std::ios::in | std::ios::binary);
216 if (!file_depth.is_open())
217 return false;
218
219 vpIoTools::readBinaryValueLE(file_depth, depth_height);
220 vpIoTools::readBinaryValueLE(file_depth, depth_width);
221 I_depth.resize(depth_height, depth_width);
222 pointcloud.resize(depth_height * depth_width);
223
224 const float depth_scale = 0.000030518f;
225 for (unsigned int i = 0; i < I_depth.getHeight(); i++) {
226 for (unsigned int j = 0; j < I_depth.getWidth(); j++) {
227 vpIoTools::readBinaryValueLE(file_depth, I_depth[i][j]);
228 double x = 0.0, y = 0.0, Z = I_depth[i][j] * depth_scale;
229 vpPixelMeterConversion::convertPoint(cam_depth, j, i, x, y);
230 vpColVector pt3d(4, 1.0);
231 pt3d[0] = x * Z;
232 pt3d[1] = y * Z;
233 pt3d[2] = Z;
234 pointcloud[i * I_depth.getWidth() + j] = pt3d;
235 }
236 }
237
238 std::ifstream file_pose(pose_filename.c_str());
239 if (!file_pose.is_open()) {
240 return false;
241 }
242
243 for (unsigned int i = 0; i < 4; i++) {
244 for (unsigned int j = 0; j < 4; j++) {
245 file_pose >> cMo[i][j];
246 }
247 }
248
249 return true;
250}
251
252void convert(const vpImage<vpRGBa> &src, vpImage<vpRGBa> &dst) { dst = src; }
253
254void convert(const vpImage<unsigned char> &src, vpImage<vpRGBa> &dst) { vpImageConvert::convert(src, dst); }
255
/*!
 * Run the generic tracker regression test on the Castle-simu sequence:
 * configure a two-camera (color + depth) vpMbGenericTracker, track every
 * frame, and compare the estimated pose against the dataset ground truth.
 *
 * \param input_directory   Root folder of the sequence.
 * \param opt_click_allowed When true and a display is active, mouse clicks
 *                          pause/quit the loop.
 * \param opt_display       Show images while tracking (forced off when no
 *                          display backend is compiled in).
 * \param useScanline       Enable scanline visibility tests.
 * \param trackerType_image Tracker type for the color camera (edge/KLT mask).
 * \param opt_lastFrame     Last frame index to process (<= 0 means all).
 * \param use_depth         Feed the depth point cloud to the tracker.
 * \param use_mask          Restrict tracking to a rectangular sub-region.
 * \param save              When the display is off, write result images to
 *                          a "results" folder.
 * \return EXIT_SUCCESS when every frame stays under the pose-error
 *         thresholds, EXIT_FAILURE otherwise.
 *
 * NOTE(review): this excerpt was extracted from generated documentation and
 * several source lines are missing (the embedded numbering jumps); gaps are
 * flagged inline below. The code is kept byte-identical to the extraction.
 */
template <typename Type>
bool run(const std::string &input_directory, bool opt_click_allowed, bool opt_display, bool useScanline,
         int trackerType_image, int opt_lastFrame, bool use_depth, bool use_mask, bool save)
{
#if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11)
  static_assert(std::is_same<Type, unsigned char>::value || std::is_same<Type, vpRGBa>::value,
                "Template function supports only unsigned char and vpRGBa images!");
#endif
  // Initialise a display
#if defined(VISP_HAVE_X11)
  vpDisplayX display1, display2;
#elif defined(VISP_HAVE_GDI)
  vpDisplayGDI display1, display2;
#elif defined(HAVE_OPENCV_HIGHGUI)
  vpDisplayOpenCV display1, display2;
#elif defined(VISP_HAVE_D3D9)
  vpDisplayD3D display1, display2;
#elif defined(VISP_HAVE_GTK)
  vpDisplayGTK display1, display2;
#else
  // No GUI backend available: silently disable the display.
  opt_display = false;
#endif

  // Camera 1 = color sensor, Camera 2 = depth sensor.
  std::vector<int> tracker_type(2);
  tracker_type[0] = trackerType_image;
  // NOTE(review): a line appears missing here — tracker_type[1] is never
  // assigned in this excerpt (presumably the depth tracker type); confirm
  // against the original file.
  vpMbGenericTracker tracker(tracker_type);

#if defined(VISP_HAVE_PUGIXML)
  std::string configFileCam1 = input_directory + std::string("/Config/chateau.xml");
  std::string configFileCam2 = input_directory + std::string("/Config/chateau_depth.xml");
  std::cout << "Load config file for camera 1: " << configFileCam1 << std::endl;
  std::cout << "Load config file for camera 2: " << configFileCam2 << std::endl;
  tracker.loadConfigFile(configFileCam1, configFileCam2);
#else
  // Corresponding parameters manually set to have an example code
  {
    // NOTE(review): the declaration of cam_color/cam_depth appears missing
    // from this excerpt (likely "vpCameraParameters cam_color, cam_depth;").
    cam_color.initPersProjWithoutDistortion(700.0, 700.0, 320.0, 240.0);
    cam_depth.initPersProjWithoutDistortion(700.0, 700.0, 320.0, 240.0);
    tracker.setCameraParameters(cam_color, cam_depth);
  }

  // Edge (moving-edge feature settings)
  vpMe me;
  me.setMaskSize(5);
  me.setMaskNumber(180);
  me.setRange(8);
  // NOTE(review): a line appears missing before setThreshold (embedded
  // numbering jumps 303 -> 305), possibly setLikelihoodThresholdType.
  me.setThreshold(5);
  me.setMu1(0.5);
  me.setMu2(0.5);
  me.setSampleStep(5);
  tracker.setMovingEdge(me);

  // Klt
#if defined(VISP_HAVE_MODULE_KLT) && defined(VISP_HAVE_OPENCV) && defined(HAVE_OPENCV_IMGPROC) && defined(HAVE_OPENCV_VIDEO)
  vpKltOpencv klt;
  tracker.setKltMaskBorder(5);
  klt.setMaxFeatures(10000);
  klt.setWindowSize(5);
  klt.setQuality(0.01);
  klt.setMinDistance(5);
  klt.setHarrisFreeParameter(0.02);
  klt.setBlockSize(3);
  klt.setPyramidLevels(3);

  tracker.setKltOpencv(klt);
#endif

  // Depth (normal and dense depth feature settings)
  tracker.setDepthNormalFeatureEstimationMethod(vpMbtFaceDepthNormal::ROBUST_FEATURE_ESTIMATION);
  tracker.setDepthNormalPclPlaneEstimationMethod(2);
  tracker.setDepthNormalPclPlaneEstimationRansacMaxIter(200);
  tracker.setDepthNormalPclPlaneEstimationRansacThreshold(0.001);
  tracker.setDepthNormalSamplingStep(2, 2);

  tracker.setDepthDenseSamplingStep(4, 4);

  // Visibility angles and clipping planes.
  tracker.setAngleAppear(vpMath::rad(85.0));
  tracker.setAngleDisappear(vpMath::rad(89.0));
  tracker.setNearClippingDistance(0.01);
  tracker.setFarClippingDistance(2.0);
  tracker.setClipping(tracker.getClipping() | vpMbtPolygon::FOV_CLIPPING);
#endif

  // Load the castle CAD model (VRML when Coin3D is available, CAO otherwise).
#ifdef VISP_HAVE_COIN3D
  tracker.loadModel(input_directory + "/Models/chateau.wrl", input_directory + "/Models/chateau.cao");
#else
  tracker.loadModel(input_directory + "/Models/chateau.cao", input_directory + "/Models/chateau.cao");
#endif
  // NOTE(review): the declaration of T appears missing from this excerpt
  // (likely "vpHomogeneousMatrix T;") — T places the extra cube model
  // relative to the castle.
  T[0][0] = -1;
  T[0][3] = -0.2;
  T[1][1] = 0;
  T[1][2] = 1;
  T[1][3] = 0.12;
  T[2][1] = 1;
  T[2][2] = 0;
  T[2][3] = -0.15;
  tracker.loadModel(input_directory + "/Models/cube.cao", false, T);
  // NOTE(review): a declaration of cam_color/cam_depth for this scope
  // appears missing here (embedded numbering jumps 356 -> 358).
  tracker.getCameraParameters(cam_color, cam_depth);
  tracker.setDisplayFeatures(true);
  tracker.setScanLineVisibilityTest(useScanline);

  // Per-tracker-type accuracy thresholds: pair = (translation, rotation deg).
  std::map<int, std::pair<double, double> > map_thresh;
  // Take the highest thresholds between all CI machines
  // NOTE(review): throughout this block the "map_thresh[...] =" left-hand
  // sides appear missing from the excerpt; as written these expressions are
  // discarded. Confirm against the original file.
#ifdef VISP_HAVE_COIN3D
  useScanline ? std::pair<double, double>(0.007, 6.) : std::pair<double, double>(0.008, 3.9);
#if defined(VISP_HAVE_MODULE_KLT) && defined(VISP_HAVE_OPENCV) && defined(HAVE_OPENCV_IMGPROC) && defined(HAVE_OPENCV_VIDEO)
  useScanline ? std::pair<double, double>(0.007, 1.9) : std::pair<double, double>(0.007, 1.8);
  useScanline ? std::pair<double, double>(0.005, 3.8) : std::pair<double, double>(0.007, 3.7);
#endif
  useScanline ? std::pair<double, double>(0.003, 1.7) : std::pair<double, double>(0.002, 0.8);
#if defined(VISP_HAVE_MODULE_KLT) && defined(VISP_HAVE_OPENCV) && defined(HAVE_OPENCV_IMGPROC) && defined(HAVE_OPENCV_VIDEO)
  std::pair<double, double>(0.002, 0.3);
  useScanline ? std::pair<double, double>(0.002, 1.8) : std::pair<double, double>(0.002, 0.7);
#endif
#else
  useScanline ? std::pair<double, double>(0.015, 3.0) : std::pair<double, double>(0.009, 4.0);
#if defined(VISP_HAVE_MODULE_KLT) && defined(VISP_HAVE_OPENCV) && defined(HAVE_OPENCV_IMGPROC) && defined(HAVE_OPENCV_VIDEO)
  useScanline ? std::pair<double, double>(0.006, 1.7) : std::pair<double, double>(0.005, 1.4);
  useScanline ? std::pair<double, double>(0.004, 1.2) : std::pair<double, double>(0.004, 1.2);
#endif
  useScanline ? std::pair<double, double>(0.002, 0.7) : std::pair<double, double>(0.001, 0.4);
#if defined(VISP_HAVE_MODULE_KLT) && defined(VISP_HAVE_OPENCV) && defined(HAVE_OPENCV_IMGPROC) && defined(HAVE_OPENCV_VIDEO)
  std::pair<double, double>(0.002, 0.3);
  useScanline ? std::pair<double, double>(0.001, 0.5) : std::pair<double, double>(0.001, 0.4);
#endif
#endif

  // NOTE(review): declarations of I and I_depth appear missing here
  // (likely "vpImage<Type> I;" and "vpImage<vpRGBa> I_depth;").
  vpImage<uint16_t> I_depth_raw;
  vpHomogeneousMatrix cMo_truth;
  std::vector<vpColVector> pointcloud;
  int cpt_frame = 1;
  if (!read_data(input_directory, cpt_frame, cam_depth, I, I_depth_raw, pointcloud, cMo_truth)) {
    std::cerr << "Cannot read first frame!" << std::endl;
    return EXIT_FAILURE;
  }

  // Optional rectangular tracking mask covering the central 1/7..6/7 band
  // of the image in both directions.
  vpImage<bool> mask(I.getHeight(), I.getWidth());
  const double roi_step = 7.0;
  const double roi_step2 = 6.0;
  if (use_mask) {
    mask = false;
    for (unsigned int i = static_cast<unsigned int>(I.getRows() / roi_step);
         i < static_cast<unsigned int>(I.getRows() * roi_step2 / roi_step); i++) {
      for (unsigned int j = static_cast<unsigned int>(I.getCols() / roi_step);
           j < static_cast<unsigned int>(I.getCols() * roi_step2 / roi_step); j++) {
        mask[i][j] = true;
      }
    }
    tracker.setMask(mask);
  }

  vpImageConvert::createDepthHistogram(I_depth_raw, I_depth);

  // Side-by-side canvas (color | depth) used when saving results to disk.
  vpImage<vpRGBa> results(I.getHeight(), I.getWidth() + I_depth.getWidth());
  vpImage<vpRGBa> resultsColor(I.getHeight(), I.getWidth());
  vpImage<vpRGBa> resultsDepth(I_depth.getHeight(), I_depth.getWidth());
  if (save) {
    vpIoTools::makeDirectory("results");
  }
  if (opt_display) {
#ifdef VISP_HAVE_DISPLAY
    display1.init(I, 0, 0, "Image");
    display2.init(I_depth, static_cast<int>(I.getWidth()), 0, "Depth");
#endif
  }

  // NOTE(review): the declaration of depth_M_color appears missing here
  // (likely "vpHomogeneousMatrix depth_M_color;") — it is the fixed
  // depth-to-color extrinsic transform (5 cm baseline on x).
  depth_M_color[0][3] = -0.05;
  tracker.setCameraTransformationMatrix("Camera2", depth_M_color);
  tracker.initFromPose(I, cMo_truth);

  vpFont font(24);
  bool click = false, quit = false, correct_accuracy = true;
  std::vector<double> vec_err_t, vec_err_tu;
  std::vector<double> time_vec;
  // Main tracking loop: one iteration per dataset frame.
  while (read_data(input_directory, cpt_frame, cam_depth, I, I_depth_raw, pointcloud, cMo_truth) && !quit &&
         (opt_lastFrame > 0 ? static_cast<int>(cpt_frame) <= opt_lastFrame : true)) {
    vpImageConvert::createDepthHistogram(I_depth_raw, I_depth);

    if (opt_display) {
      // NOTE(review): a vpDisplay::display(I) call appears missing here.
      vpDisplay::display(I_depth);
    }
    else if (save) {
      convert(I, resultsColor);
      convert(I_depth, resultsDepth);
    }

    // Build the per-camera input maps expected by the generic tracker.
    double t = vpTime::measureTimeMs();
    std::map<std::string, const vpImage<Type> *> mapOfImages;
    mapOfImages["Camera1"] = &I;
    std::map<std::string, const std::vector<vpColVector> *> mapOfPointclouds;
    mapOfPointclouds["Camera2"] = &pointcloud;
    std::map<std::string, unsigned int> mapOfWidths, mapOfHeights;
    // A zero-sized depth map effectively disables the depth camera.
    if (!use_depth) {
      mapOfWidths["Camera2"] = 0;
      mapOfHeights["Camera2"] = 0;
    }
    else {
      mapOfWidths["Camera2"] = I_depth.getWidth();
      mapOfHeights["Camera2"] = I_depth.getHeight();
    }

    tracker.track(mapOfImages, mapOfPointclouds, mapOfWidths, mapOfHeights);
    vpHomogeneousMatrix cMo = tracker.getPose();
    // NOTE(review): a line computing the elapsed time appears missing here
    // (likely "t = vpTime::measureTimeMs() - t;"); as written the start
    // timestamp, not a duration, is pushed.
    time_vec.push_back(t);

    if (opt_display) {
      // Overlay model, features and frame axes on both views.
      tracker.display(I, I_depth, cMo, depth_M_color * cMo, cam_color, cam_depth, vpColor::red, 3);
      vpDisplay::displayFrame(I, cMo, cam_depth, 0.05, vpColor::none, 3);
      vpDisplay::displayFrame(I_depth, depth_M_color * cMo, cam_depth, 0.05, vpColor::none, 3);

      std::stringstream ss;
      ss << "Frame: " << cpt_frame;
      vpDisplay::displayText(I_depth, 20, 20, ss.str(), vpColor::red);
      ss.str("");
      ss << "Nb features: " << tracker.getError().getRows();
      vpDisplay::displayText(I_depth, 40, 20, ss.str(), vpColor::red);
    }
    else if (save) {
      // Headless mode: draw the model and features into the result images.
      std::map<std::string, std::vector<std::vector<double> > > mapOfModels;
      std::map<std::string, unsigned int> mapOfW;
      mapOfW["Camera1"] = I.getWidth();
      mapOfW["Camera2"] = I_depth.getWidth();
      std::map<std::string, unsigned int> mapOfH;
      mapOfH["Camera1"] = I.getHeight();
      mapOfH["Camera2"] = I_depth.getHeight();
      std::map<std::string, vpHomogeneousMatrix> mapOfcMos;
      mapOfcMos["Camera1"] = cMo;
      mapOfcMos["Camera2"] = depth_M_color * cMo;
      std::map<std::string, vpCameraParameters> mapOfCams;
      mapOfCams["Camera1"] = cam_color;
      mapOfCams["Camera2"] = cam_depth;
      tracker.getModelForDisplay(mapOfModels, mapOfW, mapOfH, mapOfcMos, mapOfCams);
      for (std::map<std::string, std::vector<std::vector<double> > >::const_iterator it = mapOfModels.begin();
           it != mapOfModels.end(); ++it) {
        for (size_t i = 0; i < it->second.size(); i++) {
          // test if it->second[i][0] = 0
          if (std::fabs(it->second[i][0]) <= std::numeric_limits<double>::epsilon()) {
            vpImageDraw::drawLine(it->first == "Camera1" ? resultsColor : resultsDepth,
                                  vpImagePoint(it->second[i][1], it->second[i][2]),
                                  vpImagePoint(it->second[i][3], it->second[i][4]), vpColor::red, 3);
          }
        }
      }

      std::map<std::string, std::vector<std::vector<double> > > mapOfFeatures;
      tracker.getFeaturesForDisplay(mapOfFeatures);
      for (std::map<std::string, std::vector<std::vector<double> > >::const_iterator it = mapOfFeatures.begin();
           it != mapOfFeatures.end(); ++it) {
        for (size_t i = 0; i < it->second.size(); i++) {
          if (std::fabs(it->second[i][0]) <=
              std::numeric_limits<double>::epsilon()) { // test it->second[i][0] = 0 for ME
            // NOTE(review): the branches below appear stripped of their
            // bodies in this excerpt — presumably each assigned a per-state
            // vpColor ("color") used by drawCross below; confirm against
            // the original file.
            if (std::fabs(it->second[i][3]) <= std::numeric_limits<double>::epsilon()) { // test it->second[i][3] = 0
            }
            else if (std::fabs(it->second[i][3] - 1) <=
                     std::numeric_limits<double>::epsilon()) { // test it->second[i][3] = 1
            }
            else if (std::fabs(it->second[i][3] - 2) <=
                     std::numeric_limits<double>::epsilon()) { // test it->second[i][3] = 2
            }
            else if (std::fabs(it->second[i][3] - 3) <=
                     std::numeric_limits<double>::epsilon()) { // test it->second[i][3] = 3
            }
            else if (std::fabs(it->second[i][3] - 4) <=
                     std::numeric_limits<double>::epsilon()) { // test it->second[i][3] = 4
            }
            vpImageDraw::drawCross(it->first == "Camera1" ? resultsColor : resultsDepth,
                                   vpImagePoint(it->second[i][1], it->second[i][2]), 3, color, 1);
          }
          else if (std::fabs(it->second[i][0] - 1) <=
                   std::numeric_limits<double>::epsilon()) { // test it->second[i][0] = 1 for KLT
            vpImageDraw::drawCross(it->first == "Camera1" ? resultsColor : resultsDepth,
                                   vpImagePoint(it->second[i][1], it->second[i][2]), 10, vpColor::red, 1);
          }
        }
      }

      // Computation time
      std::ostringstream oss;
      oss << "Tracking time: " << t << " ms";
      font.drawText(resultsColor, oss.str(), vpImagePoint(20, 20), vpColor::red);
    }

    // Compare estimated pose against ground truth: split the pose vector
    // into translation (first 3) and theta-u rotation (last 3) parts.
    vpPoseVector pose_est(cMo);
    vpPoseVector pose_truth(cMo_truth);
    vpColVector t_est(3), t_truth(3);
    vpColVector tu_est(3), tu_truth(3);
    for (unsigned int i = 0; i < 3; i++) {
      t_est[i] = pose_est[i];
      t_truth[i] = pose_truth[i];
      tu_est[i] = pose_est[i + 3];
      tu_truth[i] = pose_truth[i + 3];
    }

    vpColVector t_err = t_truth - t_est, tu_err = tu_truth - tu_est;
    // Threshold key includes the depth tracker bit only when depth is used.
    const double t_thresh =
      map_thresh[!use_depth ? trackerType_image : trackerType_image | vpMbGenericTracker::DEPTH_DENSE_TRACKER].first;
    const double tu_thresh =
      map_thresh[!use_depth ? trackerType_image : trackerType_image | vpMbGenericTracker::DEPTH_DENSE_TRACKER].second;
    double t_err2 = sqrt(t_err.sumSquare()), tu_err2 = vpMath::deg(sqrt(tu_err.sumSquare()));
    vec_err_t.push_back(t_err2);
    vec_err_tu.push_back(tu_err2);
    if (!use_mask && (t_err2 > t_thresh || tu_err2 > tu_thresh)) { // no accuracy test with mask
      std::cerr << "Pose estimated exceeds the threshold (t_thresh = " << t_thresh << " ; tu_thresh = " << tu_thresh
                << ")!" << std::endl;
      std::cout << "t_err: " << t_err2 << " ; tu_err: " << tu_err2 << std::endl;
      correct_accuracy = false;
    }

    if (opt_display) {
      if (use_mask) {
        // NOTE(review): roi is built but never used in this excerpt —
        // a vpDisplay::displayRectangle(...) call appears missing here.
        vpRect roi(vpImagePoint(I.getRows() / roi_step, I.getCols() / roi_step),
                   vpImagePoint(I.getRows() * roi_step2 / roi_step, I.getCols() * roi_step2 / roi_step));
      }

      // NOTE(review): a vpDisplay::flush(I) call appears missing here.
      vpDisplay::flush(I_depth);
    }
    else if (save) {
      // Stitch color and depth views side by side and write the frame.
      results.insert(resultsColor, vpImagePoint());
      results.insert(resultsDepth, vpImagePoint(0, resultsColor.getWidth()));
      std::string filename = vpIoTools::formatString("results/image_%04d.png", cpt_frame);
      vpImageIo::write(results, filename);
    }

    if (opt_display && opt_click_allowed) {
      // NOTE(review): the declaration of button and the case labels
      // (vpMouseButton::button1/button3) appear missing from this excerpt;
      // the intent looks like: left click toggles pause/quit, right click
      // toggles step mode. Confirm against the original file.
      if (vpDisplay::getClick(I, button, click)) {
        switch (button) {
          quit = !click;
          break;

          click = !click;
          break;

        default:
          break;
        }
      }
    }

    cpt_frame++;
  }

  // Timing and error statistics over the whole sequence.
  if (!time_vec.empty())
    std::cout << "Computation time, Mean: " << vpMath::getMean(time_vec)
    << " ms ; Median: " << vpMath::getMedian(time_vec) << " ms ; Std: " << vpMath::getStdev(time_vec) << " ms"
    << std::endl;

  if (!vec_err_t.empty())
    std::cout << "Max translation error: " << *std::max_element(vec_err_t.begin(), vec_err_t.end()) << std::endl;

  if (!vec_err_tu.empty())
    std::cout << "Max thetau error: " << *std::max_element(vec_err_tu.begin(), vec_err_tu.end()) << std::endl;

  std::cout << "Test result: " << (correct_accuracy ? "success" : "failure") << std::endl;
  return correct_accuracy ? EXIT_SUCCESS : EXIT_FAILURE;
}
652} // namespace
653
/*!
 * Entry point: parse the command line, validate the requested configuration
 * against the compiled-in features, locate the dataset, and dispatch to
 * run<vpRGBa>() or run<unsigned char>() depending on -C.
 */
int main(int argc, const char *argv[])
{
  try {
    std::string env_ipath;
    std::string opt_ipath = "";
    bool opt_click_allowed = true;
    bool opt_display = true;
    bool opt_save = false;
    bool useScanline = false;
    int trackerType_image = vpMbGenericTracker::EDGE_TRACKER;
#if defined(__mips__) || defined(__mips) || defined(mips) || defined(__MIPS__)
    // To avoid Debian test timeout
    int opt_lastFrame = 5;
#else
    int opt_lastFrame = -1;
#endif
    bool use_depth = false;
    bool use_mask = false;
    bool use_color_image = false;

    // Get the visp-images-data package path or VISP_INPUT_IMAGE_PATH
    // environment variable value
    // NOTE(review): the assignment to env_ipath appears missing from this
    // excerpt (likely "env_ipath = vpIoTools::getViSPImagesDataPath();");
    // as written env_ipath stays empty. Confirm against the original file.

    // Read the command line options
    if (!getOptions(argc, argv, opt_ipath, opt_click_allowed, opt_display, opt_save, useScanline, trackerType_image,
                    opt_lastFrame, use_depth, use_mask, use_color_image)) {
      return EXIT_FAILURE;
    }

    // KLT-based tracker types need the OpenCV imgproc/video modules.
#if ! (defined(VISP_HAVE_MODULE_KLT) && defined(VISP_HAVE_OPENCV) && defined(HAVE_OPENCV_IMGPROC) && defined(HAVE_OPENCV_VIDEO))
    if (trackerType_image == 2 || trackerType_image == 3) {
      std::cout << "Using klt tracker is not possible without OpenCV imgproc and video modules." << std::endl;
      std::cout << "Use rather command line option -t 1 to use edges." << std::endl;
      return EXIT_SUCCESS;
    }
#endif
    std::cout << "trackerType_image: " << trackerType_image << std::endl;
    std::cout << "useScanline: " << useScanline << std::endl;
    std::cout << "use_depth: " << use_depth << std::endl;
    std::cout << "use_mask: " << use_mask << std::endl;
    std::cout << "use_color_image: " << use_color_image << std::endl;
#ifdef VISP_HAVE_COIN3D
    std::cout << "COIN3D available." << std::endl;
#endif

    // Second, legacy guard on the KLT bit of the tracker-type mask.
#if !defined(VISP_HAVE_MODULE_KLT) || (!defined(VISP_HAVE_OPENCV) || (VISP_HAVE_OPENCV_VERSION < 0x020100))
    if (trackerType_image & 2) {
      std::cout << "KLT features cannot be used: ViSP is not built with "
        "KLT module or OpenCV is not available.\nTest is not run."
                << std::endl;
      return EXIT_SUCCESS;
    }
#endif

    // Test if an input path is set
    if (opt_ipath.empty() && env_ipath.empty()) {
      usage(argv[0], nullptr);
      std::cerr << std::endl << "ERROR:" << std::endl;
      std::cerr << "  Use -i <visp image path> option or set VISP_INPUT_IMAGE_PATH " << std::endl
                << "  environment variable to specify the location of the " << std::endl
                << "  image path where test images are located." << std::endl
                << std::endl;

      return EXIT_FAILURE;
    }

    // -i takes precedence over the environment variable.
    std::string input_directory =
      vpIoTools::createFilePath(!opt_ipath.empty() ? opt_ipath : env_ipath, "mbt-depth/Castle-simu");
    if (!vpIoTools::checkDirectory(input_directory)) {
      std::cerr << "ViSP-images does not contain the folder: " << input_directory << "!" << std::endl;
      return EXIT_SUCCESS; // missing dataset is not a failure on CI
    }

    if (use_color_image) {
      return run<vpRGBa>(input_directory, opt_click_allowed, opt_display, useScanline, trackerType_image, opt_lastFrame,
                         use_depth, use_mask, opt_save);
    }
    else {
      return run<unsigned char>(input_directory, opt_click_allowed, opt_display, useScanline, trackerType_image,
                                opt_lastFrame, use_depth, use_mask, opt_save);
    }

    // NOTE(review): unreachable — both branches above return.
    std::cout << "Test succeed" << std::endl;
    return EXIT_SUCCESS;
  }
  catch (const vpException &e) {
    std::cout << "Catch an exception: " << e << std::endl;
    return EXIT_FAILURE;
  }
}
745#elif !(defined(VISP_HAVE_LAPACK) || defined(VISP_HAVE_EIGEN3) || defined(VISP_HAVE_OPENCV))
// Fallback entry point when no linear-algebra backend (Lapack, Eigen3 or
// OpenCV) is available: report why the test cannot run and succeed, so CI
// does not flag a missing optional dependency as a failure.
int main()
{
  std::cout << "Cannot run this example: install Lapack, Eigen3 or OpenCV" << std::endl;
  return EXIT_SUCCESS;
}
751#else
// Fallback entry point when ViSP was built without the MBT module: report
// the missing module and succeed, so CI does not flag it as a failure.
int main()
{
  std::cout << "Enable MBT module (VISP_HAVE_MODULE_MBT) to launch this test." << std::endl;
  return EXIT_SUCCESS;
}
757#endif
Generic class defining intrinsic camera parameters.
Implementation of column vector and the associated operations.
double sumSquare() const
Class to define RGB colors available for display functionalities.
Definition vpColor.h:157
static const vpColor red
Definition vpColor.h:198
static const vpColor cyan
Definition vpColor.h:207
static const vpColor none
Definition vpColor.h:210
static const vpColor blue
Definition vpColor.h:204
static const vpColor purple
Definition vpColor.h:209
static const vpColor yellow
Definition vpColor.h:206
static const vpColor green
Definition vpColor.h:201
Display for windows using Direct3D 3rd party. Thus to enable this class Direct3D should be installed....
Display for windows using GDI (available on any windows 32 platform).
The vpDisplayGTK allows to display image using the GTK 3rd party library. Thus to enable this class G...
The vpDisplayOpenCV allows to display image using the OpenCV library. Thus to enable this class OpenC...
Use the X11 console to display images on unix-like OS. Thus to enable this class X11 should be instal...
Definition vpDisplayX.h:135
void init(vpImage< unsigned char > &I, int win_x=-1, int win_y=-1, const std::string &win_title="") VP_OVERRIDE
static bool getClick(const vpImage< unsigned char > &I, bool blocking=true)
static void display(const vpImage< unsigned char > &I)
static void displayFrame(const vpImage< unsigned char > &I, const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam, double size, const vpColor &color=vpColor::none, unsigned int thickness=1, const vpImagePoint &offset=vpImagePoint(0, 0), const std::string &frameName="", const vpColor &textColor=vpColor::black, const vpImagePoint &textOffset=vpImagePoint(15, 15))
static void flush(const vpImage< unsigned char > &I)
static void displayRectangle(const vpImage< unsigned char > &I, const vpImagePoint &topLeft, unsigned int width, unsigned int height, const vpColor &color, bool fill=false, unsigned int thickness=1)
static void displayText(const vpImage< unsigned char > &I, const vpImagePoint &ip, const std::string &s, const vpColor &color)
error that can be emitted by ViSP classes.
Definition vpException.h:60
Font drawing functions for image.
Definition vpFont.h:55
Implementation of an homogeneous matrix and operations on such kind of matrices.
static void createDepthHistogram(const vpImage< uint16_t > &src_depth, vpImage< vpRGBa > &dest_rgba)
static void convert(const vpImage< unsigned char > &src, vpImage< vpRGBa > &dest)
static void drawLine(vpImage< unsigned char > &I, const vpImagePoint &ip1, const vpImagePoint &ip2, unsigned char color, unsigned int thickness=1)
static void drawCross(vpImage< unsigned char > &I, const vpImagePoint &ip, unsigned int size, unsigned char color, unsigned int thickness=1)
static void read(vpImage< unsigned char > &I, const std::string &filename, int backend=IO_DEFAULT_BACKEND)
static void write(const vpImage< unsigned char > &I, const std::string &filename, int backend=IO_DEFAULT_BACKEND)
Class that defines a 2D point in an image. This class is useful for image processing and stores only ...
Definition of the vpImage class member functions.
Definition vpImage.h:131
static std::string getViSPImagesDataPath()
static bool checkFilename(const std::string &filename)
static void readBinaryValueLE(std::ifstream &file, int16_t &short_value)
static bool checkDirectory(const std::string &dirname)
static std::string formatString(const std::string &name, unsigned int val)
static std::string createFilePath(const std::string &parent, const std::string &child)
static void makeDirectory(const std::string &dirname)
Wrapper for the KLT (Kanade-Lucas-Tomasi) feature tracker implemented in OpenCV. Thus to enable this ...
Definition vpKltOpencv.h:83
void setBlockSize(int blockSize)
void setQuality(double qualityLevel)
void setHarrisFreeParameter(double harris_k)
void setMaxFeatures(int maxCount)
void setMinDistance(double minDistance)
void setWindowSize(int winSize)
void setPyramidLevels(int pyrMaxLevel)
static double rad(double deg)
Definition vpMath.h:129
static double getMedian(const std::vector< double > &v)
Definition vpMath.cpp:343
static double getStdev(const std::vector< double > &v, bool useBesselCorrection=false)
Definition vpMath.cpp:374
static double getMean(const std::vector< double > &v)
Definition vpMath.cpp:323
static double deg(double rad)
Definition vpMath.h:119
Real-time 6D object pose tracking using its CAD model.
@ ROBUST_FEATURE_ESTIMATION
Robust scheme to estimate the normal of the plane.
Definition vpMe.h:143
void setMu1(const double &mu_1)
Definition vpMe.h:408
void setRange(const unsigned int &range)
Definition vpMe.h:438
void setLikelihoodThresholdType(const vpLikelihoodThresholdType likelihood_threshold_type)
Definition vpMe.h:531
void setMaskNumber(const unsigned int &mask_number)
Definition vpMe.cpp:555
void setThreshold(const double &threshold)
Definition vpMe.h:489
void setSampleStep(const double &sample_step)
Definition vpMe.h:445
void setMaskSize(const unsigned int &mask_size)
Definition vpMe.cpp:563
void setMu2(const double &mu_2)
Definition vpMe.h:415
@ NORMALIZED_THRESHOLD
Definition vpMe.h:154
static bool parse(int *argcPtr, const char **argv, vpArgvInfo *argTable, int flags)
static void convertPoint(const vpCameraParameters &cam, const double &u, const double &v, double &x, double &y)
Implementation of a pose vector and operations on poses.
Defines a rectangle in the plane.
Definition vpRect.h:79
read_data(CameraParameters|None cam_depth, ImageGray I, rs.pipeline pipe)
VISP_EXPORT double measureTimeMs()