42#include <visp3/core/vpConfig.h>
44#if defined(VISP_HAVE_MODULE_MBT) && (defined(VISP_HAVE_LAPACK) || defined(VISP_HAVE_EIGEN3) || defined(VISP_HAVE_OPENCV))
46#if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11)
50#include <visp3/core/vpIoTools.h>
51#include <visp3/gui/vpDisplayD3D.h>
52#include <visp3/gui/vpDisplayGDI.h>
53#include <visp3/gui/vpDisplayGTK.h>
54#include <visp3/gui/vpDisplayOpenCV.h>
55#include <visp3/gui/vpDisplayX.h>
56#include <visp3/io/vpImageIo.h>
57#include <visp3/io/vpParseArgv.h>
58#include <visp3/mbt/vpMbGenericTracker.h>
60#define GETOPTARGS "i:dcle:mCh"
62#ifdef ENABLE_VISP_NAMESPACE
// Print the command-line help for this depth-tracker regression test.
// NOTE(review): this extract is fragmentary — the original file's line numbers
// are fused into the text and interior lines (the fprintf() wrappers around
// the help strings) are missing, so only the option descriptions are visible.
// \param name     : program name (argv[0]), substituted into the synopsis line.
// \param badparam : offending option value; echoed after the help when non-null.
68void usage(
const char *name,
const char *badparam)
71 Regression test for vpGenericTracker and depth.\n\
74 %s [-i <test image path>] [-c] [-d] [-h] [-l] \n\
75 [-e <last frame index>] [-m] [-C]\n",
80 -i <input image path> \n\
81 Set image input path.\n\
82 These images come from ViSP-images-x.y.z.tar.gz available \n\
83 on the ViSP website.\n\
84 Setting the VISP_INPUT_IMAGE_PATH environment\n\
85 variable produces the same behavior than using\n\
89 Turn off the display.\n\
92 Disable the mouse click. Useful to automate the \n\
93 execution of this program without human intervention.\n\
96 Use the scanline for visibility tests.\n\
98 -e <last frame index>\n\
99 Specify the index of the last frame. Once reached, the tracking is stopped.\n\
102 Set a tracking mask.\n\
108 Print the help.\n\n");
// If a bad parameter was detected, report it after the usage text.
111 fprintf(stdout,
"\nERROR: Bad parameter [%s]\n", badparam);
// Parse the command-line options (GETOPTARGS "i:dcle:mCh") into the output
// parameters. Returns false on a bad/unknown option so main() can abort.
// NOTE(review): fragmentary extract — the vpParseArgv loop and most case
// labels are missing; only a few case bodies are visible below.
// \param[out] ipath           : value of -i (input image path).
// \param[out] click_allowed   : cleared by -c (disable mouse click).
// \param[out] display         : display on/off flag (toggled by -d).
// \param[out] useScanline     : scanline visibility test flag (-l).
// \param[out] lastFrame       : index of the last frame to track (-e).
// \param[out] use_mask        : tracking-mask flag (-m).
// \param[out] use_color_image : color-image flag (-C).
114bool getOptions(
int argc,
const char **argv, std::string &ipath,
bool &click_allowed,
bool &display,
bool &useScanline,
115 int &lastFrame,
bool &use_mask,
bool &use_color_image)
// -c: disable mouse clicks for unattended runs.
126 click_allowed =
false;
// -e: last frame index parsed from the option argument.
135 lastFrame = atoi(optarg_);
// -C: track on a color (vpRGBa) image instead of grayscale.
141 use_color_image =
true;
// -h: print help and (in the missing code) return false.
144 usage(argv[0],
nullptr);
// Unknown option: show help with the offending parameter.
148 usage(argv[0], optarg_);
// Standalone argument left over after option parsing is an error.
153 if ((c == 1) || (c == -1)) {
155 usage(argv[0],
nullptr);
156 std::cerr <<
"ERROR: " << std::endl;
157 std::cerr <<
" Bad argument " << optarg_ << std::endl << std::endl;
// Load one dataset frame: the intensity image, the raw depth map, the
// per-pixel 3D point cloud (via cam_depth back-projection) and the
// ground-truth pose cMo. Templated on the image pixel type.
// NOTE(review): fragmentary extract — the function signature itself
// (original lines 165-167) is missing from this view, as are the filename
// construction and the depth-value reading loop body.
164template <
typename Type>
168#if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11)
169 static_assert(std::is_same<Type, unsigned char>::value || std::is_same<Type, vpRGBa>::value,
170 "Template function supports only unsigned char and vpRGBa images!");
// Pick the image extension from the installed dataset version:
// >= 3.6.0 ships png, older datasets ship pgm; png when no dataset macro.
172#if defined(VISP_HAVE_DATASET)
173#if VISP_HAVE_DATASET_VERSION >= 0x030600
174 std::string ext(
"png");
176 std::string ext(
"pgm");
180 std::string ext(
"png");
// Depth maps are stored as raw binary files.
193 std::ifstream file_depth(depth_filename.c_str(), std::ios::in | std::ios::binary);
194 if (!file_depth.is_open())
// Size the outputs from the depth-map header read above (not visible here).
199 I_depth.resize(depth_height, depth_width);
200 pointcloud.resize(depth_height * depth_width);
// Back-project every depth pixel into a 3D point, stored row-major.
203 for (
unsigned int i = 0;
i <
I_depth.getHeight();
i++) {
204 for (
unsigned int j = 0;
j <
I_depth.getWidth();
j++) {
212 pointcloud[
i *
I_depth.getWidth() +
j] = pt3d;
// Ground-truth pose is stored as a plain-text 4x4 matrix.
216 std::ifstream file_pose(pose_filename.c_str());
217 if (!file_pose.is_open()) {
// Read the 4x4 homogeneous matrix element by element.
221 for (
unsigned int i = 0;
i < 4;
i++) {
222 for (
unsigned int j = 0;
j < 4;
j++) {
223 file_pose >>
cMo[
i][
j];
// Run the depth tracker over the dataset sequence and compare each estimated
// pose against the ground truth; returns EXIT_SUCCESS only if every frame
// stays within the translation/rotation error thresholds.
// NOTE(review): fragmentary extract — variable declarations, the display
// #else branches, the tracker-type setup and parts of the loop body are
// missing between the fused original line numbers.
// \param I               : input image (unsigned char or vpRGBa only).
// \param input_directory : dataset root containing Config/ and Models/.
// \param useScanline     : enable scanline visibility tests (loosens thresholds).
// \param opt_lastFrame   : stop after this frame index when > 0.
// \param use_mask        : apply a rectangular tracking mask (disables the
//                          accuracy check below).
230template <
typename Type>
231bool run(
vpImage<Type> &I,
const std::string &input_directory,
bool opt_click_allowed,
bool opt_display,
232 bool useScanline,
int opt_lastFrame,
bool use_mask)
234#if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11)
235 static_assert(std::is_same<Type, unsigned char>::value || std::is_same<Type, vpRGBa>::value,
236 "Template function supports only unsigned char and vpRGBa images!");
// Select the first available display backend at compile time.
239#if defined(VISP_HAVE_X11)
241#elif defined(VISP_HAVE_GDI)
243#elif defined(HAVE_OPENCV_HIGHGUI)
245#elif defined(VISP_HAVE_D3D9)
247#elif defined(VISP_HAVE_GTK)
253 std::vector<int> tracker_type;
// Tracker configuration: XML config when pugixml is available,
// otherwise (missing branch) parameters are set by hand below.
257#if defined(VISP_HAVE_PUGIXML)
258 tracker.loadConfigFile(input_directory +
"/Config/chateau_depth.xml");
263 cam_depth.initPersProjWithoutDistortion(700.0, 700.0, 320.0, 240.0);
264 tracker.setCameraParameters(cam_depth);
// Depth-normal feature settings: PCL plane estimation method 2 with a
// 200-iteration / 1 mm RANSAC, sampling every 2nd pixel; dense depth
// features sampled every 4th pixel.
268 tracker.setDepthNormalPclPlaneEstimationMethod(2);
269 tracker.setDepthNormalPclPlaneEstimationRansacMaxIter(200);
270 tracker.setDepthNormalPclPlaneEstimationRansacThreshold(0.001);
271 tracker.setDepthNormalSamplingStep(2, 2);
273 tracker.setDepthDenseSamplingStep(4, 4);
275#if defined(VISP_HAVE_MODULE_KLT) && defined(VISP_HAVE_OPENCV) && defined(HAVE_OPENCV_IMGPROC) && defined(HAVE_OPENCV_VIDEO)
// Clip geometry between 1 cm and 2 m.
281 tracker.setNearClippingDistance(0.01);
282 tracker.setFarClippingDistance(2.0);
// Load the castle CAD model, then add a cube model with transform T
// (presumably to exercise multi-model loading — interior lines missing).
285 tracker.loadModel(input_directory +
"/Models/chateau.cao");
295 tracker.loadModel(input_directory +
"/Models/cube.cao",
false, T);
297 tracker.getCameraParameters(cam_depth);
298 tracker.setDisplayFeatures(
true);
299 tracker.setScanLineVisibilityTest(useScanline);
304 std::vector<vpColVector> pointcloud;
// First frame must be readable or the test aborts.
306 if (!
read_data(input_directory, cpt_frame, cam_depth, I, I_depth_raw, pointcloud, cMo_truth)) {
307 std::cerr <<
"Cannot read first frame!" << std::endl;
// Rectangular mask: keep rows/cols in [size/7, 6*size/7) — the mask body
// that uses i,j is among the missing lines.
312 const double roi_step = 7.0;
313 const double roi_step2 = 6.0;
316 for (
unsigned int i =
static_cast<unsigned int>(I.getRows() / roi_step);
317 i <
static_cast<unsigned int>(I.getRows() * roi_step2 / roi_step); i++) {
318 for (
unsigned int j =
static_cast<unsigned int>(I.getCols() / roi_step);
319 j <
static_cast<unsigned int>(I.getCols() * roi_step2 / roi_step); j++) {
// Side-by-side windows: color image at (0,0), depth view to its right.
328#ifdef VISP_HAVE_DISPLAY
329 display1.
init(I, 0, 0,
"Image");
330 display2.
init(I_depth,
static_cast<int>(I.getWidth()), 0,
"Depth");
// Initialize from the ground-truth pose expressed in the depth frame.
336 tracker.initFromPose(I, depth_M_color * cMo_truth);
338 bool click =
false, quit =
false, correct_accuracy =
true;
339 std::vector<double> vec_err_t, vec_err_tu;
340 std::vector<double> time_vec;
// Main loop: stop on read failure, user quit, or when opt_lastFrame
// (if positive) is exceeded.
341 while (
read_data(input_directory, cpt_frame, cam_depth, I, I_depth_raw, pointcloud, cMo_truth) && !quit &&
342 (opt_lastFrame > 0 ?
static_cast<int>(cpt_frame) <= opt_lastFrame :
true)) {
// The generic tracker takes per-camera maps; only the "Camera" (depth)
// sensor is fed here, as a raw point cloud with its dimensions.
351 std::map<std::string, const vpImage<Type> *> mapOfImages;
352 std::map<std::string, const std::vector<vpColVector> *> mapOfPointclouds;
353 mapOfPointclouds[
"Camera"] = &pointcloud;
354 std::map<std::string, unsigned int> mapOfWidths, mapOfHeights;
355 mapOfWidths[
"Camera"] =
I_depth.getWidth();
356 mapOfHeights[
"Camera"] =
I_depth.getHeight();
358 tracker.track(mapOfImages, mapOfPointclouds, mapOfWidths, mapOfHeights);
361 time_vec.push_back(t);
// On-screen overlay: frame index and number of tracked features.
367 std::stringstream ss;
368 ss <<
"Frame: " << cpt_frame;
371 ss <<
"Nb features: " <<
tracker.getError().getRows();
// Split both pose vectors into translation (first 3) and theta-u
// rotation (last 3) components for separate error norms.
379 for (
unsigned int i = 0;
i < 3;
i++) {
380 t_est[
i] = pose_est[
i];
381 t_truth[
i] = pose_truth[
i];
382 tu_est[
i] = pose_est[
i + 3];
383 tu_truth[
i] = pose_truth[
i + 3];
386 vpColVector t_err = t_truth - t_est, tu_err = tu_truth - tu_est;
388 vec_err_t.push_back(t_err2);
389 vec_err_tu.push_back(tu_err2);
// Scanline mode is allowed slightly larger errors (3 mm / 0.5 deg vs
// 2 mm / 0.4 deg). The check is skipped entirely when a mask is used.
390 const double t_thresh = useScanline ? 0.003 : 0.002;
391 const double tu_thresh = useScanline ? 0.5 : 0.4;
392 if (!use_mask && (t_err2 > t_thresh || tu_err2 > tu_thresh)) {
393 std::cerr <<
"Pose estimated exceeds the threshold (t_thresh = " << t_thresh <<
", tu_thresh = " << tu_thresh
394 <<
")!" << std::endl;
395 std::cout <<
"t_err: " << t_err2 <<
" ; tu_err: " << tu_err2 << std::endl;
396 correct_accuracy =
false;
// Draw the mask rectangle (same [1/7, 6/7) bounds as above).
402 vpImagePoint(I.getRows() * roi_step2 / roi_step, I.getCols() * roi_step2 / roi_step));
410 if (opt_display && opt_click_allowed) {
// Final statistics: timing and worst-case pose errors over the run.
431 if (!time_vec.empty())
436 if (!vec_err_t.empty())
437 std::cout <<
"Max translation error: " << *std::max_element(vec_err_t.begin(), vec_err_t.end()) << std::endl;
439 if (!vec_err_tu.empty())
440 std::cout <<
"Max thetau error: " << *std::max_element(vec_err_tu.begin(), vec_err_tu.end()) << std::endl;
442 return correct_accuracy ? EXIT_SUCCESS : EXIT_FAILURE;
// Entry point: parse options, locate the dataset, and dispatch to the
// templated run<>() on either a grayscale or color image.
// NOTE(review): fragmentary extract — the env_ipath lookup, the
// input_directory construction and the try/catch scaffolding are among
// the missing lines.
446int main(
int argc,
const char *argv[])
449 std::string env_ipath;
450 std::string opt_ipath =
"";
451 bool opt_click_allowed =
true;
452 bool opt_display =
true;
453 bool useScanline =
false;
// On MIPS only the first 5 frames are tracked (presumably to keep slow
// CI targets fast — TODO confirm); elsewhere -1 means "whole sequence".
454#if defined(__mips__) || defined(__mips) || defined(mips) || defined(__MIPS__)
456 int opt_lastFrame = 5;
458 int opt_lastFrame = -1;
460 bool use_mask =
false;
461 bool use_color_image =
false;
// Abort on bad command-line options.
468 if (!getOptions(argc, argv, opt_ipath, opt_click_allowed, opt_display, useScanline, opt_lastFrame, use_mask,
473 std::cout <<
"useScanline: " << useScanline << std::endl;
474 std::cout <<
"use_mask: " << use_mask << std::endl;
475 std::cout <<
"use_color_image: " << use_color_image << std::endl;
// An image path is mandatory: either -i or VISP_INPUT_IMAGE_PATH.
478 if (opt_ipath.empty() && env_ipath.empty()) {
479 usage(argv[0],
nullptr);
480 std::cerr << std::endl <<
"ERROR:" << std::endl;
481 std::cerr <<
" Use -i <visp image path> option or set VISP_INPUT_IMAGE_PATH " << std::endl
482 <<
" environment variable to specify the location of the " << std::endl
483 <<
" image path where test images are located." << std::endl
489 std::string input_directory =
492 std::cerr <<
"ViSP-images does not contain the folder: " << input_directory <<
"!" << std::endl;
// Dispatch on pixel type; both calls forward the same options to run<>().
496 if (use_color_image) {
498 return run(I_color, input_directory, opt_click_allowed, opt_display, useScanline, opt_lastFrame, use_mask);
502 return run(I_gray, input_directory, opt_click_allowed, opt_display, useScanline, opt_lastFrame, use_mask);
506 std::cout <<
"Catch an exception: " <<
e << std::endl;
// Fallback translation units when prerequisites are missing: explain why
// the test cannot run instead of failing to build silently.
510#elif !(defined(VISP_HAVE_LAPACK) || defined(VISP_HAVE_EIGEN3) || defined(VISP_HAVE_OPENCV))
513 std::cout <<
"Cannot run this example: install Lapack, Eigen3 or OpenCV" << std::endl;
519 std::cout <<
"Enable MBT module (VISP_HAVE_MODULE_MBT) to launch this test." << std::endl;
Generic class defining intrinsic camera parameters.
Implementation of column vector and the associated operations.
static const vpColor none
static const vpColor yellow
Display for windows using Direct3D 3rd party. Thus to enable this class Direct3D should be installed....
Display for windows using GDI (available on any windows 32 platform).
The vpDisplayGTK allows to display image using the GTK 3rd party library. Thus to enable this class G...
The vpDisplayOpenCV allows to display image using the OpenCV library. Thus to enable this class OpenC...
Use the X11 console to display images on unix-like OS. Thus to enable this class X11 should be instal...
void init(vpImage< unsigned char > &I, int win_x=-1, int win_y=-1, const std::string &win_title="") VP_OVERRIDE
static bool getClick(const vpImage< unsigned char > &I, bool blocking=true)
static void display(const vpImage< unsigned char > &I)
static void displayFrame(const vpImage< unsigned char > &I, const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam, double size, const vpColor &color=vpColor::none, unsigned int thickness=1, const vpImagePoint &offset=vpImagePoint(0, 0), const std::string &frameName="", const vpColor &textColor=vpColor::black, const vpImagePoint &textOffset=vpImagePoint(15, 15))
static void flush(const vpImage< unsigned char > &I)
static void displayRectangle(const vpImage< unsigned char > &I, const vpImagePoint &topLeft, unsigned int width, unsigned int height, const vpColor &color, bool fill=false, unsigned int thickness=1)
static void displayText(const vpImage< unsigned char > &I, const vpImagePoint &ip, const std::string &s, const vpColor &color)
error that can be emitted by ViSP classes.
Implementation of an homogeneous matrix and operations on such kind of matrices.
static void createDepthHistogram(const vpImage< uint16_t > &src_depth, vpImage< vpRGBa > &dest_rgba)
static void read(vpImage< unsigned char > &I, const std::string &filename, int backend=IO_DEFAULT_BACKEND)
Class that defines a 2D point in an image. This class is useful for image processing and stores only ...
Definition of the vpImage class member functions.
static double rad(double deg)
static double getMedian(const std::vector< double > &v)
static double getStdev(const std::vector< double > &v, bool useBesselCorrection=false)
static double getMean(const std::vector< double > &v)
static double deg(double rad)
Real-time 6D object pose tracking using its CAD model.
@ ROBUST_FEATURE_ESTIMATION
Robust scheme to estimate the normal of the plane.
static bool parse(int *argcPtr, const char **argv, vpArgvInfo *argTable, int flags)
static void convertPoint(const vpCameraParameters &cam, const double &u, const double &v, double &x, double &y)
Implementation of a pose vector and operations on poses.
Defines a rectangle in the plane.
read_data(CameraParameters|None cam_depth, ImageGray I, rs.pipeline pipe)
VISP_EXPORT double measureTimeMs()