37#include <visp3/core/vpConfig.h>
39#if defined(VISP_HAVE_CATCH2)
41#include <catch_amalgamated.hpp>
43#include <visp3/core/vpIoTools.h>
44#include <visp3/io/vpImageIo.h>
45#include <visp3/mbt/vpMbGenericTracker.h>
49#include <visp3/gui/vpDisplayX.h>
52#ifdef ENABLE_VISP_NAMESPACE
// Global flag bound to a custom Catch2 command-line option in main();
// gates the BENCHMARK sections so a plain test run stays fast. Off by default.
58bool runBenchmark =
false;
// NOTE(review): this is a garbled Doxygen "source" listing — the integer fused
// to the start of many lines is the original file's line number, and the lines
// between consecutive numbers are missing from this extract. Code is kept
// byte-identical below; comments only.
//
// read_data<Type>(): loads one dataset frame — an image (extension depends on
// the dataset version, see below), a raw binary depth map, the per-pixel 3D
// point cloud rebuilt from it (one vpColVector per depth pixel, row-major),
// and the ground-truth pose cMo read as 16 whitespace-separated values
// (4x4, row-major) from a text file.
60template <
typename Type>
// Only grayscale (unsigned char) and color (vpRGBa) images are supported.
64 static_assert(std::is_same<Type, unsigned char>::value || std::is_same<Type, vpRGBa>::value,
65 "Template function supports only unsigned char and vpRGBa images!");
66#if defined(VISP_HAVE_DATASET)
// Dataset >= 3.6.0 ships png images; older dataset versions ship pgm.
67#if VISP_HAVE_DATASET_VERSION >= 0x030600
68 std::string ext(
"png");
70 std::string ext(
"pgm");
74 std::string ext(
"png");
// Depth map is stored as a raw binary stream.
87 std::ifstream file_depth(depth_filename.c_str(), std::ios::in | std::ios::binary);
88 if (!file_depth.is_open())
// One 3D point per depth pixel; flat buffer indexed i * width + j.
93 I_depth.resize(depth_height, depth_width);
94 pointcloud.resize(depth_height * depth_width);
97 for (
unsigned int i = 0;
i <
I_depth.getHeight();
i++) {
98 for (
unsigned int j = 0;
j <
I_depth.getWidth();
j++) {
106 pointcloud[
i *
I_depth.getWidth() +
j] = pt3d;
// Ground-truth pose: 4x4 homogeneous matrix read as plain text, row-major.
110 std::ifstream file_pose(pose_filename.c_str());
111 if (!file_pose.is_open()) {
115 for (
unsigned int i = 0;
i < 4;
i++) {
116 for (
unsigned int j = 0;
j < 4;
j++) {
117 file_pose >>
cMo[
i][
j];
// NOTE(review): garbled Doxygen source listing — leading integers are the
// original file's line numbers and many intermediate lines are missing.
// Code is kept byte-identical; comments only.
//
// Benchmarks the generic model-based tracker (edge / KLT / depth-dense
// combinations) on a preloaded 40-frame RGB-D sequence, then CHECKs the
// estimated pose against the stored ground truth within fixed tolerances.
125TEST_CASE(
"Benchmark generic tracker",
"[benchmark]")
128 std::vector<int> tracker_type(2);
133 const std::string input_directory =
136 const bool verbose =
false;
// XML config files are only usable when pugixml is available; otherwise the
// camera/tracker parameters are set programmatically below.
137#if defined(VISP_HAVE_PUGIXML)
138 const std::string configFileCam1 = input_directory + std::string(
"/Config/chateau.xml");
139 const std::string configFileCam2 = input_directory + std::string(
"/Config/chateau_depth.xml");
142 tracker.loadConfigFile(configFileCam1, configFileCam2, verbose);
// Same pinhole intrinsics (no distortion) for both color and depth cameras.
146 cam_color.initPersProjWithoutDistortion(700.0, 700.0, 320.0, 240.0);
147 cam_depth.initPersProjWithoutDistortion(700.0, 700.0, 320.0, 240.0);
148 tracker.setCameraParameters(cam_color, cam_depth);
164#if defined(VISP_HAVE_MODULE_KLT) && defined(VISP_HAVE_OPENCV) && defined(HAVE_OPENCV_IMGPROC) && defined(HAVE_OPENCV_VIDEO)
// Depth-normal / depth-dense feature extraction settings.
180 tracker.setDepthNormalPclPlaneEstimationMethod(2);
181 tracker.setDepthNormalPclPlaneEstimationRansacMaxIter(200);
182 tracker.setDepthNormalPclPlaneEstimationRansacThreshold(0.001);
183 tracker.setDepthNormalSamplingStep(2, 2);
185 tracker.setDepthDenseSamplingStep(4, 4);
189 tracker.setNearClippingDistance(0.01);
190 tracker.setFarClippingDistance(2.0);
// Same chateau CAD model for both cameras, plus a cube model placed with T.
195 tracker.loadModel(input_directory +
"/Models/chateau.cao", input_directory +
"/Models/chateau.cao", verbose);
206 tracker.loadModel(input_directory +
"/Models/cube.cao", verbose, T);
211 std::vector<vpColVector> pointcloud;
214 tracker.getCameraParameters(cam_color, cam_depth);
// Extrinsic transformation from the color frame to the depth ("Camera2") frame.
218 tracker.setCameraTransformationMatrix(
"Camera2", depth_M_color);
// Preload the whole sequence in memory so the BENCHMARK loop does no disk I/O.
221 std::vector<vpImage<unsigned char> > images;
222 std::vector<vpImage<uint16_t> > depth_raws;
223 std::vector<std::vector<vpColVector> > pointclouds;
224 std::vector<vpHomogeneousMatrix> cMo_truth_all;
// Frames 1..40 forward then 40..1 backward, so the sequence ends where it
// started.
226 for (
int i = 1;
i <= 40;
i++) {
227 if (
read_data(input_directory, i, cam_depth, I, I_depth_raw, pointcloud, cMo_truth)) {
229 depth_raws.push_back(I_depth_raw);
230 pointclouds.push_back(pointcloud);
231 cMo_truth_all.push_back(cMo_truth);
235 for (
int i = 40;
i >= 1;
i--) {
236 if (
read_data(input_directory, i, cam_depth, I, I_depth_raw, pointcloud, cMo_truth)) {
238 depth_raws.push_back(I_depth_raw);
239 pointclouds.push_back(pointcloud);
240 cMo_truth_all.push_back(cMo_truth);
// One benchmark run per tracker-type combination; the KLT-based entries are
// only added when the required OpenCV modules are available.
246 std::vector<std::map<std::string, int> > mapOfTrackerTypes;
247 mapOfTrackerTypes.push_back(
249 mapOfTrackerTypes.push_back(
251#if defined(VISP_HAVE_OPENCV) && defined(HAVE_OPENCV_IMGPROC) && defined(HAVE_OPENCV_VIDEO)
252 mapOfTrackerTypes.push_back(
// Human-readable benchmark labels, kept in the same order as
// mapOfTrackerTypes above.
260 std::vector<std::string> benchmarkNames = {
262 "Edge + Depth dense MBT",
263#if defined(VISP_HAVE_OPENCV)
265 "KLT + depth dense MBT",
266 "Edge + KLT + depth dense MBT"
// monoculars[idx] flags configurations that use the color camera only; the
// depth image sizes are then passed as 0 further below.
270 std::vector<bool> monoculars = {
273#if defined(VISP_HAVE_OPENCV)
280 for (
size_t idx = 0;
idx < mapOfTrackerTypes.size();
idx++) {
282 tracker.setTrackerType(mapOfTrackerTypes[idx]);
// Reconfigure the tracker from scratch for each type combination (mirrors
// the initial setup above).
284 const bool verbose =
false;
285#if defined(VISP_HAVE_PUGIXML)
286 tracker.loadConfigFile(configFileCam1, configFileCam2, verbose);
290 cam_color.initPersProjWithoutDistortion(700.0, 700.0, 320.0, 240.0);
291 cam_depth.initPersProjWithoutDistortion(700.0, 700.0, 320.0, 240.0);
292 tracker.setCameraParameters(cam_color, cam_depth);
308#if defined(VISP_HAVE_MODULE_KLT) && defined(VISP_HAVE_OPENCV) && defined(HAVE_OPENCV_IMGPROC) && defined(HAVE_OPENCV_VIDEO)
324 tracker.setDepthNormalPclPlaneEstimationMethod(2);
325 tracker.setDepthNormalPclPlaneEstimationRansacMaxIter(200);
326 tracker.setDepthNormalPclPlaneEstimationRansacThreshold(0.001);
327 tracker.setDepthNormalSamplingStep(2, 2);
329 tracker.setDepthDenseSamplingStep(4, 4);
333 tracker.setNearClippingDistance(0.01);
334 tracker.setFarClippingDistance(2.0);
337 tracker.loadModel(input_directory +
"/Models/chateau.cao", input_directory +
"/Models/chateau.cao", verbose);
338 tracker.loadModel(input_directory +
"/Models/cube.cao", verbose, T);
339 tracker.initFromPose(images.front(), cMo_truth_all.front());
// For monocular runs the "Camera2" size is 0 so the depth input is ignored.
341 std::map<std::string, unsigned int> mapOfWidths, mapOfHeights;
342 mapOfWidths[
"Camera2"] = monoculars[
idx] ? 0 : I_depth_raw.
getWidth();
343 mapOfHeights[
"Camera2"] = monoculars[
idx] ? 0 : I_depth_raw.
getHeight();
// Timed section: track the preloaded sequence under Catch2's BENCHMARK.
347 BENCHMARK(benchmarkNames[idx].c_str())
353 vpDisplayX d_depth(I_depth, I.getWidth(), 0,
"Depth image");
354 tracker.setDisplayFeatures(
true);
357 tracker.initFromPose(images.front(), cMo_truth_all.front());
359 for (
size_t i = 0;
i < images.size();
i++) {
361 const std::vector<vpColVector> &pointcloud_current = pointclouds[
i];
// Feed the tracker through the multi-sensor map interface:
// "Camera1" = color/gray image, "Camera2" = point cloud.
370 std::map<std::string, const vpImage<unsigned char> *> mapOfImages;
371 mapOfImages[
"Camera1"] = &I_current;
373 std::map<std::string, const std::vector<vpColVector> *> mapOfPointclouds;
374 mapOfPointclouds[
"Camera2"] = &pointcloud_current;
376 tracker.track(mapOfImages, mapOfPointclouds, mapOfWidths, mapOfHeights);
380 tracker.display(I, I_depth, cMo, depth_M_color * cMo, cam_color, cam_depth,
vpColor::red, 3);
385 I, 40, 20, std::string(
"Nb features: " + std::to_string(
tracker.getError().getRows())),
vpColor::red);
// Split the 6-vector pose error into translation (components 0..2) and
// theta-u rotation (components 3..5).
403 for (
unsigned int i = 0;
i < 3;
i++) {
404 t_err[
i] = pose_est[
i] - pose_truth[
i];
405 tu_err[
i] = pose_est[
i + 3] - pose_truth[
i + 3];
// Accept up to 6 mm of translation and 0.03 rad of rotation error.
408 const double max_translation_error = 0.006;
409 const double max_rotation_error = 0.03;
410 CHECK(sqrt(t_err.sumSquare()) < max_translation_error);
411 CHECK(sqrt(tu_err.sumSquare()) < max_rotation_error);
// Catch2 entry point: extends the default command line with a custom option
// bound to the global runBenchmark flag, applies argv, and runs the
// registered test cases.
417int main(
int argc,
char *argv[])
419 Catch::Session session;
// Compose Catch's own CLI with our extra Clara option.
421 auto cli = session.cli()
422 | Catch::Clara::Opt(runBenchmark)
424 (
"run benchmark comparing naive code with ViSP implementation");
428 session.applyCommandLine(argc, argv);
429 int numFailed = session.run();
// Fallback entry point used when Catch2 support is not compiled in:
// there is nothing to benchmark, so report success immediately.
int main() { return EXIT_SUCCESS; }
Generic class defining intrinsic camera parameters.
Implementation of column vector and the associated operations.
static const vpColor none
Use the X11 console to display images on unix-like OS. Thus to enable this class X11 should be installed.
static void display(const vpImage< unsigned char > &I)
static void displayFrame(const vpImage< unsigned char > &I, const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam, double size, const vpColor &color=vpColor::none, unsigned int thickness=1, const vpImagePoint &offset=vpImagePoint(0, 0), const std::string &frameName="", const vpColor &textColor=vpColor::black, const vpImagePoint &textOffset=vpImagePoint(15, 15))
static void flush(const vpImage< unsigned char > &I)
static void displayText(const vpImage< unsigned char > &I, const vpImagePoint &ip, const std::string &s, const vpColor &color)
Implementation of an homogeneous matrix and operations on such kind of matrices.
static void createDepthHistogram(const vpImage< uint16_t > &src_depth, vpImage< vpRGBa > &dest_rgba)
static void read(vpImage< unsigned char > &I, const std::string &filename, int backend=IO_DEFAULT_BACKEND)
Definition of the vpImage class member functions.
unsigned int getWidth() const
unsigned int getHeight() const
Wrapper for the KLT (Kanade-Lucas-Tomasi) feature tracker implemented in OpenCV. Thus to enable this class OpenCV should be installed.
void setBlockSize(int blockSize)
void setQuality(double qualityLevel)
void setHarrisFreeParameter(double harris_k)
void setMaxFeatures(int maxCount)
void setMinDistance(double minDistance)
void setWindowSize(int winSize)
void setPyramidLevels(int pyrMaxLevel)
static double rad(double deg)
Real-time 6D object pose tracking using its CAD model.
@ ROBUST_FEATURE_ESTIMATION
Robust scheme to estimate the normal of the plane.
void setMu1(const double &mu_1)
void setRange(const unsigned int &range)
void setLikelihoodThresholdType(const vpLikelihoodThresholdType likelihood_threshold_type)
void setMaskNumber(const unsigned int &mask_number)
void setThreshold(const double &threshold)
void setSampleStep(const double &sample_step)
void setMaskSize(const unsigned int &mask_size)
void setMu2(const double &mu_2)
static void convertPoint(const vpCameraParameters &cam, const double &u, const double &v, double &x, double &y)
Implementation of a pose vector and operations on poses.
read_data(CameraParameters|None cam_depth, ImageGray I, rs.pipeline pipe)
VISP_EXPORT int wait(double t0, double t)