32#include <visp3/core/vpConfig.h>
34#if defined(VISP_HAVE_NLOHMANN_JSON)
36#include <visp3/core/vpException.h>
37#include <visp3/core/vpImageException.h>
38#include <visp3/core/vpIoTools.h>
39#include <visp3/core/vpRGBa.h>
41#include <visp3/io/vpVideoReader.h>
42#include <visp3/io/vpVideoWriter.h>
44#include <visp3/ar/vpPanda3DFrameworkManager.h>
46#include <visp3/rbt/vpRBTracker.h>
48#ifdef ENABLE_VISP_NAMESPACE
52#include "render-based-tutorial-utils.h"
62 .addArgument(
"--color",
colorSequence,
true,
"The color sequence (in video reader format, eg., /path/to/I\%04d.png)")
63 .addArgument(
"--depth",
depthFolder,
false,
"The depth images associated to the color sequence. Frames should be aligned")
64 .addArgument(
"--start",
startFrame,
false,
"The first frame of the sequence")
65 .addArgument(
"--step",
frameStep,
false,
"How many frames should be read between calls to the tracker")
66 .addFlag(
"--step-by-step",
stepByStep,
"Go through the sequence interactively, frame by frame");
83int main(
int argc,
const char **argv)
85 vpRBTrackerTutorial::BaseArguments baseArgs;
87 vpRBTrackerTutorial::vpRBExperimentLogger logger;
88 vpRBTrackerTutorial::vpRBExperimentPlotter plotter;
90 vpJsonArgumentParser parser(
"Tutorial showing how to use the Render-Based Tracker on an offline sequence",
"--config",
"/");
91 baseArgs.registerArguments(parser);
93 logger.registerArguments(parser);
94 plotter.registerArguments(parser);
96 parser.parse(argc, argv);
98 baseArgs.postProcessArguments();
100 plotter.postProcessArguments(baseArgs.display);
102 if (baseArgs.enableRenderProfiling) {
103 vpRBTrackerTutorial::enableRendererProfiling();
106 baseArgs.display =
true;
115 tracker.loadConfigurationFile(baseArgs.trackerConfiguration);
122 std::cout <<
"Input video" << std::endl;
123 std::cout <<
" Filename : " << sequenceArgs.
colorSequence << std::endl;
126 std::cout <<
" First frame: " << sequenceArgs.
startFrame << std::endl;
129 readerRGB.
open(Icol);
139 std::cout <<
" Image size : " <<
width <<
" x " <<
height << std::endl;
153 std::vector<std::shared_ptr<vpDisplay>> displays, debugDisplays;
155 if (baseArgs.display) {
156 displays = vpRBTrackerTutorial::createDisplays(Id, Icol, depthDisplay, IProbaDisplay);
157 if (baseArgs.debugDisplay) {
162 "Normals in object frame", InormDisplay,
163 "Depth canny", ICannyDisplay,
164 "Color render", IRender
172 nlohmann::json result = nlohmann::json::array();
175 std::cout <<
"Starting init" << std::endl;
177 if (baseArgs.hasInlineInit()) {
178 tracker.setPose(baseArgs.cMoInit);
180 else if (baseArgs.display) {
181 tracker.initClick(Id, baseArgs.initFile,
true);
187 if (baseArgs.display) {
192 unsigned int iter = 1;
199 for (
unsigned int sp = 0; sp < sequenceArgs.
frameStep; ++sp) {
206 float scale = 9.999999747378752e-05;
207 depth.resize(dataArray.getHeight(), dataArray.getWidth());
208 depthDisplay.resize(dataArray.getHeight(), dataArray.getWidth());
209#ifdef VISP_HAVE_OPENMP
210#pragma omp parallel for
212 for (
int i = 0; i < static_cast<int>(dataArray.getSize()); ++
i) {
213 float value =
static_cast<float>(dataArray.bitmap[
i]) * scale;
215 depthDisplay.bitmap[
i] = value > baseArgs.maxDepthDisplay ? 0.f :
static_cast<unsigned char>((
depth.bitmap[
i] / baseArgs.maxDepthDisplay) * 255.f);
223 if (
depth.getSize() == 0) {
224 trackingResult =
tracker.track(Id, Icol);
227 trackingResult =
tracker.track(Id, Icol, depth);
231 case vpRBTrackingStoppingReason::EXCEPTION:
233 std::cout <<
"Encountered an exception during tracking, pose was not updated" << std::endl;
236 case vpRBTrackingStoppingReason::NOT_ENOUGH_FEATURES:
238 std::cout <<
"There were not enough feature to perform tracking" << std::endl;
241 case vpRBTrackingStoppingReason::OBJECT_NOT_IN_IMAGE:
243 std::cout <<
"Object is not in image" << std::endl;
246 case vpRBTrackingStoppingReason::CONVERGENCE_CRITERION:
248 std::cout <<
"Convergence criterion reached:" << std::endl;
249 std::cout <<
"- Num iterations: " << trackingResult.
getNumIterations() << std::endl;
253 case vpRBTrackingStoppingReason::MAX_ITERS:
264 if (baseArgs.display) {
265 if (baseArgs.debugDisplay) {
268 vpRBTrackerTutorial::displayNormals(lastFrame.
renders.
normals, InormDisplay);
282 tracker.display(Id, Icol, depthDisplay);
283 tracker.displayMask(IProbaDisplay);
289 if (
depth.getSize() > 0) {
296 result.push_back(cMo);
298 logger.logFrame(tracker, iter, Id, Icol, depthDisplay, IProbaDisplay);
300 if (sequenceArgs.
stepByStep && baseArgs.display) {
304 std::cout <<
"Iter: " <<
iter << std::endl;
309 std::cout <<
"End of video reached" << std::endl;
313 std::cout <<
"Frame took: " << frameEnd - frameStart <<
"ms" << std::endl;
314 plotter.plot(tracker, (frameEnd - expStart) / 1000.0);
330 std::cout <<
"This tutorial requires nlohmann_json 3rdparty." << std::endl;
Generic class defining intrinsic camera parameters.
static const vpColor none
static bool getClick(const vpImage< unsigned char > &I, bool blocking=true)
static void display(const vpImage< unsigned char > &I)
static void displayFrame(const vpImage< unsigned char > &I, const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam, double size, const vpColor &color=vpColor::none, unsigned int thickness=1, const vpImagePoint &offset=vpImagePoint(0, 0), const std::string &frameName="", const vpColor &textColor=vpColor::black, const vpImagePoint &textOffset=vpImagePoint(15, 15))
static void flush(const vpImage< unsigned char > &I)
static void displayText(const vpImage< unsigned char > &I, const vpImagePoint &ip, const std::string &s, const vpColor &color)
Error that can be emitted by ViSP classes.
@ badValue
Used to indicate that a value is not in the allowed range.
@ notImplementedError
Not implemented.
unsigned int getWidth() const
Return the number of columns in the image.
unsigned int getHeight() const
Return the number of rows in the image.
Implementation of an homogeneous matrix and operations on such kind of matrices.
static void convert(const vpImage< unsigned char > &src, vpImage< vpRGBa > &dest)
Definition of the vpImage class member functions.
unsigned int getSize() const
Command line argument parsing with support for JSON files. If a JSON file is supplied,...
static vpPanda3DFrameworkManager & getInstance()
Class implementing the Render-Based Tracker (RBT).
vpRBTrackingStoppingReason getStoppingReason() const
const std::vector< double > & getConvergenceMetricValues() const
unsigned int getNumIterations() const
Class that enables to manipulate easily a video file or a sequence of images. As it inherits from the...
void open(vpImage< vpRGBa > &I) VP_OVERRIDE
void setFileName(const std::string &filename)
void setFirstFrameIndex(const long first_frame)
long getFirstFrameIndex()
void acquire(vpImage< vpRGBa > &I) VP_OVERRIDE
VISP_EXPORT NpyArray npy_load(const std::string &fname)
std::vector< std::shared_ptr< vpDisplay > > makeDisplayGrid(unsigned int rows, unsigned int cols, unsigned int startX, unsigned int startY, unsigned int paddingX, unsigned int paddingY, Args &... args)
Create a grid of displays, given a set of images. All the displays will be initialized in the correct...
VISP_EXPORT double measureTimeMs()
void registerArguments(vpJsonArgumentParser &parser)
std::string colorSequence
void postProcessArguments()
std::vector< size_t > shape
vpImage< vpRGBf > normals
vpImage< unsigned char > isSilhouette
Image containing the orientation of the gradients.
vpImage< float > silhouetteCanny