#include <cstdio>
#include <fstream>
#include <iostream>

#include <visp3/core/vpConfig.h>
#include <visp3/core/vpDisplay.h>
#include <visp3/core/vpIoTools.h>
#include <visp3/gui/vpDisplayFactory.h>
#include <visp3/io/vpImageIo.h>
#include <visp3/mbt/vpMbGenericTracker.h>
#if defined(VISP_HAVE_PCL) && defined(VISP_HAVE_PCL_SEGMENTATION) && defined(VISP_HAVE_PCL_FILTERS) && defined(VISP_HAVE_PCL_COMMON)
#include <pcl/common/common.h>
#include <pcl/point_cloud.h>
#include <pcl/point_types.h>

#ifdef ENABLE_VISP_NAMESPACE
using namespace VISP_NAMESPACE_NAME;
#endif
namespace
{
// Depth feature flavor selectable with --use_depth on the command line.
typedef enum DepthType
{
DEPTH_UNUSED = 0, // depth data is ignored by the tracker
DEPTH_DENSE = 1,  // dense depth feature
DEPTH_NORMAL = 2, // depth-normal feature
DEPTH_COUNT = 3   // number of valid entries; also the "unknown" sentinel
}DepthType;
/*!
 * Convert a DepthType value into its printable name.
 * \param type : Depth feature type.
 * \return "unused", "dense" or "normals"; "unknown" for DEPTH_COUNT or any
 * out-of-range value.
 */
std::string depthTypeToString(const DepthType &type)
{
  if (type == DEPTH_UNUSED) {
    return "unused";
  }
  if (type == DEPTH_DENSE) {
    return "dense";
  }
  if (type == DEPTH_NORMAL) {
    return "normals";
  }
  // DEPTH_COUNT and anything unexpected share the fallback label.
  return "unknown";
}
/*!
 * Convert a depth feature name back into its DepthType value.
 *
 * Fixes the original loop, which had a stray closing brace (did not
 * compile) and accepted the first candidate unconditionally — there was no
 * comparison against \e name, so it would always have returned
 * DEPTH_UNUSED.
 *
 * \param name : Name to look up ("unused", "dense", "normals").
 * \return The matching DepthType, or DEPTH_COUNT when the name is unknown.
 */
DepthType depthTypeFromString(const std::string &name)
{
  DepthType type(DEPTH_COUNT);
  unsigned int i = 0;
  bool notFound = true;
  while ((i < static_cast<unsigned int>(DEPTH_COUNT)) && notFound) {
    DepthType candidate = static_cast<DepthType>(i);
    // Stop only when the candidate's printable name matches the request.
    if (depthTypeToString(candidate) == name) {
      notFound = false;
      type = candidate;
    }
    ++i;
  }
  return type;
}
std::string getDepthTypeList(const std::string &prefix = "<", const std::string &sep = " , ", const std::string &suffix = ">")
{
std::string list(prefix);
unsigned int i = 0;
while (i < static_cast<unsigned int>(DEPTH_COUNT - 1)) {
DepthType type = static_cast<DepthType>(i);
std::string name = depthTypeToString(type);
list += name + sep;
++i;
}
DepthType type = static_cast<DepthType>(DEPTH_COUNT - 1);
std::string name = depthTypeToString(type);
list += name + suffix;
return list;
}
// Minimal stand-in for the librealsense intrinsics structure, so the
// deprojection below works without a librealsense dependency.
// NOTE(review): coefficient ordering is assumed to follow librealsense's
// Brown-Conrady convention (k1, k2, p1, p2, k3), as suggested by their use
// in rs_deproject_pixel_to_point — confirm against the data producer.
struct vpRealsenseIntrinsics_t
{
float ppx; // principal point x, pixel offset from the left edge
float ppy; // principal point y, pixel offset from the top edge
float fx;  // focal length in x (pixels)
float fy;  // focal length in y (pixels)
float coeffs[5]; // distortion coefficients
};
/*!
 * Deproject a 2D pixel plus its depth measurement into a 3D point,
 * mirroring librealsense's rs_deproject_pixel_to_point.
 * \param[out] point : Resulting 3D coordinates (same unit as \e depth).
 * \param[in] intrin : Camera intrinsics and distortion coefficients.
 * \param[in] pixel : Pixel coordinates (u, v).
 * \param[in] depth : Depth value along the optical axis.
 */
void rs_deproject_pixel_to_point(float point[3], const vpRealsenseIntrinsics_t &intrin, const float pixel[2], float depth)
{
  // Normalized image coordinates.
  const float xn = (pixel[0] - intrin.ppx) / intrin.fx;
  const float yn = (pixel[1] - intrin.ppy) / intrin.fy;

  // Radial factor: 1 + k1*r^2 + k2*r^4 + k3*r^6.
  const float r2 = xn * xn + yn * yn;
  const float f = 1 + intrin.coeffs[0] * r2 + intrin.coeffs[1] * r2 * r2 + intrin.coeffs[4] * r2 * r2 * r2;

  // Radial plus tangential distortion applied to the normalized coordinates.
  const float ux = xn * f + 2 * intrin.coeffs[2] * xn * yn + intrin.coeffs[3] * (r2 + 2 * xn * xn);
  const float uy = yn * f + 2 * intrin.coeffs[3] * xn * yn + intrin.coeffs[2] * (r2 + 2 * yn * yn);

  // Scale by the measured depth to obtain the 3D point.
  point[0] = depth * ux;
  point[1] = depth * uy;
  point[2] = depth;
}
bool read_data(
unsigned int cpt,
const std::string &input_directory,
vpImage<vpRGBa> &I_color,
{
std::cerr << "Cannot read: " << filename_color << std::endl;
return false;
}
std::ifstream file_depth(filename_depth.c_str(), std::ios::in | std::ios::binary);
if (!file_depth.is_open()) {
return false;
}
unsigned int height = 0, width = 0;
I_depth_raw.
resize(height, width);
uint16_t depth_value = 0;
for (unsigned int i = 0; i < height; i++) {
for (unsigned int j = 0; j < width; j++) {
I_depth_raw[i][j] = depth_value;
}
}
pointcloud->width = width;
pointcloud->height = height;
pointcloud->resize(static_cast<size_t>(width * height));
const float depth_scale = 0.00100000005f;
vpRealsenseIntrinsics_t depth_intrinsic;
depth_intrinsic.ppx = 320.503509521484f;
depth_intrinsic.ppy = 235.602951049805f;
depth_intrinsic.fx = 383.970001220703f;
depth_intrinsic.fy = 383.970001220703f;
depth_intrinsic.coeffs[0] = 0.0f;
depth_intrinsic.coeffs[1] = 0.0f;
depth_intrinsic.coeffs[2] = 0.0f;
depth_intrinsic.coeffs[3] = 0.0f;
depth_intrinsic.coeffs[4] = 0.0f;
for (unsigned int i = 0; i < height; i++) {
for (unsigned int j = 0; j < width; j++) {
float scaled_depth = I_depth_raw[i][j] * depth_scale;
float point[3];
float pixel[2] = { static_cast<float>(j), static_cast<float>(i) };
rs_deproject_pixel_to_point(point, depth_intrinsic, pixel, scaled_depth);
pointcloud->points[static_cast<size_t>(i * width + j)].x = point[0];
pointcloud->points[static_cast<size_t>(i * width + j)].y = point[1];
pointcloud->points[static_cast<size_t>(i * width + j)].z = point[2];
}
}
return true;
}
}
// Entry point of the RGB-D model-based tracking example (color + depth).
// NOTE(review): this chunk appears truncated — declarations for I_color,
// I_gray, I_depth, I_depth_raw, tracker, cam_color, cam_depth, cMo,
// depth_M_color, display1/display2 and t are not visible, the display
// allocation and catch clause header are missing, and a dangling stream
// fragment remains near the end. Comments below describe only the visible
// logic.
int main(int argc, char *argv[])
{
// Default dataset directory and cube model/config files.
std::string input_directory = "data";
std::string config_color = "model/cube/cube.xml", config_depth = "model/cube/cube_depth.xml";
std::string model_color = "model/cube/cube.cao", model_depth = "model/cube/cube.cao";
std::string init_file = "model/cube/cube.init";
unsigned int frame_cpt = 0;
// Depth feature used by the tracker (dense by default).
DepthType use_depth = DEPTH_DENSE;
// Command-line parsing: every option consumes the following argument.
for (int i = 1; i < argc; i++) {
if (std::string(argv[i]) == "--input_directory" && i + 1 < argc) {
input_directory = std::string(argv[i + 1]);
}
else if (std::string(argv[i]) == "--config_color" && i + 1 < argc) {
config_color = std::string(argv[i + 1]);
}
else if (std::string(argv[i]) == "--config_depth" && i + 1 < argc) {
config_depth = std::string(argv[i + 1]);
}
else if (std::string(argv[i]) == "--model_color" && i + 1 < argc) {
model_color = std::string(argv[i + 1]);
}
else if (std::string(argv[i]) == "--model_depth" && i + 1 < argc) {
model_depth = std::string(argv[i + 1]);
}
else if (std::string(argv[i]) == "--init_file" && i + 1 < argc) {
init_file = std::string(argv[i + 1]);
}
else if (std::string(argv[i]) == "--use_depth" && i + 1 < argc) {
use_depth = depthTypeFromString(std::string(argv[i + 1]));
}
else if (std::string(argv[i]) == "--help" || std::string(argv[i]) == "-h") {
// Print usage plus a worked example, then exit successfully.
std::cout << "Usage: \n"
<< argv[0]
<< " --input_directory <data directory> --config_color <object.xml> --config_depth <object.xml>"
" --model_color <object.cao> --model_depth <object.cao> --init_file <object.init> --use_depth " << getDepthTypeList()
<< std::endl;
std::cout
<< "\nExample:\n"
<< argv[0]
<< " --config_color model/cube/cube.xml --config_depth model/cube/cube.xml"
" --model_color model/cube/cube.cao --model_depth model/cube/cube.cao --init_file model/cube/cube.init --use_depth "<< depthTypeToString(use_depth) <<"\n"
<< std::endl;
return EXIT_SUCCESS;
}
}
// Summarize which features are compiled in / selected.
std::cout << "Tracked features: " << std::endl;
#if defined(VISP_HAVE_OPENCV)
std::cout << " Use edges : 1" << std::endl;
#if defined(HAVE_OPENCV_IMGPROC) && defined(HAVE_OPENCV_VIDEO)
std::cout << " Use klt : 1" << std::endl;
#else
std::cout << " Use klt : 0" << std::endl;
#endif
std::cout << " Use depth : " << depthTypeToString(use_depth) << std::endl;
#else
std::cout << " Use edges : 1" << std::endl;
std::cout << " Use klt : 0" << std::endl;
std::cout << " Use depth : 0" << std::endl;
#endif
// Echo the effective configuration for reproducibility.
std::cout << "Config files: " << std::endl;
std::cout << " Input directory: "
<< "\"" << input_directory << "\"" << std::endl;
std::cout << " Config color: "
<< "\"" << config_color << "\"" << std::endl;
std::cout << " Config depth: "
<< "\"" << config_depth << "\"" << std::endl;
std::cout << " Model color : "
<< "\"" << model_color << "\"" << std::endl;
std::cout << " Model depth : "
<< "\"" << model_depth << "\"" << std::endl;
std::cout << " Init file : "
<< "\"" << init_file << "\"" << std::endl;
// Load the first frame (color, raw depth, organized point cloud).
// NOTE(review): I_color and I_depth_raw are not declared in this chunk.
pcl::PointCloud<pcl::PointXYZ>::Ptr pointcloud(new pcl::PointCloud<pcl::PointXYZ>());
read_data(frame_cpt, input_directory, I_color, I_depth_raw, pointcloud);
#if defined(VISP_HAVE_DISPLAY)
// NOTE(review): display allocation (factory for C++11+, raw new below)
// appears to have been stripped from these branches.
#if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11)
#else
#endif
// Place the color and depth windows side by side.
unsigned int _posx = 100, _posy = 50, _posdx = 10;
display1->init(I_gray, _posx, _posy, "Color stream");
display2->init(I_depth, _posx + I_gray.
getWidth() + _posdx, _posy,
"Depth stream");
#endif
// Tracker feature types per camera.
// NOTE(review): the code filling trackerTypes and constructing the
// vpMbGenericTracker is missing from this chunk.
std::vector<int> trackerTypes;
#if defined(VISP_HAVE_OPENCV) && defined(HAVE_OPENCV_IMGPROC) && defined(HAVE_OPENCV_VIDEO)
#else
#endif
if (use_depth == DEPTH_DENSE) {
}
else if (use_depth == DEPTH_NORMAL) {
}
#if defined(VISP_HAVE_PUGIXML)
// Load moving-edge/KLT/depth settings from the XML configuration files.
tracker.loadConfigFile(config_color, config_depth);
#else
// Without pugixml: set camera parameters and tracker settings manually.
{
cam_color.initPersProjWithoutDistortion(614.9, 614.9, 320.2, 241.5);
cam_depth.initPersProjWithoutDistortion(384.0, 384.0, 320.5, 235.6);
tracker.setCameraParameters(cam_color, cam_depth);
}
#if defined(VISP_HAVE_MODULE_KLT) && defined(VISP_HAVE_OPENCV) && defined(HAVE_OPENCV_IMGPROC) && defined(HAVE_OPENCV_VIDEO)
#endif
// Depth-normal plane estimation: PCL method (2) with 200 RANSAC
// iterations and a 0.001 inlier threshold.
tracker.setDepthNormalPclPlaneEstimationMethod(2);
tracker.setDepthNormalPclPlaneEstimationRansacMaxIter(200);
tracker.setDepthNormalPclPlaneEstimationRansacThreshold(0.001);
tracker.setDepthNormalSamplingStep(2, 2);
tracker.setDepthDenseSamplingStep(4, 4);
tracker.setNearClippingDistance(0.001);
tracker.setFarClippingDistance(5.0);
#endif
// Load the CAD model for both the color and depth trackers.
tracker.loadModel(model_color, model_depth);
tracker.getCameraParameters(cam_color, cam_depth);
std::cout <<
"Camera parameters for color camera (from XML file): " <<
cam_color << std::endl;
std::cout <<
"Camera parameters for depth camera (from XML file): " <<
cam_depth << std::endl;
// Depth-to-color extrinsics are stored in a text file in the dataset.
// NOTE(review): the code actually loading depth_M_color from the stream is
// missing — the file is opened and immediately closed.
{
std::ifstream file((std::string(input_directory + "/depth_M_color.txt")).c_str());
file.close();
}
// Register the depth camera pose relative to the color camera.
std::map<std::string, vpHomogeneousMatrix> mapOfCameraTransformations;
tracker.setCameraTransformationMatrix(mapOfCameraTransformations);
// Interactive initialization: the user clicks the model points listed in
// the .init file on the grayscale image.
std::map<std::string, const vpImage<unsigned char> *> mapOfImages;
mapOfImages["Camera1"] = &I_gray;
std::map<std::string, std::string> mapOfInitFiles;
mapOfInitFiles["Camera1"] = init_file;
tracker.initClick(mapOfImages, mapOfInitFiles,
true);
mapOfImages.clear();
// Empty cloud handed to track() when depth is disabled, so the depth
// camera entry still exists.
pcl::PointCloud<pcl::PointXYZ>::Ptr empty_pointcloud(new pcl::PointCloud<pcl::PointXYZ>);
std::vector<double> times_vec;
try {
bool quit = false;
// Main loop: read a frame, track, display, until read_data fails or the
// user quits.
while (!quit) {
quit = !
read_data(frame_cpt, input_directory, I_color, I_depth_raw, pointcloud);
mapOfImages["Camera1"] = &I_gray;
std::map<std::string, pcl::PointCloud<pcl::PointXYZ>::ConstPtr> mapOfPointclouds;
if (use_depth == DEPTH_UNUSED) {
mapOfPointclouds["Camera2"] = empty_pointcloud;
}
else {
mapOfPointclouds["Camera2"] = pointcloud;
}
// One tracking iteration on the grayscale image plus point cloud.
tracker.track(mapOfImages, mapOfPointclouds);
std::cout <<
"iter: " << frame_cpt <<
" cMo:\n" <<
cMo << std::endl;
// Overlay the model in both views; the depth view uses the composed
// pose depth_M_color * cMo.
tracker.display(I_gray, I_depth, cMo, depth_M_color * cMo, cam_color, cam_depth,
vpColor::red, 3);
// NOTE(review): the computation of t and the vpDisplay text/flush calls
// that consume the string streams below are missing from this chunk.
times_vec.push_back(t);
std::stringstream ss;
ss <<
"Computation time: " <<
t <<
" ms";
{
std::stringstream ss;
ss <<
"Nb features: " <<
tracker.getError().size();
}
{
std::stringstream ss;
ss <<
"Features: edges " <<
tracker.getNbFeaturesEdge() <<
", klt " <<
tracker.getNbFeaturesKlt() <<
", depth "
<<
tracker.getNbFeaturesDepthDense();
}
// NOTE(review): quit forced true means only one frame is processed —
// likely a stripped mouse-click handler set this conditionally.
quit = true;
}
frame_cpt++;
}
}
// NOTE(review): the catch clause header (e.g. catch (const vpException &e))
// is missing; 'e' below is otherwise undeclared.
std::cout <<
"Catch exception: " <<
e.getStringMessage() << std::endl;
}
// Pre-C++11 builds allocated the displays with new; release them here.
#if (VISP_CXX_STANDARD < VISP_CXX_STANDARD_11) && defined(VISP_HAVE_DISPLAY)
if (display1 != nullptr) {
delete display1;
}
if (display2 != nullptr) {
delete display2;
}
#endif
// NOTE(review): dangling stream fragment — the statement it belonged to
// (likely a processing-time summary over times_vec) is missing.
<< std::endl;
return EXIT_SUCCESS;
}
#else
// Fallback entry point when ViSP is built without the required PCL modules:
// explain how to enable the tutorial and exit successfully.
int main()
{
  const char *msg =
    "To run this tutorial, ViSP should be build with PCL library."
    " Install libpcl, configure and build again ViSP...";
  std::cout << msg << std::endl;
  return EXIT_SUCCESS;
}
#endif
Generic class defining intrinsic camera parameters.
static const vpColor none
Class that defines generic functionalities for display.
static bool getClick(const vpImage< unsigned char > &I, bool blocking=true)
static void display(const vpImage< unsigned char > &I)
static void displayFrame(const vpImage< unsigned char > &I, const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam, double size, const vpColor &color=vpColor::none, unsigned int thickness=1, const vpImagePoint &offset=vpImagePoint(0, 0), const std::string &frameName="", const vpColor &textColor=vpColor::black, const vpImagePoint &textOffset=vpImagePoint(15, 15))
static void flush(const vpImage< unsigned char > &I)
static void displayText(const vpImage< unsigned char > &I, const vpImagePoint &ip, const std::string &s, const vpColor &color)
error that can be emitted by ViSP classes.
Implementation of an homogeneous matrix and operations on such kind of matrices.
static void createDepthHistogram(const vpImage< uint16_t > &src_depth, vpImage< vpRGBa > &dest_rgba)
static void convert(const vpImage< unsigned char > &src, vpImage< vpRGBa > &dest)
static void read(vpImage< unsigned char > &I, const std::string &filename, int backend=IO_DEFAULT_BACKEND)
Definition of the vpImage class member functions.
unsigned int getWidth() const
void resize(unsigned int h, unsigned int w)
resize the image : Image initialization
unsigned int getHeight() const
Wrapper for the KLT (Kanade-Lucas-Tomasi) feature tracker implemented in OpenCV. Thus to enable this ...
void setBlockSize(int blockSize)
void setQuality(double qualityLevel)
void setHarrisFreeParameter(double harris_k)
void setMaxFeatures(int maxCount)
void setMinDistance(double minDistance)
void setWindowSize(int winSize)
void setPyramidLevels(int pyrMaxLevel)
static double rad(double deg)
static double getMedian(const std::vector< double > &v)
static double getStdev(const std::vector< double > &v, bool useBesselCorrection=false)
static double getMean(const std::vector< double > &v)
Real-time 6D object pose tracking using its CAD model.
@ ROBUST_FEATURE_ESTIMATION
Robust scheme to estimate the normal of the plane.
void setMu1(const double &mu_1)
void setRange(const unsigned int &range)
void setLikelihoodThresholdType(const vpLikelihoodThresholdType likelihood_threshold_type)
void setMaskNumber(const unsigned int &mask_number)
void setThreshold(const double &threshold)
void setSampleStep(const double &sample_step)
void setMaskSize(const unsigned int &mask_size)
void setMu2(const double &mu_2)
read_data(CameraParameters|None cam_depth, ImageGray I, rs.pipeline pipe)
std::shared_ptr< vpDisplay > createDisplay()
Return a smart pointer vpDisplay specialization if a GUI library is available or nullptr otherwise.
vpDisplay * allocateDisplay()
Return a newly allocated vpDisplay specialization if a GUI library is available or nullptr otherwise.
VISP_EXPORT double measureTimeMs()