34#include <visp3/core/vpCPUFeatures.h>
35#include <visp3/mbt/vpMbtFaceDepthDense.h>
37#if defined(VISP_HAVE_PCL) && defined(VISP_HAVE_PCL_COMMON)
38#include <pcl/common/point_tests.h>
41#if defined __SSE2__ || defined _M_X64 || (defined _M_IX86_FP && _M_IX86_FP >= 2)
43#define VISP_HAVE_SSE2 1
47#if !defined(__FMA__) && defined(__AVX2__)
51#if defined _WIN32 && defined(_M_ARM64)
52#define _ARM64_DISTINCT_NEON_TYPES
55#define VISP_HAVE_NEON 1
56#elif (defined(__ARM_NEON__) || defined (__ARM_NEON)) && defined(__aarch64__)
58#define VISP_HAVE_NEON 1
60#define VISP_HAVE_NEON 0
63#define USE_SIMD_CODE 1
65#if VISP_HAVE_SSE2 && USE_SIMD_CODE
71#if VISP_HAVE_NEON && USE_SIMD_CODE
77#if defined(VISP_HAVE_OPENCV) && \
78 (VISP_HAVE_OPENCV_VERSION >= 0x040101 || (VISP_HAVE_OPENCV_VERSION < 0x040000 && VISP_HAVE_OPENCV_VERSION >= 0x030407)) && USE_SIMD_CODE
85#if (VISP_HAVE_OPENCV_VERSION >= 0x040B00) || (VISP_HAVE_OPENCV_VERSION < 0x040900) || \
86 ( (VISP_HAVE_OPENCV_VERSION >= 0x040900) && (VISP_HAVE_OPENCV_VERSION < 0x040B00) && (USE_SSE || USE_NEON) )
87# define USE_OPENCV_HAL 1
88# include <opencv2/core/simd_intrinsics.hpp>
89# include <opencv2/core/hal/intrin.hpp>
91# define USE_OPENCV_HAL 0
94# define USE_OPENCV_HAL 0
97#if !USE_OPENCV_HAL && (USE_SSE || USE_NEON)
98#if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11)
// De-interleave 6 packed 64-bit words laid out in memory as
// [a0 b0 c0 a1 b1 c1] into a = {a0,a1}, b = {b0,b1}, c = {c0,c1}.
// SSE2 has no 3-way deinterleave instruction, so it is emulated with
// one half-swap shuffle and three 64-bit unpacks.
inline void v_load_deinterleave(const uint64_t *ptr, __m128i &a, __m128i &b, __m128i &c)
{
  const __m128i lo = _mm_loadu_si128(reinterpret_cast<const __m128i *>(ptr));      // {a0, b0}
  const __m128i mid = _mm_loadu_si128(reinterpret_cast<const __m128i *>(ptr + 2)); // {c0, a1}
  const __m128i hi = _mm_loadu_si128(reinterpret_cast<const __m128i *>(ptr + 4));  // {b1, c1}

  // 0x4e swaps the two 64-bit halves: {c0, a1} -> {a1, c0}
  const __m128i midSwapped = _mm_shuffle_epi32(mid, 0x4e);

  a = _mm_unpacklo_epi64(lo, midSwapped);                 // {a0, a1}
  b = _mm_unpacklo_epi64(_mm_unpackhi_epi64(lo, lo), hi); // {b0, b1}
  c = _mm_unpackhi_epi64(midSwapped, hi);                 // {c0, c1}
}
118inline void v_load_deinterleave(
const double *ptr, __m128d &a0, __m128d &b0, __m128d &c0)
121 v_load_deinterleave((
const uint64_t *)ptr, a1, b1, c1);
122 a0 = _mm_castsi128_pd(a1);
123 b0 = _mm_castsi128_pd(b1);
124 c0 = _mm_castsi128_pd(c1);
// Build {a[0], b[0]}: the low lane of each operand, packed into one vector.
// Routed through the integer domain because SSE2's 64-bit unpack lives there;
// the casts are free bit-reinterpretations.
inline __m128d v_combine_low(const __m128d &a, const __m128d &b)
{
  return _mm_castsi128_pd(
      _mm_unpacklo_epi64(_mm_castpd_si128(a), _mm_castpd_si128(b)));
}
// Build {a[1], b[1]}: the high lane of each operand, packed into one vector.
// Same integer-domain trick as v_combine_low, using the high unpack.
inline __m128d v_combine_high(const __m128d &a, const __m128d &b)
{
  return _mm_castsi128_pd(
      _mm_unpackhi_epi64(_mm_castpd_si128(a), _mm_castpd_si128(b)));
}
// Element-wise multiply-add: a * b + c.
inline __m128d v_fma(const __m128d &a, const __m128d &b, const __m128d &c)
{
#ifdef __FMA__
  // Hardware fused multiply-add: one instruction, single rounding.
  return _mm_fmadd_pd(a, b, c);
#else
  // Fallback for pre-FMA targets: separate mul and add (two roundings).
  return _mm_add_pd(_mm_mul_pd(a, b), c);
#endif
}
// NEON counterpart of the SSE de-interleave:
// [x0 y0 z0 x1 y1 z1] -> a0 = {x0,x1}, b0 = {y0,y1}, c0 = {z0,z1}.
// vld3q_f64 performs the whole 3-way de-interleave in a single instruction.
inline void v_load_deinterleave(const double *ptr, float64x2_t &a0, float64x2_t &b0, float64x2_t &c0)
{
  const float64x2x3_t xyz = vld3q_f64(ptr);
  a0 = xyz.val[0];
  b0 = xyz.val[1];
  c0 = xyz.val[2];
}
// Build {va[0], vb[0]}: pair the low lane of each operand into one vector.
inline float64x2_t v_combine_low(const float64x2_t &va, const float64x2_t &vb)
{
  const float64x1_t lo_a = vget_low_f64(va);
  const float64x1_t lo_b = vget_low_f64(vb);
  return vcombine_f64(lo_a, lo_b);
}
// Build {va[1], vb[1]}: pair the high lane of each operand into one vector.
inline float64x2_t v_combine_high(const float64x2_t &va, const float64x2_t &vb)
{
  const float64x1_t hi_a = vget_high_f64(va);
  const float64x1_t hi_b = vget_high_f64(vb);
  return vcombine_f64(hi_a, hi_b);
}
// Element-wise fused multiply-add: va * vb + acc.
// Note the operand order of vfmaq_f64: the accumulator comes first.
inline float64x2_t v_fma(const float64x2_t &va, const float64x2_t &vb, const float64x2_t &acc)
{
  return vfmaq_f64(acc, va, vb);
}
248 vpUniRand &rand_gen,
int polygon, std::string name)
251 PolygonLine polygon_line;
255 polygon_line.m_poly.
addPoint(0, P1);
256 polygon_line.m_poly.
addPoint(1, P2);
262 polygon_line.m_p1 = &polygon_line.m_poly.
p[0];
263 polygon_line.m_p2 = &polygon_line.m_poly.
p[1];
268 bool already_here =
false;
307#if defined(VISP_HAVE_PCL) && defined(VISP_HAVE_PCL_COMMON)
309 const pcl::PointCloud<pcl::PointXYZ>::ConstPtr &point_cloud,
310 unsigned int stepX,
unsigned int stepY
311#
if DEBUG_DISPLAY_DEPTH_DENSE
314 std::vector<std::vector<vpImagePoint> > &roiPts_vec
319 unsigned int width = point_cloud->width, height = point_cloud->height;
322 if (point_cloud->width == 0 || point_cloud->height == 0)
325 std::vector<vpImagePoint> roiPts;
326 double distanceToFace;
328#
if DEBUG_DISPLAY_DEPTH_DENSE
335 if (roiPts.size() <= 2) {
337 std::cerr <<
"Error: roiPts.size() <= 2 in computeDesiredFeatures" << std::endl;
350 unsigned int top =
static_cast<unsigned int>(std::max<double>(0.0, bb.getTop()));
351 unsigned int bottom =
static_cast<unsigned int>(std::min<double>(
static_cast<double>(height), std::max<double>(0.0, bb.getBottom())));
352 unsigned int left =
static_cast<unsigned int>(std::max<double>(0.0, bb.getLeft()));
353 unsigned int right =
static_cast<unsigned int>(std::min<double>(
static_cast<double>(width), std::max<double>(0.0, bb.getRight())));
356 bb.setBottom(bottom);
360 if (bb.getHeight() < 0 || bb.getWidth() < 0) {
364 m_pointCloudFace.reserve(
static_cast<size_t>(bb.getWidth() * bb.getHeight()));
366 int totalTheoreticalPoints = 0, totalPoints = 0;
367 for (
unsigned int i = top; i < bottom; i += stepY) {
368 for (
unsigned int j = left; j < right; j += stepX) {
369 if ((
m_useScanLine ? (i < m_hiddenFace->getMbScanLineRenderer().getPrimitiveIDs().getHeight() &&
370 j < m_hiddenFace->getMbScanLineRenderer().getPrimitiveIDs().getWidth() &&
373 totalTheoreticalPoints++;
375 if (
vpMeTracker::inRoiMask(mask, i, j) && pcl::isFinite((*point_cloud)(j, i)) && (*point_cloud)(j, i).z > 0) {
382#if DEBUG_DISPLAY_DEPTH_DENSE
383 debugImage[i][j] = 255;
400 unsigned int height,
const std::vector<vpColVector> &point_cloud,
401 unsigned int stepX,
unsigned int stepY
402#
if DEBUG_DISPLAY_DEPTH_DENSE
405 std::vector<std::vector<vpImagePoint> > &roiPts_vec
412 if (width == 0 || height == 0)
415 std::vector<vpImagePoint> roiPts;
416 double distanceToFace;
418#
if DEBUG_DISPLAY_DEPTH_DENSE
425 if (roiPts.size() <= 2) {
427 std::cerr <<
"Error: roiPts.size() <= 2 in computeDesiredFeatures" << std::endl;
440 unsigned int top =
static_cast<unsigned int>(std::max<double>(0.0, bb.getTop()));
441 unsigned int bottom =
static_cast<unsigned int>(std::min<double>(
static_cast<double>(height), std::max<double>(0.0, bb.getBottom())));
442 unsigned int left =
static_cast<unsigned int>(std::max<double>(0.0, bb.getLeft()));
443 unsigned int right =
static_cast<unsigned int>(std::min<double>(
static_cast<double>(width), std::max<double>(0.0, bb.getRight())));
446 bb.setBottom(bottom);
450 m_pointCloudFace.reserve(
static_cast<size_t>(bb.getWidth() * bb.getHeight()));
452 int totalTheoreticalPoints = 0, totalPoints = 0;
453 for (
unsigned int i = top; i < bottom; i += stepY) {
454 for (
unsigned int j = left; j < right; j += stepX) {
455 if ((
m_useScanLine ? (i < m_hiddenFace->getMbScanLineRenderer().getPrimitiveIDs().getHeight() &&
456 j < m_hiddenFace->getMbScanLineRenderer().getPrimitiveIDs().getWidth() &&
459 totalTheoreticalPoints++;
468#if DEBUG_DISPLAY_DEPTH_DENSE
469 debugImage[i][j] = 255;
485 unsigned int height,
const vpMatrix &point_cloud,
486 unsigned int stepX,
unsigned int stepY
487#
if DEBUG_DISPLAY_DEPTH_DENSE
490 std::vector<std::vector<vpImagePoint> > &roiPts_vec
497 if (width == 0 || height == 0)
500 std::vector<vpImagePoint> roiPts;
501 double distanceToFace;
503#
if DEBUG_DISPLAY_DEPTH_DENSE
510 if (roiPts.size() <= 2) {
512 std::cerr <<
"Error: roiPts.size() <= 2 in computeDesiredFeatures" << std::endl;
525 unsigned int top =
static_cast<unsigned int>(std::max<double>(0.0, bb.getTop()));
526 unsigned int bottom =
static_cast<unsigned int>(std::min<double>(
static_cast<double>(height), std::max<double>(0.0, bb.getBottom())));
527 unsigned int left =
static_cast<unsigned int>(std::max<double>(0.0, bb.getLeft()));
528 unsigned int right =
static_cast<unsigned int>(std::min<double>(
static_cast<double>(width), std::max<double>(0.0, bb.getRight())));
531 bb.setBottom(bottom);
535 m_pointCloudFace.reserve(
static_cast<size_t>(bb.getWidth() * bb.getHeight()));
537 int totalTheoreticalPoints = 0, totalPoints = 0;
538 for (
unsigned int i = top; i < bottom; i += stepY) {
539 for (
unsigned int j = left; j < right; j += stepX) {
540 if ((
m_useScanLine ? (i < m_hiddenFace->getMbScanLineRenderer().getPrimitiveIDs().getHeight() &&
541 j < m_hiddenFace->getMbScanLineRenderer().getPrimitiveIDs().getWidth() &&
544 totalTheoreticalPoints++;
553#if DEBUG_DISPLAY_DEPTH_DENSE
554 debugImage[i][j] = 255;
578 bool isvisible =
false;
582 int index = *itindex;
627#if defined(VISP_HAVE_SIMDLIB)
635#if !USE_SSE && !USE_NEON && !USE_OPENCV_HAL
640#if USE_SSE || USE_NEON || USE_OPENCV_HAL
644 double *ptr_L = L.data;
645 double *ptr_error = error.data;
648 const cv::v_float64x2 vnx = cv::v_setall_f64(nx);
649 const cv::v_float64x2 vny = cv::v_setall_f64(ny);
650 const cv::v_float64x2 vnz = cv::v_setall_f64(nz);
651 const cv::v_float64x2 vd = cv::v_setall_f64(D);
653 const __m128d vnx = _mm_set1_pd(nx);
654 const __m128d vny = _mm_set1_pd(ny);
655 const __m128d vnz = _mm_set1_pd(nz);
656 const __m128d vd = _mm_set1_pd(D);
658 const float64x2_t vnx = vdupq_n_f64(nx);
659 const float64x2_t vny = vdupq_n_f64(ny);
660 const float64x2_t vnz = vdupq_n_f64(nz);
661 const float64x2_t vd = vdupq_n_f64(D);
664 for (; cpt <=
m_pointCloudFace.size() - 6; cpt += 6, ptr_point_cloud += 6) {
666 cv::v_float64x2 vx, vy, vz;
667 cv::v_load_deinterleave(ptr_point_cloud, vx, vy, vz);
669#if defined(VISP_HAVE_OPENCV) && (VISP_HAVE_OPENCV_VERSION >= 0x040900)
670 cv::v_float64x2 va1 = cv::v_sub(cv::v_mul(vnz, vy), cv::v_mul(vny, vz));
671 cv::v_float64x2 va2 = cv::v_sub(cv::v_mul(vnx, vz), cv::v_mul(vnz, vx));
672 cv::v_float64x2 va3 = cv::v_sub(cv::v_mul(vny, vx), cv::v_mul(vnx, vy));
673#elif defined(VISP_HAVE_OPENCV)
674 cv::v_float64x2 va1 = vnz*vy - vny*vz;
675 cv::v_float64x2 va2 = vnx*vz - vnz*vx;
676 cv::v_float64x2 va3 = vny*vx - vnx*vy;
679 cv::v_float64x2 vnxy = cv::v_combine_low(vnx, vny);
680 cv::v_store(ptr_L, vnxy);
682 vnxy = cv::v_combine_low(vnz, va1);
683 cv::v_store(ptr_L, vnxy);
685 vnxy = cv::v_combine_low(va2, va3);
686 cv::v_store(ptr_L, vnxy);
689 vnxy = cv::v_combine_high(vnx, vny);
690 cv::v_store(ptr_L, vnxy);
692 vnxy = cv::v_combine_high(vnz, va1);
693 cv::v_store(ptr_L, vnxy);
695 vnxy = cv::v_combine_high(va2, va3);
696 cv::v_store(ptr_L, vnxy);
699#if (VISP_HAVE_OPENCV_VERSION >= 0x040900)
700 cv::v_float64x2 verr = cv::v_add(vd, cv::v_muladd(vnx, vx, cv::v_muladd(vny, vy, cv::v_mul(vnz, vz))));
702 cv::v_float64x2 verr = vd + cv::v_muladd(vnx, vx, cv::v_muladd(vny, vy, vnz*vz));
705 cv::v_store(ptr_error, verr);
709 v_load_deinterleave(ptr_point_cloud, vx, vy, vz);
711 __m128d va1 = _mm_sub_pd(_mm_mul_pd(vnz, vy), _mm_mul_pd(vny, vz));
712 __m128d va2 = _mm_sub_pd(_mm_mul_pd(vnx, vz), _mm_mul_pd(vnz, vx));
713 __m128d va3 = _mm_sub_pd(_mm_mul_pd(vny, vx), _mm_mul_pd(vnx, vy));
715 __m128d vnxy = v_combine_low(vnx, vny);
716 _mm_storeu_pd(ptr_L, vnxy);
718 vnxy = v_combine_low(vnz, va1);
719 _mm_storeu_pd(ptr_L, vnxy);
721 vnxy = v_combine_low(va2, va3);
722 _mm_storeu_pd(ptr_L, vnxy);
725 vnxy = v_combine_high(vnx, vny);
726 _mm_storeu_pd(ptr_L, vnxy);
728 vnxy = v_combine_high(vnz, va1);
729 _mm_storeu_pd(ptr_L, vnxy);
731 vnxy = v_combine_high(va2, va3);
732 _mm_storeu_pd(ptr_L, vnxy);
735 const __m128d verror = _mm_add_pd(vd, v_fma(vnx, vx, v_fma(vny, vy, _mm_mul_pd(vnz, vz))));
736 _mm_storeu_pd(ptr_error, verror);
739 float64x2_t vx, vy, vz;
740 v_load_deinterleave(ptr_point_cloud, vx, vy, vz);
742 float64x2_t va1 = vsubq_f64(vmulq_f64(vnz, vy), vmulq_f64(vny, vz));
743 float64x2_t va2 = vsubq_f64(vmulq_f64(vnx, vz), vmulq_f64(vnz, vx));
744 float64x2_t va3 = vsubq_f64(vmulq_f64(vny, vx), vmulq_f64(vnx, vy));
746 float64x2_t vnxy = v_combine_low(vnx, vny);
747 vst1q_f64(ptr_L, vnxy);
749 vnxy = v_combine_low(vnz, va1);
750 vst1q_f64(ptr_L, vnxy);
752 vnxy = v_combine_low(va2, va3);
753 vst1q_f64(ptr_L, vnxy);
756 vnxy = v_combine_high(vnx, vny);
757 vst1q_f64(ptr_L, vnxy);
759 vnxy = v_combine_high(vnz, va1);
760 vst1q_f64(ptr_L, vnxy);
762 vnxy = v_combine_high(va2, va3);
763 vst1q_f64(ptr_L, vnxy);
766 const float64x2_t verror = vaddq_f64(vd, v_fma(vnx, vx, v_fma(vny, vy, vmulq_f64(vnz, vz))));
767 vst1q_f64(ptr_error, verror);
778 double _a1 = (nz * y) - (ny * z);
779 double _a2 = (nx * z) - (nz * x);
780 double _a3 = (ny * x) - (nx * y);
783 L[
static_cast<unsigned int>(cpt / 3)][0] = nx;
784 L[
static_cast<unsigned int>(cpt / 3)][1] = ny;
785 L[
static_cast<unsigned int>(cpt / 3)][2] = nz;
786 L[
static_cast<unsigned int>(cpt / 3)][3] = _a1;
787 L[
static_cast<unsigned int>(cpt / 3)][4] = _a2;
788 L[
static_cast<unsigned int>(cpt / 3)][5] = _a3;
801 error[
static_cast<unsigned int>(cpt / 3)] = D + (normal.t() * pt);
812 unsigned int idx = 0;
818 double _a1 = (nz * y) - (ny * z);
819 double _a2 = (nx * z) - (nz * x);
820 double _a3 = (ny * x) - (nx * y);
834 error[idx] = D + (normal.
t() * pt);
840 std::vector<vpImagePoint> &roiPts
841#
if DEBUG_DISPLAY_DEPTH_DENSE
843 std::vector<std::vector<vpImagePoint> > &roiPts_vec
846 double &distanceToFace)
849 m_cam.computeFov(width, height);
853 it->m_p1->changeFrame(cMo);
854 it->m_p2->changeFrame(cMo);
858 it->m_poly.changeFrame(cMo);
859 it->m_poly.computePolygonClipped(
m_cam);
861 if (it->m_poly.polyClipped.size() == 2 &&
869 std::vector<std::pair<vpPoint, vpPoint> > linesLst;
870 m_hiddenFace->computeScanLineQuery(it->m_poly.polyClipped[0].first, it->m_poly.polyClipped[1].first, linesLst,
875 for (
unsigned int i = 0; i < linesLst.size(); i++) {
876 linesLst[i].first.project();
877 linesLst[i].second.project();
885 roiPts.push_back(ip1);
886 roiPts.push_back(ip2);
888 faceCentroid.
set_X(faceCentroid.
get_X() + linesLst[i].first.get_X() + linesLst[i].second.get_X());
889 faceCentroid.
set_Y(faceCentroid.
get_Y() + linesLst[i].first.get_Y() + linesLst[i].second.get_Y());
890 faceCentroid.
set_Z(faceCentroid.
get_Z() + linesLst[i].first.get_Z() + linesLst[i].second.get_Z());
892#if DEBUG_DISPLAY_DEPTH_DENSE
893 std::vector<vpImagePoint> roiPts_;
894 roiPts_.push_back(ip1);
895 roiPts_.push_back(ip2);
896 roiPts_vec.push_back(roiPts_);
900 if (linesLst.empty()) {
901 distanceToFace = std::numeric_limits<double>::max();
904 faceCentroid.
set_X(faceCentroid.
get_X() / (2 * linesLst.size()));
905 faceCentroid.
set_Y(faceCentroid.
get_Y() / (2 * linesLst.size()));
906 faceCentroid.
set_Z(faceCentroid.
get_Z() / (2 * linesLst.size()));
909 sqrt(faceCentroid.
get_X() * faceCentroid.
get_X() + faceCentroid.
get_Y() * faceCentroid.
get_Y() +
920 std::vector<vpPoint> polygonsClipped;
921 m_polygon->getPolygonClipped(polygonsClipped);
923 if (polygonsClipped.empty()) {
924 distanceToFace = std::numeric_limits<double>::max();
929 for (
size_t i = 0; i < polygonsClipped.size(); i++) {
930 faceCentroid.
set_X(faceCentroid.
get_X() + polygonsClipped[i].get_X());
931 faceCentroid.
set_Y(faceCentroid.
get_Y() + polygonsClipped[i].get_Y());
932 faceCentroid.
set_Z(faceCentroid.
get_Z() + polygonsClipped[i].get_Z());
935 faceCentroid.
set_X(faceCentroid.
get_X() / polygonsClipped.size());
936 faceCentroid.
set_Y(faceCentroid.
get_Y() / polygonsClipped.size());
937 faceCentroid.
set_Z(faceCentroid.
get_Z() / polygonsClipped.size());
939 distanceToFace = sqrt(faceCentroid.
get_X() * faceCentroid.
get_X() + faceCentroid.
get_Y() * faceCentroid.
get_Y() +
943#if DEBUG_DISPLAY_DEPTH_DENSE
944 roiPts_vec.push_back(roiPts);
951 bool displayFullModel)
953 std::vector<std::vector<double> > models =
956 for (
size_t i = 0; i < models.size(); i++) {
965 bool displayFullModel)
967 std::vector<std::vector<double> > models =
970 for (
size_t i = 0; i < models.size(); i++) {
1001 bool displayFullModel)
1003 std::vector<std::vector<double> > models;
1011 std::vector<std::vector<double> > lineModels =
1013 models.insert(models.end(), lineModels.begin(), lineModels.end());
1035 if (dx <= std::numeric_limits<double>::epsilon() && dy <= std::numeric_limits<double>::epsilon() &&
1036 dz <= std::numeric_limits<double>::epsilon())
1048 (*it)->setCameraParameters(camera);
1058 (*it)->useScanLine = v;
Generic class defining intrinsic camera parameters.
Implementation of column vector and the associated operations.
Class to define RGB colors available for display functionalities.
static void displayLine(const vpImage< unsigned char > &I, const vpImagePoint &ip1, const vpImagePoint &ip2, const vpColor &color, unsigned int thickness=1, bool segment=true)
Implementation of an homogeneous matrix and operations on such kind of matrices.
Class that defines a 2D point in an image. This class is useful for image processing and stores only ...
Definition of the vpImage class member functions.
Implementation of a matrix and operations on matrices.
Implementation of the polygons management for the model-based trackers.
bool isVisible(unsigned int i)
Manage the line of a polygon used in the model-based tracker.
void setIndex(unsigned int i)
vpPoint * p2
The second extremity.
std::list< int > Lindex_polygon
Index of the faces which contain the line.
void buildFrom(vpPoint &_p1, vpPoint &_p2, vpUniRand &rand_gen)
vpMbHiddenFaces< vpMbtPolygon > * hiddenface
Pointer to the list of faces.
std::vector< std::vector< double > > getModelForDisplay(unsigned int width, unsigned int height, const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam, bool displayFullModel=false)
vpMbtPolygon & getPolygon()
bool useScanLine
Use scanline rendering.
vpPoint * p1
The first extremity.
void setCameraParameters(const vpCameraParameters &camera)
void setName(const std::string &line_name)
void setVisible(bool _isvisible)
void addPolygon(const int &index)
double m_depthDenseFilteringMinDist
Minimum distance threshold.
vpMbHiddenFaces< vpMbtPolygon > * m_hiddenFace
Pointer to the list of faces.
void computeVisibilityDisplay()
virtual ~vpMbtFaceDepthDense()
bool m_isVisible
Visibility flag.
double m_distFarClip
Distance for far clipping.
std::vector< double > m_pointCloudFace
List of depth points inside the face.
vpPlane m_planeObject
Plane equation described in the object frame.
void display(const vpImage< unsigned char > &I, const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam, const vpColor &col, unsigned int thickness=1, bool displayFullModel=false)
void setCameraParameters(const vpCameraParameters &camera)
std::vector< std::vector< double > > getModelForDisplay(unsigned int width, unsigned int height, const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam, bool displayFullModel=false)
void computeROI(const vpHomogeneousMatrix &cMo, unsigned int width, unsigned int height, std::vector< vpImagePoint > &roiPts, double &distanceToFace)
unsigned int getNbFeatures() const
bool samePoint(const vpPoint &P1, const vpPoint &P2) const
void computeInteractionMatrixAndResidu(const vpHomogeneousMatrix &cMo, vpMatrix &L, vpColVector &error)
void setScanLineVisibilityTest(bool v)
vpMbtPolygon * m_polygon
Polygon defining the face.
bool m_useScanLine
Scan line visibility.
bool m_isTrackedDepthDenseFace
Flag to define if the face should be tracked or not.
double m_depthDenseFilteringMaxDist
Maximum distance threshold.
vpMbtFaceDepthDense & operator=(const vpMbtFaceDepthDense &mbt_face)
bool computeDesiredFeatures(const vpHomogeneousMatrix &cMo, const pcl::PointCloud< pcl::PointXYZ >::ConstPtr &point_cloud, unsigned int stepX, unsigned int stepY, const vpImage< bool > *mask=nullptr)
std::vector< PolygonLine > m_polygonLines
Polygon lines used for scan-line visibility.
int m_depthDenseFilteringMethod
Method to use to consider or not the face.
@ DEPTH_OCCUPANCY_RATIO_FILTERING
unsigned int m_clippingFlag
Flags specifying which clipping to used.
std::vector< vpMbtDistanceLine * > m_listOfFaceLines
vpCameraParameters m_cam
Camera intrinsic parameters.
double m_depthDenseFilteringOccupancyRatio
Ratio between available depth points and theoretical number of points.
double m_distNearClip
Distance for near clipping.
void displayFeature(const vpImage< unsigned char > &I, const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam, double scale=0.05, unsigned int thickness=1)
void addLine(vpPoint &p1, vpPoint &p2, vpMbHiddenFaces< vpMbtPolygon > *const faces, vpUniRand &rand_gen, int polygon=-1, std::string name="")
static bool inRoiMask(const vpImage< bool > *mask, unsigned int i, unsigned int j)
static void convertPoint(const vpCameraParameters &cam, const double &x, const double &y, double &u, double &v)
Class that defines a 3D point in the object frame and allows forward projection of a 3D point in the ...
double get_oX() const
Get the point oX coordinate in the object frame.
double get_Y() const
Get the point cY coordinate in the camera frame.
double get_oZ() const
Get the point oZ coordinate in the object frame.
void set_X(double cX)
Set the point cX coordinate in the camera frame.
void set_Y(double cY)
Set the point cY coordinate in the camera frame.
double get_Z() const
Get the point cZ coordinate in the camera frame.
void set_Z(double cZ)
Set the point cZ coordinate in the camera frame.
double get_oY() const
Get the point oY coordinate in the object frame.
double get_X() const
Get the point cX coordinate in the camera frame.
Implements a 3D polygon with render functionalities like clipping.
void setFarClippingDistance(const double &dist)
void setNearClippingDistance(const double &dist)
vpPoint * p
corners in the object frame
virtual void setNbPoint(unsigned int nb)
void setClipping(const unsigned int &flags)
void addPoint(unsigned int n, const vpPoint &P)
Defines a generic 2D polygon.
vpRect getBoundingBox() const
bool isInside(const vpImagePoint &iP, const PointInPolygonMethod &method=PnPolyRayCasting) const
Defines a rectangle in the plane.
Class for generating random numbers with uniform probability density.
VISP_EXPORT bool checkSSE2()
VISP_EXPORT bool checkNeon()