#include <visp3/core/vpCPUFeatures.h>
#include <visp3/mbt/vpMbtFaceDepthDense.h>

#include <pcl/common/point_tests.h>
#if defined __SSE2__ || defined _M_X64 || (defined _M_IX86_FP && _M_IX86_FP >= 2)
#include <emmintrin.h>
#define VISP_HAVE_SSE2 1
#endif

#if !defined(__FMA__) && defined(__AVX2__)
#define __FMA__ 1
#endif

#if defined _WIN32 && defined(_M_ARM64)
#define _ARM64_DISTINCT_NEON_TYPES
#include <arm_neon.h>
#define VISP_HAVE_NEON 1
#elif (defined(__ARM_NEON__) || defined(__ARM_NEON)) && defined(__aarch64__)
#include <arm_neon.h>
#define VISP_HAVE_NEON 1
#endif

#define USE_SIMD_CODE 1

#if VISP_HAVE_SSE2 && USE_SIMD_CODE
#define USE_SSE 1
#endif

#if VISP_HAVE_NEON && USE_SIMD_CODE
#define USE_NEON 1
#endif

#if (VISP_HAVE_OPENCV_VERSION >= 0x040101 || (VISP_HAVE_OPENCV_VERSION < 0x040000 && VISP_HAVE_OPENCV_VERSION >= 0x030407)) && USE_SIMD_CODE
#define USE_OPENCV_HAL 1
#include <opencv2/core/simd_intrinsics.hpp>
#include <opencv2/core/hal/intrin.hpp>
#endif

#if !USE_OPENCV_HAL && (USE_SSE || USE_NEON)
#if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11)
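// Fallback helpers used when the OpenCV universal intrinsics (USE_OPENCV_HAL) are not available:
// minimal SSE2/NEON equivalents of v_load_deinterleave(), v_combine_low(), v_combine_high() and v_fma(),
// restricted to the double-precision operations needed below.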
inline void v_load_deinterleave(const uint64_t *ptr, __m128i &a, __m128i &b, __m128i &c)
{
  __m128i t0 = _mm_loadu_si128((const __m128i *)ptr);
  __m128i t1 = _mm_loadu_si128((const __m128i *)(ptr + 2));
  __m128i t2 = _mm_loadu_si128((const __m128i *)(ptr + 4));

  t1 = _mm_shuffle_epi32(t1, 0x4e); // swap the two 64-bit halves of t1
  a = _mm_unpacklo_epi64(t0, t1);
  b = _mm_unpacklo_epi64(_mm_unpackhi_epi64(t0, t0), t2);
  c = _mm_unpackhi_epi64(t1, t2);
}

inline void v_load_deinterleave(const double *ptr, __m128d &a0, __m128d &b0, __m128d &c0)
{
  __m128i a1, b1, c1;
  v_load_deinterleave((const uint64_t *)ptr, a1, b1, c1);
  a0 = _mm_castsi128_pd(a1);
  b0 = _mm_castsi128_pd(b1);
  c0 = _mm_castsi128_pd(c1);
}

inline __m128d v_combine_low(const __m128d &a, const __m128d &b)
{
  __m128i a1 = _mm_castpd_si128(a), b1 = _mm_castpd_si128(b);
  return _mm_castsi128_pd(_mm_unpacklo_epi64(a1, b1));
}

inline __m128d v_combine_high(const __m128d &a, const __m128d &b)
{
  __m128i a1 = _mm_castpd_si128(a), b1 = _mm_castpd_si128(b);
  return _mm_castsi128_pd(_mm_unpackhi_epi64(a1, b1));
}

inline __m128d v_fma(const __m128d &a, const __m128d &b, const __m128d &c)
{
#ifdef __FMA__
  return _mm_fmadd_pd(a, b, c);
#else
  return _mm_add_pd(_mm_mul_pd(a, b), c);
#endif
}
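// NEON counterparts of the SSE2 helpers above (AArch64 double-precision intrinsics).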
inline void v_load_deinterleave(const double *ptr, float64x2_t &a0, float64x2_t &b0, float64x2_t &c0)
{
  float64x2x3_t v = vld3q_f64(ptr);
  a0 = v.val[0];
  b0 = v.val[1];
  c0 = v.val[2];
}

inline float64x2_t v_combine_low(const float64x2_t &a, const float64x2_t &b)
{
  return vcombine_f64(vget_low_f64(a), vget_low_f64(b));
}

inline float64x2_t v_combine_high(const float64x2_t &a, const float64x2_t &b)
{
  return vcombine_f64(vget_high_f64(a), vget_high_f64(b));
}

inline float64x2_t v_fma(const float64x2_t &a, const float64x2_t &b, const float64x2_t &c)
{
  return vfmaq_f64(c, a, b); // c + a * b
}
vpMbtFaceDepthDense::vpMbtFaceDepthDense()
  : m_cam(), m_clippingFlag(vpPolygon3D::NO_CLIPPING), m_distFarClip(100), m_distNearClip(0.001), m_hiddenFace(NULL),
    m_planeObject(), m_polygon(NULL), m_useScanLine(false),
    m_depthDenseFilteringMethod(DEPTH_OCCUPANCY_RATIO_FILTERING), m_depthDenseFilteringMaxDist(3.0),
    m_depthDenseFilteringMinDist(0.8), m_depthDenseFilteringOccupancyRatio(0.3), m_isTrackedDepthDenseFace(true),
    m_isVisible(false), m_listOfFaceLines(), m_planeCamera(), m_pointCloudFace(), m_polygonLines()
{ }
void vpMbtFaceDepthDense::addLine(vpPoint &P1, vpPoint &P2, vpMbHiddenFaces<vpMbtPolygon> *const faces,
                                  vpUniRand &rand_gen, int polygon, std::string name)
{
  // Build a PolygonLine from the two extremities of the segment
  PolygonLine polygon_line;

  polygon_line.m_poly.setNbPoint(2);
  polygon_line.m_poly.addPoint(0, P1);
  polygon_line.m_poly.addPoint(1, P2);

  polygon_line.m_p1 = &polygon_line.m_poly.p[0];
  polygon_line.m_p2 = &polygon_line.m_poly.p[1];

  // Check whether a vpMbtDistanceLine with the same extremities already exists in the model
  bool already_here = false;
bool vpMbtFaceDepthDense::computeDesiredFeatures(const vpHomogeneousMatrix &cMo,
                                                 const pcl::PointCloud<pcl::PointXYZ>::ConstPtr &point_cloud,
                                                 unsigned int stepX, unsigned int stepY
#if DEBUG_DISPLAY_DEPTH_DENSE
                                                 ,
                                                 vpImage<unsigned char> &debugImage,
                                                 std::vector<std::vector<vpImagePoint> > &roiPts_vec
#endif
                                                 ,
                                                 const vpImage<bool> *mask)
{
  unsigned int width = point_cloud->width, height = point_cloud->height;

  if (point_cloud->width == 0 || point_cloud->height == 0)
    return false;

  std::vector<vpImagePoint> roiPts;
  double distanceToFace;
  computeROI(cMo, width, height, roiPts
#if DEBUG_DISPLAY_DEPTH_DENSE
             ,
             roiPts_vec
#endif
             ,
             distanceToFace);

  if (roiPts.size() <= 2) {
    std::cerr << "Error: roiPts.size() <= 2 in computeDesiredFeatures" << std::endl;
    return false;
  }

  vpPolygon polygon_2d(roiPts);
  vpRect bb = polygon_2d.getBoundingBox();

  unsigned int top = (unsigned int)std::max(0.0, bb.getTop());
  unsigned int bottom = (unsigned int)std::min((double)height, std::max(0.0, bb.getBottom()));
  unsigned int left = (unsigned int)std::max(0.0, bb.getLeft());
  unsigned int right = (unsigned int)std::min((double)width, std::max(0.0, bb.getRight()));
  int totalTheoreticalPoints = 0, totalPoints = 0;
  for (unsigned int i = top; i < bottom; i += stepY) {
    for (unsigned int j = left; j < right; j += stepX) {
      if ((m_useScanLine ? (i < m_hiddenFace->getMbScanLineRenderer().getPrimitiveIDs().getHeight() &&
                            j < m_hiddenFace->getMbScanLineRenderer().getPrimitiveIDs().getWidth() &&
                            m_hiddenFace->getMbScanLineRenderer().getPrimitiveIDs()[i][j] == m_polygon->getIndex())
                         : polygon_2d.isInside(vpImagePoint(i, j)))) {
        totalTheoreticalPoints++;

        if (vpMeTracker::inMask(mask, i, j) && pcl::isFinite((*point_cloud)(j, i)) && (*point_cloud)(j, i).z > 0) {
          totalPoints++;

          m_pointCloudFace.push_back((*point_cloud)(j, i).x);
          m_pointCloudFace.push_back((*point_cloud)(j, i).y);
          m_pointCloudFace.push_back((*point_cloud)(j, i).z);

#if DEBUG_DISPLAY_DEPTH_DENSE
          debugImage[i][j] = 255;
#endif
        }
      }
    }
  }
bool vpMbtFaceDepthDense::computeDesiredFeatures(const vpHomogeneousMatrix &cMo, unsigned int width,
                                                 unsigned int height, const std::vector<vpColVector> &point_cloud,
                                                 unsigned int stepX, unsigned int stepY
#if DEBUG_DISPLAY_DEPTH_DENSE
                                                 ,
                                                 vpImage<unsigned char> &debugImage,
                                                 std::vector<std::vector<vpImagePoint> > &roiPts_vec
#endif
                                                 ,
                                                 const vpImage<bool> *mask)
{
  if (width == 0 || height == 0)
    return false;

  std::vector<vpImagePoint> roiPts;
  double distanceToFace;
  computeROI(cMo, width, height, roiPts
#if DEBUG_DISPLAY_DEPTH_DENSE
             ,
             roiPts_vec
#endif
             ,
             distanceToFace);

  if (roiPts.size() <= 2) {
    std::cerr << "Error: roiPts.size() <= 2 in computeDesiredFeatures" << std::endl;
    return false;
  }

  vpPolygon polygon_2d(roiPts);
  vpRect bb = polygon_2d.getBoundingBox();

  unsigned int top = (unsigned int)std::max(0.0, bb.getTop());
  unsigned int bottom = (unsigned int)std::min((double)height, std::max(0.0, bb.getBottom()));
  unsigned int left = (unsigned int)std::max(0.0, bb.getLeft());
  unsigned int right = (unsigned int)std::min((double)width, std::max(0.0, bb.getRight()));
  int totalTheoreticalPoints = 0, totalPoints = 0;
  for (unsigned int i = top; i < bottom; i += stepY) {
    for (unsigned int j = left; j < right; j += stepX) {
      if ((m_useScanLine ? (i < m_hiddenFace->getMbScanLineRenderer().getPrimitiveIDs().getHeight() &&
                            j < m_hiddenFace->getMbScanLineRenderer().getPrimitiveIDs().getWidth() &&
                            m_hiddenFace->getMbScanLineRenderer().getPrimitiveIDs()[i][j] == m_polygon->getIndex())
                         : polygon_2d.isInside(vpImagePoint(i, j)))) {
        totalTheoreticalPoints++;

        if (vpMeTracker::inMask(mask, i, j) && point_cloud[i * width + j][2] > 0) {
          totalPoints++;
          // the x, y, z coordinates of the point are appended to m_pointCloudFace (same pattern as the PCL overload)
#if DEBUG_DISPLAY_DEPTH_DENSE
          debugImage[i][j] = 255;
#endif
        }
      }
    }
  }
// computeVisibilityDisplay(): for each vpMbtDistanceLine of the face, mark the line visible when one of
// the polygons it belongs to is visible (or when its polygon index is -1).
    bool isvisible = false;

      int index = *itindex;
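// computeInteractionMatrixAndResidu(): with the plane of the face expressed in the camera frame as
// n . X + D = 0, n = (nx, ny, nz), each 3-D point X = (x, y, z) of m_pointCloudFace contributes the
// residual error = D + n . X and the interaction-matrix row
//   [ nx  ny  nz  nz*y - ny*z  nx*z - nz*x  ny*x - nx*y ]
// The SIMD paths below (OpenCV HAL, SSE2, NEON) and the scalar fallback all implement these same formulas.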
#if !USE_SSE && !USE_NEON && !USE_OPENCV_HAL // no SIMD backend compiled in: only the scalar branch below is used
#endif

#if USE_SSE || USE_NEON || USE_OPENCV_HAL
    size_t cpt = 0;
    double *ptr_point_cloud = &m_pointCloudFace[0];
    double *ptr_L = L.data;
    double *ptr_error = error.data;

#if USE_OPENCV_HAL
    const cv::v_float64x2 vnx = cv::v_setall_f64(nx);
    const cv::v_float64x2 vny = cv::v_setall_f64(ny);
    const cv::v_float64x2 vnz = cv::v_setall_f64(nz);
    const cv::v_float64x2 vd = cv::v_setall_f64(D);
#elif USE_SSE
    const __m128d vnx = _mm_set1_pd(nx);
    const __m128d vny = _mm_set1_pd(ny);
    const __m128d vnz = _mm_set1_pd(nz);
    const __m128d vd = _mm_set1_pd(D);
#else // USE_NEON
    const float64x2_t vnx = vdupq_n_f64(nx);
    const float64x2_t vny = vdupq_n_f64(ny);
    const float64x2_t vnz = vdupq_n_f64(nz);
    const float64x2_t vd = vdupq_n_f64(D);
#endif
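    // Vectorized loop: each iteration consumes two 3-D points (6 doubles), deinterleaves their x/y/z
    // components, writes the two corresponding 6-column rows of L and the two plane-to-point residuals.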
    for (; cpt <= m_pointCloudFace.size() - 6; cpt += 6, ptr_point_cloud += 6) {
#if USE_OPENCV_HAL
      cv::v_float64x2 vx, vy, vz;
      cv::v_load_deinterleave(ptr_point_cloud, vx, vy, vz);

#if (VISP_HAVE_OPENCV_VERSION >= 0x040900)
      cv::v_float64x2 va1 = cv::v_sub(cv::v_mul(vnz, vy), cv::v_mul(vny, vz));
      cv::v_float64x2 va2 = cv::v_sub(cv::v_mul(vnx, vz), cv::v_mul(vnz, vx));
      cv::v_float64x2 va3 = cv::v_sub(cv::v_mul(vny, vx), cv::v_mul(vnx, vy));
#else
      cv::v_float64x2 va1 = vnz * vy - vny * vz;
      cv::v_float64x2 va2 = vnx * vz - vnz * vx;
      cv::v_float64x2 va3 = vny * vx - vnx * vy;
#endif

      cv::v_float64x2 vnxy = cv::v_combine_low(vnx, vny);
      cv::v_store(ptr_L, vnxy);
      ptr_L += 2;
      vnxy = cv::v_combine_low(vnz, va1);
      cv::v_store(ptr_L, vnxy);
      ptr_L += 2;
      vnxy = cv::v_combine_low(va2, va3);
      cv::v_store(ptr_L, vnxy);
      ptr_L += 2;

      vnxy = cv::v_combine_high(vnx, vny);
      cv::v_store(ptr_L, vnxy);
      ptr_L += 2;
      vnxy = cv::v_combine_high(vnz, va1);
      cv::v_store(ptr_L, vnxy);
      ptr_L += 2;
      vnxy = cv::v_combine_high(va2, va3);
      cv::v_store(ptr_L, vnxy);
      ptr_L += 2;

#if (VISP_HAVE_OPENCV_VERSION >= 0x040900)
      cv::v_float64x2 verr = cv::v_add(vd, cv::v_muladd(vnx, vx, cv::v_muladd(vny, vy, cv::v_mul(vnz, vz))));
#else
      cv::v_float64x2 verr = vd + cv::v_muladd(vnx, vx, cv::v_muladd(vny, vy, vnz * vz));
#endif
      cv::v_store(ptr_error, verr);
      ptr_error += 2;
#elif USE_SSE
      __m128d vx, vy, vz;
      v_load_deinterleave(ptr_point_cloud, vx, vy, vz);

      __m128d va1 = _mm_sub_pd(_mm_mul_pd(vnz, vy), _mm_mul_pd(vny, vz));
      __m128d va2 = _mm_sub_pd(_mm_mul_pd(vnx, vz), _mm_mul_pd(vnz, vx));
      __m128d va3 = _mm_sub_pd(_mm_mul_pd(vny, vx), _mm_mul_pd(vnx, vy));

      __m128d vnxy = v_combine_low(vnx, vny);
      _mm_storeu_pd(ptr_L, vnxy);
      ptr_L += 2;
      vnxy = v_combine_low(vnz, va1);
      _mm_storeu_pd(ptr_L, vnxy);
      ptr_L += 2;
      vnxy = v_combine_low(va2, va3);
      _mm_storeu_pd(ptr_L, vnxy);
      ptr_L += 2;

      vnxy = v_combine_high(vnx, vny);
      _mm_storeu_pd(ptr_L, vnxy);
      ptr_L += 2;
      vnxy = v_combine_high(vnz, va1);
      _mm_storeu_pd(ptr_L, vnxy);
      ptr_L += 2;
      vnxy = v_combine_high(va2, va3);
      _mm_storeu_pd(ptr_L, vnxy);
      ptr_L += 2;

      const __m128d verror = _mm_add_pd(vd, v_fma(vnx, vx, v_fma(vny, vy, _mm_mul_pd(vnz, vz))));
      _mm_storeu_pd(ptr_error, verror);
      ptr_error += 2;
#else // USE_NEON
      float64x2_t vx, vy, vz;
      v_load_deinterleave(ptr_point_cloud, vx, vy, vz);

      float64x2_t va1 = vsubq_f64(vmulq_f64(vnz, vy), vmulq_f64(vny, vz));
      float64x2_t va2 = vsubq_f64(vmulq_f64(vnx, vz), vmulq_f64(vnz, vx));
      float64x2_t va3 = vsubq_f64(vmulq_f64(vny, vx), vmulq_f64(vnx, vy));

      float64x2_t vnxy = v_combine_low(vnx, vny);
      vst1q_f64(ptr_L, vnxy);
      ptr_L += 2;
      vnxy = v_combine_low(vnz, va1);
      vst1q_f64(ptr_L, vnxy);
      ptr_L += 2;
      vnxy = v_combine_low(va2, va3);
      vst1q_f64(ptr_L, vnxy);
      ptr_L += 2;

      vnxy = v_combine_high(vnx, vny);
      vst1q_f64(ptr_L, vnxy);
      ptr_L += 2;
      vnxy = v_combine_high(vnz, va1);
      vst1q_f64(ptr_L, vnxy);
      ptr_L += 2;
      vnxy = v_combine_high(va2, va3);
      vst1q_f64(ptr_L, vnxy);
      ptr_L += 2;

      const float64x2_t verror = vaddq_f64(vd, v_fma(vnx, vx, v_fma(vny, vy, vmulq_f64(vnz, vz))));
      vst1q_f64(ptr_error, verror);
      ptr_error += 2;
#endif
    }
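    // Scalar remainder: handles the last point when the number of 3-D points is odd, producing one row
    // of L and one residual with the same formulas as the vectorized code above.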
    for (; cpt < m_pointCloudFace.size(); cpt += 3) {
      double x = m_pointCloudFace[cpt];
      double y = m_pointCloudFace[cpt + 1];
      double z = m_pointCloudFace[cpt + 2];

      double _a1 = (nz * y) - (ny * z);
      double _a2 = (nx * z) - (nz * x);
      double _a3 = (ny * x) - (nx * y);

      // row cpt/3 of the interaction matrix: [nx ny nz _a1 _a2 _a3]
      L[(unsigned int)(cpt / 3)][0] = nx;
      L[(unsigned int)(cpt / 3)][1] = ny;
      L[(unsigned int)(cpt / 3)][2] = nz;
      L[(unsigned int)(cpt / 3)][3] = _a1;
      L[(unsigned int)(cpt / 3)][4] = _a2;
      L[(unsigned int)(cpt / 3)][5] = _a3;

      vpColVector normal(3), pt(3);
      normal[0] = nx; normal[1] = ny; normal[2] = nz;
      pt[0] = x; pt[1] = y; pt[2] = z;

      error[(unsigned int)(cpt / 3)] = D + (normal.t() * pt);
    }
#endif
  // Pure scalar path (no SIMD backend compiled in): same per-point formulas as above, one row of L and
  // one residual per 3-D point, with normal = (nx, ny, nz) and pt = (x, y, z) as vpColVector.
  unsigned int idx = 0;
  for (size_t i = 0; i < m_pointCloudFace.size(); i += 3, idx++) {
    double x = m_pointCloudFace[i], y = m_pointCloudFace[i + 1], z = m_pointCloudFace[i + 2];

    double _a1 = (nz * y) - (ny * z);
    double _a2 = (nx * z) - (nz * x);
    double _a3 = (ny * x) - (nx * y);

    error[idx] = D + (normal.t() * pt);
  }
}
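// Usage sketch (not from this file, assumed caller-side code): the model-based tracker gathers, for every
// visible face, the (L, error) pair computed above and stacks them into the global system solved at each
// iteration of the pose estimation, e.g.
//   face->computeInteractionMatrixAndResidu(cMo, L_face, error_face);
//   L_stack.stack(L_face);  error_stack.stack(error_face);
// where L_face, error_face, L_stack and error_stack are hypothetical caller-side variables.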
void vpMbtFaceDepthDense::computeROI(const vpHomogeneousMatrix &cMo, unsigned int width, unsigned int height,
                                     std::vector<vpImagePoint> &roiPts
#if DEBUG_DISPLAY_DEPTH_DENSE
                                     ,
                                     std::vector<std::vector<vpImagePoint> > &roiPts_vec
#endif
                                     ,
                                     double &distanceToFace)
{
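  // With scan-line visibility enabled, the ROI is built from the clipped polygon lines returned by the
  // scan-line renderer and distanceToFace is the norm of the centroid of the visible segments; otherwise
  // the clipped 3-D polygon of the face itself is used (see the else branch below).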
  if (m_useScanLine) {
    for (std::vector<PolygonLine>::iterator it = m_polygonLines.begin(); it != m_polygonLines.end(); ++it) {
      it->m_p1->changeFrame(cMo);
      it->m_p2->changeFrame(cMo);

      vpImagePoint ip1, ip2;

      it->m_poly.changeFrame(cMo);
      it->m_poly.computePolygonClipped(m_cam);

      if (it->m_poly.polyClipped.size() == 2 &&
          /* ...and neither clipped extremity is flagged by the near/far/FOV clipping planes... */) {

        std::vector<std::pair<vpPoint, vpPoint> > linesLst;
        m_hiddenFace->computeScanLineQuery(it->m_poly.polyClipped[0].first, it->m_poly.polyClipped[1].first,
                                           linesLst, true);

        vpPoint faceCentroid;

        for (unsigned int i = 0; i < linesLst.size(); i++) {
          linesLst[i].first.project();
          linesLst[i].second.project();

          // ip1, ip2: pixel coordinates of the two projected extremities (vpMeterPixelConversion::convertPoint)
          roiPts.push_back(ip1);
          roiPts.push_back(ip2);

          faceCentroid.set_X(faceCentroid.get_X() + linesLst[i].first.get_X() + linesLst[i].second.get_X());
          faceCentroid.set_Y(faceCentroid.get_Y() + linesLst[i].first.get_Y() + linesLst[i].second.get_Y());
          faceCentroid.set_Z(faceCentroid.get_Z() + linesLst[i].first.get_Z() + linesLst[i].second.get_Z());

#if DEBUG_DISPLAY_DEPTH_DENSE
          std::vector<vpImagePoint> roiPts_;
          roiPts_.push_back(ip1);
          roiPts_.push_back(ip2);
          roiPts_vec.push_back(roiPts_);
#endif
        }

        if (linesLst.empty()) {
          distanceToFace = std::numeric_limits<double>::max();
        }
        else {
          faceCentroid.set_X(faceCentroid.get_X() / (2 * linesLst.size()));
          faceCentroid.set_Y(faceCentroid.get_Y() / (2 * linesLst.size()));
          faceCentroid.set_Z(faceCentroid.get_Z() / (2 * linesLst.size()));

          // distanceToFace = norm of the centroid of the visible segments
          distanceToFace =
              sqrt(faceCentroid.get_X() * faceCentroid.get_X() + faceCentroid.get_Y() * faceCentroid.get_Y() +
                   faceCentroid.get_Z() * faceCentroid.get_Z());
        }
      }
    }
  }
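  // Without scan-line visibility, the ROI comes from the clipped 3-D polygon of the face and
  // distanceToFace is the norm of the centroid of its clipped vertices.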
  else {
    // Project the clipped polygon of the face into the image to obtain roiPts, then retrieve its clipped
    // 3-D vertices
    std::vector<vpPoint> polygonsClipped;
    m_polygon->getPolygonClipped(polygonsClipped);

    if (polygonsClipped.empty()) {
      distanceToFace = std::numeric_limits<double>::max();
    }
    else {
      vpPoint faceCentroid;

      for (size_t i = 0; i < polygonsClipped.size(); i++) {
        faceCentroid.set_X(faceCentroid.get_X() + polygonsClipped[i].get_X());
        faceCentroid.set_Y(faceCentroid.get_Y() + polygonsClipped[i].get_Y());
        faceCentroid.set_Z(faceCentroid.get_Z() + polygonsClipped[i].get_Z());
      }

      faceCentroid.set_X(faceCentroid.get_X() / polygonsClipped.size());
      faceCentroid.set_Y(faceCentroid.get_Y() / polygonsClipped.size());
      faceCentroid.set_Z(faceCentroid.get_Z() / polygonsClipped.size());

      distanceToFace =
          sqrt(faceCentroid.get_X() * faceCentroid.get_X() + faceCentroid.get_Y() * faceCentroid.get_Y() +
               faceCentroid.get_Z() * faceCentroid.get_Z());
    }

#if DEBUG_DISPLAY_DEPTH_DENSE
    roiPts_vec.push_back(roiPts);
#endif
  }
}
// display() is provided for both vpImage<unsigned char> and vpImage<vpRGBa>; both overloads share the same body:
void vpMbtFaceDepthDense::display(const vpImage<unsigned char> &I, const vpHomogeneousMatrix &cMo,
                                  const vpCameraParameters &cam, const vpColor &col, unsigned int thickness,
                                  bool displayFullModel)
{
  std::vector<std::vector<double> > models =
      getModelForDisplay(I.getWidth(), I.getHeight(), cMo, cam, displayFullModel);

  for (size_t i = 0; i < models.size(); i++) {
    // each entry of models describes one projected line segment, drawn with vpDisplay::displayLine()
  }
}
std::vector<std::vector<double> > vpMbtFaceDepthDense::getModelForDisplay(unsigned int width, unsigned int height,
    const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam, bool displayFullModel)
{
  std::vector<std::vector<double> > models;

  // for each vpMbtDistanceLine `line` of m_listOfFaceLines, append its own display model
  std::vector<std::vector<double> > lineModels =
      line->getModelForDisplay(width, height, cMo, cam, displayFullModel);
  models.insert(models.end(), lineModels.begin(), lineModels.end());
// samePoint(): two vpPoint are considered identical when their object-frame coordinates differ by less than epsilon
  if (dx <= std::numeric_limits<double>::epsilon() && dy <= std::numeric_limits<double>::epsilon() &&
      dz <= std::numeric_limits<double>::epsilon())
    return true;

// setCameraParameters() / setScanLineVisibilityTest(): store the value and forward it to every face line
    (*it)->setCameraParameters(camera);

    (*it)->useScanLine = v;