6#include <visp3/core/vpXmlParserCamera.h>
7#include <visp3/detection/vpDetectorAprilTag.h>
8#include <visp3/gui/vpDisplayGDI.h>
9#include <visp3/gui/vpDisplayOpenCV.h>
10#include <visp3/gui/vpDisplayX.h>
11#include <visp3/mbt/vpMbGenericTracker.h>
12#include <visp3/sensor/vpRealSense2.h>
// Finite states of the demo's main loop: search for the AprilTag
// (state_detection), run model-based tracking of the cube
// (state_tracking), or leave the loop (state_quit).
typedef enum { state_detection, state_tracking, state_quit } state_t;
18void createCaoFile(
double cubeEdgeSize)
20 std::ofstream fileStream;
21 fileStream.open(
"cube.cao", std::ofstream::out | std::ofstream::trunc);
23 fileStream <<
"# 3D Points\n";
24 fileStream <<
"8 # Number of points\n";
25 fileStream << cubeEdgeSize / 2 <<
" " << cubeEdgeSize / 2 <<
" " << 0 <<
" # Point 0: (X, Y, Z)\n";
26 fileStream << cubeEdgeSize / 2 <<
" " << -cubeEdgeSize / 2 <<
" " << 0 <<
" # Point 1\n";
27 fileStream << -cubeEdgeSize / 2 <<
" " << -cubeEdgeSize / 2 <<
" " << 0 <<
" # Point 2\n";
28 fileStream << -cubeEdgeSize / 2 <<
" " << cubeEdgeSize / 2 <<
" " << 0 <<
" # Point 3\n";
29 fileStream << -cubeEdgeSize / 2 <<
" " << cubeEdgeSize / 2 <<
" " << -cubeEdgeSize <<
" # Point 4\n";
30 fileStream << -cubeEdgeSize / 2 <<
" " << -cubeEdgeSize / 2 <<
" " << -cubeEdgeSize <<
" # Point 5\n";
31 fileStream << cubeEdgeSize / 2 <<
" " << -cubeEdgeSize / 2 <<
" " << -cubeEdgeSize <<
" # Point 6\n";
32 fileStream << cubeEdgeSize / 2 <<
" " << cubeEdgeSize / 2 <<
" " << -cubeEdgeSize <<
" # Point 7\n";
33 fileStream <<
"# 3D Lines\n";
34 fileStream <<
"0 # Number of lines\n";
35 fileStream <<
"# Faces from 3D lines\n";
36 fileStream <<
"0 # Number of faces\n";
37 fileStream <<
"# Faces from 3D points\n";
38 fileStream <<
"6 # Number of faces\n";
39 fileStream <<
"4 0 3 2 1 # Face 0: [number of points] [index of the 3D points]...\n";
40 fileStream <<
"4 1 2 5 6\n";
41 fileStream <<
"4 4 7 6 5\n";
42 fileStream <<
"4 0 7 4 3\n";
43 fileStream <<
"4 5 2 3 4\n";
44 fileStream <<
"4 0 1 6 7 # Face 5\n";
45 fileStream <<
"# 3D cylinders\n";
46 fileStream <<
"0 # Number of cylinders\n";
47 fileStream <<
"# 3D circles\n";
48 fileStream <<
"0 # Number of circles\n";
52#if defined(VISP_HAVE_APRILTAG)
56 std::vector<vpHomogeneousMatrix> cMo_vec;
59 bool ret = detector.
detect(I, tagSize, cam, cMo_vec);
62 for (
size_t i = 0; i < cMo_vec.size(); i++) {
70 return state_tracking;
73 return state_detection;
88 return state_detection;
95 if (projection_error > projection_error_threshold) {
96 return state_detection;
104 std::stringstream ss;
109 return state_tracking;
114 std::map<std::string, pcl::PointCloud<pcl::PointXYZ>::ConstPtr> mapOfPointclouds,
116 std::map<std::string,
const std::vector<vpColVector> *> mapOfPointclouds,
117 std::map<std::string, unsigned int> mapOfWidths, std::map<std::string, unsigned int> mapOfHeights,
129 tracker.
track(mapOfImages, mapOfPointclouds);
131 tracker.
track(mapOfImages, mapOfPointclouds, mapOfWidths, mapOfHeights);
135 return state_detection;
142 if (projection_error > projection_error_threshold) {
143 return state_detection;
147 tracker.
display(I_gray, I_depth, cMo, depth_M_color * cMo, cam_color, cam_depth,
vpColor::red, 3);
151 return state_tracking;
154int main(
int argc,
const char **argv)
157#if defined(VISP_HAVE_APRILTAG) && defined(VISP_HAVE_REALSENSE2) && defined(VISP_HAVE_MODULE_MBT)
161 double opt_tag_size = 0.08;
162 float opt_quad_decimate = 1.0;
163 int opt_nthreads = 1;
164 double opt_cube_size = 0.125;
165#ifdef VISP_HAVE_OPENCV
166 bool opt_use_texture =
false;
168 bool opt_use_depth =
false;
169 double opt_projection_error_threshold = 40.;
171#if !(defined(VISP_HAVE_X11) || defined(VISP_HAVE_GDI) || defined(VISP_HAVE_OPENCV))
172 bool display_off =
true;
174 bool display_off =
false;
177 for (
int i = 1; i < argc; i++) {
178 if (std::string(argv[i]) ==
"--tag_size" && i + 1 < argc) {
179 opt_tag_size = atof(argv[i + 1]);
181 else if (std::string(argv[i]) ==
"--quad_decimate" && i + 1 < argc) {
182 opt_quad_decimate = (float)atof(argv[i + 1]);
184 else if (std::string(argv[i]) ==
"--nthreads" && i + 1 < argc) {
185 opt_nthreads = atoi(argv[i + 1]);
187 else if (std::string(argv[i]) ==
"--display_off") {
190 else if (std::string(argv[i]) ==
"--tag_family" && i + 1 < argc) {
193 else if (std::string(argv[i]) ==
"--cube_size" && i + 1 < argc) {
194 opt_cube_size = atof(argv[i + 1]);
195#ifdef VISP_HAVE_OPENCV
197 else if (std::string(argv[i]) ==
"--texture") {
198 opt_use_texture =
true;
201 else if (std::string(argv[i]) ==
"--depth") {
202 opt_use_depth =
true;
204 else if (std::string(argv[i]) ==
"--projection_error" && i + 1 < argc) {
205 opt_projection_error_threshold = atof(argv[i + 1]);
207 else if (std::string(argv[i]) ==
"--help" || std::string(argv[i]) ==
"-h") {
208 std::cout <<
"Usage: " << argv[0]
209 <<
" [--cube_size <size in m>] [--tag_size <size in m>]"
210 " [--quad_decimate <decimation>] [--nthreads <nb>]"
211 " [--tag_family <0: TAG_36h11, 1: TAG_36h10, 2: TAG_36ARTOOLKIT, "
212 " 3: TAG_25h9, 4: TAG_25h7, 5: TAG_16h5>]";
213#if (defined(VISP_HAVE_X11) || defined(VISP_HAVE_GDI) || defined(VISP_HAVE_OPENCV))
214 std::cout <<
" [--display_off]";
216 std::cout <<
" [--texture] [--depth] [--projection_error <30 - 100>] [--help]" << std::endl;
221 createCaoFile(opt_cube_size);
227 int width = 640, height = 480, stream_fps = 30;
228 config.enable_stream(RS2_STREAM_COLOR, width, height, RS2_FORMAT_RGBA8, stream_fps);
229 config.enable_stream(RS2_STREAM_DEPTH, width, height, RS2_FORMAT_Z16, stream_fps);
230 config.disable_stream(RS2_STREAM_INFRARED);
231 realsense.
open(config);
245 std::map<std::string, vpHomogeneousMatrix> mapOfCameraTransformations;
246 std::map<std::string, const vpImage<unsigned char> *> mapOfImages;
248 std::map<std::string, pcl::PointCloud<pcl::PointXYZ>::ConstPtr> mapOfPointclouds;
249 pcl::PointCloud<pcl::PointXYZ>::Ptr pointcloud(
new pcl::PointCloud<pcl::PointXYZ>());
251 std::map<std::string, const std::vector<vpColVector> *> mapOfPointclouds;
252 std::map<std::string, unsigned int> mapOfWidths, mapOfHeights;
253 std::vector<vpColVector> pointcloud;
256 std::map<std::string, vpHomogeneousMatrix> mapOfCameraPoses;
258 std::cout <<
"Cube size: " << opt_cube_size << std::endl;
259 std::cout <<
"AprilTag size: " << opt_tag_size << std::endl;
260 std::cout <<
"AprilTag family: " << opt_tag_family << std::endl;
261 std::cout <<
"Camera parameters:" << std::endl;
262 std::cout <<
" Color:\n" << cam_color << std::endl;
264 std::cout <<
" Depth:\n" << cam_depth << std::endl;
265 std::cout <<
"Detection: " << std::endl;
266 std::cout <<
" Quad decimate: " << opt_quad_decimate << std::endl;
267 std::cout <<
" Threads number: " << opt_nthreads << std::endl;
268 std::cout <<
"Tracker: " << std::endl;
269 std::cout <<
" Use edges : 1" << std::endl;
270 std::cout <<
" Use texture: "
271#ifdef VISP_HAVE_OPENCV
272 << opt_use_texture << std::endl;
274 <<
" na" << std::endl;
276 std::cout <<
" Use depth : " << opt_use_depth << std::endl;
277 std::cout <<
" Projection error: " << opt_projection_error_threshold << std::endl;
285 d_gray =
new vpDisplayX(I_gray, 50, 50,
"Color stream");
287 d_depth =
new vpDisplayX(I_depth, 80 + I_gray.getWidth(), 50,
"Depth stream");
288#elif defined(VISP_HAVE_GDI)
292#elif defined(HAVE_OPENCV_HIGHGUI)
305 std::vector<int> trackerTypes;
306#if defined(VISP_HAVE_OPENCV) && defined(HAVE_OPENCV_IMGPROC) && defined(HAVE_OPENCV_VIDEO)
331#if defined(VISP_HAVE_OPENCV) && defined(HAVE_OPENCV_IMGPROC) && defined(HAVE_OPENCV_VIDEO)
332 if (opt_use_texture) {
350 tracker.
loadModel(
"cube.cao",
"cube.cao");
351 mapOfCameraTransformations[
"Camera2"] = depth_M_color;
367 state_t state = state_detection;
370 while (state != state_quit) {
373 realsense.
acquire((
unsigned char *)I_color.bitmap, (
unsigned char *)I_depth_raw.bitmap, NULL, pointcloud, NULL);
375 realsense.
acquire((
unsigned char *)I_color.bitmap, (
unsigned char *)I_depth_raw.bitmap, &pointcloud, NULL,
383 mapOfImages[
"Camera1"] = &I_gray;
384 mapOfImages[
"Camera2"] = &I_depth;
386 mapOfPointclouds[
"Camera2"] = pointcloud;
388 mapOfPointclouds[
"Camera2"] = &pointcloud;
389 mapOfWidths[
"Camera2"] = width;
390 mapOfHeights[
"Camera2"] = height;
398 if (state == state_detection) {
399 state = detectAprilTag(I_gray, detector, opt_tag_size, cam_color, cMo);
402 if (state == state_tracking) {
404 mapOfCameraPoses[
"Camera1"] = cMo;
405 mapOfCameraPoses[
"Camera2"] = depth_M_color * cMo;
414 if (state == state_tracking) {
417 state = track(mapOfImages, mapOfPointclouds, I_gray, I_depth, depth_M_color, tracker,
418 opt_projection_error_threshold, cMo);
420 state = track(mapOfImages, mapOfPointclouds, mapOfWidths, mapOfHeights, I_gray, I_depth, depth_M_color,
421 tracker, opt_projection_error_threshold, cMo);
425 state = track(I_gray, tracker, opt_projection_error_threshold, cMo);
428 std::stringstream ss;
457 std::cerr <<
"Catch an exception: " << e.
getMessage() << std::endl;
464#ifndef VISP_HAVE_APRILTAG
465 std::cout <<
"ViSP is not build with Apriltag support" << std::endl;
467#ifndef VISP_HAVE_REALSENSE2
468 std::cout <<
"ViSP is not build with librealsense2 support" << std::endl;
470 std::cout <<
"Install missing 3rd parties, configure and build ViSP to run this tutorial" << std::endl;
Generic class defining intrinsic camera parameters.
@ perspectiveProjWithoutDistortion
Perspective projection without distortion model.
static const vpColor none
void setAprilTagQuadDecimate(float quadDecimate)
@ TAG_36h11
AprilTag 36h11 pattern (recommended)
void setAprilTagNbThreads(int nThreads)
bool detect(const vpImage< unsigned char > &I)
size_t getNbObjects() const
Display for windows using GDI (available on any windows 32 platform).
The vpDisplayOpenCV class allows displaying images using the OpenCV library. Thus, to enable this class, OpenCV should be installed.
Use the X11 console to display images on a unix-like OS. Thus, to enable this class, X11 should be installed.
Class that defines generic functionalities for display.
static bool getClick(const vpImage< unsigned char > &I, bool blocking=true)
static void display(const vpImage< unsigned char > &I)
static void displayFrame(const vpImage< unsigned char > &I, const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam, double size, const vpColor &color=vpColor::none, unsigned int thickness=1, const vpImagePoint &offset=vpImagePoint(0, 0), const std::string &frameName="", const vpColor &textColor=vpColor::black, const vpImagePoint &textOffset=vpImagePoint(15, 15))
static void flush(const vpImage< unsigned char > &I)
static void displayText(const vpImage< unsigned char > &I, const vpImagePoint &ip, const std::string &s, const vpColor &color)
error that can be emitted by ViSP classes.
const char * getMessage() const
Implementation of an homogeneous matrix and operations on such kind of matrices.
static void createDepthHistogram(const vpImage< uint16_t > &src_depth, vpImage< vpRGBa > &dest_rgba)
static void convert(const vpImage< unsigned char > &src, vpImage< vpRGBa > &dest)
Definition of the vpImage class member functions.
Wrapper for the KLT (Kanade-Lucas-Tomasi) feature tracker implemented in OpenCV. Thus, to enable this class, OpenCV should be installed.
void setBlockSize(int blockSize)
void setQuality(double qualityLevel)
void setHarrisFreeParameter(double harris_k)
void setMaxFeatures(int maxCount)
void setMinDistance(double minDistance)
void setWindowSize(int winSize)
void setPyramidLevels(int pyrMaxLevel)
static double rad(double deg)
Real-time 6D object pose tracking using its CAD model.
virtual void setCameraParameters(const vpCameraParameters &camera)
virtual void getPose(vpHomogeneousMatrix &cMo) const
virtual void setDisplayFeatures(bool displayF)
virtual void setKltMaskBorder(const unsigned int &e)
virtual unsigned int getNbFeaturesEdge() const
virtual void setAngleAppear(const double &a)
virtual void initFromPose(const vpImage< unsigned char > &I, const vpHomogeneousMatrix &cMo)
virtual unsigned int getNbFeaturesKlt() const
virtual void getCameraParameters(vpCameraParameters &camera) const
virtual void setAngleDisappear(const double &a)
virtual void setMovingEdge(const vpMe &me)
virtual unsigned int getNbFeaturesDepthDense() const
virtual void setKltOpencv(const vpKltOpencv &t)
virtual double computeCurrentProjectionError(const vpImage< unsigned char > &I, const vpHomogeneousMatrix &_cMo, const vpCameraParameters &_cam)
virtual void setCameraTransformationMatrix(const std::string &cameraName, const vpHomogeneousMatrix &cameraTransformationMatrix)
virtual void loadModel(const std::string &modelFile, bool verbose=false, const vpHomogeneousMatrix &T=vpHomogeneousMatrix())
virtual void display(const vpImage< unsigned char > &I, const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam, const vpColor &col, unsigned int thickness=1, bool displayFullModel=false)
virtual void track(const vpImage< unsigned char > &I)
void setMu1(const double &mu_1)
void setSampleStep(const double &s)
void setRange(const unsigned int &r)
void setLikelihoodThresholdType(const vpLikelihoodThresholdType likelihood_threshold_type)
void setMaskSize(const unsigned int &a)
void setMu2(const double &mu_2)
@ NORMALIZED_THRESHOLD
Easy-to-use normalized likelihood threshold corresponding to the minimal luminance contrast to consider, with values in [0 ; 255].
void setMaskNumber(const unsigned int &a)
void setThreshold(const double &t)
void acquire(vpImage< unsigned char > &grey, double *ts=NULL)
vpCameraParameters getCameraParameters(const rs2_stream &stream, vpCameraParameters::vpCameraParametersProjType type=vpCameraParameters::perspectiveProjWithDistortion, int index=-1) const
bool open(const rs2::config &cfg=rs2::config())
vpHomogeneousMatrix getTransformation(const rs2_stream &from, const rs2_stream &to, int from_index=-1) const