ViSP — Visual Servoing Platform, version 3.7.0: source listing of the AROgre.cpp example.
1/*
2 * ViSP, open source Visual Servoing Platform software.
3 * Copyright (C) 2005 - 2025 by Inria. All rights reserved.
4 *
5 * This software is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 * See the file LICENSE.txt at the root directory of this source
10 * distribution for additional information about the GNU GPL.
11 *
12 * For using ViSP with software that can not be combined with the GNU
13 * GPL, please contact Inria about acquiring a ViSP Professional
14 * Edition License.
15 *
16 * See https://visp.inria.fr for more information.
17 *
18 * This software was developed at:
19 * Inria Rennes - Bretagne Atlantique
20 * Campus Universitaire de Beaulieu
21 * 35042 Rennes Cedex
22 * France
23 *
24 * If you have questions regarding the use of this file, please contact
25 * Inria at visp@inria.fr
26 *
27 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
28 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
29 *
30 * Description:
31 * Implementation of a simple augmented reality application using the vpAROgre
32 * class.
33 */
34
39
40#include <iostream>
41#include <visp3/core/vpConfig.h>
42
43#if defined(VISP_HAVE_OGRE) && defined(VISP_HAVE_DISPLAY)
44
45#include <visp3/ar/vpAROgre.h>
46#include <visp3/blob/vpDot2.h>
47#include <visp3/core/vpDebug.h>
48#include <visp3/core/vpImagePoint.h>
49#include <visp3/core/vpIoTools.h>
50#include <visp3/core/vpPixelMeterConversion.h>
51#include <visp3/core/vpPoint.h>
52#include <visp3/gui/vpDisplayFactory.h>
53#include <visp3/io/vpParseArgv.h>
54#include <visp3/io/vpVideoReader.h>
55#include <visp3/vision/vpPose.h>
56
57// List of allowed command line options
58#define GETOPTARGS "ci:p:h"
59
60#ifdef ENABLE_VISP_NAMESPACE
61using namespace VISP_NAMESPACE_NAME;
62#endif
63
/*!
  Print the help message describing the command line options.

  \param name : Program name (argv[0]), substituted into the synopsis.
  \param badparam : Name of an invalid parameter; when non-null an error
  footer naming it is appended after the option list.
  \param ipath : Current default input image path, shown as the -i default.
  \param ppath : Current default personal image path, shown as the -p default.
*/
void usage(const char *name, const char *badparam, const std::string &ipath, const std::string &ppath)
{
  // Pick the image extension matching the installed ViSP dataset version
  // (png from dataset 3.6.0 on, pgm before).
#if defined(VISP_HAVE_DATASET)
#if VISP_HAVE_DATASET_VERSION >= 0x030600
  std::string ext("png");
#else
  std::string ext("pgm");
#endif
#else
  // We suppose that the user will download a recent dataset
  std::string ext("png");
#endif

  fprintf(stdout, "\n\
Test augmented reality using the vpAROgre class.\n\
\n\
SYNOPSIS\n\
 %s [-i <test image path>] [-p <personal image path>]\n\
 [-c] [-h]\n", name);

  fprintf(stdout, "\n\
OPTIONS: Default\n\
 -i <input image path> %s\n\
 Set image input path.\n\
 From this path read images \n\
 \"mire-2/image.%%04d.%s\". These \n\
 images come from visp-images-x.y.z.tar.gz available \n\
 on the ViSP website.\n\
 Setting the VISP_INPUT_IMAGE_PATH environment\n\
 variable produces the same behaviour than using\n\
 this option.\n\
 \n\
 -p <personal image path> %s\n\
 Specify a personal sequence containing images \n\
 to process.\n\
 By image sequence, we mean one file per image.\n\
 Example : \"/Temp/visp-images/cube/image.%%04d.%s\"\n\
 %%04d is for the image numbering.\n\
\n\
 -c\n\
 Disable the mouse click. Useful to automate the \n\
 execution of this program without human intervention.\n\
\n\
 -h\n\
 Print the help.\n",
          ipath.c_str(), ext.c_str(), ppath.c_str(), ext.c_str());

  if (badparam)
    fprintf(stdout, "\nERROR: Bad parameter [%s]\n", badparam);
}
122
134bool getOptions(int argc, const char **argv, std::string &ipath, std::string &ppath, bool &click_allowed)
135{
136 const char *optarg_;
137 int c;
138 while ((c = vpParseArgv::parse(argc, argv, GETOPTARGS, &optarg_)) > 1) {
139
140 switch (c) {
141 case 'c':
142 click_allowed = false;
143 break;
144 case 'i':
145 ipath = optarg_;
146 break;
147 case 'p':
148 ppath = optarg_;
149 break;
150 case 'h':
151 usage(argv[0], nullptr, ipath, ppath);
152 return false;
153
154 default:
155 usage(argv[0], optarg_, ipath, ppath);
156 return false;
157 }
158 }
159
160 if ((c == 1) || (c == -1)) {
161 // standalone param or error
162 usage(argv[0], nullptr, ipath, ppath);
163 std::cerr << "ERROR: " << std::endl;
164 std::cerr << " Bad argument " << optarg_ << std::endl << std::endl;
165 return false;
166 }
167
168 return true;
169}
170
171#ifndef DOXYGEN_SHOULD_SKIP_THIS
172
/*!
  Augmented-reality scene derived from vpAROgre: renders an animated robot
  mesh standing on a textured ground plane over the camera image, and (when
  OIS is available) lets the user drive the robot with the keyboard.
*/
class vpAROgreExample : public vpAROgre
{
public:
  // The constructor doesn't change here
  vpAROgreExample(const vpCameraParameters &cam = vpCameraParameters(), unsigned int width = 640,
                  unsigned int height = 480, const char *resourcePath = nullptr)
    : vpAROgre(cam, width, height)
  {
    // Direction vectors
    if (resourcePath)
      mResourcePath = resourcePath;
    std::cout << "mResourcePath: " << mResourcePath << std::endl;
    // Robot's initial "forward" direction in the object frame
    vecDevant = Ogre::Vector3(0, -1, 0);
    robot = nullptr;
    mAnimationState = nullptr;
  }

protected:
  // Attributes
  // Vector to move
  Ogre::Vector3 vecDevant;
  // Animation attribute
  Ogre::AnimationState *mAnimationState;
  // The entity representing the robot
  Ogre::Entity *robot;

  // Our scene will just be a plane
  void createScene() VP_OVERRIDE
  {
    // Set lights
    mSceneMgr->setAmbientLight(Ogre::ColourValue(static_cast<float>(0.6), static_cast<float>(0.6), static_cast<float>(0.6))); // Default value of lightning
    Ogre::Light *light = mSceneMgr->createLight();
    light->setDiffuseColour(1.0, 1.0, 1.0); // scaled RGB values
    light->setSpecularColour(1.0, 1.0, 1.0); // scaled RGB values
    // Point light source. From Ogre 1.10 on, lights are positioned by
    // attaching them to a scene node instead of setPosition().
#if (VISP_HAVE_OGRE_VERSION < (1 << 16 | 10 << 8 | 0))
    light->setPosition(-5, -5, 10);
#else
    Ogre::SceneNode *spotLightNode = mSceneMgr->getRootSceneNode()->createChildSceneNode();
    spotLightNode->attachObject(light);
    spotLightNode->setPosition(Ogre::Vector3(-5, -5, 10));
#endif
    light->setType(Ogre::Light::LT_POINT);
    light->setAttenuation((Ogre::Real)100, (Ogre::Real)1.0, (Ogre::Real)0.045, (Ogre::Real)0.0075);
    // Shadows: this light casts them
    light->setCastShadows(true);

    // Create the Entity
    robot = mSceneMgr->createEntity("Robot", "robot.mesh");
    // Attach robot to scene graph
    Ogre::SceneNode *RobotNode = mSceneMgr->getRootSceneNode()->createChildSceneNode("Robot");
    RobotNode->attachObject(robot);
    // Scale the mesh down to scene units and orient it upright
    RobotNode->scale((Ogre::Real)0.001, (Ogre::Real)0.001, (Ogre::Real)0.001);
    RobotNode->pitch(Ogre::Degree(90));
    RobotNode->yaw(Ogre::Degree(-90));
    robot->setCastShadows(true);
    mSceneMgr->setShadowTechnique(Ogre::SHADOWTYPE_STENCIL_MODULATIVE);

    // Add an animation
    // Set the good animation
    mAnimationState = robot->getAnimationState("Idle");
    // Start over when finished
    mAnimationState->setLoop(true);
    // Animation enabled
    mAnimationState->setEnabled(true);

    // Add a ground: a plane through the origin with normal +Z
    Ogre::Plane plan;
    plan.d = 0;
    plan.normal = Ogre::Vector3::UNIT_Z;
    Ogre::MeshManager::getSingleton().createPlane("sol", Ogre::ResourceGroupManager::DEFAULT_RESOURCE_GROUP_NAME, plan,
                                                  (Ogre::Real)0.22, (Ogre::Real)0.16, 10, 10, true, 1, 1, 1);
    Ogre::Entity *ent = mSceneMgr->createEntity("Entitesol", "sol");
    Ogre::SceneNode *PlaneNode = mSceneMgr->getRootSceneNode()->createChildSceneNode("Entitesol");
    PlaneNode->attachObject(ent);
    ent->setMaterialName("Examples/GrassFloor");
  }

  // Called at the end of every rendered frame: advance the current animation.
  bool customframeEnded(const Ogre::FrameEvent &evt) VP_OVERRIDE
  {
    // Update animation
    // To move, we add it the time since last frame
    mAnimationState->addTime(evt.timeSinceLastFrame);
    return true;
  }

#ifdef VISP_HAVE_OIS
  // Poll the keyboard and move/turn the robot accordingly.
  // Returns false to stop rendering when Escape is pressed.
  bool processInputEvent(const Ogre::FrameEvent & /*evt*/)
  {
    mKeyboard->capture();
    Ogre::Matrix3 rotmy;
    double angle = -M_PI / 8;
    if (mKeyboard->isKeyDown(OIS::KC_ESCAPE))
      return false;

    // Event telling that we will have to move, setting the animation to
    // "walk", if false, annimation goes to "Idle"
    bool event = false;
    // Check entries
    // NOTE(review): Z/Q/S/D bindings look like the AZERTY equivalent of
    // WASD (arrow keys also work) — confirm intended layout.
    if (mKeyboard->isKeyDown(OIS::KC_Z) || mKeyboard->isKeyDown(OIS::KC_UP)) {
      // Move forward along the current heading
      mSceneMgr->getSceneNode("Robot")->setPosition(mSceneMgr->getSceneNode("Robot")->getPosition() +
                                                    (Ogre::Real)0.003 * vecDevant);
      event = true;
    }
    if (mKeyboard->isKeyDown(OIS::KC_S) || mKeyboard->isKeyDown(OIS::KC_DOWN)) {
      // Move backward along the current heading
      mSceneMgr->getSceneNode("Robot")->setPosition(mSceneMgr->getSceneNode("Robot")->getPosition() -
                                                    (Ogre::Real)0.003 * vecDevant);
      event = true;
    }
    if (mKeyboard->isKeyDown(OIS::KC_Q) || mKeyboard->isKeyDown(OIS::KC_LEFT)) {
      // Turn left: rotate the heading vector and yaw the scene node
      rotmy = Ogre::Matrix3((Ogre::Real)cos(-angle), (Ogre::Real)sin(-angle), 0, (Ogre::Real)(-sin(-angle)),
                            (Ogre::Real)cos(-angle), 0, 0, 0, 1);
      vecDevant = vecDevant * rotmy;
      mSceneMgr->getSceneNode("Robot")->yaw(Ogre::Radian((Ogre::Real)(-angle)));
      event = true;
    }
    if (mKeyboard->isKeyDown(OIS::KC_D) || mKeyboard->isKeyDown(OIS::KC_RIGHT)) {
      // Turn right
      rotmy = Ogre::Matrix3((Ogre::Real)cos(angle), (Ogre::Real)sin(angle), 0, (Ogre::Real)(-sin(angle)),
                            (Ogre::Real)cos(angle), 0, 0, 0, 1);
      vecDevant = vecDevant * rotmy;
      mSceneMgr->getSceneNode("Robot")->yaw(Ogre::Radian((Ogre::Real)angle));
      event = true;
    }

    // Play the right animation
    if (event) {
      mAnimationState = robot->getAnimationState("Walk");
    }
    else
      mAnimationState = robot->getAnimationState("Idle");

    // Start over when finished
    mAnimationState->setLoop(true);
    // Animation enabled
    mAnimationState->setEnabled(true);

    return true;
  }
#endif
};
325
/*!
  Compute the initial pose of the camera from four tracked blobs.

  The four dots are either clicked by the user or, when clicks are disabled,
  initialized at hard-coded image positions. Their centers of gravity are
  converted to normalized coordinates and matched against the known 3D model
  of the 0.14 x 0.12 m target to estimate cMo.

  \param mcam : Intrinsic camera parameters used for pixel-to-meter conversion.
  \param I : Grey-level image the dots are tracked in.
  \param mPose : Pose computation object; its point list is (re)filled here.
  \param md : Array of 4 blob trackers (initialized by this function).
  \param mcog : Output array of the 4 blob centers of gravity (pixels).
  \param cMo : Output estimated camera-to-object transformation.
  \param mP : Array of 4 model points; world coordinates set and image-plane
  coordinates filled here.
  \param opt_click_allowed : When true the user clicks the 4 dots, otherwise
  fixed image positions are used.
*/
void computeInitialPose(vpCameraParameters *mcam, vpImage<unsigned char> &I, vpPose *mPose, vpDot2 *md,
                        vpImagePoint *mcog, vpHomogeneousMatrix *cMo, vpPoint *mP, const bool &opt_click_allowed)
{
  // ---------------------------------------------------
  // Code inspired from ViSP example of camera pose
  // ----------------------------------------------------
  bool opt_display = true;

  vpDisplay *display = nullptr;
  if (opt_display) {
#if defined(VISP_HAVE_DISPLAY)
    // NOTE(review): the display allocation (presumably
    // vpDisplayFactory::allocateDisplay()) appears elided in this listing;
    // as shown, display stays nullptr and display->init below would be a
    // null dereference — confirm against the original source.
#else
    opt_display = false; // No display is available
#endif
  }

  for (unsigned int i = 0; i < 4; ++i) {
    if (opt_display) {
      md[i].setGraphics(true);
    }
    else {
      md[i].setGraphics(false);
    }
  }

  if (opt_display) {
    // Display size is automatically defined by the image (I) size
    display->init(I, 100, 100, "Preliminary Pose Calculation");
    // display the image
    // The image class has a member that specify a pointer toward
    // the display that has been initialized in the display declaration
    // therefore is is no longer necessary to make a reference to the
    // display variable.
    // NOTE(review): the vpDisplay::display/flush calls appear elided in
    // this listing.
  }

  std::cout << "**"<< std::endl;
  std::cout << "** Preliminary Pose Calculation" << std::endl;
  std::cout << "** Click on the 4 dots" << std::endl;
  std::cout << "** Dot1: (-x,-y,0), Dot2: (x,-y,0), Dot3: (x,y,0), Dot4: (-x,y,0)" << std::endl;
  std::cout << "**" << std::endl;

  // Fallback dot positions used when mouse clicks are disabled (-c option)
  vpImagePoint ip[4];
  if (!opt_click_allowed) {
    ip[0].set_i(265);
    ip[0].set_j(93);
    ip[1].set_i(248);
    ip[1].set_j(242);
    ip[2].set_i(166);
    ip[2].set_j(215);
    ip[3].set_i(178);
    ip[3].set_j(85);
  }
  for (unsigned int i = 0; i < 4; ++i) {
    // by using setGraphics, we request to see the edges of the dot
    // in red on the screen.
    // It uses the overlay image plane.
    // The default of this setting is that it is time consuming

    md[i].setGraphics(true);
    md[i].setGrayLevelPrecision(0.7);
    md[i].setSizePrecision(0.5);

    // Redraw the dots already initialized so they stay visible
    for (unsigned int j = 0; j < i; j++)
      md[j].display(I);

    // flush the display buffer
    // Initialize tracking from a user click, or from the fixed position;
    // tracking failures are deliberately ignored (best effort)
    try {
      if (opt_click_allowed)
        md[i].initTracking(I);
      else
        md[i].initTracking(I, ip[i]);
    }
    catch (...) {
    }

    mcog[i] = md[i].getCog();
    // an exception is thrown by the track method if
    // - dot is lost
    // - the number of pixel is too small
    // - too many pixels are detected (this is usual when a "big" specularity
    // occurs. The threshold can be modified using the setNbMaxPoint(int) method
    if (opt_display) {
      md[i].display(I);
      // flush the display buffer
    }
  }

  if (opt_display) {
    // display a red cross (size 10) in the image at the dot center
    // of gravity location
    //
    // WARNING
    // in the vpDisplay class member's when pixel coordinates
    // are considered the first element is the row index and the second
    // is the column index:
    // vpDisplay::displayCross(Image, row index, column index, size, color)
    // therefore u and v are inverted wrt to the vpDot specification
    // Alternatively, to avoid this problem another set of member have
    // been defined in the vpDisplay class.
    // If the method name is postfixe with _uv the specification is :
    // vpDisplay::displayCross_uv(Image, column index, row index, size,
    // color)

    for (unsigned int i = 0; i < 4; ++i) {
      vpDisplay::displayCross(I, mcog[i], 10, vpColor::red);
    }

    // flush the X11 buffer
  }

  // --------------------------------------------------------
  // Now we will compute the pose
  // --------------------------------------------------------

  // the list of point is cleared (if that's not done before)
  mPose->clearPoint();

  // we set the 3D points coordinates (in meter !) in the object/world frame
  double l = 0.06;
  double L = 0.07;
  mP[0].setWorldCoordinates(-L, -l, 0); // (X,Y,Z)
  mP[1].setWorldCoordinates(+L, -l, 0);
  mP[2].setWorldCoordinates(+L, +l, 0);
  mP[3].setWorldCoordinates(-L, +l, 0);

  // pixel-> meter conversion
  for (unsigned int i = 0; i < 4; ++i) {
    // u[i]. v[i] are expressed in pixel
    // conversion in meter is achieved using
    // x = (u-u0)/px
    // y = (v-v0)/py
    // where px, py, u0, v0 are the intrinsic camera parameters
    double x = 0, y = 0;
    vpPixelMeterConversion::convertPoint(*mcam, mcog[i], x, y);
    mP[i].set_x(x);
    mP[i].set_y(y);
  }

  // The pose structure is build, we put in the point list the set of point
  // here both 2D and 3D world coordinates are known
  for (unsigned int i = 0; i < 4; ++i) {
    mPose->addPoint(mP[i]); // and added to the pose computation point list
  }

  // compute the initial pose using Dementhon method followed by a non linear
  // minimization method

  // Compute initial pose
  // NOTE(review): the mPose->computePose(...) call that fills *cMo appears
  // elided in this listing — confirm against the original source.

  // Display briefly just to have a glimpse a the ViSP pose
  if (opt_display) {
    // Display the computed pose
    mPose->display(I, *cMo, *mcam, 0.05, vpColor::red);
    vpTime::wait(800);
  }
}
497
498#endif
499
/*!
  Entry point of the augmented reality example.

  Reads an image sequence (from the ViSP dataset or a personal path),
  computes an initial camera pose from four tracked dots, then renders an
  Ogre scene (animated robot on a ground plane) registered on each frame
  while updating the pose by tracking the dots.

  \return EXIT_SUCCESS on normal termination, EXIT_FAILURE on bad options,
  unreadable images, or any ViSP/Ogre exception.
*/
int main(int argc, const char **argv)
{
  // Image extension depends on the installed dataset version (png >= 3.6.0)
#if defined(VISP_HAVE_DATASET)
#if VISP_HAVE_DATASET_VERSION >= 0x030600
  std::string ext("png");
#else
  std::string ext("pgm");
#endif
#else
  // We suppose that the user will download a recent dataset
  std::string ext("png");
#endif

  try {
    std::string env_ipath;
    std::string opt_ipath;
    std::string ipath;
    std::string opt_ppath;
    std::string dirname;
    std::string filename;
    bool opt_click_allowed = true;

    // Get the visp-images-data package path or VISP_INPUT_IMAGE_PATH
    // environment variable value
    // NOTE(review): the assignment of env_ipath (presumably from
    // vpIoTools::getViSPImagesDataPath()) appears elided in this listing.

    // Set the default input path
    if (!env_ipath.empty())
      ipath = env_ipath;

    // Read the command line options
    if (getOptions(argc, argv, opt_ipath, opt_ppath, opt_click_allowed) == false) {
      return EXIT_FAILURE;
    }

    // Get the option values
    if (!opt_ipath.empty())
      ipath = opt_ipath;

    // Compare ipath and env_ipath. If they differ, we take into account
    // the input path coming from the command line option
    if (!opt_ipath.empty() && !env_ipath.empty() && opt_ppath.empty()) {
      if (ipath != env_ipath) {
        std::cout << std::endl << "WARNING: " << std::endl;
        std::cout << "  Since -i <visp image path=" << ipath << "> "
                  << "  is different from VISP_IMAGE_PATH=" << env_ipath << std::endl
                  << "  we skip the environment variable." << std::endl;
      }
    }

    // Test if an input path is set
    if (opt_ipath.empty() && env_ipath.empty() && opt_ppath.empty()) {
      usage(argv[0], nullptr, ipath, opt_ppath);
      std::cerr << std::endl << "ERROR:" << std::endl;
      std::cerr << "  Use -i <visp image path> option or set VISP_INPUT_IMAGE_PATH " << std::endl
                << "  environment variable to specify the location of the " << std::endl
                << "  image path where test images are located." << std::endl
                << "  Use -p <personal image path> option if you want to " << std::endl
                << "  use personal images." << std::endl
                << std::endl;

      return EXIT_FAILURE;
    }

    std::ostringstream s;

    if (opt_ppath.empty()) {
      // Set the path location of the image sequence
      dirname = vpIoTools::createFilePath(ipath, "mire-2");

      // Build the name of the image file

      s.setf(std::ios::right, std::ios::adjustfield);
      s << "image.%04d.";
      s << ext;
      filename = vpIoTools::createFilePath(dirname, s.str());
    }
    else {
      filename = opt_ppath;
    }

    // We will read a sequence of images
    vpVideoReader grabber;
    grabber.setFirstFrameIndex(1);
    grabber.setFileName(filename.c_str());
    // Grey level image associated to a display in the initial pose
    // computation
    vpImage<unsigned char> Idisplay;
    // Grey level image to track points
    // RGBa image to get background
    // Matrix representing camera parameters
    // NOTE(review): the declarations of I (vpImage<unsigned char>),
    // IC (vpImage<vpRGBa>) and cMo (vpHomogeneousMatrix) used below appear
    // elided in this listing — confirm against the original source.

    // Variables used for pose computation purposes
    vpPose mPose;
    vpDot2 md[4];
    vpImagePoint mcog[4];
    vpPoint mP[4];

    // CameraParameters we got from calibration
    // Keep u0 and v0 as center of the screen
    // NOTE(review): the declaration of mcam (vpCameraParameters) used below
    // appears elided in this listing.

    try {
      std::cout << "Load: " << filename << std::endl;
      grabber.open(Idisplay);
      grabber.acquire(Idisplay);
      // Intrinsics from calibration; principal point set to the image center
      vpCameraParameters mcamTmp(592, 570, grabber.getWidth() / 2, grabber.getHeight() / 2);
      // Compute the initial pose of the camera
      computeInitialPose(&mcamTmp, Idisplay, &mPose, md, mcog, &cMo, mP, opt_click_allowed);
      // Close the framegrabber
      grabber.close();

      // Associate the grabber to the RGBa image
      grabber.open(IC);
      mcam.init(mcamTmp);
    }
    catch (...) {
      std::cerr << std::endl << "ERROR:" << std::endl;
      std::cerr << "  Cannot read " << filename << std::endl;
      std::cerr << "  Check your -i " << ipath << " option " << std::endl
                << "  or VISP_INPUT_IMAGE_PATH environment variable." << std::endl;
      return EXIT_FAILURE;
    }

    // Create a vpAROgre object with color background
    vpAROgreExample ogre(mcam, static_cast<unsigned int>(grabber.getWidth()), static_cast<unsigned int>(grabber.getHeight()));
    // Initialize it
    bool bufferedKeys = false, hidden = false;
    ogre.init(IC, bufferedKeys, hidden);
    double t0 = vpTime::measureTimeMs();
    bool quit = false;

    // Rendering loop: track the 4 dots, update the pose, render the AR view
    while (ogre.continueRendering() && !grabber.end() && !quit) {
      // Acquire a frame
      grabber.acquire(IC);

      // Convert it to a grey level image for tracking purpose
      // NOTE(review): the vpImageConvert::convert(IC, I) call appears
      // elided in this listing.

      // kill the point list
      mPose.clearPoint();

      // track the dot
      for (int i = 0; i < 4; ++i) {
        // track the point
        md[i].track(I, mcog[i]);
        md[i].setGrayLevelPrecision(0.90);
        // pixel->meter conversion
        {
          double x = 0, y = 0;
          vpPixelMeterConversion::convertPoint(mcam, mcog[i], x, y);
          mP[i].set_x(x);
          mP[i].set_y(y);
        }

        // and added to the pose computation point list
        mPose.addPoint(mP[i]);
      }
      // the pose structure has been updated

      // the pose is now updated using the virtual visual servoing approach
      // Dementhon or lagrange is no longer necessary, pose at the
      // previous iteration is sufficient
      // NOTE(review): the mPose.computePose(..., cMo) call appears elided
      // in this listing.

      // Display with ogre
      ogre.display(IC, cMo);

      // Wait so that the video does not go too fast
      vpTime::wait(15);

      // Measure loop fps
      double t1 = vpTime::measureTimeMs();
      std::cout << "\r> " << 1000 / (t1 - t0) << " fps";
      t0 = t1;
    }
    // Close the grabber
    grabber.close();

    return EXIT_SUCCESS;
  }
  catch (const vpException &e) {
    std::cout << "Catch a ViSP exception: " << e << std::endl;
    return EXIT_FAILURE;
  }
  catch (Ogre::Exception &e) {
    std::cout << "Catch an Ogre exception: " << e.getDescription() << std::endl;
    return EXIT_FAILURE;
  }
  catch (...) {
    std::cout << "Catch an exception " << std::endl;
    return EXIT_FAILURE;
  }
}
707#else // VISP_HAVE_OGRE && VISP_HAVE_DISPLAY
// Fallback entry point built when Ogre or a display library is missing:
// print installation hints matching what is missing, then exit successfully.
int main()
{
#if (!(defined(VISP_HAVE_X11) || defined(VISP_HAVE_GTK) || defined(VISP_HAVE_GDI)))
  // No GUI backend available at all
  std::cout << "You do not have X11, or GTK, or GDI (Graphical Device Interface) functionalities to display images..."
            << std::endl;
  std::cout << "Tip if you are on a unix-like system:" << std::endl;
  std::cout << "- Install X11, configure again ViSP using cmake and build again this example" << std::endl;
  std::cout << "Tip if you are on a windows-like system:" << std::endl;
  std::cout << "- Install GDI, configure again ViSP using cmake and build again this example" << std::endl;
#else
  // A display exists, so Ogre must be the missing dependency
  std::cout << "You do not have Ogre functionalities" << std::endl;
  std::cout << "Tip:" << std::endl;
  std::cout << "- Install Ogre3D, configure again ViSP using cmake and build again this example" << std::endl;
#endif
  return EXIT_SUCCESS;
}
Implementation of an augmented reality viewer using Ogre3D 3rd party.
Definition vpAROgre.h:121
virtual bool customframeEnded(const Ogre::FrameEvent &evt)
Definition vpAROgre.cpp:741
virtual bool processInputEvent(const Ogre::FrameEvent &)
Definition vpAROgre.h:344
virtual void createScene(void)
Definition vpAROgre.h:328
Generic class defining intrinsic camera parameters.
void init()
Basic initialization with the default parameters.
static const vpColor red
Definition vpColor.h:198
Class that defines generic functionalities for display.
Definition vpDisplay.h:171
static void display(const vpImage< unsigned char > &I)
static void displayCross(const vpImage< unsigned char > &I, const vpImagePoint &ip, unsigned int size, const vpColor &color, unsigned int thickness=1)
static void flush(const vpImage< unsigned char > &I)
This tracker is meant to track a blob (connex pixels with same gray level) on a vpImage.
Definition vpDot2.h:127
void track(const vpImage< unsigned char > &I, bool canMakeTheWindowGrow=true)
Definition vpDot2.cpp:441
void setGraphics(bool activate)
Definition vpDot2.h:320
void display(const vpImage< unsigned char > &I, vpColor color=vpColor::red, unsigned int thickness=1) const
Definition vpDot2.cpp:219
void setSizePrecision(const double &sizePrecision)
Definition vpDot2.cpp:745
void setGrayLevelPrecision(const double &grayLevelPrecision)
Definition vpDot2.cpp:715
vpImagePoint getCog() const
Definition vpDot2.h:183
void initTracking(const vpImage< unsigned char > &I, unsigned int size=0)
Definition vpDot2.cpp:263
error that can be emitted by ViSP classes.
Definition vpException.h:60
unsigned int getWidth() const
Return the number of columns in the image.
unsigned int getHeight() const
Return the number of rows in the image.
Implementation of an homogeneous matrix and operations on such kind of matrices.
static void convert(const vpImage< unsigned char > &src, vpImage< vpRGBa > &dest)
Class that defines a 2D point in an image. This class is useful for image processing and stores only ...
void set_j(double jj)
void set_i(double ii)
Definition of the vpImage class member functions.
Definition vpImage.h:131
static std::string getViSPImagesDataPath()
static std::string createFilePath(const std::string &parent, const std::string &child)
static bool parse(int *argcPtr, const char **argv, vpArgvInfo *argTable, int flags)
static void convertPoint(const vpCameraParameters &cam, const double &u, const double &v, double &x, double &y)
Class that defines a 3D point in the object frame and allows forward projection of a 3D point in the ...
Definition vpPoint.h:79
void set_x(double x)
Set the point x coordinate in the image plane.
Definition vpPoint.cpp:471
void setWorldCoordinates(double oX, double oY, double oZ)
Definition vpPoint.cpp:116
void set_y(double y)
Set the point y coordinate in the image plane.
Definition vpPoint.cpp:473
Class used for pose computation from N points (pose from point only). Some of the algorithms implemen...
Definition vpPose.h:82
void addPoint(const vpPoint &P)
Definition vpPose.cpp:96
@ DEMENTHON_LAGRANGE_VIRTUAL_VS
Definition vpPose.h:103
@ VIRTUAL_VS
Definition vpPose.h:97
bool computePose(vpPoseMethodType method, vpHomogeneousMatrix &cMo, FuncCheckValidityPose func=nullptr)
Definition vpPose.cpp:385
void clearPoint()
Definition vpPose.cpp:89
static void display(vpImage< unsigned char > &I, vpHomogeneousMatrix &cMo, vpCameraParameters &cam, double size, vpColor col=vpColor::none)
Definition vpPose.cpp:567
Class that enables to manipulate easily a video file or a sequence of images. As it inherits from the...
void open(vpImage< vpRGBa > &I) VP_OVERRIDE
void setFileName(const std::string &filename)
void setFirstFrameIndex(const long first_frame)
void close() VP_OVERRIDE
void acquire(vpImage< vpRGBa > &I) VP_OVERRIDE
vpDisplay * allocateDisplay()
Return a newly allocated vpDisplay specialization if a GUI library is available or nullptr otherwise.
VISP_EXPORT double measureTimeMs()
VISP_EXPORT int wait(double t0, double t)