Visual Servoing Platform version 3.7.0
Loading...
Searching...
No Matches
tutorial-panda3d-renderer.cpp
1
#include <iostream>

#include <visp3/core/vpConfig.h>

#if defined(VISP_HAVE_PANDA3D) && defined(VISP_HAVE_DISPLAY) && defined(VISP_HAVE_MODULE_IO)

#include <algorithm>
#include <cmath>
#include <cstdlib>

#include <visp3/core/vpException.h>
#include <visp3/core/vpExponentialMap.h>

#include <visp3/gui/vpDisplayX.h>
#include <visp3/gui/vpDisplayGDI.h>
#include <visp3/gui/vpDisplayD3D.h>
#include <visp3/gui/vpDisplayOpenCV.h>
#include <visp3/gui/vpDisplayGTK.h>

#include <visp3/io/vpParseArgv.h>
#include <visp3/io/vpImageIo.h>

#include <visp3/ar/vpPanda3DRGBRenderer.h>
#include <visp3/ar/vpPanda3DGeometryRenderer.h>
#include <visp3/ar/vpPanda3DRendererSet.h>
#include <visp3/ar/vpPanda3DCommonFilters.h>
#include <visp3/ar/vpPanda3DFrameworkManager.h>
24
25#ifdef ENABLE_VISP_NAMESPACE
26using namespace VISP_NAMESPACE_NAME;
27#endif
28
29void displayNormals(const vpImage<vpRGBf> &normalsImage,
30 vpImage<vpRGBa> &normalDisplayImage)
31{
32#if defined(_OPENMP)
33#pragma omp parallel for
34#endif
35 for (int i = 0; i < static_cast<int>(normalsImage.getSize()); ++i) {
36 normalDisplayImage.bitmap[i].R = static_cast<unsigned char>((normalsImage.bitmap[i].R + 1.0) * 127.5f);
37 normalDisplayImage.bitmap[i].G = static_cast<unsigned char>((normalsImage.bitmap[i].G + 1.0) * 127.5f);
38 normalDisplayImage.bitmap[i].B = static_cast<unsigned char>((normalsImage.bitmap[i].B + 1.0) * 127.5f);
39 }
40
41 vpDisplay::display(normalDisplayImage);
42 vpDisplay::flush(normalDisplayImage);
43}
44
45void displayDepth(const vpImage<float> &depthImage,
46 vpImage<unsigned char> &depthDisplayImage, float nearV, float farV)
47{
48#if defined(_OPENMP)
49#pragma omp parallel for
50#endif
51 for (int i = 0; i < static_cast<int>(depthImage.getSize()); ++i) {
52 float val = std::max(0.f, (depthImage.bitmap[i] - nearV) / (farV - nearV));
53 depthDisplayImage.bitmap[i] = static_cast<unsigned char>(val * 255.f);
54 }
55 vpDisplay::display(depthDisplayImage);
56 vpDisplay::flush(depthDisplayImage);
57}
58
59void displayLightDifference(const vpImage<vpRGBa> &colorImage, const vpImage<vpRGBa> &colorDiffuseOnly, vpImage<unsigned char> &lightDifference)
60{
61#if defined(_OPENMP)
62#pragma omp parallel for
63#endif
64 for (int i = 0; i < static_cast<int>(colorImage.getSize()); ++i) {
65 float I1 = 0.299 * colorImage.bitmap[i].R + 0.587 * colorImage.bitmap[i].G + 0.114 * colorImage.bitmap[i].B;
66 float I2 = 0.299 * colorDiffuseOnly.bitmap[i].R + 0.587 * colorDiffuseOnly.bitmap[i].G + 0.114 * colorDiffuseOnly.bitmap[i].B;
67 lightDifference.bitmap[i] = static_cast<unsigned char>(round(abs(I1 - I2)));
68 }
69 vpDisplay::display(lightDifference);
70 vpDisplay::flush(lightDifference);
71}
72
73void displayCanny(const vpImage<vpRGBf> &cannyRawData,
75{
76#if defined(_OPENMP)
77#pragma omp parallel for
78#endif
79 for (int i = 0; i < static_cast<int>(cannyRawData.getSize()); ++i) {
80 vpRGBf &px = cannyRawData.bitmap[i];
81 canny.bitmap[i] = 255 * (px.R * px.R + px.G * px.G > 0);
82 //canny.bitmap[i] = static_cast<unsigned char>(127.5f + 127.5f * atan(px.B));
83 }
84
85 vpDisplay::display(canny);
86 for (unsigned int i = 0; i < canny.getHeight(); i += 8) {
87 for (unsigned int j = 0; j < canny.getWidth(); j += 8) {
88 bool valid = (pow(cannyRawData[i][j].R, 2.f) + pow(cannyRawData[i][j].G, 2.f)) > 0;
89 if (!valid) continue;
90 float angle = cannyRawData[i][j].B;
91 unsigned x = j + 10 * cos(angle);
92 unsigned y = i + 10 * sin(angle);
93 vpDisplay::displayArrow(canny, i, j, y, x, vpColor::green);
94 }
95 }
96 vpDisplay::flush(canny);
97}
98
99int main(int argc, const char **argv)
100{
101 bool stepByStep = false;
102 bool debug = false;
103 bool showLightContrib = false;
104 bool showCanny = false;
105 char *modelPathCstr = nullptr;
106
107 char *backgroundPathCstr = nullptr;
108 vpParseArgv::vpArgvInfo argTable[] =
109 {
110 {"-model", vpParseArgv::ARGV_STRING, (char *) nullptr, (char *)&modelPathCstr,
111 "Path to the model to load."},
112
113 {"-background", vpParseArgv::ARGV_STRING, (char *) nullptr, (char *)&backgroundPathCstr,
114 "Path to the background image to load for the rgb renderer."},
115 {"-step", vpParseArgv::ARGV_CONSTANT_BOOL, (char *) nullptr, (char *)&stepByStep,
116 "Show frames step by step."},
117 {"-specular", vpParseArgv::ARGV_CONSTANT_BOOL, (char *) nullptr, (char *)&showLightContrib,
118 "Show frames step by step."},
119 {"-canny", vpParseArgv::ARGV_CONSTANT_BOOL, (char *) nullptr, (char *)&showCanny,
120 "Show frames step by step."},
121 {"-debug", vpParseArgv::ARGV_CONSTANT_BOOL, (char *) nullptr, (char *)&debug,
122 "Show Opengl/Panda3D debug message."},
123 {"-h", vpParseArgv::ARGV_HELP, (char *) nullptr, (char *) nullptr,
124 "Print the help."},
125 {(char *) nullptr, vpParseArgv::ARGV_END, (char *) nullptr, (char *) nullptr, (char *) nullptr} };
126
127 // Read the command line options
128 if (vpParseArgv::parse(&argc, argv, argTable,
132 return (false);
133 }
134
135 if (PStatClient::is_connected()) {
136 PStatClient::disconnect();
137 }
138
139 std::string host = ""; // Empty = default config var value
140 int port = -1; // -1 = default config var value
141 if (!PStatClient::connect(host, port)) {
142 std::cout << "Could not connect to PStat server." << std::endl;
143 }
144
145 std::string modelPath;
146 if (modelPathCstr) {
147 modelPath = modelPathCstr;
148 }
149 else {
150 modelPath = "data/suzanne.bam";
151 }
152
153 std::string backgroundPath;
154 if (backgroundPathCstr) {
155 backgroundPath = backgroundPathCstr;
156 }
157 const std::string objectName = "object";
158
160 double factor = 1.0;
161 vpPanda3DRenderParameters renderParams(vpCameraParameters(600 * factor, 600 * factor, 320 * factor, 240 * factor), int(480 * factor), int(640 * factor), 0.01, 10.0);
162 unsigned h = renderParams.getImageHeight(), w = renderParams.getImageWidth();
163 vpPanda3DRendererSet renderer;
164 renderer.setRenderParameters(renderParams);
165 renderer.setVerticalSyncEnabled(false);
166 renderer.setAbortOnPandaError(true);
167 if (debug) {
168 renderer.enableDebugLog();
169 }
171
173 std::shared_ptr<vpPanda3DGeometryRenderer> geometryRenderer = std::make_shared<vpPanda3DGeometryRenderer>(vpPanda3DGeometryRenderer::vpRenderType::OBJECT_NORMALS);
174 std::shared_ptr<vpPanda3DGeometryRenderer> cameraRenderer = std::make_shared<vpPanda3DGeometryRenderer>(vpPanda3DGeometryRenderer::vpRenderType::CAMERA_NORMALS);
175 std::shared_ptr<vpPanda3DRGBRenderer> rgbRenderer = std::make_shared<vpPanda3DRGBRenderer>();
176 std::shared_ptr<vpPanda3DRGBRenderer> rgbDiffuseRenderer = std::make_shared<vpPanda3DRGBRenderer>(false);
177 std::shared_ptr<vpPanda3DLuminanceFilter> grayscaleFilter = std::make_shared<vpPanda3DLuminanceFilter>("toGrayscale", rgbRenderer, false);
178 std::shared_ptr<vpPanda3DCanny> cannyFilter = std::make_shared<vpPanda3DCanny>("canny", grayscaleFilter, true, 10.f);
180
182 renderer.addSubRenderer(geometryRenderer);
183 renderer.addSubRenderer(cameraRenderer);
184 renderer.addSubRenderer(rgbRenderer);
185 if (showLightContrib) {
186 renderer.addSubRenderer(rgbDiffuseRenderer);
187 }
188 if (showCanny) {
189 renderer.addSubRenderer(grayscaleFilter);
190 renderer.addSubRenderer(cannyFilter);
191 }
192 std::cout << "Initializing Panda3D rendering framework" << std::endl;
193 renderer.initFramework();
195
197 NodePath object = renderer.loadObject(objectName, modelPath);
198 renderer.addNodeToScene(object);
199
200 vpPanda3DAmbientLight alight("Ambient", vpRGBf(0.2f));
201 renderer.addLight(alight);
202
203 vpPanda3DPointLight plight("Point", vpRGBf(1.0f), vpColVector({ 0.3, -0.4, -0.2 }), vpColVector({ 0.0, 0.0, 1.0 }));
204 renderer.addLight(plight);
205
206 vpPanda3DDirectionalLight dlight("Directional", vpRGBf(2.0f), vpColVector({ 1.0, 1.0, 0.0 }));
207 renderer.addLight(dlight);
208
209 if (!backgroundPath.empty()) {
210 vpImage<vpRGBa> background;
211 vpImageIo::read(background, backgroundPath);
212 rgbRenderer->setBackgroundImage(background);
213 }
214
215 rgbRenderer->printStructure();
216
217 renderer.setCameraPose(vpHomogeneousMatrix(0.0, 0.0, -5.0, 0.0, 0.0, 0.0));
219
220 std::cout << "Creating display and data images" << std::endl;
221 vpImage<vpRGBf> normalsImage;
222 vpImage<vpRGBf> cameraNormalsImage;
223 vpImage<vpRGBf> cannyRawData;
224 vpImage<float> depthImage;
225 vpImage<vpRGBa> colorImage(h, w);
226 vpImage<vpRGBa> colorDiffuseOnly(h, w);
227 vpImage<unsigned char> lightDifference(h, w);
228 vpImage<unsigned char> cannyImage(h, w);
229
230 vpImage<vpRGBa> normalDisplayImage(h, w);
231 vpImage<vpRGBa> cameraNormalDisplayImage(h, w);
232 vpImage<unsigned char> depthDisplayImage(h, w);
233
234#if defined(VISP_HAVE_GTK)
235 using DisplayCls = vpDisplayGTK;
236#elif defined(VISP_HAVE_X11)
237 using DisplayCls = vpDisplayX;
238#elif defined(HAVE_OPENCV_HIGHGUI)
239 using DisplayCls = vpDisplayOpenCV;
240#elif defined(VISP_HAVE_GDI)
241 using DisplayCls = vpDisplayGDI;
242#elif defined(VISP_HAVE_D3D9)
243 using DisplayCls = vpDisplayD3D;
244#endif
245 unsigned int padding = 80;
246 DisplayCls dNormals(normalDisplayImage, 0, 0, "normals in object space");
247 DisplayCls dNormalsCamera(cameraNormalDisplayImage, 0, h + padding, "normals in camera space");
248 DisplayCls dDepth(depthDisplayImage, w + padding, 0, "depth");
249 DisplayCls dColor(colorImage, w + padding, h + padding, "color");
250
251 DisplayCls dImageDiff;
252 if (showLightContrib) {
253 dImageDiff.init(lightDifference, w * 2 + padding, 0, "Specular/reflectance contribution");
254 }
255 DisplayCls dCanny;
256 if (showCanny) {
257 dCanny.init(cannyImage, w * 2 + padding, h + padding, "Canny");
258 }
259 renderer.renderFrame();
260 bool end = false;
261 std::vector<double> renderTime, fetchTime, displayTime;
262 while (!end) {
264 float nearV = 0, farV = 0;
265 geometryRenderer->computeNearAndFarPlanesFromNode(objectName, nearV, farV, true);
266 renderParams.setClippingDistance(nearV, farV);
267 renderer.setRenderParameters(renderParams);
269
270 const double beforeRender = vpTime::measureTimeMs();
272 renderer.renderFrame();
274 const double beforeFetch = vpTime::measureTimeMs();
276 renderer.getRenderer<vpPanda3DGeometryRenderer>(geometryRenderer->getName())->getRender(normalsImage, depthImage);
277 renderer.getRenderer<vpPanda3DGeometryRenderer>(cameraRenderer->getName())->getRender(cameraNormalsImage);
278 renderer.getRenderer<vpPanda3DRGBRenderer>(rgbRenderer->getName())->getRender(colorImage);
279 if (showLightContrib) {
280 renderer.getRenderer<vpPanda3DRGBRenderer>(rgbDiffuseRenderer->getName())->getRender(colorDiffuseOnly);
281 }
282 if (showCanny) {
283 renderer.getRenderer<vpPanda3DCanny>()->getRender(cannyRawData);
284 }
286 const double beforeConvert = vpTime::measureTimeMs();
288 displayNormals(normalsImage, normalDisplayImage);
289 displayNormals(cameraNormalsImage, cameraNormalDisplayImage);
290 displayDepth(depthImage, depthDisplayImage, nearV, farV);
291 if (showLightContrib) {
292 displayLightDifference(colorImage, colorDiffuseOnly, lightDifference);
293 }
294 if (showCanny) {
295 displayCanny(cannyRawData, cannyImage);
296 }
297
298 vpDisplay::display(colorImage);
299 vpDisplay::displayText(colorImage, 15, 15, "Click to quit", vpColor::red);
301
302 if (stepByStep) {
303 vpDisplay::displayText(colorImage, 50, 15, "Next frame: space", vpColor::red);
304 }
305 if (vpDisplay::getClick(colorImage, false)) {
306 end = true;
307 }
308 vpDisplay::flush(colorImage);
309 const double endDisplay = vpTime::measureTimeMs();
310 renderTime.push_back(beforeFetch - beforeRender);
311 fetchTime.push_back(beforeConvert - beforeFetch);
312 displayTime.push_back(endDisplay - beforeConvert);
313 std::string s;
314 if (stepByStep) {
315 bool next = false;
316 while (!next) {
317 vpDisplay::getKeyboardEvent(colorImage, s, true);
318 if (s == " ") {
319 next = true;
320 }
321 }
322 }
323 const double afterAll = vpTime::measureTimeMs();
325 const double delta = (afterAll - beforeRender) / 1000.0;
326 const vpHomogeneousMatrix wTo = renderer.getNodePose(objectName);
327 const vpHomogeneousMatrix oToo = vpExponentialMap::direct(vpColVector({ 0.0, 0.0, 0.0, 0.0, vpMath::rad(20.0), 0.0 }), delta);
328 renderer.setNodePose(objectName, wTo * oToo);
330 }
331
335
336 if (renderTime.size() > 0) {
337 std::cout << "Render time: " << vpMath::getMean(renderTime) << "ms +- " << vpMath::getStdev(renderTime) << "ms" << std::endl;
338 std::cout << "Panda3D -> vpImage time: " << vpMath::getMean(fetchTime) << "ms +- " << vpMath::getStdev(fetchTime) << "ms" << std::endl;
339 std::cout << "Display time: " << vpMath::getMean(displayTime) << "ms +- " << vpMath::getStdev(displayTime) << "ms" << std::endl;
340 }
341 return 0;
342}
343
344#else
345
/**
 * Fallback entry point used when ViSP was built without Panda3D support,
 * a display backend, or the io module.
 */
int main()
{
  const char *message = "Recompile ViSP with Panda3D as a third party to run this tutorial";
  std::cerr << message << std::endl;
  return EXIT_FAILURE;
}
351
352#endif
Generic class defining intrinsic camera parameters.
Implementation of column vector and the associated operations.
static const vpColor red
Definition vpColor.h:198
static const vpColor green
Definition vpColor.h:201
Display for windows using Direct3D 3rd party. Thus to enable this class Direct3D should be installed....
Display for windows using GDI (available on any windows 32 platform).
The vpDisplayGTK allows to display image using the GTK 3rd party library. Thus to enable this class G...
The vpDisplayOpenCV allows to display image using the OpenCV library. Thus to enable this class OpenC...
Use the X11 console to display images on unix-like OS. Thus to enable this class X11 should be instal...
Definition vpDisplayX.h:135
static bool getClick(const vpImage< unsigned char > &I, bool blocking=true)
static bool getKeyboardEvent(const vpImage< unsigned char > &I, bool blocking=true)
static void display(const vpImage< unsigned char > &I)
static void flush(const vpImage< unsigned char > &I)
static void displayArrow(const vpImage< unsigned char > &I, const vpImagePoint &ip1, const vpImagePoint &ip2, const vpColor &color=vpColor::white, unsigned int w=4, unsigned int h=2, unsigned int thickness=1)
static void displayText(const vpImage< unsigned char > &I, const vpImagePoint &ip, const std::string &s, const vpColor &color)
static vpHomogeneousMatrix direct(const vpColVector &v)
Implementation of an homogeneous matrix and operations on such kind of matrices.
static void read(vpImage< unsigned char > &I, const std::string &filename, int backend=IO_DEFAULT_BACKEND)
Definition of the vpImage class member functions.
Definition vpImage.h:131
unsigned int getWidth() const
Definition vpImage.h:242
unsigned int getSize() const
Definition vpImage.h:221
Type * bitmap
points toward the bitmap
Definition vpImage.h:135
unsigned int getHeight() const
Definition vpImage.h:181
static double rad(double deg)
Definition vpMath.h:129
static double getStdev(const std::vector< double > &v, bool useBesselCorrection=false)
Definition vpMath.cpp:374
static double getMean(const std::vector< double > &v)
Definition vpMath.cpp:323
Class representing an ambient light.
void setVerticalSyncEnabled(bool useVsync)
set whether vertical sync is enabled. When vertical sync is enabled, render speed will be limited by ...
void setAbortOnPandaError(bool abort)
Set the behaviour when a Panda3D assertion fails. If abort is true, the program will stop....
NodePath loadObject(const std::string &nodeName, const std::string &modelPath)
Load a 3D object. To load an .obj file, Panda3D must be compiled with assimp support.
Implementation of canny filtering, using Sobel kernels.
Class representing a directional light.
static vpPanda3DFrameworkManager & getInstance()
Renderer that outputs object geometric information.
void getRender(vpImage< vpRGBf > &colorData, vpImage< float > &depth) const
Get render results into ViSP readable structures.
@ CAMERA_NORMALS
Surface normals in the object frame.
Class representing a Point Light.
Implementation of a traditional RGB renderer in Panda3D.
void getRender(vpImage< vpRGBa > &I) const
Store the render resulting from calling renderFrame() into a vpImage.
Rendering parameters for a panda3D simulation.
Class that renders multiple datatypes, in a single pass. A renderer set contains multiple subrenderer...
void addNodeToScene(const NodePath &object) VP_OVERRIDE
void initFramework() VP_OVERRIDE
Initialize the framework and propagate the created panda3D framework to the subrenderers.
virtual void setRenderParameters(const vpPanda3DRenderParameters &params) VP_OVERRIDE
Set new rendering parameters. If the scene has already been initialized, the renderer camera is updat...
void addSubRenderer(std::shared_ptr< vpPanda3DBaseRenderer > renderer)
Add a new subrenderer: This subrenderer should have a unique name, not present in the set.
vpHomogeneousMatrix getNodePose(const std::string &name) VP_OVERRIDE
Retrieve the pose of a scene node. The pose is in the world frame, using a ViSP convention.
std::shared_ptr< RendererType > getRenderer()
Retrieve the first subrenderer with the specified template type.
void setNodePose(const std::string &name, const vpHomogeneousMatrix &wTo) VP_OVERRIDE
Set the pose of an object for all the subrenderers. The pose is specified using the ViSP convention T...
void addLight(const vpPanda3DLight &light) VP_OVERRIDE
Light this lightable object with a new light.
void setCameraPose(const vpHomogeneousMatrix &wTc) VP_OVERRIDE
Set the pose of the camera, using the ViSP convention. This change is propagated to all subrenderers.
static bool parse(int *argcPtr, const char **argv, vpArgvInfo *argTable, int flags)
@ ARGV_NO_DEFAULTS
No default options like -help.
@ ARGV_NO_LEFTOVERS
Print an error message if an option is not in the argument list.
@ ARGV_STRING
Argument is associated to a char * string.
@ ARGV_CONSTANT_BOOL
Stand alone argument associated to a bool var that is set to true.
@ ARGV_END
End of the argument list.
@ ARGV_HELP
Argument is for help displaying.
float G
Green component.
Definition vpRGBf.h:160
float R
Red component.
Definition vpRGBf.h:159
VISP_EXPORT double measureTimeMs()