72#include <visp3/core/vpConfig.h>
73#include <visp3/core/vpCameraParameters.h>
74#include <visp3/core/vpIoTools.h>
75#include <visp3/core/vpXmlParserCamera.h>
76#include <visp3/detection/vpDetectorAprilTag.h>
77#include <visp3/gui/vpDisplayFactory.h>
78#include <visp3/gui/vpPlot.h>
79#include <visp3/io/vpImageIo.h>
80#include <visp3/robot/vpRobotFranka.h>
81#include <visp3/sensor/vpRealSense2.h>
82#include <visp3/visual_features/vpFeatureBuilder.h>
83#include <visp3/visual_features/vpFeaturePoint.h>
84#include <visp3/vs/vpServo.h>
85#include <visp3/vs/vpServoDisplay.h>
87#if defined(VISP_HAVE_REALSENSE2) && defined(VISP_HAVE_DISPLAY) && defined(VISP_HAVE_FRANKA) && defined(VISP_HAVE_PUGIXML)
89#ifdef ENABLE_VISP_NAMESPACE
93bool save_desired_features(
const std::string &filename,
const std::vector<vpFeaturePoint> &desired_features)
95 std::ofstream file(filename);
97 for (
size_t i = 0;
i < desired_features.size(); ++
i) {
98 file << desired_features[
i].get_x() <<
" " << desired_features[
i].get_y() <<
" " << desired_features[
i].get_Z() << std::endl;
109bool read_desired_features(
const std::string &filename, std::vector<vpFeaturePoint> &desired_features)
111 desired_features.clear();
112 std::ifstream file(filename);
113 if (file.is_open()) {
115 while (std::getline(file, line)) {
116 std::istringstream iss(line);
118 if (!(iss >> x >> y >> Z)) {
123 desired_features.push_back(s_d);
135 std::vector<vpImagePoint> *traj_vip)
137 for (
size_t i = 0;
i < vip.size(); ++
i) {
138 if (traj_vip[i].size()) {
141 traj_vip[
i].push_back(vip[i]);
145 traj_vip[
i].push_back(vip[i]);
148 for (
size_t i = 0;
i < vip.size(); ++
i) {
149 for (
size_t j = 1;
j < traj_vip[
i].size();
j++) {
// NOTE(review): fragmented listing — the original line numbers are fused onto the code
// lines and many source lines are missing from this extraction, so statements below may
// be syntactically incomplete in this view. Code is kept byte-identical except for the
// user-facing help-text fixes flagged inline with FIX(review).
//
// Eye-to-hand image-based visual servoing (IBVS) of a Franka robot on an AprilTag seen
// by a fixed RealSense camera. Command-line options select tag size, robot IP, camera
// intrinsics, gains, plotting, and a mode that learns and saves the desired features.
155int main(
int argc,
char **argv)
// Default settings, overridable from the command line below.
157 double opt_tag_size = 0.120;
158 bool opt_tag_z_aligned =
false;
159 std::string opt_robot_ip =
"192.168.1.1";
160 std::string opt_rMc_filename =
"";
161 std::string opt_intrinsic_filename =
"";
162 std::string opt_camera_name =
"Camera";
163 bool display_tag =
true;
164 int opt_quad_decimate = 2;
165 bool opt_verbose =
false;
166 bool opt_plot =
false;
167 bool opt_adaptive_gain =
false;
168 bool opt_task_sequencing =
false;
169 bool opt_learn_desired_features =
false;
170 std::string desired_features_filename =
"learned_desired_features.txt";
171 double convergence_threshold = 0.00005;
// Command-line parsing. Options taking a value check (i + 1 < argc) before consuming it.
173 for (
int i = 1;
i < argc; ++
i) {
174 if ((std::string(argv[i]) ==
"--tag-size") && (i + 1 < argc)) {
175 opt_tag_size = std::stod(argv[++i]);
177 else if (std::string(argv[i]) ==
"--tag-z-aligned") {
178 opt_tag_z_aligned =
true;
180 else if ((std::string(argv[i]) ==
"--ip") && (i + 1 < argc)) {
181 opt_robot_ip = std::string(argv[++i]);
183 else if (std::string(argv[i]) ==
"--intrinsic" && i + 1 < argc) {
184 opt_intrinsic_filename = std::string(argv[++i]);
186 else if (std::string(argv[i]) ==
"--camera-name" && i + 1 < argc) {
187 opt_camera_name = std::string(argv[++i]);
189 else if ((std::string(argv[i]) ==
"--rMc") && (i + 1 < argc)) {
190 opt_rMc_filename = std::string(argv[++i]);
192 else if ((std::string(argv[i]) ==
"--verbose") || (std::string(argv[i]) ==
"-v")) {
195 else if (std::string(argv[i]) ==
"--plot") {
198 else if (std::string(argv[i]) ==
"--adaptive-gain") {
199 opt_adaptive_gain =
true;
201 else if (std::string(argv[i]) ==
"--task-sequencing") {
202 opt_task_sequencing =
true;
204 else if ((std::string(argv[i]) ==
"--tag-quad-decimate") && (i + 1 < argc)) {
205 opt_quad_decimate = std::stoi(argv[++i]);
207 else if (std::string(argv[i]) ==
"--no-convergence-threshold") {
208 convergence_threshold = 0.;
210 else if (std::string(argv[i]) ==
"--learn-desired-features") {
211 opt_learn_desired_features =
true;
// --help / -h: print the usage synopsis then the option descriptions.
213 else if ((std::string(argv[i]) ==
"--help") || (std::string(argv[i]) ==
"-h")) {
214 std::cout <<
"SYNOPSIS" << std::endl
216 <<
" [--ip <controller ip>]"
217 <<
" [--intrinsic <xml file>]"
218 <<
" [--camera-name <name>]"
219 <<
" [--tag-size <size>]"
220 <<
" [--tag-quad-decimate <decimation factor>]"
221 <<
" [--tag-z-aligned]"
222 <<
" [--learn-desired-features]"
223 <<
" [--rMc <file.yaml>]"
224 <<
" [--adaptive-gain]"
226 <<
" [--task-sequencing]"
227 <<
" [--no-convergence-threshold]"
228 <<
" [--verbose, -v]"
231 std::cout <<
"DESCRIPTION" << std::endl
232 <<
" Use an image-based visual-servoing scheme to position the camera in front of an Apriltag." << std::endl
234 <<
" --ip <controller ip>" << std::endl
235 <<
" Franka controller ip address" << std::endl
236 <<
" Default: " << opt_robot_ip << std::endl
238 <<
" --intrinsic <xml file>" << std::endl
239 <<
" XML file that contains camera intrinsic parameters. " << std::endl
240 <<
" If no file is specified, use Realsense camera factory intrinsic parameters." << std::endl
242 <<
" --camera-name <name>" << std::endl
243 <<
" Camera name in the XML file that contains camera intrinsic parameters." << std::endl
244 <<
" Default: \"Camera\"" << std::endl
246 <<
" --tag-size <size>" << std::endl
247 <<
" Apriltag size in [m]." << std::endl
248 <<
" Default: " << opt_tag_size <<
" [m]" << std::endl
250 <<
" --tag-quad-decimate <decimation factor>" << std::endl
251 <<
" Decimation factor used during Apriltag detection." << std::endl
252 <<
" Default: " << opt_quad_decimate << std::endl
254 <<
" --tag-z-aligned" << std::endl
255 <<
" When enabled, tag z-axis and camera z-axis are aligned." << std::endl
256 <<
" Default: false" << std::endl
258 <<
" --rMc <file.yaml>" << std::endl
259 <<
" Yaml file containing the extrinsic transformation between" << std::endl
260 <<
" robot reference frame and camera frames." << std::endl
262 <<
" --adaptive-gain" << std::endl
263 <<
" Flag to enable adaptive gain to speed up visual servo near convergence." << std::endl
265 <<
" --plot" << std::endl
266 <<
" Flag to enable curve plotter." << std::endl
268 <<
" --task-sequencing" << std::endl
269 <<
" Flag to enable task sequencing scheme." << std::endl
271 <<
" --no-convergence-threshold" << std::endl
272 <<
" Flag to disable convergence threshold used to stop the visual servo." << std::endl
// FIX(review): this help entry was stale — the parsed option (above) is
// --learn-desired-features and data are saved in learned_desired_features.txt,
// not in a learned-desired-pose.yaml file.
274 <<
" --learn-desired-features" << std::endl
275 <<
" Flag to enable desired features learning." << std::endl
276 <<
" Data are saved in " << desired_features_filename <<
" file." << std::endl
278 <<
" --verbose, -v" << std::endl
279 <<
" Flag to enable extra verbosity." << std::endl
281 <<
" --help, -h" << std::endl
282 <<
" Print this helper message." << std::endl
// Unknown option: echo the bad argument and point the user at --help.
288 std::cout <<
"\nERROR" << std::endl
289 << std::string(argv[i]) <<
" command line option is not supported." << std::endl
290 <<
"Use " << std::string(argv[0]) <<
" --help" << std::endl
// Configure the RealSense grabber: color, depth and infrared streams at 640x480, 30 fps.
299 config.enable_stream(RS2_STREAM_COLOR, 640, 480, RS2_FORMAT_RGBA8, 30);
300 config.enable_stream(RS2_STREAM_DEPTH, 640, 480, RS2_FORMAT_Z16, 30);
301 config.enable_stream(RS2_STREAM_INFRARED, 640, 480, RS2_FORMAT_Y8, 30);
306#if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11)
// Echo the effective configuration.
312 std::cout <<
"Parameters:" << std::endl;
313 std::cout <<
" Apriltag " << std::endl;
314 std::cout <<
" Size [m] : " << opt_tag_size << std::endl;
315 std::cout <<
" Z aligned : " << (opt_tag_z_aligned ?
"true" :
"false") << std::endl;
316 std::cout <<
" Camera intrinsics " << std::endl;
317 std::cout <<
" Factory parameters : " << (opt_intrinsic_filename.empty() ?
"yes" :
"no") << std::endl;
// Intrinsics: RealSense factory values unless an XML calibration file is supplied.
321 if (opt_intrinsic_filename.empty()) {
322 std::cout <<
"Use Realsense camera intrinsic factory parameters: " << std::endl;
324 std::cout <<
"cam:\n" <<
cam << std::endl;
327 std::cout <<
"Camera parameters file " << opt_intrinsic_filename <<
" doesn't exist." << std::endl;
332 if (!opt_camera_name.empty()) {
334 std::cout <<
" Param file name [.xml]: " << opt_intrinsic_filename << std::endl;
335 std::cout <<
" Camera name : " << opt_camera_name << std::endl;
// Try the with-distortion model first, then fall back to the distortion-free model.
339 std::cout <<
"Unable to parse parameters with distortion for camera \"" << opt_camera_name <<
"\" from "
340 << opt_intrinsic_filename <<
" file" << std::endl;
341 std::cout <<
"Attempt to find parameters without distortion" << std::endl;
343 if (
parser.parse(cam, opt_intrinsic_filename, opt_camera_name,
345 std::cout <<
"Unable to parse parameters without distortion for camera \"" << opt_camera_name <<
"\" from "
346 << opt_intrinsic_filename <<
" file" << std::endl;
353 std::cout <<
"Camera parameters used to compute the pose:\n" <<
cam << std::endl;
// AprilTag detector configuration.
360 detector.setAprilTagPoseEstimationMethod(poseEstimationMethod);
361 detector.setDisplayTag(display_tag);
362 detector.setAprilTagQuadDecimate(opt_quad_decimate);
363 detector.setZAlignedWithCameraAxis(opt_tag_z_aligned);
// Tag 3D model: the four corners of a square of side opt_tag_size centered on the tag frame.
366 std::vector<vpPoint> point(4);
367 point[0].setWorldCoordinates(-opt_tag_size / 2., -opt_tag_size / 2., 0);
368 point[1].setWorldCoordinates(+opt_tag_size / 2., -opt_tag_size / 2., 0);
369 point[2].setWorldCoordinates(+opt_tag_size / 2., +opt_tag_size / 2., 0);
370 point[3].setWorldCoordinates(-opt_tag_size / 2., +opt_tag_size / 2., 0);
// Learning mode: detect the tag once, build the 4 desired features and save them to file.
372 if (opt_learn_desired_features) {
380 std::vector<vpHomogeneousMatrix> c_M_o_vec;
382 bool ret = detector.detect(I, opt_tag_size, cam, c_M_o_vec);
387 std::vector< std::vector<vpImagePoint> > tags_corners;
// Proceed only when exactly one tag is visible; otherwise the learned data is ambiguous.
389 if (detector.getNbObjects() == 1) {
390 tags_corners = detector.getTagsCorners();
391 for (
size_t i = 0;
i < 4; ++
i) {
392 std::stringstream ss;
405 if (detector.getNbObjects() == 1) {
407 std::vector<vpFeaturePoint> p_d(4);
410 for (
size_t i = 0;
i < 4; ++
i) {
413 std::stringstream ss;
// Depth of each desired feature: tag corner expressed in the desired camera frame.
421 for (
size_t i = 0;
i < point.size(); ++
i) {
423 point[
i].changeFrame(cd_M_o, c_P);
424 p_d[
i].set_Z(c_P[2]);
426 if (save_desired_features(desired_features_filename, p_d)) {
427 std::cout <<
"Desired visual features saved in: " << desired_features_filename << std::endl;
430 std::cout <<
"Error: Unable to save desired features in " << desired_features_filename << std::endl;
435 std::cout <<
"Cannot save desired features. More than 1 tag is visible in the image..." << std::endl;
439 std::cout <<
"Cannot save desired features. Tag is not visible in the image..." << std::endl;
// Servoing mode: the desired features must have been learned beforehand.
457 std::vector<vpFeaturePoint> p_d;
460 std::cout <<
"Cannot start eye-to-hand visual-servoing. Desired features are not available." << std::endl;
461 std::cout <<
"use --learn-desired-features flag to learn the desired features." << std::endl;
465 if (!read_desired_features(desired_features_filename, p_d)) {
466 std::cout <<
"Error: Unable to read desired features from: " << desired_features_filename << std::endl;
// Extrinsics between robot reference frame and camera, loaded from YAML when provided.
475 if (!opt_rMc_filename.empty()) {
476 r_P_c.
loadYAML(opt_rMc_filename, r_P_c);
479 std::cout <<
"Warning, rMc transformation is not specified using --rMc parameter." << std::endl;
483 std::cout <<
"r_M_c:\n" << r_M_c << std::endl;
// Connect to the Franka controller.
488 robot.connect(opt_robot_ip);
// One current feature per tag corner, paired with its learned desired feature.
491 std::vector<vpFeaturePoint>
p(4);
495 for (
size_t i = 0;
i <
p.size(); ++
i) {
496 task.addFeature(p[i], p_d[i]);
501 if (opt_adaptive_gain) {
503 task.setLambda(lambda);
// Eye-to-hand scheme: give the task the camera/fixed-frame twist transformation.
510 task.set_cVf(r_M_c.inverse());
// Optional plotter: feature errors (8 curves) and the 7 joint velocities.
516 plotter =
new vpPlot(2,
static_cast<int>(250 * 2), 500,
static_cast<int>(I.getWidth()) + 80, 10,
517 "Real time curves plotter");
518 plotter->setTitle(0,
"Visual features error");
519 plotter->setTitle(1,
"Joint velocities");
522 plotter->setLegend(0, 0,
"error_feat_p1_x");
523 plotter->setLegend(0, 1,
"error_feat_p1_y");
524 plotter->setLegend(0, 2,
"error_feat_p2_x");
525 plotter->setLegend(0, 3,
"error_feat_p2_y");
526 plotter->setLegend(0, 4,
"error_feat_p3_x");
527 plotter->setLegend(0, 5,
"error_feat_p3_y");
528 plotter->setLegend(0, 6,
"error_feat_p4_x");
529 plotter->setLegend(0, 7,
"error_feat_p4_y");
530 plotter->setLegend(1, 0,
"q_1");
531 plotter->setLegend(1, 1,
"q_2");
532 plotter->setLegend(1, 2,
"q_3");
533 plotter->setLegend(1, 3,
"q_4");
534 plotter->setLegend(1, 4,
"q_5");
535 plotter->setLegend(1, 5,
"q_6");
536 plotter->setLegend(1, 6,
"q_7");
// Servo loop state flags.
539 bool final_quit =
false;
540 bool has_converged =
false;
541 bool send_velocities =
false;
542 bool servo_started =
false;
543 std::vector<vpImagePoint> *traj_corners =
nullptr;
// Main servo loop: grab, detect, update features, compute and apply joint velocities.
550 while (!has_converged && !final_quit) {
557 std::vector<vpHomogeneousMatrix> c_M_o_vec;
559 bool ret = detector.detect(I, opt_tag_size, cam, c_M_o_vec);
562 std::stringstream ss;
563 ss <<
"Left click to " << (send_velocities ?
"stop the robot" :
"servo the robot") <<
", right click to quit.";
// Servo only when exactly one tag is detected.
570 if (ret && detector.getNbObjects() == 1) {
571 static bool first_time =
true;
575 std::vector<vpImagePoint> corners = detector.getPolygon(0);
578 for (
size_t i = 0;
i < corners.size(); ++
i) {
583 point[
i].changeFrame(c_M_o, c_P);
596 robot.get_eJe(e_J_e);
// Task sequencing: velocities ramp only once the operator enables motion.
599 if (opt_task_sequencing) {
600 if (!servo_started) {
601 if (send_velocities) {
602 servo_started =
true;
609 qdot =
task.computeControlLaw();
614 for (
size_t i = 0;
i < corners.size(); ++
i) {
615 std::stringstream ss;
// Lazily allocate one trajectory buffer per corner, then draw the trajectories.
625 traj_corners =
new std::vector<vpImagePoint>[corners.size()];
628 display_point_trajectory(I, corners, traj_corners);
632 plotter->plot(1, iter_plot, qdot);
637 std::cout <<
"qdot: " << qdot.t() << std::endl;
// Convergence test on the squared norm of the feature error.
640 double error =
task.getError().sumSquare();
641 std::stringstream ss;
642 ss <<
"error: " <<
error;
646 std::cout <<
"error: " <<
error << std::endl;
648 if (error < convergence_threshold) {
649 has_converged =
true;
650 std::cout <<
"Servo task has converged" << std::endl;
661 if (!send_velocities) {
669 std::stringstream ss;
// Left mouse click toggles sending velocities to the robot.
679 send_velocities = !send_velocities;
692 std::cout <<
"Stop the robot " << std::endl;
695 if (opt_plot && plotter !=
nullptr) {
701 while (!final_quit) {
716 delete[] traj_corners;
// Error handling: ViSP, Franka network, then generic exceptions; the robot is stopped in each.
720 std::cout <<
"ViSP exception: " <<
e.what() << std::endl;
721 std::cout <<
"Stop the robot " << std::endl;
723#if (VISP_CXX_STANDARD < VISP_CXX_STANDARD_11)
724 if (display !=
nullptr) {
730 catch (
const franka::NetworkException &e) {
731 std::cout <<
"Franka network exception: " <<
e.what() << std::endl;
732 std::cout <<
"Check if you are connected to the Franka robot"
733 <<
" or if you specified the right IP using --ip command line option set by default to 192.168.1.1. "
735#if (VISP_CXX_STANDARD < VISP_CXX_STANDARD_11)
736 if (display !=
nullptr) {
742 catch (
const std::exception &e) {
743 std::cout <<
"Franka exception: " <<
e.what() << std::endl;
744#if (VISP_CXX_STANDARD < VISP_CXX_STANDARD_11)
745 if (display !=
nullptr) {
752#if (VISP_CXX_STANDARD < VISP_CXX_STANDARD_11)
753 if (display !=
nullptr) {
// Fallback builds: explain which third-party dependency is missing.
// NOTE(review): these #if blocks presumably belong to the dummy main() in the #else
// branch of the feature guard at the top of the file — confirm against the full file.
763#if !defined(VISP_HAVE_REALSENSE2)
764 std::cout <<
"Install librealsense-2.x and rebuild ViSP." << std::endl;
766#if !defined(VISP_HAVE_FRANKA)
767 std::cout <<
"Install libfranka and rebuild ViSP." << std::endl;
769#if !defined(VISP_HAVE_PUGIXML)
770 std::cout <<
"Build ViSP with pugixml support enabled." << std::endl;
Adaptive gain computation.
static bool loadYAML(const std::string &filename, vpArray2D< Type > &A, char *header=nullptr)
Generic class defining intrinsic camera parameters.
@ perspectiveProjWithDistortion
Perspective projection with distortion model.
@ perspectiveProjWithoutDistortion
Perspective projection without distortion model.
Implementation of column vector and the associated operations.
static const vpColor green
@ TAG_36h11
AprilTag 36h11 pattern (recommended).
Class that defines generic functionalities for display.
static bool getClick(const vpImage< unsigned char > &I, bool blocking=true)
static void display(const vpImage< unsigned char > &I)
static void displayLine(const vpImage< unsigned char > &I, const vpImagePoint &ip1, const vpImagePoint &ip2, const vpColor &color, unsigned int thickness=1, bool segment=true)
static void displayCross(const vpImage< unsigned char > &I, const vpImagePoint &ip, unsigned int size, const vpColor &color, unsigned int thickness=1)
static void flush(const vpImage< unsigned char > &I)
static void displayText(const vpImage< unsigned char > &I, const vpImagePoint &ip, const std::string &s, const vpColor &color)
error that can be emitted by ViSP classes.
static void create(vpFeaturePoint &s, const vpCameraParameters &cam, const vpDot &d)
Class that defines a 2D point visual feature which is composed by two parameters that are the cartes...
vpFeaturePoint & buildFrom(const double &x, const double &y, const double &Z)
Implementation of an homogeneous matrix and operations on such kind of matrices.
Class that defines a 2D point in an image. This class is useful for image processing and stores only ...
static double distance(const vpImagePoint &iP1, const vpImagePoint &iP2)
Definition of the vpImage class member functions.
Implementation of a matrix and operations on matrices.
static void convertPoint(const vpCameraParameters &cam, const double &x, const double &y, double &u, double &v)
static void convertPoint(const vpCameraParameters &cam, const double &u, const double &v, double &x, double &y)
This class enables real time drawing of 2D or 3D graphics. An instance of the class open a window whi...
Implementation of a pose vector and operations on poses.
vpCameraParameters getCameraParameters(const rs2_stream &stream, vpCameraParameters::vpCameraParametersProjType type=vpCameraParameters::perspectiveProjWithDistortion, int index=-1) const
void acquire(vpImage< unsigned char > &grey, double *ts=nullptr)
bool open(const rs2::config &cfg=rs2::config())
@ STATE_VELOCITY_CONTROL
Initialize the velocity controller.
@ STATE_STOP
Stops robot motion especially in velocity and acceleration control.
static void display(const vpServo &s, const vpCameraParameters &cam, const vpImage< unsigned char > &I, vpColor currentColor=vpColor::green, vpColor desiredColor=vpColor::red, unsigned int thickness=1)
@ EYETOHAND_L_cVf_fVe_eJe
XML parser to load and save intrinsic camera parameters.
std::shared_ptr< vpDisplay > createDisplay()
Return a smart pointer vpDisplay specialization if a GUI library is available or nullptr otherwise.
vpDisplay * allocateDisplay()
Return a newly allocated vpDisplay specialization if a GUI library is available or nullptr otherwise.
VISP_EXPORT double measureTimeMs()