Visual Servoing Platform  version 3.0.1
servoAfma6Points2DCamVelocityEyeToHand.cpp
1 /****************************************************************************
2  *
3  * This file is part of the ViSP software.
4  * Copyright (C) 2005 - 2017 by Inria. All rights reserved.
5  *
6  * This software is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU General Public License
8  * ("GPL") version 2 as published by the Free Software Foundation.
9  * See the file LICENSE.txt at the root directory of this source
10  * distribution for additional information about the GNU GPL.
11  *
12  * For using ViSP with software that can not be combined with the GNU
13  * GPL, please contact Inria about acquiring a ViSP Professional
14  * Edition License.
15  *
16  * See http://visp.inria.fr for more information.
17  *
18  * This software was developed at:
19  * Inria Rennes - Bretagne Atlantique
20  * Campus Universitaire de Beaulieu
21  * 35042 Rennes Cedex
22  * France
23  *
24  * If you have questions regarding the use of this file, please contact
25  * Inria at visp@inria.fr
26  *
27  * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
28  * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
29  *
30  * Description:
31  * tests the control law
32  * eye-to-hand control
33  * velocity computed in the camera frame
34  *
35  * Authors:
36  * Eric Marchand
37  *
38  *****************************************************************************/
60 #include <visp3/core/vpConfig.h>
61 #include <visp3/core/vpDebug.h> // Debug trace
62 #include <stdlib.h>
63 #include <cmath> // std::fabs
64 #include <limits> // numeric_limits
65 #include <list>
66 #if (defined (VISP_HAVE_AFMA6) && defined (VISP_HAVE_DC1394))
67 
68 #define SAVE 0
69 
70 #include <visp3/sensor/vp1394TwoGrabber.h>
71 #include <visp3/core/vpImage.h>
72 #include <visp3/core/vpImagePoint.h>
73 #include <visp3/core/vpMath.h>
74 #include <visp3/core/vpHomogeneousMatrix.h>
75 #include <visp3/visual_features/vpFeaturePoint.h>
76 #include <visp3/core/vpPoint.h>
77 #include <visp3/vs/vpServo.h>
78 #include <visp3/visual_features/vpFeatureBuilder.h>
79 #include <visp3/robot/vpRobotAfma6.h>
80 #include <visp3/core/vpException.h>
81 #include <visp3/vs/vpServoDisplay.h>
82 #include <visp3/blob/vpDot.h>
83 #include <visp3/vision/vpPose.h>
84 #include <visp3/io/vpImageIo.h>
85 #include <visp3/core/vpDisplay.h>
86 #include <visp3/gui/vpDisplayX.h>
87 #include <visp3/gui/vpDisplayOpenCV.h>
88 #include <visp3/gui/vpDisplayGTK.h>
89 
90 #define L 0.006
91 #define D 0
92 
93 int main()
94 {
95  try
96  {
97  vpServo task ;
98 
99  vpCameraParameters cam ;
101  int i ;
102 
106  g.open(I) ;
107 
108  g.acquire(I) ;
109 
110 #ifdef VISP_HAVE_X11
111  vpDisplayX display(I,100,100,"Current image") ;
112 #elif defined(VISP_HAVE_OPENCV)
113  vpDisplayOpenCV display(I,100,100,"Current image") ;
114 #elif defined(VISP_HAVE_GTK)
115  vpDisplayGTK display(I,100,100,"Current image") ;
116 #endif
117 
118  vpDisplay::display(I) ;
119  vpDisplay::flush(I) ;
120 
121  std::cout << std::endl ;
122  std::cout << "-------------------------------------------------------" << std::endl ;
123  std::cout << " Test program for vpServo " <<std::endl ;
124  std::cout << " Eye-to-hand task control" << std::endl ;
125  std::cout << " Simulation " << std::endl ;
126  std::cout << " task : servo a point " << std::endl ;
127  std::cout << "-------------------------------------------------------" << std::endl ;
128  std::cout << std::endl ;
129 
130  int nbPoint =7 ;
131 
132  vpDot dot[nbPoint] ;
133  vpImagePoint cog;
134 
135  for (i=0 ; i < nbPoint ; i++)
136  {
137  dot[i].initTracking(I) ;
138  dot[i].setGraphics(true) ;
139  dot[i].track(I) ;
140  vpDisplay::flush(I) ;
141  dot[i].setGraphics(false) ;
142  }
143 
144  // Compute the pose 3D model
145  vpPoint point[nbPoint] ;
146  point[0].setWorldCoordinates(-2*L,D, -3*L) ;
147  point[1].setWorldCoordinates(0,D, -3*L) ;
148  point[2].setWorldCoordinates(2*L,D, -3*L) ;
149 
150  point[3].setWorldCoordinates(-L,D,-L) ;
151  point[4].setWorldCoordinates(L,D, -L) ;
152  point[5].setWorldCoordinates(L,D, L) ;
153  point[6].setWorldCoordinates(-L,D, L) ;
154 
155  vpRobotAfma6 robot ;
156  // Update camera parameters
157  robot.getCameraParameters (cam, I);
158 
159  vpHomogeneousMatrix cMo, cdMo ;
160  vpPose pose ;
161  pose.clearPoint() ;
162  for (i=0 ; i < nbPoint ; i++)
163  {
164  cog = dot[i].getCog();
165  double x=0, y=0;
166  vpPixelMeterConversion::convertPoint(cam, cog, x, y) ;
167  point[i].set_x(x) ;
168  point[i].set_y(y) ;
169  pose.addPoint(point[i]) ;
170  }
171 
172  // compute the initial pose using Dementhon method followed by a non linear
173  // minimisation method
175 
176 
177  std::cout << cMo << std::endl ;
178  cMo.print() ;
179 
180  /*------------------------------------------------------------------
181  -- Learning the desired position
182  -- or reading the desired position
183  ------------------------------------------------------------------
184  */
185  std::cout << " Learning 0/1 " <<std::endl ;
186  char name[FILENAME_MAX] ;
187  sprintf(name,"cdMo.dat") ;
188  int learning ;
189  std::cin >> learning ;
190  if (learning ==1)
191  {
192  // save the object position
193  vpTRACE("Save the location of the object in a file cdMo.dat") ;
194  std::ofstream f(name) ;
195  cMo.save(f) ;
196  f.close() ;
197  exit(1) ;
198  }
199 
200 
201  {
202  vpTRACE("Loading desired location from cdMo.dat") ;
203  std::ifstream f("cdMo.dat") ;
204  cdMo.load(f) ;
205  f.close() ;
206  }
207 
208  vpFeaturePoint p[nbPoint], pd[nbPoint] ;
209 
210  // set the desired position of the point by forward projection using
211  // the pose cdMo
212  for (i=0 ; i < nbPoint ; i++)
213  {
214  vpColVector cP, p ;
215  point[i].changeFrame(cdMo, cP) ;
216  point[i].projection(cP, p) ;
217 
218  pd[i].set_x(p[0]) ;
219  pd[i].set_y(p[1]) ;
220  }
221 
222 
223 
224  //------------------------------------------------------------------
225 
226  vpTRACE("define the task") ;
227  vpTRACE("\t we want an eye-in-hand control law") ;
228  vpTRACE("\t robot is controlled in the camera frame") ;
231 
232 
233  for (i=0 ; i < nbPoint ; i++)
234  {
235  task.addFeature(p[i],pd[i]) ;
236  }
237 
238 
239  vpTRACE("Display task information " ) ;
240  task.print() ;
241 
242 
243  //------------------------------------------------------------------
244 
245 
246  double convergence_threshold = 0.00; //025 ;
248 
249  //-------------------------------------------------------------
250  double error =1 ;
251  unsigned int iter=0 ;
252  vpTRACE("\t loop") ;
254  vpColVector v ; // computed robot velocity
255 
256 
257  // position of the object in the effector frame
258  vpHomogeneousMatrix oMcamrobot ;
259  oMcamrobot[0][3] = -0.05 ;
260 
261  vpImage<vpRGBa> Ic ;
262  int it = 0 ;
263 
264  double lambda_av =0.1;
265  double alpha = 1 ; //1 ;
266  double beta =3 ; //3 ;
267 
268  std::cout << "alpha 0.7" << std::endl;
269  std::cin >> alpha ;
270  std::cout << "beta 5" << std::endl;
271  std::cin >> beta ;
272  std::list<vpImagePoint> Lcog ;
273  vpImagePoint ip;
274  while(error > convergence_threshold)
275  {
276  std::cout << "---------------------------------------------" << iter++ <<std::endl ;
277 
278  g.acquire(I) ;
279  vpDisplay::display(I) ;
280  ip.set_i( 265 );
281  ip.set_j( 150 );
282  vpDisplay::displayText(I, ip, "Eye-To-Hand Visual Servoing", vpColor::green) ;
283  ip.set_i( 280 );
284  ip.set_j( 150 );
285  vpDisplay::displayText(I, ip, "IRISA-INRIA Rennes, Lagadic project", vpColor::green) ;
286  try
287  {
288  for (i=0 ; i < nbPoint ; i++)
289  {
290  dot[i].track(I) ;
291  Lcog.push_back( dot[i].getCog() );
292  }
293  }
294  catch(...)
295  {
296  vpTRACE("Error detected while tracking visual features") ;
297  robot.stopMotion() ;
298  exit(1) ;
299  }
300 
301  // compute the initial pose using a non linear minimisation method
302  pose.clearPoint() ;
303 
304  for (i=0 ; i < nbPoint ; i++)
305  {
306  double x=0, y=0;
307  cog = dot[i].getCog();
308  vpPixelMeterConversion::convertPoint(cam, cog, x, y) ;
309  point[i].set_x(x) ;
310  point[i].set_y(y) ;
311 
312  vpColVector cP ;
313  point[i].changeFrame(cdMo, cP) ;
314 
315  p[i].set_x(x) ;
316  p[i].set_y(y) ;
317  p[i].set_Z(cP[2]) ;
318 
319  pose.addPoint(point[i]) ;
320 
321  point[i].display(I,cMo,cam, vpColor::green) ;
322  point[i].display(I,cdMo,cam, vpColor::blue) ;
323  }
324  pose.computePose(vpPose::LOWE, cMo) ;
325  vpDisplay::flush(I) ;
326 
328  vpHomogeneousMatrix cMe, camrobotMe ;
329  robot.get_cMe(camrobotMe) ;
330  cMe = cMo *oMcamrobot * camrobotMe ;
331 
332 
333  task.set_cVe(cMe) ;
334 
335  vpMatrix eJe ;
336  robot.get_eJe(eJe) ;
337  task.set_eJe(eJe) ;
338 
339 
340  // Compute the adaptative gain (speed up the convergence)
341  double gain ;
342  if (iter>2)
343  {
344  if (std::fabs(alpha) <= std::numeric_limits<double>::epsilon())
345  gain = lambda_av ;
346  else
347  {
348  gain = alpha * exp (-beta * ( task.getError() ).sumSquare() ) + lambda_av;
349  }
350  }
351  else gain = lambda_av ;
352  if (SAVE==1)
353  gain = gain/5 ;
354 
355  vpTRACE("%f %f %f %f %f",alpha, beta, lambda_av, ( task.getError() ).sumSquare(), gain) ;
356  task.setLambda(gain) ;
357 
358 
359  v = task.computeControlLaw() ;
360 
361  // display points trajectory
362  for (std::list<vpImagePoint>::const_iterator it_cog = Lcog.begin(); it_cog != Lcog.end(); ++it_cog)
363  {
365  }
366  vpServoDisplay::display(task,cam,I) ;
368 
369  error = ( task.getError() ).sumSquare() ;
370  std::cout << "|| s - s* || = "<< error<<std::endl ;
371 
372  if (error>7)
373  {
374  vpTRACE("Error detected while tracking visual features") ;
375  robot.stopMotion() ;
376  exit(1) ;
377  }
378 
379  // display the pose
380  // pose.display(I,cMo,cam, 0.04, vpColor::red) ;
381  // display the pose
382  // pose.display(I,cdMo,cam, 0.04, vpColor::blue) ;
383  if ((SAVE==1) && (iter %3==0))
384  {
385 
386  vpDisplay::getImage(I,Ic) ;
387  sprintf(name,"/tmp/marchand/image.%04d.ppm",it++) ;
388  vpImageIo::write(Ic,name) ;
389  }
390  }
391  v = 0 ;
394  task.kill();
395  }
396  catch (...)
397  {
398  vpERROR_TRACE(" Test failed") ;
399  return 0;
400  }
401 }
402 
403 #else
404 int
405 main()
406 {
407  vpERROR_TRACE("You do not have an afma6 robot or a firewire framegrabber connected to your computer...");
408 }
409 
410 #endif
Implementation of a matrix and operations on matrices.
Definition: vpMatrix.h:97
void getCameraParameters(vpCameraParameters &cam, const unsigned int &image_width, const unsigned int &image_height) const
Definition: vpAfma6.cpp:1235
void projection(const vpColVector &_cP, vpColVector &_p)
Projection onto the image plane of a point. Input: the 3D coordinates in the camera frame _cP...
Definition: vpPoint.cpp:229
void get_eJe(vpMatrix &_eJe)
static bool getClick(const vpImage< unsigned char > &I, bool blocking=true)
Implementation of an homogeneous matrix and operations on such kind of matrices.
#define vpERROR_TRACE
Definition: vpDebug.h:391
void set_eJe(const vpMatrix &eJe_)
Definition: vpServo.h:460
void display(const vpImage< unsigned char > &I, const vpCameraParameters &cam, const vpColor &color=vpColor::green, const unsigned int thickness=1)
Definition: vpPoint.cpp:428
static void displayText(const vpImage< unsigned char > &I, const vpImagePoint &ip, const std::string &s, const vpColor &color)
Use the X11 console to display images on unix-like OS. Thus to enable this class X11 should be installed.
Definition: vpDisplayX.h:153
void addFeature(vpBasicFeature &s, vpBasicFeature &s_star, const unsigned int select=vpBasicFeature::FEATURE_ALL)
Definition: vpServo.cpp:512
void print() const
Print the matrix as a pose vector .
void track(const vpImage< unsigned char > &I)
Definition: vpDot.cpp:800
void set_x(const double x)
Set the point x coordinate in the image plane.
Definition: vpPoint.cpp:496
static void displayPoint(const vpImage< unsigned char > &I, const vpImagePoint &ip, const vpColor &color, unsigned int thickness=1)
Class that defines a 2D point visual feature which is composed by two parameters that are the cartes...
static void convertPoint(const vpCameraParameters &cam, const double &u, const double &v, double &x, double &y)
Point coordinates conversion from pixel coordinates to normalized coordinates in meter...
void get_cMe(vpHomogeneousMatrix &_cMe) const
static const vpColor green
Definition: vpColor.h:166
void acquire(vpImage< unsigned char > &I)
static void flush(const vpImage< unsigned char > &I)
void set_y(const double y)
void load(std::ifstream &f)
Control of Irisa's gantry robot named Afma6.
Definition: vpRobotAfma6.h:210
static const vpColor red
Definition: vpColor.h:163
Class that defines what is a point.
Definition: vpPoint.h:59
void set_x(const double x)
static void write(const vpImage< unsigned char > &I, const std::string &filename)
Definition: vpImageIo.cpp:368
void open(vpImage< unsigned char > &I)
void set_i(const double ii)
Definition: vpImagePoint.h:163
void kill()
Definition: vpServo.cpp:191
vpImagePoint getCog() const
Definition: vpDot.h:225
Initialize the velocity controller.
Definition: vpRobot.h:68
vpColVector computeControlLaw()
Definition: vpServo.cpp:954
#define vpTRACE
Definition: vpDebug.h:414
static void display(const vpImage< unsigned char > &I)
The vpDisplayOpenCV allows to display image using the OpenCV library. Thus to enable this class OpenCV should be installed.
Class used for pose computation from N points (pose from point only). Some of the algorithms implemen...
Definition: vpPose.h:76
Generic class defining intrinsic camera parameters.
void setLambda(double c)
Definition: vpServo.h:391
void set_y(const double y)
Set the point y coordinate in the image plane.
Definition: vpPoint.cpp:498
The vpDisplayGTK allows to display image using the GTK 3rd party library. Thus to enable this class G...
Definition: vpDisplayGTK.h:138
bool computePose(vpPoseMethodType method, vpHomogeneousMatrix &cMo, bool(*func)(vpHomogeneousMatrix *)=NULL)
Definition: vpPose.cpp:372
vpRobot::vpRobotStateType setRobotState(vpRobot::vpRobotStateType newState)
static void getImage(const vpImage< unsigned char > &Is, vpImage< vpRGBa > &Id)
Definition: vpDisplay.cpp:155
void setInteractionMatrixType(const vpServoIteractionMatrixType &interactionMatrixType, const vpServoInversionType &interactionMatrixInversion=PSEUDO_INVERSE)
Definition: vpServo.cpp:585
void set_j(const double jj)
Definition: vpImagePoint.h:174
void setWorldCoordinates(const double oX, const double oY, const double oZ)
Definition: vpPoint.cpp:111
Implementation of column vector and the associated operations.
Definition: vpColVector.h:72
void setGraphics(const bool activate)
Definition: vpDot.h:359
void set_cVe(const vpVelocityTwistMatrix &cVe_)
Definition: vpServo.h:435
void setFramerate(vp1394TwoFramerateType fps)
void setVideoMode(vp1394TwoVideoModeType videomode)
void print(const vpServo::vpServoPrintType display_level=ALL, std::ostream &os=std::cout)
Definition: vpServo.cpp:314
This tracker is meant to track a dot (connected pixels with same gray level) on a vpImage...
Definition: vpDot.h:116
vpColVector getError() const
Definition: vpServo.h:271
Class for firewire ieee1394 video devices using libdc1394-2.x api.
void save(std::ofstream &f) const
void setVelocity(const vpRobot::vpControlFrameType frame, const vpColVector &velocity)
Class that defines a 2D point in an image. This class is useful for image processing and stores only ...
Definition: vpImagePoint.h:88
void changeFrame(const vpHomogeneousMatrix &cMo, vpColVector &_cP)
Definition: vpPoint.cpp:247
void addPoint(const vpPoint &P)
Definition: vpPose.cpp:145
void setServo(const vpServoType &servo_type)
Definition: vpServo.cpp:222
void initTracking(const vpImage< unsigned char > &I)
Definition: vpDot.cpp:654
static void display(const vpServo &s, const vpCameraParameters &cam, const vpImage< unsigned char > &I, vpColor currentColor=vpColor::green, vpColor desiredColor=vpColor::red, unsigned int thickness=1)
static const vpColor blue
Definition: vpColor.h:169
void clearPoint()
Definition: vpPose.cpp:130