triclops.cpp
1 
2 /***************************************************************************
3  * triclops.cpp - Stereo processor using the TriclopsSDK
4  *
5  * Created: Fri May 18 17:20:31 2007
6  * Copyright 2007 Tim Niemueller [www.niemueller.de]
7  *
8  ****************************************************************************/
9 
10 /* This program is free software; you can redistribute it and/or modify
11  * it under the terms of the GNU General Public License as published by
12  * the Free Software Foundation; either version 2 of the License, or
13  * (at your option) any later version. A runtime exception applies to
14  * this software (see LICENSE.GPL_WRE file mentioned below for details).
15  *
16  * This program is distributed in the hope that it will be useful,
17  * but WITHOUT ANY WARRANTY; without even the implied warranty of
18  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19  * GNU Library General Public License for more details.
20  *
21  * Read the full text in the LICENSE.GPL_WRE file in the doc directory.
22  */
23 
24 #include <core/exceptions/software.h>
25 #include <core/exceptions/system.h>
26 #include <fvcams/bumblebee2.h>
27 #include <fvstereo/triclops.h>
28 #include <fvutils/base/roi.h>
29 #include <fvutils/color/conversions.h>
30 #include <fvutils/rectification/rectfile.h>
31 #include <fvutils/rectification/rectinfo_lut_block.h>
32 #include <utils/math/angle.h>
33 
34 // PGR Triclops SDK
35 #include <cstdlib>
36 #include <errno.h>
37 #include <math.h>
38 #include <triclops.h>
39 #include <unistd.h>
40 
41 using namespace fawkes;
42 
43 namespace firevision {
44 
45 /// @cond INTERNALS
46 /** Data internal to Triclops stereo processor
47  * This class exists to hide the Triclops types from users of the stereo
48  * processor, so that the Triclops SDK headers do not have to be exposed.
49  */
50 class TriclopsStereoProcessorData
51 {
52 public:
53  TriclopsContext triclops;
54  TriclopsInput input;
55  TriclopsError err;
56  TriclopsImage rectified_image;
57  TriclopsImage16 disparity_image_hires;
58  TriclopsImage disparity_image_lores;
59  bool enable_subpixel_interpolation;
60 };
61 /// @endcond
62 
63 /** @class TriclopsStereoProcessor <fvstereo/triclops.h>
64  * Stereo processing using PGR Triclops SDK.
65  * This class uses the Triclops SDK provided by Point Grey Research along with
66  * the Bumblebee2 cameras.
67  * @author Tim Niemueller
68  */
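/* Typical online usage (a minimal sketch, not part of the original source; it
 * assumes a pointer `cam` to a fully opened and started Bumblebee2Camera and
 * omits frame capture and error handling):
 *
 *   TriclopsStereoProcessor tsp(cam);     // cam must be a Bumblebee2Camera
 *   // ... capture a stereo frame with the camera here ...
 *   tsp.preprocess_stereo();              // de-interlace and Bayer-decode
 *   tsp.calculate_yuv(false);             // optional: YUV buffer of the right image
 *   tsp.calculate_disparity(NULL);        // rectify and run the stereo engine
 *   float x, y, z;
 *   if (tsp.get_xyz(320, 240, &x, &y, &z)) {
 *     // (x, y, z) is the camera-relative position of image pixel (320, 240)
 *   }
 */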
69 
70 /** Constructor.
71  * This constructor initializes this triclops wrapper to work on a real camera.
72  * @param camera Must be of type Bumblebee2Camera
73  */
74 TriclopsStereoProcessor::TriclopsStereoProcessor(Camera *camera)
75 {
76  _context_file = NULL;
77  buffer_deinterlaced = NULL;
78 
79  bb2 = dynamic_cast<Bumblebee2Camera *>(camera);
80  if (!bb2) {
81  throw TypeMismatchException("Camera is not of type Bumblebee2Camera");
82  }
83  if (!bb2->is_bumblebee2()) {
84  throw TypeMismatchException("Camera is not a Bumblebee 2");
85  }
86 
87  bb2->set_image_number(Bumblebee2Camera::RGB_IMAGE);
88 
89  _width = bb2->pixel_width();
90  _height = bb2->pixel_height();
91 
92  _output_image_width = _width;
93  _output_image_height = _height;
94 
95  create_buffers();
96  try {
97  setup_triclops();
98  } catch (...) {
99  throw;
100  }
101 
102  buffer_rgb = bb2->buffer();
103  buffer_rgb_right = buffer_rgb;
104  buffer_rgb_left = buffer_rgb + colorspace_buffer_size(RGB, _width, _height);
105  buffer_rgb_center = buffer_rgb_left; // the center image aliases the left one, as in the PGR example code
106 }
107 
108 /** Constructor.
109  * This constructor makes the Triclops wrapper work on saved images, given the
110  * expected size of a single image and the path to the Triclops context file of
111  * the camera that was used.
112  * @param width image width in pixels
113  * @param height image height in pixels
114  * @param context_file Triclops context file
115  */
116 TriclopsStereoProcessor::TriclopsStereoProcessor(unsigned int width,
117  unsigned int height,
118  const char * context_file)
119 {
120  _width = width;
121  _height = height;
122  _output_image_width = _width;
123  _output_image_height = _height;
124  _context_file = strdup(context_file);
125 
126  bb2 = NULL;
127 
128  create_buffers();
129  try {
130  setup_triclops();
131  } catch (Exception &e) {
132  throw;
133  }
134 }
135 
136 /** Create working buffers.
137  * The raw buffers use the RAW16 format, i.e. two bytes per pixel, so the total
138  * buffer size is width * height * 2.
139  */
140 void
141 TriclopsStereoProcessor::create_buffers()
142 {
143  buffer_green = (unsigned char *)malloc(_width * _height * 2);
144  buffer_yuv_right = malloc_buffer(YUV422_PLANAR, _width, _height);
145  buffer_yuv_left = malloc_buffer(YUV422_PLANAR, _width, _height);
146 
147  if (bb2) {
148  buffer_rgb = bb2->buffer();
149  } else {
150  buffer_rgb = (unsigned char *)malloc(colorspace_buffer_size(RGB, _width, _height) * 2);
151  buffer_deinterlaced = (unsigned char *)malloc(_width * _height * 2);
152  }
153  buffer_rgb_right = buffer_rgb;
154  buffer_rgb_left = buffer_rgb + colorspace_buffer_size(RGB, _width, _height);
155  buffer_rgb_center = buffer_rgb_left; // the center image aliases the left one, as in the PGR example code
156 }
157 
158 void
159 TriclopsStereoProcessor::setup_triclops()
160 {
161  // Internal data
162  data = new TriclopsStereoProcessorData();
163  // Always the same
164  data->input.inputType = TriInp_RGB;
165  data->input.nrows = _height;
166  data->input.ncols = _width;
167  data->input.rowinc = data->input.ncols;
168  /*
169  data->input.u.rgb.red = buffer_yuv_right;
170  data->input.u.rgb.green = buffer_yuv_left;
171  data->input.u.rgb.blue = buffer_yuv_left;
172  */
173  data->input.u.rgb.red = buffer_green;
174  data->input.u.rgb.green = buffer_green + _width * _height;
175  data->input.u.rgb.blue = data->input.u.rgb.green;
176 
177  if (bb2) {
178  try {
179  get_triclops_context_from_camera();
180  } catch (Exception &e) {
181  delete data;
182  throw;
183  }
184  } else {
185  if (!_context_file) {
186  delete data;
187  throw NullPointerException("TriclopsStereoProcessor: You must supply the path "
188  "to a valid BB2 context file");
189  }
190 
191  if (access(_context_file, F_OK | R_OK) != 0) {
192  delete data;
193  throw CouldNotOpenFileException("TriclopsStereoProcessor: Cannot access context file");
194  }
195  data->err = triclopsGetDefaultContextFromFile(&(data->triclops), _context_file);
196  if (data->err != TriclopsErrorOk) {
197  delete data;
198  throw Exception("Fetching Triclops context from camera failed");
199  }
200  }
201 
202  // Set defaults
203  data->enable_subpixel_interpolation = false;
204 
205  triclopsSetSubpixelInterpolation(data->triclops, 0);
206 
207  triclopsSetResolutionAndPrepare(data->triclops, _height, _width, _height, _width);
208 
209  triclopsSetEdgeCorrelation(data->triclops, 1);
210  triclopsSetLowpass(data->triclops, 1);
211  triclopsSetDisparity(data->triclops, 5, 100);
212  triclopsSetEdgeMask(data->triclops, 11);
213  triclopsSetStereoMask(data->triclops, 23);
214  triclopsSetSurfaceValidation(data->triclops, 1);
215  triclopsSetTextureValidation(data->triclops, 0);
216 
217  triclopsSetDisparityMapping(data->triclops, 10, 85);
218  triclopsSetDisparityMappingOn(data->triclops, 1);
219 }
220 
221 /** Destructor. */
222 TriclopsStereoProcessor::~TriclopsStereoProcessor()
223 {
224  // Not called because it currently segfaults:
225  // triclopsDestroyContext( data->triclops );
226 
227  if (buffer_green != NULL)
228  free(buffer_green);
229  if (buffer_yuv_right != NULL)
230  free(buffer_yuv_right);
231  if (buffer_yuv_left != NULL)
232  free(buffer_yuv_left);
233  if (_context_file != NULL)
234  free(_context_file);
235 
236  if (!bb2) {
237  if (buffer_rgb)
238  free(buffer_rgb);
239  if (buffer_deinterlaced)
240  free(buffer_deinterlaced);
241  }
242 
243  buffer_green = NULL;
244  buffer_rgb = NULL;
245  buffer_yuv_right = NULL;
246  buffer_yuv_left = NULL;
247  _buffer = NULL;
248 
249  delete data;
250 }
251 
252 /** Set raw buffer.
253  * @param raw16_buffer buffer containing the stereo image encoded as BB2 RAW16
254  */
255 void
256 TriclopsStereoProcessor::set_raw_buffer(unsigned char *raw16_buffer)
257 {
258  buffer_raw16 = raw16_buffer;
259 }
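/* Offline usage (a sketch under assumptions: `raw16` is a saved BB2 RAW16 stereo
 * image of the same resolution as the context, loaded elsewhere; the file name is
 * hypothetical):
 *
 *   TriclopsStereoProcessor tsp(640, 480, "bumblebee2.cal");
 *   tsp.set_raw_buffer(raw16);       // BB2 RAW16 stereo image
 *   tsp.preprocess_stereo();         // de-interlace + Bayer decode into the RGB buffer
 *   tsp.calculate_disparity(NULL);
 *   unsigned char *disparity = tsp.disparity_buffer();
 */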
260 
261 /** Set the resolution of the output images.
262  * @param width the width of the output images
263  * @param height the height of the output images
264  */
265 void
266 TriclopsStereoProcessor::set_output_image_size(unsigned int width, unsigned int height)
267 {
268  data->err = triclopsSetResolutionAndPrepare(data->triclops, height, width, _height, _width);
269 
270  if (data->err == TriclopsErrorOk) {
271  _output_image_width = width;
272  _output_image_height = height;
273  }
274 }
275 
276 /** Enable or disable subpixel interpolation
277  * @param enabled true to enable, false to disable
278  */
279 void
280 TriclopsStereoProcessor::set_subpixel_interpolation(bool enabled)
281 {
282  data->enable_subpixel_interpolation = enabled;
283  triclopsSetSubpixelInterpolation(data->triclops, enabled);
284 }
285 
286 /** Enable or disable edge correlation.
287  * @param enabled true to enable, false to disable
288  */
289 void
290 TriclopsStereoProcessor::set_edge_correlation(bool enabled)
291 {
292  triclopsSetEdgeCorrelation(data->triclops, enabled);
293 }
294 
295 /** Enable or disable lowpass filtering before rectification.
296  * @param enabled true to enable, false to disable
297  */
298 void
299 TriclopsStereoProcessor::set_lowpass(bool enabled)
300 {
301  triclopsSetLowpass(data->triclops, enabled);
302 }
303 
304 /** Set disparity range.
305  * @param min minimum disparity
306  * @param max maximum disparity
307  */
308 void
309 TriclopsStereoProcessor::set_disparity_range(int min, int max)
310 {
311  triclopsSetDisparity(data->triclops, min, max);
312 }
313 
314 /** Set edge mask.
315  * Size of the kernel used for edge detection.
316  * This value must be in the range [3..13].
317  * @param mask_size mask size
318  */
319 void
320 TriclopsStereoProcessor::set_edge_masksize(unsigned int mask_size)
321 {
322  triclopsSetEdgeMask(data->triclops, mask_size);
323 }
324 
325 /** Set stereo mask.
326  * Size of the mask used for stereo correlation.
327  * @param mask_size mask size
328  */
329 void
330 TriclopsStereoProcessor::set_stereo_masksize(unsigned int mask_size)
331 {
332  triclopsSetStereoMask(data->triclops, mask_size);
333 }
334 
335 /** Enable or disable surface validation.
336  * @param enabled true to enable, false to disable
337  */
338 void
339 TriclopsStereoProcessor::set_surface_validation(bool enabled)
340 {
341  triclopsSetSurfaceValidation(data->triclops, enabled);
342 }
343 
344 /** Enable or disable texture validation.
345  * @param enabled true to enable, false to disable
346  */
347 void
348 TriclopsStereoProcessor::set_texture_validation(bool enabled)
349 {
350  triclopsSetTextureValidation(data->triclops, enabled);
351 }
352 
353 /** Set disparity mapping range.
354  * @param min minimum disparity
355  * @param max maximum disparity
356  */
357 void
358 TriclopsStereoProcessor::set_disparity_mapping_range(unsigned char min, unsigned char max)
359 {
360  triclopsSetDisparityMapping(data->triclops, min, max);
361 }
362 
363 /** Enable or disable disparity mapping.
364  * @param enabled true to enable, false to disable
365  */
366 void
367 TriclopsStereoProcessor::set_disparity_mapping(bool enabled)
368 {
369  triclopsSetDisparityMappingOn(data->triclops, enabled);
370 }
371 
372 /** Check state of subpixel interpolation
373  * @return true if enabled, false otherwise
374  */
375 bool
376 TriclopsStereoProcessor::subpixel_interpolation()
377 {
378  TriclopsBool on;
379  triclopsGetSubpixelInterpolation(data->triclops, &on);
380  return on;
381 }
382 
383 /** Get width of output images.
384  * @return width of output images
385  */
386 unsigned int
387 TriclopsStereoProcessor::output_image_width()
388 {
389  return _output_image_width;
390 }
391 
392 /** Get height of output images.
393  * @return height of output images
394  */
395 unsigned int
396 TriclopsStereoProcessor::output_image_height()
397 {
398  return _output_image_height;
399 }
400 
401 /** Check state of edge correlation.
402  * @return true if enabled, false otherwise
403  */
404 bool
405 TriclopsStereoProcessor::edge_correlation()
406 {
407  TriclopsBool on;
408  triclopsGetEdgeCorrelation(data->triclops, &on);
409  return on;
410 }
411 
412 /** Check state of lowpass filtering.
413  * @return true if enabled, false otherwise
414  */
415 bool
416 TriclopsStereoProcessor::lowpass()
417 {
418  TriclopsBool on;
419  triclopsGetLowpass(data->triclops, &on);
420  return on;
421 }
422 
423 /** Get disparity range min value.
424  * @return disparity range min value
425  */
426 int
427 TriclopsStereoProcessor::disparity_range_min()
428 {
429  int min, max;
430  triclopsGetDisparity(data->triclops, &min, &max);
431  return min;
432 }
433 
434 /** Get disparity range max value.
435  * @return disparity range max value
436  */
437 int
438 TriclopsStereoProcessor::disparity_range_max()
439 {
440  int min, max;
441  triclopsGetDisparity(data->triclops, &min, &max);
442  return max;
443 }
444 
445 /** Get edge mask size.
446  * @return size of the edge mask
447  */
448 unsigned int
449 TriclopsStereoProcessor::edge_masksize()
450 {
451  int mask_size = 0;
452  triclopsGetEdgeMask(data->triclops, &mask_size);
453  return mask_size;
454 }
455 
456 /** Get stereo mask size.
457  * @return size of the stereo mask
458  */
459 unsigned int
460 TriclopsStereoProcessor::stereo_masksize()
461 {
462  int mask_size = 0;
463  triclopsGetStereoMask(data->triclops, &mask_size);
464  return mask_size;
465 }
466 
467 /** Check state of surface validation.
468  * @return true if enabled, false otherwise
469  */
470 bool
471 TriclopsStereoProcessor::surface_validation()
472 {
473  TriclopsBool on;
474  triclopsGetSurfaceValidation(data->triclops, &on);
475  return on;
476 }
477 
478 /** Check state of texture validation.
479  * @return true if enabled, false otherwise
480  */
481 bool
482 TriclopsStereoProcessor::texture_validation()
483 {
484  TriclopsBool on;
485  triclopsGetTextureValidation(data->triclops, &on);
486  return on;
487 }
488 
489 /** Get disparity mapping min value.
490  * @return min value for disparity mapping
491  */
492 unsigned char
493 TriclopsStereoProcessor::disparity_mapping_min()
494 {
495  unsigned char min, max;
496  triclopsGetDisparityMapping(data->triclops, &min, &max);
497  return min;
498 }
499 
500 /** Get disparity mapping max value.
501  * @return max value for disparity mapping
502  */
503 unsigned char
504 TriclopsStereoProcessor::disparity_mapping_max()
505 {
506  unsigned char min, max;
507  triclopsGetDisparityMapping(data->triclops, &min, &max);
508  return max;
509 }
510 
511 /** Check state of disparity mapping
512  * @return true if enabled, false otherwise
513  */
514 bool
515 TriclopsStereoProcessor::disparity_mapping()
516 {
517  TriclopsBool on;
518  triclopsGetDisparityMappingOn(data->triclops, &on);
519  return on;
520 }
521 
522 void
523 TriclopsStereoProcessor::preprocess_stereo()
524 {
525  if (bb2) {
526  bb2->deinterlace_stereo();
527  bb2->decode_bayer();
528  } else {
529  Bumblebee2Camera::deinterlace_stereo(buffer_raw16, buffer_deinterlaced, _width, _height);
530  Bumblebee2Camera::decode_bayer(
531  buffer_deinterlaced, buffer_rgb, _width, _height, BAYER_PATTERN_BGGR);
532  }
533 }
534 
535 void
536 TriclopsStereoProcessor::calculate_yuv(bool both)
537 {
538  // RGB -> YUV
539  convert(RGB, YUV422_PLANAR, buffer_rgb_right, buffer_yuv_right, _width, _height);
540  if (both) {
541  convert(RGB, YUV422_PLANAR, buffer_rgb_left, buffer_yuv_left, _width, _height);
542  }
543 }
544 
545 void
546 TriclopsStereoProcessor::calculate_disparity(ROI *roi)
547 {
548  TriclopsROI *rois;
549  int max_rois;
550 
551  if (NULL != roi) {
552  if (TriclopsErrorOk == triclopsGetROIs(data->triclops, &rois, &max_rois)) {
553  // assume there is always at least one ROI
554  rois[0].col = roi->start.x;
555  rois[0].row = roi->start.y;
556  rois[0].ncols = roi->width;
557  rois[0].nrows = roi->height;
558 
559  triclopsSetNumberOfROIs(data->triclops, 1);
560  } else {
561  triclopsSetNumberOfROIs(data->triclops, 0);
562  }
563  } else {
564  triclopsSetNumberOfROIs(data->triclops, 0);
565  }
566 
567  // now deinterlace the RGB Buffer
568  deinterlace_green(buffer_rgb, buffer_green, _width, 6 * _height);
569 
570  // rectify
571  if ((data->err = triclopsRectify(data->triclops, &(data->input))) != TriclopsErrorOk) {
572  throw Exception("Rectifying the image failed");
573  }
574 
575  // disparity
576  if ((data->err = triclopsStereo(data->triclops)) != TriclopsErrorOk) {
577  throw Exception("Calculating the disparity image failed");
578  }
579 
580  triclopsGetImage(data->triclops, TriImg_RECTIFIED, TriCam_REFERENCE, &(data->rectified_image));
581 
582  if (data->enable_subpixel_interpolation) {
583  triclopsGetImage16(data->triclops,
584  TriImg16_DISPARITY,
585  TriCam_REFERENCE,
586  &(data->disparity_image_hires));
587  } else {
588  triclopsGetImage(data->triclops,
589  TriImg_DISPARITY,
590  TriCam_REFERENCE,
591  &(data->disparity_image_lores));
592  }
593  /*
594  if ( data->enable_subpixel_interpolation ) {
595  if ( data->disparity_image_hires.data[640 * 240 + 320] < 0xFF00 ) {
596  float x, y, z;
597  triclopsRCD16ToXYZ(data->triclops, _height, _width, data->disparity_image_hires.data[640 * 240 + 320], &x, &y, &z);
598  cout << "Pixel coordinate: (x, y, z) = (" << x << "," << y << "," << z << ")" << endl;
599  } else {
600  cout << "Pixel has invalid disparity value" << endl;
601  }
602  if ( ! done ) {
603  triclopsSaveImage16( &(data->disparity_image_hires), "disparity.pgm" );
604  triclopsSetDisparityMappingOn( data->triclops, 0 );
605  done = true;
606  }
607  } else {
608  if ( (data->disparity_image_lores.data + data->disparity_image_lores.rowinc * 240 + 320) != 0 ) {
609  float x, y, z;
610  triclopsRCD8ToXYZ(data->triclops, 240, 320, *(data->disparity_image_lores.data + data->disparity_image_lores.rowinc * 240 + 320), &x, &y, &z);
611  cout << "Pixel coordinate: (x, y, z) = (" << x << "," << y << "," << z << ")" << endl;
612  } else {
613  cout << "Pixel has invalid disparity value" << endl;
614  }
615  if ( ! done ) {
616  PNMWriter pnm(PNM_PGM, "disparity_own.pgm", _width, _height);
617  pnm.set_buffer(YUV422_PLANAR, data->disparity_image_lores.data);
618  pnm.write();
619  triclopsSaveImage( &(data->disparity_image_lores), "disparity_lores.pgm" );
620  triclopsSetDisparityMappingOn( data->triclops, 0 );
621  done = true;
622  }
623  }
624  */
625 }
626 
627 /** Get the disparity image buffer.
628  * Access method to the disparity image buffer.
629  * @return pointer to the internal disparity image buffer
630  */
631 unsigned char *
632 TriclopsStereoProcessor::disparity_buffer()
633 {
634  if (data->enable_subpixel_interpolation) {
635  return (unsigned char *)data->disparity_image_hires.data;
636  } else {
637  return data->disparity_image_lores.data;
638  }
639 }
640 
641 size_t
642 TriclopsStereoProcessor::disparity_buffer_size() const
643 {
644  if (data->enable_subpixel_interpolation) {
645  return _width * _height * 2;
646  } else {
647  return _width * _height;
648  }
649 }
650 
651 unsigned char *
652 TriclopsStereoProcessor::yuv_buffer_left()
653 {
654  return buffer_yuv_right;
655 }
656 
657 unsigned char *
658 TriclopsStereoProcessor::yuv_buffer_right()
659 {
660  return buffer_yuv_left;
661 }
662 
663 /** Get Triclops context.
664  * This retrieves calibration information from the camera and stores it into a
665  * temporary file. With this file the Triclops context is initialized. Afterwards
666  * the temporary file is removed.
667  */
668 void
669 TriclopsStereoProcessor::get_triclops_context_from_camera()
670 {
671  char *tmpname = (char *)malloc(strlen("triclops_cal_XXXXXX") + 1);
672  strcpy(tmpname, "triclops_cal_XXXXXX");
673  char *tmpfile = mktemp(tmpname);
674  bb2->write_triclops_config_from_camera_to_file(tmpfile);
675 
676  data->err = triclopsGetDefaultContextFromFile(&(data->triclops), tmpfile);
677  if (data->err != TriclopsErrorOk) {
678  free(tmpfile);
679  throw Exception("Fetching Triclops context from camera failed");
680  }
681  unlink(tmpfile);
682  free(tmpfile);
683 }
684 
685 /** Deinterlace green buffer.
686  * Method used in stereo processing, following the Point Grey example code; it
687  * appears redundant when YUV planar (and thus greyscale) images are available.
688  * @param src source buffer
689  * @param dest destination buffer
690  * @param width width of the image
691  * @param height height of the image
692  */
693 void
694 TriclopsStereoProcessor::deinterlace_green(unsigned char *src,
695  unsigned char *dest,
696  unsigned int width,
697  unsigned int height)
698 {
699  register int i = (width * height) - 2;
700  register int g = ((width * height) / 3) - 1;
701 
702  while (i >= 0) {
703  dest[g--] = src[i -= 3];
704  }
705 }
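/* Note on the call in calculate_disparity(): the source is the stacked RGB buffer
 * holding the right image followed by the left image, which is why it is passed
 * with a height of 6 * _height (two images, three bytes per pixel). Taking every
 * third byte therefore packs the green channels of both images into buffer_green,
 * and setup_triclops() points the TriclopsInput planes at exactly this buffer.
 */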
706 
707 /** Get camera-relative coordinates of a point.
708  * Use this method to get the coordinates in the camera coordinate system of a given
709  * point in the image. It may not be possible to provide such a coordinate if no valid
710  * disparity information could be calculated for the given point.
711  * @param px x coordinate in image
712  * @param py y coordinate in image
713  * @param x contains the x coordinate in the camera-relative coordinate system upon
714  * successful return
715  * @param y contains the y coordinate in the camera-relative coordinate system upon
716  * successful return
717  * @param z contains the z coordinate in the camera-relative coordinate system upon
718  * successful return
719  * @return true, if valid information could be retrieved. In that case (x, y, z) is filled
720  * with the coordinates, false otherwise (x, y, and z are not modified).
721  */
722 bool
723 TriclopsStereoProcessor::get_xyz(unsigned int px, unsigned int py, float *x, float *y, float *z)
724 {
725  if (data->enable_subpixel_interpolation) {
726  unsigned short disparity = data->disparity_image_hires.data[_width * py + px];
727  if (disparity < 0xFF00) {
728  triclopsRCD16ToXYZ(data->triclops, py, px, disparity, x, y, z);
729  return true;
730  }
731  } else {
732  unsigned char disparity = data->disparity_image_lores.data[_width * py + px];
733  if (disparity != 0) {
734  triclopsRCD8ToXYZ(data->triclops, py, px, disparity, x, y, z);
735  return true;
736  }
737  }
738 
739  return false;
740 }
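/* Sketch (assuming disparity has already been computed and the output resolution
 * equals the input resolution; requires <vector>): collect all valid
 * camera-relative 3D points.
 *
 *   std::vector<float> xs, ys, zs;
 *   for (unsigned int py = 0; py < tsp.output_image_height(); ++py) {
 *     for (unsigned int px = 0; px < tsp.output_image_width(); ++px) {
 *       float x, y, z;
 *       if (tsp.get_xyz(px, py, &x, &y, &z)) {
 *         xs.push_back(x); ys.push_back(y); zs.push_back(z);
 *       }
 *     }
 *   }
 */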
741 
742 /** Get transformed coordinates of a point.
743  * Use this method to get the coordinates in the transformed coordinate system of a given
744  * point in the image. It may not be possible to provide such a coordinate if no valid
745  * disparity information could be calculated for the given point.
746  * @param px x coordinate in image
747  * @param py y coordinate in image
748  * @param x contains the x coordinate in the transformed coordinate system upon
749  * successful return
750  * @param y contains the y coordinate in the transformed coordinate system upon
751  * successful return
752  * @param z contains the z coordinate in the transformed coordinate system upon
753  * successful return
754  * @return true, if valid information could be retrieved. In that case (x, y, z) is filled
755  * with the coordinates, false otherwise (x, y, and z are not modified).
756  */
757 bool
758 TriclopsStereoProcessor::get_world_xyz(unsigned int px,
759  unsigned int py,
760  float * x,
761  float * y,
762  float * z)
763 {
764  float cam_angle = deg2rad(57);
765  float trans_x = -0.1;
766  float trans_y = 0.05;
767  float trans_z = -0.78;
768  float tx, ty, tz;
769  if (get_xyz(px, py, &tx, &ty, &tz)) {
770  /* transform around x axis
771  *x = tx;
772  *y = cos(cam_angle) * ty + sin(cam_angle) * tz;
773  *z = -sin(cam_angle) * ty + cos(cam_angle) * tz;
774  */
775  float x1, y1, z1, x2, y2, z2, x3, y3, z3;
776  x1 = tx;
777  y1 = cos(cam_angle) * ty + sin(cam_angle) * tz;
778  z1 = -sin(cam_angle) * ty + cos(cam_angle) * tz;
779  // cout << "Transform 1: (" << tx << "," << ty << "," << tz << ") -> (" << x1 << "," << y1 << "," << z1 << ")" << endl;
780  // *x = y1;
781  // *y = cos(cam_angle) * x1 + sin(cam_angle) * z1;
782  // *z = -sin(cam_angle) * x1 + cos(cam_angle) * z1;
783  x2 = y1;
784  y2 = x1;
785  z2 = z1;
786  // cout << "Transform 2: (" << x1 << "," << y1 << "," << z1 << ") -> (" << x2 << "," << y2 << "," << z2 << ")" << endl;
787 
788  x3 = z2;
789  y3 = y2;
790  z3 = x2;
791  // cout << "Transform 3: (" << x2 << "," << y2 << "," << z2 << ") -> (" << x3 << "," << y3 << "," << z3 << ")" << endl;
792 
793  *x = x3 + trans_x;
794  *y = y3 + trans_y;
795  *z = z3 + trans_z;
796 
797  // cout << "Transform 4: (" << x3 << "," << y3 << "," << z3 << ") -> (" << *x << "," << *y << "," << *z << ")" << endl;
798 
799  /*
800  *x = ty + trans_x;
801  *y = -sin(cam_angle) * tx + cos(cam_angle) * tz + trans_y;
802  *z = cos(cam_angle) * tx + sin(cam_angle) * tz + trans_z;
803  */
804  return true;
805  } else {
806  return false;
807  }
808 }
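/* Net effect of the transform above, written out (a is cam_angle, (tx, ty, tz) the
 * camera-relative point, (trans_x, trans_y, trans_z) the camera mounting offset):
 *
 *   x_world = cos(a) * tz - sin(a) * ty + trans_x
 *   y_world = tx + trans_y
 *   z_world = cos(a) * ty + sin(a) * tz + trans_z
 *
 * i.e. a rotation about the camera x axis by the tilt angle, a relabeling of the
 * axes (the camera optical axis becomes the new x axis), and a constant
 * translation. getall_world_xyz() below applies the same formula to a whole window
 * of the disparity map.
 */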
809 
810 /** Generate rectification LUT.
811  * This will generate a lookup table that can be used for fast rectification of
812  * an image. The LUT has the dimensions of the currently configured image (either
813  * given to the offline context-file constructor or as defined by the supplied
814  * BB2 camera). The file is written via RectificationInfoFile and contains two
815  * RectificationLutInfoBlock entries, one for each camera image.
816  * @param lut_file name of the file to write to. The file is created if it does
817  * not exist and truncated otherwise. The directory in which the file is to be
818  * stored must already exist.
819  */
820 void
821 TriclopsStereoProcessor::generate_rectification_lut(const char *lut_file)
822 {
823  uint64_t guid = 0;
824  const char *model;
825 
826  if (bb2) {
827  guid = bb2->guid();
828  model = bb2->model();
829  } else {
830  int serial_no;
831  triclopsGetSerialNumber(data->triclops, &serial_no);
832  guid = 0xFFFFFFFF;
833  guid <<= 32;
834  guid |= serial_no;
835 
836  model = "Bumblebee2";
837  }
838 
839  RectificationInfoFile *rif = new RectificationInfoFile(guid, model);
840 
841  RectificationLutInfoBlock *lib_left =
842  new RectificationLutInfoBlock(_width, _height, FIREVISION_RECTINFO_CAMERA_LEFT);
843  RectificationLutInfoBlock *lib_right =
844  new RectificationLutInfoBlock(_width, _height, FIREVISION_RECTINFO_CAMERA_RIGHT);
845 
846  register float row, col;
847  for (unsigned int h = 0; h < _height; ++h) {
848  for (unsigned int w = 0; w < _width; ++w) {
849  if (triclopsUnrectifyPixel(data->triclops, TriCam_LEFT, h, w, &row, &col)
850  != TriclopsErrorOk) {
851  throw Exception("Failed to get unrectified position from Triclops SDK");
852  }
853  lib_left->set_mapping(w, h, (int)roundf(col), (int)roundf(row));
854 
855  if (triclopsUnrectifyPixel(data->triclops, TriCam_RIGHT, h, w, &row, &col)
856  != TriclopsErrorOk) {
857  throw Exception("Failed to get unrectified position from Triclops SDK");
858  }
859  lib_right->set_mapping(w, h, (int)roundf(col), (int)roundf(row));
860  }
861  }
862 
863  rif->add_rectinfo_block(lib_left);
864  rif->add_rectinfo_block(lib_right);
865 
866  rif->write(lut_file);
867 
868  delete rif;
869 }
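/* Sketch: generating a rectification LUT offline from a saved context file
 * (both file names are hypothetical):
 *
 *   TriclopsStereoProcessor tsp(640, 480, "bumblebee2.cal");
 *   tsp.generate_rectification_lut("bumblebee2.rectlut");
 */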
870 
871 /** Verify rectification LUT.
872  * @param lut_file Rectification LUT to verify
873  * @return true if the LUT matches the current camera/context file, false otherwise.
874  */
875 bool
876 TriclopsStereoProcessor::verify_rectification_lut(const char *lut_file)
877 {
878  RectificationInfoFile *rif = new RectificationInfoFile();
879  rif->read(lut_file);
880 
881  if (bb2) {
882  if (!bb2->verify_guid(rif->guid())) {
883  return false;
884  }
885  } else {
886  int serial_no;
887  uint64_t guid = 0;
888  triclopsGetSerialNumber(data->triclops, &serial_no);
889  guid = 0xFFFFFFFF;
890  guid <<= 32;
891  guid |= serial_no;
892 
893  if (rif->guid() != guid) {
894  return false;
895  }
896  }
897 
898  if (rif->num_blocks() != 2) {
899  printf("Insufficient blocks, we only have %zu\n", rif->num_blocks());
900  return false;
901  }
902 
903  bool left_ok = false;
904  bool right_ok = false;
905 
907  printf("We have %zu blocks\n", blocks->size());
908  RectificationInfoFile::RectInfoBlockVector::const_iterator i;
909  for (i = blocks->begin(); (i != blocks->end() && !(left_ok && right_ok)); ++i) {
910  printf("Veryfying block\n");
911  RectificationInfoBlock *rib = *i;
912 
913  if ((rib->camera() != FIREVISION_RECTINFO_CAMERA_LEFT)
914  && (rib->camera() != FIREVISION_RECTINFO_CAMERA_RIGHT)) {
915  continue;
916  }
917 
918  if (rib->type() == FIREVISION_RECTINFO_TYPE_LUT_16x16) {
919  RectificationLutInfoBlock *rlib = dynamic_cast<RectificationLutInfoBlock *>(rib);
920  if (rlib == NULL) {
921  continue;
922  }
923 
924  TriclopsCamera cam;
925  if (rib->camera() == FIREVISION_RECTINFO_CAMERA_LEFT) {
926  cam = TriCam_LEFT;
927  if (left_ok)
928  continue;
929  } else {
930  cam = TriCam_RIGHT;
931  if (right_ok)
932  continue;
933  }
934 
935  register float row, col;
936  register uint16_t rx, ry;
937  bool lut_ok = true;
938  for (unsigned int h = 0; (h < _height) && lut_ok; ++h) {
939  for (unsigned int w = 0; w < _width; ++w) {
940  if (triclopsUnrectifyPixel(data->triclops, cam, h, w, &row, &col) != TriclopsErrorOk) {
941  throw Exception("Failed to get unrectified position from Triclops SDK");
942  }
943  rlib->mapping(w, h, &rx, &ry);
944  if ((rx != (int)roundf(col)) || (ry != (int)roundf(row))) {
945  printf("Value at (%x,%u) not ok\n", rx, ry);
946  lut_ok = false;
947  break;
948  }
949  }
950  }
951 
952  if (lut_ok) {
953  if (rib->camera() == FIREVISION_RECTINFO_CAMERA_LEFT) {
954  left_ok = true;
955  } else {
956  right_ok = true;
957  }
958  }
959  }
960  }
961  delete blocks;
962 
963  delete rif;
964  return (left_ok && right_ok);
965 }
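/* Sketch: re-generate the LUT only when it no longer matches the current camera
 * or context (file name hypothetical):
 *
 *   if (!tsp.verify_rectification_lut("bumblebee2.rectlut")) {
 *     tsp.generate_rectification_lut("bumblebee2.rectlut");
 *   }
 */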
966 
967 /** Calculate the Cartesian coordinates of all points in a window of the disparity map.
968  * The values transformed are those inside the window specified by hoff, voff, width and height. settings contains all further information needed, in this order: the angle of the camera, the position of the camera relative to the robot (x, y, z), and the maximum allowed distance of points. Points filtered out by distance are marked as NaN in all three coordinates; unknown regions are marked the same way with INFINITY.
969  * @param buffer buffer for the coordinates, 1st index 0: x; 1: y; 2:z, 2nd index denotes the lines, 3rd index denotes the columns
970  * @param hoff horizontal offset of the window
971  * @param voff vertical offset of the window
972  * @param width width of the window
973  * @param height height of the window
974  * @param settings a vector of settings in floating point format (angle,position of camera x, y, z, maximum distance of points)
975  */
976 void
977 TriclopsStereoProcessor::getall_world_xyz(float ***buffer,
978  int hoff,
979  int voff,
980  int width,
981  int height,
982  float * settings)
983 {
984  float fx, fy, fz, fed, rho;
985  int displine, /*zline,*/ px, py;
986 
987  float **x = buffer[0], **y = buffer[1], **z = buffer[2];
988 
989  const float dnc = NAN;
990  const float ur = INFINITY;
991  const float cos_angle = cos(deg2rad(settings[0]));
992  const float sin_angle = sin(deg2rad(settings[0]));
993  const float trans_x = settings[1];
994  const float trans_y = settings[2];
995  const float trans_z = settings[3];
996  const float mqd = settings[4] * settings[4];
997 
998  if (data->enable_subpixel_interpolation) {
999  unsigned short *disp = data->disparity_image_hires.data;
1000 
1001  for (py = 0; py < height; py++) {
1002  displine = (py + voff) * _width;
1003  //zline = py * width;
1004  for (px = 0; px < width; px++) {
1005  if (disp[displine + px + hoff] >= 0xFF00) {
1006  z[py][px] = x[py][px] = y[py][px] = ur;
1007  } else {
1008  triclopsRCD16ToXYZ(
1009  data->triclops, py + voff, px + hoff, disp[displine + px + hoff], &fx, &fy, &fz);
1010  fed = x[py][px] = -sin_angle * fy + cos_angle * fz + trans_x;
1011  fz = z[py][px] = cos_angle * fy + sin_angle * fz + trans_z;
1012  fy = y[py][px] = fx + trans_y;
1013  fx = fed;
1014  if (fz > 0.0f) {
1015  rho = trans_z / (fz - trans_z);
1016  x[py][px] = fx = fx - rho * (fx - trans_x);
1017  y[py][px] = fy = fy - rho * (fy - trans_y);
1018  }
1019  fed = fx * fx + fy * fy;
1020  if (fed > mqd) {
1021  z[py][px] = x[py][px] = y[py][px] = dnc;
1022  }
1023  }
1024  }
1025  }
1026  } else {
1027  unsigned char *disp = data->disparity_image_lores.data;
1028 
1029  for (py = 0; py < height; py++) {
1030  displine = (py + voff) * _width;
1031  //zline = py * width;
1032  for (px = 0; px < width; px++) {
1033  if (disp[displine + px + hoff] == 0) {
1034  z[py][px] = x[py][px] = y[py][px] = ur;
1035  } else {
1036  triclopsRCD8ToXYZ(
1037  data->triclops, py + voff, px + hoff, disp[displine + px + hoff], &fx, &fy, &fz);
1038  fed = x[py][px] = -sin_angle * fy + cos_angle * fz + trans_x;
1039  fz = z[py][px] = cos_angle * fy + sin_angle * fz + trans_z;
1040  fy = y[py][px] = fx + trans_y;
1041  fx = fed;
1042  if (fz > 0.0f) {
1043  rho = trans_z / (fz - trans_z);
1044  x[py][px] = fx = fx - rho * (fx - trans_x);
1045  y[py][px] = fy = fy - rho * (fy - trans_y);
1046  }
1047  fed = fx * fx + fy * fy;
1048  if (fed > mqd) {
1049  z[py][px] = x[py][px] = y[py][px] = dnc;
1050  }
1051  }
1052  }
1053  }
1054  }
1055 }
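/* Sketch of the buffer layout expected by getall_world_xyz(): buffer[0], buffer[1]
 * and buffer[2] are the x, y and z planes; each plane is an array of `height` row
 * pointers to `width` floats (window size, not full image size). The window and the
 * maximum distance below are illustrative assumptions; the angle and camera offsets
 * mirror the constants hard-coded in get_world_xyz().
 *
 *   const int w = 320, h = 240;                                  // window size
 *   float settings[5] = { 57.0f, -0.1f, 0.05f, -0.78f, 5.0f };   // angle, cam x/y/z, max distance
 *   float ***buf = new float **[3];
 *   for (int c = 0; c < 3; ++c) {
 *     buf[c] = new float *[h];
 *     for (int r = 0; r < h; ++r) {
 *       buf[c][r] = new float[w];
 *     }
 *   }
 *   tsp.getall_world_xyz(buf, 160, 120, w, h, settings);         // hoff = 160, voff = 120
 */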
1056 
1057 } // end namespace firevision