// The open source OpenXR runtime.
1// Copyright 2019-2020, Collabora, Ltd.
2// SPDX-License-Identifier: BSL-1.0
3/*!
4 * @file
5 * @brief Calibration code.
6 * @author Pete Black <pblack@collabora.com>
7 * @author Jakob Bornecrantz <jakob@collabora.com>
8 * @author Rylie Pavlik <rylie.pavlik@collabora.com>
9 * @ingroup aux_tracking
10 */
11
12#include "util/u_sink.h"
13#include "util/u_misc.h"
14#include "util/u_debug.h"
15#include "util/u_frame.h"
16#include "util/u_format.h"
17#include "util/u_logging.h"
18
19#include "tracking/t_tracking.h"
20#include "tracking/t_calibration_opencv.hpp"
21
22#include <opencv2/opencv.hpp>
23#include <sys/stat.h>
24#include <utility>
25#include <stdexcept>
26
27#if CV_MAJOR_VERSION >= 4
28#define SB_CHEESBOARD_CORNERS_SUPPORTED
29#if CV_MINOR_VERSION >= 3 || CV_MAJOR_VERSION > 4
30#define SB_CHEESBOARD_CORNERS_MARKER_SUPPORTED
31#endif
32#endif
33
34
// Debug options read once from the environment (see u_debug.h); they gate
// the optional HSV filter/picker/viewer debug sinks.
DEBUG_GET_ONCE_BOOL_OPTION(hsv_filter, "T_DEBUG_HSV_FILTER", false)
DEBUG_GET_ONCE_BOOL_OPTION(hsv_picker, "T_DEBUG_HSV_PICKER", false)
DEBUG_GET_ONCE_BOOL_OPTION(hsv_viewer, "T_DEBUG_HSV_VIEWER", false)
38
39namespace xrt::auxiliary::tracking {
40/*
41 *
42 * Structs
43 *
44 */
45
46//! Model of the thing we are measuring to calibrate, 32 bit.
47typedef std::vector<cv::Point3f> ModelF32;
48//! Model of the thing we are measuring to calibrate, 64 bit.
49typedef std::vector<cv::Point3d> ModelF64;
50//! A measurement of the model as viewed on the camera.
51typedef std::vector<cv::Point2f> MeasurementF32;
52//! In doubles, because OpenCV can't agree on a single type to use.
53typedef std::vector<cv::Point2d> MeasurementF64;
54//! For each @ref MeasurementF32 we take we also save the @ref ModelF32.
55typedef std::vector<ModelF32> ArrayOfModelF32s;
56//! For each @ref MeasurementF64 we take we also save the @ref ModelF64.
57typedef std::vector<ModelF64> ArrayOfModelF64s;
58//! A array of @ref MeasurementF32.
59typedef std::vector<MeasurementF32> ArrayOfMeasurementF32s;
60//! A array of @ref MeasurementF64.
61typedef std::vector<MeasurementF64> ArrayOfMeasurementF64s;
62//! A array of bounding rects.
63typedef std::vector<cv::Rect> ArrayOfRects;
64
/*!
 * Current state for each view, one view for mono cameras, two for stereo.
 */
struct ViewState
{
	//! All successfully collected measurements, in both precisions.
	ArrayOfMeasurementF32s measured_f32 = {};
	ArrayOfMeasurementF64s measured_f64 = {};
	//! Bounding rect of each collected measurement, for coverage drawing.
	ArrayOfRects measured_bounds = {};

	//! Is @ref last usable for movement checking?
	bool last_valid = false;
	//! Previous frame's measurement, compared against to detect movement.
	MeasurementF64 last = {};

	//! Measurement detected in the current frame, in both precisions.
	MeasurementF64 current_f64 = {};
	MeasurementF32 current_f32 = {};
	//! Bounding rect of the current frame's measurement.
	cv::Rect current_bounds = {};

	//! Coverage of all samples collected before the current frame.
	cv::Rect pre_rect = {};
	//! Coverage including the current frame's detection.
	cv::Rect post_rect = {};

	//! Are @ref map1 and @ref map2 valid for cv::remap?
	bool maps_valid = false;
	//! Undistort/rectify remap tables, filled in once calibration is done.
	cv::Mat map1 = {};
	cv::Mat map2 = {};
};
88
89/*!
90 * Main class for doing calibration.
91 */
92class Calibration
93{
94public:
95 struct xrt_frame_sink base = {};
96
97 struct
98 {
99 cv::Mat rgb = {};
100 struct xrt_frame *frame = {};
101 struct xrt_frame_sink *sink = {};
102 } gui;
103
104 struct
105 {
106 ModelF32 model_f32 = {};
107 ModelF64 model_f64 = {};
108 cv::Size dims = {8, 6};
109 enum t_board_pattern pattern = T_BOARD_CHECKERS;
110 float spacing_meters = 0.05;
111 bool marker; //!< Center board marker for sb_checkers.
112 bool normalize_image; //!< For SB checkers.
113 } board;
114
115 struct
116 {
117 ViewState view[2] = {};
118
119 ArrayOfModelF32s board_models_f32 = {};
120 ArrayOfModelF64s board_models_f64 = {};
121
122 uint32_t calibration_count = {};
123 bool calibrated = false;
124
125
126 uint32_t cooldown = 0;
127 uint32_t waited_for = 0;
128 uint32_t collected_of_part = 0;
129 } state;
130
131 struct
132 {
133 bool enabled = false;
134 uint32_t num_images = 20;
135 } load;
136
137 //! Should we use subpixel enhancing for checkerboard.
138 bool subpixel_enable = true;
139 //! What subpixel range for checkerboard enhancement.
140 int subpixel_size = 5;
141
142 //! Number of frames to wait for cooldown.
143 uint32_t num_cooldown_frames = 20;
144 //! Number of frames to wait for before collecting.
145 uint32_t num_wait_for = 5;
146 //! Total number of samples to collect.
147 uint32_t num_collect_total = 20;
148 //! Number of frames to capture before restarting.
149 uint32_t num_collect_restart = 1;
150
151 //! Distortion model to use.
152 enum t_camera_distortion_model distortion_model = T_DISTORTION_OPENCV_RADTAN_5;
153 //! From parameters.
154 bool stereo_sbs = false;
155
156 //! Should we clear the frame.
157 bool clear_frame = false;
158
159 //! Dump all of the measurements to stdout.
160 bool dump_measurements = false;
161
162 //! Should we save images used for capture.
163 bool save_images = false;
164
165 //! Should we mirror the rgb images.
166 bool mirror_rgb_image = false;
167
168 cv::Mat gray = {};
169
170 char text[512] = {};
171
172 t_calibration_status *status;
173};
174
175
176/*
177 *
178 * Small helpers.
179 *
180 */
181
182static void
183to_stdout(const char *name, const cv::Mat &mat)
184{
185 std::cout << name << " " << mat.size() << ":\n" << mat << "\n";
186}
187
/*!
 * Allocate a fresh gui frame of the given size and wrap it in a cv::Mat.
 */
static void
refresh_gui_frame(class Calibration &c, int rows, int cols)
{
	// Also dereferences the old frame.
	u_frame_create_one_off(XRT_FORMAT_R8G8B8, cols, rows, &c.gui.frame);

	// Wrap the frame's pixel data, no copy is made.
	c.gui.rgb = cv::Mat(rows, cols, CV_8UC3, c.gui.frame->data, c.gui.frame->stride);
}
196
/*!
 * Push the current gui frame to the sink, then allocate a new frame of the
 * same size to draw the next iteration into.
 */
static void
send_rgb_frame(class Calibration &c)
{
	c.gui.sink->push_frame(c.gui.sink, c.gui.frame);

	refresh_gui_frame(c, c.gui.rgb.rows, c.gui.rgb.cols);
}
204
205static void
206ensure_buffers_are_allocated(class Calibration &c, int rows, int cols)
207{
208 if (c.gui.rgb.cols == cols && c.gui.rgb.rows == rows) {
209 return;
210 }
211
212 // If our rgb is not allocated but our gray already is, alloc our rgb
213 // now. We will end up in this path if we receive L8 format.
214 if (c.gray.cols == cols && c.gray.rows == rows) {
215 refresh_gui_frame(c, rows, cols);
216 return;
217 }
218
219 c.gray = cv::Mat(rows, cols, CV_8UC1, cv::Scalar(0));
220
221 refresh_gui_frame(c, rows, cols);
222}
223
224static void
225print_txt(cv::Mat &rgb, const char *text, double fontScale)
226{
227 int fontFace = 0;
228 int thickness = 2;
229 cv::Size textSize = cv::getTextSize(text, fontFace, fontScale, thickness, NULL);
230
231 cv::Point textOrg((rgb.cols - textSize.width) / 2, textSize.height * 2);
232
233 cv::putText(rgb, text, textOrg, fontFace, fontScale, cv::Scalar(192, 192, 192), thickness);
234}
235
/*!
 * Replace the gui frame with a small text-only banner showing @p c.text,
 * then send it off.
 */
static void
make_gui_str(class Calibration &c)
{
	auto &rgb = c.gui.rgb;

	// Fixed-size banner, just big enough for one line of text.
	int cols = 800;
	int rows = 100;
	ensure_buffers_are_allocated(c, rows, cols);

	// Clear the whole banner to black first.
	cv::rectangle(rgb, cv::Point2f(0, 0), cv::Point2f(cols, rows), cv::Scalar(0, 0, 0), -1, 0);

	print_txt(rgb, c.text, 1.0);

	send_rgb_frame(c);
}
251
252/*!
253 * Simple helper to draw a bounding rect.
254 */
255static void
256draw_rect(cv::Mat &rgb, const cv::Rect &rect, const cv::Scalar &colour)
257{
258 cv::rectangle(rgb, rect.tl(), rect.br(), colour);
259}
260
/*!
 * Update and draw the coverage state for one view: the bounds of previously
 * collected samples, the total covered area, and the current detection.
 */
static void
do_view_coverage(class Calibration &c, struct ViewState &view, cv::Mat &gray, cv::Mat &rgb, bool found)
{
	// Get the current bounding rect.
	view.current_bounds = cv::boundingRect(view.current_f32);

	// Compute our 'pre sample' coverage for this frame,
	// for display and area threshold checking.
	std::vector<cv::Point2f> coverage;
	// Two corners per collected rect, plus two for the current one.
	coverage.reserve(view.measured_bounds.size() * 2 + 2);
	for (const cv::Rect &brect : view.measured_bounds) {
		// Outline each previously collected sample.
		draw_rect(rgb, brect, cv::Scalar(0, 64, 32));

		coverage.emplace_back(brect.tl());
		coverage.emplace_back(brect.br());
	}

	// What area of the camera have we calibrated.
	view.pre_rect = cv::boundingRect(coverage);
	draw_rect(rgb, view.pre_rect, cv::Scalar(0, 255, 255));

	if (found) {
		coverage.emplace_back(view.current_bounds.tl());
		coverage.emplace_back(view.current_bounds.br());

		// New area we cover.
		view.post_rect = cv::boundingRect(coverage);

		draw_rect(rgb, view.post_rect, cv::Scalar(0, 255, 0));
	}

	// Draw the checker board, will also draw partial hits.
	cv::drawChessboardCorners(rgb, c.board.dims, view.current_f32, found);
}
295
/*!
 * Detect a classic checkerboard in @p gray, optionally refine the corners to
 * subpixel accuracy, and update the view's measurements and coverage drawing.
 *
 * @return true if the full board was found.
 */
static bool
do_view_chess(class Calibration &c, struct ViewState &view, cv::Mat &gray, cv::Mat &rgb)
{
	/*
	 * Fisheye requires measurement and model to be double, other functions
	 * requires them to be floats (like cornerSubPix). So we give in
	 * current_f32 here and convert below.
	 */

	int flags = 0;
	flags += cv::CALIB_CB_FAST_CHECK;
	flags += cv::CALIB_CB_ADAPTIVE_THRESH;
	flags += cv::CALIB_CB_NORMALIZE_IMAGE;

	bool found = cv::findChessboardCorners(gray,             // Image
	                                       c.board.dims,     // patternSize
	                                       view.current_f32, // corners
	                                       flags);           // flags

	// Improve the corner positions.
	if (found && c.subpixel_enable) {
		int crit_flag = 0;
		crit_flag |= cv::TermCriteria::EPS;
		crit_flag |= cv::TermCriteria::COUNT;
		cv::TermCriteria term_criteria = {crit_flag, 30, 0.1};

		cv::Size size(c.subpixel_size, c.subpixel_size);
		// (-1, -1) means no dead zone inside the search window.
		cv::Size zero(-1, -1);

		cv::cornerSubPix(gray, view.current_f32, size, zero, term_criteria);
	}

	// Do the conversion here.
	view.current_f64.clear(); // Doesn't affect capacity.
	for (const cv::Point2f &p : view.current_f32) {
		view.current_f64.emplace_back(double(p.x), double(p.y));
	}

	do_view_coverage(c, view, gray, rgb, found);

	return found;
}
338
339#ifdef SB_CHEESBOARD_CORNERS_SUPPORTED
/*!
 * Detect a checkerboard using the newer findChessboardCornersSB detector
 * (OpenCV 4+), and update the view's measurements and coverage drawing.
 *
 * @return true if the full board was found.
 */
static bool
do_view_sb_checkers(class Calibration &c, struct ViewState &view, cv::Mat &gray, cv::Mat &rgb)
{
	/*
	 * Fisheye requires measurement and model to be double, other functions
	 * requires them to be floats (like cornerSubPix). So we give in
	 * current_f32 here and convert below.
	 */

	int flags = 0;
	if (c.board.normalize_image) {
		flags += cv::CALIB_CB_NORMALIZE_IMAGE;
	}

#ifdef SB_CHEESBOARD_CORNERS_MARKER_SUPPORTED
	if (c.board.marker) {
		// Only available in OpenCV 4.3 and newer.
		flags += cv::CALIB_CB_MARKER;
	}
#endif

	bool found = cv::findChessboardCornersSB(gray,             // Image
	                                         c.board.dims,     // patternSize
	                                         view.current_f32, // corners
	                                         flags);           // flags

	// Do the conversion here.
	view.current_f64.clear(); // Doesn't affect capacity.
	for (const cv::Point2f &p : view.current_f32) {
		view.current_f64.emplace_back(double(p.x), double(p.y));
	}

	do_view_coverage(c, view, gray, rgb, found);

	return found;
}
376#endif
377
/*!
 * Detect a (symmetric or asymmetric) circle-grid board, and update the
 * view's measurements and coverage drawing.
 *
 * @return true if the full grid was found.
 */
static bool
do_view_circles(class Calibration &c, struct ViewState &view, cv::Mat &gray, cv::Mat &rgb)
{
	/*
	 * Fisheye requires measurement and model to be double, other functions
	 * requires them to be floats (like drawChessboardCorners). So we give
	 * in current here for highest precision and convert below.
	 */

	int flags = 0;
	if (c.board.pattern == T_BOARD_ASYMMETRIC_CIRCLES) {
		flags |= cv::CALIB_CB_ASYMMETRIC_GRID;
	}

	bool found = cv::findCirclesGrid(gray,             // Image
	                                 c.board.dims,     // patternSize
	                                 view.current_f64, // corners
	                                 flags);           // flags

	// Convert here so that displaying also works.
	view.current_f32.clear(); // Doesn't affect capacity.
	for (const cv::Point2d &p : view.current_f64) {
		view.current_f32.emplace_back(float(p.x), float(p.y));
	}

	do_view_coverage(c, view, gray, rgb, found);

	return found;
}
407
408static bool
409do_view(class Calibration &c, struct ViewState &view, cv::Mat &gray, cv::Mat &rgb)
410{
411 bool found = false;
412
413 switch (c.board.pattern) {
414 case T_BOARD_CHECKERS: //
415 found = do_view_chess(c, view, gray, rgb);
416 break;
417#ifdef SB_CHEESBOARD_CORNERS_SUPPORTED
418 case T_BOARD_SB_CHECKERS: //
419 found = do_view_sb_checkers(c, view, gray, rgb);
420 break;
421#endif
422 case T_BOARD_CIRCLES: //
423 found = do_view_circles(c, view, gray, rgb);
424 break;
425 case T_BOARD_ASYMMETRIC_CIRCLES: //
426 found = do_view_circles(c, view, gray, rgb);
427 break;
428 default: assert(false);
429 }
430
431 if (c.mirror_rgb_image) {
432 cv::flip(rgb, rgb, +1);
433 }
434
435 return found;
436}
437
/*!
 * Apply the view's undistort/rectify maps to @p rgb in place, if they have
 * been computed yet (no-op before calibration completes).
 */
static void
remap_view(class Calibration &c, struct ViewState &view, cv::Mat &rgb)
{
	if (!view.maps_valid) {
		return;
	}

	cv::remap(rgb,                 // src
	          rgb,                 // dst
	          view.map1,           // map1
	          view.map2,           // map2
	          cv::INTER_LINEAR,    // interpolation
	          cv::BORDER_CONSTANT, // borderMode
	          cv::Scalar());       // borderValue
}
453
454static void
455build_board_position(class Calibration &c)
456{
457 int cols_num = c.board.dims.width;
458 int rows_num = c.board.dims.height;
459 float size_meters = c.board.spacing_meters;
460
461 switch (c.board.pattern) {
462 case T_BOARD_CHECKERS:
463 case T_BOARD_SB_CHECKERS:
464 case T_BOARD_CIRCLES:
465 // Nothing to do.
466 break;
467 case T_BOARD_ASYMMETRIC_CIRCLES:
468 // From diagonal size to "square" size.
469 size_meters = sqrt((size_meters * size_meters) / 2.0);
470 break;
471 }
472
473 switch (c.board.pattern) {
474 case T_BOARD_CHECKERS:
475 case T_BOARD_SB_CHECKERS:
476 case T_BOARD_CIRCLES:
477 c.board.model_f32.reserve(rows_num * cols_num);
478 c.board.model_f64.reserve(rows_num * cols_num);
479 for (int i = 0; i < rows_num; ++i) {
480 for (int j = 0; j < cols_num; ++j) {
481 cv::Point3d p = {
482 j * size_meters,
483 i * size_meters,
484 0.0f,
485 };
486 c.board.model_f32.emplace_back(p);
487 c.board.model_f64.emplace_back(p);
488 }
489 }
490 break;
491 case T_BOARD_ASYMMETRIC_CIRCLES:
492 c.board.model_f32.reserve(rows_num * cols_num);
493 c.board.model_f64.reserve(rows_num * cols_num);
494 for (int i = 0; i < rows_num; ++i) {
495 for (int j = 0; j < cols_num; ++j) {
496 cv::Point3d p = {
497 (2 * j + i % 2) * size_meters,
498 i * size_meters,
499 0.0f,
500 };
501 c.board.model_f32.emplace_back(p);
502 c.board.model_f64.emplace_back(p);
503 }
504 }
505 break;
506 }
507}
508
509static void
510push_model(Calibration &c)
511{
512 c.state.board_models_f32.push_back(c.board.model_f32);
513 c.state.board_models_f64.push_back(c.board.model_f64);
514}
515
516static void
517push_measurement(ViewState &view)
518{
519 view.measured_f32.push_back(view.current_f32);
520 view.measured_f64.push_back(view.current_f64);
521 view.measured_bounds.push_back(view.current_bounds);
522}
523
524
525/*!
526 * Returns true if any one of the measurement points have moved.
527 */
528static bool
529has_measurement_moved(MeasurementF64 &last, MeasurementF64 ¤t)
530{
531 if (last.size() != current.size()) {
532 return true;
533 }
534
535 for (size_t i = 0; i < last.size(); ++i) {
536 float x = last[i].x - current[i].x;
537 float y = last[i].y - current[i].y;
538
539 // Distance squard in pixels.
540 if ((x * x + y * y) >= 3.0) {
541 return true;
542 }
543 }
544
545 return false;
546}
547
548static bool
549moved_state_check(struct ViewState &view)
550{
551 bool moved = false;
552 if (view.last_valid) {
553 moved = has_measurement_moved(view.last, view.current_f64);
554 }
555
556 // Now save the current measurement to the last one.
557 view.last = view.current_f64;
558 view.last_valid = true;
559
560 return moved;
561}
562
563/*
564 *
565 * Stereo calibration
566 *
567 */
568
569#define P(...) snprintf(c.text, sizeof(c.text), __VA_ARGS__)
570
571XRT_NO_INLINE static void
572process_stereo_samples(class Calibration &c, int cols, int rows)
573{
574 c.state.calibrated = true;
575
576 cv::Size image_size(cols, rows);
577 cv::Size new_image_size(cols, rows);
578
579 StereoCameraCalibrationWrapper wrapped(c.distortion_model);
580 wrapped.view[0].image_size_pixels.w = image_size.width;
581 wrapped.view[0].image_size_pixels.h = image_size.height;
582 wrapped.view[1].image_size_pixels = wrapped.view[0].image_size_pixels;
583
584
585 float rp_error = 0.0f;
586 if (t_camera_distortion_model_is_opencv_fisheye(c.distortion_model)) {
587 int flags = 0;
588 flags |= cv::fisheye::CALIB_FIX_SKEW;
589 flags |= cv::fisheye::CALIB_RECOMPUTE_EXTRINSIC;
590
591 // fisheye version
592 rp_error = cv::fisheye::stereoCalibrate(c.state.board_models_f64, // objectPoints
593 c.state.view[0].measured_f64, // inagePoints1
594 c.state.view[1].measured_f64, // imagePoints2
595 wrapped.view[0].intrinsics_mat, // cameraMatrix1
596 wrapped.view[0].distortion_mat, // distCoeffs1
597 wrapped.view[1].intrinsics_mat, // cameraMatrix2
598 wrapped.view[1].distortion_mat, // distCoeffs2
599 image_size, // imageSize
600 wrapped.camera_rotation_mat, // R
601 wrapped.camera_translation_mat, // T
602 flags);
603 } else if (t_camera_distortion_model_is_opencv_non_fisheye(c.distortion_model)) {
604 // non-fisheye version
605 int flags = 0;
606
607 // Insists on 32-bit floats for object points and image points
608 rp_error = cv::stereoCalibrate(c.state.board_models_f32, // objectPoints
609 c.state.view[0].measured_f32, // inagePoints1
610 c.state.view[1].measured_f32, // imagePoints2,
611 wrapped.view[0].intrinsics_mat, // cameraMatrix1
612 wrapped.view[0].distortion_mat, // distCoeffs1
613 wrapped.view[1].intrinsics_mat, // cameraMatrix2
614 wrapped.view[1].distortion_mat, // distCoeffs2
615 image_size, // imageSize
616 wrapped.camera_rotation_mat, // R
617 wrapped.camera_translation_mat, // T
618 wrapped.camera_essential_mat, // E
619 wrapped.camera_fundamental_mat, // F
620 flags); // flags
621 } else {
622 assert(!"Unsupported distortion model");
623 }
624
625 // Tell the user what has happened.
626 P("CALIBRATION DONE RP ERROR %f", rp_error);
627
628 // Preview undistortion/rectification.
629 StereoRectificationMaps maps(wrapped.base);
630 c.state.view[0].map1 = maps.view[0].rectify.remap_x;
631 c.state.view[0].map2 = maps.view[0].rectify.remap_y;
632 c.state.view[0].maps_valid = true;
633
634 c.state.view[1].map1 = maps.view[1].rectify.remap_x;
635 c.state.view[1].map2 = maps.view[1].rectify.remap_y;
636 c.state.view[1].maps_valid = true;
637
638 std::cout << "#####\n";
639 std::cout << "calibration rp_error: " << rp_error << "\n";
640 to_stdout("camera_rotation", wrapped.camera_rotation_mat);
641 to_stdout("camera_translation", wrapped.camera_translation_mat);
642 if (!t_camera_distortion_model_is_fisheye(c.distortion_model)) {
643 to_stdout("camera_essential", wrapped.camera_essential_mat);
644 to_stdout("camera_fundamental", wrapped.camera_fundamental_mat);
645 }
646 to_stdout("disparity_to_depth", maps.disparity_to_depth_mat);
647 std::cout << "#####\n";
648 to_stdout("view[0].distortion", wrapped.view[0].distortion_mat);
649 to_stdout("view[0].intrinsics", wrapped.view[0].intrinsics_mat);
650 to_stdout("view[0].projection", maps.view[0].projection_mat);
651 to_stdout("view[0].rotation", maps.view[0].rotation_mat);
652 std::cout << "#####\n";
653 to_stdout("view[1].distortion", wrapped.view[1].distortion_mat);
654 to_stdout("view[1].intrinsics", wrapped.view[1].intrinsics_mat);
655 to_stdout("view[1].projection", maps.view[1].projection_mat);
656 to_stdout("view[1].rotation", maps.view[1].rotation_mat);
657
658 // Validate that nothing has been re-allocated.
659 assert(wrapped.isDataStorageValid());
660
661 if (c.status != NULL) {
662 t_stereo_camera_calibration_reference(&c.status->stereo_data, wrapped.base);
663 }
664}
665
/*!
 * Run the mono calibration for one view on all collected samples, compute a
 * new camera matrix and undistort maps for preview, and print the results.
 */
static void
process_view_samples(class Calibration &c, struct ViewState &view, int cols, int rows)
{

	const cv::Size image_size = {cols, rows};
	double rp_error = 0.f;

	cv::Mat intrinsics_mat = {};
	cv::Mat new_intrinsics_mat = {};
	cv::Mat distortion_mat = {};

	// Optionally dump all collected points as a C array for debugging.
	if (c.dump_measurements) {
		U_LOG_RAW("...measured = (ArrayOfMeasurements){");
		for (MeasurementF32 &m : view.measured_f32) {
			U_LOG_RAW(" {");
			for (cv::Point2f &p : m) {
				U_LOG_RAW(" {%+ff, %+ff},", p.x, p.y);
			}
			U_LOG_RAW(" },");
		}
		U_LOG_RAW("};");
	}

	if (t_camera_distortion_model_is_opencv_fisheye(c.distortion_model)) {
		int crit_flag = 0;
		crit_flag |= cv::TermCriteria::EPS;
		crit_flag |= cv::TermCriteria::COUNT;
		cv::TermCriteria term_criteria = {crit_flag, 100, DBL_EPSILON};

		int flags = 0;
		flags |= cv::fisheye::CALIB_FIX_SKEW;
		flags |= cv::fisheye::CALIB_RECOMPUTE_EXTRINSIC;
#if 0
		flags |= cv::fisheye::CALIB_FIX_PRINCIPAL_POINT;
#endif

		// The fisheye model requires the double precision points.
		rp_error = cv::fisheye::calibrate(c.state.board_models_f64, // objectPoints
		                                  view.measured_f64,        // imagePoints
		                                  image_size,               // image_size
		                                  intrinsics_mat,           // K (cameraMatrix 3x3)
		                                  distortion_mat,           // D (distCoeffs 4x1)
		                                  cv::noArray(),            // rvecs
		                                  cv::noArray(),            // tvecs
		                                  flags,                    // flags
		                                  term_criteria);           // criteria

		double balance = 0.1f;

		cv::fisheye::estimateNewCameraMatrixForUndistortRectify(intrinsics_mat,     // K
		                                                        distortion_mat,     // D
		                                                        image_size,         // image_size
		                                                        cv::Matx33d::eye(), // R
		                                                        new_intrinsics_mat, // P
		                                                        balance);           // balance

		// Probably a busted work-around for busted function.
		// Forces the principal point to the image center.
		new_intrinsics_mat.at<double>(0, 2) = (cols - 1) / 2.0;
		new_intrinsics_mat.at<double>(1, 2) = (rows - 1) / 2.0;
	} else if (t_camera_distortion_model_is_opencv_non_fisheye(c.distortion_model)) {
		int flags = 0;

		// Go all out.
		flags |= cv::CALIB_THIN_PRISM_MODEL;
		flags |= cv::CALIB_RATIONAL_MODEL;
		flags |= cv::CALIB_TILTED_MODEL;

		// Insists on 32-bit floats for object and image points.
		rp_error = cv::calibrateCamera( //
		    c.state.board_models_f32,   // objectPoints
		    view.measured_f32,          // imagePoints
		    image_size,                 // imageSize
		    intrinsics_mat,             // cameraMatrix
		    distortion_mat,             // distCoeffs
		    cv::noArray(),              // rvecs
		    cv::noArray(),              // tvecs
		    flags);                     // flags

		// Currently see as much as possible of the original image.
		float alpha = 1.0;

		// Create the new camera matrix.
		new_intrinsics_mat = cv::getOptimalNewCameraMatrix(intrinsics_mat, // cameraMatrix
		                                                   distortion_mat, // distCoeffs
		                                                   image_size,     // imageSize
		                                                   alpha,          // alpha
		                                                   cv::Size(),     // newImgSize
		                                                   NULL,           // validPixROI
		                                                   false);         // centerPrincipalPoint
	} else {
		assert(!"Unsupported distortion model");
	}

	P("CALIBRATION DONE RP ERROR %f", rp_error);

	// clang-format off
	std::cout << "image_size: " << image_size << "\n";
	std::cout << "rp_error: " << rp_error << "\n";
	std::cout << "intrinsics_mat:\n" << intrinsics_mat << "\n";
	std::cout << "new_intrinsics_mat:\n" << new_intrinsics_mat << "\n";
	std::cout << "distortion_mat:\n" << distortion_mat << "\n";
	// clang-format on

	// Build the undistort preview maps for the matching model family.
	if (t_camera_distortion_model_is_opencv_fisheye(c.distortion_model)) {
		cv::fisheye::initUndistortRectifyMap(intrinsics_mat,     // K
		                                     distortion_mat,     // D
		                                     cv::Matx33d::eye(), // R
		                                     new_intrinsics_mat, // P
		                                     image_size,         // size
		                                     CV_32FC1,           // m1type
		                                     view.map1,          // map1
		                                     view.map2);         // map2

		// Set the maps as valid.
		view.maps_valid = true;
	} else if (t_camera_distortion_model_is_opencv_non_fisheye(c.distortion_model)) {
		cv::initUndistortRectifyMap( //
		    intrinsics_mat,         // K
		    distortion_mat,         // D
		    cv::noArray(),          // R
		    new_intrinsics_mat,     // P
		    image_size,             // size
		    CV_32FC1,               // m1type
		    view.map1,              // map1
		    view.map2);             // map2

		// Set the maps as valid.
		view.maps_valid = true;
	} else {
		assert(!"Unsupported distortion model");
	}

	c.state.calibrated = true;
}
798
799static void
800update_public_status(class Calibration &c, bool found)
801{
802 if (c.status != NULL) {
803 int num = (int)c.state.board_models_f32.size();
804 c.status->num_collected = num;
805 c.status->cooldown = c.state.cooldown;
806 c.status->waits_remaining = c.state.waited_for;
807 c.status->found = found;
808 }
809}
810
/*!
 * Logic for capturing a frame.
 *
 * Advances the cooldown/wait/collect state machine for a mono view, pushing
 * a new sample once the board has been detected and held still long enough.
 */
static void
do_capture_logic_mono(class Calibration &c, struct ViewState &view, bool found, cv::Mat &gray, cv::Mat &rgb)
{
	int num = (int)c.state.board_models_f32.size();
	int of = c.num_collect_total;
	P("(%i/%i) SHOW BOARD", num, of);
	update_public_status(c, found);

	// In cooldown: ask the user to move the board, count down one frame.
	if (c.state.cooldown > 0) {
		P("(%i/%i) MOVE BOARD TO NEW POSITION", num, of);
		c.state.cooldown--;
		return;
	}

	// We haven't found anything, reset to be beginning.
	if (!found) {
		c.state.waited_for = c.num_wait_for;
		c.state.collected_of_part = 0;
		view.last_valid = false;
		return;
	}

	// We are still waiting for frames.
	if (c.state.waited_for > 0) {
		P("(%i/%i) WAITING %i FRAMES", num, of, c.state.waited_for);
		c.state.waited_for--;

		// Restart the wait if the board moved during it.
		if (moved_state_check(view)) {
			P("(%i/%i) KEEP BOARD STILL!", num, of);
			c.state.waited_for = c.num_wait_for;
			c.state.collected_of_part = 0;
		}

		return;
	}

	// Optionally dump the raw frames used for this sample to disk.
	if (c.save_images) {
		char buf[512];

		snprintf(buf, 512, "gray_%ix%i_%03i.png", gray.cols, gray.rows, (int)view.measured_f32.size());
		cv::imwrite(buf, gray);

		snprintf(buf, 512, "debug_rgb_%03i.jpg", (int)view.measured_f32.size());
		cv::imwrite(buf, rgb);
	}

	// Record the model and measurement as one sample.
	push_model(c);
	push_measurement(view);

	c.state.collected_of_part++;

	P("(%i/%i) COLLECTED #%i", num, of, c.state.collected_of_part);

	// Have we collected all of the frames for one part?
	if (c.state.collected_of_part >= c.num_collect_restart) {
		c.state.waited_for = c.num_wait_for;
		c.state.collected_of_part = 0;
		c.state.cooldown = c.num_cooldown_frames;
		return;
	}
}
875
/*!
 * Capture logic for stereo frames.
 *
 * Same state machine as the mono variant, but only counts a detection when
 * both views found the board, and pushes a measurement for each view.
 * The per-view gray/rgb parameters are currently unused in the body.
 */
static void
do_capture_logic_stereo(class Calibration &c,
                        cv::Mat &gray,
                        cv::Mat &rgb,
                        bool l_found,
                        struct ViewState &l_view,
                        cv::Mat &l_gray,
                        cv::Mat &l_rgb,
                        bool r_found,
                        struct ViewState &r_view,
                        cv::Mat &r_gray,
                        cv::Mat &r_rgb)
{
	// Only a simultaneous detection in both views counts.
	bool found = l_found && r_found;

	int num = (int)c.state.board_models_f32.size();
	int of = c.num_collect_total;
	P("(%i/%i) SHOW BOARD %i %i", num, of, l_found, r_found);
	update_public_status(c, found);

	// In cooldown: ask the user to move the board, count down one frame.
	if (c.state.cooldown > 0) {
		P("(%i/%i) MOVE BOARD TO NEW POSITION", num, of);
		c.state.cooldown--;
		return;
	}

	// We haven't found anything, reset to be beginning.
	if (!found) {
		c.state.waited_for = c.num_wait_for;
		c.state.collected_of_part = 0;
		l_view.last_valid = false;
		r_view.last_valid = false;
		return;
	}

	// We are still waiting for frames.
	if (c.state.waited_for > 0) {
		P("(%i/%i) WAITING %i FRAMES", num, of, c.state.waited_for);
		c.state.waited_for--;

		// Check both views; always run both so their 'last' state
		// stays updated.
		bool l_moved = moved_state_check(l_view);
		bool r_moved = moved_state_check(r_view);
		bool moved = l_moved || r_moved;

		if (moved) {
			P("(%i/%i) KEEP BOARD STILL!", num, of);
			c.state.waited_for = c.num_wait_for;
			c.state.collected_of_part = 0;
		}

		return;
	}

	// Optionally dump the full (side-by-side) frames used for this sample.
	if (c.save_images) {
		char buf[512];

		snprintf(buf, 512, "gray_%ix%i_%03i.png", gray.cols, gray.rows, (int)c.state.board_models_f32.size());
		cv::imwrite(buf, gray);

		snprintf(buf, 512, "debug_rgb_%03i.jpg", (int)c.state.board_models_f32.size());
		cv::imwrite(buf, rgb);
	}

	// Record the model once and a measurement per view.
	push_model(c);
	push_measurement(c.state.view[0]);
	push_measurement(c.state.view[1]);

	c.state.collected_of_part++;

	P("(%i/%i) COLLECTED #%i", num, of, c.state.collected_of_part);

	// Have we collected all of the frames for one part?
	if (c.state.collected_of_part >= c.num_collect_restart) {
		c.state.waited_for = c.num_wait_for;
		c.state.collected_of_part = 0;
		c.state.cooldown = c.num_cooldown_frames;
		return;
	}
}
958
/*!
 * Make a mono frame.
 */
static void
make_calibration_frame_mono(class Calibration &c)
{
	auto &rgb = c.gui.rgb;
	auto &gray = c.gray;

	// Detect the board and update the coverage drawing.
	bool found = do_view(c, c.state.view[0], gray, rgb);

	// Advance the state of the calibration.
	do_capture_logic_mono(c, c.state.view[0], found, gray, rgb);

	// Enough samples collected, run the calibration itself.
	if (c.state.board_models_f32.size() >= c.num_collect_total) {
		process_view_samples(c, c.state.view[0], rgb.cols, rgb.rows);
	}

	// Draw text and finally send the frame off.
	print_txt(rgb, c.text, 1.5);
	send_rgb_frame(c);
}
981
/*!
 * Make a stereo frame side by side.
 */
static void
make_calibration_frame_sbs(class Calibration &c)
{
	auto &rgb = c.gui.rgb;
	auto &gray = c.gray;

	// Each eye gets half of the side-by-side image.
	int cols = rgb.cols / 2;
	int rows = rgb.rows;

	// Split left and right eyes, don't make any copies.
	cv::Mat l_gray(rows, cols, CV_8UC1, gray.data, gray.cols);
	cv::Mat r_gray(rows, cols, CV_8UC1, gray.data + cols, gray.cols);
	cv::Mat l_rgb(rows, cols, CV_8UC3, c.gui.frame->data, c.gui.frame->stride);
	cv::Mat r_rgb(rows, cols, CV_8UC3, c.gui.frame->data + 3 * cols, c.gui.frame->stride);

	// Detect the board in each eye and update coverage drawing.
	bool found_left = do_view(c, c.state.view[0], l_gray, l_rgb);
	bool found_right = do_view(c, c.state.view[1], r_gray, r_rgb);

	// Advance the state of the calibration.
	do_capture_logic_stereo(c, gray, rgb, found_left, c.state.view[0], l_gray, l_rgb, found_right, c.state.view[1],
	                        r_gray, r_rgb);

	// Enough samples collected, run the stereo calibration itself.
	if (c.state.board_models_f32.size() >= c.num_collect_total) {
		process_stereo_samples(c, cols, rows);
	}

	// Draw text and finally send the frame off.
	print_txt(rgb, c.text, 1.5);
	send_rgb_frame(c);
}
1014
/*!
 * Dispatch to the mono or side-by-side calibration path based on the
 * incoming frame's stereo format.
 */
static void
make_calibration_frame(class Calibration &c, struct xrt_frame *xf)
{
	switch (xf->stereo_format) {
	case XRT_STEREO_FORMAT_SBS: make_calibration_frame_sbs(c); break;
	case XRT_STEREO_FORMAT_NONE: make_calibration_frame_mono(c); break;
	default:
		// Unsupported layout, only show the error text.
		P("ERROR: Unknown stereo format! '%i'", xf->stereo_format);
		make_gui_str(c);
		return;
	}

	// Let any observer know the calibration has completed.
	if (c.status != NULL && c.state.calibrated) {
		c.status->finished = true;
	}
}
1031
/*!
 * Apply the per-view rectification maps to the gui frame, splitting it into
 * halves first for side-by-side stereo input.
 */
static void
make_remap_view(class Calibration &c, struct xrt_frame *xf)
{
	cv::Mat &rgb = c.gui.rgb;
	struct xrt_frame &frame = *c.gui.frame;

	switch (xf->stereo_format) {
	case XRT_STEREO_FORMAT_SBS: {
		int cols = rgb.cols / 2;
		int rows = rgb.rows;

		// Left and right halves of the frame, no copies made.
		cv::Mat l_rgb(rows, cols, CV_8UC3, frame.data, frame.stride);
		cv::Mat r_rgb(rows, cols, CV_8UC3, frame.data + 3 * cols, frame.stride);

		remap_view(c, c.state.view[0], l_rgb);
		remap_view(c, c.state.view[1], r_rgb);
	} break;
	case XRT_STEREO_FORMAT_NONE: {
		remap_view(c, c.state.view[0], rgb);
	} break;
	default:
		P("ERROR: Unknown stereo format! '%i'", xf->stereo_format);
		make_gui_str(c);
		return;
	}
}
1058
1059
1060/*
1061 *
1062 * Main functions.
1063 *
1064 */
1065
/*!
 * Consume an L8 (single channel luminance) frame: use it directly as the
 * gray image and expand it to RGB for display.
 */
XRT_NO_INLINE static void
process_frame_l8(class Calibration &c, struct xrt_frame *xf)
{

	int w = (int)xf->width;
	int h = (int)xf->height;

	// Wraps the frame's data, no copy is made.
	cv::Mat data(h, w, CV_8UC1, xf->data, xf->stride);
	c.gray = data;
	ensure_buffers_are_allocated(c, data.rows, data.cols);
	c.gui.frame->source_sequence = xf->source_sequence;

	cv::cvtColor(data, c.gui.rgb, cv::COLOR_GRAY2RGB);
}
1080
/*!
 * Consume a packed 3-channel YUV frame: convert to RGB for display, then
 * derive the gray image from the RGB conversion.
 */
XRT_NO_INLINE static void
process_frame_yuv(class Calibration &c, struct xrt_frame *xf)
{

	int w = (int)xf->width;
	int h = (int)xf->height;

	// Wraps the frame's data, no copy is made.
	cv::Mat data(h, w, CV_8UC3, xf->data, xf->stride);
	ensure_buffers_are_allocated(c, data.rows, data.cols);
	c.gui.frame->source_sequence = xf->source_sequence;

	cv::cvtColor(data, c.gui.rgb, cv::COLOR_YUV2RGB);
	cv::cvtColor(c.gui.rgb, c.gray, cv::COLOR_RGB2GRAY);
}
1095
/*!
 * Consume a YUYV (interleaved, 2 bytes per pixel) frame, producing both the
 * RGB display image and the gray detection image.
 */
XRT_NO_INLINE static void
process_frame_yuyv(class Calibration &c, struct xrt_frame *xf)
{
	/*
	 * Cleverly extract the different channels.
	 * Cr/Cb are extracted at half width.
	 */
	int w = (int)xf->width;
	int h = (int)xf->height;

	// Wraps the frame's data, no copy is made.
	cv::Mat data_full(h, w, CV_8UC2, xf->data, xf->stride);
	ensure_buffers_are_allocated(c, data_full.rows, data_full.cols);
	c.gui.frame->source_sequence = xf->source_sequence;

	cv::cvtColor(data_full, c.gui.rgb, cv::COLOR_YUV2RGB_YUYV);
	cv::cvtColor(data_full, c.gray, cv::COLOR_YUV2GRAY_YUYV);
}
1113
1114XRT_NO_INLINE static void
1115process_frame_uyvy(class Calibration &c, struct xrt_frame *xf)
1116{
1117 /*
1118 * Cleverly extract the different channels.
1119 * Cr/Cb are extracted at half width.
1120 */
1121 int w = (int)xf->width;
1122 int h = (int)xf->height;
1123
1124 cv::Mat data_full(h, w, CV_8UC2, xf->data, xf->stride);
1125 ensure_buffers_are_allocated(c, data_full.rows, data_full.cols);
1126 c.gui.frame->source_sequence = xf->source_sequence;
1127
1128 cv::cvtColor(data_full, c.gui.rgb, cv::COLOR_YUV2RGB_UYVY);
1129 cv::cvtColor(data_full, c.gray, cv::COLOR_YUV2GRAY_UYVY);
1130}
1131
1132XRT_NO_INLINE static void
1133process_frame_rgb(class Calibration &c, struct xrt_frame *xf)
1134{
1135
1136 int w = (int)xf->width;
1137 int h = (int)xf->height;
1138
1139 cv::Mat rgb_data(h, w, CV_8UC3, xf->data, xf->stride);
1140 ensure_buffers_are_allocated(c, rgb_data.rows, rgb_data.cols);
1141 c.gui.frame->source_sequence = xf->source_sequence;
1142
1143 cv::cvtColor(rgb_data, c.gray, cv::COLOR_RGB2GRAY);
1144 rgb_data.copyTo(c.gui.rgb);
1145}
1146
1147XRT_NO_INLINE static void
1148process_load_image(class Calibration &c, struct xrt_frame *xf)
1149{
1150 char buf[512];
1151
1152 // We need to change the settings for frames to make it work.
1153 uint32_t num_collect_restart = 1;
1154 uint32_t num_cooldown_frames = 0;
1155 uint32_t num_wait_for = 0;
1156
1157 std::swap(c.num_collect_restart, num_collect_restart);
1158 std::swap(c.num_cooldown_frames, num_cooldown_frames);
1159 std::swap(c.num_wait_for, num_wait_for);
1160
1161 for (uint32_t i = 0; i < c.load.num_images; i++) {
1162 // Early out if the user requested less images.
1163 if (c.state.calibrated) {
1164 break;
1165 }
1166
1167 snprintf(buf, 512, "gray_%ux%u_%03i.png", xf->width, xf->height, i);
1168 c.gray = cv::imread(buf, cv::IMREAD_GRAYSCALE);
1169
1170 if (c.gray.rows == 0 || c.gray.cols == 0) {
1171 U_LOG_E("Could not find image '%s'!", buf);
1172 continue;
1173 }
1174
1175 if (c.gray.rows != (int)xf->height || c.gray.cols != (int)xf->width) {
1176 U_LOG_E(
1177 "Image size does not match frame size! Image: "
1178 "(%ix%i) Frame: (%ux%u)",
1179 c.gray.cols, c.gray.rows, xf->width, xf->height);
1180 continue;
1181 }
1182
1183 // Create a new RGB image and then copy the gray data to it.
1184 refresh_gui_frame(c, c.gray.rows, c.gray.cols);
1185 cv::cvtColor(c.gray, c.gui.rgb, cv::COLOR_GRAY2RGB);
1186
1187 if (c.stereo_sbs) {
1188 xf->stereo_format = XRT_STEREO_FORMAT_SBS;
1189 }
1190
1191 // Call the normal frame processing now.
1192 make_calibration_frame(c, xf);
1193 }
1194
1195 // Restore settings.
1196 c.num_collect_restart = num_collect_restart;
1197 c.num_cooldown_frames = num_cooldown_frames;
1198 c.num_wait_for = num_wait_for;
1199
1200 c.load.enabled = false;
1201}
1202
1203
1204/*
1205 *
1206 * Interface functions.
1207 *
1208 */
1209
extern "C" void
t_calibration_frame(struct xrt_frame_sink *xsink, struct xrt_frame *xf)
{
	// The sink is embedded first in Calibration, so this cast is valid.
	auto &c = *(class Calibration *)xsink;

	// If image loading was requested, consume the on-disk images first;
	// process_load_image clears load.enabled when it finishes.
	if (c.load.enabled) {
		process_load_image(c, xf);
	}

	// Fill both c.gui.rgb and c.gray with the data we got.
	switch (xf->format) {
	case XRT_FORMAT_YUV888: process_frame_yuv(c, xf); break;
	case XRT_FORMAT_YUYV422: process_frame_yuyv(c, xf); break;
	case XRT_FORMAT_UYVY422: process_frame_uyvy(c, xf); break;
	case XRT_FORMAT_L8: process_frame_l8(c, xf); break;
	case XRT_FORMAT_R8G8B8: process_frame_rgb(c, xf); break;
	default:
		// Unsupported format: report it in the GUI text and bail.
		P("ERROR: Bad format '%s'", u_format_str(xf->format));
		make_gui_str(c);
		return;
	}

	// Don't do anything if we are done.
	if (c.state.calibrated) {
		// Show the rectified/undistorted result instead of collecting.
		make_remap_view(c, xf);

		print_txt(c.gui.rgb, c.text, 1.5);

		send_rgb_frame(c);
		return;
	}

	// Clear our gui frame.
	if (c.clear_frame) {
		cv::rectangle(c.gui.rgb, cv::Point2f(0, 0), cv::Point2f(c.gui.rgb.cols, c.gui.rgb.rows),
		              cv::Scalar(0, 0, 0), -1, 0);
	}

	// Still collecting: run the normal measurement path.
	make_calibration_frame(c, xf);
}
1250
1251
1252/*
1253 *
1254 * Exported functions.
1255 *
1256 */
1257
extern "C" int
t_calibration_stereo_create(struct xrt_frame_context *xfctx,
                            const struct t_calibration_params *params,
                            struct t_calibration_status *status,
                            struct xrt_frame_sink *gui,
                            struct xrt_frame_sink **out_sink)
{
#ifndef SB_CHEESBOARD_CORNERS_SUPPORTED
	// SB chessboard detection needs OpenCV >= 4.0; hard error.
	if (params->pattern == T_BOARD_SB_CHECKERS) {
		U_LOG_E("OpenCV %u.%u doesn't support SB chessboard!", CV_MAJOR_VERSION, CV_MINOR_VERSION);
		return -1;
	}
#endif
#ifndef SB_CHEESBOARD_CORNERS_MARKER_SUPPORTED
	// The marker option needs OpenCV >= 4.3; warn but keep going without it.
	if (params->pattern == T_BOARD_SB_CHECKERS && params->sb_checkers.marker) {
		U_LOG_W("OpenCV %u.%u doesn't support SB chessboard marker option!", CV_MAJOR_VERSION,
		        CV_MINOR_VERSION);
	}
#endif

	// NOTE(review): raw new with no matching delete here — presumably the
	// sink/frame-context machinery owns and destroys this; confirm lifetime.
	auto &c = *(new Calibration());

	// Basic setup.
	c.gui.sink = gui;
	c.base.push_frame = t_calibration_frame;
	*out_sink = &c.base;

	// Copy the parameters.
	c.stereo_sbs = params->stereo_sbs;
	c.board.pattern = params->pattern;
	switch (params->pattern) {
	case T_BOARD_CHECKERS:
		// Detection works on inner corners: one less than the square count.
		c.board.dims = {
		    params->checkers.cols - 1,
		    params->checkers.rows - 1,
		};
		c.board.spacing_meters = params->checkers.size_meters;
		c.subpixel_enable = params->checkers.subpixel_enable;
		c.subpixel_size = params->checkers.subpixel_size;
		break;
	case T_BOARD_SB_CHECKERS:
		c.board.dims = {
		    params->sb_checkers.cols,
		    params->sb_checkers.rows,
		};
		c.board.spacing_meters = params->sb_checkers.size_meters;
		c.board.marker = params->sb_checkers.marker;
		c.board.normalize_image = params->sb_checkers.normalize_image;
		break;
	case T_BOARD_CIRCLES:
		c.board.dims = {
		    params->circles.cols,
		    params->circles.rows,
		};
		c.board.spacing_meters = params->circles.distance_meters;
		break;
	case T_BOARD_ASYMMETRIC_CIRCLES:
		c.board.dims = {
		    params->asymmetric_circles.cols,
		    params->asymmetric_circles.rows,
		};
		c.board.spacing_meters = params->asymmetric_circles.diagonal_distance_meters;
		break;
	default: assert(false);
	}
	c.num_cooldown_frames = params->num_cooldown_frames;
	c.num_wait_for = params->num_wait_for;
	c.num_collect_total = params->num_collect_total;
	c.num_collect_restart = params->num_collect_restart;
	c.load.enabled = params->load.enabled;
	c.load.num_images = params->load.num_images;
	c.mirror_rgb_image = params->mirror_rgb_image;
	c.save_images = params->save_images;
	c.status = status;
	// Fisheye selects the Kannala-Brandt 4 model, otherwise radtan-5.
	c.distortion_model = params->use_fisheye ? T_DISTORTION_FISHEYE_KB4 : T_DISTORTION_OPENCV_RADTAN_5;


	// Setup a initial message.
	P("Waiting for camera");
	make_gui_str(c);

	// Each optional debug helper wraps the current *out_sink and replaces
	// it, building a chain in front of the calibration sink.
	// NOTE(review): ret keeps only the last helper's result; earlier
	// return values are overwritten — confirm that is intended.
	int ret = 0;
	if (debug_get_bool_option_hsv_filter()) {
		ret = t_debug_hsv_filter_create(xfctx, *out_sink, out_sink);
	}

	if (debug_get_bool_option_hsv_picker()) {
		ret = t_debug_hsv_picker_create(xfctx, *out_sink, out_sink);
	}

	if (debug_get_bool_option_hsv_viewer()) {
		ret = t_debug_hsv_viewer_create(xfctx, *out_sink, out_sink);
	}

	// Ensure we only get rgb, yuv, yuyv, uyvy or l8 frames.
	u_sink_create_to_rgb_yuv_yuyv_uyvy_or_l8(xfctx, *out_sink, out_sink);


	// Build the board model.
	build_board_position(c);

	// Pre allocate
	c.state.view[0].current_f32.reserve(c.board.model_f32.size());
	c.state.view[0].current_f64.reserve(c.board.model_f64.size());
	c.state.view[1].current_f32.reserve(c.board.model_f32.size());
	c.state.view[1].current_f64.reserve(c.board.model_f64.size());

#if 0
	c.state.view[0].measured = (ArrayOfMeasurements){
	};

	c.state.view[1].measured = (ArrayOfMeasurements){
	};

	for (Measurement &m : c.state.view[0].measured) {
		(void)m;
		push_model(c);
	}
#endif


	return ret;
}
1381
1382//! Helper for NormalizedCoordsCache constructors
1383static inline std::vector<cv::Vec2f>
1384generateInputCoordsAndReserveOutputCoords(const cv::Size &size, std::vector<cv::Vec2f> &outputCoords)
1385{
1386 std::vector<cv::Vec2f> inputCoords;
1387
1388 const auto n = size.width * size.height;
1389 assert(n != 0);
1390 inputCoords.reserve(n);
1391 for (int row = 0; row < size.height; ++row) {
1392 for (int col = 0; col < size.width; ++col) {
1393 inputCoords.emplace_back(col, row);
1394 }
1395 }
1396 outputCoords.reserve(inputCoords.size());
1397 return inputCoords;
1398}
1399
1400//! Helper for NormalizedCoordsCache constructors
1401static inline void
1402populateCacheMats(const cv::Size &size,
1403 const std::vector<cv::Vec2f> &inputCoords,
1404 const std::vector<cv::Vec2f> &outputCoords,
1405 cv::Mat_<float> &cacheX,
1406 cv::Mat_<float> &cacheY)
1407{
1408 assert(size.height != 0);
1409 assert(size.width != 0);
1410 cacheX.create(size);
1411 cacheY.create(size);
1412 const auto n = size.width * size.height;
1413 // Populate the cache matrices
1414 for (int i = 0; i < n; ++i) {
1415 auto input = cv::Point{int(inputCoords[i][0]), int(inputCoords[i][1])};
1416 cacheX(input) = outputCoords[i][0];
1417 cacheY(input) = outputCoords[i][1];
1418 }
1419}
1420
1421NormalizedCoordsCache::NormalizedCoordsCache(cv::Size size, // NOLINT // small, pass by value
1422 t_camera_distortion_model distortion_model,
1423 cv::InputArray intrinsics,
1424 cv::InputArray distortion,
1425 cv::InputArray rectification,
1426 cv::InputArray new_camera_or_projection_matrix)
1427{
1428 std::vector<cv::Vec2f> outputCoords;
1429 std::vector<cv::Vec2f> inputCoords = generateInputCoordsAndReserveOutputCoords(size, outputCoords);
1430
1431 // Undistort/reproject those coordinates in one call, to make use of
1432 // cached internal/intermediate computations.
1433 if (t_camera_distortion_model_is_opencv_fisheye(distortion_model)) {
1434 cv::fisheye::undistortPoints(inputCoords, outputCoords, intrinsics, distortion, rectification,
1435 new_camera_or_projection_matrix);
1436 } else if (t_camera_distortion_model_is_opencv_non_fisheye(distortion_model)) {
1437 cv::undistortPoints(inputCoords, outputCoords, intrinsics, distortion, rectification,
1438 new_camera_or_projection_matrix);
1439 } else {
1440 throw std::invalid_argument("unsupported distortion model");
1441 }
1442
1443 populateCacheMats(size, inputCoords, outputCoords, cacheX_, cacheY_);
1444}
1445
1446cv::Vec2f
1447NormalizedCoordsCache::getNormalizedImageCoords(
1448 // NOLINTNEXTLINE // small, pass by value
1449 cv::Point2f origCoords) const
1450{
1451 /*
1452 * getRectSubPix is more strict than the docs would imply:
1453 *
1454 * - Source must be 1 or 3 channels
1455 * - Can sample from u8 into u8, u8 into f32, or f32 into f32 - that's
1456 * it (though the latter is provided by a template function internally
1457 * so could be extended...)
1458 */
1459 cv::Mat patch;
1460 cv::getRectSubPix(cacheX_, cv::Size(1, 1), origCoords, patch);
1461 auto x = patch.at<float>(0, 0);
1462 cv::getRectSubPix(cacheY_, cv::Size(1, 1), origCoords, patch);
1463 auto y = patch.at<float>(0, 0);
1464 return {x, y};
1465}
1466
1467cv::Vec3f
1468NormalizedCoordsCache::getNormalizedVector(cv::Point2f origCoords) const
1469{
1470 // cameras traditionally look along -z, so we want negative sqrt
1471 auto pt = getNormalizedImageCoords(std::move(origCoords));
1472 auto z = -std::sqrt(1.f - pt.dot(pt));
1473 return {pt[0], pt[1], z};
1474}
1475
1476} // namespace xrt::auxiliary::tracking