// The open source OpenXR runtime.
1// Copyright 2019, Collabora, Ltd.
2// SPDX-License-Identifier: BSL-1.0
3/*!
4 * @file
5 * @brief PSVR tracker code.
6 * @author Jakob Bornecrantz <jakob@collabora.com>
7 * @author Pete Black <pete.black@collabora.com>
8 * @ingroup aux_tracking
9 */
10
11#include "xrt/xrt_tracking.h"
12
13#include "tracking/t_tracking.h"
14#include "tracking/t_calibration_opencv.hpp"
15#include "tracking/t_helper_debug_sink.hpp"
16
17#include "util/u_misc.h"
18#include "util/u_debug.h"
19#include "util/u_frame.h"
20#include "util/u_format.h"
21#include "util/u_var.h"
22#include "util/u_logging.h"
23
24#include "math/m_mathinclude.h"
25#include "math/m_api.h"
26#include "math/m_permutation.h"
27#include "math/m_imu_3dof.h"
28
29#include "os/os_threading.h"
30
#include <stdio.h>
#include <assert.h>
#include <pthread.h>

#include <algorithm>
#include <cmath>
34
35#include <Hungarian.hpp>
36#include <Eigen/Eigen>
37#include <opencv2/opencv.hpp>
38
39
//! Log level is read once from the PSVR_TRACKING_LOG env var (default: warn).
DEBUG_GET_ONCE_LOG_OPTION(psvr_log, "PSVR_TRACKING_LOG", U_LOGGING_WARN)

// Logging helpers; they rely on a variable `t` with a `log_level` member
// being in scope at the call site.
#define PSVR_TRACE(...) U_LOG_IFL_T(t.log_level, __VA_ARGS__)
#define PSVR_DEBUG(...) U_LOG_IFL_D(t.log_level, __VA_ARGS__)
#define PSVR_INFO(...) U_LOG_IFL_I(t.log_level, __VA_ARGS__)
#define PSVR_WARN(...) U_LOG_IFL_W(t.log_level, __VA_ARGS__)
#define PSVR_ERROR(...) U_LOG_IFL_E(t.log_level, __VA_ARGS__)
47
48
/*!
 * How many LEDs in the tracked configuration
 */
#define PSVR_NUM_LEDS 7
/*!
 * How many LEDs do we need to do an optical solve/correction
 */
#define PSVR_OPTICAL_SOLVE_THRESH 5
/*!
 * If a potential match vertex is further than this distance from the
 * measurement, reject the match - do not set too low
 */
#define PSVR_DISAMBIG_REJECT_DIST 0.02f
/*!
 * If a potential match vertex angle differs by more than this from the
 * measurement, reject the match - do not set too low
 */
#define PSVR_DISAMBIG_REJECT_ANG 0.7f
/*!
 * Cutoff distance for keeping the id for a blob from one frame to the next
 */
#define PSVR_SEARCH_RADIUS 0.043f

/*!
 * Give up and reset after this many corrections that should have been
 * converging but were not (see bad_correction_count).
 */
#define PSVR_MAX_BAD_CORR 10
/*!
 * The magnitude of the correction relative to the previous correction must
 * be below this value to contribute towards lock acquisition.
 */
#define PSVR_BAD_CORRECTION_THRESH 0.1f
#define PSVR_CORRECTION_THRESH 0.05f

/*!
 * Fast rate at which we 'drift' our imu-solved rotation towards our
 * optically solved correction to avoid jumps.
 */
#define PSVR_FAST_CORRECTION 0.05f

/*!
 * Slow rate variant of the above - used to apply the correction more
 * gently (e.g. after tracking is lost).
 */
#define PSVR_SLOW_CORRECTION 0.005f

// Kalman filter coefficients.
#define PSVR_BLOB_PROCESS_NOISE 0.1f // R
#define PSVR_BLOB_MEASUREMENT_NOISE 1.0f // Q

#define PSVR_POSE_PROCESS_NOISE 0.5f // R
//! Our measurements are quite noisy so we need to smooth heavily
#define PSVR_POSE_MEASUREMENT_NOISE 100.0f

//! Distance-from-median threshold for discarding outlier blobs.
#define PSVR_OUTLIER_THRESH 0.17f
//! Distance threshold below which two 3D points are merged into one.
#define PSVR_MERGE_THRESH 0.06f

//! hold the previously recognised configuration unless we depart significantly
#define PSVR_HOLD_THRESH 0.086f

// Define these to dump comprehensive optical and imu data to
// /tmp/psvr_dump.txt

// Debug define(s), always off.
#undef PSVR_DUMP_FOR_OFFLINE_ANALYSIS
#undef PSVR_DUMP_IMU_FOR_OFFLINE_ANALYSIS
111
112using namespace xrt::auxiliary::tracking;
113
114//! Namespace for PSVR tracking implementation
115namespace xrt::auxiliary::tracking::psvr {
116
/*!
 * Rough classification of a detected blob; used to bias/reject candidate
 * LED assignments during matching.
 */
typedef enum blob_type
{
	BLOB_TYPE_UNKNOWN,
	BLOB_TYPE_SIDE,  //!< Blob believed to be a side LED.
	BLOB_TYPE_FRONT, //!< Blob believed to be a front LED.
	BLOB_TYPE_REAR, // currently unused
} blob_type_t;
124
/*!
 * A single triangulated blob: its 3D position plus the stereo keypoints
 * it was derived from.
 */
typedef struct blob_point
{
	cv::Point3f p;     // 3d coordinate
	cv::KeyPoint lkp;  // left keypoint
	cv::KeyPoint rkp;  // right keypoint
	blob_type_t btype; // blob type
} blob_point_t;
132
/*!
 * Per-camera (left/right) state: calibration, undistort/rectify remap
 * tables, and the keypoints detected in the current frame.
 */
struct View
{
	cv::Mat undistort_rectify_map_x; //!< Remap table, x coordinates.
	cv::Mat undistort_rectify_map_y; //!< Remap table, y coordinates.

	cv::Matx33d intrinsics;
	cv::Mat distortion; // size may vary
	enum t_camera_distortion_model distortion_model;

	//! Blob keypoints detected in this view's current frame.
	std::vector<cv::KeyPoint> keypoints;

	//! Undistorted and rectified copy of the current frame.
	cv::Mat frame_undist_rectified;

	//! Copy intrinsics/distortion and rectification maps out of the calibration data.
	void
	populate_from_calib(t_camera_calibration &calib, const RemapPair &rectification)
	{
		CameraCalibrationWrapper wrap(calib);
		intrinsics = wrap.intrinsics_mat;
		distortion = wrap.distortion_mat.clone();
		distortion_model = wrap.distortion_model;

		undistort_rectify_map_x = rectification.remap_x;
		undistort_rectify_map_y = rectification.remap_y;
	}
};
158
/*!
 * Identifies each physical LED on the headset.
 * TL/TR/C/BL/BR appear to be top-left, top-right, center, bottom-left and
 * bottom-right front LEDs, SL/SR the side LEDs - TODO confirm against the
 * model vertex setup.
 */
typedef enum led_tag
{
	TAG_TL,
	TAG_TR,
	TAG_C,
	TAG_BL,
	TAG_BR,
	TAG_SL,
	TAG_SR
} led_tag_t;
169
/*!
 * A vertex of the LED model, with the ordering operators required by the
 * permutation helper for generating candidate matchings.
 */
typedef struct model_vertex
{
	int32_t vertex_index;     //!< Index into the model (also used as the tag).
	Eigen::Vector4f position; //!< Model-space position (homogeneous).

	led_tag_t tag;
	bool active;

	// NOTE: these operator overloads are required for
	// comparisons with the permutations library

	bool
	operator<(const model_vertex &mv) const
	{
		return (vertex_index < mv.vertex_index);
	}
	bool
	operator>(const model_vertex &mv) const
	{
		return (vertex_index > mv.vertex_index);
	}

} model_vertex_t;
193
/*!
 * One measured point in the angle/distance space used to score candidate
 * LED configurations.
 */
typedef struct match_data
{
	float angle = {};          // angle from reference vector
	float distance = {};       // distance from base of reference vector, normalized by its length
	int32_t vertex_index = {}; // index also known as tag; -1 while unassigned
	Eigen::Vector4f position = Eigen::Vector4f::Zero(); // 3d position of vertex
	blob_point_t src_blob = {};                         // blob this vertex was derived from
} match_data_t;
202
/*!
 * One candidate LED configuration: a set of vertices with their
 * angle/distance measurements, compared against the measured blobs.
 */
typedef struct match_model
{
	//! Collection of vertices and associated data.
	std::vector<match_data_t> measurements;
} match_model_t;
208
/*!
 * Main PSVR tracking class.
 */
class TrackerPSVR
{
public:
	EIGEN_MAKE_ALIGNED_OPERATOR_NEW
	struct xrt_tracked_psvr base = {};
	struct xrt_frame_sink sink = {};
	struct xrt_frame_node node = {};

	//! Logging stuff.
	enum u_logging_level log_level;

	//! Frame waiting to be processed.
	struct xrt_frame *frame;

	//! Thread and lock helper.
	struct os_thread_helper oth;

	//! Have we received a new IMU sample.
	bool has_imu = false;

	//! Timestamp of the most recent IMU sample.
	timepoint_ns last_imu{0};

	//! IMU-driven 3dof fusion state.
	struct
	{
		struct xrt_vec3 pos = {};
		struct m_imu_3dof imu_3dof;
	} fusion;

	//! Latest optically derived position/rotation.
	struct
	{
		struct xrt_vec3 pos = {};
		struct xrt_quat rot = {};
	} optical;

	Eigen::Quaternionf target_optical_rotation_correction; // the calculated rotation to
	                                                       // correct the imu
	Eigen::Quaternionf optical_rotation_correction;        // currently applied (interpolated
	                                                       // towards target) correction
	Eigen::Matrix4f corrected_imu_rotation; // imu rotation with correction applied
	Eigen::Quaternionf axis_align_rot;      // used to rotate imu/tracking coordinates to world

	model_vertex_t model_vertices[PSVR_NUM_LEDS]; // the model we match our
	                                              // measurements against
	std::vector<match_data_t> last_vertices;      // the last solved position of the HMD

	//! Index into `matches` of the configuration used by the last optical solve.
	uint32_t last_optical_model;

	//! One position Kalman filter per tracked LED.
	cv::KalmanFilter track_filters[PSVR_NUM_LEDS];


	cv::KalmanFilter pose_filter; // we filter the final pose position of
	                              // the HMD to smooth motion

	//! Per-camera (left/right) calibration and frame state.
	View view[2];
	bool calibrated;

	HelperDebugSink debug = {HelperDebugSink::AllAvailable};

	//! Stereo geometry: disparity-to-depth reprojection and right-camera extrinsics.
	cv::Mat disparity_to_depth;
	cv::Vec3d r_cam_translation;
	cv::Matx33d r_cam_rotation;

	//! Blob detector plus the per-eye keypoints of the current frame.
	cv::Ptr<cv::SimpleBlobDetector> sbd;
	std::vector<cv::KeyPoint> l_blobs, r_blobs;
	//! Precomputed candidate LED configurations to match against.
	std::vector<match_model_t> matches;

	// we refine our measurement by rejecting outliers and merging 'too
	// close' points
	std::vector<blob_point_t> world_points;
	std::vector<blob_point_t> pruned_points;
	std::vector<blob_point_t> merged_points;

	std::vector<match_data_t> match_vertices;

	float avg_optical_correction; // used to converge to a 'lock' correction
	                              // rotation

	bool done_correction; // set after a 'lock' is acquired

	//! Current maximum correction rate (fast or slow; see PSVR_*_CORRECTION).
	float max_correction;

	// if we have made a lot of optical measurements that *should*
	// be converging, but have not - we should reset
	uint32_t bad_correction_count;

	//! Last solved pose; returned again when tracking is lost.
	Eigen::Matrix4f last_pose;

	// presumably the number of the last processed frame - TODO confirm
	uint64_t last_frame;

	Eigen::Vector4f model_center; // center of rotation

#ifdef PSVR_DUMP_FOR_OFFLINE_ANALYSIS
	FILE *dump_file;
#endif
};
307
308static float
309dist_3d(Eigen::Vector4f a, Eigen::Vector4f b)
310{
311 return sqrt((a[0] - b[0]) * (a[0] - b[0]) + (a[1] - b[1]) * (a[1] - b[1]) + (a[2] - b[2]) * (a[2] - b[2]));
312}
313
314static float
315dist_3d_cv(const cv::Point3f &a, const cv::Point3f &b)
316{
317 return sqrt((a.x - b.x) * (a.x - b.x) + (a.y - b.y) * (a.y - b.y) + (a.z - b.z) * (a.z - b.z));
318}
319
320static void
321init_filter(cv::KalmanFilter &kf, float process_cov, float meas_cov, float dt)
322{
323 kf.init(6, 3);
324 kf.transitionMatrix =
325 (cv::Mat_<float>(6, 6) << 1.0, 0.0, 0.0, dt, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, dt, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0,
326 dt, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0);
327
328 cv::setIdentity(kf.measurementMatrix, cv::Scalar::all(1.0f));
329 cv::setIdentity(kf.errorCovPost, cv::Scalar::all(0.0f));
330
331 // our filter parameters set the process and measurement noise
332 // covariances.
333
334 cv::setIdentity(kf.processNoiseCov, cv::Scalar::all(process_cov));
335 cv::setIdentity(kf.measurementNoiseCov, cv::Scalar::all(meas_cov));
336}
337
338static void
339filter_predict(std::vector<match_data_t> *pose, cv::KalmanFilter *filters, float dt)
340{
341 for (uint32_t i = 0; i < PSVR_NUM_LEDS; i++) {
342 match_data_t current_led;
343 cv::KalmanFilter *current_kf = filters + i;
344
345 // set our dt components in the transition matrix
346 current_kf->transitionMatrix.at<float>(0, 3) = dt;
347 current_kf->transitionMatrix.at<float>(1, 4) = dt;
348 current_kf->transitionMatrix.at<float>(2, 5) = dt;
349
350 current_led.vertex_index = i;
351 // current_led->tag = (led_tag_t)(i);
352 cv::Mat prediction = current_kf->predict();
353 current_led.position[0] = prediction.at<float>(0, 0);
354 current_led.position[1] = prediction.at<float>(1, 0);
355 current_led.position[2] = prediction.at<float>(2, 0);
356 pose->push_back(current_led);
357 }
358}
359
360static void
361filter_update(std::vector<match_data_t> *pose, cv::KalmanFilter *filters, float dt)
362{
363 for (uint32_t i = 0; i < PSVR_NUM_LEDS; i++) {
364 match_data_t *current_led = &pose->at(i);
365 cv::KalmanFilter *current_kf = filters + i;
366
367 // set our dt components in the transition matrix
368 current_kf->transitionMatrix.at<float>(0, 3) = dt;
369 current_kf->transitionMatrix.at<float>(1, 4) = dt;
370 current_kf->transitionMatrix.at<float>(2, 5) = dt;
371
372 current_led->vertex_index = i;
373
374 cv::Mat measurement = cv::Mat(3, 1, CV_32F);
375 measurement.at<float>(0, 0) = current_led->position[0];
376 measurement.at<float>(1, 0) = current_led->position[1];
377 measurement.at<float>(2, 0) = current_led->position[2];
378 current_kf->correct(measurement);
379 }
380}
381
382static void
383pose_filter_predict(Eigen::Vector4f *pose, cv::KalmanFilter *filter, float dt)
384{
385 // set our dt components in the transition matrix
386 filter->transitionMatrix.at<float>(0, 3) = dt;
387 filter->transitionMatrix.at<float>(1, 4) = dt;
388 filter->transitionMatrix.at<float>(2, 5) = dt;
389
390 cv::Mat prediction = filter->predict();
391 (*pose)[0] = prediction.at<float>(0, 0);
392 (*pose)[1] = prediction.at<float>(1, 0);
393 (*pose)[2] = prediction.at<float>(2, 0);
394}
395
396static void
397pose_filter_update(Eigen::Vector4f *position, cv::KalmanFilter *filter, float dt)
398{
399 filter->transitionMatrix.at<float>(0, 3) = dt;
400 filter->transitionMatrix.at<float>(1, 4) = dt;
401 filter->transitionMatrix.at<float>(2, 5) = dt;
402
403 cv::Mat measurement = cv::Mat(3, 1, CV_32F);
404 measurement.at<float>(0, 0) = position->x();
405 measurement.at<float>(1, 0) = position->y();
406 measurement.at<float>(2, 0) = position->z();
407 filter->correct(measurement);
408}
409
410static bool
411match_possible(match_model_t *match)
412{
413 //@todo - this is currently unimplemented
414 // check if this match makes sense - we can remove
415 // unobservable combinations without checking them.
416
417
418 // we cannot see SR,SL at the same time so remove any matches that
419 // contain them both in the first 5 slots
420 return true;
421}
422
423static void
424verts_to_measurement(std::vector<blob_point_t> *meas_data, std::vector<match_data_t> *match_vertices)
425{
426 // create a data structure that holds the inter-point distances
427 // and angles we will use to match the pose
428
429 match_vertices->clear();
430 if (meas_data->size() < PSVR_OPTICAL_SOLVE_THRESH) {
431 for (uint32_t i = 0; i < meas_data->size(); i++) {
432 match_data_t md;
433 md.vertex_index = -1;
434 md.position =
435 Eigen::Vector4f(meas_data->at(i).p.x, meas_data->at(i).p.y, meas_data->at(i).p.z, 1.0f);
436 md.src_blob = meas_data->at(i);
437 match_vertices->push_back(md);
438 }
439
440 return;
441 }
442
443 blob_point_t ref_a = meas_data->at(0);
444 blob_point_t ref_b = meas_data->at(1);
445 cv::Point3f ref_vec = ref_b.p - ref_a.p;
446 float ref_len = dist_3d_cv(ref_a.p, ref_b.p);
447
448 for (uint32_t i = 0; i < meas_data->size(); i++) {
449 blob_point_t vp = meas_data->at(i);
450 cv::Point3f point_vec = vp.p - ref_a.p;
451 match_data_t md;
452 md.vertex_index = -1;
453 md.position = Eigen::Vector4f(vp.p.x, vp.p.y, vp.p.z, 1.0f);
454 Eigen::Vector3f ref_vec3(ref_vec.x, ref_vec.y, ref_vec.z);
455 Eigen::Vector3f point_vec3(point_vec.x, point_vec.y, point_vec.z);
456 Eigen::Vector3f vp_pos3(vp.p.x, vp.p.y, vp.p.z);
457
458 if (i != 0) {
459 Eigen::Vector3f plane_norm = ref_vec3.cross(point_vec3).normalized();
460 if (plane_norm.z() > 0) {
461 md.angle = -1 * acos(point_vec3.normalized().dot(ref_vec3.normalized()));
462 } else {
463 md.angle = acos(point_vec3.normalized().dot(ref_vec3.normalized()));
464 }
465
466 md.distance = dist_3d_cv(vp.p, ref_a.p) / ref_len;
467 } else {
468 md.angle = 0.0f;
469 md.distance = 0.0f;
470 }
471 // fix up any NaNs
472 if (md.angle != md.angle) {
473 md.angle = 0.0f;
474 }
475 if (md.distance != md.distance) {
476 md.distance = 0.0f;
477 }
478 md.src_blob = vp;
479 match_vertices->push_back(md);
480 }
481}
482
483static float
484last_diff(TrackerPSVR &t, std::vector<match_data_t> *meas_pose, std::vector<match_data_t> *last_pose)
485{
486 // compute the aggregate difference (sum of distances between matching
487 // indices)between two poses
488
489 float diff = 0.0f;
490 for (uint32_t i = 0; i < meas_pose->size(); i++) {
491 uint32_t meas_index = meas_pose->at(i).vertex_index;
492 for (uint32_t j = 0; j < last_pose->size(); j++) {
493 uint32_t last_index = last_pose->at(j).vertex_index;
494 if (last_index == meas_index) {
495 float d = fabs(
496 dist_3d(meas_pose->at(meas_index).position, last_pose->at(last_index).position));
497 diff += d;
498 }
499 }
500 }
501 return diff / meas_pose->size();
502}
503
504
505static void
506remove_outliers(std::vector<blob_point_t> *orig_points, std::vector<blob_point_t> *pruned_points, float outlier_thresh)
507{
508
509 if (orig_points->empty()) {
510 return;
511 }
512
513 std::vector<blob_point_t> temp_points;
514
515 // immediately prune anything that is measured as
516 // 'behind' the camera - often reflections or lights in the room etc.
517
518 for (uint32_t i = 0; i < orig_points->size(); i++) {
519 cv::Point3f p = orig_points->at(i).p;
520 if (p.z < 0) {
521 temp_points.push_back(orig_points->at(i));
522 }
523 }
524 if (temp_points.empty()) {
525 return;
526 }
527
528 // compute the 3d median of the points, and reject anything further away
529 // than a
530 // threshold distance
531
532
533 std::vector<float> x_values;
534 std::vector<float> y_values;
535 std::vector<float> z_values;
536 for (uint32_t i = 0; i < temp_points.size(); i++) {
537 x_values.push_back(temp_points[i].p.x);
538 y_values.push_back(temp_points[i].p.y);
539 z_values.push_back(temp_points[i].p.z);
540 }
541
542 std::nth_element(x_values.begin(), x_values.begin() + x_values.size() / 2, x_values.end());
543 float median_x = x_values[x_values.size() / 2];
544 std::nth_element(y_values.begin(), y_values.begin() + y_values.size() / 2, y_values.end());
545 float median_y = y_values[y_values.size() / 2];
546 std::nth_element(z_values.begin(), z_values.begin() + z_values.size() / 2, z_values.end());
547 float median_z = z_values[z_values.size() / 2];
548
549 for (uint32_t i = 0; i < temp_points.size(); i++) {
550 float error_x = temp_points[i].p.x - median_x;
551 float error_y = temp_points[i].p.y - median_y;
552 float error_z = temp_points[i].p.z - median_z;
553
554 float rms_error = sqrt((error_x * error_x) + (error_y * error_y) + (error_z * error_z));
555
556 // U_LOG_D("%f %f %f %f %f %f", temp_points[i].p.x,
557 // temp_points[i].p.y, temp_points[i].p.z, error_x,
558 // error_y, error_z);
559 if (rms_error < outlier_thresh) {
560 pruned_points->push_back(temp_points[i]);
561 }
562 }
563}
564
//! A pair of blob indices whose 3D points lie within the merge threshold.
struct close_pair
{
	int index_a;
	int index_b;
	float dist; //!< Distance between the two points.
};
571
572static void
573merge_close_points(std::vector<blob_point_t> *orig_points, std::vector<blob_point_t> *merged_points, float merge_thresh)
574{
575 // if a pair of points in the supplied lists are closer than the
576 // threshold, discard one of them.
577
578 //@todo - merge the 2d blob extents when we merge a pair of points
579
580 std::vector<struct close_pair> pairs;
581 for (uint32_t i = 0; i < orig_points->size(); i++) {
582 for (uint32_t j = 0; j < orig_points->size(); j++) {
583 if (i != j) {
584 float d = dist_3d_cv(orig_points->at(i).p, orig_points->at(j).p);
585 if (d < merge_thresh) {
586 struct close_pair p;
587 p.index_a = i;
588 p.index_b = j;
589 p.dist = d;
590
591 pairs.push_back(p);
592 }
593 }
594 }
595 }
596 std::vector<int> indices_to_remove;
597 for (uint32_t i = 0; i < pairs.size(); i++) {
598 if (pairs[i].index_a < pairs[i].index_b) {
599 indices_to_remove.push_back(pairs[i].index_a);
600 } else {
601 indices_to_remove.push_back(pairs[i].index_b);
602 }
603 }
604
605 for (int i = 0; i < (int)orig_points->size(); i++) {
606 bool remove_index = false;
607 for (int j = 0; j < (int)indices_to_remove.size(); j++) {
608 if (i == indices_to_remove[j]) {
609 remove_index = true;
610 }
611 }
612 if (!remove_index) {
613 merged_points->push_back(orig_points->at(i));
614 }
615 }
616}
617
618static void
619match_triangles(Eigen::Matrix4f *t1_mat,
620 Eigen::Matrix4f *t1_to_t2_mat,
621 const Eigen::Vector4f &t1_a,
622 const Eigen::Vector4f &t1_b,
623 const Eigen::Vector4f &t1_c,
624 const Eigen::Vector4f &t2_a,
625 const Eigen::Vector4f &t2_b,
626 const Eigen::Vector4f &t2_c)
627{
628 // given 3 vertices in 'model space', and a corresponding 3 vertices
629 // in 'world space', compute the transformation matrix to map one
630 // to the other
631
632 *t1_mat = Eigen::Matrix4f().Identity();
633 Eigen::Matrix4f t2_mat = Eigen::Matrix4f().Identity();
634
635 Eigen::Vector3f t1_x_vec = (t1_b - t1_a).head<3>().normalized();
636 Eigen::Vector3f t1_z_vec = (t1_c - t1_a).head<3>().cross((t1_b - t1_a).head<3>()).normalized();
637 Eigen::Vector3f t1_y_vec = t1_x_vec.cross(t1_z_vec).normalized();
638
639 Eigen::Vector3f t2_x_vec = (t2_b - t2_a).head<3>().normalized();
640 Eigen::Vector3f t2_z_vec = (t2_c - t2_a).head<3>().cross((t2_b - t2_a).head<3>()).normalized();
641 Eigen::Vector3f t2_y_vec = t2_x_vec.cross(t2_z_vec).normalized();
642
643 t1_mat->col(0) << t1_x_vec[0], t1_x_vec[1], t1_x_vec[2], 0.0f;
644 t1_mat->col(1) << t1_y_vec[0], t1_y_vec[1], t1_y_vec[2], 0.0f;
645 t1_mat->col(2) << t1_z_vec[0], t1_z_vec[1], t1_z_vec[2], 0.0f;
646 t1_mat->col(3) << t1_a[0], t1_a[1], t1_a[2], 1.0f;
647
648 t2_mat.col(0) << t2_x_vec[0], t2_x_vec[1], t2_x_vec[2], 0.0f;
649 t2_mat.col(1) << t2_y_vec[0], t2_y_vec[1], t2_y_vec[2], 0.0f;
650 t2_mat.col(2) << t2_z_vec[0], t2_z_vec[1], t2_z_vec[2], 0.0f;
651 t2_mat.col(3) << t2_a[0], t2_a[1], t2_a[2], 1.0f;
652
653 *t1_to_t2_mat = t1_mat->inverse() * t2_mat;
654}
655
656static Eigen::Matrix4f
657solve_for_measurement(TrackerPSVR *t, std::vector<match_data_t> *measurement, std::vector<match_data_t> *solved)
658{
659 // use the vertex positions (at least 3) in the measurement to
660 // construct a pair of triangles which are used to calculate the
661 // pose of the tracked HMD,
662 // based on the corresponding model vertices
663
664 // @todo: compute all possible unique triangles, and average the result
665
666 Eigen::Matrix4f tri_basis;
667 Eigen::Matrix4f model_to_measurement;
668
669 Eigen::Vector4f meas_ref_a = measurement->at(0).position;
670 Eigen::Vector4f meas_ref_b = measurement->at(1).position;
671 int meas_index_a = measurement->at(0).vertex_index;
672 int meas_index_b = measurement->at(1).vertex_index;
673
674 Eigen::Vector4f model_ref_a = t->model_vertices[meas_index_a].position;
675 Eigen::Vector4f model_ref_b = t->model_vertices[meas_index_b].position;
676
677 float highest_length = 0.0f;
678 int best_model_index = 0;
679 int most_distant_index = 0;
680
681 for (uint32_t i = 0; i < measurement->size(); i++) {
682 int model_tag_index = measurement->at(i).vertex_index;
683 Eigen::Vector4f model_vert = t->model_vertices[model_tag_index].position;
684 if (most_distant_index > 1 && dist_3d(model_vert, model_ref_a) > highest_length) {
685 best_model_index = most_distant_index;
686 }
687 most_distant_index++;
688 }
689
690 Eigen::Vector4f meas_ref_c = measurement->at(best_model_index).position;
691 int meas_index_c = measurement->at(best_model_index).vertex_index;
692
693 Eigen::Vector4f model_ref_c = t->model_vertices[meas_index_c].position;
694
695 match_triangles(&tri_basis, &model_to_measurement, model_ref_a, model_ref_b, model_ref_c, meas_ref_a,
696 meas_ref_b, meas_ref_c);
697 Eigen::Matrix4f model_center_transform_f = tri_basis * model_to_measurement * tri_basis.inverse();
698
699 // now reverse the order of our verts to contribute to a more accurate
700 // estimate.
701
702 meas_ref_a = measurement->at(measurement->size() - 1).position;
703 meas_ref_b = measurement->at(measurement->size() - 2).position;
704 meas_index_a = measurement->at(measurement->size() - 1).vertex_index;
705 meas_index_b = measurement->at(measurement->size() - 2).vertex_index;
706
707 model_ref_a = t->model_vertices[meas_index_a].position;
708 model_ref_b = t->model_vertices[meas_index_b].position;
709
710 highest_length = 0.0f;
711 best_model_index = 0;
712 most_distant_index = 0;
713
714 for (uint32_t i = 0; i < measurement->size(); i++) {
715 int model_tag_index = measurement->at(i).vertex_index;
716 Eigen::Vector4f model_vert = t->model_vertices[model_tag_index].position;
717 if (most_distant_index < (int)measurement->size() - 2 &&
718 dist_3d(model_vert, model_ref_a) > highest_length) {
719 best_model_index = most_distant_index;
720 }
721 most_distant_index++;
722 }
723
724 meas_ref_c = measurement->at(best_model_index).position;
725 meas_index_c = measurement->at(best_model_index).vertex_index;
726
727 model_ref_c = t->model_vertices[meas_index_c].position;
728
729 match_triangles(&tri_basis, &model_to_measurement, model_ref_a, model_ref_b, model_ref_c, meas_ref_a,
730 meas_ref_b, meas_ref_c);
731 Eigen::Matrix4f model_center_transform_r = tri_basis * model_to_measurement * tri_basis.inverse();
732
733 // decompose our transforms and slerp between them to get the avg of the
734 // rotation determined from the first 2 + most distant , and last 2 +
735 // most distant verts
736
737 Eigen::Matrix3f r = model_center_transform_f.block(0, 0, 3, 3);
738 Eigen::Quaternionf f_rot_part = Eigen::Quaternionf(r);
739 Eigen::Vector4f f_trans_part = model_center_transform_f.col(3);
740
741 r = model_center_transform_r.block(0, 0, 3, 3);
742 Eigen::Quaternionf r_rot_part = Eigen::Quaternionf(r);
743 Eigen::Vector4f r_trans_part = model_center_transform_r.col(3);
744
745 Eigen::Matrix4f pose = Eigen::Matrix4f().Identity();
746 pose.block(0, 0, 3, 3) = f_rot_part.slerp(0.5, r_rot_part).toRotationMatrix();
747 pose.col(3) = (f_trans_part + r_trans_part) / 2.0f;
748
749 solved->clear();
750 for (uint32_t i = 0; i < PSVR_NUM_LEDS; i++) {
751 match_data_t md;
752 md.vertex_index = i;
753 md.position = pose * t->model_vertices[i].position;
754 solved->push_back(md);
755 }
756
757 return pose;
758}
759
//! Scratch data for the IMU-assisted solve: a measured position with its
//! assigned model vertex.
typedef struct proximity_data
{
	Eigen::Vector4f position; //!< Measured 3D position (homogeneous).
	float lowest_distance;
	int vertex_index; //!< Assigned model vertex (from the Hungarian solve).
} proximity_data_t;
766
/*!
 * Solve for a pose when a purely optical solve is not possible: assign
 * measured points to model vertices via the Hungarian algorithm, then
 * place the (IMU-rotated) model at each measured LED and average the
 * resulting per-vertex positions. Falls back to the last pose (and the
 * slow correction rate) when no usable points exist.
 *
 * NOTE(review): search_radius is currently unused - confirm intent.
 */
static Eigen::Matrix4f
solve_with_imu(TrackerPSVR &t,
               std::vector<match_data_t> *measurements,
               std::vector<match_data_t> *match_measurements,
               std::vector<match_data_t> *solved,
               float search_radius)
{

	// use the hungarian algorithm to find the closest set of points to the
	// match measurement

	// a 7x7 matrix of costs e.g distances between our points and the match
	// measurements we will initialise to zero because we will not have
	// distances for points we don't have
	// NOTE(review): fixed 7x7 (PSVR_NUM_LEDS) size - assumes at most 7
	// measurements survive pruning/merging; confirm the upstream merge
	// step guarantees this, otherwise the indexing below goes out of
	// bounds.
	std::vector<vector<double> > costMatrix = {
	    {0, 0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0, 0},
	    {0, 0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0, 0},
	};

	HungarianAlgorithm HungAlgo;
	vector<int> assignment;

	// lets fill in our cost matrix with distances
	// @todo: could use squared distance to save a handful of sqrts.

	// @todo: artificially boost cost where distance from last exceeds
	// search threshold
	// @todo: artificially boost cost where blob type differs from match
	// measurement

	for (uint32_t i = 0; i < measurements->size(); i++) {
		for (uint32_t j = 0; j < match_measurements->size(); j++) {
			costMatrix[i][j] = dist_3d(measurements->at(i).position, match_measurements->at(j).position);
			// Penalize assignments whose blob classification
			// (side vs front) disagrees with the match.
			if (measurements->at(i).src_blob.btype == BLOB_TYPE_SIDE &&
			    match_measurements->at(j).src_blob.btype == BLOB_TYPE_FRONT) {
				costMatrix[i][j] += 10.0f;
			}
			if (measurements->at(i).src_blob.btype == BLOB_TYPE_FRONT &&
			    match_measurements->at(j).src_blob.btype == BLOB_TYPE_SIDE) {
				costMatrix[i][j] += 10.0f;
			}
		}
	}

	double cost = HungAlgo.Solve(costMatrix, assignment);
	(void)cost;

	// Apply the optimal assignment: each measurement gets its model
	// vertex index.
	for (uint32_t i = 0; i < measurements->size(); i++) {
		measurements->at(i).vertex_index = assignment[i];
	}

	std::vector<proximity_data_t> proximity_data;
	for (uint32_t i = 0; i < measurements->size(); i++) {
		float lowest_distance = 65535.0;
		int closest_index = 0;
		(void)lowest_distance;
		(void)closest_index;

		proximity_data_t p;
		match_data_t measurement = measurements->at(i);

		p.position = measurement.position;
		p.vertex_index = measurement.vertex_index;
		p.lowest_distance = 0.0f;
		proximity_data.push_back(p);
	}

	if (!proximity_data.empty()) {

		// use the IMU rotation and the measured points in
		// world space to compute a transform from model to world space.
		// use each measured led individually and average the resulting
		// positions

		std::vector<match_model_t> temp_measurement_list;
		for (uint32_t i = 0; i < proximity_data.size(); i++) {
			proximity_data_t p = proximity_data[i];
			Eigen::Vector4f model_vertex = t.model_vertices[p.vertex_index].position;
			Eigen::Vector4f measurement_vertex = p.position;
			// Where the IMU-rotated model vertex would sit; the
			// residual is the translation that pins the model to
			// this measured LED.
			Eigen::Vector4f measurement_offset = t.corrected_imu_rotation * model_vertex;
			Eigen::Affine3f translation(
			    Eigen::Translation3f((measurement_vertex - measurement_offset).head<3>()));
			Eigen::Matrix4f model_to_measurement = translation.matrix() * t.corrected_imu_rotation;
			match_model_t temp_measurement;
			for (uint32_t j = 0; j < PSVR_NUM_LEDS; j++) {
				match_data_t md;
				md.position = model_to_measurement * t.model_vertices[j].position;
				md.vertex_index = j;
				temp_measurement.measurements.push_back(md);
			}
			temp_measurement_list.push_back(temp_measurement);
		}

		// Average each vertex position over all per-LED estimates.
		// NOTE(review): positions are homogeneous with w == 1 and the
		// accumulator starts at w == 1, so after the divide w is
		// (n + 1) / n rather than exactly 1 - confirm downstream only
		// uses xyz.
		for (uint32_t i = 0; i < PSVR_NUM_LEDS; i++) {
			match_data_t avg_data;
			avg_data.position = Eigen::Vector4f(0.0f, 0.0f, 0.0f, 1.0f);
			for (uint32_t j = 0; j < temp_measurement_list.size(); j++) {
				avg_data.position += temp_measurement_list[j].measurements[i].position;
			}
			avg_data.position /= float(temp_measurement_list.size());
			avg_data.vertex_index = i;
			solved->push_back(avg_data);
		}

		std::vector<match_data_t> _solved;
		Eigen::Matrix4f pose = solve_for_measurement(&t, solved, &_solved) * t.corrected_imu_rotation;
		t.last_pose = pose;
		return pose;
	}
	PSVR_INFO("LOST TRACKING - RETURNING LAST POSE");
	t.max_correction = PSVR_SLOW_CORRECTION;
	return t.last_pose;
}
881
882
883static Eigen::Matrix4f
884disambiguate(TrackerPSVR &t,
885 std::vector<match_data_t> *measured_points,
886 std::vector<match_data_t> *last_measurement,
887 std::vector<match_data_t> *solved,
888 uint32_t frame_no)
889{
890
891 // main disambiguation routine - if we have enough points, use
892 // optical matching, otherwise solve with imu.
893
894 // do our imu-based solve up front - we can use this to compute a more
895 // likely match (currently disabled)
896
897 Eigen::Matrix4f imu_solved_pose =
898 solve_with_imu(t, measured_points, last_measurement, solved, PSVR_SEARCH_RADIUS);
899
900 if (measured_points->size() < PSVR_OPTICAL_SOLVE_THRESH && !last_measurement->empty()) {
901 return imu_solved_pose;
902 }
903
904 if (measured_points->size() < 3) {
905 return imu_solved_pose;
906 }
907
908
909 // optical matching.
910
911 float lowest_error = 65535.0f;
912 int32_t best_model = -1;
913 uint32_t matched_vertex_indices[PSVR_NUM_LEDS];
914
915 // we can early-out if we are 'close enough' to our last match model.
916 // if we hold the previous led configuration, this increases
917 // performance and should cut down on jitter.
918 if (t.last_optical_model > 0 && t.done_correction) {
919
920 match_model_t m = t.matches[t.last_optical_model];
921 for (uint32_t i = 0; i < measured_points->size(); i++) {
922 measured_points->at(i).vertex_index = m.measurements.at(i).vertex_index;
923 }
924 Eigen::Matrix4f res = solve_for_measurement(&t, measured_points, solved);
925 float diff = last_diff(t, solved, &t.last_vertices);
926 if (diff < PSVR_HOLD_THRESH) {
927 // U_LOG_D("diff from last: %f", diff);
928
929 return res;
930 }
931 }
932
933
934
935 for (uint32_t i = 0; i < t.matches.size(); i++) {
936 match_model_t m = t.matches[i];
937 float error_sum = 0.0f;
938 float sign_diff = 0.0f;
939 (void)sign_diff;
940
941 // we have 2 measurements per vertex (distance and
942 // angle) and we are comparing only the 'non-basis
943 // vector' elements
944
945 // fill in our 'proposed' vertex indices from the model
946 // data (this will be overwritten once our best model is
947 // selected
948 for (uint32_t j = 0; j < measured_points->size(); j++) {
949 measured_points->at(j).vertex_index = m.measurements.at(j).vertex_index;
950 }
951
952 bool ignore = false;
953
954 // use the information we gathered on blob shapes to
955 // reject matches that would not fit
956
957 //@todo: use tags instead of numeric vertex indices
958
959 for (uint32_t j = 0; j < measured_points->size(); j++) {
960
961 if (measured_points->at(j).src_blob.btype == BLOB_TYPE_FRONT &&
962 measured_points->at(j).vertex_index > 4) {
963 error_sum += 50.0f;
964 }
965
966 if (measured_points->at(j).src_blob.btype == BLOB_TYPE_SIDE &&
967 measured_points->at(j).vertex_index < 5) {
968 error_sum += 50.0f;
969 }
970
971 // if the distance is between a measured point
972 // and its last-known position is significantly
973 // different, discard this
974 float dist = fabs(measured_points->at(j).distance - m.measurements.at(j).distance);
975 if (dist > PSVR_DISAMBIG_REJECT_DIST) {
976 error_sum += 50.0f;
977 } else {
978 error_sum += fabs(measured_points->at(j).distance - m.measurements.at(j).distance);
979 }
980
981 // if the angle is significantly different,
982 // discard this
983 float angdiff = fabs(measured_points->at(j).angle - m.measurements.at(j).angle);
984 if (angdiff > PSVR_DISAMBIG_REJECT_ANG) {
985 error_sum += 50.0f;
986 } else {
987
988 error_sum += fabs(measured_points->at(j).angle - m.measurements.at(j).angle);
989 }
990 }
991
992 float avg_error = (error_sum / measured_points->size());
993 if (error_sum < 50) {
994 std::vector<match_data_t> meas_solved;
995 solve_for_measurement(&t, measured_points, &meas_solved);
996 float prev_diff = last_diff(t, &meas_solved, &t.last_vertices);
997 float imu_diff = last_diff(t, &meas_solved, solved);
998
999 Eigen::Vector4f tl_pos;
1000 Eigen::Vector4f tr_pos;
1001 Eigen::Vector4f bl_pos;
1002 Eigen::Vector4f br_pos;
1003 bool has_bl = false;
1004 bool has_br = false;
1005 bool has_tl = false;
1006 bool has_tr = false;
1007
1008 for (uint32_t j = 0; j < meas_solved.size(); j++) {
1009 match_data_t *md = &meas_solved.at(j);
1010 if (md->vertex_index == TAG_BL) {
1011 bl_pos = md->position;
1012 has_bl = true;
1013 }
1014 if (md->vertex_index == TAG_BR) {
1015 br_pos = md->position;
1016 has_br = true;
1017 }
1018 if (md->vertex_index == TAG_TL) {
1019 tl_pos = md->position;
1020 has_tl = true;
1021 }
1022 if (md->vertex_index == TAG_TR) {
1023 tr_pos = md->position;
1024 has_tr = true;
1025 }
1026 }
1027
1028 // reject any configuration where 'top' is below
1029 // 'bottom
1030
1031 if (has_bl && has_tl && bl_pos.y() > tl_pos.y()) {
1032 // U_LOG_D("IGNORING BL > TL %f %f",
1033 // bl_pos.y(),
1034 // br_pos.y());
1035 // ignore = true;
1036 }
1037 if (has_br && has_tr && br_pos.y() > tr_pos.y()) {
1038 // U_LOG_D("IGNORING TL > TR %f %f",
1039 // tl_pos.y(),
1040 // tr_pos.y());
1041 // ignore = true;
1042 }
1043
1044 // once we have a lock, bias the detected
1045 // configuration using the imu-solved result,
1046 // and the solve from the previous frame
1047
1048 if (t.done_correction) {
1049 avg_error += prev_diff;
1050 avg_error += imu_diff;
1051 }
1052
1053 // useful for debugging
1054 // U_LOG_D(
1055 // "match %d dist to last: %f dist to imu: %f "
1056 // "rmsError: %f squaredSum:%f %d",
1057 // i, prev_diff, imu_diff, avg_error, error_sum,
1058 // ignore);
1059 }
1060 if (avg_error <= lowest_error && !ignore) {
1061 lowest_error = avg_error;
1062 best_model = i;
1063 for (uint32_t i = 0; i < measured_points->size(); i++) {
1064 matched_vertex_indices[i] = measured_points->at(i).vertex_index;
1065 }
1066 }
1067 }
1068
1069 // U_LOG_D("lowest_error %f", lowest_error);
1070 if (best_model == -1) {
1071 PSVR_INFO("COULD NOT MATCH MODEL!");
1072 return Eigen::Matrix4f().Identity();
1073 }
1074
1075 t.last_optical_model = best_model;
1076 for (uint32_t i = 0; i < measured_points->size(); i++) {
1077 measured_points->at(i).vertex_index = matched_vertex_indices[i];
1078 cv::putText(
1079 t.debug.rgb[0],
1080 cv::format("%d %d", measured_points->at(i).vertex_index, measured_points->at(i).src_blob.btype),
1081 measured_points->at(i).src_blob.lkp.pt, cv::FONT_HERSHEY_SIMPLEX, 1.0f, cv::Scalar(0, 255, 0));
1082 }
1083
1084 t.last_pose = solve_for_measurement(&t, measured_points, solved);
1085
1086 return t.last_pose;
1087}
1088
1089static void
1090create_model(TrackerPSVR &t)
1091{
1092 // this is the model we use to match our measurements against.
1093 // these vertices came out of the blender prototype.
1094
1095 // NOTE: this is not an accurate measurement of the PSVRs
1096 // physical dimensions, rather an approximate model that serves
1097 // to minimize the incidence of incorrect led matches.
1098
1099 t.model_vertices[0] = {
1100 0,
1101 Eigen::Vector4f(-0.06502f, 0.04335f, 0.01861f, 1.0f),
1102 TAG_BL,
1103 true,
1104 };
1105 t.model_vertices[1] = {
1106 1,
1107 Eigen::Vector4f(0.06502f, 0.04335f, 0.01861f, 1.0f),
1108 TAG_BR,
1109 true,
1110 };
1111 t.model_vertices[2] = {
1112 2,
1113 Eigen::Vector4f(0.0f, 0.0f, 0.04533f, 1.0f),
1114 TAG_C,
1115 true,
1116 };
1117 t.model_vertices[3] = {
1118 3,
1119 Eigen::Vector4f(-0.06502f, -0.04335f, 0.01861f, 1.0f),
1120 TAG_TL,
1121 true,
1122 };
1123 t.model_vertices[4] = {
1124 4,
1125 Eigen::Vector4f(0.06502f, -0.04335f, 0.01861f, 1.0f),
1126 TAG_TR,
1127 true,
1128 };
1129 t.model_vertices[5] = {
1130 5,
1131 Eigen::Vector4f(-0.07802f, 0.0f, -0.02671f, 1.0f),
1132 TAG_SL,
1133 true,
1134 };
1135 t.model_vertices[6] = {
1136 6,
1137 Eigen::Vector4f(0.07802f, 0.0f, -0.02671f, 1.0f),
1138 TAG_SR,
1139 true,
1140 };
1141}
1142
1143struct Helper
1144{
1145public:
1146 m_permutator mp = {};
1147 model_vertex_t vec[PSVR_NUM_LEDS] = {};
1148 uint32_t indices[PSVR_NUM_LEDS];
1149
1150
1151 ~Helper()
1152 {
1153 m_permutator_reset(&mp);
1154 }
1155
1156 bool
1157 step(TrackerPSVR &t)
1158 {
1159 bool ret = m_permutator_step(&mp, &indices[0], PSVR_NUM_LEDS);
1160 if (!ret) {
1161 return false;
1162 }
1163
1164 for (size_t i = 0; i < PSVR_NUM_LEDS; i++) {
1165 vec[i] = t.model_vertices[indices[i]];
1166 }
1167
1168 return true;
1169 }
1170};
1171
1172static void
1173create_match_list(TrackerPSVR &t)
1174{
1175 // create our permutation list for matching
1176 // compute the distance and angles between a reference
1177 // vector, constructed from the first two vertices in
1178 // the permutation.
1179
1180 Helper mp = {};
1181 while (mp.step(t)) {
1182 match_model_t m;
1183
1184 model_vertex_t ref_pt_a = mp.vec[0];
1185 model_vertex_t ref_pt_b = mp.vec[1];
1186 Eigen::Vector3f ref_vec3 = (ref_pt_b.position - ref_pt_a.position).head<3>();
1187
1188 float normScale = dist_3d(ref_pt_a.position, ref_pt_b.position);
1189
1190 match_data_t md;
1191 for (auto &&i : mp.vec) {
1192 Eigen::Vector3f point_vec3 = (i.position - ref_pt_a.position).head<3>();
1193 md.vertex_index = i.vertex_index;
1194 md.distance = dist_3d(i.position, ref_pt_a.position) / normScale;
1195 if (i.position.head<3>().dot(Eigen::Vector3f(0.0, 0.0, 1.0f)) < 0) {
1196 md.distance *= -1;
1197 }
1198
1199 Eigen::Vector3f plane_norm = ref_vec3.cross(point_vec3).normalized();
1200 if (ref_pt_a.position != i.position) {
1201
1202 if (plane_norm.normalized().z() > 0) {
1203 md.angle = -1 * acos((point_vec3).normalized().dot(ref_vec3.normalized()));
1204 } else {
1205 md.angle = acos(point_vec3.normalized().dot(ref_vec3.normalized()));
1206 }
1207 } else {
1208 md.angle = 0.0f;
1209 }
1210 // fix up any NaNs
1211 if (md.angle != md.angle) {
1212 md.angle = 0.0f;
1213 }
1214 if (md.distance != md.distance) {
1215 md.distance = 0.0f;
1216 }
1217
1218 m.measurements.push_back(md);
1219 }
1220
1221 if (match_possible(&m)) {
1222 t.matches.push_back(m);
1223 }
1224 }
1225}
1226
1227static void
1228do_view(TrackerPSVR &t, View &view, cv::Mat &grey, cv::Mat &rgb)
1229{
1230 // Undistort and rectify the whole image.
1231 cv::remap(grey, // src
1232 view.frame_undist_rectified, // dst
1233 view.undistort_rectify_map_x, // map1
1234 view.undistort_rectify_map_y, // map2
1235 cv::INTER_NEAREST, // interpolation - LINEAR seems
1236 // very slow on my setup
1237 cv::BORDER_CONSTANT, // borderMode
1238 cv::Scalar(0, 0, 0)); // borderValue
1239
1240 cv::threshold(view.frame_undist_rectified, // src
1241 view.frame_undist_rectified, // dst
1242 32.0, // thresh
1243 255.0, // maxval
1244 0);
1245 t.sbd->detect(view.frame_undist_rectified, // image
1246 view.keypoints, // keypoints
1247 cv::noArray()); // mask
1248
1249 // Debug is wanted, draw the keypoints.
1250 if (rgb.cols > 0) {
1251 cv::drawKeypoints(view.frame_undist_rectified, // image
1252 view.keypoints, // keypoints
1253 rgb, // outImage
1254 cv::Scalar(255, 0, 0), // color
1255 cv::DrawMatchesFlags::DRAW_RICH_KEYPOINTS); // flags
1256 }
1257}
1258
/*!
 * Per-blob line-sample measurements produced by blob_intersections().
 *
 * Each *_to_* field counts the bright ('inside') pixels sampled along
 * one line across the blob's bounding square; the diffs compare the
 * two diagonal and the two axis-aligned samples respectively.
 */
typedef struct blob_data
{
	int tc_to_bc; // top center to bottom center
	int lc_to_rc; // left center to right center
	int tl_to_br; // top left to bottom right
	int bl_to_tr; // bottom left to top right
	int diff_a;   // tl_to_br - bl_to_tr (diagonal channel)
	int diff_b;   // tc_to_bc - lc_to_rc (axis-aligned channel)
	bool ignore;  // set by tag_points() when the signal is too weak
} blob_data_t;
1269
1270
1271static void
1272sample_line(cv::Mat &src, const cv::Point2i &start, const cv::Point2i &end, int *inside_length)
1273{
1274 // use bresenhams algorithm to sample the
1275 // pixels between two points in an image
1276
1277 *inside_length = 0;
1278 int curr_x = start.x;
1279 int curr_y = start.y;
1280
1281 int slope_x = start.x < end.x ? 1 : -1;
1282 int slope_y = start.y < end.y ? 1 : -1;
1283
1284 int dx = end.x - start.x;
1285 int dy = -1 * abs(end.y - start.y);
1286 int e_xy = dx + dy; /* error value e_xy */
1287
1288 while (1) {
1289 // sample our pixel and see if it is in the interior
1290 if (curr_x > 0 && curr_y > 0 && curr_x < src.cols && curr_y < src.rows) {
1291 // cv is row, column
1292 uint8_t *val = src.ptr(curr_y, curr_x);
1293
1294 /// @todo: we are counting pixels rather than measuring length - bresenhams may introduce some
1295 /// inaccuracy here.
1296 if (*val > 128) {
1297 (*inside_length) += 1;
1298 }
1299 }
1300 if (curr_x == end.x && curr_y == end.y) {
1301 break;
1302 }
1303 int err2 = 2 * e_xy;
1304 if (err2 >= dy) {
1305 e_xy += dy;
1306 curr_x += slope_x;
1307 }
1308 if (err2 <= dx) {
1309 e_xy += dx;
1310 curr_y += slope_y;
1311 }
1312 }
1313}
1314
static void
blob_intersections(cv::Mat &src, cv::KeyPoint *kp, struct blob_data *bd)
{
	// compute the intersections in 4 'directions' between the
	// extents of the 'square' region we get from the opencv blob
	// detector

	// compute the difference between the 'axis pairs' - the
	// relative magnitude and signs of these diffs can differentiate
	// between front and side blobs, as we can only ever see one
	// 'side' blob at a time, and its orientation will be opposite
	// to the others

	// Bounding square centered on the keypoint, sized from the
	// detector's reported blob size.
	int radius = kp->size / 2;
	cv::Rect2i sq_b(kp->pt.x - radius, kp->pt.y - radius, kp->size, kp->size);

	// Diagonal samples: top-left -> bottom-right and
	// bottom-left -> top-right.
	sample_line(src, cv::Point2i(sq_b.x, sq_b.y), cv::Point2i(sq_b.x + sq_b.width, sq_b.y + sq_b.height),
	            &bd->tl_to_br);
	sample_line(src, cv::Point2i(sq_b.x, sq_b.y + sq_b.height), cv::Point2i(sq_b.x + sq_b.width, sq_b.y),
	            &bd->bl_to_tr);

	// NOTE(review): this samples the *horizontal* mid-line into
	// tc_to_bc and the *vertical* mid-line into lc_to_rc - the field
	// names appear swapped. Downstream use (tag_points) only relies
	// on the sign consensus of diff_b, so this is benign, but
	// confirm before renaming the fields.
	sample_line(src, cv::Point2i(sq_b.x, sq_b.y + sq_b.height / 2),
	            cv::Point2i(sq_b.x + sq_b.width, sq_b.y + sq_b.height / 2), &bd->tc_to_bc);

	sample_line(src, cv::Point2i(sq_b.x + sq_b.width / 2, sq_b.y),
	            cv::Point2i(sq_b.x + sq_b.width / 2, sq_b.y + sq_b.height), &bd->lc_to_rc);

	bd->diff_a = bd->tl_to_br - bd->bl_to_tr;
	bd->diff_b = bd->tc_to_bc - bd->lc_to_rc;
	bd->ignore = false;
}
1346
1347static void
1348tag_points(TrackerPSVR &t, std::vector<blob_data_t> *blob_datas)
1349{
1350 // determine the 'channel' horiz/vert or 45 deg offset - with
1351 // the highest signal - and calculate the lower bound below
1352 // which we will ignore the blob, as it is not sufficiently
1353 // 'long' to identify
1354 int channel_a_total = 0;
1355 int channel_b_total = 0;
1356 int channel_a_min = INT_MAX;
1357 int channel_b_min = INT_MAX;
1358 int channel_a_max = INT_MIN;
1359 int channel_b_max = INT_MIN;
1360 int channel_a_pos = 0;
1361 int channel_a_neg = 0;
1362 int channel_b_pos = 0;
1363 int channel_b_neg = 0;
1364
1365
1366 for (uint32_t i = 0; i < blob_datas->size(); i++) {
1367 blob_data_t b = blob_datas->at(i);
1368 channel_a_total += abs(b.diff_a);
1369 if (abs(b.diff_a) < channel_a_min) {
1370 channel_a_min = b.diff_a;
1371 }
1372 if (abs(b.diff_a) > channel_a_max) {
1373 channel_a_min = b.diff_a;
1374 }
1375
1376 if (b.diff_a < 0) {
1377 channel_a_neg++;
1378 } else {
1379 channel_a_pos++;
1380 }
1381
1382 if (b.diff_b < 0) {
1383 channel_b_neg++;
1384 } else {
1385 channel_b_pos++;
1386 }
1387
1388 channel_b_total += abs(b.diff_b);
1389 if (abs(b.diff_b) < channel_b_min) {
1390 channel_b_min = b.diff_b;
1391 }
1392 if (abs(b.diff_b) > channel_b_max) {
1393 channel_b_min = b.diff_b;
1394 }
1395 }
1396
1397 int side_count = 0;
1398 if (channel_a_total > channel_b_total) {
1399 // use channel a
1400 float channel_dev = (channel_a_total / float(blob_datas->size())) / 2.0f;
1401 int usable_count = 0;
1402
1403 for (uint32_t i = 0; i < blob_datas->size(); i++) {
1404 if (abs(blob_datas->at(i).diff_a) > channel_dev) {
1405 usable_count++;
1406 } else {
1407 if (blob_datas->at(i).diff_a < 0) {
1408 channel_a_neg--;
1409 blob_datas->at(i).ignore = true;
1410 } else {
1411 channel_a_pos--;
1412 blob_datas->at(i).ignore = true;
1413 }
1414 }
1415 }
1416
1417
1418 if (usable_count > 2) {
1419 // we can now check the signs, and identify the
1420 // 'odd one out' as the side LED - if we have a
1421 // consensus of directions, we can identify them
1422 // all as 'front' LEDs.
1423 for (uint32_t i = 0; i < blob_datas->size(); i++) {
1424 if (!blob_datas->at(i).ignore) {
1425 if (channel_a_pos > channel_a_neg) {
1426 // we can tag all the
1427 // positive ones with
1428 // FRONT and all the
1429 // negative ones with
1430 // SIDE
1431 if (blob_datas->at(i).diff_a >= 0) {
1432 t.world_points[i].btype = BLOB_TYPE_FRONT;
1433 } else {
1434 t.world_points[i].btype = BLOB_TYPE_SIDE;
1435 side_count++;
1436 }
1437
1438 } else {
1439 if (blob_datas->at(i).diff_a < 0) {
1440 t.world_points[i].btype = BLOB_TYPE_FRONT;
1441 } else {
1442 t.world_points[i].btype = BLOB_TYPE_SIDE;
1443 side_count++;
1444 }
1445 }
1446 }
1447 }
1448 }
1449 } else {
1450 // use channel b
1451 float channel_dev = (channel_b_total / float(blob_datas->size())) / 2.0f;
1452 int usable_count = 0;
1453 for (uint32_t i = 0; i < blob_datas->size(); i++) {
1454 if (abs(blob_datas->at(i).diff_b) > channel_dev) {
1455 usable_count++;
1456 } else {
1457 if (blob_datas->at(i).diff_b < 0) {
1458 channel_b_neg--;
1459 blob_datas->at(i).ignore = true;
1460 } else {
1461 channel_b_pos--;
1462 blob_datas->at(i).ignore = true;
1463 }
1464 }
1465 }
1466
1467 if (usable_count > 2) {
1468 // we can now check the signs, and identify the
1469 // 'odd one out' as the side LED - if we have a
1470 // consensus of directions, we can identify them
1471 // all as 'front' LEDs.
1472 for (uint32_t i = 0; i < blob_datas->size(); i++) {
1473 if (blob_datas->at(i).ignore) {
1474 continue;
1475 }
1476 if (channel_b_pos > channel_b_neg) {
1477 // we can tag all the positive ones with
1478 // FRONT and all the egative ones with
1479 // SIDE
1480 if (blob_datas->at(i).diff_b >= 0) {
1481 t.world_points[i].btype = BLOB_TYPE_FRONT;
1482 } else {
1483 t.world_points[i].btype = BLOB_TYPE_SIDE;
1484 side_count++;
1485 }
1486
1487 } else {
1488 if (blob_datas->at(i).diff_b < 0) {
1489 t.world_points[i].btype = BLOB_TYPE_FRONT;
1490 } else {
1491 t.world_points[i].btype = BLOB_TYPE_SIDE;
1492 side_count++;
1493 }
1494 }
1495 }
1496 }
1497 }
1498
1499 if (side_count > 1) {
1500 PSVR_INFO("FOUND MULTIPLE SIDE LEDS. should never happen!");
1501 for (uint32_t i = 0; i < t.world_points.size(); i++) {
1502 t.world_points.at(i).btype = BLOB_TYPE_UNKNOWN;
1503 }
1504 }
1505}
1506
1507
1508static void
1509process(TrackerPSVR &t, struct xrt_frame *xf)
1510{
1511 // No frame supplied, early-out.
1512 if (xf == NULL) {
1513 return;
1514 }
1515
1516 t.debug.refresh(xf);
1517
1518 // compute a dt for our filter(s)
1519 //@todo - use a more precise measurement here
1520 float dt = xf->source_sequence - t.last_frame;
1521 if (dt > 10.0f) {
1522 dt = 1.0f;
1523 }
1524
1525 std::vector<match_data_t> predicted_pose;
1526 filter_predict(&predicted_pose, t.track_filters, dt / 2.0f);
1527
1528
1529 model_vertex_t measured_pose[PSVR_NUM_LEDS];
1530 (void)measured_pose;
1531
1532 // get our raw measurements
1533
1534 t.view[0].keypoints.clear();
1535 t.view[1].keypoints.clear();
1536 t.l_blobs.clear();
1537 t.r_blobs.clear();
1538 t.world_points.clear();
1539
1540 int cols = xf->width / 2;
1541 int rows = xf->height;
1542 int stride = xf->stride;
1543
1544 cv::Mat l_grey(rows, cols, CV_8UC1, xf->data, stride);
1545 cv::Mat r_grey(rows, cols, CV_8UC1, xf->data + cols, stride);
1546
1547 do_view(t, t.view[0], l_grey, t.debug.rgb[0]);
1548 do_view(t, t.view[1], r_grey, t.debug.rgb[1]);
1549
1550 // if we wish to confirm our camera input contents, dump frames
1551 // to disk
1552
1553 // cv::imwrite("/tmp/l_view.png", t.view[0].frame_undist_rectified);
1554 // cv::imwrite("/tmp/r_view.png", t.view[1].frame_undist_rectified);
1555
1556 // do some basic matching to come up with likely
1557 // disparity-pairs.
1558
1559 for (uint32_t i = 0; i < t.view[0].keypoints.size(); i++) {
1560 cv::KeyPoint l_blob = t.view[0].keypoints[i];
1561 int l_index = -1;
1562 int r_index = -1;
1563
1564 for (uint32_t j = 0; j < t.view[1].keypoints.size(); j++) {
1565 float lowest_dist = 65535.0f;
1566 cv::KeyPoint r_blob = t.view[1].keypoints[j];
1567 // find closest point on same-ish scanline
1568 float xdiff = r_blob.pt.x - l_blob.pt.x;
1569 float ydiff = r_blob.pt.y - l_blob.pt.y;
1570 if ((ydiff < 3.0f) && (ydiff > -3.0f) && (abs(xdiff) < lowest_dist)) {
1571 lowest_dist = abs(xdiff);
1572 r_index = j;
1573 l_index = i;
1574 }
1575 }
1576
1577 if (l_index > -1 && r_index > -1) {
1578 cv::KeyPoint lkp = t.view[0].keypoints.at(l_index);
1579 cv::KeyPoint rkp = t.view[1].keypoints.at(r_index);
1580 t.l_blobs.push_back(lkp);
1581 t.r_blobs.push_back(rkp);
1582 // U_LOG_D("2D coords: LX %f LY %f RX %f RY %f",
1583 // lkp.pt.x,
1584 // lkp.pt.y, rkp.pt.x, rkp.pt.y);
1585 }
1586 }
1587
1588 // Convert our 2d point + disparities into 3d points.
1589 std::vector<blob_data_t> blob_datas;
1590
1591 if (!t.l_blobs.empty()) {
1592 for (uint32_t i = 0; i < t.l_blobs.size(); i++) {
1593 float disp = t.r_blobs[i].pt.x - t.l_blobs[i].pt.x;
1594 cv::Vec4d xydw(t.l_blobs[i].pt.x, t.l_blobs[i].pt.y, disp, 1.0f);
1595 // Transform
1596 cv::Vec4d h_world = (cv::Matx44d)t.disparity_to_depth * xydw;
1597
1598 // Divide by scale to get 3D vector from
1599 // homogeneous coordinate. we also invert x here
1600 blob_point_t bp;
1601 bp.p =
1602 cv::Point3f(-h_world[0] / h_world[3], h_world[1] / h_world[3], (h_world[2] / h_world[3]));
1603 bp.lkp = t.l_blobs[i];
1604 bp.rkp = t.r_blobs[i];
1605 bp.btype = BLOB_TYPE_UNKNOWN;
1606 t.world_points.push_back(bp);
1607
1608 // compute the shape data for each blob
1609
1610 blob_data_t intersections;
1611 blob_intersections(t.view[0].frame_undist_rectified, &bp.lkp, &intersections);
1612 blob_datas.push_back(intersections);
1613 }
1614 }
1615
1616 tag_points(t, &blob_datas);
1617
1618
1619
1620 t.pruned_points.clear();
1621 t.merged_points.clear();
1622
1623 // remove outliers from our measurement list
1624 remove_outliers(&t.world_points, &t.pruned_points, PSVR_OUTLIER_THRESH);
1625
1626 // remove any points that are too close to be
1627 // treated as separate leds
1628 merge_close_points(&t.pruned_points, &t.merged_points, PSVR_MERGE_THRESH);
1629
1630
1631 // uncomment to debug 'overpruning' or other issues
1632 // that may be related to calibration scale
1633 PSVR_INFO("world points: %d pruned points: %d merged points %d", (uint32_t)t.world_points.size(),
1634 (uint32_t)t.pruned_points.size(), (uint32_t)t.merged_points.size());
1635
1636
1637 // put our blob positions in a slightly more
1638 // useful data structure
1639
1640 if (t.merged_points.size() > PSVR_NUM_LEDS) {
1641 PSVR_INFO("Too many blobs to be a PSVR! %d", (uint32_t)t.merged_points.size());
1642 } else {
1643 // convert our points to match data,
1644 // this tags our match_vertices with
1645 // everything we need to solve the pose.
1646
1647 verts_to_measurement(&t.merged_points, &t.match_vertices);
1648 }
1649
1650#ifdef PSVR_DUMP_FOR_OFFLINE_ANALYSIS
1651 // raw debug output for Blender algo development
1652 for (size_t i = 0; i < t.merged_points.size(); i++) {
1653
1654 cv::Point3f unscaled = t.merged_points.at(i).p;
1655
1656
1657 fprintf(t.dump_file, "P,%" PRIu64 ",%f,%f,%f\n", xf->source_sequence, unscaled.x, unscaled.y,
1658 unscaled.z);
1659 }
1660 fprintf(t.dump_file, "\n");
1661#endif
1662
1663
1664 // our primary solving technique - optical and
1665 // fallback to imu-based is handled in the
1666 // disambiguate function - solved will contain our
1667 // best estimate of the position of the model vertices
1668 // in world space, and model_center_transform will
1669 // contain the pose matrix
1670 std::vector<match_data_t> solved;
1671 Eigen::Matrix4f model_center_transform = disambiguate(t, &t.match_vertices, &predicted_pose, &solved, 0);
1672
1673
1674 // derive our optical rotation correction from the
1675 // pose transform
1676
1677 Eigen::Matrix3f r = model_center_transform.block(0, 0, 3, 3);
1678 Eigen::Quaternionf rot(r);
1679
1680 // we only do this if we are pretty confident we
1681 // will have a 'good' optical pose i.e. front-5
1682 // leds.
1683 if (t.merged_points.size() >= PSVR_OPTICAL_SOLVE_THRESH) {
1684 Eigen::Quaternionf correction =
1685 rot * Eigen::Quaternionf(t.fusion.imu_3dof.rot.w, t.fusion.imu_3dof.rot.x, t.fusion.imu_3dof.rot.y,
1686 t.fusion.imu_3dof.rot.z)
1687 .inverse();
1688
1689 float correction_magnitude = t.target_optical_rotation_correction.angularDistance(correction);
1690
1691 // for corrections subsequent to the
1692 // first, we never want to depart
1693 // massively from the imu rotation, as
1694 // such major adjustments are likely to
1695 // be erroneous.
1696
1697 // uncomment to debug rotation correction convergence
1698 // issues
1699
1700 PSVR_TRACE("Q1: %f %f %f %f Q2: %f %f %f %f", t.target_optical_rotation_correction.x(),
1701 t.target_optical_rotation_correction.y(), t.target_optical_rotation_correction.z(),
1702 t.target_optical_rotation_correction.w(), correction.x(), correction.y(), correction.z(),
1703 correction.w());
1704 PSVR_TRACE("correction mag: %f avg %f", correction_magnitude, t.avg_optical_correction);
1705
1706 // keep a running average of the last 10 corrections -
1707 // so we can apply the correction only when we are
1708 // relatively stable
1709 t.avg_optical_correction -= t.avg_optical_correction / 10.0f;
1710 t.avg_optical_correction += correction_magnitude / 10.0f;
1711
1712 PSVR_DEBUG("optical solve %f", t.avg_optical_correction);
1713
1714
1715 // if we have not yet applied a 'converged' correction,
1716 // our best chance of 'locking on' is to apply whatever
1717 // correction we compute.
1718 if (!t.done_correction) {
1719 t.target_optical_rotation_correction = correction;
1720 PSVR_INFO("RECORRECTING");
1721 }
1722
1723 // only correct when we are stable
1724 if (t.avg_optical_correction < PSVR_CORRECTION_THRESH) {
1725 t.target_optical_rotation_correction = correction;
1726 t.done_correction = true;
1727 PSVR_INFO("LOCKED");
1728 t.max_correction = PSVR_FAST_CORRECTION;
1729 t.bad_correction_count = 0;
1730 }
1731 if (t.avg_optical_correction > PSVR_BAD_CORRECTION_THRESH) {
1732 t.bad_correction_count++;
1733 }
1734
1735 if (t.bad_correction_count > PSVR_MAX_BAD_CORR) {
1736 t.max_correction = PSVR_SLOW_CORRECTION;
1737 t.target_optical_rotation_correction =
1738 t.target_optical_rotation_correction.slerp(t.max_correction, correction);
1739 t.bad_correction_count = 0;
1740 PSVR_INFO("TOO MANY BAD CORRECTIONS. DRIFTED?");
1741 }
1742
1743 std::vector<match_data_t> resolved;
1744 for (uint32_t i = 0; i < solved.size(); i++) {
1745 resolved.push_back(solved[i]);
1746 }
1747 solved.clear();
1748 model_center_transform = solve_with_imu(t, &resolved, &predicted_pose, &solved, PSVR_SEARCH_RADIUS);
1749 }
1750
1751 // move our applied correction towards the
1752 // target correction, rather than applying it
1753 // immediately to smooth things out.
1754
1755 t.optical_rotation_correction =
1756 t.optical_rotation_correction.slerp(t.max_correction, t.target_optical_rotation_correction);
1757
1758#ifdef PSVR_DUMP_FOR_OFFLINE_ANALYSIS
1759 fprintf(t.dump_file, "\n");
1760 for (uint32_t i = 0; i < solved.size(); i++) {
1761 fprintf(t.dump_file, "S,%" PRIu64 ",%f,%f,%f\n", xf->source_sequence, solved[i].position.x(),
1762 solved[i].position.y(), solved[i].position.z());
1763 }
1764 fprintf(t.dump_file, "\n");
1765
1766 /*std::vector<match_data_t> alt_solved;
1767 Eigen::Matrix4f f_pose = solve_with_imu(
1768 t, &t.match_vertices, &t.last_vertices, &alt_solved, 10.0f);
1769
1770 for (uint32_t i = 0; i < alt_solved.size(); i++) {
1771 fprintf(t.dump_file, "A,%" PRIu64 ",%f,%f,%f\n",
1772 xf->source_sequence, alt_solved[i].position.x(),
1773 alt_solved[i].position.y(),
1774 alt_solved[i].position.z());
1775 }
1776 fprintf(t.dump_file, "\n");*/
1777
1778#endif
1779
1780 // store our last vertices for continuity
1781 // matching
1782 t.last_vertices.clear();
1783 for (uint32_t i = 0; i < solved.size(); i++) {
1784 t.last_vertices.push_back(solved[i]);
1785 }
1786
1787 if (!t.last_vertices.empty()) {
1788 filter_update(&t.last_vertices, t.track_filters, dt / 1000.0f);
1789 }
1790
1791
1792 Eigen::Vector4f position = model_center_transform.col(3);
1793 pose_filter_update(&position, &t.pose_filter, dt);
1794
1795
1796
1797 // NOTE: we will apply our rotation when we get imu
1798 // data - applying our calculated optical
1799 // correction at this time. We can update our
1800 // position now.
1801 Eigen::Vector4f filtered_pose;
1802 pose_filter_predict(&filtered_pose, &t.pose_filter, dt / 1000.0f);
1803
1804
1805 t.optical.pos.x = filtered_pose.x();
1806 t.optical.pos.y = filtered_pose.y();
1807 t.optical.pos.z = filtered_pose.z();
1808
1809 t.last_frame = xf->source_sequence;
1810
1811 t.debug.submit();
1812
1813 xrt_frame_reference(&xf, NULL);
1814}
1815
1816static void
1817run(TrackerPSVR &t)
1818{
1819 struct xrt_frame *frame = NULL;
1820
1821 os_thread_helper_lock(&t.oth);
1822
1823 while (os_thread_helper_is_running_locked(&t.oth)) {
1824
1825 // No data
1826 if (!t.has_imu && t.frame == NULL) {
1827 os_thread_helper_wait_locked(&t.oth);
1828
1829 /*
1830 * Loop back to the top to check if we should stop,
1831 * also handles spurious wakeups by re-checking the
1832 * condition in the if case. Essentially two loops.
1833 */
1834 continue;
1835 }
1836
1837 // Take a reference on the current frame
1838
1839 frame = t.frame;
1840 t.frame = NULL;
1841
1842 // Unlock the mutex when we do the work.
1843 os_thread_helper_unlock(&t.oth);
1844
1845 process(t, frame);
1846
1847 // Have to lock it again.
1848 os_thread_helper_lock(&t.oth);
1849 }
1850
1851 os_thread_helper_unlock(&t.oth);
1852}
1853
1854static void
1855get_pose(TrackerPSVR &t, timepoint_ns when_ns, struct xrt_space_relation *out_relation)
1856{
1857 os_thread_helper_lock(&t.oth);
1858
1859 // Don't do anything if we have stopped.
1860 if (!os_thread_helper_is_running_locked(&t.oth)) {
1861 os_thread_helper_unlock(&t.oth);
1862 return;
1863 }
1864
1865 out_relation->pose.position = t.optical.pos;
1866 out_relation->pose.orientation = t.optical.rot;
1867
1868 //! @todo assuming that orientation is actually
1869 //! currently tracked.
1870 out_relation->relation_flags = (enum xrt_space_relation_flags)(XRT_SPACE_RELATION_POSITION_VALID_BIT |
1871 XRT_SPACE_RELATION_POSITION_TRACKED_BIT |
1872 XRT_SPACE_RELATION_ORIENTATION_VALID_BIT);
1873
1874 if (t.done_correction) {
1875 out_relation->relation_flags = (enum xrt_space_relation_flags)(
1876 out_relation->relation_flags | XRT_SPACE_RELATION_ORIENTATION_TRACKED_BIT);
1877 }
1878 os_thread_helper_unlock(&t.oth);
1879}
1880
/*!
 * Handle an incoming IMU sample: update the 3DOF fusion and publish
 * the optically-corrected orientation.
 *
 * Synchronized with the tracker thread via t.oth.
 */
static void
imu_data(TrackerPSVR &t, timepoint_ns timestamp_ns, struct xrt_tracking_sample *sample)
{
	os_thread_helper_lock(&t.oth);

	// Don't do anything if we have stopped.
	if (!os_thread_helper_is_running_locked(&t.oth)) {
		os_thread_helper_unlock(&t.oth);
		return;
	}
	// Skip fusion on the very first sample - with last_imu still 0
	// there is no previous timestamp to integrate from.
	if (t.last_imu != 0) {
		// Update 3DOF fusion
		m_imu_3dof_update(&t.fusion.imu_3dof, timestamp_ns, &sample->accel_m_s2, &sample->gyro_rad_secs);
	}

	// apply our optical correction to imu rotation
	// data

	Eigen::Quaternionf corrected_rot_q =
	    t.optical_rotation_correction * Eigen::Quaternionf(t.fusion.imu_3dof.rot.w, t.fusion.imu_3dof.rot.x,
	                                                       t.fusion.imu_3dof.rot.y, t.fusion.imu_3dof.rot.z);

	// Keep the corrected rotation around as a matrix too, for use
	// by the optical solver.
	Eigen::Matrix4f corrected_rot = Eigen::Matrix4f::Identity();
	corrected_rot.block(0, 0, 3, 3) = corrected_rot_q.toRotationMatrix();

	t.corrected_imu_rotation = corrected_rot;

	// Once the optical correction has locked on, additionally apply
	// the axis alignment rotation to the published orientation.
	if (t.done_correction) {
		corrected_rot_q = t.axis_align_rot * corrected_rot_q;
	}

	t.optical.rot.x = corrected_rot_q.x();
	t.optical.rot.y = corrected_rot_q.y();
	t.optical.rot.z = corrected_rot_q.z();
	t.optical.rot.w = corrected_rot_q.w();

	t.last_imu = timestamp_ns;

#ifdef PSVR_DUMP_IMU_FOR_OFFLINE_ANALYSIS
	fprintf(t.dump_file, "I,%" PRIu64 ", %f,%f,%f,%f\n\n", timestamp_ns, t.fusion.rot.x, t.fusion.rot.y,
	        t.fusion.rot.z, t.fusion.rot.w);

	fprintf(t.dump_file, "C,%" PRIu64 ", %f,%f,%f,%f\n\n", timestamp_ns, corrected_rot_q.x(), corrected_rot_q.y(),
	        corrected_rot_q.z(), corrected_rot_q.w());
#endif


	os_thread_helper_unlock(&t.oth);
}
1930
1931static void
1932frame(TrackerPSVR &t, struct xrt_frame *xf)
1933{
1934 os_thread_helper_lock(&t.oth);
1935
1936 // Don't do anything if we have stopped.
1937 if (!os_thread_helper_is_running_locked(&t.oth)) {
1938 os_thread_helper_unlock(&t.oth);
1939 return;
1940 }
1941
1942 xrt_frame_reference(&t.frame, xf);
1943
1944 // Wake up the thread.
1945 os_thread_helper_signal_locked(&t.oth);
1946
1947 os_thread_helper_unlock(&t.oth);
1948}
1949
/*!
 * Stop the tracker thread and block until it has exited.
 */
static void
break_apart(TrackerPSVR &t)
{
	os_thread_helper_stop_and_wait(&t.oth);
}
1955
1956} // namespace xrt::auxiliary::tracking::psvr
1957
1958using xrt::auxiliary::tracking::psvr::TrackerPSVR;
1959
1960/*
1961 *
1962 * C wrapper functions.
1963 *
1964 */
1965
1966extern "C" void
1967t_psvr_push_imu(struct xrt_tracked_psvr *xtvr, timepoint_ns timestamp_ns, struct xrt_tracking_sample *sample)
1968{
1969 auto &t = *container_of(xtvr, TrackerPSVR, base);
1970 imu_data(t, timestamp_ns, sample);
1971}
1972
1973extern "C" void
1974t_psvr_get_tracked_pose(struct xrt_tracked_psvr *xtvr, timepoint_ns when_ns, struct xrt_space_relation *out_relation)
1975{
1976 auto &t = *container_of(xtvr, TrackerPSVR, base);
1977 get_pose(t, when_ns, out_relation);
1978}
1979
//! Placeholder destroy hook - actual teardown happens in
//! t_psvr_node_destroy().
extern "C" void
t_psvr_fake_destroy(struct xrt_tracked_psvr *xtvr)
{
	// Not the real destroy function
	(void)xtvr;
}
1987
1988extern "C" void
1989t_psvr_sink_push_frame(struct xrt_frame_sink *xsink, struct xrt_frame *xf)
1990{
1991 auto &t = *container_of(xsink, TrackerPSVR, sink);
1992 frame(t, xf);
1993}
1994
1995extern "C" void
1996t_psvr_node_break_apart(struct xrt_frame_node *node)
1997{
1998 auto &t = *container_of(node, TrackerPSVR, node);
1999 break_apart(t);
2000}
2001
2002extern "C" void
2003t_psvr_node_destroy(struct xrt_frame_node *node)
2004{
2005 auto *t_ptr = container_of(node, TrackerPSVR, node);
2006
2007 os_thread_helper_destroy(&t_ptr->oth);
2008
2009 m_imu_3dof_close(&t_ptr->fusion.imu_3dof);
2010
2011 delete t_ptr;
2012}
2013
2014extern "C" void *
2015t_psvr_run(void *ptr)
2016{
2017 auto &t = *(TrackerPSVR *)ptr;
2018 run(t);
2019 return NULL;
2020}
2021
2022
2023/*
2024 *
2025 * Exported functions.
2026 *
2027 */
2028
2029extern "C" int
2030t_psvr_start(struct xrt_tracked_psvr *xtvr)
2031{
2032 auto &t = *container_of(xtvr, TrackerPSVR, base);
2033 int ret;
2034
2035
2036 ret = os_thread_helper_start(&t.oth, t_psvr_run, &t);
2037 if (ret != 0) {
2038 return ret;
2039 }
2040
2041 return ret;
2042}
2043
2044extern "C" int
2045t_psvr_create(struct xrt_frame_context *xfctx,
2046 struct t_stereo_camera_calibration *data,
2047 struct xrt_tracked_psvr **out_xtvr,
2048 struct xrt_frame_sink **out_sink)
2049{
2050 auto &t = *(new TrackerPSVR());
2051 t.log_level = debug_get_log_option_psvr_log();
2052
2053 PSVR_INFO("%s", __func__);
2054 int ret;
2055
2056 using xrt::auxiliary::tracking::psvr::init_filter;
2057
2058 for (uint32_t i = 0; i < PSVR_NUM_LEDS; i++) {
2059 init_filter(t.track_filters[i], PSVR_BLOB_PROCESS_NOISE, PSVR_BLOB_MEASUREMENT_NOISE, 1.0f);
2060 }
2061
2062 init_filter(t.pose_filter, PSVR_POSE_PROCESS_NOISE, PSVR_POSE_MEASUREMENT_NOISE, 1.0f);
2063
2064 StereoRectificationMaps rectify(data);
2065 t.view[0].populate_from_calib(data->view[0], rectify.view[0].rectify);
2066 t.view[1].populate_from_calib(data->view[1], rectify.view[1].rectify);
2067 t.disparity_to_depth = rectify.disparity_to_depth_mat;
2068 StereoCameraCalibrationWrapper wrapped(data);
2069 t.r_cam_rotation = wrapped.camera_rotation_mat;
2070 t.r_cam_translation = wrapped.camera_translation_mat;
2071 t.calibrated = true;
2072
2073
2074
2075 // clang-format off
2076 cv::SimpleBlobDetector::Params blob_params;
2077 blob_params.filterByArea = false;
2078 blob_params.filterByConvexity = false;
2079 blob_params.filterByInertia = false;
2080 blob_params.filterByColor = true;
2081 blob_params.blobColor = 255; // 0 or 255 - color comes from binarized image?
2082 blob_params.minArea = 1;
2083 blob_params.maxArea = 1000;
2084 blob_params.maxThreshold = 51; // using a wide threshold span slows things down bigtime
2085 blob_params.minThreshold = 50;
2086 blob_params.thresholdStep = 1;
2087 blob_params.minDistBetweenBlobs = 5;
2088 blob_params.minRepeatability = 1; // need this to avoid error?
2089 // clang-format on
2090
2091 t.sbd = cv::SimpleBlobDetector::create(blob_params);
2092
2093 t.target_optical_rotation_correction = Eigen::Quaternionf(1.0f, 0.0f, 0.0f, 0.0f);
2094 t.optical_rotation_correction = Eigen::Quaternionf(1.0f, 0.0f, 0.0f, 0.0f);
2095 t.axis_align_rot = Eigen::Quaternionf(1.0f, 0.0f, 0.0f, 0.0f);
2096 t.corrected_imu_rotation = Eigen::Matrix4f().Identity();
2097 t.avg_optical_correction = 10.0f; // initialise to a high value, so we
2098 // can converge to a low one.
2099 t.max_correction = PSVR_FAST_CORRECTION;
2100 t.bad_correction_count = 0;
2101
2102 Eigen::Quaternionf align(Eigen::AngleAxis<float>(-M_PI / 2, Eigen::Vector3f(0.0f, 0.0f, 1.0f)));
2103 Eigen::Quaternionf align2(Eigen::AngleAxis<float>(M_PI, Eigen::Vector3f(0.0f, 1.0f, 0.0f)));
2104
2105 t.axis_align_rot = align2; // * align;
2106
2107 t.last_optical_model = 0;
2108
2109 // offset our models center of rotation
2110 create_model(t);
2111 create_match_list(t);
2112
2113 t.base.get_tracked_pose = t_psvr_get_tracked_pose;
2114 t.base.push_imu = t_psvr_push_imu;
2115 t.base.destroy = t_psvr_fake_destroy;
2116 t.sink.push_frame = t_psvr_sink_push_frame;
2117 t.node.break_apart = t_psvr_node_break_apart;
2118 t.node.destroy = t_psvr_node_destroy;
2119
2120 ret = os_thread_helper_init(&t.oth);
2121 if (ret != 0) {
2122 delete (&t);
2123 return ret;
2124 }
2125
2126 t.fusion.pos.x = 0.0f;
2127 t.fusion.pos.y = 0.0f;
2128 t.fusion.pos.z = 0.0f;
2129
2130 m_imu_3dof_init(&t.fusion.imu_3dof, M_IMU_3DOF_USE_GRAVITY_DUR_20MS);
2131
2132 t.fusion.imu_3dof.rot.x = 0.0f;
2133 t.fusion.imu_3dof.rot.y = 0.0f;
2134 t.fusion.imu_3dof.rot.z = 0.0f;
2135 t.fusion.imu_3dof.rot.w = 1.0f;
2136
2137 xrt_frame_context_add(xfctx, &t.node);
2138
2139 // Everything is safe, now setup the variable tracking.
2140 u_var_add_root(&t, "PSVR Tracker", true);
2141 u_var_add_log_level(&t, &t.log_level, "Log level");
2142 u_var_add_sink_debug(&t, &t.debug.usd, "Debug");
2143
2144 *out_sink = &t.sink;
2145 *out_xtvr = &t.base;
2146
2147#ifdef PSVR_DUMP_FOR_OFFLINE_ANALYSIS
2148 t.dump_file = fopen("/tmp/psvr_dump.txt", "w");
2149#endif
2150
2151 return 0;
2152}