// The open source OpenXR runtime.
1// Copyright 2020-2024, Collabora, Ltd.
2// SPDX-License-Identifier: BSL-1.0
3/*!
4 * @file
5 * @brief Handling functions called from generated dispatch function.
6 * @author Pete Black <pblack@collabora.com>
7 * @author Jakob Bornecrantz <jakob@collabora.com>
8 * @author Korcan Hussein <korcan.hussein@collabora.com>
9 * @ingroup ipc_server
10 */
11
12#include "util/u_misc.h"
13#include "util/u_handles.h"
14#include "util/u_pretty_print.h"
15#include "util/u_visibility_mask.h"
16#include "util/u_trace_marker.h"
17
18#include "server/ipc_server.h"
19#include "ipc_server_generated.h"
20#include "xrt/xrt_device.h"
21#include "xrt/xrt_results.h"
22
23#ifdef XRT_GRAPHICS_SYNC_HANDLE_IS_FD
24#include <unistd.h>
25#endif
26
27
28/*
29 *
30 * Helper functions.
31 *
32 */
33
34static xrt_result_t
35validate_device_id(volatile struct ipc_client_state *ics, int64_t device_id, struct xrt_device **out_device)
36{
37 if (device_id >= XRT_SYSTEM_MAX_DEVICES) {
38 IPC_ERROR(ics->server, "Invalid device ID (device_id >= XRT_SYSTEM_MAX_DEVICES)!");
39 return XRT_ERROR_IPC_FAILURE;
40 }
41
42 struct xrt_device *xdev = ics->server->idevs[device_id].xdev;
43 if (xdev == NULL) {
44 IPC_ERROR(ics->server, "Invalid device ID (xdev is NULL)!");
45 return XRT_ERROR_IPC_FAILURE;
46 }
47
48 *out_device = xdev;
49
50 return XRT_SUCCESS;
51}
52
/*!
 * Look up @p device_id with validate_device_id() and store the result in
 * @p out_device, returning the error from the calling function on failure.
 *
 * NOTE(review): this expands to `return res;` where res is an xrt_result_t,
 * so it must only be used inside functions that return xrt_result_t — in a
 * bool-returning function a nonzero error code would read as true.
 */
#define GET_XDEV_OR_RETURN(ics, device_id, out_device)                                                                 \
	do {                                                                                                           \
		xrt_result_t res = validate_device_id(ics, device_id, &out_device);                                    \
		if (res != XRT_SUCCESS) {                                                                              \
			return res;                                                                                    \
		}                                                                                                      \
	} while (0)
60
61
62static xrt_result_t
63validate_origin_id(volatile struct ipc_client_state *ics, int64_t origin_id, struct xrt_tracking_origin **out_xtrack)
64{
65 if (origin_id >= XRT_SYSTEM_MAX_DEVICES) {
66 IPC_ERROR(ics->server, "Invalid origin ID (origin_id >= XRT_SYSTEM_MAX_DEVICES)!");
67 return XRT_ERROR_IPC_FAILURE;
68 }
69
70 struct xrt_tracking_origin *xtrack = ics->server->xtracks[origin_id];
71 if (xtrack == NULL) {
72 IPC_ERROR(ics->server, "Invalid origin ID (xtrack is NULL)!");
73 return XRT_ERROR_IPC_FAILURE;
74 }
75
76 *out_xtrack = xtrack;
77
78 return XRT_SUCCESS;
79}
80
81static xrt_result_t
82validate_swapchain_state(volatile struct ipc_client_state *ics, uint32_t *out_index)
83{
84 // Our handle is just the index for now.
85 uint32_t index = 0;
86 for (; index < IPC_MAX_CLIENT_SWAPCHAINS; index++) {
87 if (!ics->swapchain_data[index].active) {
88 break;
89 }
90 }
91
92 if (index >= IPC_MAX_CLIENT_SWAPCHAINS) {
93 IPC_ERROR(ics->server, "Too many swapchains!");
94 return XRT_ERROR_IPC_FAILURE;
95 }
96
97 *out_index = index;
98
99 return XRT_SUCCESS;
100}
101
/*!
 * Publish the new swapchain in slot @p index and cache the creation
 * parameters so later client queries can be answered from shared data.
 */
static void
set_swapchain_info(volatile struct ipc_client_state *ics,
                   uint32_t index,
                   const struct xrt_swapchain_create_info *info,
                   struct xrt_swapchain *xsc)
{
	ics->xscs[index] = xsc;
	ics->swapchain_data[index].active = true; // Marks the slot as taken.
	ics->swapchain_data[index].width = info->width;
	ics->swapchain_data[index].height = info->height;
	ics->swapchain_data[index].format = info->format;
	ics->swapchain_data[index].image_count = xsc->image_count;
}
115
116static xrt_result_t
117validate_reference_space_type(volatile struct ipc_client_state *ics, enum xrt_reference_space_type type)
118{
119 if ((uint32_t)type >= XRT_SPACE_REFERENCE_TYPE_COUNT) {
120 IPC_ERROR(ics->server, "Invalid reference space type %u", type);
121 return XRT_ERROR_IPC_FAILURE;
122 }
123
124 return XRT_SUCCESS;
125}
126
127static xrt_result_t
128validate_device_feature_type(volatile struct ipc_client_state *ics, enum xrt_device_feature_type type)
129{
130 if ((uint32_t)type >= XRT_DEVICE_FEATURE_MAX_ENUM) {
131 IPC_ERROR(ics->server, "Invalid device feature type %u", type);
132 return XRT_ERROR_FEATURE_NOT_SUPPORTED;
133 }
134
135 return XRT_SUCCESS;
136}
137
138
139static xrt_result_t
140validate_space_id(volatile struct ipc_client_state *ics, int64_t space_id, struct xrt_space **out_xspc)
141{
142 if (space_id < 0) {
143 return XRT_ERROR_IPC_FAILURE;
144 }
145
146 if (space_id >= IPC_MAX_CLIENT_SPACES) {
147 return XRT_ERROR_IPC_FAILURE;
148 }
149
150 if (ics->xspcs[space_id] == NULL) {
151 return XRT_ERROR_IPC_FAILURE;
152 }
153
154 *out_xspc = (struct xrt_space *)ics->xspcs[space_id];
155
156 return XRT_SUCCESS;
157}
158
159static xrt_result_t
160get_new_space_id(volatile struct ipc_client_state *ics, uint32_t *out_id)
161{
162 // Our handle is just the index for now.
163 uint32_t index = 0;
164 for (; index < IPC_MAX_CLIENT_SPACES; index++) {
165 if (ics->xspcs[index] == NULL) {
166 break;
167 }
168 }
169
170 if (index >= IPC_MAX_CLIENT_SPACES) {
171 IPC_ERROR(ics->server, "Too many spaces!");
172 return XRT_ERROR_IPC_FAILURE;
173 }
174
175 *out_id = index;
176
177 return XRT_SUCCESS;
178}
179
180static xrt_result_t
181track_space(volatile struct ipc_client_state *ics, struct xrt_space *xs, uint32_t *out_id)
182{
183 uint32_t id = UINT32_MAX;
184 xrt_result_t xret = get_new_space_id(ics, &id);
185 if (xret != XRT_SUCCESS) {
186 return xret;
187 }
188
189 // Remove volatile
190 struct xrt_space **xs_ptr = (struct xrt_space **)&ics->xspcs[id];
191 xrt_space_reference(xs_ptr, xs);
192
193 *out_id = id;
194
195 return XRT_SUCCESS;
196}
197
198
/*!
 * Reserve slots for a new local and local-floor space: one slot each in the
 * space overseer's per-client localspace/localfloorspace arrays, and one
 * slot each in this client's xspcs table.
 *
 * Only indices are recorded in @p ics here; no spaces are created and no
 * references are taken — create_localspace() does that afterwards.
 */
static xrt_result_t
get_new_localspace_id(volatile struct ipc_client_state *ics, uint32_t *out_local_id, uint32_t *out_local_floor_id)
{
	// Our handle is just the index for now.
	uint32_t index = 0;
	// First free slot in the overseer's localspace array.
	for (; index < IPC_MAX_CLIENT_SPACES; index++) {
		if (ics->server->xso->localspace[index] == NULL) {
			break;
		}
	}

	if (index >= IPC_MAX_CLIENT_SPACES) {
		IPC_ERROR(ics->server, "Too many localspaces!");
		return XRT_ERROR_IPC_FAILURE;
	}

	ics->local_space_overseer_index = index;
	index = 0;
	// First free slot in this client's space table, for the local space.
	for (; index < IPC_MAX_CLIENT_SPACES; index++) {
		if (ics->xspcs[index] == NULL) {
			break;
		}
	}

	if (index >= IPC_MAX_CLIENT_SPACES) {
		IPC_ERROR(ics->server, "Too many spaces!");
		return XRT_ERROR_IPC_FAILURE;
	}

	ics->local_space_index = index;
	*out_local_id = index;

	// First free slot in the overseer's localfloorspace array.
	for (index = 0; index < IPC_MAX_CLIENT_SPACES; index++) {
		if (ics->server->xso->localfloorspace[index] == NULL) {
			break;
		}
	}

	if (index >= IPC_MAX_CLIENT_SPACES) {
		IPC_ERROR(ics->server, "Too many localfloorspaces!");
		return XRT_ERROR_IPC_FAILURE;
	}

	ics->local_floor_space_overseer_index = index;

	/*
	 * Second free slot in the client's space table: the slot picked for
	 * the local space above is still NULL (nothing stored yet), so it
	 * must be excluded explicitly.
	 */
	for (index = 0; index < IPC_MAX_CLIENT_SPACES; index++) {
		if (ics->xspcs[index] == NULL && index != ics->local_space_index) {
			break;
		}
	}

	if (index >= IPC_MAX_CLIENT_SPACES) {
		IPC_ERROR(ics->server, "Too many spaces!");
		return XRT_ERROR_IPC_FAILURE;
	}

	ics->local_floor_space_index = index;
	*out_local_floor_id = index;

	return XRT_SUCCESS;
}
260
/*!
 * Create the per-client local and local-floor spaces via the space overseer
 * and store references to them in this client's space table.
 *
 * Returns the client-facing IDs (slot indices) for both spaces.
 */
static xrt_result_t
create_localspace(volatile struct ipc_client_state *ics, uint32_t *out_local_id, uint32_t *out_local_floor_id)
{
	// Reserve slots first; this fills in the *_index fields on ics.
	uint32_t local_id = UINT32_MAX;
	uint32_t local_floor_id = UINT32_MAX;
	xrt_result_t xret = get_new_localspace_id(ics, &local_id, &local_floor_id);
	if (xret != XRT_SUCCESS) {
		return xret;
	}

	struct xrt_space_overseer *xso = ics->server->xso;
	// Cast away volatile on this client's space table slots.
	struct xrt_space **xslocal_ptr = (struct xrt_space **)&ics->xspcs[local_id];
	struct xrt_space **xslocalfloor_ptr = (struct xrt_space **)&ics->xspcs[local_floor_id];

	// The overseer creates both spaces into the slots reserved above.
	xret = xrt_space_overseer_create_local_space(xso, &xso->localspace[ics->local_space_overseer_index],
	                                             &xso->localfloorspace[ics->local_floor_space_overseer_index]);
	if (xret != XRT_SUCCESS) {
		// NOTE(review): the indices reserved on ics stay set on this path —
		// presumably harmless since the slots remain NULL; verify.
		return xret;
	}
	// Take this client's references to the newly created spaces.
	xrt_space_reference(xslocal_ptr, xso->localspace[ics->local_space_overseer_index]);
	xrt_space_reference(xslocalfloor_ptr, xso->localfloorspace[ics->local_floor_space_overseer_index]);
	*out_local_id = local_id;
	*out_local_floor_id = local_floor_id;

	return XRT_SUCCESS;
}
287
288XRT_MAYBE_UNUSED xrt_result_t
289get_new_future_id(volatile struct ipc_client_state *ics, uint32_t *out_id)
290{
291 // Our handle is just the index for now.
292 uint32_t index = 0;
293 for (; index < IPC_MAX_CLIENT_FUTURES; ++index) {
294 if (ics->xfts[index] == NULL) {
295 break;
296 }
297 }
298
299 if (index >= IPC_MAX_CLIENT_FUTURES) {
300 IPC_ERROR(ics->server, "Too many futures!");
301 return XRT_ERROR_IPC_FAILURE;
302 }
303
304 *out_id = index;
305
306 return XRT_SUCCESS;
307}
308
309static inline xrt_result_t
310validate_future_id(volatile struct ipc_client_state *ics, uint32_t future_id, struct xrt_future **out_xft)
311{
312 if (future_id >= IPC_MAX_CLIENT_FUTURES) {
313 return XRT_ERROR_IPC_FAILURE;
314 }
315
316 if (ics->xfts[future_id] == NULL) {
317 return XRT_ERROR_IPC_FAILURE;
318 }
319
320 *out_xft = (struct xrt_future *)ics->xfts[future_id];
321 return (*out_xft != NULL) ? XRT_SUCCESS : XRT_ERROR_ALLOCATION;
322}
323
324static inline xrt_result_t
325release_future(volatile struct ipc_client_state *ics, uint32_t future_id)
326{
327 struct xrt_future *xft = NULL;
328 xrt_result_t xret = validate_future_id(ics, future_id, &xft);
329 if (xret != XRT_SUCCESS) {
330 return xret;
331 }
332 xrt_future_reference(&xft, NULL);
333 ics->xfts[future_id] = NULL;
334 return XRT_SUCCESS;
335}
336
337/*
338 *
339 * Handle functions.
340 *
341 */
342
343xrt_result_t
344ipc_handle_instance_get_shm_fd(volatile struct ipc_client_state *ics,
345 uint32_t max_handle_capacity,
346 xrt_shmem_handle_t *out_handles,
347 uint32_t *out_handle_count)
348{
349 IPC_TRACE_MARKER();
350
351 assert(max_handle_capacity >= 1);
352
353 out_handles[0] = get_ism_handle(ics);
354 *out_handle_count = 1;
355
356 return XRT_SUCCESS;
357}
358
359xrt_result_t
360ipc_handle_instance_describe_client(volatile struct ipc_client_state *ics,
361 const struct ipc_client_description *client_desc)
362{
363 ics->client_state.info = client_desc->info;
364 ics->client_state.pid = client_desc->pid;
365
366 struct u_pp_sink_stack_only sink;
367 u_pp_delegate_t dg = u_pp_sink_stack_only_init(&sink);
368
369#define P(...) u_pp(dg, __VA_ARGS__)
370#define PNT(...) u_pp(dg, "\n\t" __VA_ARGS__)
371#define PNTT(...) u_pp(dg, "\n\t\t" __VA_ARGS__)
372#define EXT(NAME) PNTT(#NAME ": %s", client_desc->info.NAME ? "true" : "false")
373
374 P("Client info:");
375 PNT("id: %u", ics->client_state.id);
376 PNT("application_name: '%s'", client_desc->info.application_name);
377 PNT("pid: %i", client_desc->pid);
378 PNT("extensions:");
379
380 EXT(ext_hand_tracking_enabled);
381 EXT(ext_hand_tracking_data_source_enabled);
382 EXT(ext_eye_gaze_interaction_enabled);
383 EXT(ext_future_enabled);
384 EXT(ext_hand_interaction_enabled);
385 EXT(htc_facial_tracking_enabled);
386 EXT(fb_body_tracking_enabled);
387 EXT(meta_body_tracking_full_body_enabled);
388 EXT(meta_body_tracking_calibration_enabled);
389 EXT(fb_face_tracking2_enabled);
390
391#undef EXT
392#undef PTT
393#undef PT
394#undef P
395
396 // Log the pretty message.
397 IPC_INFO(ics->server, "%s", sink.buffer);
398
399 return XRT_SUCCESS;
400}
401
402xrt_result_t
403ipc_handle_instance_is_system_available(volatile struct ipc_client_state *ics, bool *out_available)
404{
405 IPC_TRACE_MARKER();
406
407 xrt_result_t xret = XRT_SUCCESS;
408
409 struct ipc_server *s = ics->server;
410
411 os_mutex_lock(&s->global_state.lock);
412
413 xret = ipc_server_init_system_if_available_locked(s, ics, out_available);
414 IPC_CHK_WITH_GOTO(s, xret, "ipc_server_init_system_if_available_locked", cleanup);
415
416cleanup:
417 os_mutex_unlock(&s->global_state.lock);
418 return xret;
419}
420
421xrt_result_t
422ipc_handle_system_compositor_get_info(volatile struct ipc_client_state *ics,
423 struct xrt_system_compositor_info *out_info)
424{
425 IPC_TRACE_MARKER();
426
427 *out_info = ics->server->xsysc->info;
428
429 return XRT_SUCCESS;
430}
431
/*!
 * Create the session (and native compositor) for this client and publish
 * the initial compositor state for it.
 */
xrt_result_t
ipc_handle_session_create(volatile struct ipc_client_state *ics,
                          const struct xrt_session_info *xsi,
                          bool create_native_compositor)
{
	IPC_TRACE_MARKER();

	struct xrt_session *xs = NULL;
	struct xrt_compositor_native *xcn = NULL;

	// Only one session per IPC client.
	if (ics->xs != NULL) {
		return XRT_ERROR_IPC_SESSION_ALREADY_CREATED;
	}

	// A native compositor is created regardless of what the app asked for.
	if (!create_native_compositor) {
		IPC_INFO(ics->server, "App asked for headless session, creating native compositor anyways");
	}

	xrt_result_t xret = xrt_system_create_session(ics->server->xsys, xsi, &xs, &xcn);
	if (xret != XRT_SUCCESS) {
		return xret;
	}

	// Remember overlay status and z-order for compositor state handling.
	ics->client_state.session_overlay = xsi->is_overlay;
	ics->client_state.z_order = xsi->z_order;

	ics->xs = xs;
	ics->xc = &xcn->base;

	// Push this client's initial visible/focused state and z-order to the
	// system compositor.
	xrt_syscomp_set_state(ics->server->xsysc, ics->xc, ics->client_state.session_visible,
	                      ics->client_state.session_focused);
	xrt_syscomp_set_z_order(ics->server->xsysc, ics->xc, ics->client_state.z_order);

	return XRT_SUCCESS;
}
467
468xrt_result_t
469ipc_handle_session_poll_events(volatile struct ipc_client_state *ics, union xrt_session_event *out_xse)
470{
471 // Have we created the session?
472 if (ics->xs == NULL) {
473 return XRT_ERROR_IPC_SESSION_NOT_CREATED;
474 }
475
476 return xrt_session_poll_events(ics->xs, out_xse);
477}
478
/*!
 * Begin the client's session, forwarding the extension flags the client
 * enabled down to the compositor.
 */
xrt_result_t
ipc_handle_session_begin(volatile struct ipc_client_state *ics)
{
	IPC_TRACE_MARKER();

	// Have we created the session?
	if (ics->xs == NULL) {
		return XRT_ERROR_IPC_SESSION_NOT_CREATED;
	}

	// Need to check both because begin session is handled by compositor.
	if (ics->xc == NULL) {
		return XRT_ERROR_IPC_COMPOSITOR_NOT_CREATED;
	}

	//! @todo Pass the view type down.
	// Mirror the per-extension flags from the client's description.
	const struct xrt_begin_session_info begin_session_info = {
	    .view_type = XRT_VIEW_TYPE_STEREO,
	    .ext_hand_tracking_enabled = ics->client_state.info.ext_hand_tracking_enabled,
	    .ext_hand_tracking_data_source_enabled = ics->client_state.info.ext_hand_tracking_data_source_enabled,
	    .ext_eye_gaze_interaction_enabled = ics->client_state.info.ext_eye_gaze_interaction_enabled,
	    .ext_future_enabled = ics->client_state.info.ext_future_enabled,
	    .ext_hand_interaction_enabled = ics->client_state.info.ext_hand_interaction_enabled,
	    .htc_facial_tracking_enabled = ics->client_state.info.htc_facial_tracking_enabled,
	    .fb_body_tracking_enabled = ics->client_state.info.fb_body_tracking_enabled,
	    .fb_face_tracking2_enabled = ics->client_state.info.fb_face_tracking2_enabled,
	    .meta_body_tracking_full_body_enabled = ics->client_state.info.meta_body_tracking_full_body_enabled,
	    .meta_body_tracking_calibration_enabled = ics->client_state.info.meta_body_tracking_calibration_enabled,
	};

	return xrt_comp_begin_session(ics->xc, &begin_session_info);
}
511
512xrt_result_t
513ipc_handle_session_end(volatile struct ipc_client_state *ics)
514{
515 IPC_TRACE_MARKER();
516
517 // Have we created the session?
518 if (ics->xs == NULL) {
519 return XRT_ERROR_IPC_SESSION_NOT_CREATED;
520 }
521
522 // Need to check both because end session is handled by compositor.
523 if (ics->xc == NULL) {
524 return XRT_ERROR_IPC_COMPOSITOR_NOT_CREATED;
525 }
526
527 return xrt_comp_end_session(ics->xc);
528}
529
530xrt_result_t
531ipc_handle_session_destroy(volatile struct ipc_client_state *ics)
532{
533 IPC_TRACE_MARKER();
534
535 // Have we created the session?
536 if (ics->xs == NULL) {
537 return XRT_ERROR_IPC_SESSION_NOT_CREATED;
538 }
539
540 ipc_server_client_destroy_session_and_compositor(ics);
541
542 return XRT_SUCCESS;
543}
544
/*!
 * Hand out client-facing IDs for all semantic spaces: root, view, stage and
 * unbounded are referenced from the overseer; local and local-floor are
 * created per client.
 *
 * An output ID of UINT32_MAX means that space is unavailable.
 */
xrt_result_t
ipc_handle_space_create_semantic_ids(volatile struct ipc_client_state *ics,
                                     uint32_t *out_root_id,
                                     uint32_t *out_view_id,
                                     uint32_t *out_local_id,
                                     uint32_t *out_local_floor_id,
                                     uint32_t *out_stage_id,
                                     uint32_t *out_unbounded_id)
{
	IPC_TRACE_MARKER();

	struct xrt_space_overseer *xso = ics->server->xso;

	/*
	 * For each semantic space the overseer exposes, take a reference into
	 * this client's space table and return the slot index as the ID.
	 * Failures are deliberately non-fatal; the ID stays UINT32_MAX.
	 */
#define CREATE(NAME)                                                                                                   \
	do {                                                                                                           \
		*out_##NAME##_id = UINT32_MAX;                                                                         \
		if (xso->semantic.NAME == NULL) {                                                                      \
			break;                                                                                         \
		}                                                                                                      \
		uint32_t id = 0;                                                                                       \
		xrt_result_t xret = track_space(ics, xso->semantic.NAME, &id);                                         \
		if (xret != XRT_SUCCESS) {                                                                             \
			break;                                                                                         \
		}                                                                                                      \
		*out_##NAME##_id = id;                                                                                 \
	} while (false)

	CREATE(root);
	CREATE(view);
	CREATE(stage);
	CREATE(unbounded);

#undef CREATE
	// Local and local-floor are created fresh for this client.
	return create_localspace(ics, out_local_id, out_local_floor_id);
}
580
581xrt_result_t
582ipc_handle_space_create_offset(volatile struct ipc_client_state *ics,
583 uint32_t parent_id,
584 const struct xrt_pose *offset,
585 uint32_t *out_space_id)
586{
587 IPC_TRACE_MARKER();
588
589 struct xrt_space_overseer *xso = ics->server->xso;
590
591 struct xrt_space *parent = NULL;
592 xrt_result_t xret = validate_space_id(ics, parent_id, &parent);
593 if (xret != XRT_SUCCESS) {
594 return xret;
595 }
596
597
598 struct xrt_space *xs = NULL;
599 xret = xrt_space_overseer_create_offset_space(xso, parent, offset, &xs);
600 if (xret != XRT_SUCCESS) {
601 return xret;
602 }
603
604 uint32_t space_id = UINT32_MAX;
605 xret = track_space(ics, xs, &space_id);
606
607 // Track space grabs a reference, or it errors and we don't want to keep it around.
608 xrt_space_reference(&xs, NULL);
609
610 if (xret != XRT_SUCCESS) {
611 return xret;
612 }
613
614 *out_space_id = space_id;
615
616 return XRT_SUCCESS;
617}
618
619xrt_result_t
620ipc_handle_space_create_pose(volatile struct ipc_client_state *ics,
621 uint32_t xdev_id,
622 enum xrt_input_name name,
623 uint32_t *out_space_id)
624{
625 IPC_TRACE_MARKER();
626
627 struct xrt_space_overseer *xso = ics->server->xso;
628
629 struct xrt_device *xdev = NULL;
630 GET_XDEV_OR_RETURN(ics, xdev_id, xdev);
631
632 struct xrt_space *xs = NULL;
633 xrt_result_t xret = xrt_space_overseer_create_pose_space(xso, xdev, name, &xs);
634 if (xret != XRT_SUCCESS) {
635 return xret;
636 }
637
638 uint32_t space_id = UINT32_MAX;
639 xret = track_space(ics, xs, &space_id);
640
641 // Track space grabs a reference, or it errors and we don't want to keep it around.
642 xrt_space_reference(&xs, NULL);
643
644 if (xret != XRT_SUCCESS) {
645 return xret;
646 }
647
648 *out_space_id = space_id;
649
650 return xret;
651}
652
653xrt_result_t
654ipc_handle_space_locate_space(volatile struct ipc_client_state *ics,
655 uint32_t base_space_id,
656 const struct xrt_pose *base_offset,
657 int64_t at_timestamp,
658 uint32_t space_id,
659 const struct xrt_pose *offset,
660 struct xrt_space_relation *out_relation)
661{
662 IPC_TRACE_MARKER();
663
664 struct xrt_space_overseer *xso = ics->server->xso;
665 struct xrt_space *base_space = NULL;
666 struct xrt_space *space = NULL;
667 xrt_result_t xret;
668
669 xret = validate_space_id(ics, base_space_id, &base_space);
670 if (xret != XRT_SUCCESS) {
671 U_LOG_E("Invalid base_space_id!");
672 return xret;
673 }
674
675 xret = validate_space_id(ics, space_id, &space);
676 if (xret != XRT_SUCCESS) {
677 U_LOG_E("Invalid space_id!");
678 return xret;
679 }
680
681 return xrt_space_overseer_locate_space( //
682 xso, //
683 base_space, //
684 base_offset, //
685 at_timestamp, //
686 space, //
687 offset, //
688 out_relation); //
689}
690
691xrt_result_t
692ipc_handle_space_locate_spaces(volatile struct ipc_client_state *ics,
693 uint32_t base_space_id,
694 const struct xrt_pose *base_offset,
695 uint32_t space_count,
696 int64_t at_timestamp)
697{
698 IPC_TRACE_MARKER();
699 struct ipc_message_channel *imc = (struct ipc_message_channel *)&ics->imc;
700 struct ipc_server *s = ics->server;
701
702 struct xrt_space_overseer *xso = ics->server->xso;
703 struct xrt_space *base_space = NULL;
704
705 struct xrt_space **xspaces = U_TYPED_ARRAY_CALLOC(struct xrt_space *, space_count);
706 struct xrt_pose *offsets = U_TYPED_ARRAY_CALLOC(struct xrt_pose, space_count);
707 struct xrt_space_relation *out_relations = U_TYPED_ARRAY_CALLOC(struct xrt_space_relation, space_count);
708
709 xrt_result_t xret;
710
711 os_mutex_lock(&ics->server->global_state.lock);
712
713 uint32_t *space_ids = U_TYPED_ARRAY_CALLOC(uint32_t, space_count);
714
715 // we need to send back whether allocation succeeded so the client knows whether to send more data
716 if (space_ids == NULL) {
717 xret = XRT_ERROR_ALLOCATION;
718 } else {
719 xret = XRT_SUCCESS;
720 }
721
722 xret = ipc_send(imc, &xret, sizeof(enum xrt_result));
723 if (xret != XRT_SUCCESS) {
724 IPC_ERROR(ics->server, "Failed to send spaces allocate result");
725 // Nothing else we can do
726 goto out_locate_spaces;
727 }
728
729 // only after sending the allocation result can we skip to the end in the allocation error case
730 if (space_ids == NULL) {
731 IPC_ERROR(s, "Failed to allocate space for receiving spaces ids");
732 xret = XRT_ERROR_ALLOCATION;
733 goto out_locate_spaces;
734 }
735
736 xret = ipc_receive(imc, space_ids, space_count * sizeof(uint32_t));
737 if (xret != XRT_SUCCESS) {
738 IPC_ERROR(ics->server, "Failed to receive spaces ids");
739 // assume early abort is possible, i.e. client will not send more data for this request
740 goto out_locate_spaces;
741 }
742
743 xret = ipc_receive(imc, offsets, space_count * sizeof(struct xrt_pose));
744 if (xret != XRT_SUCCESS) {
745 IPC_ERROR(ics->server, "Failed to receive spaces offsets");
746 // assume early abort is possible, i.e. client will not send more data for this request
747 goto out_locate_spaces;
748 }
749
750 xret = validate_space_id(ics, base_space_id, &base_space);
751 if (xret != XRT_SUCCESS) {
752 U_LOG_E("Invalid base_space_id %d!", base_space_id);
753 // Client is receiving out_relations now, it will get xret on this receive.
754 goto out_locate_spaces;
755 }
756
757 for (uint32_t i = 0; i < space_count; i++) {
758 if (space_ids[i] == UINT32_MAX) {
759 xspaces[i] = NULL;
760 } else {
761 xret = validate_space_id(ics, space_ids[i], &xspaces[i]);
762 if (xret != XRT_SUCCESS) {
763 U_LOG_E("Invalid space_id space_ids[%d] = %d!", i, space_ids[i]);
764 // Client is receiving out_relations now, it will get xret on this receive.
765 goto out_locate_spaces;
766 }
767 }
768 }
769 xret = xrt_space_overseer_locate_spaces( //
770 xso, //
771 base_space, //
772 base_offset, //
773 at_timestamp, //
774 xspaces, //
775 space_count, //
776 offsets, //
777 out_relations); //
778
779 xret = ipc_send(imc, out_relations, sizeof(struct xrt_space_relation) * space_count);
780 if (xret != XRT_SUCCESS) {
781 IPC_ERROR(ics->server, "Failed to send spaces relations");
782 // Nothing else we can do
783 goto out_locate_spaces;
784 }
785
786out_locate_spaces:
787 free(xspaces);
788 free(offsets);
789 free(out_relations);
790 os_mutex_unlock(&ics->server->global_state.lock);
791 return xret;
792}
793
794xrt_result_t
795ipc_handle_space_locate_device(volatile struct ipc_client_state *ics,
796 uint32_t base_space_id,
797 const struct xrt_pose *base_offset,
798 int64_t at_timestamp,
799 uint32_t xdev_id,
800 struct xrt_space_relation *out_relation)
801{
802 IPC_TRACE_MARKER();
803
804 struct xrt_space_overseer *xso = ics->server->xso;
805 struct xrt_space *base_space = NULL;
806 struct xrt_device *xdev = NULL;
807 xrt_result_t xret;
808
809 xret = validate_space_id(ics, base_space_id, &base_space);
810 if (xret != XRT_SUCCESS) {
811 U_LOG_E("Invalid base_space_id!");
812 return xret;
813 }
814
815 xret = validate_device_id(ics, xdev_id, &xdev);
816 if (xret != XRT_SUCCESS) {
817 U_LOG_E("Invalid device_id!");
818 return xret;
819 }
820
821 return xrt_space_overseer_locate_device( //
822 xso, //
823 base_space, //
824 base_offset, //
825 at_timestamp, //
826 xdev, //
827 out_relation); //
828}
829
830xrt_result_t
831ipc_handle_space_destroy(volatile struct ipc_client_state *ics, uint32_t space_id)
832{
833 struct xrt_space *xs = NULL;
834 xrt_result_t xret;
835
836 xret = validate_space_id(ics, space_id, &xs);
837 if (xret != XRT_SUCCESS) {
838 U_LOG_E("Invalid space_id!");
839 return xret;
840 }
841
842 assert(xs != NULL);
843 xs = NULL;
844
845 // Remove volatile
846 struct xrt_space **xs_ptr = (struct xrt_space **)&ics->xspcs[space_id];
847 xrt_space_reference(xs_ptr, NULL);
848
849 if (space_id == ics->local_space_index) {
850 struct xrt_space **xslocal_ptr =
851 (struct xrt_space **)&ics->server->xso->localspace[ics->local_space_overseer_index];
852 xrt_space_reference(xslocal_ptr, NULL);
853 }
854
855 if (space_id == ics->local_floor_space_index) {
856 struct xrt_space **xslocalfloor_ptr =
857 (struct xrt_space **)&ics->server->xso->localfloorspace[ics->local_floor_space_overseer_index];
858 xrt_space_reference(xslocalfloor_ptr, NULL);
859 }
860
861 return XRT_SUCCESS;
862}
863
864xrt_result_t
865ipc_handle_space_mark_ref_space_in_use(volatile struct ipc_client_state *ics, enum xrt_reference_space_type type)
866{
867 struct xrt_space_overseer *xso = ics->server->xso;
868 xrt_result_t xret;
869
870 xret = validate_reference_space_type(ics, type);
871 if (xret != XRT_SUCCESS) {
872 return XRT_ERROR_IPC_FAILURE;
873 }
874
875 // Is this space already used?
876 if (ics->ref_space_used[type]) {
877 IPC_ERROR(ics->server, "Space '%u' already used!", type);
878 return XRT_ERROR_IPC_FAILURE;
879 }
880
881 xret = xrt_space_overseer_ref_space_inc(xso, type);
882 if (xret != XRT_SUCCESS) {
883 IPC_ERROR(ics->server, "xrt_space_overseer_ref_space_inc failed");
884 return xret;
885 }
886
887 // Can now mark it as used.
888 ics->ref_space_used[type] = true;
889
890 return XRT_SUCCESS;
891}
892
893xrt_result_t
894ipc_handle_space_unmark_ref_space_in_use(volatile struct ipc_client_state *ics, enum xrt_reference_space_type type)
895{
896 struct xrt_space_overseer *xso = ics->server->xso;
897 xrt_result_t xret;
898
899 xret = validate_reference_space_type(ics, type);
900 if (xret != XRT_SUCCESS) {
901 return XRT_ERROR_IPC_FAILURE;
902 }
903
904 if (!ics->ref_space_used[type]) {
905 IPC_ERROR(ics->server, "Space '%u' not used!", type);
906 return XRT_ERROR_IPC_FAILURE;
907 }
908
909 xret = xrt_space_overseer_ref_space_dec(xso, type);
910 if (xret != XRT_SUCCESS) {
911 IPC_ERROR(ics->server, "xrt_space_overseer_ref_space_dec failed");
912 return xret;
913 }
914
915 // Now we can mark it as not used.
916 ics->ref_space_used[type] = false;
917
918 return XRT_SUCCESS;
919}
920
921xrt_result_t
922ipc_handle_space_recenter_local_spaces(volatile struct ipc_client_state *ics)
923{
924 struct xrt_space_overseer *xso = ics->server->xso;
925
926 return xrt_space_overseer_recenter_local_spaces(xso);
927}
928
929xrt_result_t
930ipc_handle_space_get_tracking_origin_offset(volatile struct ipc_client_state *ics,
931 uint32_t origin_id,
932 struct xrt_pose *out_offset)
933{
934 struct xrt_space_overseer *xso = ics->server->xso;
935 struct xrt_tracking_origin *xto;
936 xrt_result_t xret = validate_origin_id(ics, origin_id, &xto);
937 if (xret != XRT_SUCCESS) {
938 return xret;
939 }
940 return xrt_space_overseer_get_tracking_origin_offset(xso, xto, out_offset);
941}
942
943xrt_result_t
944ipc_handle_space_set_tracking_origin_offset(volatile struct ipc_client_state *ics,
945 uint32_t origin_id,
946 const struct xrt_pose *offset)
947{
948 struct xrt_space_overseer *xso = ics->server->xso;
949 struct xrt_tracking_origin *xto;
950 xrt_result_t xret = validate_origin_id(ics, origin_id, &xto);
951 if (xret != XRT_SUCCESS) {
952 return xret;
953 }
954 return xrt_space_overseer_set_tracking_origin_offset(xso, xto, offset);
955}
956
957xrt_result_t
958ipc_handle_space_get_reference_space_offset(volatile struct ipc_client_state *ics,
959 enum xrt_reference_space_type type,
960 struct xrt_pose *out_offset)
961{
962 struct xrt_space_overseer *xso = ics->server->xso;
963 return xrt_space_overseer_get_reference_space_offset(xso, type, out_offset);
964}
965
966xrt_result_t
967ipc_handle_space_set_reference_space_offset(volatile struct ipc_client_state *ics,
968 enum xrt_reference_space_type type,
969 const struct xrt_pose *offset)
970{
971 struct xrt_space_overseer *xso = ics->server->xso;
972 return xrt_space_overseer_set_reference_space_offset(xso, type, offset);
973}
974
975xrt_result_t
976ipc_handle_compositor_get_info(volatile struct ipc_client_state *ics, struct xrt_compositor_info *out_info)
977{
978 IPC_TRACE_MARKER();
979
980 if (ics->xc == NULL) {
981 return XRT_ERROR_IPC_SESSION_NOT_CREATED;
982 }
983
984 *out_info = ics->xc->info;
985
986 return XRT_SUCCESS;
987}
988
989xrt_result_t
990ipc_handle_compositor_predict_frame(volatile struct ipc_client_state *ics,
991 int64_t *out_frame_id,
992 int64_t *out_wake_up_time_ns,
993 int64_t *out_predicted_display_time_ns,
994 int64_t *out_predicted_display_period_ns)
995{
996 IPC_TRACE_MARKER();
997
998 if (ics->xc == NULL) {
999 return XRT_ERROR_IPC_SESSION_NOT_CREATED;
1000 }
1001
1002 /*
1003 * We use this to signal that the session has started, this is needed
1004 * to make this client/session active/visible/focused.
1005 */
1006 ipc_server_activate_session(ics);
1007
1008 int64_t gpu_time_ns = 0;
1009 return xrt_comp_predict_frame( //
1010 ics->xc, //
1011 out_frame_id, //
1012 out_wake_up_time_ns, //
1013 &gpu_time_ns, //
1014 out_predicted_display_time_ns, //
1015 out_predicted_display_period_ns); //
1016}
1017
1018xrt_result_t
1019ipc_handle_compositor_wait_woke(volatile struct ipc_client_state *ics, int64_t frame_id)
1020{
1021 IPC_TRACE_MARKER();
1022
1023 if (ics->xc == NULL) {
1024 return XRT_ERROR_IPC_SESSION_NOT_CREATED;
1025 }
1026
1027 return xrt_comp_mark_frame(ics->xc, frame_id, XRT_COMPOSITOR_FRAME_POINT_WOKE, os_monotonic_get_ns());
1028}
1029
1030xrt_result_t
1031ipc_handle_compositor_begin_frame(volatile struct ipc_client_state *ics, int64_t frame_id)
1032{
1033 IPC_TRACE_MARKER();
1034
1035 if (ics->xc == NULL) {
1036 return XRT_ERROR_IPC_SESSION_NOT_CREATED;
1037 }
1038
1039 return xrt_comp_begin_frame(ics->xc, frame_id);
1040}
1041
1042xrt_result_t
1043ipc_handle_compositor_discard_frame(volatile struct ipc_client_state *ics, int64_t frame_id)
1044{
1045 IPC_TRACE_MARKER();
1046
1047 if (ics->xc == NULL) {
1048 return XRT_ERROR_IPC_SESSION_NOT_CREATED;
1049 }
1050
1051 return xrt_comp_discard_frame(ics->xc, frame_id);
1052}
1053
1054xrt_result_t
1055ipc_handle_compositor_get_display_refresh_rate(volatile struct ipc_client_state *ics,
1056 float *out_display_refresh_rate_hz)
1057{
1058 IPC_TRACE_MARKER();
1059
1060 if (ics->xc == NULL) {
1061 return XRT_ERROR_IPC_SESSION_NOT_CREATED;
1062 }
1063
1064 return xrt_comp_get_display_refresh_rate(ics->xc, out_display_refresh_rate_hz);
1065}
1066
1067xrt_result_t
1068ipc_handle_compositor_request_display_refresh_rate(volatile struct ipc_client_state *ics, float display_refresh_rate_hz)
1069{
1070 IPC_TRACE_MARKER();
1071
1072 if (ics->xc == NULL) {
1073 return XRT_ERROR_IPC_SESSION_NOT_CREATED;
1074 }
1075
1076 return xrt_comp_request_display_refresh_rate(ics->xc, display_refresh_rate_hz);
1077}
1078
1079xrt_result_t
1080ipc_handle_compositor_set_performance_level(volatile struct ipc_client_state *ics,
1081 enum xrt_perf_domain domain,
1082 enum xrt_perf_set_level level)
1083{
1084 IPC_TRACE_MARKER();
1085
1086 if (ics->xc == NULL) {
1087 return XRT_ERROR_IPC_COMPOSITOR_NOT_CREATED;
1088 }
1089
1090 if (ics->xc->set_performance_level == NULL) {
1091 return XRT_ERROR_IPC_FAILURE;
1092 }
1093
1094 return xrt_comp_set_performance_level(ics->xc, domain, level);
1095}
1096
1097static bool
1098_update_projection_layer(struct xrt_compositor *xc,
1099 volatile struct ipc_client_state *ics,
1100 volatile struct ipc_layer_entry *layer,
1101 uint32_t i)
1102{
1103 // xdev
1104 uint32_t device_id = layer->xdev_id;
1105 struct xrt_device *xdev = NULL;
1106 GET_XDEV_OR_RETURN(ics, device_id, xdev);
1107
1108 if (xdev == NULL) {
1109 U_LOG_E("Invalid xdev for projection layer!");
1110 return false;
1111 }
1112
1113 uint32_t view_count = xdev->hmd->view_count;
1114
1115 struct xrt_swapchain *xcs[XRT_MAX_VIEWS];
1116 for (uint32_t k = 0; k < view_count; k++) {
1117 const uint32_t xsci = layer->swapchain_ids[k];
1118 xcs[k] = ics->xscs[xsci];
1119 if (xcs[k] == NULL) {
1120 U_LOG_E("Invalid swap chain for projection layer!");
1121 return false;
1122 }
1123 }
1124
1125
1126 // Cast away volatile.
1127 struct xrt_layer_data *data = (struct xrt_layer_data *)&layer->data;
1128
1129 xrt_comp_layer_projection(xc, xdev, xcs, data);
1130
1131 return true;
1132}
1133
1134static bool
1135_update_projection_layer_depth(struct xrt_compositor *xc,
1136 volatile struct ipc_client_state *ics,
1137 volatile struct ipc_layer_entry *layer,
1138 uint32_t i)
1139{
1140 // xdev
1141 uint32_t xdevi = layer->xdev_id;
1142
1143 // Cast away volatile.
1144 struct xrt_layer_data *data = (struct xrt_layer_data *)&layer->data;
1145
1146 struct xrt_device *xdev = NULL;
1147 GET_XDEV_OR_RETURN(ics, xdevi, xdev);
1148 if (xdev == NULL) {
1149 U_LOG_E("Invalid xdev for projection layer #%u!", i);
1150 return false;
1151 }
1152
1153 struct xrt_swapchain *xcs[XRT_MAX_VIEWS];
1154 struct xrt_swapchain *d_xcs[XRT_MAX_VIEWS];
1155
1156 for (uint32_t j = 0; j < data->view_count; j++) {
1157 int xsci = layer->swapchain_ids[j];
1158 int d_xsci = layer->swapchain_ids[j + data->view_count];
1159
1160 xcs[j] = ics->xscs[xsci];
1161 d_xcs[j] = ics->xscs[d_xsci];
1162 if (xcs[j] == NULL || d_xcs[j] == NULL) {
1163 U_LOG_E("Invalid swap chain for projection layer #%u!", i);
1164 return false;
1165 }
1166 }
1167
1168 xrt_comp_layer_projection_depth(xc, xdev, xcs, d_xcs, data);
1169
1170 return true;
1171}
1172
1173static bool
1174do_single(struct xrt_compositor *xc,
1175 volatile struct ipc_client_state *ics,
1176 volatile struct ipc_layer_entry *layer,
1177 uint32_t i,
1178 const char *name,
1179 struct xrt_device **out_xdev,
1180 struct xrt_swapchain **out_xcs,
1181 struct xrt_layer_data **out_data)
1182{
1183 uint32_t device_id = layer->xdev_id;
1184 uint32_t sci = layer->swapchain_ids[0];
1185
1186 struct xrt_device *xdev = NULL;
1187 GET_XDEV_OR_RETURN(ics, device_id, xdev);
1188 struct xrt_swapchain *xcs = ics->xscs[sci];
1189
1190 if (xcs == NULL) {
1191 U_LOG_E("Invalid swapchain for layer #%u, '%s'!", i, name);
1192 return false;
1193 }
1194
1195 if (xdev == NULL) {
1196 U_LOG_E("Invalid xdev for layer #%u, '%s'!", i, name);
1197 return false;
1198 }
1199
1200 // Cast away volatile.
1201 struct xrt_layer_data *data = (struct xrt_layer_data *)&layer->data;
1202
1203 *out_xdev = xdev;
1204 *out_xcs = xcs;
1205 *out_data = data;
1206
1207 return true;
1208}
1209
1210static bool
1211_update_quad_layer(struct xrt_compositor *xc,
1212 volatile struct ipc_client_state *ics,
1213 volatile struct ipc_layer_entry *layer,
1214 uint32_t i)
1215{
1216 struct xrt_device *xdev;
1217 struct xrt_swapchain *xcs;
1218 struct xrt_layer_data *data;
1219
1220 if (!do_single(xc, ics, layer, i, "quad", &xdev, &xcs, &data)) {
1221 return false;
1222 }
1223
1224 xrt_comp_layer_quad(xc, xdev, xcs, data);
1225
1226 return true;
1227}
1228
1229static bool
1230_update_cube_layer(struct xrt_compositor *xc,
1231 volatile struct ipc_client_state *ics,
1232 volatile struct ipc_layer_entry *layer,
1233 uint32_t i)
1234{
1235 struct xrt_device *xdev;
1236 struct xrt_swapchain *xcs;
1237 struct xrt_layer_data *data;
1238
1239 if (!do_single(xc, ics, layer, i, "cube", &xdev, &xcs, &data)) {
1240 return false;
1241 }
1242
1243 xrt_comp_layer_cube(xc, xdev, xcs, data);
1244
1245 return true;
1246}
1247
1248static bool
1249_update_cylinder_layer(struct xrt_compositor *xc,
1250 volatile struct ipc_client_state *ics,
1251 volatile struct ipc_layer_entry *layer,
1252 uint32_t i)
1253{
1254 struct xrt_device *xdev;
1255 struct xrt_swapchain *xcs;
1256 struct xrt_layer_data *data;
1257
1258 if (!do_single(xc, ics, layer, i, "cylinder", &xdev, &xcs, &data)) {
1259 return false;
1260 }
1261
1262 xrt_comp_layer_cylinder(xc, xdev, xcs, data);
1263
1264 return true;
1265}
1266
1267static bool
1268_update_equirect1_layer(struct xrt_compositor *xc,
1269 volatile struct ipc_client_state *ics,
1270 volatile struct ipc_layer_entry *layer,
1271 uint32_t i)
1272{
1273 struct xrt_device *xdev;
1274 struct xrt_swapchain *xcs;
1275 struct xrt_layer_data *data;
1276
1277 if (!do_single(xc, ics, layer, i, "equirect1", &xdev, &xcs, &data)) {
1278 return false;
1279 }
1280
1281 xrt_comp_layer_equirect1(xc, xdev, xcs, data);
1282
1283 return true;
1284}
1285
1286static bool
1287_update_equirect2_layer(struct xrt_compositor *xc,
1288 volatile struct ipc_client_state *ics,
1289 volatile struct ipc_layer_entry *layer,
1290 uint32_t i)
1291{
1292 struct xrt_device *xdev;
1293 struct xrt_swapchain *xcs;
1294 struct xrt_layer_data *data;
1295
1296 if (!do_single(xc, ics, layer, i, "equirect2", &xdev, &xcs, &data)) {
1297 return false;
1298 }
1299
1300 xrt_comp_layer_equirect2(xc, xdev, xcs, data);
1301
1302 return true;
1303}
1304
1305static bool
1306_update_passthrough_layer(struct xrt_compositor *xc,
1307 volatile struct ipc_client_state *ics,
1308 volatile struct ipc_layer_entry *layer,
1309 uint32_t i)
1310{
1311 // xdev
1312 uint32_t xdevi = layer->xdev_id;
1313
1314 struct xrt_device *xdev = NULL;
1315 GET_XDEV_OR_RETURN(ics, xdevi, xdev);
1316
1317 if (xdev == NULL) {
1318 U_LOG_E("Invalid xdev for passthrough layer #%u!", i);
1319 return false;
1320 }
1321
1322 // Cast away volatile.
1323 struct xrt_layer_data *data = (struct xrt_layer_data *)&layer->data;
1324
1325 xrt_comp_layer_passthrough(xc, xdev, data);
1326
1327 return true;
1328}
1329
1330static bool
1331_update_layers(volatile struct ipc_client_state *ics, struct xrt_compositor *xc, struct ipc_layer_slot *slot)
1332{
1333 IPC_TRACE_MARKER();
1334
1335 for (uint32_t i = 0; i < slot->layer_count; i++) {
1336 volatile struct ipc_layer_entry *layer = &slot->layers[i];
1337
1338 switch (layer->data.type) {
1339 case XRT_LAYER_PROJECTION:
1340 if (!_update_projection_layer(xc, ics, layer, i)) {
1341 return false;
1342 }
1343 break;
1344 case XRT_LAYER_PROJECTION_DEPTH:
1345 if (!_update_projection_layer_depth(xc, ics, layer, i)) {
1346 return false;
1347 }
1348 break;
1349 case XRT_LAYER_QUAD:
1350 if (!_update_quad_layer(xc, ics, layer, i)) {
1351 return false;
1352 }
1353 break;
1354 case XRT_LAYER_CUBE:
1355 if (!_update_cube_layer(xc, ics, layer, i)) {
1356 return false;
1357 }
1358 break;
1359 case XRT_LAYER_CYLINDER:
1360 if (!_update_cylinder_layer(xc, ics, layer, i)) {
1361 return false;
1362 }
1363 break;
1364 case XRT_LAYER_EQUIRECT1:
1365 if (!_update_equirect1_layer(xc, ics, layer, i)) {
1366 return false;
1367 }
1368 break;
1369 case XRT_LAYER_EQUIRECT2:
1370 if (!_update_equirect2_layer(xc, ics, layer, i)) {
1371 return false;
1372 }
1373 break;
1374 case XRT_LAYER_PASSTHROUGH:
1375 if (!_update_passthrough_layer(xc, ics, layer, i)) {
1376 return false;
1377 }
1378 break;
1379 default: U_LOG_E("Unhandled layer type '%i'!", layer->data.type); break;
1380 }
1381 }
1382
1383 return true;
1384}
1385
1386xrt_result_t
1387ipc_handle_compositor_layer_sync(volatile struct ipc_client_state *ics,
1388 uint32_t slot_id,
1389 uint32_t *out_free_slot_id,
1390 const xrt_graphics_sync_handle_t *handles,
1391 const uint32_t handle_count)
1392{
1393 IPC_TRACE_MARKER();
1394
1395 if (ics->xc == NULL) {
1396 return XRT_ERROR_IPC_SESSION_NOT_CREATED;
1397 }
1398
1399 struct ipc_shared_memory *ism = get_ism(ics);
1400 struct ipc_layer_slot *slot = &ism->slots[slot_id];
1401 xrt_graphics_sync_handle_t sync_handle = XRT_GRAPHICS_SYNC_HANDLE_INVALID;
1402
1403 // If we have one or more save the first handle.
1404 if (handle_count >= 1) {
1405 sync_handle = handles[0];
1406 }
1407
1408 // Free all sync handles after the first one.
1409 for (uint32_t i = 1; i < handle_count; i++) {
1410 // Checks for valid handle.
1411 xrt_graphics_sync_handle_t tmp = handles[i];
1412 u_graphics_sync_unref(&tmp);
1413 }
1414
1415 // Copy current slot data.
1416 struct ipc_layer_slot copy = *slot;
1417
1418
1419 /*
1420 * Transfer data to underlying compositor.
1421 */
1422
1423 xrt_comp_layer_begin(ics->xc, ©.data);
1424
1425 _update_layers(ics, ics->xc, ©);
1426
1427 xrt_comp_layer_commit(ics->xc, sync_handle);
1428
1429
1430 /*
1431 * Manage shared state.
1432 */
1433
1434 os_mutex_lock(&ics->server->global_state.lock);
1435
1436 *out_free_slot_id = (ics->server->current_slot_index + 1) % IPC_MAX_SLOTS;
1437 ics->server->current_slot_index = *out_free_slot_id;
1438
1439 os_mutex_unlock(&ics->server->global_state.lock);
1440
1441 return XRT_SUCCESS;
1442}
1443
1444xrt_result_t
1445ipc_handle_compositor_layer_sync_with_semaphore(volatile struct ipc_client_state *ics,
1446 uint32_t slot_id,
1447 uint32_t semaphore_id,
1448 uint64_t semaphore_value,
1449 uint32_t *out_free_slot_id)
1450{
1451 IPC_TRACE_MARKER();
1452
1453 if (ics->xc == NULL) {
1454 return XRT_ERROR_IPC_SESSION_NOT_CREATED;
1455 }
1456 if (semaphore_id >= IPC_MAX_CLIENT_SEMAPHORES) {
1457 IPC_ERROR(ics->server, "Invalid semaphore_id");
1458 return XRT_ERROR_IPC_FAILURE;
1459 }
1460 if (ics->xcsems[semaphore_id] == NULL) {
1461 IPC_ERROR(ics->server, "Semaphore of id %u not created!", semaphore_id);
1462 return XRT_ERROR_IPC_FAILURE;
1463 }
1464
1465 struct xrt_compositor_semaphore *xcsem = ics->xcsems[semaphore_id];
1466
1467 struct ipc_shared_memory *ism = get_ism(ics);
1468 struct ipc_layer_slot *slot = &ism->slots[slot_id];
1469
1470 // Copy current slot data.
1471 struct ipc_layer_slot copy = *slot;
1472
1473
1474
1475 /*
1476 * Transfer data to underlying compositor.
1477 */
1478
1479 xrt_comp_layer_begin(ics->xc, ©.data);
1480
1481 _update_layers(ics, ics->xc, ©);
1482
1483 xrt_comp_layer_commit_with_semaphore(ics->xc, xcsem, semaphore_value);
1484
1485
1486 /*
1487 * Manage shared state.
1488 */
1489
1490 os_mutex_lock(&ics->server->global_state.lock);
1491
1492 *out_free_slot_id = (ics->server->current_slot_index + 1) % IPC_MAX_SLOTS;
1493 ics->server->current_slot_index = *out_free_slot_id;
1494
1495 os_mutex_unlock(&ics->server->global_state.lock);
1496
1497 return XRT_SUCCESS;
1498}
1499
1500xrt_result_t
1501ipc_handle_compositor_create_passthrough(volatile struct ipc_client_state *ics,
1502 const struct xrt_passthrough_create_info *info)
1503{
1504 IPC_TRACE_MARKER();
1505
1506 if (ics->xc == NULL) {
1507 return XRT_ERROR_IPC_SESSION_NOT_CREATED;
1508 }
1509
1510 return xrt_comp_create_passthrough(ics->xc, info);
1511}
1512
1513xrt_result_t
1514ipc_handle_compositor_create_passthrough_layer(volatile struct ipc_client_state *ics,
1515 const struct xrt_passthrough_layer_create_info *info)
1516{
1517 IPC_TRACE_MARKER();
1518
1519 if (ics->xc == NULL) {
1520 return XRT_ERROR_IPC_SESSION_NOT_CREATED;
1521 }
1522
1523 return xrt_comp_create_passthrough_layer(ics->xc, info);
1524}
1525
1526xrt_result_t
1527ipc_handle_compositor_destroy_passthrough(volatile struct ipc_client_state *ics)
1528{
1529 IPC_TRACE_MARKER();
1530
1531 if (ics->xc == NULL) {
1532 return XRT_ERROR_IPC_SESSION_NOT_CREATED;
1533 }
1534
1535 xrt_comp_destroy_passthrough(ics->xc);
1536
1537 return XRT_SUCCESS;
1538}
1539
1540xrt_result_t
1541ipc_handle_compositor_set_thread_hint(volatile struct ipc_client_state *ics,
1542 enum xrt_thread_hint hint,
1543 uint32_t thread_id)
1544
1545{
1546 IPC_TRACE_MARKER();
1547
1548 if (ics->xc == NULL) {
1549 return XRT_ERROR_IPC_SESSION_NOT_CREATED;
1550 }
1551
1552 return xrt_comp_set_thread_hint(ics->xc, hint, thread_id);
1553}
1554
1555xrt_result_t
1556ipc_handle_compositor_get_reference_bounds_rect(volatile struct ipc_client_state *ics,
1557 enum xrt_reference_space_type reference_space_type,
1558 struct xrt_vec2 *bounds)
1559{
1560 IPC_TRACE_MARKER();
1561
1562 if (ics->xc == NULL) {
1563 return XRT_ERROR_IPC_SESSION_NOT_CREATED;
1564 }
1565
1566 return xrt_comp_get_reference_bounds_rect(ics->xc, reference_space_type, bounds);
1567}
1568
1569xrt_result_t
1570ipc_handle_system_get_clients(volatile struct ipc_client_state *_ics, struct ipc_client_list *list)
1571{
1572 struct ipc_server *s = _ics->server;
1573
1574 // Look client list.
1575 os_mutex_lock(&s->global_state.lock);
1576
1577 uint32_t count = 0;
1578 for (uint32_t i = 0; i < IPC_MAX_CLIENTS; i++) {
1579
1580 volatile struct ipc_client_state *ics = &s->threads[i].ics;
1581
1582 // Is this thread running?
1583 if (ics->server_thread_index < 0) {
1584 continue;
1585 }
1586
1587 list->ids[count++] = ics->client_state.id;
1588 }
1589
1590 list->id_count = count;
1591
1592 // Unlock now.
1593 os_mutex_unlock(&s->global_state.lock);
1594
1595 return XRT_SUCCESS;
1596}
1597
1598xrt_result_t
1599ipc_handle_system_get_properties(volatile struct ipc_client_state *_ics, struct xrt_system_properties *out_properties)
1600{
1601 struct ipc_server *s = _ics->server;
1602
1603 return ipc_server_get_system_properties(s, out_properties);
1604}
1605
1606xrt_result_t
1607ipc_handle_system_get_client_info(volatile struct ipc_client_state *_ics,
1608 uint32_t client_id,
1609 struct ipc_app_state *out_ias)
1610{
1611 struct ipc_server *s = _ics->server;
1612
1613 return ipc_server_get_client_app_state(s, client_id, out_ias);
1614}
1615
1616xrt_result_t
1617ipc_handle_system_set_primary_client(volatile struct ipc_client_state *_ics, uint32_t client_id)
1618{
1619 struct ipc_server *s = _ics->server;
1620
1621 IPC_INFO(s, "System setting active client to %d.", client_id);
1622
1623 return ipc_server_set_active_client(s, client_id);
1624}
1625
1626xrt_result_t
1627ipc_handle_system_set_focused_client(volatile struct ipc_client_state *ics, uint32_t client_id)
1628{
1629 IPC_INFO(ics->server, "UNIMPLEMENTED: system setting focused client to %d.", client_id);
1630
1631 return XRT_SUCCESS;
1632}
1633
1634xrt_result_t
1635ipc_handle_system_toggle_io_client(volatile struct ipc_client_state *_ics, uint32_t client_id)
1636{
1637 struct ipc_server *s = _ics->server;
1638
1639 IPC_INFO(s, "System toggling io for client %u.", client_id);
1640
1641 return ipc_server_toggle_io_client(s, client_id);
1642}
1643
1644xrt_result_t
1645ipc_handle_system_toggle_io_device(volatile struct ipc_client_state *ics, uint32_t device_id)
1646{
1647 if (device_id >= IPC_MAX_DEVICES) {
1648 return XRT_ERROR_IPC_FAILURE;
1649 }
1650
1651 struct ipc_device *idev = &ics->server->idevs[device_id];
1652
1653 idev->io_active = !idev->io_active;
1654
1655 return XRT_SUCCESS;
1656}
1657
1658xrt_result_t
1659ipc_handle_swapchain_get_properties(volatile struct ipc_client_state *ics,
1660 const struct xrt_swapchain_create_info *info,
1661 struct xrt_swapchain_create_properties *xsccp)
1662{
1663 IPC_TRACE_MARKER();
1664
1665 if (ics->xc == NULL) {
1666 return XRT_ERROR_IPC_SESSION_NOT_CREATED;
1667 }
1668
1669 return xrt_comp_get_swapchain_create_properties(ics->xc, info, xsccp);
1670}
1671
1672xrt_result_t
1673ipc_handle_swapchain_create(volatile struct ipc_client_state *ics,
1674 const struct xrt_swapchain_create_info *info,
1675 uint32_t *out_id,
1676 uint32_t *out_image_count,
1677 uint64_t *out_size,
1678 bool *out_use_dedicated_allocation,
1679 uint32_t max_handle_capacity,
1680 xrt_graphics_buffer_handle_t *out_handles,
1681 uint32_t *out_handle_count)
1682{
1683 IPC_TRACE_MARKER();
1684
1685 xrt_result_t xret = XRT_SUCCESS;
1686 uint32_t index = 0;
1687
1688 xret = validate_swapchain_state(ics, &index);
1689 if (xret != XRT_SUCCESS) {
1690 return xret;
1691 }
1692
1693 // Create the swapchain
1694 struct xrt_swapchain *xsc = NULL; // Has to be NULL.
1695 xret = xrt_comp_create_swapchain(ics->xc, info, &xsc);
1696 if (xret != XRT_SUCCESS) {
1697 if (xret == XRT_ERROR_SWAPCHAIN_FLAG_VALID_BUT_UNSUPPORTED) {
1698 IPC_WARN(ics->server,
1699 "xrt_comp_create_swapchain: Attempted to create valid, but unsupported swapchain");
1700 } else {
1701 IPC_ERROR(ics->server, "Error xrt_comp_create_swapchain failed!");
1702 }
1703 return xret;
1704 }
1705
1706 // It's now safe to increment the number of swapchains.
1707 ics->swapchain_count++;
1708
1709 IPC_TRACE(ics->server, "Created swapchain %d.", index);
1710
1711 set_swapchain_info(ics, index, info, xsc);
1712
1713 // return our result to the caller.
1714 struct xrt_swapchain_native *xscn = (struct xrt_swapchain_native *)xsc;
1715
1716 // Limit checking
1717 assert(xsc->image_count <= XRT_MAX_SWAPCHAIN_IMAGES);
1718 assert(xsc->image_count <= max_handle_capacity);
1719
1720 for (size_t i = 1; i < xsc->image_count; i++) {
1721 assert(xscn->images[0].size == xscn->images[i].size);
1722 assert(xscn->images[0].use_dedicated_allocation == xscn->images[i].use_dedicated_allocation);
1723 }
1724
1725 // Assuming all images allocated in the same swapchain have the same allocation requirements.
1726 *out_size = xscn->images[0].size;
1727 *out_use_dedicated_allocation = xscn->images[0].use_dedicated_allocation;
1728 *out_id = index;
1729 *out_image_count = xsc->image_count;
1730
1731 // Setup the fds.
1732 *out_handle_count = xsc->image_count;
1733 for (size_t i = 0; i < xsc->image_count; i++) {
1734 out_handles[i] = xscn->images[i].handle;
1735 }
1736
1737 return XRT_SUCCESS;
1738}
1739
1740xrt_result_t
1741ipc_handle_swapchain_import(volatile struct ipc_client_state *ics,
1742 const struct xrt_swapchain_create_info *info,
1743 const struct ipc_arg_swapchain_from_native *args,
1744 uint32_t *out_id,
1745 const xrt_graphics_buffer_handle_t *handles,
1746 uint32_t handle_count)
1747{
1748 IPC_TRACE_MARKER();
1749
1750 xrt_result_t xret = XRT_SUCCESS;
1751 uint32_t index = 0;
1752
1753 xret = validate_swapchain_state(ics, &index);
1754 if (xret != XRT_SUCCESS) {
1755 return xret;
1756 }
1757
1758 struct xrt_image_native xins[XRT_MAX_SWAPCHAIN_IMAGES] = XRT_STRUCT_INIT;
1759 for (uint32_t i = 0; i < handle_count; i++) {
1760 xins[i].handle = handles[i];
1761 xins[i].size = args->sizes[i];
1762#if defined(XRT_GRAPHICS_BUFFER_HANDLE_IS_WIN32_HANDLE)
1763 // DXGI handles need to be dealt with differently, they are identified
1764 // by having their lower bit set to 1 during transfer
1765 if ((size_t)xins[i].handle & 1) {
1766 xins[i].handle = (HANDLE)((size_t)xins[i].handle - 1);
1767 xins[i].is_dxgi_handle = true;
1768 }
1769#endif
1770 }
1771
1772 // create the swapchain
1773 struct xrt_swapchain *xsc = NULL;
1774 xret = xrt_comp_import_swapchain(ics->xc, info, xins, handle_count, &xsc);
1775 if (xret != XRT_SUCCESS) {
1776 return xret;
1777 }
1778
1779 // It's now safe to increment the number of swapchains.
1780 ics->swapchain_count++;
1781
1782 IPC_TRACE(ics->server, "Created swapchain %d.", index);
1783
1784 set_swapchain_info(ics, index, info, xsc);
1785 *out_id = index;
1786
1787 return XRT_SUCCESS;
1788}
1789
1790xrt_result_t
1791ipc_handle_swapchain_wait_image(volatile struct ipc_client_state *ics, uint32_t id, int64_t timeout_ns, uint32_t index)
1792{
1793 if (ics->xc == NULL) {
1794 return XRT_ERROR_IPC_SESSION_NOT_CREATED;
1795 }
1796
1797 //! @todo Look up the index.
1798 uint32_t sc_index = id;
1799 struct xrt_swapchain *xsc = ics->xscs[sc_index];
1800
1801 return xrt_swapchain_wait_image(xsc, timeout_ns, index);
1802}
1803
1804xrt_result_t
1805ipc_handle_swapchain_acquire_image(volatile struct ipc_client_state *ics, uint32_t id, uint32_t *out_index)
1806{
1807 if (ics->xc == NULL) {
1808 return XRT_ERROR_IPC_SESSION_NOT_CREATED;
1809 }
1810
1811 //! @todo Look up the index.
1812 uint32_t sc_index = id;
1813 struct xrt_swapchain *xsc = ics->xscs[sc_index];
1814
1815 xrt_swapchain_acquire_image(xsc, out_index);
1816
1817 return XRT_SUCCESS;
1818}
1819
1820xrt_result_t
1821ipc_handle_swapchain_release_image(volatile struct ipc_client_state *ics, uint32_t id, uint32_t index)
1822{
1823 if (ics->xc == NULL) {
1824 return XRT_ERROR_IPC_SESSION_NOT_CREATED;
1825 }
1826
1827 //! @todo Look up the index.
1828 uint32_t sc_index = id;
1829 struct xrt_swapchain *xsc = ics->xscs[sc_index];
1830
1831 xrt_swapchain_release_image(xsc, index);
1832
1833 return XRT_SUCCESS;
1834}
1835
1836xrt_result_t
1837ipc_handle_swapchain_destroy(volatile struct ipc_client_state *ics, uint32_t id)
1838{
1839 if (ics->xc == NULL) {
1840 return XRT_ERROR_IPC_SESSION_NOT_CREATED;
1841 }
1842
1843 ics->swapchain_count--;
1844
1845 // Drop our reference, does NULL checking. Cast away volatile.
1846 xrt_swapchain_reference((struct xrt_swapchain **)&ics->xscs[id], NULL);
1847 ics->swapchain_data[id].active = false;
1848
1849 return XRT_SUCCESS;
1850}
1851
1852
1853/*
1854 *
1855 * Compositor semaphore function..
1856 *
1857 */
1858
1859xrt_result_t
1860ipc_handle_compositor_semaphore_create(volatile struct ipc_client_state *ics,
1861 uint32_t *out_id,
1862 uint32_t max_handle_count,
1863 xrt_graphics_sync_handle_t *out_handles,
1864 uint32_t *out_handle_count)
1865{
1866 xrt_result_t xret;
1867
1868 if (ics->xc == NULL) {
1869 return XRT_ERROR_IPC_SESSION_NOT_CREATED;
1870 }
1871
1872 int id = 0;
1873 for (; id < IPC_MAX_CLIENT_SEMAPHORES; id++) {
1874 if (ics->xcsems[id] == NULL) {
1875 break;
1876 }
1877 }
1878
1879 if (id == IPC_MAX_CLIENT_SEMAPHORES) {
1880 IPC_ERROR(ics->server, "Too many compositor semaphores alive!");
1881 return XRT_ERROR_IPC_FAILURE;
1882 }
1883
1884 struct xrt_compositor_semaphore *xcsem = NULL;
1885 xrt_graphics_sync_handle_t handle = XRT_GRAPHICS_SYNC_HANDLE_INVALID;
1886
1887 xret = xrt_comp_create_semaphore(ics->xc, &handle, &xcsem);
1888 if (xret != XRT_SUCCESS) {
1889 IPC_ERROR(ics->server, "Failed to create compositor semaphore!");
1890 return xret;
1891 }
1892
1893 // Set it directly, no need to use reference here.
1894 ics->xcsems[id] = xcsem;
1895
1896 // Set out parameters.
1897 *out_id = id;
1898 out_handles[0] = handle;
1899 *out_handle_count = 1;
1900
1901 return XRT_SUCCESS;
1902}
1903
1904xrt_result_t
1905ipc_handle_compositor_semaphore_destroy(volatile struct ipc_client_state *ics, uint32_t id)
1906{
1907 if (ics->xc == NULL) {
1908 return XRT_ERROR_IPC_SESSION_NOT_CREATED;
1909 }
1910
1911 if (ics->xcsems[id] == NULL) {
1912 IPC_ERROR(ics->server, "Client tried to delete non-existent compositor semaphore!");
1913 return XRT_ERROR_IPC_FAILURE;
1914 }
1915
1916 ics->compositor_semaphore_count--;
1917
1918 // Drop our reference, does NULL checking. Cast away volatile.
1919 xrt_compositor_semaphore_reference((struct xrt_compositor_semaphore **)&ics->xcsems[id], NULL);
1920
1921 return XRT_SUCCESS;
1922}
1923
1924
1925/*
1926 *
1927 * Device functions.
1928 *
1929 */
1930
xrt_result_t
ipc_handle_device_update_input(volatile struct ipc_client_state *ics, uint32_t id)
{
	// To make the code a bit more readable.
	uint32_t device_id = id;
	struct ipc_shared_memory *ism = get_ism(ics);
	struct ipc_device *idev = get_idev(ics, device_id);
	struct xrt_device *xdev = idev->xdev;
	struct ipc_shared_device *isdev = &ism->isdevs[device_id];

	// Update inputs.
	xrt_result_t xret = xrt_device_update_inputs(xdev);
	if (xret != XRT_SUCCESS) {
		IPC_ERROR(ics->server, "Failed to update input");
		return xret;
	}

	// Copy data into the shared memory, into this device's slice of the
	// shared input array.
	struct xrt_input *src = xdev->inputs;
	struct xrt_input *dst = &ism->inputs[isdev->first_input_index];
	size_t size = sizeof(struct xrt_input) * isdev->input_count;

	// Only expose real input values when IO is enabled both for this
	// client and for this device.
	bool io_active = ics->io_active && idev->io_active;
	if (io_active) {
		memcpy(dst, src, size);
	} else {
		// IO toggled off: zero everything the client can see, but
		// keep the input names so the layout stays recognisable.
		memset(dst, 0, size);

		for (uint32_t i = 0; i < isdev->input_count; i++) {
			dst[i].name = src[i].name;

			// Special case the rotation of the head.
			if (dst[i].name == XRT_INPUT_GENERIC_HEAD_POSE) {
				dst[i].active = src[i].active;
			}
		}
	}

	// Reply.
	return XRT_SUCCESS;
}
1972
1973static struct xrt_input *
1974find_input(volatile struct ipc_client_state *ics, uint32_t device_id, enum xrt_input_name name)
1975{
1976 struct ipc_shared_memory *ism = get_ism(ics);
1977 struct ipc_shared_device *isdev = &ism->isdevs[device_id];
1978 struct xrt_input *io = &ism->inputs[isdev->first_input_index];
1979
1980 for (uint32_t i = 0; i < isdev->input_count; i++) {
1981 if (io[i].name == name) {
1982 return &io[i];
1983 }
1984 }
1985
1986 return NULL;
1987}
1988
xrt_result_t
ipc_handle_device_get_tracked_pose(volatile struct ipc_client_state *ics,
                                   uint32_t id,
                                   enum xrt_input_name name,
                                   int64_t at_timestamp,
                                   struct xrt_space_relation *out_relation)
{
	// To make the code a bit more readable.
	uint32_t device_id = id;
	struct ipc_device *isdev = &ics->server->idevs[device_id];
	struct xrt_device *xdev = isdev->xdev;

	// Find the input in the shared memory for this device.
	struct xrt_input *input = find_input(ics, device_id, name);
	if (input == NULL) {
		return XRT_ERROR_IPC_FAILURE;
	}

	// Special case the headpose: it stays available even when IO is
	// toggled off for this client or device.
	bool disabled = (!isdev->io_active || !ics->io_active) && name != XRT_INPUT_GENERIC_HEAD_POSE;
	bool active_on_client = input->active;

	// We have been disabled but the client hasn't called update yet, so it
	// still believes the input is active: hand back a zeroed relation.
	if (disabled && active_on_client) {
		U_ZERO(out_relation);
		return XRT_SUCCESS;
	}

	// Either disabled and already known to the client, or simply inactive.
	if (disabled || !active_on_client) {
		return XRT_ERROR_POSE_NOT_ACTIVE;
	}

	// Get the pose from the device itself.
	return xrt_device_get_tracked_pose(xdev, name, at_timestamp, out_relation);
}
2024
2025xrt_result_t
2026ipc_handle_device_get_hand_tracking(volatile struct ipc_client_state *ics,
2027 uint32_t id,
2028 enum xrt_input_name name,
2029 int64_t at_timestamp,
2030 struct xrt_hand_joint_set *out_value,
2031 int64_t *out_timestamp)
2032{
2033
2034 // To make the code a bit more readable.
2035 uint32_t device_id = id;
2036 struct xrt_device *xdev = NULL;
2037 GET_XDEV_OR_RETURN(ics, device_id, xdev);
2038
2039 // Get the pose.
2040 return xrt_device_get_hand_tracking(xdev, name, at_timestamp, out_value, out_timestamp);
2041}
2042
xrt_result_t
ipc_handle_device_get_view_poses(volatile struct ipc_client_state *ics,
                                 uint32_t id,
                                 const struct xrt_vec3 *fallback_eye_relation,
                                 int64_t at_timestamp_ns,
                                 enum xrt_view_type view_type,
                                 uint32_t view_count)
{
	// Reply and variable-length data go over the raw message channel, so
	// the send order below must match exactly what the client reads.
	struct ipc_message_channel *imc = (struct ipc_message_channel *)&ics->imc;
	struct ipc_device_get_view_poses_reply reply = XRT_STRUCT_INIT;
	struct ipc_server *s = ics->server;
	xrt_result_t xret;

	// To make the code a bit more readable.
	uint32_t device_id = id;
	struct xrt_device *xdev = NULL;
	GET_XDEV_OR_RETURN(ics, device_id, xdev);


	if (view_count == 0 || view_count > IPC_MAX_RAW_VIEWS) {
		IPC_ERROR(s, "Client asked for zero or too many views! (%u)", view_count);

		reply.result = XRT_ERROR_IPC_FAILURE;
		// Send the full reply, the client expects it.
		return ipc_send(imc, &reply, sizeof(reply));
	}

	// Data to get.
	struct xrt_fov fovs[IPC_MAX_RAW_VIEWS];
	struct xrt_pose poses[IPC_MAX_RAW_VIEWS];

	reply.result = xrt_device_get_view_poses( //
	    xdev,                                 //
	    fallback_eye_relation,                //
	    at_timestamp_ns,                      //
	    view_type,                            //
	    view_count,                           //
	    &reply.head_relation,                 //
	    fovs,                                 //
	    poses);                               //

	/*
	 * This isn't really needed, but demonstrates the server sending the
	 * length back in the reply, a common pattern for other functions.
	 */
	reply.view_count = view_count;

	/*
	 * Send the reply first isn't required for functions in general, but it
	 * will need to match what the client expects. This demonstrates the
	 * server sending the length back in the reply, a common pattern for
	 * other functions.
	 */
	xret = ipc_send(imc, &reply, sizeof(reply));
	if (xret != XRT_SUCCESS) {
		IPC_ERROR(s, "Failed to send reply!");
		return xret;
	}

	// Send the fovs that we got.
	xret = ipc_send(imc, fovs, sizeof(struct xrt_fov) * view_count);
	if (xret != XRT_SUCCESS) {
		IPC_ERROR(s, "Failed to send fovs!");
		return xret;
	}

	// And finally the poses.
	xret = ipc_send(imc, poses, sizeof(struct xrt_pose) * view_count);
	if (xret != XRT_SUCCESS) {
		IPC_ERROR(s, "Failed to send poses!");
		return xret;
	}

	return XRT_SUCCESS;
}
2118
2119xrt_result_t
2120ipc_handle_device_get_view_poses_2(volatile struct ipc_client_state *ics,
2121 uint32_t id,
2122 const struct xrt_vec3 *default_eye_relation,
2123 int64_t at_timestamp_ns,
2124 enum xrt_view_type view_type,
2125 uint32_t view_count,
2126 struct ipc_info_get_view_poses_2 *out_info)
2127{
2128 // To make the code a bit more readable.
2129 uint32_t device_id = id;
2130 struct xrt_device *xdev = NULL;
2131 GET_XDEV_OR_RETURN(ics, device_id, xdev);
2132
2133 return xrt_device_get_view_poses( //
2134 xdev, //
2135 default_eye_relation, //
2136 at_timestamp_ns, //
2137 view_type, //
2138 view_count, //
2139 &out_info->head_relation, //
2140 out_info->fovs, //
2141 out_info->poses); //
2142}
2143
2144xrt_result_t
2145ipc_handle_device_compute_distortion(volatile struct ipc_client_state *ics,
2146 uint32_t id,
2147 uint32_t view,
2148 float u,
2149 float v,
2150 struct xrt_uv_triplet *out_triplet)
2151{
2152 // To make the code a bit more readable.
2153 uint32_t device_id = id;
2154 struct xrt_device *xdev = NULL;
2155 GET_XDEV_OR_RETURN(ics, device_id, xdev);
2156
2157 return xrt_device_compute_distortion(xdev, view, u, v, out_triplet);
2158}
2159
xrt_result_t
ipc_handle_device_begin_plane_detection_ext(volatile struct ipc_client_state *ics,
                                            uint32_t id,
                                            uint64_t plane_detection_id,
                                            uint64_t *out_plane_detection_id)
{
	// To make the code a bit more readable.
	uint32_t device_id = id;
	struct xrt_device *xdev = NULL;
	GET_XDEV_OR_RETURN(ics, device_id, xdev);

	// Worst case we track one more detection than currently.
	uint64_t new_count = ics->plane_detection_count + 1;

	// Grow the per-client tracking arrays if needed.
	// NOTE(review): this grows one element per call (O(n^2) overall) and
	// U_ARRAY_REALLOC_OR_FREE's behaviour on allocation failure is not
	// checked here — confirm whether OOM needs explicit handling.
	if (new_count > ics->plane_detection_size) {
		IPC_TRACE(ics->server, "Plane detections tracking size: %u -> %u", (uint32_t)ics->plane_detection_count,
		          (uint32_t)new_count);

		U_ARRAY_REALLOC_OR_FREE(ics->plane_detection_ids, uint64_t, new_count);
		U_ARRAY_REALLOC_OR_FREE(ics->plane_detection_xdev, struct xrt_device *, new_count);
		ics->plane_detection_size = new_count;
	}

	// The begin-info arguments live in shared memory, only ids go over the wire.
	struct xrt_plane_detector_begin_info_ext *begin_info = &get_ism(ics)->plane_begin_info_ext;

	enum xrt_result xret =
	    xrt_device_begin_plane_detection_ext(xdev, begin_info, plane_detection_id, out_plane_detection_id);
	if (xret != XRT_SUCCESS) {
		IPC_TRACE(ics->server, "xrt_device_begin_plane_detection_ext error: %d", xret);
		return xret;
	}

	// An id of 0 means no detection was started, nothing to track.
	if (*out_plane_detection_id != 0) {
		uint64_t index = ics->plane_detection_count;
		ics->plane_detection_ids[index] = *out_plane_detection_id;
		ics->plane_detection_xdev[index] = xdev;
		ics->plane_detection_count = new_count;
	}

	return XRT_SUCCESS;
}
2200
xrt_result_t
ipc_handle_device_destroy_plane_detection_ext(volatile struct ipc_client_state *ics,
                                              uint32_t id,
                                              uint64_t plane_detection_id)
{
	// To make the code a bit more readable.
	uint32_t device_id = id;
	struct xrt_device *xdev = NULL;
	GET_XDEV_OR_RETURN(ics, device_id, xdev);

	// Tell the device first; bookkeeping is cleaned up below regardless
	// so a device-side error never leaves a stale tracking entry behind.
	enum xrt_result xret = xrt_device_destroy_plane_detection_ext(xdev, plane_detection_id);

	// Iterate through plane detection ids. Once found, move every item one slot to the left.
	bool compact_right = false;
	for (uint32_t i = 0; i < ics->plane_detection_count; i++) {
		if (ics->plane_detection_ids[i] == plane_detection_id) {
			compact_right = true;
		}
		if (compact_right && (i + 1) < ics->plane_detection_count) {
			ics->plane_detection_ids[i] = ics->plane_detection_ids[i + 1];
			ics->plane_detection_xdev[i] = ics->plane_detection_xdev[i + 1];
		}
	}
	// if the plane detection was correctly tracked compact_right should always be true
	if (compact_right) {
		ics->plane_detection_count -= 1;
	} else {
		IPC_ERROR(ics->server, "Destroyed plane detection that was not tracked");
	}

	// Report the device error only after the bookkeeping is done.
	if (xret != XRT_SUCCESS) {
		IPC_ERROR(ics->server, "xrt_device_destroy_plane_detection_ext error: %d", xret);
		return xret;
	}

	return XRT_SUCCESS;
}
2238
2239xrt_result_t
2240ipc_handle_device_get_plane_detection_state_ext(volatile struct ipc_client_state *ics,
2241 uint32_t id,
2242 uint64_t plane_detection_id,
2243 enum xrt_plane_detector_state_ext *out_state)
2244{
2245 // To make the code a bit more readable.
2246 uint32_t device_id = id;
2247 struct xrt_device *xdev = NULL;
2248 GET_XDEV_OR_RETURN(ics, device_id, xdev);
2249
2250 xrt_result_t xret = xrt_device_get_plane_detection_state_ext(xdev, plane_detection_id, out_state);
2251 if (xret != XRT_SUCCESS) {
2252 IPC_ERROR(ics->server, "xrt_device_get_plane_detection_state_ext error: %d", xret);
2253 return xret;
2254 }
2255
2256 return XRT_SUCCESS;
2257}
2258
2259xrt_result_t
2260ipc_handle_device_get_plane_detections_ext(volatile struct ipc_client_state *ics,
2261 uint32_t id,
2262 uint64_t plane_detection_id)
2263
2264{
2265 struct ipc_message_channel *imc = (struct ipc_message_channel *)&ics->imc;
2266 struct ipc_device_get_plane_detections_ext_reply reply = XRT_STRUCT_INIT;
2267 struct ipc_server *s = ics->server;
2268
2269 // To make the code a bit more readable.
2270 uint32_t device_id = id;
2271 struct xrt_device *xdev = NULL;
2272 GET_XDEV_OR_RETURN(ics, device_id, xdev);
2273
2274 struct xrt_plane_detections_ext out = {0};
2275
2276 xrt_result_t xret = xrt_device_get_plane_detections_ext(xdev, plane_detection_id, &out);
2277 if (xret != XRT_SUCCESS) {
2278 IPC_ERROR(ics->server, "xrt_device_get_plane_detections_ext error: %d", xret);
2279 // probably nothing allocated on error, but make sure
2280 xrt_plane_detections_ext_clear(&out);
2281 return xret;
2282 }
2283
2284 reply.result = XRT_SUCCESS;
2285 reply.location_size = out.location_count; // because we initialized to 0, now size == count
2286 reply.polygon_size = out.polygon_info_size;
2287 reply.vertex_size = out.vertex_size;
2288
2289 xret = ipc_send(imc, &reply, sizeof(reply));
2290 if (xret != XRT_SUCCESS) {
2291 IPC_ERROR(s, "Failed to send reply!");
2292 goto out;
2293 }
2294
2295 // send expected contents
2296
2297 if (out.location_count > 0) {
2298 xret =
2299 ipc_send(imc, out.locations, sizeof(struct xrt_plane_detector_location_ext) * out.location_count);
2300 if (xret != XRT_SUCCESS) {
2301 IPC_ERROR(s, "Failed to send locations!");
2302 goto out;
2303 }
2304
2305 xret = ipc_send(imc, out.polygon_info_start_index, sizeof(uint32_t) * out.location_count);
2306 if (xret != XRT_SUCCESS) {
2307 IPC_ERROR(s, "Failed to send locations!");
2308 goto out;
2309 }
2310 }
2311
2312 if (out.polygon_info_size > 0) {
2313 xret =
2314 ipc_send(imc, out.polygon_infos, sizeof(struct xrt_plane_polygon_info_ext) * out.polygon_info_size);
2315 if (xret != XRT_SUCCESS) {
2316 IPC_ERROR(s, "Failed to send polygon_infos!");
2317 goto out;
2318 }
2319 }
2320
2321 if (out.vertex_size > 0) {
2322 xret = ipc_send(imc, out.vertices, sizeof(struct xrt_vec2) * out.vertex_size);
2323 if (xret != XRT_SUCCESS) {
2324 IPC_ERROR(s, "Failed to send vertices!");
2325 goto out;
2326 }
2327 }
2328
2329out:
2330 xrt_plane_detections_ext_clear(&out);
2331 return xret;
2332}
2333
2334xrt_result_t
2335ipc_handle_device_get_presence(volatile struct ipc_client_state *ics, uint32_t id, bool *presence)
2336{
2337 struct xrt_device *xdev = NULL;
2338 GET_XDEV_OR_RETURN(ics, id, xdev);
2339 return xrt_device_get_presence(xdev, presence);
2340}
2341
2342xrt_result_t
2343ipc_handle_device_set_output(volatile struct ipc_client_state *ics,
2344 uint32_t id,
2345 enum xrt_output_name name,
2346 const struct xrt_output_value *value)
2347{
2348 // To make the code a bit more readable.
2349 uint32_t device_id = id;
2350 struct xrt_device *xdev = NULL;
2351 GET_XDEV_OR_RETURN(ics, device_id, xdev);
2352
2353 // Set the output.
2354 return xrt_device_set_output(xdev, name, value);
2355}
2356
2357xrt_result_t
2358ipc_handle_device_set_haptic_output(volatile struct ipc_client_state *ics,
2359 uint32_t id,
2360 enum xrt_output_name name,
2361 const struct ipc_pcm_haptic_buffer *buffer)
2362{
2363 IPC_TRACE_MARKER();
2364 struct ipc_message_channel *imc = (struct ipc_message_channel *)&ics->imc;
2365 struct ipc_server *s = ics->server;
2366
2367 xrt_result_t xret;
2368
2369 // To make the code a bit more readable.
2370 uint32_t device_id = id;
2371 struct xrt_device *xdev = NULL;
2372 GET_XDEV_OR_RETURN(ics, device_id, xdev);
2373
2374 os_mutex_lock(&ics->server->global_state.lock);
2375
2376 float *samples = U_TYPED_ARRAY_CALLOC(float, buffer->num_samples);
2377
2378 // send the allocation result
2379 xret = samples ? XRT_SUCCESS : XRT_ERROR_ALLOCATION;
2380 xret = ipc_send(imc, &xret, sizeof xret);
2381 if (xret != XRT_SUCCESS) {
2382 IPC_ERROR(ics->server, "Failed to send samples allocate result");
2383 goto set_haptic_output_end;
2384 }
2385
2386 if (!samples) {
2387 IPC_ERROR(s, "Failed to allocate samples for haptic output");
2388 xret = XRT_ERROR_ALLOCATION;
2389 goto set_haptic_output_end;
2390 }
2391
2392 xret = ipc_receive(imc, samples, sizeof(float) * buffer->num_samples);
2393 if (xret != XRT_SUCCESS) {
2394 IPC_ERROR(s, "Failed to receive samples");
2395 goto set_haptic_output_end;
2396 }
2397
2398 uint32_t samples_consumed;
2399 struct xrt_output_value value = {
2400 .type = XRT_OUTPUT_VALUE_TYPE_PCM_VIBRATION,
2401 .pcm_vibration =
2402 {
2403 .append = buffer->append,
2404 .buffer_size = buffer->num_samples,
2405 .sample_rate = buffer->sample_rate,
2406 .samples_consumed = &samples_consumed,
2407 .buffer = samples,
2408 },
2409 };
2410
2411 // Set the output.
2412 xrt_device_set_output(xdev, name, &value);
2413
2414 xret = ipc_send(imc, &samples_consumed, sizeof samples_consumed);
2415 if (xret != XRT_SUCCESS) {
2416 IPC_ERROR(ics->server, "Failed to send samples consumed");
2417 goto set_haptic_output_end;
2418 }
2419
2420 xret = XRT_SUCCESS;
2421
2422set_haptic_output_end:
2423 os_mutex_unlock(&ics->server->global_state.lock);
2424
2425 free(samples);
2426
2427 return xret;
2428}
2429
xrt_result_t
ipc_handle_device_get_output_limits(volatile struct ipc_client_state *ics,
                                    uint32_t id,
                                    struct xrt_output_limits *limits)
{
	// To make the code a bit more readable.
	uint32_t device_id = id;
	struct xrt_device *xdev = NULL;
	GET_XDEV_OR_RETURN(ics, device_id, xdev);

	// Get the output limits (was a copy-pasted "Set the output." comment).
	return xrt_device_get_output_limits(xdev, limits);
}
2443
/*
 * Sends the visibility mask in two messages: a reply carrying the mask size,
 * then the mask bytes themselves. Falls back to a default mask when the
 * device does not implement get_visibility_mask.
 */
xrt_result_t
ipc_handle_device_get_visibility_mask(volatile struct ipc_client_state *ics,
                                      uint32_t device_id,
                                      enum xrt_visibility_mask_type type,
                                      uint32_t view_index)
{
	struct ipc_message_channel *imc = (struct ipc_message_channel *)&ics->imc;
	struct ipc_device_get_visibility_mask_reply reply = XRT_STRUCT_INIT;
	struct ipc_server *s = ics->server;
	xrt_result_t xret;

	// @todo verify
	struct xrt_device *xdev = NULL;
	GET_XDEV_OR_RETURN(ics, device_id, xdev);
	struct xrt_visibility_mask *mask = NULL;
	if (xdev->get_visibility_mask) {
		xret = xrt_device_get_visibility_mask(xdev, type, view_index, &mask);
		if (xret != XRT_SUCCESS) {
			IPC_ERROR(s, "Failed to get visibility mask");
			return xret;
		}
	} else {
		// NOTE(review): xdev->hmd is dereferenced without a NULL check
		// and view_index is not bounds-checked against the fov array —
		// assumes the caller only asks HMDs with valid view indices; verify.
		struct xrt_fov fov = xdev->hmd->distortion.fov[view_index];
		u_visibility_mask_get_default(type, &fov, &mask);
	}

	// A NULL mask is reported to the client as a zero-sized mask.
	if (mask == NULL) {
		IPC_ERROR(s, "Failed to get visibility mask");
		reply.mask_size = 0;
	} else {
		reply.mask_size = xrt_visibility_mask_get_size(mask);
	}

	xret = ipc_send(imc, &reply, sizeof(reply));
	if (xret != XRT_SUCCESS) {
		IPC_ERROR(s, "Failed to send reply");
		goto out_free;
	}

	// When mask is NULL this sends zero bytes, matching mask_size above.
	xret = ipc_send(imc, mask, reply.mask_size);
	if (xret != XRT_SUCCESS) {
		IPC_ERROR(s, "Failed to send mask");
		goto out_free;
	}

out_free:
	free(mask);
	return xret;
}
2493
2494xrt_result_t
2495ipc_handle_device_is_form_factor_available(volatile struct ipc_client_state *ics,
2496 uint32_t id,
2497 enum xrt_form_factor form_factor,
2498 bool *out_available)
2499{
2500 // To make the code a bit more readable.
2501 uint32_t device_id = id;
2502 struct xrt_device *xdev = NULL;
2503 GET_XDEV_OR_RETURN(ics, device_id, xdev);
2504 *out_available = xrt_device_is_form_factor_available(xdev, form_factor);
2505 return XRT_SUCCESS;
2506}
2507
2508xrt_result_t
2509ipc_handle_system_devices_get_roles(volatile struct ipc_client_state *ics, struct xrt_system_roles *out_roles)
2510{
2511 return xrt_system_devices_get_roles(ics->server->xsysd, out_roles);
2512}
2513
2514xrt_result_t
2515ipc_handle_system_devices_begin_feature(volatile struct ipc_client_state *ics, enum xrt_device_feature_type type)
2516{
2517 struct xrt_system_devices *xsysd = ics->server->xsysd;
2518 xrt_result_t xret;
2519
2520 xret = validate_device_feature_type(ics, type);
2521 if (xret != XRT_SUCCESS) {
2522 return XRT_ERROR_IPC_FAILURE;
2523 }
2524
2525 // Is this feature already used?
2526 if (ics->device_feature_used[type]) {
2527 IPC_ERROR(ics->server, "feature '%u' already used!", type);
2528 return XRT_ERROR_IPC_FAILURE;
2529 }
2530
2531 xret = xrt_system_devices_feature_inc(xsysd, type);
2532 if (xret != XRT_SUCCESS) {
2533 IPC_ERROR(ics->server, "xrt_system_devices_feature_inc failed");
2534 return xret;
2535 }
2536
2537 // Can now mark it as used.
2538 ics->device_feature_used[type] = true;
2539
2540 return XRT_SUCCESS;
2541}
2542
2543xrt_result_t
2544ipc_handle_system_devices_end_feature(volatile struct ipc_client_state *ics, enum xrt_device_feature_type type)
2545{
2546 struct xrt_system_devices *xsysd = ics->server->xsysd;
2547 xrt_result_t xret;
2548
2549 xret = validate_device_feature_type(ics, type);
2550 if (xret != XRT_SUCCESS) {
2551 return XRT_ERROR_IPC_FAILURE;
2552 }
2553
2554 if (!ics->device_feature_used[type]) {
2555 IPC_ERROR(ics->server, "feature '%u' not used!", type);
2556 return XRT_ERROR_IPC_FAILURE;
2557 }
2558
2559 xret = xrt_system_devices_feature_dec(xsysd, type);
2560 if (xret != XRT_SUCCESS) {
2561 IPC_ERROR(ics->server, "xrt_system_devices_feature_dec failed");
2562 return xret;
2563 }
2564
2565 // Now we can mark it as not used.
2566 ics->device_feature_used[type] = false;
2567
2568 return XRT_SUCCESS;
2569}
2570
2571xrt_result_t
2572ipc_handle_device_get_face_tracking(volatile struct ipc_client_state *ics,
2573 uint32_t id,
2574 enum xrt_input_name facial_expression_type,
2575 int64_t at_timestamp_ns,
2576 struct xrt_facial_expression_set *out_value)
2577{
2578 const uint32_t device_id = id;
2579 struct xrt_device *xdev = NULL;
2580 GET_XDEV_OR_RETURN(ics, device_id, xdev);
2581 // Get facial expression data.
2582 return xrt_device_get_face_tracking(xdev, facial_expression_type, at_timestamp_ns, out_value);
2583}
2584
2585xrt_result_t
2586ipc_handle_device_get_body_skeleton(volatile struct ipc_client_state *ics,
2587 uint32_t id,
2588 enum xrt_input_name body_tracking_type,
2589 struct xrt_body_skeleton *out_value)
2590{
2591 struct xrt_device *xdev = NULL;
2592 GET_XDEV_OR_RETURN(ics, id, xdev);
2593 return xrt_device_get_body_skeleton(xdev, body_tracking_type, out_value);
2594}
2595
2596xrt_result_t
2597ipc_handle_device_get_body_joints(volatile struct ipc_client_state *ics,
2598 uint32_t id,
2599 enum xrt_input_name body_tracking_type,
2600 int64_t desired_timestamp_ns,
2601 struct xrt_body_joint_set *out_value)
2602{
2603 struct xrt_device *xdev = NULL;
2604 GET_XDEV_OR_RETURN(ics, id, xdev);
2605 return xrt_device_get_body_joints(xdev, body_tracking_type, desired_timestamp_ns, out_value);
2606}
2607
2608xrt_result_t
2609ipc_handle_device_reset_body_tracking_calibration_meta(volatile struct ipc_client_state *ics, uint32_t id)
2610{
2611 struct xrt_device *xdev = get_xdev(ics, id);
2612 return xrt_device_reset_body_tracking_calibration_meta(xdev);
2613}
2614
2615xrt_result_t
2616ipc_handle_device_set_body_tracking_calibration_override_meta(volatile struct ipc_client_state *ics,
2617 uint32_t id,
2618 float new_body_height)
2619{
2620 struct xrt_device *xdev = get_xdev(ics, id);
2621 return xrt_device_set_body_tracking_calibration_override_meta(xdev, new_body_height);
2622}
2623
2624xrt_result_t
2625ipc_handle_device_get_battery_status(
2626 volatile struct ipc_client_state *ics, uint32_t id, bool *out_present, bool *out_charging, float *out_charge)
2627{
2628 struct xrt_device *xdev = NULL;
2629 GET_XDEV_OR_RETURN(ics, id, xdev);
2630 return xrt_device_get_battery_status(xdev, out_present, out_charging, out_charge);
2631}
2632
2633xrt_result_t
2634ipc_handle_device_get_brightness(volatile struct ipc_client_state *ics, uint32_t id, float *out_brightness)
2635{
2636 struct xrt_device *xdev = NULL;
2637 GET_XDEV_OR_RETURN(ics, id, xdev);
2638
2639 if (!xdev->supported.brightness_control) {
2640 return XRT_ERROR_FEATURE_NOT_SUPPORTED;
2641 }
2642
2643 return xrt_device_get_brightness(xdev, out_brightness);
2644}
2645
2646xrt_result_t
2647ipc_handle_device_set_brightness(volatile struct ipc_client_state *ics, uint32_t id, float brightness, bool relative)
2648{
2649 struct xrt_device *xdev = NULL;
2650 GET_XDEV_OR_RETURN(ics, id, xdev);
2651
2652 if (!xdev->supported.brightness_control) {
2653 return XRT_ERROR_FEATURE_NOT_SUPPORTED;
2654 }
2655
2656 return xrt_device_set_brightness(xdev, brightness, relative);
2657}
2658
2659xrt_result_t
2660ipc_handle_future_get_state(volatile struct ipc_client_state *ics, uint32_t future_id, enum xrt_future_state *out_state)
2661{
2662 struct xrt_future *xft = NULL;
2663 xrt_result_t xret = validate_future_id(ics, future_id, &xft);
2664 if (xret != XRT_SUCCESS) {
2665 return xret;
2666 }
2667 return xrt_future_get_state(xft, out_state);
2668}
2669
2670xrt_result_t
2671ipc_handle_future_get_result(volatile struct ipc_client_state *ics,
2672 uint32_t future_id,
2673 struct xrt_future_result *out_ft_result)
2674{
2675 struct xrt_future *xft = NULL;
2676 xrt_result_t xret = validate_future_id(ics, future_id, &xft);
2677 if (xret != XRT_SUCCESS) {
2678 return xret;
2679 }
2680 return xrt_future_get_result(xft, out_ft_result);
2681}
2682
2683xrt_result_t
2684ipc_handle_future_cancel(volatile struct ipc_client_state *ics, uint32_t future_id)
2685{
2686 struct xrt_future *xft = NULL;
2687 xrt_result_t xret = validate_future_id(ics, future_id, &xft);
2688 if (xret != XRT_SUCCESS) {
2689 return xret;
2690 }
2691 return xrt_future_cancel(xft);
2692}
2693
2694xrt_result_t
2695ipc_handle_future_destroy(volatile struct ipc_client_state *ics, uint32_t future_id)
2696{
2697 return release_future(ics, future_id);
2698}