// The open source OpenXR runtime.
1// Copyright 2020-2024, Collabora, Ltd.
2// SPDX-License-Identifier: BSL-1.0
3/*!
4 * @file
5 * @brief Handling functions called from generated dispatch function.
6 * @author Pete Black <pblack@collabora.com>
7 * @author Jakob Bornecrantz <jakob@collabora.com>
8 * @author Korcan Hussein <korcan.hussein@collabora.com>
9 * @ingroup ipc_server
10 */
11
12#include "util/u_misc.h"
13#include "util/u_handles.h"
14#include "util/u_pretty_print.h"
15#include "util/u_visibility_mask.h"
16#include "util/u_trace_marker.h"
17
18#include "server/ipc_server.h"
19#include "ipc_server_generated.h"
20#include "xrt/xrt_device.h"
21#include "xrt/xrt_results.h"
22
23#ifdef XRT_GRAPHICS_SYNC_HANDLE_IS_FD
24#include <unistd.h>
25#endif
26
27
28/*
29 *
30 * Helper functions.
31 *
32 */
33
34static xrt_result_t
35validate_device_id(volatile struct ipc_client_state *ics, int64_t device_id, struct xrt_device **out_device)
36{
37 if (device_id >= XRT_SYSTEM_MAX_DEVICES) {
38 IPC_ERROR(ics->server, "Invalid device ID (device_id >= XRT_SYSTEM_MAX_DEVICES)!");
39 return XRT_ERROR_IPC_FAILURE;
40 }
41
42 struct xrt_device *xdev = ics->server->idevs[device_id].xdev;
43 if (xdev == NULL) {
44 IPC_ERROR(ics->server, "Invalid device ID (xdev is NULL)!");
45 return XRT_ERROR_IPC_FAILURE;
46 }
47
48 *out_device = xdev;
49
50 return XRT_SUCCESS;
51}
52
/*
 * Validate @p device_id and assign the backing device to @p out_device; on
 * failure, return the xrt_result_t error from the *enclosing* function.
 *
 * NOTE(review): the macro returns an xrt_result_t, so it is only safe in
 * functions that themselves return xrt_result_t. In a bool-returning caller
 * a failure code (non-zero) converts to 'true', i.e. success — confirm the
 * bool layer helpers below do not rely on this macro for their error paths.
 */
#define GET_XDEV_OR_RETURN(ics, device_id, out_device)                                                                 \
	do {                                                                                                           \
		xrt_result_t res = validate_device_id(ics, device_id, &out_device);                                    \
		if (res != XRT_SUCCESS) {                                                                              \
			return res;                                                                                    \
		}                                                                                                      \
	} while (0)
60
61
62static xrt_result_t
63validate_origin_id(volatile struct ipc_client_state *ics, int64_t origin_id, struct xrt_tracking_origin **out_xtrack)
64{
65 if (origin_id >= XRT_SYSTEM_MAX_DEVICES) {
66 IPC_ERROR(ics->server, "Invalid origin ID (origin_id >= XRT_SYSTEM_MAX_DEVICES)!");
67 return XRT_ERROR_IPC_FAILURE;
68 }
69
70 struct xrt_tracking_origin *xtrack = ics->server->xtracks[origin_id];
71 if (xtrack == NULL) {
72 IPC_ERROR(ics->server, "Invalid origin ID (xtrack is NULL)!");
73 return XRT_ERROR_IPC_FAILURE;
74 }
75
76 *out_xtrack = xtrack;
77
78 return XRT_SUCCESS;
79}
80
81static xrt_result_t
82validate_swapchain_state(volatile struct ipc_client_state *ics, uint32_t *out_index)
83{
84 // Our handle is just the index for now.
85 uint32_t index = 0;
86 for (; index < IPC_MAX_CLIENT_SWAPCHAINS; index++) {
87 if (!ics->swapchain_data[index].active) {
88 break;
89 }
90 }
91
92 if (index >= IPC_MAX_CLIENT_SWAPCHAINS) {
93 IPC_ERROR(ics->server, "Too many swapchains!");
94 return XRT_ERROR_IPC_FAILURE;
95 }
96
97 *out_index = index;
98
99 return XRT_SUCCESS;
100}
101
/*!
 * Record a newly created swapchain in the client's tables.
 *
 * Stores the compositor swapchain pointer and mirrors the creation info
 * (dimensions, format, image count) so it can be reported to the client.
 * The slot is marked active first; the fields are volatile, so the write
 * order here is kept as-is.
 */
static void
set_swapchain_info(volatile struct ipc_client_state *ics,
                   uint32_t index,
                   const struct xrt_swapchain_create_info *info,
                   struct xrt_swapchain *xsc)
{
	ics->xscs[index] = xsc;
	ics->swapchain_data[index].active = true;
	ics->swapchain_data[index].width = info->width;
	ics->swapchain_data[index].height = info->height;
	ics->swapchain_data[index].format = info->format;
	ics->swapchain_data[index].image_count = xsc->image_count;
}
115
116static xrt_result_t
117validate_reference_space_type(volatile struct ipc_client_state *ics, enum xrt_reference_space_type type)
118{
119 if ((uint32_t)type >= XRT_SPACE_REFERENCE_TYPE_COUNT) {
120 IPC_ERROR(ics->server, "Invalid reference space type %u", type);
121 return XRT_ERROR_IPC_FAILURE;
122 }
123
124 return XRT_SUCCESS;
125}
126
127static xrt_result_t
128validate_device_feature_type(volatile struct ipc_client_state *ics, enum xrt_device_feature_type type)
129{
130 if ((uint32_t)type >= XRT_DEVICE_FEATURE_MAX_ENUM) {
131 IPC_ERROR(ics->server, "Invalid device feature type %u", type);
132 return XRT_ERROR_FEATURE_NOT_SUPPORTED;
133 }
134
135 return XRT_SUCCESS;
136}
137
138
139static xrt_result_t
140validate_space_id(volatile struct ipc_client_state *ics, int64_t space_id, struct xrt_space **out_xspc)
141{
142 if (space_id < 0) {
143 return XRT_ERROR_IPC_FAILURE;
144 }
145
146 if (space_id >= IPC_MAX_CLIENT_SPACES) {
147 return XRT_ERROR_IPC_FAILURE;
148 }
149
150 if (ics->xspcs[space_id] == NULL) {
151 return XRT_ERROR_IPC_FAILURE;
152 }
153
154 *out_xspc = (struct xrt_space *)ics->xspcs[space_id];
155
156 return XRT_SUCCESS;
157}
158
159static xrt_result_t
160get_new_space_id(volatile struct ipc_client_state *ics, uint32_t *out_id)
161{
162 // Our handle is just the index for now.
163 uint32_t index = 0;
164 for (; index < IPC_MAX_CLIENT_SPACES; index++) {
165 if (ics->xspcs[index] == NULL) {
166 break;
167 }
168 }
169
170 if (index >= IPC_MAX_CLIENT_SPACES) {
171 IPC_ERROR(ics->server, "Too many spaces!");
172 return XRT_ERROR_IPC_FAILURE;
173 }
174
175 *out_id = index;
176
177 return XRT_SUCCESS;
178}
179
180static xrt_result_t
181track_space(volatile struct ipc_client_state *ics, struct xrt_space *xs, uint32_t *out_id)
182{
183 uint32_t id = UINT32_MAX;
184 xrt_result_t xret = get_new_space_id(ics, &id);
185 if (xret != XRT_SUCCESS) {
186 return xret;
187 }
188
189 // Remove volatile
190 struct xrt_space **xs_ptr = (struct xrt_space **)&ics->xspcs[id];
191 xrt_space_reference(xs_ptr, xs);
192
193 *out_id = id;
194
195 return XRT_SUCCESS;
196}
197
198
/*!
 * Reserve table slots for a client's LOCAL and LOCAL_FLOOR spaces.
 *
 * Four indices are reserved: one each in the space-overseer's localspace
 * and localfloorspace tables (stored on @p ics), and one each in this
 * client's xspcs table (returned via the out parameters). Handles are
 * plain indices; no references are taken here — the slots are still NULL
 * until create_localspace() fills them.
 *
 * @param[out] out_local_id        Client-table slot for the LOCAL space.
 * @param[out] out_local_floor_id  Client-table slot for the LOCAL_FLOOR space.
 * @return XRT_SUCCESS or XRT_ERROR_IPC_FAILURE when any table is full.
 */
static xrt_result_t
get_new_localspace_id(volatile struct ipc_client_state *ics, uint32_t *out_local_id, uint32_t *out_local_floor_id)
{
	// Our handle is just the index for now.
	uint32_t index = 0;
	for (; index < IPC_MAX_CLIENT_SPACES; index++) {
		if (ics->server->xso->localspace[index] == NULL) {
			break;
		}
	}

	if (index >= IPC_MAX_CLIENT_SPACES) {
		IPC_ERROR(ics->server, "Too many localspaces!");
		return XRT_ERROR_IPC_FAILURE;
	}

	// Overseer-side slot for the LOCAL space.
	ics->local_space_overseer_index = index;

	// Client-side slot for the LOCAL space handle.
	index = 0;
	for (; index < IPC_MAX_CLIENT_SPACES; index++) {
		if (ics->xspcs[index] == NULL) {
			break;
		}
	}

	if (index >= IPC_MAX_CLIENT_SPACES) {
		IPC_ERROR(ics->server, "Too many spaces!");
		return XRT_ERROR_IPC_FAILURE;
	}

	ics->local_space_index = index;
	*out_local_id = index;

	// Overseer-side slot for the LOCAL_FLOOR space.
	for (index = 0; index < IPC_MAX_CLIENT_SPACES; index++) {
		if (ics->server->xso->localfloorspace[index] == NULL) {
			break;
		}
	}

	if (index >= IPC_MAX_CLIENT_SPACES) {
		IPC_ERROR(ics->server, "Too many localfloorspaces!");
		return XRT_ERROR_IPC_FAILURE;
	}

	ics->local_floor_space_overseer_index = index;

	// Client-side slot for LOCAL_FLOOR; skip the slot just reserved for
	// LOCAL, which is still NULL at this point.
	for (index = 0; index < IPC_MAX_CLIENT_SPACES; index++) {
		if (ics->xspcs[index] == NULL && index != ics->local_space_index) {
			break;
		}
	}

	if (index >= IPC_MAX_CLIENT_SPACES) {
		IPC_ERROR(ics->server, "Too many spaces!");
		return XRT_ERROR_IPC_FAILURE;
	}

	ics->local_floor_space_index = index;
	*out_local_floor_id = index;

	return XRT_SUCCESS;
}
260
/*!
 * Create the per-client LOCAL and LOCAL_FLOOR spaces and track them.
 *
 * Reserves slots via get_new_localspace_id(), asks the space overseer to
 * create the two spaces into its own tables, then takes client-table
 * references to both.
 *
 * @param[out] out_local_id        Handle for the LOCAL space.
 * @param[out] out_local_floor_id  Handle for the LOCAL_FLOOR space.
 * @return XRT_SUCCESS or the first failing result.
 */
static xrt_result_t
create_localspace(volatile struct ipc_client_state *ics, uint32_t *out_local_id, uint32_t *out_local_floor_id)
{
	uint32_t local_id = UINT32_MAX;
	uint32_t local_floor_id = UINT32_MAX;
	xrt_result_t xret = get_new_localspace_id(ics, &local_id, &local_floor_id);
	if (xret != XRT_SUCCESS) {
		return xret;
	}

	struct xrt_space_overseer *xso = ics->server->xso;

	// Cast away volatile on the client's table slots.
	struct xrt_space **xslocal_ptr = (struct xrt_space **)&ics->xspcs[local_id];
	struct xrt_space **xslocalfloor_ptr = (struct xrt_space **)&ics->xspcs[local_floor_id];

	xret = xrt_space_overseer_create_local_space(xso, &xso->localspace[ics->local_space_overseer_index],
	                                             &xso->localfloorspace[ics->local_floor_space_overseer_index]);
	if (xret != XRT_SUCCESS) {
		// NOTE(review): the index bookkeeping set by
		// get_new_localspace_id stays in place on failure — confirm
		// callers treat that as harmless.
		return xret;
	}

	// Take client-table references to the newly created spaces.
	xrt_space_reference(xslocal_ptr, xso->localspace[ics->local_space_overseer_index]);
	xrt_space_reference(xslocalfloor_ptr, xso->localfloorspace[ics->local_floor_space_overseer_index]);
	*out_local_id = local_id;
	*out_local_floor_id = local_floor_id;

	return XRT_SUCCESS;
}
287
288XRT_MAYBE_UNUSED xrt_result_t
289get_new_future_id(volatile struct ipc_client_state *ics, uint32_t *out_id)
290{
291 // Our handle is just the index for now.
292 uint32_t index = 0;
293 for (; index < IPC_MAX_CLIENT_FUTURES; ++index) {
294 if (ics->xfts[index] == NULL) {
295 break;
296 }
297 }
298
299 if (index >= IPC_MAX_CLIENT_FUTURES) {
300 IPC_ERROR(ics->server, "Too many futures!");
301 return XRT_ERROR_IPC_FAILURE;
302 }
303
304 *out_id = index;
305
306 return XRT_SUCCESS;
307}
308
309static inline xrt_result_t
310validate_future_id(volatile struct ipc_client_state *ics, uint32_t future_id, struct xrt_future **out_xft)
311{
312 if (future_id >= IPC_MAX_CLIENT_FUTURES) {
313 return XRT_ERROR_IPC_FAILURE;
314 }
315
316 if (ics->xfts[future_id] == NULL) {
317 return XRT_ERROR_IPC_FAILURE;
318 }
319
320 *out_xft = (struct xrt_future *)ics->xfts[future_id];
321 return (*out_xft != NULL) ? XRT_SUCCESS : XRT_ERROR_ALLOCATION;
322}
323
324static inline xrt_result_t
325release_future(volatile struct ipc_client_state *ics, uint32_t future_id)
326{
327 struct xrt_future *xft = NULL;
328 xrt_result_t xret = validate_future_id(ics, future_id, &xft);
329 if (xret != XRT_SUCCESS) {
330 return xret;
331 }
332 xrt_future_reference(&xft, NULL);
333 ics->xfts[future_id] = NULL;
334 return XRT_SUCCESS;
335}
336
337/*
338 *
339 * Handle functions.
340 *
341 */
342
343xrt_result_t
344ipc_handle_instance_get_shm_fd(volatile struct ipc_client_state *ics,
345 uint32_t max_handle_capacity,
346 xrt_shmem_handle_t *out_handles,
347 uint32_t *out_handle_count)
348{
349 IPC_TRACE_MARKER();
350
351 assert(max_handle_capacity >= 1);
352
353 out_handles[0] = get_ism_handle(ics);
354 *out_handle_count = 1;
355
356 return XRT_SUCCESS;
357}
358
359xrt_result_t
360ipc_handle_instance_describe_client(volatile struct ipc_client_state *ics,
361 const struct ipc_client_description *client_desc)
362{
363 ics->client_state.info = client_desc->info;
364 ics->client_state.pid = client_desc->pid;
365
366 struct u_pp_sink_stack_only sink;
367 u_pp_delegate_t dg = u_pp_sink_stack_only_init(&sink);
368
369#define P(...) u_pp(dg, __VA_ARGS__)
370#define PNT(...) u_pp(dg, "\n\t" __VA_ARGS__)
371#define PNTT(...) u_pp(dg, "\n\t\t" __VA_ARGS__)
372#define EXT(NAME) PNTT(#NAME ": %s", client_desc->info.NAME ? "true" : "false")
373
374 P("Client info:");
375 PNT("id: %u", ics->client_state.id);
376 PNT("application_name: '%s'", client_desc->info.application_name);
377 PNT("pid: %i", client_desc->pid);
378 PNT("extensions:");
379
380 EXT(ext_hand_tracking_enabled);
381 EXT(ext_hand_tracking_data_source_enabled);
382 EXT(ext_eye_gaze_interaction_enabled);
383 EXT(ext_future_enabled);
384 EXT(ext_hand_interaction_enabled);
385 EXT(htc_facial_tracking_enabled);
386 EXT(fb_body_tracking_enabled);
387 EXT(meta_body_tracking_full_body_enabled);
388 EXT(meta_body_tracking_calibration_enabled);
389 EXT(fb_face_tracking2_enabled);
390
391#undef EXT
392#undef PTT
393#undef PT
394#undef P
395
396 // Log the pretty message.
397 IPC_INFO(ics->server, "%s", sink.buffer);
398
399 return XRT_SUCCESS;
400}
401
402xrt_result_t
403ipc_handle_system_compositor_get_info(volatile struct ipc_client_state *ics,
404 struct xrt_system_compositor_info *out_info)
405{
406 IPC_TRACE_MARKER();
407
408 *out_info = ics->server->xsysc->info;
409
410 return XRT_SUCCESS;
411}
412
/*!
 * Create the client's session (and native compositor) on the system.
 *
 * Only one session is allowed per IPC client. A native compositor is
 * created regardless of @p create_native_compositor; a headless request is
 * only logged. On success the session and compositor are stored on the
 * client state and the client's visibility/focus/z-order are pushed to the
 * system compositor.
 */
xrt_result_t
ipc_handle_session_create(volatile struct ipc_client_state *ics,
                          const struct xrt_session_info *xsi,
                          bool create_native_compositor)
{
	IPC_TRACE_MARKER();

	struct xrt_session *xs = NULL;
	struct xrt_compositor_native *xcn = NULL;

	// Only one session per client.
	if (ics->xs != NULL) {
		return XRT_ERROR_IPC_SESSION_ALREADY_CREATED;
	}

	if (!create_native_compositor) {
		IPC_INFO(ics->server, "App asked for headless session, creating native compositor anyways");
	}

	xrt_result_t xret = xrt_system_create_session(ics->server->xsys, xsi, &xs, &xcn);
	if (xret != XRT_SUCCESS) {
		return xret;
	}

	// Remember overlay status and z-order for compositor layering.
	ics->client_state.session_overlay = xsi->is_overlay;
	ics->client_state.z_order = xsi->z_order;

	ics->xs = xs;
	ics->xc = &xcn->base;

	// Push the client's current state to the system compositor.
	xrt_syscomp_set_state(ics->server->xsysc, ics->xc, ics->client_state.session_visible,
	                      ics->client_state.session_focused);
	xrt_syscomp_set_z_order(ics->server->xsysc, ics->xc, ics->client_state.z_order);

	return XRT_SUCCESS;
}
448
449xrt_result_t
450ipc_handle_session_poll_events(volatile struct ipc_client_state *ics, union xrt_session_event *out_xse)
451{
452 // Have we created the session?
453 if (ics->xs == NULL) {
454 return XRT_ERROR_IPC_SESSION_NOT_CREATED;
455 }
456
457 return xrt_session_poll_events(ics->xs, out_xse);
458}
459
/*!
 * Begin the client's session on the compositor.
 *
 * Requires both the session and the compositor to exist, since begin
 * session is handled by the compositor. Forwards the client's enabled
 * extensions in the begin-session info.
 */
xrt_result_t
ipc_handle_session_begin(volatile struct ipc_client_state *ics)
{
	IPC_TRACE_MARKER();

	// Have we created the session?
	if (ics->xs == NULL) {
		return XRT_ERROR_IPC_SESSION_NOT_CREATED;
	}

	// Need to check both because begin session is handled by compositor.
	if (ics->xc == NULL) {
		return XRT_ERROR_IPC_COMPOSITOR_NOT_CREATED;
	}

	//! @todo Pass the view type down.
	const struct xrt_begin_session_info begin_session_info = {
	    .view_type = XRT_VIEW_TYPE_STEREO,
	    .ext_hand_tracking_enabled = ics->client_state.info.ext_hand_tracking_enabled,
	    .ext_hand_tracking_data_source_enabled = ics->client_state.info.ext_hand_tracking_data_source_enabled,
	    .ext_eye_gaze_interaction_enabled = ics->client_state.info.ext_eye_gaze_interaction_enabled,
	    .ext_future_enabled = ics->client_state.info.ext_future_enabled,
	    .ext_hand_interaction_enabled = ics->client_state.info.ext_hand_interaction_enabled,
	    .htc_facial_tracking_enabled = ics->client_state.info.htc_facial_tracking_enabled,
	    .fb_body_tracking_enabled = ics->client_state.info.fb_body_tracking_enabled,
	    .fb_face_tracking2_enabled = ics->client_state.info.fb_face_tracking2_enabled,
	    .meta_body_tracking_full_body_enabled = ics->client_state.info.meta_body_tracking_full_body_enabled,
	    .meta_body_tracking_calibration_enabled = ics->client_state.info.meta_body_tracking_calibration_enabled,
	};

	return xrt_comp_begin_session(ics->xc, &begin_session_info);
}
492
493xrt_result_t
494ipc_handle_session_end(volatile struct ipc_client_state *ics)
495{
496 IPC_TRACE_MARKER();
497
498 // Have we created the session?
499 if (ics->xs == NULL) {
500 return XRT_ERROR_IPC_SESSION_NOT_CREATED;
501 }
502
503 // Need to check both because end session is handled by compositor.
504 if (ics->xc == NULL) {
505 return XRT_ERROR_IPC_COMPOSITOR_NOT_CREATED;
506 }
507
508 return xrt_comp_end_session(ics->xc);
509}
510
511xrt_result_t
512ipc_handle_session_destroy(volatile struct ipc_client_state *ics)
513{
514 IPC_TRACE_MARKER();
515
516 // Have we created the session?
517 if (ics->xs == NULL) {
518 return XRT_ERROR_IPC_SESSION_NOT_CREATED;
519 }
520
521 ipc_server_client_destroy_session_and_compositor(ics);
522
523 return XRT_SUCCESS;
524}
525
/*!
 * Create client handles for all semantic (well-known) reference spaces.
 *
 * For each overseer-provided semantic space that exists, a client-table
 * handle is created; missing spaces report UINT32_MAX. LOCAL and
 * LOCAL_FLOOR are created per-client via create_localspace().
 */
xrt_result_t
ipc_handle_space_create_semantic_ids(volatile struct ipc_client_state *ics,
                                     uint32_t *out_root_id,
                                     uint32_t *out_view_id,
                                     uint32_t *out_local_id,
                                     uint32_t *out_local_floor_id,
                                     uint32_t *out_stage_id,
                                     uint32_t *out_unbounded_id)
{
	IPC_TRACE_MARKER();

	struct xrt_space_overseer *xso = ics->server->xso;

// Track the semantic space NAME (if present) and write its handle,
// leaving UINT32_MAX on any failure.
#define CREATE(NAME)                                                                                                   \
	do {                                                                                                           \
		*out_##NAME##_id = UINT32_MAX;                                                                         \
		if (xso->semantic.NAME == NULL) {                                                                      \
			break;                                                                                         \
		}                                                                                                      \
		uint32_t id = 0;                                                                                       \
		xrt_result_t xret = track_space(ics, xso->semantic.NAME, &id);                                         \
		if (xret != XRT_SUCCESS) {                                                                             \
			break;                                                                                         \
		}                                                                                                      \
		*out_##NAME##_id = id;                                                                                 \
	} while (false)

	CREATE(root);
	CREATE(view);
	CREATE(stage);
	CREATE(unbounded);

#undef CREATE

	// LOCAL and LOCAL_FLOOR are created per client.
	return create_localspace(ics, out_local_id, out_local_floor_id);
}
561
562xrt_result_t
563ipc_handle_space_create_offset(volatile struct ipc_client_state *ics,
564 uint32_t parent_id,
565 const struct xrt_pose *offset,
566 uint32_t *out_space_id)
567{
568 IPC_TRACE_MARKER();
569
570 struct xrt_space_overseer *xso = ics->server->xso;
571
572 struct xrt_space *parent = NULL;
573 xrt_result_t xret = validate_space_id(ics, parent_id, &parent);
574 if (xret != XRT_SUCCESS) {
575 return xret;
576 }
577
578
579 struct xrt_space *xs = NULL;
580 xret = xrt_space_overseer_create_offset_space(xso, parent, offset, &xs);
581 if (xret != XRT_SUCCESS) {
582 return xret;
583 }
584
585 uint32_t space_id = UINT32_MAX;
586 xret = track_space(ics, xs, &space_id);
587
588 // Track space grabs a reference, or it errors and we don't want to keep it around.
589 xrt_space_reference(&xs, NULL);
590
591 if (xret != XRT_SUCCESS) {
592 return xret;
593 }
594
595 *out_space_id = space_id;
596
597 return XRT_SUCCESS;
598}
599
600xrt_result_t
601ipc_handle_space_create_pose(volatile struct ipc_client_state *ics,
602 uint32_t xdev_id,
603 enum xrt_input_name name,
604 uint32_t *out_space_id)
605{
606 IPC_TRACE_MARKER();
607
608 struct xrt_space_overseer *xso = ics->server->xso;
609
610 struct xrt_device *xdev = NULL;
611 GET_XDEV_OR_RETURN(ics, xdev_id, xdev);
612
613 struct xrt_space *xs = NULL;
614 xrt_result_t xret = xrt_space_overseer_create_pose_space(xso, xdev, name, &xs);
615 if (xret != XRT_SUCCESS) {
616 return xret;
617 }
618
619 uint32_t space_id = UINT32_MAX;
620 xret = track_space(ics, xs, &space_id);
621
622 // Track space grabs a reference, or it errors and we don't want to keep it around.
623 xrt_space_reference(&xs, NULL);
624
625 if (xret != XRT_SUCCESS) {
626 return xret;
627 }
628
629 *out_space_id = space_id;
630
631 return xret;
632}
633
634xrt_result_t
635ipc_handle_space_locate_space(volatile struct ipc_client_state *ics,
636 uint32_t base_space_id,
637 const struct xrt_pose *base_offset,
638 int64_t at_timestamp,
639 uint32_t space_id,
640 const struct xrt_pose *offset,
641 struct xrt_space_relation *out_relation)
642{
643 IPC_TRACE_MARKER();
644
645 struct xrt_space_overseer *xso = ics->server->xso;
646 struct xrt_space *base_space = NULL;
647 struct xrt_space *space = NULL;
648 xrt_result_t xret;
649
650 xret = validate_space_id(ics, base_space_id, &base_space);
651 if (xret != XRT_SUCCESS) {
652 U_LOG_E("Invalid base_space_id!");
653 return xret;
654 }
655
656 xret = validate_space_id(ics, space_id, &space);
657 if (xret != XRT_SUCCESS) {
658 U_LOG_E("Invalid space_id!");
659 return xret;
660 }
661
662 return xrt_space_overseer_locate_space( //
663 xso, //
664 base_space, //
665 base_offset, //
666 at_timestamp, //
667 space, //
668 offset, //
669 out_relation); //
670}
671
672xrt_result_t
673ipc_handle_space_locate_spaces(volatile struct ipc_client_state *ics,
674 uint32_t base_space_id,
675 const struct xrt_pose *base_offset,
676 uint32_t space_count,
677 int64_t at_timestamp)
678{
679 IPC_TRACE_MARKER();
680 struct ipc_message_channel *imc = (struct ipc_message_channel *)&ics->imc;
681 struct ipc_server *s = ics->server;
682
683 struct xrt_space_overseer *xso = ics->server->xso;
684 struct xrt_space *base_space = NULL;
685
686 struct xrt_space **xspaces = U_TYPED_ARRAY_CALLOC(struct xrt_space *, space_count);
687 struct xrt_pose *offsets = U_TYPED_ARRAY_CALLOC(struct xrt_pose, space_count);
688 struct xrt_space_relation *out_relations = U_TYPED_ARRAY_CALLOC(struct xrt_space_relation, space_count);
689
690 xrt_result_t xret;
691
692 os_mutex_lock(&ics->server->global_state.lock);
693
694 uint32_t *space_ids = U_TYPED_ARRAY_CALLOC(uint32_t, space_count);
695
696 // we need to send back whether allocation succeeded so the client knows whether to send more data
697 if (space_ids == NULL) {
698 xret = XRT_ERROR_ALLOCATION;
699 } else {
700 xret = XRT_SUCCESS;
701 }
702
703 xret = ipc_send(imc, &xret, sizeof(enum xrt_result));
704 if (xret != XRT_SUCCESS) {
705 IPC_ERROR(ics->server, "Failed to send spaces allocate result");
706 // Nothing else we can do
707 goto out_locate_spaces;
708 }
709
710 // only after sending the allocation result can we skip to the end in the allocation error case
711 if (space_ids == NULL) {
712 IPC_ERROR(s, "Failed to allocate space for receiving spaces ids");
713 xret = XRT_ERROR_ALLOCATION;
714 goto out_locate_spaces;
715 }
716
717 xret = ipc_receive(imc, space_ids, space_count * sizeof(uint32_t));
718 if (xret != XRT_SUCCESS) {
719 IPC_ERROR(ics->server, "Failed to receive spaces ids");
720 // assume early abort is possible, i.e. client will not send more data for this request
721 goto out_locate_spaces;
722 }
723
724 xret = ipc_receive(imc, offsets, space_count * sizeof(struct xrt_pose));
725 if (xret != XRT_SUCCESS) {
726 IPC_ERROR(ics->server, "Failed to receive spaces offsets");
727 // assume early abort is possible, i.e. client will not send more data for this request
728 goto out_locate_spaces;
729 }
730
731 xret = validate_space_id(ics, base_space_id, &base_space);
732 if (xret != XRT_SUCCESS) {
733 U_LOG_E("Invalid base_space_id %d!", base_space_id);
734 // Client is receiving out_relations now, it will get xret on this receive.
735 goto out_locate_spaces;
736 }
737
738 for (uint32_t i = 0; i < space_count; i++) {
739 if (space_ids[i] == UINT32_MAX) {
740 xspaces[i] = NULL;
741 } else {
742 xret = validate_space_id(ics, space_ids[i], &xspaces[i]);
743 if (xret != XRT_SUCCESS) {
744 U_LOG_E("Invalid space_id space_ids[%d] = %d!", i, space_ids[i]);
745 // Client is receiving out_relations now, it will get xret on this receive.
746 goto out_locate_spaces;
747 }
748 }
749 }
750 xret = xrt_space_overseer_locate_spaces( //
751 xso, //
752 base_space, //
753 base_offset, //
754 at_timestamp, //
755 xspaces, //
756 space_count, //
757 offsets, //
758 out_relations); //
759
760 xret = ipc_send(imc, out_relations, sizeof(struct xrt_space_relation) * space_count);
761 if (xret != XRT_SUCCESS) {
762 IPC_ERROR(ics->server, "Failed to send spaces relations");
763 // Nothing else we can do
764 goto out_locate_spaces;
765 }
766
767out_locate_spaces:
768 free(xspaces);
769 free(offsets);
770 free(out_relations);
771 os_mutex_unlock(&ics->server->global_state.lock);
772 return xret;
773}
774
775xrt_result_t
776ipc_handle_space_locate_device(volatile struct ipc_client_state *ics,
777 uint32_t base_space_id,
778 const struct xrt_pose *base_offset,
779 int64_t at_timestamp,
780 uint32_t xdev_id,
781 struct xrt_space_relation *out_relation)
782{
783 IPC_TRACE_MARKER();
784
785 struct xrt_space_overseer *xso = ics->server->xso;
786 struct xrt_space *base_space = NULL;
787 struct xrt_device *xdev = NULL;
788 xrt_result_t xret;
789
790 xret = validate_space_id(ics, base_space_id, &base_space);
791 if (xret != XRT_SUCCESS) {
792 U_LOG_E("Invalid base_space_id!");
793 return xret;
794 }
795
796 xret = validate_device_id(ics, xdev_id, &xdev);
797 if (xret != XRT_SUCCESS) {
798 U_LOG_E("Invalid device_id!");
799 return xret;
800 }
801
802 return xrt_space_overseer_locate_device( //
803 xso, //
804 base_space, //
805 base_offset, //
806 at_timestamp, //
807 xdev, //
808 out_relation); //
809}
810
/*!
 * Destroy a client space handle, dropping the table reference.
 *
 * If the handle is the client's LOCAL or LOCAL_FLOOR space, the space
 * overseer's own reference is also dropped.
 */
xrt_result_t
ipc_handle_space_destroy(volatile struct ipc_client_state *ics, uint32_t space_id)
{
	struct xrt_space *xs = NULL;
	xrt_result_t xret;

	// Only validates the ID; the returned pointer is not used below.
	xret = validate_space_id(ics, space_id, &xs);
	if (xret != XRT_SUCCESS) {
		U_LOG_E("Invalid space_id!");
		return xret;
	}

	assert(xs != NULL);
	xs = NULL;

	// Remove volatile
	struct xrt_space **xs_ptr = (struct xrt_space **)&ics->xspcs[space_id];
	xrt_space_reference(xs_ptr, NULL);

	// If this was the client's LOCAL space, drop the overseer's reference too.
	if (space_id == ics->local_space_index) {
		struct xrt_space **xslocal_ptr =
		    (struct xrt_space **)&ics->server->xso->localspace[ics->local_space_overseer_index];
		xrt_space_reference(xslocal_ptr, NULL);
	}

	// Likewise for the LOCAL_FLOOR space.
	if (space_id == ics->local_floor_space_index) {
		struct xrt_space **xslocalfloor_ptr =
		    (struct xrt_space **)&ics->server->xso->localfloorspace[ics->local_floor_space_overseer_index];
		xrt_space_reference(xslocalfloor_ptr, NULL);
	}

	return XRT_SUCCESS;
}
844
845xrt_result_t
846ipc_handle_space_mark_ref_space_in_use(volatile struct ipc_client_state *ics, enum xrt_reference_space_type type)
847{
848 struct xrt_space_overseer *xso = ics->server->xso;
849 xrt_result_t xret;
850
851 xret = validate_reference_space_type(ics, type);
852 if (xret != XRT_SUCCESS) {
853 return XRT_ERROR_IPC_FAILURE;
854 }
855
856 // Is this space already used?
857 if (ics->ref_space_used[type]) {
858 IPC_ERROR(ics->server, "Space '%u' already used!", type);
859 return XRT_ERROR_IPC_FAILURE;
860 }
861
862 xret = xrt_space_overseer_ref_space_inc(xso, type);
863 if (xret != XRT_SUCCESS) {
864 IPC_ERROR(ics->server, "xrt_space_overseer_ref_space_inc failed");
865 return xret;
866 }
867
868 // Can now mark it as used.
869 ics->ref_space_used[type] = true;
870
871 return XRT_SUCCESS;
872}
873
874xrt_result_t
875ipc_handle_space_unmark_ref_space_in_use(volatile struct ipc_client_state *ics, enum xrt_reference_space_type type)
876{
877 struct xrt_space_overseer *xso = ics->server->xso;
878 xrt_result_t xret;
879
880 xret = validate_reference_space_type(ics, type);
881 if (xret != XRT_SUCCESS) {
882 return XRT_ERROR_IPC_FAILURE;
883 }
884
885 if (!ics->ref_space_used[type]) {
886 IPC_ERROR(ics->server, "Space '%u' not used!", type);
887 return XRT_ERROR_IPC_FAILURE;
888 }
889
890 xret = xrt_space_overseer_ref_space_dec(xso, type);
891 if (xret != XRT_SUCCESS) {
892 IPC_ERROR(ics->server, "xrt_space_overseer_ref_space_dec failed");
893 return xret;
894 }
895
896 // Now we can mark it as not used.
897 ics->ref_space_used[type] = false;
898
899 return XRT_SUCCESS;
900}
901
902xrt_result_t
903ipc_handle_space_recenter_local_spaces(volatile struct ipc_client_state *ics)
904{
905 struct xrt_space_overseer *xso = ics->server->xso;
906
907 return xrt_space_overseer_recenter_local_spaces(xso);
908}
909
910xrt_result_t
911ipc_handle_space_get_tracking_origin_offset(volatile struct ipc_client_state *ics,
912 uint32_t origin_id,
913 struct xrt_pose *out_offset)
914{
915 struct xrt_space_overseer *xso = ics->server->xso;
916 struct xrt_tracking_origin *xto;
917 xrt_result_t xret = validate_origin_id(ics, origin_id, &xto);
918 if (xret != XRT_SUCCESS) {
919 return xret;
920 }
921 return xrt_space_overseer_get_tracking_origin_offset(xso, xto, out_offset);
922}
923
924xrt_result_t
925ipc_handle_space_set_tracking_origin_offset(volatile struct ipc_client_state *ics,
926 uint32_t origin_id,
927 const struct xrt_pose *offset)
928{
929 struct xrt_space_overseer *xso = ics->server->xso;
930 struct xrt_tracking_origin *xto;
931 xrt_result_t xret = validate_origin_id(ics, origin_id, &xto);
932 if (xret != XRT_SUCCESS) {
933 return xret;
934 }
935 return xrt_space_overseer_set_tracking_origin_offset(xso, xto, offset);
936}
937
938xrt_result_t
939ipc_handle_space_get_reference_space_offset(volatile struct ipc_client_state *ics,
940 enum xrt_reference_space_type type,
941 struct xrt_pose *out_offset)
942{
943 struct xrt_space_overseer *xso = ics->server->xso;
944 return xrt_space_overseer_get_reference_space_offset(xso, type, out_offset);
945}
946
947xrt_result_t
948ipc_handle_space_set_reference_space_offset(volatile struct ipc_client_state *ics,
949 enum xrt_reference_space_type type,
950 const struct xrt_pose *offset)
951{
952 struct xrt_space_overseer *xso = ics->server->xso;
953 return xrt_space_overseer_set_reference_space_offset(xso, type, offset);
954}
955
956xrt_result_t
957ipc_handle_compositor_get_info(volatile struct ipc_client_state *ics, struct xrt_compositor_info *out_info)
958{
959 IPC_TRACE_MARKER();
960
961 if (ics->xc == NULL) {
962 return XRT_ERROR_IPC_SESSION_NOT_CREATED;
963 }
964
965 *out_info = ics->xc->info;
966
967 return XRT_SUCCESS;
968}
969
970xrt_result_t
971ipc_handle_compositor_predict_frame(volatile struct ipc_client_state *ics,
972 int64_t *out_frame_id,
973 int64_t *out_wake_up_time_ns,
974 int64_t *out_predicted_display_time_ns,
975 int64_t *out_predicted_display_period_ns)
976{
977 IPC_TRACE_MARKER();
978
979 if (ics->xc == NULL) {
980 return XRT_ERROR_IPC_SESSION_NOT_CREATED;
981 }
982
983 /*
984 * We use this to signal that the session has started, this is needed
985 * to make this client/session active/visible/focused.
986 */
987 ipc_server_activate_session(ics);
988
989 int64_t gpu_time_ns = 0;
990 return xrt_comp_predict_frame( //
991 ics->xc, //
992 out_frame_id, //
993 out_wake_up_time_ns, //
994 &gpu_time_ns, //
995 out_predicted_display_time_ns, //
996 out_predicted_display_period_ns); //
997}
998
999xrt_result_t
1000ipc_handle_compositor_wait_woke(volatile struct ipc_client_state *ics, int64_t frame_id)
1001{
1002 IPC_TRACE_MARKER();
1003
1004 if (ics->xc == NULL) {
1005 return XRT_ERROR_IPC_SESSION_NOT_CREATED;
1006 }
1007
1008 return xrt_comp_mark_frame(ics->xc, frame_id, XRT_COMPOSITOR_FRAME_POINT_WOKE, os_monotonic_get_ns());
1009}
1010
1011xrt_result_t
1012ipc_handle_compositor_begin_frame(volatile struct ipc_client_state *ics, int64_t frame_id)
1013{
1014 IPC_TRACE_MARKER();
1015
1016 if (ics->xc == NULL) {
1017 return XRT_ERROR_IPC_SESSION_NOT_CREATED;
1018 }
1019
1020 return xrt_comp_begin_frame(ics->xc, frame_id);
1021}
1022
1023xrt_result_t
1024ipc_handle_compositor_discard_frame(volatile struct ipc_client_state *ics, int64_t frame_id)
1025{
1026 IPC_TRACE_MARKER();
1027
1028 if (ics->xc == NULL) {
1029 return XRT_ERROR_IPC_SESSION_NOT_CREATED;
1030 }
1031
1032 return xrt_comp_discard_frame(ics->xc, frame_id);
1033}
1034
1035xrt_result_t
1036ipc_handle_compositor_get_display_refresh_rate(volatile struct ipc_client_state *ics,
1037 float *out_display_refresh_rate_hz)
1038{
1039 IPC_TRACE_MARKER();
1040
1041 if (ics->xc == NULL) {
1042 return XRT_ERROR_IPC_SESSION_NOT_CREATED;
1043 }
1044
1045 return xrt_comp_get_display_refresh_rate(ics->xc, out_display_refresh_rate_hz);
1046}
1047
1048xrt_result_t
1049ipc_handle_compositor_request_display_refresh_rate(volatile struct ipc_client_state *ics, float display_refresh_rate_hz)
1050{
1051 IPC_TRACE_MARKER();
1052
1053 if (ics->xc == NULL) {
1054 return XRT_ERROR_IPC_SESSION_NOT_CREATED;
1055 }
1056
1057 return xrt_comp_request_display_refresh_rate(ics->xc, display_refresh_rate_hz);
1058}
1059
1060xrt_result_t
1061ipc_handle_compositor_set_performance_level(volatile struct ipc_client_state *ics,
1062 enum xrt_perf_domain domain,
1063 enum xrt_perf_set_level level)
1064{
1065 IPC_TRACE_MARKER();
1066
1067 if (ics->xc == NULL) {
1068 return XRT_ERROR_IPC_COMPOSITOR_NOT_CREATED;
1069 }
1070
1071 if (ics->xc->set_performance_level == NULL) {
1072 return XRT_ERROR_IPC_FAILURE;
1073 }
1074
1075 return xrt_comp_set_performance_level(ics->xc, domain, level);
1076}
1077
1078static bool
1079_update_projection_layer(struct xrt_compositor *xc,
1080 volatile struct ipc_client_state *ics,
1081 volatile struct ipc_layer_entry *layer,
1082 uint32_t i)
1083{
1084 // xdev
1085 uint32_t device_id = layer->xdev_id;
1086 struct xrt_device *xdev = NULL;
1087 GET_XDEV_OR_RETURN(ics, device_id, xdev);
1088
1089 if (xdev == NULL) {
1090 U_LOG_E("Invalid xdev for projection layer!");
1091 return false;
1092 }
1093
1094 uint32_t view_count = xdev->hmd->view_count;
1095
1096 struct xrt_swapchain *xcs[XRT_MAX_VIEWS];
1097 for (uint32_t k = 0; k < view_count; k++) {
1098 const uint32_t xsci = layer->swapchain_ids[k];
1099 xcs[k] = ics->xscs[xsci];
1100 if (xcs[k] == NULL) {
1101 U_LOG_E("Invalid swap chain for projection layer!");
1102 return false;
1103 }
1104 }
1105
1106
1107 // Cast away volatile.
1108 struct xrt_layer_data *data = (struct xrt_layer_data *)&layer->data;
1109
1110 xrt_comp_layer_projection(xc, xdev, xcs, data);
1111
1112 return true;
1113}
1114
1115static bool
1116_update_projection_layer_depth(struct xrt_compositor *xc,
1117 volatile struct ipc_client_state *ics,
1118 volatile struct ipc_layer_entry *layer,
1119 uint32_t i)
1120{
1121 // xdev
1122 uint32_t xdevi = layer->xdev_id;
1123
1124 // Cast away volatile.
1125 struct xrt_layer_data *data = (struct xrt_layer_data *)&layer->data;
1126
1127 struct xrt_device *xdev = NULL;
1128 GET_XDEV_OR_RETURN(ics, xdevi, xdev);
1129 if (xdev == NULL) {
1130 U_LOG_E("Invalid xdev for projection layer #%u!", i);
1131 return false;
1132 }
1133
1134 struct xrt_swapchain *xcs[XRT_MAX_VIEWS];
1135 struct xrt_swapchain *d_xcs[XRT_MAX_VIEWS];
1136
1137 for (uint32_t j = 0; j < data->view_count; j++) {
1138 int xsci = layer->swapchain_ids[j];
1139 int d_xsci = layer->swapchain_ids[j + data->view_count];
1140
1141 xcs[j] = ics->xscs[xsci];
1142 d_xcs[j] = ics->xscs[d_xsci];
1143 if (xcs[j] == NULL || d_xcs[j] == NULL) {
1144 U_LOG_E("Invalid swap chain for projection layer #%u!", i);
1145 return false;
1146 }
1147 }
1148
1149 xrt_comp_layer_projection_depth(xc, xdev, xcs, d_xcs, data);
1150
1151 return true;
1152}
1153
1154static bool
1155do_single(struct xrt_compositor *xc,
1156 volatile struct ipc_client_state *ics,
1157 volatile struct ipc_layer_entry *layer,
1158 uint32_t i,
1159 const char *name,
1160 struct xrt_device **out_xdev,
1161 struct xrt_swapchain **out_xcs,
1162 struct xrt_layer_data **out_data)
1163{
1164 uint32_t device_id = layer->xdev_id;
1165 uint32_t sci = layer->swapchain_ids[0];
1166
1167 struct xrt_device *xdev = NULL;
1168 GET_XDEV_OR_RETURN(ics, device_id, xdev);
1169 struct xrt_swapchain *xcs = ics->xscs[sci];
1170
1171 if (xcs == NULL) {
1172 U_LOG_E("Invalid swapchain for layer #%u, '%s'!", i, name);
1173 return false;
1174 }
1175
1176 if (xdev == NULL) {
1177 U_LOG_E("Invalid xdev for layer #%u, '%s'!", i, name);
1178 return false;
1179 }
1180
1181 // Cast away volatile.
1182 struct xrt_layer_data *data = (struct xrt_layer_data *)&layer->data;
1183
1184 *out_xdev = xdev;
1185 *out_xcs = xcs;
1186 *out_data = data;
1187
1188 return true;
1189}
1190
1191static bool
1192_update_quad_layer(struct xrt_compositor *xc,
1193 volatile struct ipc_client_state *ics,
1194 volatile struct ipc_layer_entry *layer,
1195 uint32_t i)
1196{
1197 struct xrt_device *xdev;
1198 struct xrt_swapchain *xcs;
1199 struct xrt_layer_data *data;
1200
1201 if (!do_single(xc, ics, layer, i, "quad", &xdev, &xcs, &data)) {
1202 return false;
1203 }
1204
1205 xrt_comp_layer_quad(xc, xdev, xcs, data);
1206
1207 return true;
1208}
1209
1210static bool
1211_update_cube_layer(struct xrt_compositor *xc,
1212 volatile struct ipc_client_state *ics,
1213 volatile struct ipc_layer_entry *layer,
1214 uint32_t i)
1215{
1216 struct xrt_device *xdev;
1217 struct xrt_swapchain *xcs;
1218 struct xrt_layer_data *data;
1219
1220 if (!do_single(xc, ics, layer, i, "cube", &xdev, &xcs, &data)) {
1221 return false;
1222 }
1223
1224 xrt_comp_layer_cube(xc, xdev, xcs, data);
1225
1226 return true;
1227}
1228
1229static bool
1230_update_cylinder_layer(struct xrt_compositor *xc,
1231 volatile struct ipc_client_state *ics,
1232 volatile struct ipc_layer_entry *layer,
1233 uint32_t i)
1234{
1235 struct xrt_device *xdev;
1236 struct xrt_swapchain *xcs;
1237 struct xrt_layer_data *data;
1238
1239 if (!do_single(xc, ics, layer, i, "cylinder", &xdev, &xcs, &data)) {
1240 return false;
1241 }
1242
1243 xrt_comp_layer_cylinder(xc, xdev, xcs, data);
1244
1245 return true;
1246}
1247
1248static bool
1249_update_equirect1_layer(struct xrt_compositor *xc,
1250 volatile struct ipc_client_state *ics,
1251 volatile struct ipc_layer_entry *layer,
1252 uint32_t i)
1253{
1254 struct xrt_device *xdev;
1255 struct xrt_swapchain *xcs;
1256 struct xrt_layer_data *data;
1257
1258 if (!do_single(xc, ics, layer, i, "equirect1", &xdev, &xcs, &data)) {
1259 return false;
1260 }
1261
1262 xrt_comp_layer_equirect1(xc, xdev, xcs, data);
1263
1264 return true;
1265}
1266
1267static bool
1268_update_equirect2_layer(struct xrt_compositor *xc,
1269 volatile struct ipc_client_state *ics,
1270 volatile struct ipc_layer_entry *layer,
1271 uint32_t i)
1272{
1273 struct xrt_device *xdev;
1274 struct xrt_swapchain *xcs;
1275 struct xrt_layer_data *data;
1276
1277 if (!do_single(xc, ics, layer, i, "equirect2", &xdev, &xcs, &data)) {
1278 return false;
1279 }
1280
1281 xrt_comp_layer_equirect2(xc, xdev, xcs, data);
1282
1283 return true;
1284}
1285
1286static bool
1287_update_passthrough_layer(struct xrt_compositor *xc,
1288 volatile struct ipc_client_state *ics,
1289 volatile struct ipc_layer_entry *layer,
1290 uint32_t i)
1291{
1292 // xdev
1293 uint32_t xdevi = layer->xdev_id;
1294
1295 struct xrt_device *xdev = NULL;
1296 GET_XDEV_OR_RETURN(ics, xdevi, xdev);
1297
1298 if (xdev == NULL) {
1299 U_LOG_E("Invalid xdev for passthrough layer #%u!", i);
1300 return false;
1301 }
1302
1303 // Cast away volatile.
1304 struct xrt_layer_data *data = (struct xrt_layer_data *)&layer->data;
1305
1306 xrt_comp_layer_passthrough(xc, xdev, data);
1307
1308 return true;
1309}
1310
1311static bool
1312_update_layers(volatile struct ipc_client_state *ics, struct xrt_compositor *xc, struct ipc_layer_slot *slot)
1313{
1314 IPC_TRACE_MARKER();
1315
1316 for (uint32_t i = 0; i < slot->layer_count; i++) {
1317 volatile struct ipc_layer_entry *layer = &slot->layers[i];
1318
1319 switch (layer->data.type) {
1320 case XRT_LAYER_PROJECTION:
1321 if (!_update_projection_layer(xc, ics, layer, i)) {
1322 return false;
1323 }
1324 break;
1325 case XRT_LAYER_PROJECTION_DEPTH:
1326 if (!_update_projection_layer_depth(xc, ics, layer, i)) {
1327 return false;
1328 }
1329 break;
1330 case XRT_LAYER_QUAD:
1331 if (!_update_quad_layer(xc, ics, layer, i)) {
1332 return false;
1333 }
1334 break;
1335 case XRT_LAYER_CUBE:
1336 if (!_update_cube_layer(xc, ics, layer, i)) {
1337 return false;
1338 }
1339 break;
1340 case XRT_LAYER_CYLINDER:
1341 if (!_update_cylinder_layer(xc, ics, layer, i)) {
1342 return false;
1343 }
1344 break;
1345 case XRT_LAYER_EQUIRECT1:
1346 if (!_update_equirect1_layer(xc, ics, layer, i)) {
1347 return false;
1348 }
1349 break;
1350 case XRT_LAYER_EQUIRECT2:
1351 if (!_update_equirect2_layer(xc, ics, layer, i)) {
1352 return false;
1353 }
1354 break;
1355 case XRT_LAYER_PASSTHROUGH:
1356 if (!_update_passthrough_layer(xc, ics, layer, i)) {
1357 return false;
1358 }
1359 break;
1360 default: U_LOG_E("Unhandled layer type '%i'!", layer->data.type); break;
1361 }
1362 }
1363
1364 return true;
1365}
1366
/*
 * Submits the layers the client wrote into shared-memory slot @p slot_id to
 * the underlying compositor, then advances and returns the next slot id for
 * the client to fill.
 */
xrt_result_t
ipc_handle_compositor_layer_sync(volatile struct ipc_client_state *ics,
                                 uint32_t slot_id,
                                 uint32_t *out_free_slot_id,
                                 const xrt_graphics_sync_handle_t *handles,
                                 const uint32_t handle_count)
{
	IPC_TRACE_MARKER();

	if (ics->xc == NULL) {
		return XRT_ERROR_IPC_SESSION_NOT_CREATED;
	}

	// NOTE(review): slot_id is not bounds-checked against the slot array
	// here - confirm the caller/generated dispatch validates it.
	struct ipc_shared_memory *ism = get_ism(ics);
	struct ipc_layer_slot *slot = &ism->slots[slot_id];
	xrt_graphics_sync_handle_t sync_handle = XRT_GRAPHICS_SYNC_HANDLE_INVALID;

	// If we have one or more save the first handle.
	if (handle_count >= 1) {
		sync_handle = handles[0];
	}

	// Free all sync handles after the first one; only the first is passed
	// on to the compositor in the commit below.
	for (uint32_t i = 1; i < handle_count; i++) {
		// Checks for valid handle.
		xrt_graphics_sync_handle_t tmp = handles[i];
		u_graphics_sync_unref(&tmp);
	}

	// Copy current slot data, snapshotting it out of shared memory.
	struct ipc_layer_slot copy = *slot;


	/*
	 * Transfer data to underlying compositor.
	 */

	xrt_comp_layer_begin(ics->xc, &copy.data);

	_update_layers(ics, ics->xc, &copy);

	xrt_comp_layer_commit(ics->xc, sync_handle);


	/*
	 * Manage shared state.
	 */

	os_mutex_lock(&ics->server->global_state.lock);

	// Advance the slot ring; the returned slot is the one the client
	// should fill in next.
	*out_free_slot_id = (ics->server->current_slot_index + 1) % IPC_MAX_SLOTS;
	ics->server->current_slot_index = *out_free_slot_id;

	os_mutex_unlock(&ics->server->global_state.lock);

	return XRT_SUCCESS;
}
1424
/*
 * Like ipc_handle_compositor_layer_sync, but synchronisation uses a
 * previously created compositor semaphore instead of graphics sync handles.
 */
xrt_result_t
ipc_handle_compositor_layer_sync_with_semaphore(volatile struct ipc_client_state *ics,
                                                uint32_t slot_id,
                                                uint32_t semaphore_id,
                                                uint64_t semaphore_value,
                                                uint32_t *out_free_slot_id)
{
	IPC_TRACE_MARKER();

	if (ics->xc == NULL) {
		return XRT_ERROR_IPC_SESSION_NOT_CREATED;
	}
	// Validate the client-supplied semaphore id before indexing xcsems.
	if (semaphore_id >= IPC_MAX_CLIENT_SEMAPHORES) {
		IPC_ERROR(ics->server, "Invalid semaphore_id");
		return XRT_ERROR_IPC_FAILURE;
	}
	if (ics->xcsems[semaphore_id] == NULL) {
		IPC_ERROR(ics->server, "Semaphore of id %u not created!", semaphore_id);
		return XRT_ERROR_IPC_FAILURE;
	}

	struct xrt_compositor_semaphore *xcsem = ics->xcsems[semaphore_id];

	// NOTE(review): slot_id is not bounds-checked against the slot array
	// here - confirm the caller/generated dispatch validates it.
	struct ipc_shared_memory *ism = get_ism(ics);
	struct ipc_layer_slot *slot = &ism->slots[slot_id];

	// Copy current slot data, snapshotting it out of shared memory.
	struct ipc_layer_slot copy = *slot;



	/*
	 * Transfer data to underlying compositor.
	 */

	xrt_comp_layer_begin(ics->xc, &copy.data);

	_update_layers(ics, ics->xc, &copy);

	xrt_comp_layer_commit_with_semaphore(ics->xc, xcsem, semaphore_value);


	/*
	 * Manage shared state.
	 */

	os_mutex_lock(&ics->server->global_state.lock);

	// Advance the slot ring; the returned slot is the next one to fill.
	*out_free_slot_id = (ics->server->current_slot_index + 1) % IPC_MAX_SLOTS;
	ics->server->current_slot_index = *out_free_slot_id;

	os_mutex_unlock(&ics->server->global_state.lock);

	return XRT_SUCCESS;
}
1480
1481xrt_result_t
1482ipc_handle_compositor_create_passthrough(volatile struct ipc_client_state *ics,
1483 const struct xrt_passthrough_create_info *info)
1484{
1485 IPC_TRACE_MARKER();
1486
1487 if (ics->xc == NULL) {
1488 return XRT_ERROR_IPC_SESSION_NOT_CREATED;
1489 }
1490
1491 return xrt_comp_create_passthrough(ics->xc, info);
1492}
1493
1494xrt_result_t
1495ipc_handle_compositor_create_passthrough_layer(volatile struct ipc_client_state *ics,
1496 const struct xrt_passthrough_layer_create_info *info)
1497{
1498 IPC_TRACE_MARKER();
1499
1500 if (ics->xc == NULL) {
1501 return XRT_ERROR_IPC_SESSION_NOT_CREATED;
1502 }
1503
1504 return xrt_comp_create_passthrough_layer(ics->xc, info);
1505}
1506
1507xrt_result_t
1508ipc_handle_compositor_destroy_passthrough(volatile struct ipc_client_state *ics)
1509{
1510 IPC_TRACE_MARKER();
1511
1512 if (ics->xc == NULL) {
1513 return XRT_ERROR_IPC_SESSION_NOT_CREATED;
1514 }
1515
1516 xrt_comp_destroy_passthrough(ics->xc);
1517
1518 return XRT_SUCCESS;
1519}
1520
1521xrt_result_t
1522ipc_handle_compositor_set_thread_hint(volatile struct ipc_client_state *ics,
1523 enum xrt_thread_hint hint,
1524 uint32_t thread_id)
1525
1526{
1527 IPC_TRACE_MARKER();
1528
1529 if (ics->xc == NULL) {
1530 return XRT_ERROR_IPC_SESSION_NOT_CREATED;
1531 }
1532
1533 return xrt_comp_set_thread_hint(ics->xc, hint, thread_id);
1534}
1535
1536xrt_result_t
1537ipc_handle_compositor_get_reference_bounds_rect(volatile struct ipc_client_state *ics,
1538 enum xrt_reference_space_type reference_space_type,
1539 struct xrt_vec2 *bounds)
1540{
1541 IPC_TRACE_MARKER();
1542
1543 if (ics->xc == NULL) {
1544 return XRT_ERROR_IPC_SESSION_NOT_CREATED;
1545 }
1546
1547 return xrt_comp_get_reference_bounds_rect(ics->xc, reference_space_type, bounds);
1548}
1549
/*
 * Fills @p list with the id of every client that currently has a running
 * server thread.
 */
xrt_result_t
ipc_handle_system_get_clients(volatile struct ipc_client_state *_ics, struct ipc_client_list *list)
{
	struct ipc_server *s = _ics->server;

	// Lock the client list while we walk the thread array.
	os_mutex_lock(&s->global_state.lock);

	uint32_t count = 0;
	for (uint32_t i = 0; i < IPC_MAX_CLIENTS; i++) {

		volatile struct ipc_client_state *ics = &s->threads[i].ics;

		// Is this thread running? A negative index marks an unused slot.
		if (ics->server_thread_index < 0) {
			continue;
		}

		list->ids[count++] = ics->client_state.id;
	}

	list->id_count = count;

	// Unlock now.
	os_mutex_unlock(&s->global_state.lock);

	return XRT_SUCCESS;
}
1578
1579xrt_result_t
1580ipc_handle_system_get_properties(volatile struct ipc_client_state *_ics, struct xrt_system_properties *out_properties)
1581{
1582 struct ipc_server *s = _ics->server;
1583
1584 return ipc_server_get_system_properties(s, out_properties);
1585}
1586
1587xrt_result_t
1588ipc_handle_system_get_client_info(volatile struct ipc_client_state *_ics,
1589 uint32_t client_id,
1590 struct ipc_app_state *out_ias)
1591{
1592 struct ipc_server *s = _ics->server;
1593
1594 return ipc_server_get_client_app_state(s, client_id, out_ias);
1595}
1596
1597xrt_result_t
1598ipc_handle_system_set_primary_client(volatile struct ipc_client_state *_ics, uint32_t client_id)
1599{
1600 struct ipc_server *s = _ics->server;
1601
1602 IPC_INFO(s, "System setting active client to %d.", client_id);
1603
1604 return ipc_server_set_active_client(s, client_id);
1605}
1606
1607xrt_result_t
1608ipc_handle_system_set_focused_client(volatile struct ipc_client_state *ics, uint32_t client_id)
1609{
1610 IPC_INFO(ics->server, "UNIMPLEMENTED: system setting focused client to %d.", client_id);
1611
1612 return XRT_SUCCESS;
1613}
1614
1615xrt_result_t
1616ipc_handle_system_toggle_io_client(volatile struct ipc_client_state *_ics, uint32_t client_id)
1617{
1618 struct ipc_server *s = _ics->server;
1619
1620 IPC_INFO(s, "System toggling io for client %u.", client_id);
1621
1622 return ipc_server_toggle_io_client(s, client_id);
1623}
1624
1625xrt_result_t
1626ipc_handle_system_toggle_io_device(volatile struct ipc_client_state *ics, uint32_t device_id)
1627{
1628 if (device_id >= IPC_MAX_DEVICES) {
1629 return XRT_ERROR_IPC_FAILURE;
1630 }
1631
1632 struct ipc_device *idev = &ics->server->idevs[device_id];
1633
1634 idev->io_active = !idev->io_active;
1635
1636 return XRT_SUCCESS;
1637}
1638
1639xrt_result_t
1640ipc_handle_swapchain_get_properties(volatile struct ipc_client_state *ics,
1641 const struct xrt_swapchain_create_info *info,
1642 struct xrt_swapchain_create_properties *xsccp)
1643{
1644 IPC_TRACE_MARKER();
1645
1646 if (ics->xc == NULL) {
1647 return XRT_ERROR_IPC_SESSION_NOT_CREATED;
1648 }
1649
1650 return xrt_comp_get_swapchain_create_properties(ics->xc, info, xsccp);
1651}
1652
1653xrt_result_t
1654ipc_handle_swapchain_create(volatile struct ipc_client_state *ics,
1655 const struct xrt_swapchain_create_info *info,
1656 uint32_t *out_id,
1657 uint32_t *out_image_count,
1658 uint64_t *out_size,
1659 bool *out_use_dedicated_allocation,
1660 uint32_t max_handle_capacity,
1661 xrt_graphics_buffer_handle_t *out_handles,
1662 uint32_t *out_handle_count)
1663{
1664 IPC_TRACE_MARKER();
1665
1666 xrt_result_t xret = XRT_SUCCESS;
1667 uint32_t index = 0;
1668
1669 xret = validate_swapchain_state(ics, &index);
1670 if (xret != XRT_SUCCESS) {
1671 return xret;
1672 }
1673
1674 // Create the swapchain
1675 struct xrt_swapchain *xsc = NULL; // Has to be NULL.
1676 xret = xrt_comp_create_swapchain(ics->xc, info, &xsc);
1677 if (xret != XRT_SUCCESS) {
1678 if (xret == XRT_ERROR_SWAPCHAIN_FLAG_VALID_BUT_UNSUPPORTED) {
1679 IPC_WARN(ics->server,
1680 "xrt_comp_create_swapchain: Attempted to create valid, but unsupported swapchain");
1681 } else {
1682 IPC_ERROR(ics->server, "Error xrt_comp_create_swapchain failed!");
1683 }
1684 return xret;
1685 }
1686
1687 // It's now safe to increment the number of swapchains.
1688 ics->swapchain_count++;
1689
1690 IPC_TRACE(ics->server, "Created swapchain %d.", index);
1691
1692 set_swapchain_info(ics, index, info, xsc);
1693
1694 // return our result to the caller.
1695 struct xrt_swapchain_native *xscn = (struct xrt_swapchain_native *)xsc;
1696
1697 // Limit checking
1698 assert(xsc->image_count <= XRT_MAX_SWAPCHAIN_IMAGES);
1699 assert(xsc->image_count <= max_handle_capacity);
1700
1701 for (size_t i = 1; i < xsc->image_count; i++) {
1702 assert(xscn->images[0].size == xscn->images[i].size);
1703 assert(xscn->images[0].use_dedicated_allocation == xscn->images[i].use_dedicated_allocation);
1704 }
1705
1706 // Assuming all images allocated in the same swapchain have the same allocation requirements.
1707 *out_size = xscn->images[0].size;
1708 *out_use_dedicated_allocation = xscn->images[0].use_dedicated_allocation;
1709 *out_id = index;
1710 *out_image_count = xsc->image_count;
1711
1712 // Setup the fds.
1713 *out_handle_count = xsc->image_count;
1714 for (size_t i = 0; i < xsc->image_count; i++) {
1715 out_handles[i] = xscn->images[i].handle;
1716 }
1717
1718 return XRT_SUCCESS;
1719}
1720
1721xrt_result_t
1722ipc_handle_swapchain_import(volatile struct ipc_client_state *ics,
1723 const struct xrt_swapchain_create_info *info,
1724 const struct ipc_arg_swapchain_from_native *args,
1725 uint32_t *out_id,
1726 const xrt_graphics_buffer_handle_t *handles,
1727 uint32_t handle_count)
1728{
1729 IPC_TRACE_MARKER();
1730
1731 xrt_result_t xret = XRT_SUCCESS;
1732 uint32_t index = 0;
1733
1734 xret = validate_swapchain_state(ics, &index);
1735 if (xret != XRT_SUCCESS) {
1736 return xret;
1737 }
1738
1739 struct xrt_image_native xins[XRT_MAX_SWAPCHAIN_IMAGES] = XRT_STRUCT_INIT;
1740 for (uint32_t i = 0; i < handle_count; i++) {
1741 xins[i].handle = handles[i];
1742 xins[i].size = args->sizes[i];
1743#if defined(XRT_GRAPHICS_BUFFER_HANDLE_IS_WIN32_HANDLE)
1744 // DXGI handles need to be dealt with differently, they are identified
1745 // by having their lower bit set to 1 during transfer
1746 if ((size_t)xins[i].handle & 1) {
1747 xins[i].handle = (HANDLE)((size_t)xins[i].handle - 1);
1748 xins[i].is_dxgi_handle = true;
1749 }
1750#endif
1751 }
1752
1753 // create the swapchain
1754 struct xrt_swapchain *xsc = NULL;
1755 xret = xrt_comp_import_swapchain(ics->xc, info, xins, handle_count, &xsc);
1756 if (xret != XRT_SUCCESS) {
1757 return xret;
1758 }
1759
1760 // It's now safe to increment the number of swapchains.
1761 ics->swapchain_count++;
1762
1763 IPC_TRACE(ics->server, "Created swapchain %d.", index);
1764
1765 set_swapchain_info(ics, index, info, xsc);
1766 *out_id = index;
1767
1768 return XRT_SUCCESS;
1769}
1770
1771xrt_result_t
1772ipc_handle_swapchain_wait_image(volatile struct ipc_client_state *ics, uint32_t id, int64_t timeout_ns, uint32_t index)
1773{
1774 if (ics->xc == NULL) {
1775 return XRT_ERROR_IPC_SESSION_NOT_CREATED;
1776 }
1777
1778 //! @todo Look up the index.
1779 uint32_t sc_index = id;
1780 struct xrt_swapchain *xsc = ics->xscs[sc_index];
1781
1782 return xrt_swapchain_wait_image(xsc, timeout_ns, index);
1783}
1784
1785xrt_result_t
1786ipc_handle_swapchain_acquire_image(volatile struct ipc_client_state *ics, uint32_t id, uint32_t *out_index)
1787{
1788 if (ics->xc == NULL) {
1789 return XRT_ERROR_IPC_SESSION_NOT_CREATED;
1790 }
1791
1792 //! @todo Look up the index.
1793 uint32_t sc_index = id;
1794 struct xrt_swapchain *xsc = ics->xscs[sc_index];
1795
1796 xrt_swapchain_acquire_image(xsc, out_index);
1797
1798 return XRT_SUCCESS;
1799}
1800
1801xrt_result_t
1802ipc_handle_swapchain_release_image(volatile struct ipc_client_state *ics, uint32_t id, uint32_t index)
1803{
1804 if (ics->xc == NULL) {
1805 return XRT_ERROR_IPC_SESSION_NOT_CREATED;
1806 }
1807
1808 //! @todo Look up the index.
1809 uint32_t sc_index = id;
1810 struct xrt_swapchain *xsc = ics->xscs[sc_index];
1811
1812 xrt_swapchain_release_image(xsc, index);
1813
1814 return XRT_SUCCESS;
1815}
1816
1817xrt_result_t
1818ipc_handle_swapchain_destroy(volatile struct ipc_client_state *ics, uint32_t id)
1819{
1820 if (ics->xc == NULL) {
1821 return XRT_ERROR_IPC_SESSION_NOT_CREATED;
1822 }
1823
1824 ics->swapchain_count--;
1825
1826 // Drop our reference, does NULL checking. Cast away volatile.
1827 xrt_swapchain_reference((struct xrt_swapchain **)&ics->xscs[id], NULL);
1828 ics->swapchain_data[id].active = false;
1829
1830 return XRT_SUCCESS;
1831}
1832
1833
1834/*
1835 *
1836 * Compositor semaphore function..
1837 *
1838 */
1839
1840xrt_result_t
1841ipc_handle_compositor_semaphore_create(volatile struct ipc_client_state *ics,
1842 uint32_t *out_id,
1843 uint32_t max_handle_count,
1844 xrt_graphics_sync_handle_t *out_handles,
1845 uint32_t *out_handle_count)
1846{
1847 xrt_result_t xret;
1848
1849 if (ics->xc == NULL) {
1850 return XRT_ERROR_IPC_SESSION_NOT_CREATED;
1851 }
1852
1853 int id = 0;
1854 for (; id < IPC_MAX_CLIENT_SEMAPHORES; id++) {
1855 if (ics->xcsems[id] == NULL) {
1856 break;
1857 }
1858 }
1859
1860 if (id == IPC_MAX_CLIENT_SEMAPHORES) {
1861 IPC_ERROR(ics->server, "Too many compositor semaphores alive!");
1862 return XRT_ERROR_IPC_FAILURE;
1863 }
1864
1865 struct xrt_compositor_semaphore *xcsem = NULL;
1866 xrt_graphics_sync_handle_t handle = XRT_GRAPHICS_SYNC_HANDLE_INVALID;
1867
1868 xret = xrt_comp_create_semaphore(ics->xc, &handle, &xcsem);
1869 if (xret != XRT_SUCCESS) {
1870 IPC_ERROR(ics->server, "Failed to create compositor semaphore!");
1871 return xret;
1872 }
1873
1874 // Set it directly, no need to use reference here.
1875 ics->xcsems[id] = xcsem;
1876
1877 // Set out parameters.
1878 *out_id = id;
1879 out_handles[0] = handle;
1880 *out_handle_count = 1;
1881
1882 return XRT_SUCCESS;
1883}
1884
1885xrt_result_t
1886ipc_handle_compositor_semaphore_destroy(volatile struct ipc_client_state *ics, uint32_t id)
1887{
1888 if (ics->xc == NULL) {
1889 return XRT_ERROR_IPC_SESSION_NOT_CREATED;
1890 }
1891
1892 if (ics->xcsems[id] == NULL) {
1893 IPC_ERROR(ics->server, "Client tried to delete non-existent compositor semaphore!");
1894 return XRT_ERROR_IPC_FAILURE;
1895 }
1896
1897 ics->compositor_semaphore_count--;
1898
1899 // Drop our reference, does NULL checking. Cast away volatile.
1900 xrt_compositor_semaphore_reference((struct xrt_compositor_semaphore **)&ics->xcsems[id], NULL);
1901
1902 return XRT_SUCCESS;
1903}
1904
1905
1906/*
1907 *
1908 * Device functions.
1909 *
1910 */
1911
/*
 * Polls the device for fresh input state and publishes it into the shared
 * memory region the client reads from. When IO is toggled off for the client
 * or the device, zeroed inputs are published instead of the real state.
 */
xrt_result_t
ipc_handle_device_update_input(volatile struct ipc_client_state *ics, uint32_t id)
{
	// To make the code a bit more readable.
	uint32_t device_id = id;
	struct ipc_shared_memory *ism = get_ism(ics);
	struct ipc_device *idev = get_idev(ics, device_id);
	struct xrt_device *xdev = idev->xdev;
	struct ipc_shared_device *isdev = &ism->isdevs[device_id];

	// Update inputs.
	xrt_result_t xret = xrt_device_update_inputs(xdev);
	if (xret != XRT_SUCCESS) {
		IPC_ERROR(ics->server, "Failed to update input");
		return xret;
	}

	// Copy data into the shared memory; this device's inputs occupy a
	// contiguous run starting at first_input_index.
	struct xrt_input *src = xdev->inputs;
	struct xrt_input *dst = &ism->inputs[isdev->first_input_index];
	size_t size = sizeof(struct xrt_input) * isdev->input_count;

	// IO must be enabled both for this client and for the device itself.
	bool io_active = ics->io_active && idev->io_active;
	if (io_active) {
		memcpy(dst, src, size);
	} else {
		// Publish zeroed inputs so the client sees no state, but keep
		// the input names so lookups by name still succeed.
		memset(dst, 0, size);

		for (uint32_t i = 0; i < isdev->input_count; i++) {
			dst[i].name = src[i].name;

			// Special case the rotation of the head.
			if (dst[i].name == XRT_INPUT_GENERIC_HEAD_POSE) {
				dst[i].active = src[i].active;
			}
		}
	}

	// Reply.
	return XRT_SUCCESS;
}
1953
1954static struct xrt_input *
1955find_input(volatile struct ipc_client_state *ics, uint32_t device_id, enum xrt_input_name name)
1956{
1957 struct ipc_shared_memory *ism = get_ism(ics);
1958 struct ipc_shared_device *isdev = &ism->isdevs[device_id];
1959 struct xrt_input *io = &ism->inputs[isdev->first_input_index];
1960
1961 for (uint32_t i = 0; i < isdev->input_count; i++) {
1962 if (io[i].name == name) {
1963 return &io[i];
1964 }
1965 }
1966
1967 return NULL;
1968}
1969
/*
 * Gets the tracked pose for one named input of a device, honouring the
 * per-client and per-device IO toggles.
 */
xrt_result_t
ipc_handle_device_get_tracked_pose(volatile struct ipc_client_state *ics,
                                   uint32_t id,
                                   enum xrt_input_name name,
                                   int64_t at_timestamp,
                                   struct xrt_space_relation *out_relation)
{
	// To make the code a bit more readable.
	// NOTE(review): id is not bounds-checked here, unlike handlers that use
	// GET_XDEV_OR_RETURN - confirm the generated dispatch validates it.
	uint32_t device_id = id;
	struct ipc_device *isdev = &ics->server->idevs[device_id];
	struct xrt_device *xdev = isdev->xdev;

	// Find the input; fail if the device does not expose it.
	struct xrt_input *input = find_input(ics, device_id, name);
	if (input == NULL) {
		return XRT_ERROR_IPC_FAILURE;
	}

	// Special case the headpose.
	// The head pose stays available even when IO is toggled off.
	bool disabled = (!isdev->io_active || !ics->io_active) && name != XRT_INPUT_GENERIC_HEAD_POSE;
	bool active_on_client = input->active;

	// We have been disabled but the client hasn't called update.
	// Hand back a zeroed relation instead of an error in that window.
	if (disabled && active_on_client) {
		U_ZERO(out_relation);
		return XRT_SUCCESS;
	}

	if (disabled || !active_on_client) {
		return XRT_ERROR_POSE_NOT_ACTIVE;
	}

	// Get the pose.
	return xrt_device_get_tracked_pose(xdev, name, at_timestamp, out_relation);
}
2005
2006xrt_result_t
2007ipc_handle_device_get_hand_tracking(volatile struct ipc_client_state *ics,
2008 uint32_t id,
2009 enum xrt_input_name name,
2010 int64_t at_timestamp,
2011 struct xrt_hand_joint_set *out_value,
2012 int64_t *out_timestamp)
2013{
2014
2015 // To make the code a bit more readable.
2016 uint32_t device_id = id;
2017 struct xrt_device *xdev = NULL;
2018 GET_XDEV_OR_RETURN(ics, device_id, xdev);
2019
2020 // Get the pose.
2021 return xrt_device_get_hand_tracking(xdev, name, at_timestamp, out_value, out_timestamp);
2022}
2023
/*
 * Variable-length version of get_view_poses: the data size depends on
 * view_count, so the reply struct and the fov/pose arrays are sent as
 * separate messages over the channel instead of a single fixed reply.
 */
xrt_result_t
ipc_handle_device_get_view_poses(volatile struct ipc_client_state *ics,
                                 uint32_t id,
                                 const struct xrt_vec3 *fallback_eye_relation,
                                 int64_t at_timestamp_ns,
                                 uint32_t view_count)
{
	struct ipc_message_channel *imc = (struct ipc_message_channel *)&ics->imc;
	struct ipc_device_get_view_poses_reply reply = XRT_STRUCT_INIT;
	struct ipc_server *s = ics->server;
	xrt_result_t xret;

	// To make the code a bit more readable.
	uint32_t device_id = id;
	struct xrt_device *xdev = NULL;
	// NOTE(review): on failure this returns without sending the reply the
	// client is waiting for - confirm the client side handles that path.
	GET_XDEV_OR_RETURN(ics, device_id, xdev);


	if (view_count == 0 || view_count > IPC_MAX_RAW_VIEWS) {
		IPC_ERROR(s, "Client asked for zero or too many views! (%u)", view_count);

		reply.result = XRT_ERROR_IPC_FAILURE;
		// Send the full reply, the client expects it.
		return ipc_send(imc, &reply, sizeof(reply));
	}

	// Data to get.
	struct xrt_fov fovs[IPC_MAX_RAW_VIEWS];
	struct xrt_pose poses[IPC_MAX_RAW_VIEWS];

	reply.result = xrt_device_get_view_poses( //
	    xdev,                                 //
	    fallback_eye_relation,                //
	    at_timestamp_ns,                      //
	    view_count,                           //
	    &reply.head_relation,                 //
	    fovs,                                 //
	    poses);                               //

	/*
	 * This isn't really needed, but demonstrates the server sending the
	 * length back in the reply, a common pattern for other functions.
	 */
	reply.view_count = view_count;

	/*
	 * Send the reply first isn't required for functions in general, but it
	 * will need to match what the client expects. This demonstrates the
	 * server sending the length back in the reply, a common pattern for
	 * other functions.
	 */
	xret = ipc_send(imc, &reply, sizeof(reply));
	if (xret != XRT_SUCCESS) {
		IPC_ERROR(s, "Failed to send reply!");
		return xret;
	}

	// Send the fovs that we got.
	xret = ipc_send(imc, fovs, sizeof(struct xrt_fov) * view_count);
	if (xret != XRT_SUCCESS) {
		IPC_ERROR(s, "Failed to send fovs!");
		return xret;
	}

	// And finally the poses.
	xret = ipc_send(imc, poses, sizeof(struct xrt_pose) * view_count);
	if (xret != XRT_SUCCESS) {
		IPC_ERROR(s, "Failed to send poses!");
		return xret;
	}

	return XRT_SUCCESS;
}
2097
2098xrt_result_t
2099ipc_handle_device_get_view_poses_2(volatile struct ipc_client_state *ics,
2100 uint32_t id,
2101 const struct xrt_vec3 *default_eye_relation,
2102 int64_t at_timestamp_ns,
2103 uint32_t view_count,
2104 struct ipc_info_get_view_poses_2 *out_info)
2105{
2106 // To make the code a bit more readable.
2107 uint32_t device_id = id;
2108 struct xrt_device *xdev = NULL;
2109 GET_XDEV_OR_RETURN(ics, device_id, xdev);
2110 return xrt_device_get_view_poses( //
2111 xdev, //
2112 default_eye_relation, //
2113 at_timestamp_ns, //
2114 view_count, //
2115 &out_info->head_relation, //
2116 out_info->fovs, //
2117 out_info->poses); //
2118}
2119
2120xrt_result_t
2121ipc_handle_device_compute_distortion(volatile struct ipc_client_state *ics,
2122 uint32_t id,
2123 uint32_t view,
2124 float u,
2125 float v,
2126 struct xrt_uv_triplet *out_triplet)
2127{
2128 // To make the code a bit more readable.
2129 uint32_t device_id = id;
2130 struct xrt_device *xdev = NULL;
2131 GET_XDEV_OR_RETURN(ics, device_id, xdev);
2132
2133 return xrt_device_compute_distortion(xdev, view, u, v, out_triplet);
2134}
2135
/*
 * Starts plane detection on the device and records the returned id so the
 * detection can be tracked (and cleaned up) per client.
 */
xrt_result_t
ipc_handle_device_begin_plane_detection_ext(volatile struct ipc_client_state *ics,
                                            uint32_t id,
                                            uint64_t plane_detection_id,
                                            uint64_t *out_plane_detection_id)
{
	// To make the code a bit more readable.
	uint32_t device_id = id;
	struct xrt_device *xdev = NULL;
	GET_XDEV_OR_RETURN(ics, device_id, xdev);

	// Grow the per-client tracking arrays by one entry when full.
	uint64_t new_count = ics->plane_detection_count + 1;

	if (new_count > ics->plane_detection_size) {
		IPC_TRACE(ics->server, "Plane detections tracking size: %u -> %u", (uint32_t)ics->plane_detection_count,
		          (uint32_t)new_count);

		// NOTE(review): if these reallocs fail the arrays are freed;
		// the writes below would then dereference NULL - confirm the
		// macro's failure semantics (abort vs. free-and-NULL).
		U_ARRAY_REALLOC_OR_FREE(ics->plane_detection_ids, uint64_t, new_count);
		U_ARRAY_REALLOC_OR_FREE(ics->plane_detection_xdev, struct xrt_device *, new_count);
		ics->plane_detection_size = new_count;
	}

	// The begin info is read out of shared memory, written by the client.
	struct xrt_plane_detector_begin_info_ext *begin_info = &get_ism(ics)->plane_begin_info_ext;

	enum xrt_result xret =
	    xrt_device_begin_plane_detection_ext(xdev, begin_info, plane_detection_id, out_plane_detection_id);
	if (xret != XRT_SUCCESS) {
		IPC_TRACE(ics->server, "xrt_device_begin_plane_detection_ext error: %d", xret);
		return xret;
	}

	// Only track non-zero ids (zero appears to mean no detection was
	// started - confirm against the plane detector interface).
	if (*out_plane_detection_id != 0) {
		uint64_t index = ics->plane_detection_count;
		ics->plane_detection_ids[index] = *out_plane_detection_id;
		ics->plane_detection_xdev[index] = xdev;
		ics->plane_detection_count = new_count;
	}

	return XRT_SUCCESS;
}
2176
/*
 * Destroys a plane detection on the device and removes it from this client's
 * tracking arrays by compacting the entries after it one slot to the left.
 */
xrt_result_t
ipc_handle_device_destroy_plane_detection_ext(volatile struct ipc_client_state *ics,
                                              uint32_t id,
                                              uint64_t plane_detection_id)
{
	// To make the code a bit more readable.
	uint32_t device_id = id;
	struct xrt_device *xdev = NULL;
	GET_XDEV_OR_RETURN(ics, device_id, xdev);

	// Destroy on the device first; bookkeeping happens below regardless.
	enum xrt_result xret = xrt_device_destroy_plane_detection_ext(xdev, plane_detection_id);

	// Iterate through plane detection ids. Once found, move every item one slot to the left.
	bool compact_right = false;
	for (uint32_t i = 0; i < ics->plane_detection_count; i++) {
		if (ics->plane_detection_ids[i] == plane_detection_id) {
			compact_right = true;
		}
		// From the found entry onwards, shift ids and xdevs down by one.
		if (compact_right && (i + 1) < ics->plane_detection_count) {
			ics->plane_detection_ids[i] = ics->plane_detection_ids[i + 1];
			ics->plane_detection_xdev[i] = ics->plane_detection_xdev[i + 1];
		}
	}
	// if the plane detection was correctly tracked compact_right should always be true
	if (compact_right) {
		ics->plane_detection_count -= 1;
	} else {
		IPC_ERROR(ics->server, "Destroyed plane detection that was not tracked");
	}

	// Report the device error (if any) after the bookkeeping is done.
	if (xret != XRT_SUCCESS) {
		IPC_ERROR(ics->server, "xrt_device_destroy_plane_detection_ext error: %d", xret);
		return xret;
	}

	return XRT_SUCCESS;
}
2214
2215xrt_result_t
2216ipc_handle_device_get_plane_detection_state_ext(volatile struct ipc_client_state *ics,
2217 uint32_t id,
2218 uint64_t plane_detection_id,
2219 enum xrt_plane_detector_state_ext *out_state)
2220{
2221 // To make the code a bit more readable.
2222 uint32_t device_id = id;
2223 struct xrt_device *xdev = NULL;
2224 GET_XDEV_OR_RETURN(ics, device_id, xdev);
2225
2226 xrt_result_t xret = xrt_device_get_plane_detection_state_ext(xdev, plane_detection_id, out_state);
2227 if (xret != XRT_SUCCESS) {
2228 IPC_ERROR(ics->server, "xrt_device_get_plane_detection_state_ext error: %d", xret);
2229 return xret;
2230 }
2231
2232 return XRT_SUCCESS;
2233}
2234
2235xrt_result_t
2236ipc_handle_device_get_plane_detections_ext(volatile struct ipc_client_state *ics,
2237 uint32_t id,
2238 uint64_t plane_detection_id)
2239
2240{
2241 struct ipc_message_channel *imc = (struct ipc_message_channel *)&ics->imc;
2242 struct ipc_device_get_plane_detections_ext_reply reply = XRT_STRUCT_INIT;
2243 struct ipc_server *s = ics->server;
2244
2245 // To make the code a bit more readable.
2246 uint32_t device_id = id;
2247 struct xrt_device *xdev = NULL;
2248 GET_XDEV_OR_RETURN(ics, device_id, xdev);
2249
2250 struct xrt_plane_detections_ext out = {0};
2251
2252 xrt_result_t xret = xrt_device_get_plane_detections_ext(xdev, plane_detection_id, &out);
2253 if (xret != XRT_SUCCESS) {
2254 IPC_ERROR(ics->server, "xrt_device_get_plane_detections_ext error: %d", xret);
2255 // probably nothing allocated on error, but make sure
2256 xrt_plane_detections_ext_clear(&out);
2257 return xret;
2258 }
2259
2260 reply.result = XRT_SUCCESS;
2261 reply.location_size = out.location_count; // because we initialized to 0, now size == count
2262 reply.polygon_size = out.polygon_info_size;
2263 reply.vertex_size = out.vertex_size;
2264
2265 xret = ipc_send(imc, &reply, sizeof(reply));
2266 if (xret != XRT_SUCCESS) {
2267 IPC_ERROR(s, "Failed to send reply!");
2268 goto out;
2269 }
2270
2271 // send expected contents
2272
2273 if (out.location_count > 0) {
2274 xret =
2275 ipc_send(imc, out.locations, sizeof(struct xrt_plane_detector_location_ext) * out.location_count);
2276 if (xret != XRT_SUCCESS) {
2277 IPC_ERROR(s, "Failed to send locations!");
2278 goto out;
2279 }
2280
2281 xret = ipc_send(imc, out.polygon_info_start_index, sizeof(uint32_t) * out.location_count);
2282 if (xret != XRT_SUCCESS) {
2283 IPC_ERROR(s, "Failed to send locations!");
2284 goto out;
2285 }
2286 }
2287
2288 if (out.polygon_info_size > 0) {
2289 xret =
2290 ipc_send(imc, out.polygon_infos, sizeof(struct xrt_plane_polygon_info_ext) * out.polygon_info_size);
2291 if (xret != XRT_SUCCESS) {
2292 IPC_ERROR(s, "Failed to send polygon_infos!");
2293 goto out;
2294 }
2295 }
2296
2297 if (out.vertex_size > 0) {
2298 xret = ipc_send(imc, out.vertices, sizeof(struct xrt_vec2) * out.vertex_size);
2299 if (xret != XRT_SUCCESS) {
2300 IPC_ERROR(s, "Failed to send vertices!");
2301 goto out;
2302 }
2303 }
2304
2305out:
2306 xrt_plane_detections_ext_clear(&out);
2307 return xret;
2308}
2309
2310xrt_result_t
2311ipc_handle_device_get_presence(volatile struct ipc_client_state *ics, uint32_t id, bool *presence)
2312{
2313 struct xrt_device *xdev = NULL;
2314 GET_XDEV_OR_RETURN(ics, id, xdev);
2315 return xrt_device_get_presence(xdev, presence);
2316}
2317
2318xrt_result_t
2319ipc_handle_device_set_output(volatile struct ipc_client_state *ics,
2320 uint32_t id,
2321 enum xrt_output_name name,
2322 const struct xrt_output_value *value)
2323{
2324 // To make the code a bit more readable.
2325 uint32_t device_id = id;
2326 struct xrt_device *xdev = NULL;
2327 GET_XDEV_OR_RETURN(ics, device_id, xdev);
2328
2329 // Set the output.
2330 return xrt_device_set_output(xdev, name, value);
2331}
2332
2333xrt_result_t
2334ipc_handle_device_set_haptic_output(volatile struct ipc_client_state *ics,
2335 uint32_t id,
2336 enum xrt_output_name name,
2337 const struct ipc_pcm_haptic_buffer *buffer)
2338{
2339 IPC_TRACE_MARKER();
2340 struct ipc_message_channel *imc = (struct ipc_message_channel *)&ics->imc;
2341 struct ipc_server *s = ics->server;
2342
2343 xrt_result_t xret;
2344
2345 // To make the code a bit more readable.
2346 uint32_t device_id = id;
2347 struct xrt_device *xdev = NULL;
2348 GET_XDEV_OR_RETURN(ics, device_id, xdev);
2349
2350 os_mutex_lock(&ics->server->global_state.lock);
2351
2352 float *samples = U_TYPED_ARRAY_CALLOC(float, buffer->num_samples);
2353
2354 // send the allocation result
2355 xret = samples ? XRT_SUCCESS : XRT_ERROR_ALLOCATION;
2356 xret = ipc_send(imc, &xret, sizeof xret);
2357 if (xret != XRT_SUCCESS) {
2358 IPC_ERROR(ics->server, "Failed to send samples allocate result");
2359 goto set_haptic_output_end;
2360 }
2361
2362 if (!samples) {
2363 IPC_ERROR(s, "Failed to allocate samples for haptic output");
2364 xret = XRT_ERROR_ALLOCATION;
2365 goto set_haptic_output_end;
2366 }
2367
2368 xret = ipc_receive(imc, samples, sizeof(float) * buffer->num_samples);
2369 if (xret != XRT_SUCCESS) {
2370 IPC_ERROR(s, "Failed to receive samples");
2371 goto set_haptic_output_end;
2372 }
2373
2374 uint32_t samples_consumed;
2375 struct xrt_output_value value = {
2376 .type = XRT_OUTPUT_VALUE_TYPE_PCM_VIBRATION,
2377 .pcm_vibration =
2378 {
2379 .append = buffer->append,
2380 .buffer_size = buffer->num_samples,
2381 .sample_rate = buffer->sample_rate,
2382 .samples_consumed = &samples_consumed,
2383 .buffer = samples,
2384 },
2385 };
2386
2387 // Set the output.
2388 xrt_device_set_output(xdev, name, &value);
2389
2390 xret = ipc_send(imc, &samples_consumed, sizeof samples_consumed);
2391 if (xret != XRT_SUCCESS) {
2392 IPC_ERROR(ics->server, "Failed to send samples consumed");
2393 goto set_haptic_output_end;
2394 }
2395
2396 xret = XRT_SUCCESS;
2397
2398set_haptic_output_end:
2399 os_mutex_unlock(&ics->server->global_state.lock);
2400
2401 free(samples);
2402
2403 return xret;
2404}
2405
2406xrt_result_t
2407ipc_handle_device_get_output_limits(volatile struct ipc_client_state *ics,
2408 uint32_t id,
2409 struct xrt_output_limits *limits)
2410{
2411 // To make the code a bit more readable.
2412 uint32_t device_id = id;
2413 struct xrt_device *xdev = NULL;
2414 GET_XDEV_OR_RETURN(ics, device_id, xdev);
2415
2416 // Set the output.
2417 return xrt_device_get_output_limits(xdev, limits);
2418}
2419
/*
 * Sends the visibility mask for one view to the client: first a fixed-size
 * reply carrying the mask byte size, then the mask payload itself.
 */
xrt_result_t
ipc_handle_device_get_visibility_mask(volatile struct ipc_client_state *ics,
                                      uint32_t device_id,
                                      enum xrt_visibility_mask_type type,
                                      uint32_t view_index)
{
	struct ipc_message_channel *imc = (struct ipc_message_channel *)&ics->imc;
	struct ipc_device_get_visibility_mask_reply reply = XRT_STRUCT_INIT;
	struct ipc_server *s = ics->server;
	xrt_result_t xret;

	// @todo verify
	struct xrt_device *xdev = NULL;
	GET_XDEV_OR_RETURN(ics, device_id, xdev);
	struct xrt_visibility_mask *mask = NULL;
	if (xdev->get_visibility_mask) {
		// The device provides its own mask implementation.
		xret = xrt_device_get_visibility_mask(xdev, type, view_index, &mask);
		if (xret != XRT_SUCCESS) {
			IPC_ERROR(s, "Failed to get visibility mask");
			return xret;
		}
	} else {
		// Fall back to a default mask built from the view's FoV.
		// NOTE(review): assumes xdev->hmd is non-NULL and view_index is in
		// bounds of the distortion fov array — neither is validated here.
		struct xrt_fov fov = xdev->hmd->distortion.fov[view_index];
		u_visibility_mask_get_default(type, &fov, &mask);
	}

	// A NULL mask is reported to the client as a zero-sized mask.
	if (mask == NULL) {
		IPC_ERROR(s, "Failed to get visibility mask");
		reply.mask_size = 0;
	} else {
		reply.mask_size = xrt_visibility_mask_get_size(mask);
	}

	// Fixed-size reply first, so the client knows how much to receive.
	xret = ipc_send(imc, &reply, sizeof(reply));
	if (xret != XRT_SUCCESS) {
		IPC_ERROR(s, "Failed to send reply");
		goto out_free;
	}

	// Then the mask payload (mask may be NULL here with mask_size == 0).
	xret = ipc_send(imc, mask, reply.mask_size);
	if (xret != XRT_SUCCESS) {
		IPC_ERROR(s, "Failed to send mask");
		goto out_free;
	}

out_free:
	// free(NULL) is a no-op, safe for the failure case above.
	free(mask);
	return xret;
}
2469
2470xrt_result_t
2471ipc_handle_device_is_form_factor_available(volatile struct ipc_client_state *ics,
2472 uint32_t id,
2473 enum xrt_form_factor form_factor,
2474 bool *out_available)
2475{
2476 // To make the code a bit more readable.
2477 uint32_t device_id = id;
2478 struct xrt_device *xdev = NULL;
2479 GET_XDEV_OR_RETURN(ics, device_id, xdev);
2480 *out_available = xrt_device_is_form_factor_available(xdev, form_factor);
2481 return XRT_SUCCESS;
2482}
2483
2484xrt_result_t
2485ipc_handle_system_devices_get_roles(volatile struct ipc_client_state *ics, struct xrt_system_roles *out_roles)
2486{
2487 return xrt_system_devices_get_roles(ics->server->xsysd, out_roles);
2488}
2489
2490xrt_result_t
2491ipc_handle_system_devices_begin_feature(volatile struct ipc_client_state *ics, enum xrt_device_feature_type type)
2492{
2493 struct xrt_system_devices *xsysd = ics->server->xsysd;
2494 xrt_result_t xret;
2495
2496 xret = validate_device_feature_type(ics, type);
2497 if (xret != XRT_SUCCESS) {
2498 return XRT_ERROR_IPC_FAILURE;
2499 }
2500
2501 // Is this feature already used?
2502 if (ics->device_feature_used[type]) {
2503 IPC_ERROR(ics->server, "feature '%u' already used!", type);
2504 return XRT_ERROR_IPC_FAILURE;
2505 }
2506
2507 xret = xrt_system_devices_feature_inc(xsysd, type);
2508 if (xret != XRT_SUCCESS) {
2509 IPC_ERROR(ics->server, "xrt_system_devices_feature_inc failed");
2510 return xret;
2511 }
2512
2513 // Can now mark it as used.
2514 ics->device_feature_used[type] = true;
2515
2516 return XRT_SUCCESS;
2517}
2518
2519xrt_result_t
2520ipc_handle_system_devices_end_feature(volatile struct ipc_client_state *ics, enum xrt_device_feature_type type)
2521{
2522 struct xrt_system_devices *xsysd = ics->server->xsysd;
2523 xrt_result_t xret;
2524
2525 xret = validate_device_feature_type(ics, type);
2526 if (xret != XRT_SUCCESS) {
2527 return XRT_ERROR_IPC_FAILURE;
2528 }
2529
2530 if (!ics->device_feature_used[type]) {
2531 IPC_ERROR(ics->server, "feature '%u' not used!", type);
2532 return XRT_ERROR_IPC_FAILURE;
2533 }
2534
2535 xret = xrt_system_devices_feature_dec(xsysd, type);
2536 if (xret != XRT_SUCCESS) {
2537 IPC_ERROR(ics->server, "xrt_system_devices_feature_dec failed");
2538 return xret;
2539 }
2540
2541 // Now we can mark it as not used.
2542 ics->device_feature_used[type] = false;
2543
2544 return XRT_SUCCESS;
2545}
2546
2547xrt_result_t
2548ipc_handle_device_get_face_tracking(volatile struct ipc_client_state *ics,
2549 uint32_t id,
2550 enum xrt_input_name facial_expression_type,
2551 int64_t at_timestamp_ns,
2552 struct xrt_facial_expression_set *out_value)
2553{
2554 const uint32_t device_id = id;
2555 struct xrt_device *xdev = NULL;
2556 GET_XDEV_OR_RETURN(ics, device_id, xdev);
2557 // Get facial expression data.
2558 return xrt_device_get_face_tracking(xdev, facial_expression_type, at_timestamp_ns, out_value);
2559}
2560
2561xrt_result_t
2562ipc_handle_device_get_body_skeleton(volatile struct ipc_client_state *ics,
2563 uint32_t id,
2564 enum xrt_input_name body_tracking_type,
2565 struct xrt_body_skeleton *out_value)
2566{
2567 struct xrt_device *xdev = NULL;
2568 GET_XDEV_OR_RETURN(ics, id, xdev);
2569 return xrt_device_get_body_skeleton(xdev, body_tracking_type, out_value);
2570}
2571
2572xrt_result_t
2573ipc_handle_device_get_body_joints(volatile struct ipc_client_state *ics,
2574 uint32_t id,
2575 enum xrt_input_name body_tracking_type,
2576 int64_t desired_timestamp_ns,
2577 struct xrt_body_joint_set *out_value)
2578{
2579 struct xrt_device *xdev = NULL;
2580 GET_XDEV_OR_RETURN(ics, id, xdev);
2581 return xrt_device_get_body_joints(xdev, body_tracking_type, desired_timestamp_ns, out_value);
2582}
2583
2584xrt_result_t
2585ipc_handle_device_reset_body_tracking_calibration_meta(volatile struct ipc_client_state *ics, uint32_t id)
2586{
2587 struct xrt_device *xdev = get_xdev(ics, id);
2588 return xrt_device_reset_body_tracking_calibration_meta(xdev);
2589}
2590
2591xrt_result_t
2592ipc_handle_device_set_body_tracking_calibration_override_meta(volatile struct ipc_client_state *ics,
2593 uint32_t id,
2594 float new_body_height)
2595{
2596 struct xrt_device *xdev = get_xdev(ics, id);
2597 return xrt_device_set_body_tracking_calibration_override_meta(xdev, new_body_height);
2598}
2599
2600xrt_result_t
2601ipc_handle_device_get_battery_status(
2602 volatile struct ipc_client_state *ics, uint32_t id, bool *out_present, bool *out_charging, float *out_charge)
2603{
2604 struct xrt_device *xdev = NULL;
2605 GET_XDEV_OR_RETURN(ics, id, xdev);
2606 return xrt_device_get_battery_status(xdev, out_present, out_charging, out_charge);
2607}
2608
2609xrt_result_t
2610ipc_handle_device_get_brightness(volatile struct ipc_client_state *ics, uint32_t id, float *out_brightness)
2611{
2612 struct xrt_device *xdev = NULL;
2613 GET_XDEV_OR_RETURN(ics, id, xdev);
2614
2615 if (!xdev->supported.brightness_control) {
2616 return XRT_ERROR_FEATURE_NOT_SUPPORTED;
2617 }
2618
2619 return xrt_device_get_brightness(xdev, out_brightness);
2620}
2621
2622xrt_result_t
2623ipc_handle_device_set_brightness(volatile struct ipc_client_state *ics, uint32_t id, float brightness, bool relative)
2624{
2625 struct xrt_device *xdev = NULL;
2626 GET_XDEV_OR_RETURN(ics, id, xdev);
2627
2628 if (!xdev->supported.brightness_control) {
2629 return XRT_ERROR_FEATURE_NOT_SUPPORTED;
2630 }
2631
2632 return xrt_device_set_brightness(xdev, brightness, relative);
2633}
2634
2635xrt_result_t
2636ipc_handle_future_get_state(volatile struct ipc_client_state *ics, uint32_t future_id, enum xrt_future_state *out_state)
2637{
2638 struct xrt_future *xft = NULL;
2639 xrt_result_t xret = validate_future_id(ics, future_id, &xft);
2640 if (xret != XRT_SUCCESS) {
2641 return xret;
2642 }
2643 return xrt_future_get_state(xft, out_state);
2644}
2645
2646xrt_result_t
2647ipc_handle_future_get_result(volatile struct ipc_client_state *ics,
2648 uint32_t future_id,
2649 struct xrt_future_result *out_ft_result)
2650{
2651 struct xrt_future *xft = NULL;
2652 xrt_result_t xret = validate_future_id(ics, future_id, &xft);
2653 if (xret != XRT_SUCCESS) {
2654 return xret;
2655 }
2656 return xrt_future_get_result(xft, out_ft_result);
2657}
2658
2659xrt_result_t
2660ipc_handle_future_cancel(volatile struct ipc_client_state *ics, uint32_t future_id)
2661{
2662 struct xrt_future *xft = NULL;
2663 xrt_result_t xret = validate_future_id(ics, future_id, &xft);
2664 if (xret != XRT_SUCCESS) {
2665 return xret;
2666 }
2667 return xrt_future_cancel(xft);
2668}
2669
2670xrt_result_t
2671ipc_handle_future_destroy(volatile struct ipc_client_state *ics, uint32_t future_id)
2672{
2673 return release_future(ics, future_id);
2674}