// The open source OpenXR runtime.
1// Copyright 2020, Collabora, Ltd.
2// Copyright 2025, NVIDIA CORPORATION.
3// SPDX-License-Identifier: BSL-1.0
4/*!
5 * @file
6 * @brief Client side wrapper of compositor.
7 * @author Pete Black <pblack@collabora.com>
8 * @author Jakob Bornecrantz <jakob@collabora.com>
9 * @ingroup ipc_client
10 */
11
12#include "xrt/xrt_device.h"
13#include "xrt/xrt_compositor.h"
14#include "xrt/xrt_defines.h"
15#include "xrt/xrt_config_os.h"
16
17
18#include "os/os_time.h"
19
20#include "util/u_misc.h"
21#include "util/u_wait.h"
22#include "util/u_handles.h"
23#include "util/u_trace_marker.h"
24#include "util/u_limited_unique_id.h"
25
26#include "shared/ipc_protocol.h"
27#include "client/ipc_client.h"
28#include "ipc_client_generated.h"
29
30#include <string.h>
31#include <stdio.h>
32#if !defined(XRT_OS_WINDOWS)
33#include <unistd.h>
34#include <sys/socket.h>
35#include <sys/un.h>
36#endif
37#include <errno.h>
38#include <assert.h>
39
40#ifdef XRT_GRAPHICS_SYNC_HANDLE_IS_FD
41#include <unistd.h>
42#endif
43
44
45/*
46 *
47 * Internal structs and helpers.
48 *
49 */
50
51//! Define to test the loopback allocator.
52#undef IPC_USE_LOOPBACK_IMAGE_ALLOCATOR
53
54/*!
55 * Client proxy for an xrt_compositor_native implementation over IPC.
56 * @implements xrt_compositor_native
57 */
58struct ipc_client_compositor
59{
60 struct xrt_compositor_native base;
61
62 //! Should be turned into its own object.
63 struct xrt_system_compositor system;
64
65 struct ipc_connection *ipc_c;
66
67 //! Optional image allocator.
68 struct xrt_image_native_allocator *xina;
69
70 struct
71 {
72 //! Id that we are currently using for submitting layers.
73 uint32_t slot_id;
74
75 uint32_t layer_count;
76 } layers;
77
78 //! Has the native compositor been created, only supports one for now.
79 bool compositor_created;
80
81 //! To get better wake up in wait frame.
82 struct os_precise_sleeper sleeper;
83
84#ifdef IPC_USE_LOOPBACK_IMAGE_ALLOCATOR
85 //! To test image allocator.
86 struct xrt_image_native_allocator loopback_xina;
87#endif
88};
89
90/*!
91 * Client proxy for an xrt_swapchain_native implementation over IPC.
92 * @implements xrt_swapchain_native
93 */
94struct ipc_client_swapchain
95{
96 struct xrt_swapchain_native base;
97
98 struct ipc_client_compositor *icc;
99
100 uint32_t id;
101};
102
103/*!
104 * Client proxy for an xrt_compositor_semaphore implementation over IPC.
105 * @implements xrt_compositor_semaphore
106 */
107struct ipc_client_compositor_semaphore
108{
109 struct xrt_compositor_semaphore base;
110
111 struct ipc_client_compositor *icc;
112
113 uint32_t id;
114};
115
116
117/*
118 *
119 * Helper functions.
120 *
121 */
122
//! Downcast a generic compositor to our IPC client type.
static inline struct ipc_client_compositor *
ipc_client_compositor(struct xrt_compositor *xc)
{
	struct ipc_client_compositor *icc = (struct ipc_client_compositor *)xc;
	return icc;
}
128
//! Downcast a generic swapchain to our IPC client type.
static inline struct ipc_client_swapchain *
ipc_client_swapchain(struct xrt_swapchain *xs)
{
	struct ipc_client_swapchain *ics = (struct ipc_client_swapchain *)xs;
	return ics;
}
134
//! Downcast a generic compositor semaphore to our IPC client type.
static inline struct ipc_client_compositor_semaphore *
ipc_client_compositor_semaphore(struct xrt_compositor_semaphore *xcsem)
{
	struct ipc_client_compositor_semaphore *iccs = (struct ipc_client_compositor_semaphore *)xcsem;
	return iccs;
}
140
141
142/*
143 *
144 * Misc functions
145 *
146 */
147
148static xrt_result_t
149get_info(struct xrt_compositor *xc, struct xrt_compositor_info *out_info)
150{
151 struct ipc_client_compositor *icc = ipc_client_compositor(xc);
152
153 xrt_result_t xret = ipc_call_compositor_get_info(icc->ipc_c, out_info);
154 IPC_CHK_ALWAYS_RET(icc->ipc_c, xret, "ipc_call_compositor_get_info");
155}
156
157static xrt_result_t
158get_system_info(struct ipc_client_compositor *icc, struct xrt_system_compositor_info *out_info)
159{
160 xrt_result_t xret = ipc_call_system_compositor_get_info(icc->ipc_c, out_info);
161 IPC_CHK_ALWAYS_RET(icc->ipc_c, xret, "ipc_call_system_compositor_get_info");
162}
163
164
165/*
166 *
167 * Swapchain.
168 *
169 */
170
171static void
172ipc_compositor_swapchain_destroy(struct xrt_swapchain *xsc)
173{
174 struct ipc_client_swapchain *ics = ipc_client_swapchain(xsc);
175 struct ipc_client_compositor *icc = ics->icc;
176 xrt_result_t xret;
177
178 xret = ipc_call_swapchain_destroy(icc->ipc_c, ics->id);
179
180 // Can't return anything here, just continue.
181 IPC_CHK_ONLY_PRINT(icc->ipc_c, xret, "ipc_call_compositor_semaphore_destroy");
182
183 free(xsc);
184}
185
186static xrt_result_t
187ipc_compositor_swapchain_wait_image(struct xrt_swapchain *xsc, int64_t timeout_ns, uint32_t index)
188{
189 struct ipc_client_swapchain *ics = ipc_client_swapchain(xsc);
190 struct ipc_client_compositor *icc = ics->icc;
191 xrt_result_t xret;
192
193 xret = ipc_call_swapchain_wait_image(icc->ipc_c, ics->id, timeout_ns, index);
194 IPC_CHK_ALWAYS_RET(icc->ipc_c, xret, "ipc_call_swapchain_wait_image");
195}
196
197static xrt_result_t
198ipc_compositor_swapchain_acquire_image(struct xrt_swapchain *xsc, uint32_t *out_index)
199{
200 struct ipc_client_swapchain *ics = ipc_client_swapchain(xsc);
201 struct ipc_client_compositor *icc = ics->icc;
202 xrt_result_t xret;
203
204 xret = ipc_call_swapchain_acquire_image(icc->ipc_c, ics->id, out_index);
205 IPC_CHK_ALWAYS_RET(icc->ipc_c, xret, "ipc_call_swapchain_acquire_image");
206}
207
208static xrt_result_t
209ipc_compositor_swapchain_release_image(struct xrt_swapchain *xsc, uint32_t index)
210{
211 struct ipc_client_swapchain *ics = ipc_client_swapchain(xsc);
212 struct ipc_client_compositor *icc = ics->icc;
213 xrt_result_t xret;
214
215 xret = ipc_call_swapchain_release_image(icc->ipc_c, ics->id, index);
216 IPC_CHK_ALWAYS_RET(icc->ipc_c, xret, "ipc_call_swapchain_release_image");
217}
218
219
220/*
221 *
222 * Compositor semaphore functions.
223 *
224 */
225
226static xrt_result_t
227ipc_client_compositor_semaphore_wait(struct xrt_compositor_semaphore *xcsem, uint64_t value, uint64_t timeout_ns)
228{
229 struct ipc_client_compositor_semaphore *iccs = ipc_client_compositor_semaphore(xcsem);
230 struct ipc_client_compositor *icc = iccs->icc;
231
232 IPC_ERROR(icc->ipc_c, "Cannot call wait on client side!");
233
234 return XRT_ERROR_IPC_FAILURE;
235}
236
237static void
238ipc_client_compositor_semaphore_destroy(struct xrt_compositor_semaphore *xcsem)
239{
240 struct ipc_client_compositor_semaphore *iccs = ipc_client_compositor_semaphore(xcsem);
241 struct ipc_client_compositor *icc = iccs->icc;
242 xrt_result_t xret;
243
244 xret = ipc_call_compositor_semaphore_destroy(icc->ipc_c, iccs->id);
245
246 // Can't return anything here, just continue.
247 IPC_CHK_ONLY_PRINT(icc->ipc_c, xret, "ipc_call_compositor_semaphore_destroy");
248
249 free(iccs);
250}
251
252
253/*
254 *
255 * Compositor functions.
256 *
257 */
258
259static xrt_result_t
260ipc_compositor_get_swapchain_create_properties(struct xrt_compositor *xc,
261 const struct xrt_swapchain_create_info *info,
262 struct xrt_swapchain_create_properties *xsccp)
263{
264 struct ipc_client_compositor *icc = ipc_client_compositor(xc);
265 xrt_result_t xret;
266
267 xret = ipc_call_swapchain_get_properties(icc->ipc_c, info, xsccp);
268 IPC_CHK_ALWAYS_RET(icc->ipc_c, xret, "ipc_call_swapchain_get_properties");
269}
270
/*!
 * Create a swapchain where the service allocates the images, receiving the
 * native buffer handles over the connection.
 *
 * On success the new @ref ipc_client_swapchain owns the received handles.
 * XRT_ERROR_SWAPCHAIN_FLAG_VALID_BUT_UNSUPPORTED is passed through quietly
 * (the caller/CTS probes flags and would otherwise spam the log).
 */
static xrt_result_t
swapchain_server_create(struct ipc_client_compositor *icc,
                        const struct xrt_swapchain_create_info *info,
                        struct xrt_swapchain **out_xsc)
{
	xrt_graphics_buffer_handle_t remote_handles[XRT_MAX_SWAPCHAIN_IMAGES] = {0};
	xrt_result_t xret;
	uint32_t handle; // Server-side swapchain id, see ics->id below.
	uint32_t image_count;
	uint64_t size;
	bool use_dedicated_allocation;

	xret = ipc_call_swapchain_create( //
	    icc->ipc_c,                   // connection
	    info,                         // in
	    &handle,                      // out
	    &image_count,                 // out
	    &size,                        // out
	    &use_dedicated_allocation,    // out
	    remote_handles,               // handles
	    XRT_MAX_SWAPCHAIN_IMAGES);    // handles
	if (xret == XRT_ERROR_SWAPCHAIN_FLAG_VALID_BUT_UNSUPPORTED) {
		// Don't error print this, will spam CTS logs.
		IPC_DEBUG(icc->ipc_c, "Got XRT_ERROR_SWAPCHAIN_FLAG_VALID_BUT_UNSUPPORTED");
		return xret;
	}
	IPC_CHK_AND_RET(icc->ipc_c, xret, "ipc_call_swapchain_create");

	// Set up the client proxy object; caller gets the single reference.
	struct ipc_client_swapchain *ics = U_TYPED_CALLOC(struct ipc_client_swapchain);
	ics->base.base.image_count = image_count;
	ics->base.base.wait_image = ipc_compositor_swapchain_wait_image;
	ics->base.base.acquire_image = ipc_compositor_swapchain_acquire_image;
	ics->base.base.release_image = ipc_compositor_swapchain_release_image;
	ics->base.base.destroy = ipc_compositor_swapchain_destroy;
	ics->base.base.reference.count = 1;
	ics->base.limited_unique_id = u_limited_unique_id_get();
	ics->icc = icc;
	ics->id = handle;

	// All images share the same size/allocation mode reported by the server.
	for (uint32_t i = 0; i < image_count; i++) {
		ics->base.images[i].handle = remote_handles[i];
		ics->base.images[i].size = size;
		ics->base.images[i].use_dedicated_allocation = use_dedicated_allocation;
	}

	*out_xsc = &ics->base.base;

	return XRT_SUCCESS;
}
320
/*!
 * Create a swapchain from client-provided native images by importing the
 * buffer handles into the service.
 *
 * The IPC call copies the handles (does not consume them); on success the
 * returned swapchain reuses @p native_images directly, so ownership of the
 * handles effectively transfers into the new swapchain.
 */
static xrt_result_t
swapchain_server_import(struct ipc_client_compositor *icc,
                        const struct xrt_swapchain_create_info *info,
                        struct xrt_image_native *native_images,
                        uint32_t image_count,
                        struct xrt_swapchain **out_xsc)
{
	struct ipc_arg_swapchain_from_native args = {0};
	xrt_graphics_buffer_handle_t handles[XRT_MAX_SWAPCHAIN_IMAGES] = {0};
	xrt_result_t xret;
	uint32_t id = 0; // Server-side swapchain id.

	for (uint32_t i = 0; i < image_count; i++) {
		handles[i] = native_images[i].handle;
		args.sizes[i] = native_images[i].size;

#if defined(XRT_GRAPHICS_BUFFER_HANDLE_IS_WIN32_HANDLE)
		// DXGI handles need to be dealt with differently, they are identified
		// by having their lower bit set to 1 during transfer
		if (native_images[i].is_dxgi_handle) {
			handles[i] = (void *)((size_t)handles[i] | 1);
		}
#endif
	}

	// This does not consume the handles, it copies them.
	xret = ipc_call_swapchain_import( //
	    icc->ipc_c,                   // connection
	    info,                         // in
	    &args,                        // in
	    handles,                      // handles
	    image_count,                  // handles
	    &id);                         // out
	if (xret == XRT_ERROR_SWAPCHAIN_FLAG_VALID_BUT_UNSUPPORTED) {
		// Don't error print this, not an error.
		IPC_DEBUG(icc->ipc_c, "Got XRT_ERROR_SWAPCHAIN_FLAG_VALID_BUT_UNSUPPORTED");
		return xret;
	}
	IPC_CHK_AND_RET(icc->ipc_c, xret, "ipc_call_swapchain_import");

	// Set up the client proxy object; caller gets the single reference.
	struct ipc_client_swapchain *ics = U_TYPED_CALLOC(struct ipc_client_swapchain);
	ics->base.base.image_count = image_count;
	ics->base.base.wait_image = ipc_compositor_swapchain_wait_image;
	ics->base.base.acquire_image = ipc_compositor_swapchain_acquire_image;
	ics->base.base.release_image = ipc_compositor_swapchain_release_image;
	ics->base.base.destroy = ipc_compositor_swapchain_destroy;
	ics->base.base.reference.count = 1;
	ics->base.limited_unique_id = u_limited_unique_id_get();
	ics->icc = icc;
	ics->id = id;

	// The handles were copied in the IPC call so we can reuse them here.
	for (uint32_t i = 0; i < image_count; i++) {
		ics->base.images[i] = native_images[i];
	}

	*out_xsc = &ics->base.base;

	return XRT_SUCCESS;
}
381
/*!
 * Create a swapchain by allocating images client side via @p xina and then
 * importing them into the service.
 *
 * Ownership: on successful import the handles belong to the new swapchain;
 * on import failure they are freed via xrt_images_free. The temporary
 * xrt_image_native array itself is always freed before returning.
 */
static xrt_result_t
swapchain_allocator_create(struct ipc_client_compositor *icc,
                           struct xrt_image_native_allocator *xina,
                           const struct xrt_swapchain_create_info *info,
                           struct xrt_swapchain **out_xsc)
{
	struct xrt_swapchain_create_properties xsccp = {0};
	struct xrt_image_native *images = NULL;
	xrt_result_t xret;

	// Get any needed properties (most importantly the image count).
	xret = ipc_compositor_get_swapchain_create_properties(&icc->base.base, info, &xsccp);
	IPC_CHK_AND_RET(icc->ipc_c, xret, "ipc_compositor_get_swapchain_create_properties");

	// Alloc the array of structs for the images.
	images = U_TYPED_ARRAY_CALLOC(struct xrt_image_native, xsccp.image_count);

	// Now allocate the images themselves
	xret = xrt_images_allocate(xina, info, xsccp.image_count, images);
	IPC_CHK_WITH_GOTO(icc->ipc_c, xret, "xrt_images_allocate", out_free);

	/*
	 * The import function takes ownership of the handles,
	 * we do not need free them if the call succeeds.
	 */
	xret = swapchain_server_import(icc, info, images, xsccp.image_count, out_xsc);
	IPC_CHK_ONLY_PRINT(icc->ipc_c, xret, "swapchain_server_import");
	if (xret != XRT_SUCCESS) {
		// Import failed, we still own the handles: release them.
		xrt_images_free(xina, xsccp.image_count, images);
	}

out_free:
	free(images);

	return xret;
}
418
419static xrt_result_t
420ipc_compositor_swapchain_create(struct xrt_compositor *xc,
421 const struct xrt_swapchain_create_info *info,
422 struct xrt_swapchain **out_xsc)
423{
424 struct ipc_client_compositor *icc = ipc_client_compositor(xc);
425 struct xrt_image_native_allocator *xina = icc->xina;
426 xrt_result_t xret;
427
428 if (xina == NULL) {
429 xret = swapchain_server_create(icc, info, out_xsc);
430 } else {
431 xret = swapchain_allocator_create(icc, xina, info, out_xsc);
432 }
433
434 // Errors already printed.
435 return xret;
436}
437
438static xrt_result_t
439ipc_compositor_create_passthrough(struct xrt_compositor *xc, const struct xrt_passthrough_create_info *info)
440{
441 struct ipc_client_compositor *icc = ipc_client_compositor(xc);
442 xrt_result_t xret;
443
444 xret = ipc_call_compositor_create_passthrough(icc->ipc_c, info);
445 IPC_CHK_ALWAYS_RET(icc->ipc_c, xret, "ipc_call_compositor_create_passthrough");
446}
447
448static xrt_result_t
449ipc_compositor_create_passthrough_layer(struct xrt_compositor *xc, const struct xrt_passthrough_layer_create_info *info)
450{
451 struct ipc_client_compositor *icc = ipc_client_compositor(xc);
452 xrt_result_t xret;
453
454 xret = ipc_call_compositor_create_passthrough_layer(icc->ipc_c, info);
455 IPC_CHK_ALWAYS_RET(icc->ipc_c, xret, "ipc_call_compositor_create_passthrough_layer");
456}
457
458static xrt_result_t
459ipc_compositor_destroy_passthrough(struct xrt_compositor *xc)
460{
461 struct ipc_client_compositor *icc = ipc_client_compositor(xc);
462 xrt_result_t xret;
463
464 xret = ipc_call_compositor_destroy_passthrough(icc->ipc_c);
465 IPC_CHK_ALWAYS_RET(icc->ipc_c, xret, "ipc_call_compositor_destroy_passthrough");
466}
467
468static xrt_result_t
469ipc_compositor_swapchain_import(struct xrt_compositor *xc,
470 const struct xrt_swapchain_create_info *info,
471 struct xrt_image_native *native_images,
472 uint32_t image_count,
473 struct xrt_swapchain **out_xsc)
474{
475 struct ipc_client_compositor *icc = ipc_client_compositor(xc);
476
477 // Errors already printed.
478 return swapchain_server_import(icc, info, native_images, image_count, out_xsc);
479}
480
481static xrt_result_t
482ipc_compositor_semaphore_create(struct xrt_compositor *xc,
483 xrt_graphics_sync_handle_t *out_handle,
484 struct xrt_compositor_semaphore **out_xcsem)
485{
486 struct ipc_client_compositor *icc = ipc_client_compositor(xc);
487 xrt_graphics_sync_handle_t handle = XRT_GRAPHICS_SYNC_HANDLE_INVALID;
488 xrt_result_t xret;
489 uint32_t id = 0;
490
491 xret = ipc_call_compositor_semaphore_create(icc->ipc_c, &id, &handle, 1);
492 IPC_CHK_AND_RET(icc->ipc_c, xret, "ipc_call_compositor_semaphore_create");
493
494 struct ipc_client_compositor_semaphore *iccs = U_TYPED_CALLOC(struct ipc_client_compositor_semaphore);
495 iccs->base.reference.count = 1;
496 iccs->base.wait = ipc_client_compositor_semaphore_wait;
497 iccs->base.destroy = ipc_client_compositor_semaphore_destroy;
498 iccs->id = id;
499 iccs->icc = icc;
500
501 *out_handle = handle;
502 *out_xcsem = &iccs->base;
503
504 return XRT_SUCCESS;
505}
506
507static xrt_result_t
508ipc_compositor_begin_session(struct xrt_compositor *xc, const struct xrt_begin_session_info *info)
509{
510 struct ipc_client_compositor *icc = ipc_client_compositor(xc);
511 xrt_result_t xret;
512
513 IPC_TRACE(icc->ipc_c, "Compositor begin session.");
514
515 xret = ipc_call_session_begin(icc->ipc_c);
516 IPC_CHK_ALWAYS_RET(icc->ipc_c, xret, "ipc_call_session_begin");
517}
518
519static xrt_result_t
520ipc_compositor_end_session(struct xrt_compositor *xc)
521{
522 IPC_TRACE_MARKER();
523
524 struct ipc_client_compositor *icc = ipc_client_compositor(xc);
525 xrt_result_t xret;
526
527 IPC_TRACE(icc->ipc_c, "Compositor end session.");
528
529 xret = ipc_call_session_end(icc->ipc_c);
530 IPC_CHK_ALWAYS_RET(icc->ipc_c, xret, "ipc_call_session_end");
531}
532
/*!
 * Implements xrt_compositor::wait_frame over IPC.
 *
 * Asks the service to predict the next frame, sleeps locally until the
 * predicted wake up time, then informs the service we woke. The out
 * parameters are only written after both IPC calls succeed and the wait
 * completed, so callers never observe a partially-filled prediction.
 */
static xrt_result_t
ipc_compositor_wait_frame(struct xrt_compositor *xc,
                          int64_t *out_frame_id,
                          int64_t *out_predicted_display_time,
                          int64_t *out_predicted_display_period)
{
	IPC_TRACE_MARKER();
	struct ipc_client_compositor *icc = ipc_client_compositor(xc);
	xrt_result_t xret;

	int64_t frame_id = -1;
	int64_t wake_up_time_ns = 0;
	int64_t predicted_display_time = 0;
	int64_t predicted_display_period = 0;

	xret = ipc_call_compositor_predict_frame( //
	    icc->ipc_c,                           // Connection
	    &frame_id,                            // Frame id
	    &wake_up_time_ns,                     // When we should wake up
	    &predicted_display_time,              // Display time
	    &predicted_display_period);           // Current period
	IPC_CHK_AND_RET(icc->ipc_c, xret, "ipc_call_compositor_predict_frame");

	// Wait until the given wake up time, using the precise sleeper.
	u_wait_until(&icc->sleeper, wake_up_time_ns);

	// Signal that we woke up.
	xret = ipc_call_compositor_wait_woke(icc->ipc_c, frame_id);
	IPC_CHK_AND_RET(icc->ipc_c, xret, "ipc_call_compositor_wait_woke");

	// Only write arguments once we have fully waited.
	*out_frame_id = frame_id;
	*out_predicted_display_time = predicted_display_time;
	*out_predicted_display_period = predicted_display_period;

	return xret;
}
570
571static xrt_result_t
572ipc_compositor_begin_frame(struct xrt_compositor *xc, int64_t frame_id)
573{
574 struct ipc_client_compositor *icc = ipc_client_compositor(xc);
575 xrt_result_t xret;
576
577 xret = ipc_call_compositor_begin_frame(icc->ipc_c, frame_id);
578 IPC_CHK_ALWAYS_RET(icc->ipc_c, xret, "ipc_call_compositor_begin_frame");
579}
580
581static xrt_result_t
582ipc_compositor_layer_begin(struct xrt_compositor *xc, const struct xrt_layer_frame_data *data)
583{
584 struct ipc_client_compositor *icc = ipc_client_compositor(xc);
585
586 struct ipc_shared_memory *ism = icc->ipc_c->ism;
587 struct ipc_layer_slot *slot = &ism->slots[icc->layers.slot_id];
588
589 slot->data = *data;
590
591 return XRT_SUCCESS;
592}
593
594static xrt_result_t
595ipc_compositor_layer_projection(struct xrt_compositor *xc,
596 struct xrt_device *xdev,
597 struct xrt_swapchain *xsc[XRT_MAX_VIEWS],
598 const struct xrt_layer_data *data)
599{
600 struct ipc_client_compositor *icc = ipc_client_compositor(xc);
601
602 assert(data->type == XRT_LAYER_PROJECTION);
603
604 struct ipc_shared_memory *ism = icc->ipc_c->ism;
605 struct ipc_layer_slot *slot = &ism->slots[icc->layers.slot_id];
606 struct ipc_layer_entry *layer = &slot->layers[icc->layers.layer_count];
607 layer->xdev_id = 0; //! @todo Real id.
608 layer->data = *data;
609 for (uint32_t i = 0; i < data->view_count; ++i) {
610 struct ipc_client_swapchain *ics = ipc_client_swapchain(xsc[i]);
611 layer->swapchain_ids[i] = ics->id;
612 }
613 // Increment the number of layers.
614 icc->layers.layer_count++;
615
616 return XRT_SUCCESS;
617}
618
/*!
 * Stage a projection layer with depth into the current shared memory slot.
 *
 * Swapchain ids are packed as: color ids at [0, view_count), depth ids at
 * [view_count, 2 * view_count).
 */
static xrt_result_t
ipc_compositor_layer_projection_depth(struct xrt_compositor *xc,
                                      struct xrt_device *xdev,
                                      struct xrt_swapchain *xsc[XRT_MAX_VIEWS],
                                      struct xrt_swapchain *d_xsc[XRT_MAX_VIEWS],
                                      const struct xrt_layer_data *data)
{
	struct ipc_client_compositor *icc = ipc_client_compositor(xc);

	assert(data->type == XRT_LAYER_PROJECTION_DEPTH);

	struct ipc_shared_memory *ism = icc->ipc_c->ism;
	struct ipc_layer_slot *slot = &ism->slots[icc->layers.slot_id];
	struct ipc_layer_entry *layer = &slot->layers[icc->layers.layer_count];
	struct ipc_client_swapchain *xscn[XRT_MAX_VIEWS];   // Color swapchains.
	struct ipc_client_swapchain *d_xscn[XRT_MAX_VIEWS]; // Depth swapchains.
	for (uint32_t i = 0; i < data->view_count; ++i) {
		xscn[i] = ipc_client_swapchain(xsc[i]);
		d_xscn[i] = ipc_client_swapchain(d_xsc[i]);

		layer->swapchain_ids[i] = xscn[i]->id;
		layer->swapchain_ids[i + data->view_count] = d_xscn[i]->id;
	}

	layer->xdev_id = 0; //! @todo Real id.

	layer->data = *data;

	// Increment the number of layers.
	icc->layers.layer_count++;

	return XRT_SUCCESS;
}
652
/*!
 * Shared helper for all single-swapchain layer types (quad, cube, cylinder,
 * equirect1/2): stages the layer into the current shared memory slot.
 *
 * Only swapchain id slot 0 is used; the remaining slots are set to -1
 * (wraps to UINT32_MAX) as an "unused" sentinel.
 */
static xrt_result_t
handle_layer(struct xrt_compositor *xc,
             struct xrt_device *xdev,
             struct xrt_swapchain *xsc,
             const struct xrt_layer_data *data,
             enum xrt_layer_type type)
{
	struct ipc_client_compositor *icc = ipc_client_compositor(xc);

	assert(data->type == type);

	struct ipc_shared_memory *ism = icc->ipc_c->ism;
	struct ipc_layer_slot *slot = &ism->slots[icc->layers.slot_id];
	struct ipc_layer_entry *layer = &slot->layers[icc->layers.layer_count];
	struct ipc_client_swapchain *ics = ipc_client_swapchain(xsc);

	layer->xdev_id = 0; //! @todo Real id.
	layer->swapchain_ids[0] = ics->id;
	layer->swapchain_ids[1] = -1;
	layer->swapchain_ids[2] = -1;
	layer->swapchain_ids[3] = -1;
	layer->data = *data;

	// Increment the number of layers.
	icc->layers.layer_count++;

	return XRT_SUCCESS;
}
681
682static xrt_result_t
683ipc_compositor_layer_quad(struct xrt_compositor *xc,
684 struct xrt_device *xdev,
685 struct xrt_swapchain *xsc,
686 const struct xrt_layer_data *data)
687{
688 return handle_layer(xc, xdev, xsc, data, XRT_LAYER_QUAD);
689}
690
691static xrt_result_t
692ipc_compositor_layer_cube(struct xrt_compositor *xc,
693 struct xrt_device *xdev,
694 struct xrt_swapchain *xsc,
695 const struct xrt_layer_data *data)
696{
697 return handle_layer(xc, xdev, xsc, data, XRT_LAYER_CUBE);
698}
699
700static xrt_result_t
701ipc_compositor_layer_cylinder(struct xrt_compositor *xc,
702 struct xrt_device *xdev,
703 struct xrt_swapchain *xsc,
704 const struct xrt_layer_data *data)
705{
706 return handle_layer(xc, xdev, xsc, data, XRT_LAYER_CYLINDER);
707}
708
709static xrt_result_t
710ipc_compositor_layer_equirect1(struct xrt_compositor *xc,
711 struct xrt_device *xdev,
712 struct xrt_swapchain *xsc,
713 const struct xrt_layer_data *data)
714{
715 return handle_layer(xc, xdev, xsc, data, XRT_LAYER_EQUIRECT1);
716}
717
718static xrt_result_t
719ipc_compositor_layer_equirect2(struct xrt_compositor *xc,
720 struct xrt_device *xdev,
721 struct xrt_swapchain *xsc,
722 const struct xrt_layer_data *data)
723{
724 return handle_layer(xc, xdev, xsc, data, XRT_LAYER_EQUIRECT2);
725}
726
727static xrt_result_t
728ipc_compositor_layer_passthrough(struct xrt_compositor *xc, struct xrt_device *xdev, const struct xrt_layer_data *data)
729{
730 struct ipc_client_compositor *icc = ipc_client_compositor(xc);
731
732 assert(data->type == XRT_LAYER_PASSTHROUGH);
733
734 struct ipc_shared_memory *ism = icc->ipc_c->ism;
735 struct ipc_layer_slot *slot = &ism->slots[icc->layers.slot_id];
736 struct ipc_layer_entry *layer = &slot->layers[icc->layers.layer_count];
737
738 layer->xdev_id = 0; //! @todo Real id.
739 layer->data = *data;
740
741 // Increment the number of layers.
742 icc->layers.layer_count++;
743
744 return XRT_SUCCESS;
745}
746
747static xrt_result_t
748ipc_compositor_layer_commit(struct xrt_compositor *xc, xrt_graphics_sync_handle_t sync_handle)
749{
750 struct ipc_client_compositor *icc = ipc_client_compositor(xc);
751 xrt_result_t xret;
752
753 bool valid_sync = xrt_graphics_sync_handle_is_valid(sync_handle);
754
755 struct ipc_shared_memory *ism = icc->ipc_c->ism;
756 struct ipc_layer_slot *slot = &ism->slots[icc->layers.slot_id];
757
758 // Last bit of data to put in the shared memory area.
759 slot->layer_count = icc->layers.layer_count;
760
761 xret = ipc_call_compositor_layer_sync( //
762 icc->ipc_c, //
763 icc->layers.slot_id, //
764 &sync_handle, //
765 valid_sync ? 1 : 0, //
766 &icc->layers.slot_id); //
767
768 /*
769 * We are probably in a really bad state if we fail, at
770 * least print out the error and continue as best we can.
771 */
772 IPC_CHK_ONLY_PRINT(icc->ipc_c, xret, "ipc_call_compositor_layer_sync_with_semaphore");
773
774 // Reset.
775 icc->layers.layer_count = 0;
776
777 // Need to consume this handle.
778 if (valid_sync) {
779 u_graphics_sync_unref(&sync_handle);
780 }
781
782 return xret;
783}
784
/*!
 * Commit all staged layers for this frame, synchronized via a compositor
 * semaphore (id + value) instead of a native sync handle. Publishes the
 * layer count to the shared memory slot, performs the sync IPC call (which
 * also hands us the next slot id), then resets the staged layer count.
 */
static xrt_result_t
ipc_compositor_layer_commit_with_semaphore(struct xrt_compositor *xc,
                                           struct xrt_compositor_semaphore *xcsem,
                                           uint64_t value)
{
	struct ipc_client_compositor *icc = ipc_client_compositor(xc);
	struct ipc_client_compositor_semaphore *iccs = ipc_client_compositor_semaphore(xcsem);
	xrt_result_t xret;

	struct ipc_shared_memory *ism = icc->ipc_c->ism;
	struct ipc_layer_slot *slot = &ism->slots[icc->layers.slot_id];

	// Last bit of data to put in the shared memory area.
	slot->layer_count = icc->layers.layer_count;

	xret = ipc_call_compositor_layer_sync_with_semaphore( //
	    icc->ipc_c,                                       //
	    icc->layers.slot_id,                              //
	    iccs->id,                                         //
	    value,                                            //
	    &icc->layers.slot_id);                            // Next slot to use.

	/*
	 * We are probably in a really bad state if we fail, at
	 * least print out the error and continue as best we can.
	 */
	IPC_CHK_ONLY_PRINT(icc->ipc_c, xret, "ipc_call_compositor_layer_sync_with_semaphore");

	// Reset.
	icc->layers.layer_count = 0;

	return xret;
}
818
819static xrt_result_t
820ipc_compositor_discard_frame(struct xrt_compositor *xc, int64_t frame_id)
821{
822 struct ipc_client_compositor *icc = ipc_client_compositor(xc);
823 xrt_result_t xret;
824
825 xret = ipc_call_compositor_discard_frame(icc->ipc_c, frame_id);
826 IPC_CHK_ALWAYS_RET(icc->ipc_c, xret, "ipc_call_compositor_discard_frame");
827}
828
829static xrt_result_t
830ipc_compositor_set_performance_level(struct xrt_compositor *xc,
831 enum xrt_perf_domain domain,
832 enum xrt_perf_set_level level)
833{
834 struct ipc_client_compositor *icc = ipc_client_compositor(xc);
835 xrt_result_t xret;
836 xret = ipc_call_compositor_set_performance_level(icc->ipc_c, domain, level);
837 IPC_CHK_ALWAYS_RET(icc->ipc_c, xret, "ipc_call_compositor_set_performance_level");
838}
839
840static xrt_result_t
841ipc_compositor_set_thread_hint(struct xrt_compositor *xc, enum xrt_thread_hint hint, uint32_t thread_id)
842{
843 struct ipc_client_compositor *icc = ipc_client_compositor(xc);
844 xrt_result_t xret;
845
846 xret = ipc_call_compositor_set_thread_hint(icc->ipc_c, hint, thread_id);
847 IPC_CHK_ALWAYS_RET(icc->ipc_c, xret, "ipc_call_compositor_set_thread_hint");
848}
849
850static xrt_result_t
851ipc_compositor_get_display_refresh_rate(struct xrt_compositor *xc, float *out_display_refresh_rate_hz)
852{
853 struct ipc_client_compositor *icc = ipc_client_compositor(xc);
854 xrt_result_t xret;
855
856 xret = ipc_call_compositor_get_display_refresh_rate(icc->ipc_c, out_display_refresh_rate_hz);
857 IPC_CHK_ALWAYS_RET(icc->ipc_c, xret, "ipc_call_compositor_get_display_refresh_rate");
858}
859
860static xrt_result_t
861ipc_compositor_request_display_refresh_rate(struct xrt_compositor *xc, float display_refresh_rate_hz)
862{
863 struct ipc_client_compositor *icc = ipc_client_compositor(xc);
864 xrt_result_t xret;
865
866 xret = ipc_call_compositor_request_display_refresh_rate(icc->ipc_c, display_refresh_rate_hz);
867 IPC_CHK_ALWAYS_RET(icc->ipc_c, xret, "ipc_call_compositor_request_display_refresh_rate");
868}
869
870static xrt_result_t
871ipc_compositor_get_reference_bounds_rect(struct xrt_compositor *xc,
872 enum xrt_reference_space_type reference_space_type,
873 struct xrt_vec2 *bounds)
874{
875 struct ipc_client_compositor *icc = ipc_client_compositor(xc);
876 xrt_result_t xret;
877
878 xret = ipc_call_compositor_get_reference_bounds_rect(icc->ipc_c, reference_space_type, bounds);
879 IPC_CHK_ALWAYS_RET(icc->ipc_c, xret, "ipc_call_compositor_get_reference_bounds_rect");
880}
881
882static void
883ipc_compositor_destroy(struct xrt_compositor *xc)
884{
885 struct ipc_client_compositor *icc = ipc_client_compositor(xc);
886
887 assert(icc->compositor_created);
888
889 os_precise_sleeper_deinit(&icc->sleeper);
890
891 icc->compositor_created = false;
892}
893
/*!
 * Wire up the compositor vtable, init the precise sleeper used by
 * wait_frame, fetch the compositor info from the service, and hand out the
 * native compositor interface pointer.
 */
static void
ipc_compositor_init(struct ipc_client_compositor *icc, struct xrt_compositor_native **out_xcn)
{
	icc->base.base.get_swapchain_create_properties = ipc_compositor_get_swapchain_create_properties;
	icc->base.base.create_swapchain = ipc_compositor_swapchain_create;
	icc->base.base.import_swapchain = ipc_compositor_swapchain_import;
	icc->base.base.create_semaphore = ipc_compositor_semaphore_create;
	icc->base.base.create_passthrough = ipc_compositor_create_passthrough;
	icc->base.base.create_passthrough_layer = ipc_compositor_create_passthrough_layer;
	icc->base.base.destroy_passthrough = ipc_compositor_destroy_passthrough;
	icc->base.base.begin_session = ipc_compositor_begin_session;
	icc->base.base.end_session = ipc_compositor_end_session;
	icc->base.base.wait_frame = ipc_compositor_wait_frame;
	icc->base.base.begin_frame = ipc_compositor_begin_frame;
	icc->base.base.discard_frame = ipc_compositor_discard_frame;
	icc->base.base.layer_begin = ipc_compositor_layer_begin;
	icc->base.base.layer_projection = ipc_compositor_layer_projection;
	icc->base.base.layer_projection_depth = ipc_compositor_layer_projection_depth;
	icc->base.base.layer_quad = ipc_compositor_layer_quad;
	icc->base.base.layer_cube = ipc_compositor_layer_cube;
	icc->base.base.layer_cylinder = ipc_compositor_layer_cylinder;
	icc->base.base.layer_equirect1 = ipc_compositor_layer_equirect1;
	icc->base.base.layer_equirect2 = ipc_compositor_layer_equirect2;
	icc->base.base.layer_passthrough = ipc_compositor_layer_passthrough;
	icc->base.base.layer_commit = ipc_compositor_layer_commit;
	icc->base.base.layer_commit_with_semaphore = ipc_compositor_layer_commit_with_semaphore;
	icc->base.base.destroy = ipc_compositor_destroy;
	icc->base.base.set_thread_hint = ipc_compositor_set_thread_hint;
	icc->base.base.get_display_refresh_rate = ipc_compositor_get_display_refresh_rate;
	icc->base.base.request_display_refresh_rate = ipc_compositor_request_display_refresh_rate;
	icc->base.base.set_performance_level = ipc_compositor_set_performance_level;
	icc->base.base.get_reference_bounds_rect = ipc_compositor_get_reference_bounds_rect;

	// Using in wait frame.
	os_precise_sleeper_init(&icc->sleeper);

	// Fetch info from the compositor, among it the format list.
	get_info(&(icc->base.base), &icc->base.base.info);

	*out_xcn = &icc->base;
}
935
936
937/*
938 *
939 * Loopback image allocator.
940 *
941 */
942
943#ifdef IPC_USE_LOOPBACK_IMAGE_ALLOCATOR
944static inline xrt_result_t
945ipc_compositor_images_allocate(struct xrt_image_native_allocator *xina,
946 const struct xrt_swapchain_create_info *xsci,
947 size_t in_image_count,
948 struct xrt_image_native *out_images)
949{
950 struct ipc_client_compositor *icc = container_of(xina, struct ipc_client_compositor, loopback_xina);
951
952 int remote_fds[IPC_MAX_SWAPCHAIN_FDS] = {0};
953 xrt_result_t xret;
954 uint32_t image_count;
955 uint32_t handle;
956 uint64_t size;
957
958 for (size_t i = 0; i < ARRAY_SIZE(remote_fds); i++) {
959 remote_fds[i] = -1;
960 }
961
962 for (size_t i = 0; i < in_image_count; i++) {
963 out_images[i].fd = -1;
964 out_images[i].size = 0;
965 }
966
967 xret = ipc_call_swapchain_create( //
968 icc->ipc_c, // connection
969 xsci, // in
970 &handle, // out
971 &image_count, // out
972 &size, // out
973 remote_fds, // fds
974 IPC_MAX_SWAPCHAIN_FDS); // fds
975 IPC_CHK_AND_RET(icc->ipc_c, xret, "ipc_call_swapchain_create");
976
977 /*
978 * It's okay to destroy it immediately, the native handles are
979 * now owned by us and we keep the buffers alive that way.
980 */
981 xret = ipc_call_swapchain_destroy(icc->ipc_c, handle);
982 assert(xret == XRT_SUCCESS);
983
984 // Clumsy way of handling this.
985 if (image_count < in_image_count) {
986 for (uint32_t k = 0; k < image_count && k < in_image_count; k++) {
987 /*
988 * Overly-broad condition: we know that any fd not touched by
989 * ipc_call_swapchain_create will be -1.
990 */
991 if (remote_fds[k] >= 0) {
992 close(remote_fds[k]);
993 remote_fds[k] = -1;
994 }
995 }
996
997 return XRT_ERROR_IPC_FAILURE;
998 }
999
1000 // Copy up to in_image_count, or image_count what ever is lowest.
1001 uint32_t i = 0;
1002 for (; i < image_count && i < in_image_count; i++) {
1003 out_images[i].fd = remote_fds[i];
1004 out_images[i].size = size;
1005 }
1006
1007 // Close any fds we are not interested in.
1008 for (; i < image_count; i++) {
1009 /*
1010 * Overly-broad condition: we know that any fd not touched by
1011 * ipc_call_swapchain_create will be -1.
1012 */
1013 if (remote_fds[i] >= 0) {
1014 close(remote_fds[i]);
1015 remote_fds[i] = -1;
1016 }
1017 }
1018
1019 return XRT_SUCCESS;
1020}
1021
1022static inline xrt_result_t
1023ipc_compositor_images_free(struct xrt_image_native_allocator *xina,
1024 size_t image_count,
1025 struct xrt_image_native *out_images)
1026{
1027 for (uint32_t i = 0; i < image_count; i++) {
1028 close(out_images[i].fd);
1029 out_images[i].fd = -1;
1030 out_images[i].size = 0;
1031 }
1032
1033 return XRT_SUCCESS;
1034}
1035
/*!
 * Loopback implementation of @ref xrt_image_native_allocator::destroy.
 *
 * Intentionally empty: the allocator is embedded in
 * @ref ipc_client_compositor and is freed together with it.
 */
static inline void
ipc_compositor_images_destroy(struct xrt_image_native_allocator *xina)
{
	(void)xina; // Nothing owned, nothing to release.
}
1041#endif
1042
1043
1044/*
1045 *
1046 * System compositor.
1047 *
1048 */
1049
1050xrt_result_t
1051ipc_syscomp_create_native_compositor(struct xrt_system_compositor *xsc,
1052 const struct xrt_session_info *xsi,
1053 struct xrt_session_event_sink *xses,
1054 struct xrt_compositor_native **out_xcn)
1055{
1056 struct ipc_client_compositor *icc = container_of(xsc, struct ipc_client_compositor, system);
1057
1058 IPC_ERROR(icc->ipc_c, "This function shouldn't be called!");
1059
1060 return XRT_ERROR_IPC_FAILURE;
1061}
1062
1063void
1064ipc_syscomp_destroy(struct xrt_system_compositor *xsc)
1065{
1066 struct ipc_client_compositor *icc = container_of(xsc, struct ipc_client_compositor, system);
1067
1068 // Does null checking.
1069 xrt_images_destroy(&icc->xina);
1070
1071 //! @todo Implement
1072 IPC_TRACE(icc->ipc_c, "NOT IMPLEMENTED compositor destroy.");
1073
1074 free(icc);
1075}
1076
1077
1078/*
1079 *
1080 * 'Exported' functions.
1081 *
1082 */
1083
1084xrt_result_t
1085ipc_client_create_native_compositor(struct xrt_system_compositor *xsysc,
1086 const struct xrt_session_info *xsi,
1087 struct xrt_compositor_native **out_xcn)
1088{
1089 struct ipc_client_compositor *icc = container_of(xsysc, struct ipc_client_compositor, system);
1090 xrt_result_t xret;
1091
1092 if (icc->compositor_created) {
1093 return XRT_ERROR_MULTI_SESSION_NOT_IMPLEMENTED;
1094 }
1095
1096 /*
1097 * Needs to be done before init, we don't own the service side session
1098 * the session does. But we create it here in case any extra arguments
1099 * that only the compositor knows about needs to be sent.
1100 */
1101 xret = ipc_call_session_create( //
1102 icc->ipc_c, // ipc_c
1103 xsi, // xsi
1104 true); // create_native_compositor
1105 IPC_CHK_AND_RET(icc->ipc_c, xret, "ipc_call_session_create");
1106
1107 // Needs to be done after session create call.
1108 ipc_compositor_init(icc, out_xcn);
1109
1110 icc->compositor_created = true;
1111
1112 return XRT_SUCCESS;
1113}
1114
1115xrt_result_t
1116ipc_client_create_system_compositor(struct ipc_connection *ipc_c,
1117 struct xrt_image_native_allocator *xina,
1118 struct xrt_device *xdev,
1119 struct xrt_system_compositor **out_xcs)
1120{
1121 struct ipc_client_compositor *c = U_TYPED_CALLOC(struct ipc_client_compositor);
1122
1123 c->system.create_native_compositor = ipc_syscomp_create_native_compositor;
1124 c->system.destroy = ipc_syscomp_destroy;
1125 c->ipc_c = ipc_c;
1126 c->xina = xina;
1127
1128
1129#ifdef IPC_USE_LOOPBACK_IMAGE_ALLOCATOR
1130 c->loopback_xina.images_allocate = ipc_compositor_images_allocate;
1131 c->loopback_xina.images_free = ipc_compositor_images_free;
1132 c->loopback_xina.destroy = ipc_compositor_images_destroy;
1133
1134 if (c->xina == NULL) {
1135 c->xina = &c->loopback_xina;
1136 }
1137#endif
1138
1139 // Fetch info from the system compositor.
1140 get_system_info(c, &c->system.info);
1141
1142 *out_xcs = &c->system;
1143
1144 return XRT_SUCCESS;
1145}