// The open source OpenXR runtime.
1// Copyright 2020, Collabora, Ltd.
2// SPDX-License-Identifier: BSL-1.0
3/*!
4 * @file
5 * @brief Client side wrapper of compositor.
6 * @author Pete Black <pblack@collabora.com>
7 * @author Jakob Bornecrantz <jakob@collabora.com>
8 * @ingroup ipc_client
9 */
10
11#include "xrt/xrt_device.h"
12#include "xrt/xrt_compositor.h"
13#include "xrt/xrt_defines.h"
14#include "xrt/xrt_config_os.h"
15
16
17#include "os/os_time.h"
18
19#include "util/u_misc.h"
20#include "util/u_wait.h"
21#include "util/u_handles.h"
22#include "util/u_trace_marker.h"
23#include "util/u_limited_unique_id.h"
24
25#include "shared/ipc_protocol.h"
26#include "client/ipc_client.h"
27#include "ipc_client_generated.h"
28
29#include <string.h>
30#include <stdio.h>
31#if !defined(XRT_OS_WINDOWS)
32#include <unistd.h>
33#include <sys/socket.h>
34#include <sys/un.h>
35#endif
36#include <errno.h>
37#include <assert.h>
38
39#ifdef XRT_GRAPHICS_SYNC_HANDLE_IS_FD
40#include <unistd.h>
41#endif
42
43
44/*
45 *
46 * Internal structs and helpers.
47 *
48 */
49
50//! Define to test the loopback allocator.
51#undef IPC_USE_LOOPBACK_IMAGE_ALLOCATOR
52
53/*!
54 * Client proxy for an xrt_compositor_native implementation over IPC.
55 * @implements xrt_compositor_native
56 */
struct ipc_client_compositor
{
	struct xrt_compositor_native base;

	//! Should be turned into its own object.
	struct xrt_system_compositor system;

	//! Connection to the service, used for all IPC calls.
	struct ipc_connection *ipc_c;

	//! Optional image allocator.
	struct xrt_image_native_allocator *xina;

	struct
	{
		//! Id that we are currently using for submitting layers.
		uint32_t slot_id;

		//! Number of layers queued into the current slot this frame.
		uint32_t layer_count;
	} layers;

	//! Has the native compositor been created, only supports one for now.
	bool compositor_created;

	//! To get better wake up in wait frame.
	struct os_precise_sleeper sleeper;

#ifdef IPC_USE_LOOPBACK_IMAGE_ALLOCATOR
	//! To test image allocator.
	struct xrt_image_native_allocator loopback_xina;
#endif
};
88
89/*!
90 * Client proxy for an xrt_swapchain_native implementation over IPC.
91 * @implements xrt_swapchain_native
92 */
struct ipc_client_swapchain
{
	struct xrt_swapchain_native base;

	//! Owning client compositor, used for IPC calls on this swapchain.
	struct ipc_client_compositor *icc;

	//! Server-side id of this swapchain.
	uint32_t id;
};
101
102/*!
103 * Client proxy for an xrt_compositor_semaphore implementation over IPC.
104 * @implements xrt_compositor_semaphore
105 */
struct ipc_client_compositor_semaphore
{
	struct xrt_compositor_semaphore base;

	//! Owning client compositor, used for IPC calls on this semaphore.
	struct ipc_client_compositor *icc;

	//! Server-side id of this semaphore.
	uint32_t id;
};
114
115
116/*
117 *
118 * Helper functions.
119 *
120 */
121
//! Downcast helper, the client compositor embeds its base first.
static inline struct ipc_client_compositor *
ipc_client_compositor(struct xrt_compositor *xc)
{
	struct ipc_client_compositor *icc = (struct ipc_client_compositor *)xc;
	return icc;
}
127
//! Downcast helper, the client swapchain embeds its base first.
static inline struct ipc_client_swapchain *
ipc_client_swapchain(struct xrt_swapchain *xs)
{
	struct ipc_client_swapchain *ics = (struct ipc_client_swapchain *)xs;
	return ics;
}
133
//! Downcast helper, the client semaphore embeds its base first.
static inline struct ipc_client_compositor_semaphore *
ipc_client_compositor_semaphore(struct xrt_compositor_semaphore *xcsem)
{
	struct ipc_client_compositor_semaphore *iccs = (struct ipc_client_compositor_semaphore *)xcsem;
	return iccs;
}
139
140
141/*
142 *
143 * Misc functions
144 *
145 */
146
/*!
 * Fetch the @ref xrt_compositor_info of the server side compositor over IPC.
 * The check macro provides the function's return value.
 */
static xrt_result_t
get_info(struct xrt_compositor *xc, struct xrt_compositor_info *out_info)
{
	struct ipc_client_compositor *icc = ipc_client_compositor(xc);

	xrt_result_t xret = ipc_call_compositor_get_info(icc->ipc_c, out_info);
	IPC_CHK_ALWAYS_RET(icc->ipc_c, xret, "ipc_call_compositor_get_info");
}
155
/*!
 * Fetch the @ref xrt_system_compositor_info of the server side system
 * compositor over IPC. The check macro provides the function's return value.
 */
static xrt_result_t
get_system_info(struct ipc_client_compositor *icc, struct xrt_system_compositor_info *out_info)
{
	xrt_result_t xret = ipc_call_system_compositor_get_info(icc->ipc_c, out_info);
	IPC_CHK_ALWAYS_RET(icc->ipc_c, xret, "ipc_call_system_compositor_get_info");
}
162
163
164/*
165 *
166 * Swapchain.
167 *
168 */
169
170static void
171ipc_compositor_swapchain_destroy(struct xrt_swapchain *xsc)
172{
173 struct ipc_client_swapchain *ics = ipc_client_swapchain(xsc);
174 struct ipc_client_compositor *icc = ics->icc;
175 xrt_result_t xret;
176
177 xret = ipc_call_swapchain_destroy(icc->ipc_c, ics->id);
178
179 // Can't return anything here, just continue.
180 IPC_CHK_ONLY_PRINT(icc->ipc_c, xret, "ipc_call_compositor_semaphore_destroy");
181
182 free(xsc);
183}
184
185static xrt_result_t
186ipc_compositor_swapchain_wait_image(struct xrt_swapchain *xsc, int64_t timeout_ns, uint32_t index)
187{
188 struct ipc_client_swapchain *ics = ipc_client_swapchain(xsc);
189 struct ipc_client_compositor *icc = ics->icc;
190 xrt_result_t xret;
191
192 xret = ipc_call_swapchain_wait_image(icc->ipc_c, ics->id, timeout_ns, index);
193 IPC_CHK_ALWAYS_RET(icc->ipc_c, xret, "ipc_call_swapchain_wait_image");
194}
195
196static xrt_result_t
197ipc_compositor_swapchain_acquire_image(struct xrt_swapchain *xsc, uint32_t *out_index)
198{
199 struct ipc_client_swapchain *ics = ipc_client_swapchain(xsc);
200 struct ipc_client_compositor *icc = ics->icc;
201 xrt_result_t xret;
202
203 xret = ipc_call_swapchain_acquire_image(icc->ipc_c, ics->id, out_index);
204 IPC_CHK_ALWAYS_RET(icc->ipc_c, xret, "ipc_call_swapchain_acquire_image");
205}
206
207static xrt_result_t
208ipc_compositor_swapchain_release_image(struct xrt_swapchain *xsc, uint32_t index)
209{
210 struct ipc_client_swapchain *ics = ipc_client_swapchain(xsc);
211 struct ipc_client_compositor *icc = ics->icc;
212 xrt_result_t xret;
213
214 xret = ipc_call_swapchain_release_image(icc->ipc_c, ics->id, index);
215 IPC_CHK_ALWAYS_RET(icc->ipc_c, xret, "ipc_call_swapchain_release_image");
216}
217
218
219/*
220 *
221 * Compositor semaphore functions.
222 *
223 */
224
/*!
 * Waiting on a compositor semaphore is not supported on the client side,
 * always logs an error and fails.
 */
static xrt_result_t
ipc_client_compositor_semaphore_wait(struct xrt_compositor_semaphore *xcsem, uint64_t value, uint64_t timeout_ns)
{
	struct ipc_client_compositor_semaphore *iccs = ipc_client_compositor_semaphore(xcsem);
	struct ipc_client_compositor *icc = iccs->icc;

	IPC_ERROR(icc->ipc_c, "Cannot call wait on client side!");

	return XRT_ERROR_IPC_FAILURE;
}
235
/*!
 * Destroy the server side semaphore then free the client proxy.
 */
static void
ipc_client_compositor_semaphore_destroy(struct xrt_compositor_semaphore *xcsem)
{
	struct ipc_client_compositor_semaphore *iccs = ipc_client_compositor_semaphore(xcsem);
	struct ipc_client_compositor *icc = iccs->icc;
	xrt_result_t xret;

	xret = ipc_call_compositor_semaphore_destroy(icc->ipc_c, iccs->id);

	// Can't return anything here, just continue.
	IPC_CHK_ONLY_PRINT(icc->ipc_c, xret, "ipc_call_compositor_semaphore_destroy");

	free(iccs);
}
250
251
252/*
253 *
254 * Compositor functions.
255 *
256 */
257
258static xrt_result_t
259ipc_compositor_get_swapchain_create_properties(struct xrt_compositor *xc,
260 const struct xrt_swapchain_create_info *info,
261 struct xrt_swapchain_create_properties *xsccp)
262{
263 struct ipc_client_compositor *icc = ipc_client_compositor(xc);
264 xrt_result_t xret;
265
266 xret = ipc_call_swapchain_get_properties(icc->ipc_c, info, xsccp);
267 IPC_CHK_ALWAYS_RET(icc->ipc_c, xret, "ipc_call_swapchain_get_properties");
268}
269
/*!
 * Ask the service to allocate a swapchain, then wrap the returned native
 * buffer handles in a new @ref ipc_client_swapchain.
 *
 * On success the returned proxy has a reference count of one and owns the
 * received handles via its image list.
 */
static xrt_result_t
swapchain_server_create(struct ipc_client_compositor *icc,
                        const struct xrt_swapchain_create_info *info,
                        struct xrt_swapchain **out_xsc)
{
	xrt_graphics_buffer_handle_t remote_handles[XRT_MAX_SWAPCHAIN_IMAGES] = {0};
	xrt_result_t xret;
	uint32_t handle;
	uint32_t image_count;
	uint64_t size;
	bool use_dedicated_allocation;

	xret = ipc_call_swapchain_create( //
	    icc->ipc_c,                   // connection
	    info,                         // in
	    &handle,                      // out
	    &image_count,                 // out
	    &size,                        // out
	    &use_dedicated_allocation,    // out
	    remote_handles,               // handles
	    XRT_MAX_SWAPCHAIN_IMAGES);    // handles
	IPC_CHK_AND_RET(icc->ipc_c, xret, "ipc_call_swapchain_create");

	struct ipc_client_swapchain *ics = U_TYPED_CALLOC(struct ipc_client_swapchain);
	ics->base.base.image_count = image_count;
	ics->base.base.wait_image = ipc_compositor_swapchain_wait_image;
	ics->base.base.acquire_image = ipc_compositor_swapchain_acquire_image;
	ics->base.base.release_image = ipc_compositor_swapchain_release_image;
	ics->base.base.destroy = ipc_compositor_swapchain_destroy;
	ics->base.base.reference.count = 1;
	ics->base.limited_unique_id = u_limited_unique_id_get();
	ics->icc = icc;
	ics->id = handle;

	// All images share the size and allocation mode reported by the server.
	for (uint32_t i = 0; i < image_count; i++) {
		ics->base.images[i].handle = remote_handles[i];
		ics->base.images[i].size = size;
		ics->base.images[i].use_dedicated_allocation = use_dedicated_allocation;
	}

	*out_xsc = &ics->base.base;

	return XRT_SUCCESS;
}
314
315static xrt_result_t
316swapchain_server_import(struct ipc_client_compositor *icc,
317 const struct xrt_swapchain_create_info *info,
318 struct xrt_image_native *native_images,
319 uint32_t image_count,
320 struct xrt_swapchain **out_xsc)
321{
322 struct ipc_arg_swapchain_from_native args = {0};
323 xrt_graphics_buffer_handle_t handles[XRT_MAX_SWAPCHAIN_IMAGES] = {0};
324 xrt_result_t xret;
325 uint32_t id = 0;
326
327 for (uint32_t i = 0; i < image_count; i++) {
328 handles[i] = native_images[i].handle;
329 args.sizes[i] = native_images[i].size;
330
331#if defined(XRT_GRAPHICS_BUFFER_HANDLE_IS_WIN32_HANDLE)
332 // DXGI handles need to be dealt with differently, they are identified
333 // by having their lower bit set to 1 during transfer
334 if (native_images[i].is_dxgi_handle) {
335 handles[i] = (void *)((size_t)handles[i] | 1);
336 }
337#endif
338 }
339
340 // This does not consume the handles, it copies them.
341 xret = ipc_call_swapchain_import( //
342 icc->ipc_c, // connection
343 info, // in
344 &args, // in
345 handles, // handles
346 image_count, // handles
347 &id); // out
348 IPC_CHK_AND_RET(icc->ipc_c, xret, "ipc_call_swapchain_create");
349
350 struct ipc_client_swapchain *ics = U_TYPED_CALLOC(struct ipc_client_swapchain);
351 ics->base.base.image_count = image_count;
352 ics->base.base.wait_image = ipc_compositor_swapchain_wait_image;
353 ics->base.base.acquire_image = ipc_compositor_swapchain_acquire_image;
354 ics->base.base.release_image = ipc_compositor_swapchain_release_image;
355 ics->base.base.destroy = ipc_compositor_swapchain_destroy;
356 ics->base.base.reference.count = 1;
357 ics->base.limited_unique_id = u_limited_unique_id_get();
358 ics->icc = icc;
359 ics->id = id;
360
361 // The handles were copied in the IPC call so we can reuse them here.
362 for (uint32_t i = 0; i < image_count; i++) {
363 ics->base.images[i] = native_images[i];
364 }
365
366 *out_xsc = &ics->base.base;
367
368 return XRT_SUCCESS;
369}
370
/*!
 * Allocate images with the given allocator and import them into the service.
 *
 * Image ownership: on successful import the handles belong to the swapchain;
 * on import failure the images are freed via the allocator. The array of
 * @ref xrt_image_native structs itself is always freed before returning.
 */
static xrt_result_t
swapchain_allocator_create(struct ipc_client_compositor *icc,
                           struct xrt_image_native_allocator *xina,
                           const struct xrt_swapchain_create_info *info,
                           struct xrt_swapchain **out_xsc)
{
	struct xrt_swapchain_create_properties xsccp = {0};
	struct xrt_image_native *images = NULL;
	xrt_result_t xret;

	// Get any needed properties.
	xret = ipc_compositor_get_swapchain_create_properties(&icc->base.base, info, &xsccp);
	IPC_CHK_AND_RET(icc->ipc_c, xret, "ipc_compositor_get_swapchain_create_properties");

	// Alloc the array of structs for the images.
	images = U_TYPED_ARRAY_CALLOC(struct xrt_image_native, xsccp.image_count);

	// Now allocate the images themselves
	xret = xrt_images_allocate(xina, info, xsccp.image_count, images);
	IPC_CHK_WITH_GOTO(icc->ipc_c, xret, "xrt_images_allocate", out_free);

	/*
	 * The import function takes ownership of the handles,
	 * we do not need free them if the call succeeds.
	 */
	xret = swapchain_server_import(icc, info, images, xsccp.image_count, out_xsc);
	IPC_CHK_ONLY_PRINT(icc->ipc_c, xret, "swapchain_server_import");
	if (xret != XRT_SUCCESS) {
		xrt_images_free(xina, xsccp.image_count, images);
	}

out_free:
	free(images);

	return xret;
}
407
408static xrt_result_t
409ipc_compositor_swapchain_create(struct xrt_compositor *xc,
410 const struct xrt_swapchain_create_info *info,
411 struct xrt_swapchain **out_xsc)
412{
413 struct ipc_client_compositor *icc = ipc_client_compositor(xc);
414 struct xrt_image_native_allocator *xina = icc->xina;
415 xrt_result_t xret;
416
417 if (xina == NULL) {
418 xret = swapchain_server_create(icc, info, out_xsc);
419 } else {
420 xret = swapchain_allocator_create(icc, xina, info, out_xsc);
421 }
422
423 // Errors already printed.
424 return xret;
425}
426
427static xrt_result_t
428ipc_compositor_create_passthrough(struct xrt_compositor *xc, const struct xrt_passthrough_create_info *info)
429{
430 struct ipc_client_compositor *icc = ipc_client_compositor(xc);
431 xrt_result_t xret;
432
433 xret = ipc_call_compositor_create_passthrough(icc->ipc_c, info);
434 IPC_CHK_ALWAYS_RET(icc->ipc_c, xret, "ipc_call_compositor_create_passthrough");
435}
436
437static xrt_result_t
438ipc_compositor_create_passthrough_layer(struct xrt_compositor *xc, const struct xrt_passthrough_layer_create_info *info)
439{
440 struct ipc_client_compositor *icc = ipc_client_compositor(xc);
441 xrt_result_t xret;
442
443 xret = ipc_call_compositor_create_passthrough_layer(icc->ipc_c, info);
444 IPC_CHK_ALWAYS_RET(icc->ipc_c, xret, "ipc_call_compositor_create_passthrough_layer");
445}
446
447static xrt_result_t
448ipc_compositor_destroy_passthrough(struct xrt_compositor *xc)
449{
450 struct ipc_client_compositor *icc = ipc_client_compositor(xc);
451 xrt_result_t xret;
452
453 xret = ipc_call_compositor_destroy_passthrough(icc->ipc_c);
454 IPC_CHK_ALWAYS_RET(icc->ipc_c, xret, "ipc_call_compositor_destroy_passthrough");
455}
456
/*!
 * xrt_compositor::import_swapchain entry point, thin wrapper around
 * @ref swapchain_server_import.
 */
static xrt_result_t
ipc_compositor_swapchain_import(struct xrt_compositor *xc,
                                const struct xrt_swapchain_create_info *info,
                                struct xrt_image_native *native_images,
                                uint32_t image_count,
                                struct xrt_swapchain **out_xsc)
{
	struct ipc_client_compositor *icc = ipc_client_compositor(xc);

	// Errors already printed.
	return swapchain_server_import(icc, info, native_images, image_count, out_xsc);
}
469
/*!
 * Create a compositor semaphore on the service and wrap it in a client
 * proxy. The native sync handle received from the service is handed to the
 * caller via @p out_handle.
 */
static xrt_result_t
ipc_compositor_semaphore_create(struct xrt_compositor *xc,
                                xrt_graphics_sync_handle_t *out_handle,
                                struct xrt_compositor_semaphore **out_xcsem)
{
	struct ipc_client_compositor *icc = ipc_client_compositor(xc);
	xrt_graphics_sync_handle_t handle = XRT_GRAPHICS_SYNC_HANDLE_INVALID;
	xrt_result_t xret;
	uint32_t id = 0;

	xret = ipc_call_compositor_semaphore_create(icc->ipc_c, &id, &handle, 1);
	IPC_CHK_AND_RET(icc->ipc_c, xret, "ipc_call_compositor_semaphore_create");

	struct ipc_client_compositor_semaphore *iccs = U_TYPED_CALLOC(struct ipc_client_compositor_semaphore);
	iccs->base.reference.count = 1;
	iccs->base.wait = ipc_client_compositor_semaphore_wait;
	iccs->base.destroy = ipc_client_compositor_semaphore_destroy;
	iccs->id = id;
	iccs->icc = icc;

	*out_handle = handle;
	*out_xcsem = &iccs->base;

	return XRT_SUCCESS;
}
495
/*!
 * Tell the service the session has begun. The check macro provides the
 * function's return value.
 */
static xrt_result_t
ipc_compositor_begin_session(struct xrt_compositor *xc, const struct xrt_begin_session_info *info)
{
	struct ipc_client_compositor *icc = ipc_client_compositor(xc);
	xrt_result_t xret;

	IPC_TRACE(icc->ipc_c, "Compositor begin session.");

	xret = ipc_call_session_begin(icc->ipc_c);
	IPC_CHK_ALWAYS_RET(icc->ipc_c, xret, "ipc_call_session_begin");
}
507
/*!
 * Tell the service the session has ended. The check macro provides the
 * function's return value.
 */
static xrt_result_t
ipc_compositor_end_session(struct xrt_compositor *xc)
{
	IPC_TRACE_MARKER();

	struct ipc_client_compositor *icc = ipc_client_compositor(xc);
	xrt_result_t xret;

	IPC_TRACE(icc->ipc_c, "Compositor end session.");

	xret = ipc_call_session_end(icc->ipc_c);
	IPC_CHK_ALWAYS_RET(icc->ipc_c, xret, "ipc_call_session_end");
}
521
/*!
 * Frame pacing over IPC: ask the service for a frame prediction, sleep
 * locally until the predicted wake up time, then tell the service we woke.
 *
 * The out parameters are only written after both IPC calls succeed.
 */
static xrt_result_t
ipc_compositor_wait_frame(struct xrt_compositor *xc,
                          int64_t *out_frame_id,
                          int64_t *out_predicted_display_time,
                          int64_t *out_predicted_display_period)
{
	IPC_TRACE_MARKER();
	struct ipc_client_compositor *icc = ipc_client_compositor(xc);
	xrt_result_t xret;

	int64_t frame_id = -1;
	int64_t wake_up_time_ns = 0;
	int64_t predicted_display_time = 0;
	int64_t predicted_display_period = 0;

	xret = ipc_call_compositor_predict_frame( //
	    icc->ipc_c,                           // Connection
	    &frame_id,                            // Frame id
	    &wake_up_time_ns,                     // When we should wake up
	    &predicted_display_time,              // Display time
	    &predicted_display_period);           // Current period
	IPC_CHK_AND_RET(icc->ipc_c, xret, "ipc_call_compositor_predict_frame");

	// Wait until the given wake up time.
	u_wait_until(&icc->sleeper, wake_up_time_ns);

	// Signal that we woke up.
	xret = ipc_call_compositor_wait_woke(icc->ipc_c, frame_id);
	IPC_CHK_AND_RET(icc->ipc_c, xret, "ipc_call_compositor_wait_woke");

	// Only write arguments once we have fully waited.
	*out_frame_id = frame_id;
	*out_predicted_display_time = predicted_display_time;
	*out_predicted_display_period = predicted_display_period;

	return xret;
}
559
560static xrt_result_t
561ipc_compositor_begin_frame(struct xrt_compositor *xc, int64_t frame_id)
562{
563 struct ipc_client_compositor *icc = ipc_client_compositor(xc);
564 xrt_result_t xret;
565
566 xret = ipc_call_compositor_begin_frame(icc->ipc_c, frame_id);
567 IPC_CHK_ALWAYS_RET(icc->ipc_c, xret, "ipc_call_compositor_begin_frame");
568}
569
570static xrt_result_t
571ipc_compositor_layer_begin(struct xrt_compositor *xc, const struct xrt_layer_frame_data *data)
572{
573 struct ipc_client_compositor *icc = ipc_client_compositor(xc);
574
575 struct ipc_shared_memory *ism = icc->ipc_c->ism;
576 struct ipc_layer_slot *slot = &ism->slots[icc->layers.slot_id];
577
578 slot->data = *data;
579
580 return XRT_SUCCESS;
581}
582
/*!
 * Queue a projection layer into the current slot in shared memory, one
 * swapchain id per view.
 */
static xrt_result_t
ipc_compositor_layer_projection(struct xrt_compositor *xc,
                                struct xrt_device *xdev,
                                struct xrt_swapchain *xsc[XRT_MAX_VIEWS],
                                const struct xrt_layer_data *data)
{
	struct ipc_client_compositor *icc = ipc_client_compositor(xc);

	assert(data->type == XRT_LAYER_PROJECTION);

	struct ipc_shared_memory *ism = icc->ipc_c->ism;
	struct ipc_layer_slot *slot = &ism->slots[icc->layers.slot_id];
	struct ipc_layer_entry *layer = &slot->layers[icc->layers.layer_count];
	layer->xdev_id = 0; //! @todo Real id.
	layer->data = *data;
	for (uint32_t i = 0; i < data->view_count; ++i) {
		struct ipc_client_swapchain *ics = ipc_client_swapchain(xsc[i]);
		layer->swapchain_ids[i] = ics->id;
	}
	// Increment the number of layers.
	icc->layers.layer_count++;

	return XRT_SUCCESS;
}
607
/*!
 * Queue a projection layer with depth into the current slot, colour
 * swapchain ids first, then the depth swapchain ids after view_count.
 */
static xrt_result_t
ipc_compositor_layer_projection_depth(struct xrt_compositor *xc,
                                      struct xrt_device *xdev,
                                      struct xrt_swapchain *xsc[XRT_MAX_VIEWS],
                                      struct xrt_swapchain *d_xsc[XRT_MAX_VIEWS],
                                      const struct xrt_layer_data *data)
{
	struct ipc_client_compositor *icc = ipc_client_compositor(xc);

	assert(data->type == XRT_LAYER_PROJECTION_DEPTH);

	struct ipc_shared_memory *ism = icc->ipc_c->ism;
	struct ipc_layer_slot *slot = &ism->slots[icc->layers.slot_id];
	struct ipc_layer_entry *layer = &slot->layers[icc->layers.layer_count];
	struct ipc_client_swapchain *xscn[XRT_MAX_VIEWS];
	struct ipc_client_swapchain *d_xscn[XRT_MAX_VIEWS];
	for (uint32_t i = 0; i < data->view_count; ++i) {
		xscn[i] = ipc_client_swapchain(xsc[i]);
		d_xscn[i] = ipc_client_swapchain(d_xsc[i]);

		// Depth ids are packed after the colour ids.
		layer->swapchain_ids[i] = xscn[i]->id;
		layer->swapchain_ids[i + data->view_count] = d_xscn[i]->id;
	}

	layer->xdev_id = 0; //! @todo Real id.

	layer->data = *data;

	// Increment the number of layers.
	icc->layers.layer_count++;

	return XRT_SUCCESS;
}
641
/*!
 * Shared helper for all single-swapchain layer types: queues the layer into
 * the current slot, only the first swapchain id slot is used.
 */
static xrt_result_t
handle_layer(struct xrt_compositor *xc,
             struct xrt_device *xdev,
             struct xrt_swapchain *xsc,
             const struct xrt_layer_data *data,
             enum xrt_layer_type type)
{
	struct ipc_client_compositor *icc = ipc_client_compositor(xc);

	assert(data->type == type);

	struct ipc_shared_memory *ism = icc->ipc_c->ism;
	struct ipc_layer_slot *slot = &ism->slots[icc->layers.slot_id];
	struct ipc_layer_entry *layer = &slot->layers[icc->layers.layer_count];
	struct ipc_client_swapchain *ics = ipc_client_swapchain(xsc);

	layer->xdev_id = 0; //! @todo Real id.
	// Unused id slots are marked with -1.
	layer->swapchain_ids[0] = ics->id;
	layer->swapchain_ids[1] = -1;
	layer->swapchain_ids[2] = -1;
	layer->swapchain_ids[3] = -1;
	layer->data = *data;

	// Increment the number of layers.
	icc->layers.layer_count++;

	return XRT_SUCCESS;
}
670
//! Queue a quad layer, see @ref handle_layer.
static xrt_result_t
ipc_compositor_layer_quad(struct xrt_compositor *xc,
                          struct xrt_device *xdev,
                          struct xrt_swapchain *xsc,
                          const struct xrt_layer_data *data)
{
	return handle_layer(xc, xdev, xsc, data, XRT_LAYER_QUAD);
}
679
//! Queue a cube layer, see @ref handle_layer.
static xrt_result_t
ipc_compositor_layer_cube(struct xrt_compositor *xc,
                          struct xrt_device *xdev,
                          struct xrt_swapchain *xsc,
                          const struct xrt_layer_data *data)
{
	return handle_layer(xc, xdev, xsc, data, XRT_LAYER_CUBE);
}
688
//! Queue a cylinder layer, see @ref handle_layer.
static xrt_result_t
ipc_compositor_layer_cylinder(struct xrt_compositor *xc,
                              struct xrt_device *xdev,
                              struct xrt_swapchain *xsc,
                              const struct xrt_layer_data *data)
{
	return handle_layer(xc, xdev, xsc, data, XRT_LAYER_CYLINDER);
}
697
//! Queue an equirect1 layer, see @ref handle_layer.
static xrt_result_t
ipc_compositor_layer_equirect1(struct xrt_compositor *xc,
                               struct xrt_device *xdev,
                               struct xrt_swapchain *xsc,
                               const struct xrt_layer_data *data)
{
	return handle_layer(xc, xdev, xsc, data, XRT_LAYER_EQUIRECT1);
}
706
//! Queue an equirect2 layer, see @ref handle_layer.
static xrt_result_t
ipc_compositor_layer_equirect2(struct xrt_compositor *xc,
                               struct xrt_device *xdev,
                               struct xrt_swapchain *xsc,
                               const struct xrt_layer_data *data)
{
	return handle_layer(xc, xdev, xsc, data, XRT_LAYER_EQUIRECT2);
}
715
/*!
 * Queue a passthrough layer into the current slot; no swapchains involved.
 */
static xrt_result_t
ipc_compositor_layer_passthrough(struct xrt_compositor *xc, struct xrt_device *xdev, const struct xrt_layer_data *data)
{
	struct ipc_client_compositor *icc = ipc_client_compositor(xc);

	assert(data->type == XRT_LAYER_PASSTHROUGH);

	struct ipc_shared_memory *ism = icc->ipc_c->ism;
	struct ipc_layer_slot *slot = &ism->slots[icc->layers.slot_id];
	struct ipc_layer_entry *layer = &slot->layers[icc->layers.layer_count];

	layer->xdev_id = 0; //! @todo Real id.
	layer->data = *data;

	// Increment the number of layers.
	icc->layers.layer_count++;

	return XRT_SUCCESS;
}
735
736static xrt_result_t
737ipc_compositor_layer_commit(struct xrt_compositor *xc, xrt_graphics_sync_handle_t sync_handle)
738{
739 struct ipc_client_compositor *icc = ipc_client_compositor(xc);
740 xrt_result_t xret;
741
742 bool valid_sync = xrt_graphics_sync_handle_is_valid(sync_handle);
743
744 struct ipc_shared_memory *ism = icc->ipc_c->ism;
745 struct ipc_layer_slot *slot = &ism->slots[icc->layers.slot_id];
746
747 // Last bit of data to put in the shared memory area.
748 slot->layer_count = icc->layers.layer_count;
749
750 xret = ipc_call_compositor_layer_sync( //
751 icc->ipc_c, //
752 icc->layers.slot_id, //
753 &sync_handle, //
754 valid_sync ? 1 : 0, //
755 &icc->layers.slot_id); //
756
757 /*
758 * We are probably in a really bad state if we fail, at
759 * least print out the error and continue as best we can.
760 */
761 IPC_CHK_ONLY_PRINT(icc->ipc_c, xret, "ipc_call_compositor_layer_sync_with_semaphore");
762
763 // Reset.
764 icc->layers.layer_count = 0;
765
766 // Need to consume this handle.
767 if (valid_sync) {
768 u_graphics_sync_unref(&sync_handle);
769 }
770
771 return xret;
772}
773
/*!
 * Commit the queued layers using a compositor semaphore instead of a native
 * sync handle; otherwise mirrors @ref ipc_compositor_layer_commit.
 */
static xrt_result_t
ipc_compositor_layer_commit_with_semaphore(struct xrt_compositor *xc,
                                           struct xrt_compositor_semaphore *xcsem,
                                           uint64_t value)
{
	struct ipc_client_compositor *icc = ipc_client_compositor(xc);
	struct ipc_client_compositor_semaphore *iccs = ipc_client_compositor_semaphore(xcsem);
	xrt_result_t xret;

	struct ipc_shared_memory *ism = icc->ipc_c->ism;
	struct ipc_layer_slot *slot = &ism->slots[icc->layers.slot_id];

	// Last bit of data to put in the shared memory area.
	slot->layer_count = icc->layers.layer_count;

	xret = ipc_call_compositor_layer_sync_with_semaphore( //
	    icc->ipc_c,                                       //
	    icc->layers.slot_id,                              //
	    iccs->id,                                         //
	    value,                                            //
	    &icc->layers.slot_id);                            //

	/*
	 * We are probably in a really bad state if we fail, at
	 * least print out the error and continue as best we can.
	 */
	IPC_CHK_ONLY_PRINT(icc->ipc_c, xret, "ipc_call_compositor_layer_sync_with_semaphore");

	// Reset.
	icc->layers.layer_count = 0;

	return xret;
}
807
808static xrt_result_t
809ipc_compositor_discard_frame(struct xrt_compositor *xc, int64_t frame_id)
810{
811 struct ipc_client_compositor *icc = ipc_client_compositor(xc);
812 xrt_result_t xret;
813
814 xret = ipc_call_compositor_discard_frame(icc->ipc_c, frame_id);
815 IPC_CHK_ALWAYS_RET(icc->ipc_c, xret, "ipc_call_compositor_discard_frame");
816}
817
818static xrt_result_t
819ipc_compositor_set_performance_level(struct xrt_compositor *xc,
820 enum xrt_perf_domain domain,
821 enum xrt_perf_set_level level)
822{
823 struct ipc_client_compositor *icc = ipc_client_compositor(xc);
824 xrt_result_t xret;
825 xret = ipc_call_compositor_set_performance_level(icc->ipc_c, domain, level);
826 IPC_CHK_ALWAYS_RET(icc->ipc_c, xret, "ipc_call_compositor_set_performance_level");
827}
828
829static xrt_result_t
830ipc_compositor_set_thread_hint(struct xrt_compositor *xc, enum xrt_thread_hint hint, uint32_t thread_id)
831{
832 struct ipc_client_compositor *icc = ipc_client_compositor(xc);
833 xrt_result_t xret;
834
835 xret = ipc_call_compositor_set_thread_hint(icc->ipc_c, hint, thread_id);
836 IPC_CHK_ALWAYS_RET(icc->ipc_c, xret, "ipc_call_compositor_set_thread_hint");
837}
838
839static xrt_result_t
840ipc_compositor_get_display_refresh_rate(struct xrt_compositor *xc, float *out_display_refresh_rate_hz)
841{
842 struct ipc_client_compositor *icc = ipc_client_compositor(xc);
843 xrt_result_t xret;
844
845 xret = ipc_call_compositor_get_display_refresh_rate(icc->ipc_c, out_display_refresh_rate_hz);
846 IPC_CHK_ALWAYS_RET(icc->ipc_c, xret, "ipc_call_compositor_get_display_refresh_rate");
847}
848
849static xrt_result_t
850ipc_compositor_request_display_refresh_rate(struct xrt_compositor *xc, float display_refresh_rate_hz)
851{
852 struct ipc_client_compositor *icc = ipc_client_compositor(xc);
853 xrt_result_t xret;
854
855 xret = ipc_call_compositor_request_display_refresh_rate(icc->ipc_c, display_refresh_rate_hz);
856 IPC_CHK_ALWAYS_RET(icc->ipc_c, xret, "ipc_call_compositor_request_display_refresh_rate");
857}
858
859static xrt_result_t
860ipc_compositor_get_reference_bounds_rect(struct xrt_compositor *xc,
861 enum xrt_reference_space_type reference_space_type,
862 struct xrt_vec2 *bounds)
863{
864 struct ipc_client_compositor *icc = ipc_client_compositor(xc);
865 xrt_result_t xret;
866
867 xret = ipc_call_compositor_get_reference_bounds_rect(icc->ipc_c, reference_space_type, bounds);
868 IPC_CHK_ALWAYS_RET(icc->ipc_c, xret, "ipc_call_compositor_get_reference_bounds_rect");
869}
870
/*!
 * Tear down the client compositor state. Does not free @p xc: the memory is
 * owned by the system compositor, this only marks it as not created so a new
 * native compositor can be made later.
 */
static void
ipc_compositor_destroy(struct xrt_compositor *xc)
{
	struct ipc_client_compositor *icc = ipc_client_compositor(xc);

	assert(icc->compositor_created);

	os_precise_sleeper_deinit(&icc->sleeper);

	icc->compositor_created = false;
}
882
/*!
 * Wire up all compositor function pointers, init the sleeper used by wait
 * frame and fetch the compositor info from the service.
 */
static void
ipc_compositor_init(struct ipc_client_compositor *icc, struct xrt_compositor_native **out_xcn)
{
	icc->base.base.get_swapchain_create_properties = ipc_compositor_get_swapchain_create_properties;
	icc->base.base.create_swapchain = ipc_compositor_swapchain_create;
	icc->base.base.import_swapchain = ipc_compositor_swapchain_import;
	icc->base.base.create_semaphore = ipc_compositor_semaphore_create;
	icc->base.base.create_passthrough = ipc_compositor_create_passthrough;
	icc->base.base.create_passthrough_layer = ipc_compositor_create_passthrough_layer;
	icc->base.base.destroy_passthrough = ipc_compositor_destroy_passthrough;
	icc->base.base.begin_session = ipc_compositor_begin_session;
	icc->base.base.end_session = ipc_compositor_end_session;
	icc->base.base.wait_frame = ipc_compositor_wait_frame;
	icc->base.base.begin_frame = ipc_compositor_begin_frame;
	icc->base.base.discard_frame = ipc_compositor_discard_frame;
	icc->base.base.layer_begin = ipc_compositor_layer_begin;
	icc->base.base.layer_projection = ipc_compositor_layer_projection;
	icc->base.base.layer_projection_depth = ipc_compositor_layer_projection_depth;
	icc->base.base.layer_quad = ipc_compositor_layer_quad;
	icc->base.base.layer_cube = ipc_compositor_layer_cube;
	icc->base.base.layer_cylinder = ipc_compositor_layer_cylinder;
	icc->base.base.layer_equirect1 = ipc_compositor_layer_equirect1;
	icc->base.base.layer_equirect2 = ipc_compositor_layer_equirect2;
	icc->base.base.layer_passthrough = ipc_compositor_layer_passthrough;
	icc->base.base.layer_commit = ipc_compositor_layer_commit;
	icc->base.base.layer_commit_with_semaphore = ipc_compositor_layer_commit_with_semaphore;
	icc->base.base.destroy = ipc_compositor_destroy;
	icc->base.base.set_thread_hint = ipc_compositor_set_thread_hint;
	icc->base.base.get_display_refresh_rate = ipc_compositor_get_display_refresh_rate;
	icc->base.base.request_display_refresh_rate = ipc_compositor_request_display_refresh_rate;
	icc->base.base.set_performance_level = ipc_compositor_set_performance_level;
	icc->base.base.get_reference_bounds_rect = ipc_compositor_get_reference_bounds_rect;

	// Using in wait frame.
	os_precise_sleeper_init(&icc->sleeper);

	// Fetch info from the compositor, among it the format list.
	get_info(&(icc->base.base), &icc->base.base.info);

	*out_xcn = &icc->base;
}
924
925
926/*
927 *
928 * Loopback image allocator.
929 *
930 */
931
932#ifdef IPC_USE_LOOPBACK_IMAGE_ALLOCATOR
/*!
 * Loopback allocator: creates a throw-away server side swapchain just to get
 * native buffers allocated, keeps the fds and destroys the swapchain.
 *
 * NOTE(review): only compiled when IPC_USE_LOOPBACK_IMAGE_ALLOCATOR is
 * defined; uses an older fd-based ipc_call_swapchain_create signature than
 * the one used elsewhere in this file — confirm against the generated IPC
 * code before enabling.
 */
static inline xrt_result_t
ipc_compositor_images_allocate(struct xrt_image_native_allocator *xina,
                               const struct xrt_swapchain_create_info *xsci,
                               size_t in_image_count,
                               struct xrt_image_native *out_images)
{
	struct ipc_client_compositor *icc = container_of(xina, struct ipc_client_compositor, loopback_xina);

	int remote_fds[IPC_MAX_SWAPCHAIN_FDS] = {0};
	xrt_result_t xret;
	uint32_t image_count;
	uint32_t handle;
	uint64_t size;

	// Mark all fds as "not received" so cleanup can tell them apart.
	for (size_t i = 0; i < ARRAY_SIZE(remote_fds); i++) {
		remote_fds[i] = -1;
	}

	for (size_t i = 0; i < in_image_count; i++) {
		out_images[i].fd = -1;
		out_images[i].size = 0;
	}

	xret = ipc_call_swapchain_create( //
	    icc->ipc_c,                   // connection
	    xsci,                         // in
	    &handle,                      // out
	    &image_count,                 // out
	    &size,                        // out
	    remote_fds,                   // fds
	    IPC_MAX_SWAPCHAIN_FDS);       // fds
	IPC_CHK_AND_RET(icc->ipc_c, xret, "ipc_call_swapchain_create");

	/*
	 * It's okay to destroy it immediately, the native handles are
	 * now owned by us and we keep the buffers alive that way.
	 */
	xret = ipc_call_swapchain_destroy(icc->ipc_c, handle);
	assert(xret == XRT_SUCCESS);

	// Clumsy way of handling this.
	if (image_count < in_image_count) {
		for (uint32_t k = 0; k < image_count && k < in_image_count; k++) {
			/*
			 * Overly-broad condition: we know that any fd not touched by
			 * ipc_call_swapchain_create will be -1.
			 */
			if (remote_fds[k] >= 0) {
				close(remote_fds[k]);
				remote_fds[k] = -1;
			}
		}

		return XRT_ERROR_IPC_FAILURE;
	}

	// Copy up to in_image_count, or image_count what ever is lowest.
	uint32_t i = 0;
	for (; i < image_count && i < in_image_count; i++) {
		out_images[i].fd = remote_fds[i];
		out_images[i].size = size;
	}

	// Close any fds we are not interested in.
	for (; i < image_count; i++) {
		/*
		 * Overly-broad condition: we know that any fd not touched by
		 * ipc_call_swapchain_create will be -1.
		 */
		if (remote_fds[i] >= 0) {
			close(remote_fds[i]);
			remote_fds[i] = -1;
		}
	}

	return XRT_SUCCESS;
}
1010
1011static inline xrt_result_t
1012ipc_compositor_images_free(struct xrt_image_native_allocator *xina,
1013 size_t image_count,
1014 struct xrt_image_native *out_images)
1015{
1016 for (uint32_t i = 0; i < image_count; i++) {
1017 close(out_images[i].fd);
1018 out_images[i].fd = -1;
1019 out_images[i].size = 0;
1020 }
1021
1022 return XRT_SUCCESS;
1023}
1024
/*!
 * Loopback allocator: nothing to tear down, the allocator is embedded in
 * the client compositor and freed with it.
 */
static inline void
ipc_compositor_images_destroy(struct xrt_image_native_allocator *xina)
{
	// Noop
}
1030#endif
1031
1032
1033/*
1034 *
1035 * System compositor.
1036 *
1037 */
1038
/*!
 * xrt_system_compositor::create_native_compositor entry point. Not meant to
 * be reached on the client side; logs an error and fails. Native compositor
 * creation goes through @ref ipc_client_create_native_compositor instead.
 */
xrt_result_t
ipc_syscomp_create_native_compositor(struct xrt_system_compositor *xsc,
                                     const struct xrt_session_info *xsi,
                                     struct xrt_session_event_sink *xses,
                                     struct xrt_compositor_native **out_xcn)
{
	struct ipc_client_compositor *icc = container_of(xsc, struct ipc_client_compositor, system);

	IPC_ERROR(icc->ipc_c, "This function shouldn't be called!");

	return XRT_ERROR_IPC_FAILURE;
}
1051
/*!
 * Destroy the system compositor proxy: destroys the optional image
 * allocator and frees the combined compositor struct.
 */
void
ipc_syscomp_destroy(struct xrt_system_compositor *xsc)
{
	struct ipc_client_compositor *icc = container_of(xsc, struct ipc_client_compositor, system);

	// Does null checking.
	xrt_images_destroy(&icc->xina);

	//! @todo Implement
	IPC_TRACE(icc->ipc_c, "NOT IMPLEMENTED compositor destroy.");

	free(icc);
}
1065
1066
1067/*
1068 *
1069 * 'Exported' functions.
1070 *
1071 */
1072
/*!
 * Create the (single) native compositor for a session: creates the service
 * side session first, then initializes the client proxy. Fails with
 * XRT_ERROR_MULTI_SESSION_NOT_IMPLEMENTED if one already exists.
 */
xrt_result_t
ipc_client_create_native_compositor(struct xrt_system_compositor *xsysc,
                                    const struct xrt_session_info *xsi,
                                    struct xrt_compositor_native **out_xcn)
{
	struct ipc_client_compositor *icc = container_of(xsysc, struct ipc_client_compositor, system);
	xrt_result_t xret;

	if (icc->compositor_created) {
		return XRT_ERROR_MULTI_SESSION_NOT_IMPLEMENTED;
	}

	/*
	 * Needs to be done before init, we don't own the service side session
	 * the session does. But we create it here in case any extra arguments
	 * that only the compositor knows about needs to be sent.
	 */
	xret = ipc_call_session_create( //
	    icc->ipc_c,                 // ipc_c
	    xsi,                        // xsi
	    true);                      // create_native_compositor
	IPC_CHK_AND_RET(icc->ipc_c, xret, "ipc_call_session_create");

	// Needs to be done after session create call.
	ipc_compositor_init(icc, out_xcn);

	icc->compositor_created = true;

	return XRT_SUCCESS;
}
1103
/*!
 * Create the client side system compositor proxy, fetching the system info
 * from the service. Takes ownership of the optional @p xina allocator
 * (destroyed in @ref ipc_syscomp_destroy).
 */
xrt_result_t
ipc_client_create_system_compositor(struct ipc_connection *ipc_c,
                                    struct xrt_image_native_allocator *xina,
                                    struct xrt_device *xdev,
                                    struct xrt_system_compositor **out_xcs)
{
	struct ipc_client_compositor *c = U_TYPED_CALLOC(struct ipc_client_compositor);

	c->system.create_native_compositor = ipc_syscomp_create_native_compositor;
	c->system.destroy = ipc_syscomp_destroy;
	c->ipc_c = ipc_c;
	c->xina = xina;


#ifdef IPC_USE_LOOPBACK_IMAGE_ALLOCATOR
	c->loopback_xina.images_allocate = ipc_compositor_images_allocate;
	c->loopback_xina.images_free = ipc_compositor_images_free;
	c->loopback_xina.destroy = ipc_compositor_images_destroy;

	// Only use the loopback allocator when none was supplied.
	if (c->xina == NULL) {
		c->xina = &c->loopback_xina;
	}
#endif

	// Fetch info from the system compositor.
	get_system_info(c, &c->system.info);

	*out_xcs = &c->system;

	return XRT_SUCCESS;
}