// The open source OpenXR runtime.
1// Copyright 2019-2021, Collabora, Ltd.
2// Copyright 2024-2025, NVIDIA CORPORATION.
3// SPDX-License-Identifier: BSL-1.0
4/*!
5 * @file
6 * @brief Multi client wrapper compositor.
7 * @author Pete Black <pblack@collabora.com>
8 * @author Jakob Bornecrantz <jakob@collabora.com>
9 * @ingroup comp_multi
10 */
11
12#include "util/u_logging.h"
13#include "xrt/xrt_session.h"
14
15#include "os/os_time.h"
16
17#include "util/u_var.h"
18#include "util/u_wait.h"
19#include "util/u_misc.h"
20#include "util/u_time.h"
21#include "util/u_debug.h"
22#include "util/u_handles.h"
23#include "util/u_trace_marker.h"
24#include "util/u_distortion_mesh.h"
25
26#include "multi/comp_multi_private.h"
27
28#include <math.h>
29#include <stdio.h>
30#include <assert.h>
31#include <stdarg.h>
32#include <stdlib.h>
33#include <string.h>
34
35#ifdef XRT_GRAPHICS_SYNC_HANDLE_IS_FD
36#include <unistd.h>
37#endif
38
39#ifdef XRT_OS_ANDROID
40#include "android/android_custom_surface.h"
41#include "android/android_globals.h"
42#endif
43
44DEBUG_GET_ONCE_LOG_OPTION(app_frame_lag_level, "XRT_APP_FRAME_LAG_LOG_AS_LEVEL", U_LOGGING_DEBUG)
45#define LOG_FRAME_LAG(...) U_LOG_IFL(debug_get_log_option_app_frame_lag_level(), u_log_get_global_level(), __VA_ARGS__)
46
47/*
48 *
49 * Slot management functions.
50 *
51 */
52
53
54/*!
55 * Clear a slot, need to have the list_and_timing_lock held.
56 */
57static void
58slot_clear_locked(struct multi_compositor *mc, struct multi_layer_slot *slot)
59{
60 if (slot->active) {
61 int64_t now_ns = os_monotonic_get_ns();
62 u_pa_retired(mc->upa, slot->data.frame_id, now_ns);
63 }
64
65 for (size_t i = 0; i < slot->layer_count; i++) {
66 for (size_t k = 0; k < ARRAY_SIZE(slot->layers[i].xscs); k++) {
67 xrt_swapchain_reference(&slot->layers[i].xscs[k], NULL);
68 }
69 }
70
71 U_ZERO(slot);
72 slot->data.frame_id = -1;
73}
74
/*!
 * Move a slot into a cleared slot, the destination must be cleared before
 * calling this function (see @ref slot_clear_locked).
 */
static void
slot_move_into_cleared(struct multi_layer_slot *dst, struct multi_layer_slot *src)
{
	assert(!dst->active);
	assert(dst->data.frame_id == -1);

	// All references are kept.
	*dst = *src;

	U_ZERO(src);
	src->data.frame_id = -1;
}
90
/*!
 * Clear the destination slot and move the source slot into it, need to have
 * the list_and_timing_lock held.
 */
static void
slot_move_and_clear_locked(struct multi_compositor *mc, struct multi_layer_slot *dst, struct multi_layer_slot *src)
{
	slot_clear_locked(mc, dst);
	slot_move_into_cleared(dst, src);
}
100
101
102/*
103 *
104 * Event management functions.
105 *
106 */
107
108xrt_result_t
109multi_compositor_push_event(struct multi_compositor *mc, const union xrt_session_event *xse)
110{
111 // Dispatch to the current event sink.
112 return xrt_session_event_sink_push(mc->xses, xse);
113}
114
115
116/*
117 *
118 * Wait helper thread.
119 *
120 */
121
122static bool
123is_pushed_or_waiting_locked(struct multi_compositor *mc)
124{
125 return mc->wait_thread.waiting || //
126 mc->wait_thread.xcf != NULL || //
127 mc->wait_thread.xcsem != NULL; //
128}
129
130static void
131wait_fence(struct multi_compositor *mc, struct xrt_compositor_fence **xcf_ptr)
132{
133 COMP_TRACE_MARKER();
134 xrt_result_t ret = XRT_SUCCESS;
135
136 // 100ms
137 int64_t timeout_ns = 100 * U_TIME_1MS_IN_NS;
138
139 do {
140 ret = xrt_compositor_fence_wait(*xcf_ptr, timeout_ns);
141 if (ret != XRT_TIMEOUT) {
142 break;
143 }
144
145 U_LOG_W("Waiting on client fence timed out > 100ms!");
146 } while (os_thread_helper_is_running(&mc->wait_thread.oth));
147
148 xrt_compositor_fence_destroy(xcf_ptr);
149
150 if (ret != XRT_SUCCESS) {
151 U_LOG_E("Fence waiting failed!");
152 }
153}
154
155static void
156wait_semaphore(struct multi_compositor *mc, struct xrt_compositor_semaphore **xcsem_ptr, uint64_t value)
157{
158 COMP_TRACE_MARKER();
159 xrt_result_t ret = XRT_SUCCESS;
160
161 // 100ms
162 int64_t timeout_ns = 100 * U_TIME_1MS_IN_NS;
163
164 do {
165 ret = xrt_compositor_semaphore_wait(*xcsem_ptr, value, timeout_ns);
166 if (ret != XRT_TIMEOUT) {
167 break;
168 }
169
170 U_LOG_W("Waiting on client semaphore value '%" PRIu64 "' timed out > 100ms!", value);
171 } while (os_thread_helper_is_running(&mc->wait_thread.oth));
172
173 xrt_compositor_semaphore_reference(xcsem_ptr, NULL);
174
175 if (ret != XRT_SUCCESS) {
176 U_LOG_E("Semaphore waiting failed!");
177 }
178}
179
/*!
 * Block until the scheduled slot is free, then move the newly completed frame
 * from the progress slot into it. Takes and releases both the slot_lock and
 * the list_and_timing_lock, so must be called with neither held.
 */
static void
wait_for_scheduled_free(struct multi_compositor *mc)
{
	COMP_TRACE_MARKER();

	os_mutex_lock(&mc->slot_lock);

	// NOTE(review): volatile forces re-reads while polling; it is not a
	// synchronization primitive — the lock/unlock cycle below is what
	// publishes the values. Confirm this is intentional.
	struct multi_compositor volatile *v_mc = mc;

	// Block here if the scheduled slot is not clear.
	while (v_mc->scheduled.active) {
		int64_t now_ns = os_monotonic_get_ns();

		// This frame is for the next frame, drop the old one no matter what.
		if (time_is_within_half_ms(mc->progress.data.display_time_ns, mc->slot_next_frame_display)) {
			LOG_FRAME_LAG("%.3fms: Dropping old missed frame in favour for completed new frame",
			              time_ns_to_ms_f(now_ns));
			break;
		}

		// Replace the scheduled frame if it's in the past.
		if (v_mc->scheduled.data.display_time_ns < now_ns) {
			U_LOG_T("%.3fms: Replacing frame for time in past in favour of completed new frame",
			        time_ns_to_ms_f(now_ns));
			break;
		}

		U_LOG_D(
		    "Two frames have completed GPU work and are waiting to be displayed."
		    "\n\tnext frame: %fms (%" PRIu64
		    ") (next time for compositor to pick up frame)"
		    "\n\tprogress: %fms (%" PRIu64
		    ") (latest completed frame)"
		    "\n\tscheduled: %fms (%" PRIu64 ") (oldest waiting frame)",
		    time_ns_to_ms_f((int64_t)v_mc->slot_next_frame_display - now_ns),        //
		    v_mc->slot_next_frame_display,                                           //
		    time_ns_to_ms_f((int64_t)v_mc->progress.data.display_time_ns - now_ns),  //
		    v_mc->progress.data.display_time_ns,                                     //
		    time_ns_to_ms_f((int64_t)v_mc->scheduled.data.display_time_ns - now_ns), //
		    v_mc->scheduled.data.display_time_ns);                                   //

		// Drop the lock while sleeping so the compositor can pick up frames.
		os_mutex_unlock(&mc->slot_lock);

		os_precise_sleeper_nanosleep(&mc->scheduled_sleeper, U_TIME_1MS_IN_NS);

		os_mutex_lock(&mc->slot_lock);
	}

	os_mutex_unlock(&mc->slot_lock);

	/*
	 * Need to take list_and_timing_lock before slot_lock because slot_lock
	 * is taken in multi_compositor_deliver_any_frames with list_and_timing_lock
	 * held to stop clients from going away.
	 */
	os_mutex_lock(&mc->msc->list_and_timing_lock);
	os_mutex_lock(&mc->slot_lock);
	slot_move_and_clear_locked(mc, &mc->scheduled, &mc->progress);
	os_mutex_unlock(&mc->slot_lock);
	os_mutex_unlock(&mc->msc->list_and_timing_lock);
}
241
/*!
 * Main function of the per-client wait thread: consumes fences/semaphores
 * pushed by layer_commit, waits on them, marks GPU work done with the pacer
 * and moves the finished frame from the progress slot to the scheduled slot.
 */
static void *
run_func(void *ptr)
{
	struct multi_compositor *mc = (struct multi_compositor *)ptr;

	U_TRACE_SET_THREAD_NAME("Multi Client Module: Waiter");
	os_thread_helper_name(&mc->wait_thread.oth, "Multi Client Module: Waiter");

	os_thread_helper_lock(&mc->wait_thread.oth);

	// Signal the start function that we are entering the loop.
	mc->wait_thread.alive = true;
	os_thread_helper_signal_locked(&mc->wait_thread.oth);

	/*
	 * One can view the layer_commit function and the wait thread as a
	 * producer/consumer pair. This loop is the consumer side of that pair.
	 * We look for either a fence or a semaphore on each loop, if none are
	 * found we check if we are running then wait on the conditional
	 * variable once again waiting to be signalled by the producer.
	 */
	while (os_thread_helper_is_running_locked(&mc->wait_thread.oth)) {
		/*
		 * Here we wait for the either a semaphore or a fence, if
		 * neither has been set we wait/sleep here (again).
		 */
		if (mc->wait_thread.xcsem == NULL && mc->wait_thread.xcf == NULL) {
			// Spurious wakeups are handled below.
			os_thread_helper_wait_locked(&mc->wait_thread.oth);
			// Fall through here on stopping to clean up any outstanding waits.
		}

		// Take ownership of the pushed work while still holding the lock.
		int64_t frame_id = mc->wait_thread.frame_id;
		struct xrt_compositor_fence *xcf = mc->wait_thread.xcf;
		struct xrt_compositor_semaphore *xcsem = mc->wait_thread.xcsem; // No need to ref, a move.
		uint64_t value = mc->wait_thread.value;

		// Ok to clear these on spurious wakeup as they are empty then anyways.
		mc->wait_thread.frame_id = 0;
		mc->wait_thread.xcf = NULL;
		mc->wait_thread.xcsem = NULL;
		mc->wait_thread.value = 0;

		// We are being stopped, or a spurious wakeup, loop back and check running.
		if (xcf == NULL && xcsem == NULL) {
			continue;
		}

		// We now know that we should wait.
		mc->wait_thread.waiting = true;

		// Drop the lock during the potentially long GPU waits.
		os_thread_helper_unlock(&mc->wait_thread.oth);

		if (xcsem != NULL) {
			wait_semaphore(mc, &xcsem, value);
		}
		if (xcf != NULL) {
			wait_fence(mc, &xcf);
		}

		// Sample time outside of lock.
		int64_t now_ns = os_monotonic_get_ns();

		// Tell the pacer this frame's GPU work is done.
		os_mutex_lock(&mc->msc->list_and_timing_lock);
		u_pa_mark_gpu_done(mc->upa, frame_id, now_ns);
		os_mutex_unlock(&mc->msc->list_and_timing_lock);

		// Wait for the delivery slot.
		wait_for_scheduled_free(mc);

		os_thread_helper_lock(&mc->wait_thread.oth);

		/*
		 * Finally no longer waiting, this must be done after
		 * wait_for_scheduled_free because it moves the slots/layers
		 * from progress to scheduled to be picked up by the compositor.
		 */
		mc->wait_thread.waiting = false;

		// Wake any thread parked in wait_for_wait_thread_locked.
		if (mc->wait_thread.blocked) {
			// Release one thread
			mc->wait_thread.blocked = false;
			os_thread_helper_signal_locked(&mc->wait_thread.oth);
		}
	}

	os_thread_helper_unlock(&mc->wait_thread.oth);

	return NULL;
}
332
/*!
 * Block the caller until the wait thread has consumed and finished any pushed
 * work, the wait thread's thread-helper lock must be held.
 */
static void
wait_for_wait_thread_locked(struct multi_compositor *mc)
{
	// Should we wait for the last frame.
	if (is_pushed_or_waiting_locked(mc)) {
		COMP_TRACE_IDENT(blocked);

		// There should only be one thread entering here.
		assert(mc->wait_thread.blocked == false);

		// OK, wait until the wait thread releases us by setting blocked to false
		mc->wait_thread.blocked = true;
		while (mc->wait_thread.blocked) {
			os_thread_helper_wait_locked(&mc->wait_thread.oth);
		}
	}
}
350
/*!
 * Lock-taking wrapper around @ref wait_for_wait_thread_locked.
 */
static void
wait_for_wait_thread(struct multi_compositor *mc)
{
	os_thread_helper_lock(&mc->wait_thread.oth);

	wait_for_wait_thread_locked(mc);

	os_thread_helper_unlock(&mc->wait_thread.oth);
}
360
/*!
 * Hand a fence for the given frame over to the wait thread and wake it;
 * ownership of @p xcf transfers to the wait thread.
 */
static void
push_fence_to_wait_thread(struct multi_compositor *mc, int64_t frame_id, struct xrt_compositor_fence *xcf)
{
	os_thread_helper_lock(&mc->wait_thread.oth);

	// The function begin_layer should have waited, but just in case.
	assert(!mc->wait_thread.waiting);
	wait_for_wait_thread_locked(mc);

	assert(mc->wait_thread.xcf == NULL);

	// A move: the wait thread destroys the fence when done with it.
	mc->wait_thread.frame_id = frame_id;
	mc->wait_thread.xcf = xcf;

	os_thread_helper_signal_locked(&mc->wait_thread.oth);

	os_thread_helper_unlock(&mc->wait_thread.oth);
}
379
/*!
 * Hand a semaphore (and the value to wait for) over to the wait thread and
 * wake it; a new reference is taken here, the caller keeps its own.
 */
static void
push_semaphore_to_wait_thread(struct multi_compositor *mc,
                              int64_t frame_id,
                              struct xrt_compositor_semaphore *xcsem,
                              uint64_t value)
{
	os_thread_helper_lock(&mc->wait_thread.oth);

	// The function begin_layer should have waited, but just in case.
	assert(!mc->wait_thread.waiting);
	wait_for_wait_thread_locked(mc);

	assert(mc->wait_thread.xcsem == NULL);

	// Reference (not a move): the wait thread drops it when done.
	mc->wait_thread.frame_id = frame_id;
	xrt_compositor_semaphore_reference(&mc->wait_thread.xcsem, xcsem);
	mc->wait_thread.value = value;

	os_thread_helper_signal_locked(&mc->wait_thread.oth);

	os_thread_helper_unlock(&mc->wait_thread.oth);
}
402
403
404/*
405 *
406 * Compositor functions.
407 *
408 */
409
410static xrt_result_t
411multi_compositor_get_swapchain_create_properties(struct xrt_compositor *xc,
412 const struct xrt_swapchain_create_info *info,
413 struct xrt_swapchain_create_properties *xsccp)
414{
415 COMP_TRACE_MARKER();
416
417 struct multi_compositor *mc = multi_compositor(xc);
418
419 return xrt_comp_get_swapchain_create_properties(&mc->msc->xcn->base, info, xsccp);
420}
421
422static xrt_result_t
423multi_compositor_create_swapchain(struct xrt_compositor *xc,
424 const struct xrt_swapchain_create_info *info,
425 struct xrt_swapchain **out_xsc)
426{
427 COMP_TRACE_MARKER();
428
429 struct multi_compositor *mc = multi_compositor(xc);
430
431 return xrt_comp_create_swapchain(&mc->msc->xcn->base, info, out_xsc);
432}
433
434static xrt_result_t
435multi_compositor_import_swapchain(struct xrt_compositor *xc,
436 const struct xrt_swapchain_create_info *info,
437 struct xrt_image_native *native_images,
438 uint32_t image_count,
439 struct xrt_swapchain **out_xsc)
440{
441 COMP_TRACE_MARKER();
442
443 struct multi_compositor *mc = multi_compositor(xc);
444
445 return xrt_comp_import_swapchain(&mc->msc->xcn->base, info, native_images, image_count, out_xsc);
446}
447
448static xrt_result_t
449multi_compositor_import_fence(struct xrt_compositor *xc,
450 xrt_graphics_sync_handle_t handle,
451 struct xrt_compositor_fence **out_xcf)
452{
453 COMP_TRACE_MARKER();
454
455 struct multi_compositor *mc = multi_compositor(xc);
456
457 return xrt_comp_import_fence(&mc->msc->xcn->base, handle, out_xcf);
458}
459
460static xrt_result_t
461multi_compositor_create_semaphore(struct xrt_compositor *xc,
462 xrt_graphics_sync_handle_t *out_handle,
463 struct xrt_compositor_semaphore **out_xcsem)
464{
465 COMP_TRACE_MARKER();
466
467 struct multi_compositor *mc = multi_compositor(xc);
468
469 // We don't wrap the semaphore and it's safe to pass it out directly.
470 return xrt_comp_create_semaphore(&mc->msc->xcn->base, out_handle, out_xcsem);
471}
472
473static xrt_result_t
474multi_compositor_begin_session(struct xrt_compositor *xc, const struct xrt_begin_session_info *info)
475{
476 COMP_TRACE_MARKER();
477
478 struct multi_compositor *mc = multi_compositor(xc);
479
480 assert(!mc->state.session_active);
481 if (!mc->state.session_active) {
482 multi_system_compositor_update_session_status(mc->msc, true);
483 mc->state.session_active = true;
484 }
485
486 return XRT_SUCCESS;
487}
488
489static xrt_result_t
490multi_compositor_end_session(struct xrt_compositor *xc)
491{
492 COMP_TRACE_MARKER();
493
494 struct multi_compositor *mc = multi_compositor(xc);
495
496 assert(mc->state.session_active);
497 if (mc->state.session_active) {
498 multi_system_compositor_update_session_status(mc->msc, false);
499 mc->state.session_active = false;
500 }
501
502 return XRT_SUCCESS;
503}
504
505static xrt_result_t
506multi_compositor_predict_frame(struct xrt_compositor *xc,
507 int64_t *out_frame_id,
508 int64_t *out_wake_time_ns,
509 int64_t *out_predicted_gpu_time_ns,
510 int64_t *out_predicted_display_time_ns,
511 int64_t *out_predicted_display_period_ns)
512{
513 COMP_TRACE_MARKER();
514
515 struct multi_compositor *mc = multi_compositor(xc);
516 int64_t now_ns = os_monotonic_get_ns();
517 os_mutex_lock(&mc->msc->list_and_timing_lock);
518
519 u_pa_predict( //
520 mc->upa, //
521 now_ns, //
522 out_frame_id, //
523 out_wake_time_ns, //
524 out_predicted_display_time_ns, //
525 out_predicted_display_period_ns); //
526
527 os_mutex_unlock(&mc->msc->list_and_timing_lock);
528
529 *out_predicted_gpu_time_ns = 0;
530
531 return XRT_SUCCESS;
532}
533
534static xrt_result_t
535multi_compositor_mark_frame(struct xrt_compositor *xc,
536 int64_t frame_id,
537 enum xrt_compositor_frame_point point,
538 int64_t when_ns)
539{
540 COMP_TRACE_MARKER();
541
542 struct multi_compositor *mc = multi_compositor(xc);
543
544 int64_t now_ns = os_monotonic_get_ns();
545
546 switch (point) {
547 case XRT_COMPOSITOR_FRAME_POINT_WOKE:
548 os_mutex_lock(&mc->msc->list_and_timing_lock);
549 u_pa_mark_point(mc->upa, frame_id, U_TIMING_POINT_WAKE_UP, now_ns);
550 os_mutex_unlock(&mc->msc->list_and_timing_lock);
551 break;
552 default: assert(false);
553 }
554
555 return XRT_SUCCESS;
556}
557
558static xrt_result_t
559multi_compositor_wait_frame(struct xrt_compositor *xc,
560 int64_t *out_frame_id,
561 int64_t *out_predicted_display_time_ns,
562 int64_t *out_predicted_display_period_ns)
563{
564 COMP_TRACE_MARKER();
565
566 struct multi_compositor *mc = multi_compositor(xc);
567
568 int64_t frame_id = -1;
569 int64_t wake_up_time_ns = 0;
570 int64_t predicted_gpu_time_ns = 0;
571
572 xrt_comp_predict_frame( //
573 xc, //
574 &frame_id, //
575 &wake_up_time_ns, //
576 &predicted_gpu_time_ns, //
577 out_predicted_display_time_ns, //
578 out_predicted_display_period_ns); //
579
580 // Wait until the given wake up time.
581 u_wait_until(&mc->frame_sleeper, wake_up_time_ns);
582
583 int64_t now_ns = os_monotonic_get_ns();
584
585 // Signal that we woke up.
586 xrt_comp_mark_frame(xc, frame_id, XRT_COMPOSITOR_FRAME_POINT_WOKE, now_ns);
587
588 *out_frame_id = frame_id;
589
590 return XRT_SUCCESS;
591}
592
593static xrt_result_t
594multi_compositor_begin_frame(struct xrt_compositor *xc, int64_t frame_id)
595{
596 COMP_TRACE_MARKER();
597
598 struct multi_compositor *mc = multi_compositor(xc);
599
600 os_mutex_lock(&mc->msc->list_and_timing_lock);
601 int64_t now_ns = os_monotonic_get_ns();
602 u_pa_mark_point(mc->upa, frame_id, U_TIMING_POINT_BEGIN, now_ns);
603 os_mutex_unlock(&mc->msc->list_and_timing_lock);
604
605 return XRT_SUCCESS;
606}
607
608static xrt_result_t
609multi_compositor_discard_frame(struct xrt_compositor *xc, int64_t frame_id)
610{
611 COMP_TRACE_MARKER();
612
613 struct multi_compositor *mc = multi_compositor(xc);
614 int64_t now_ns = os_monotonic_get_ns();
615
616 os_mutex_lock(&mc->msc->list_and_timing_lock);
617 u_pa_mark_discarded(mc->upa, frame_id, now_ns);
618 os_mutex_unlock(&mc->msc->list_and_timing_lock);
619
620 return XRT_SUCCESS;
621}
622
static xrt_result_t
multi_compositor_layer_begin(struct xrt_compositor *xc, const struct xrt_layer_frame_data *data)
{
	struct multi_compositor *mc = multi_compositor(xc);

	// As early as possible.
	int64_t now_ns = os_monotonic_get_ns();
	os_mutex_lock(&mc->msc->list_and_timing_lock);
	u_pa_mark_delivered(mc->upa, data->frame_id, now_ns, data->display_time_ns);
	os_mutex_unlock(&mc->msc->list_and_timing_lock);

	/*
	 * We have to block here for the waiting thread to push the last
	 * submitted frame from the progress slot to the scheduled slot,
	 * it only does after the sync object has signaled completion.
	 *
	 * If the previous frame's GPU work has not completed that means we
	 * will block here, but that is okay as the app has already submitted
	 * the GPU for this frame. This should have very little impact on GPU
	 * utilisation, if any.
	 */
	wait_for_wait_thread(mc);

	// The wait thread must have emptied the progress slot by now.
	assert(mc->progress.layer_count == 0);
	U_ZERO(&mc->progress);

	// Start accumulating layers for this new frame.
	mc->progress.active = true;
	mc->progress.data = *data;

	return XRT_SUCCESS;
}
654
655static xrt_result_t
656multi_compositor_layer_projection(struct xrt_compositor *xc,
657 struct xrt_device *xdev,
658 struct xrt_swapchain *xsc[XRT_MAX_VIEWS],
659 const struct xrt_layer_data *data)
660{
661 struct multi_compositor *mc = multi_compositor(xc);
662 (void)mc;
663
664 size_t index = mc->progress.layer_count++;
665 mc->progress.layers[index].xdev = xdev;
666 for (uint32_t i = 0; i < data->view_count; ++i) {
667 xrt_swapchain_reference(&mc->progress.layers[index].xscs[i], xsc[i]);
668 }
669 mc->progress.layers[index].data = *data;
670
671 return XRT_SUCCESS;
672}
673
674static xrt_result_t
675multi_compositor_layer_projection_depth(struct xrt_compositor *xc,
676 struct xrt_device *xdev,
677 struct xrt_swapchain *xsc[XRT_MAX_VIEWS],
678 struct xrt_swapchain *d_xsc[XRT_MAX_VIEWS],
679 const struct xrt_layer_data *data)
680{
681 struct multi_compositor *mc = multi_compositor(xc);
682
683 size_t index = mc->progress.layer_count++;
684 mc->progress.layers[index].xdev = xdev;
685
686 for (uint32_t i = 0; i < data->view_count; ++i) {
687 xrt_swapchain_reference(&mc->progress.layers[index].xscs[i], xsc[i]);
688 xrt_swapchain_reference(&mc->progress.layers[index].xscs[i + data->view_count], d_xsc[i]);
689 }
690 mc->progress.layers[index].data = *data;
691
692 return XRT_SUCCESS;
693}
694
695static xrt_result_t
696multi_compositor_layer_quad(struct xrt_compositor *xc,
697 struct xrt_device *xdev,
698 struct xrt_swapchain *xsc,
699 const struct xrt_layer_data *data)
700{
701 struct multi_compositor *mc = multi_compositor(xc);
702
703 size_t index = mc->progress.layer_count++;
704 mc->progress.layers[index].xdev = xdev;
705 xrt_swapchain_reference(&mc->progress.layers[index].xscs[0], xsc);
706 mc->progress.layers[index].data = *data;
707
708 return XRT_SUCCESS;
709}
710
711static xrt_result_t
712multi_compositor_layer_cube(struct xrt_compositor *xc,
713 struct xrt_device *xdev,
714 struct xrt_swapchain *xsc,
715 const struct xrt_layer_data *data)
716{
717 struct multi_compositor *mc = multi_compositor(xc);
718
719 size_t index = mc->progress.layer_count++;
720 mc->progress.layers[index].xdev = xdev;
721 xrt_swapchain_reference(&mc->progress.layers[index].xscs[0], xsc);
722 mc->progress.layers[index].data = *data;
723
724 return XRT_SUCCESS;
725}
726
727static xrt_result_t
728multi_compositor_layer_cylinder(struct xrt_compositor *xc,
729 struct xrt_device *xdev,
730 struct xrt_swapchain *xsc,
731 const struct xrt_layer_data *data)
732{
733 struct multi_compositor *mc = multi_compositor(xc);
734
735 size_t index = mc->progress.layer_count++;
736 mc->progress.layers[index].xdev = xdev;
737 xrt_swapchain_reference(&mc->progress.layers[index].xscs[0], xsc);
738 mc->progress.layers[index].data = *data;
739
740 return XRT_SUCCESS;
741}
742
743static xrt_result_t
744multi_compositor_layer_equirect1(struct xrt_compositor *xc,
745 struct xrt_device *xdev,
746 struct xrt_swapchain *xsc,
747 const struct xrt_layer_data *data)
748{
749 struct multi_compositor *mc = multi_compositor(xc);
750
751 size_t index = mc->progress.layer_count++;
752 mc->progress.layers[index].xdev = xdev;
753 xrt_swapchain_reference(&mc->progress.layers[index].xscs[0], xsc);
754 mc->progress.layers[index].data = *data;
755
756 return XRT_SUCCESS;
757}
758
759static xrt_result_t
760multi_compositor_layer_equirect2(struct xrt_compositor *xc,
761 struct xrt_device *xdev,
762 struct xrt_swapchain *xsc,
763 const struct xrt_layer_data *data)
764{
765 struct multi_compositor *mc = multi_compositor(xc);
766
767 size_t index = mc->progress.layer_count++;
768 mc->progress.layers[index].xdev = xdev;
769 xrt_swapchain_reference(&mc->progress.layers[index].xscs[0], xsc);
770 mc->progress.layers[index].data = *data;
771
772 return XRT_SUCCESS;
773}
774
static xrt_result_t
multi_compositor_layer_commit(struct xrt_compositor *xc, xrt_graphics_sync_handle_t sync_handle)
{
	COMP_TRACE_MARKER();

	struct multi_compositor *mc = multi_compositor(xc);
	struct xrt_compositor_fence *xcf = NULL;
	int64_t frame_id = mc->progress.data.frame_id;

	do {
		// No sync handle given, nothing to import.
		if (!xrt_graphics_sync_handle_is_valid(sync_handle)) {
			break;
		}

		xrt_result_t xret = xrt_comp_import_fence( //
		    &mc->msc->xcn->base,                   //
		    sync_handle,                           //
		    &xcf);                                 //
		/*!
		 * If import_fence succeeded, we have transferred ownership to
		 * the compositor; no need to do anything more. If the call
		 * failed we need to close the handle.
		 */
		if (xret == XRT_SUCCESS) {
			break;
		}

		u_graphics_sync_unref(&sync_handle);
	} while (false); // Goto without the labels.

	if (xcf != NULL) {
		// The wait thread waits on the fence and schedules the frame.
		push_fence_to_wait_thread(mc, frame_id, xcf);
	} else {
		// Assume that the app side compositor waited.
		int64_t now_ns = os_monotonic_get_ns();

		os_mutex_lock(&mc->msc->list_and_timing_lock);
		u_pa_mark_gpu_done(mc->upa, frame_id, now_ns);
		os_mutex_unlock(&mc->msc->list_and_timing_lock);

		// Schedule the frame ourselves on this thread.
		wait_for_scheduled_free(mc);
	}

	return XRT_SUCCESS;
}
820
821static xrt_result_t
822multi_compositor_layer_commit_with_semaphore(struct xrt_compositor *xc,
823 struct xrt_compositor_semaphore *xcsem,
824 uint64_t value)
825{
826 COMP_TRACE_MARKER();
827
828 struct multi_compositor *mc = multi_compositor(xc);
829 int64_t frame_id = mc->progress.data.frame_id;
830
831 push_semaphore_to_wait_thread(mc, frame_id, xcsem, value);
832
833 return XRT_SUCCESS;
834}
835
836static xrt_result_t
837multi_compositor_set_thread_hint(struct xrt_compositor *xc, enum xrt_thread_hint hint, uint32_t thread_id)
838{
839 // No-op
840 return XRT_SUCCESS;
841}
842
843static xrt_result_t
844multi_compositor_get_display_refresh_rate(struct xrt_compositor *xc, float *out_display_refresh_rate_hz)
845{
846 COMP_TRACE_MARKER();
847
848 struct multi_compositor *mc = multi_compositor(xc);
849
850 return xrt_comp_get_display_refresh_rate(&mc->msc->xcn->base, out_display_refresh_rate_hz);
851}
852
static xrt_result_t
multi_compositor_request_display_refresh_rate(struct xrt_compositor *xc, float display_refresh_rate_hz)
{
	COMP_TRACE_MARKER();

	struct multi_compositor *mc = multi_compositor(xc);

	// Forward the request to the native compositor.
	// NOTE(review): the result of this call is ignored — confirm intended.
	xrt_comp_request_display_refresh_rate(&mc->msc->xcn->base, display_refresh_rate_hz);

#ifdef XRT_OS_ANDROID
	// TODO: notify the display refresh changed event by android display callback function.
	float current_refresh_rate_hz =
	    android_custom_surface_get_display_refresh_rate(android_globals_get_vm(), android_globals_get_context());

	// Only notify when the rate actually changed (0 means the query failed).
	if (current_refresh_rate_hz != 0 && current_refresh_rate_hz != mc->current_refresh_rate_hz) {
		xrt_syscomp_notify_display_refresh_changed(&mc->msc->base, xc, mc->current_refresh_rate_hz,
		                                           current_refresh_rate_hz);
		mc->current_refresh_rate_hz = current_refresh_rate_hz;
	}
#endif

	return XRT_SUCCESS;
}
876
static void
multi_compositor_destroy(struct xrt_compositor *xc)
{
	COMP_TRACE_MARKER();

	struct multi_compositor *mc = multi_compositor(xc);

	// Keep the system compositor's session count balanced.
	if (mc->state.session_active) {
		multi_system_compositor_update_session_status(mc->msc, false);
		mc->state.session_active = false;
	}

	os_mutex_lock(&mc->msc->list_and_timing_lock);

	// Remove it from the list of clients.
	for (size_t i = 0; i < MULTI_MAX_CLIENTS; i++) {
		if (mc->msc->clients[i] == mc) {
			mc->msc->clients[i] = NULL;
		}
	}

	os_mutex_unlock(&mc->msc->list_and_timing_lock);

	// Destroy the wait thread, destroy also stops the thread.
	os_thread_helper_destroy(&mc->wait_thread.oth);

	// We are now off the rendering list, clear slots for any swapchains.
	os_mutex_lock(&mc->msc->list_and_timing_lock);
	slot_clear_locked(mc, &mc->progress);
	slot_clear_locked(mc, &mc->scheduled);
	slot_clear_locked(mc, &mc->delivered);
	os_mutex_unlock(&mc->msc->list_and_timing_lock);

	// Does null checking.
	u_pa_destroy(&mc->upa);

	os_precise_sleeper_deinit(&mc->frame_sleeper);
	os_precise_sleeper_deinit(&mc->scheduled_sleeper);

	os_mutex_destroy(&mc->slot_lock);

	free(mc);
}
920
static void
log_frame_time_diff(int64_t frame_time_ns, int64_t display_time_ns)
{
	// A negative difference means the frame landed after its display time.
	int64_t diff_ns = frame_time_ns - display_time_ns;
	bool late = diff_ns < 0;
	if (late) {
		diff_ns = -diff_ns;
	}

	LOG_FRAME_LAG("Frame %s by %.2fms!", late ? "late" : "early", time_ns_to_ms_f(diff_ns));
}
933
/*!
 * If the scheduled frame is due at (or before) display_time_ns, move it into
 * the delivered slot for the system compositor to pick up. Takes slot_lock;
 * callers hold list_and_timing_lock to keep the client alive.
 */
void
multi_compositor_deliver_any_frames(struct multi_compositor *mc, int64_t display_time_ns)
{
	os_mutex_lock(&mc->slot_lock);

	// Nothing waiting to be displayed.
	if (!mc->scheduled.active) {
		os_mutex_unlock(&mc->slot_lock);
		return;
	}

	if (time_is_greater_then_or_within_half_ms(display_time_ns, mc->scheduled.data.display_time_ns)) {
		slot_move_and_clear_locked(mc, &mc->delivered, &mc->scheduled);

		// Log when the frame missed its intended display time.
		int64_t frame_time_ns = mc->delivered.data.display_time_ns;
		if (!time_is_within_half_ms(frame_time_ns, display_time_ns)) {
			log_frame_time_diff(frame_time_ns, display_time_ns);
		}
	}

	os_mutex_unlock(&mc->slot_lock);
}
955
/*!
 * Tell the pacer that the delivered frame was latched for display at when_ns,
 * need to have the list_and_timing_lock held.
 */
void
multi_compositor_latch_frame_locked(struct multi_compositor *mc, int64_t when_ns, int64_t system_frame_id)
{
	u_pa_latched(mc->upa, mc->delivered.data.frame_id, when_ns, system_frame_id);
}
961
/*!
 * Retire and clear the delivered slot, need to have the list_and_timing_lock
 * held. NOTE(review): when_ns is currently unused — confirm that is intended.
 */
void
multi_compositor_retire_delivered_locked(struct multi_compositor *mc, int64_t when_ns)
{
	slot_clear_locked(mc, &mc->delivered);
}
967
968xrt_result_t
969multi_compositor_create(struct multi_system_compositor *msc,
970 const struct xrt_session_info *xsi,
971 struct xrt_session_event_sink *xses,
972 struct xrt_compositor_native **out_xcn)
973{
974 COMP_TRACE_MARKER();
975
976 struct multi_compositor *mc = U_TYPED_CALLOC(struct multi_compositor);
977
978 mc->base.base.get_swapchain_create_properties = multi_compositor_get_swapchain_create_properties;
979 mc->base.base.create_swapchain = multi_compositor_create_swapchain;
980 mc->base.base.import_swapchain = multi_compositor_import_swapchain;
981 mc->base.base.import_fence = multi_compositor_import_fence;
982 mc->base.base.create_semaphore = multi_compositor_create_semaphore;
983 mc->base.base.begin_session = multi_compositor_begin_session;
984 mc->base.base.end_session = multi_compositor_end_session;
985 mc->base.base.predict_frame = multi_compositor_predict_frame;
986 mc->base.base.mark_frame = multi_compositor_mark_frame;
987 mc->base.base.wait_frame = multi_compositor_wait_frame;
988 mc->base.base.begin_frame = multi_compositor_begin_frame;
989 mc->base.base.discard_frame = multi_compositor_discard_frame;
990 mc->base.base.layer_begin = multi_compositor_layer_begin;
991 mc->base.base.layer_projection = multi_compositor_layer_projection;
992 mc->base.base.layer_projection_depth = multi_compositor_layer_projection_depth;
993 mc->base.base.layer_quad = multi_compositor_layer_quad;
994 mc->base.base.layer_cube = multi_compositor_layer_cube;
995 mc->base.base.layer_cylinder = multi_compositor_layer_cylinder;
996 mc->base.base.layer_equirect1 = multi_compositor_layer_equirect1;
997 mc->base.base.layer_equirect2 = multi_compositor_layer_equirect2;
998 mc->base.base.layer_commit = multi_compositor_layer_commit;
999 mc->base.base.layer_commit_with_semaphore = multi_compositor_layer_commit_with_semaphore;
1000 mc->base.base.destroy = multi_compositor_destroy;
1001 mc->base.base.set_thread_hint = multi_compositor_set_thread_hint;
1002 mc->base.base.get_display_refresh_rate = multi_compositor_get_display_refresh_rate;
1003 mc->base.base.request_display_refresh_rate = multi_compositor_request_display_refresh_rate;
1004 mc->msc = msc;
1005 mc->xses = xses;
1006 mc->xsi = *xsi;
1007
1008 os_mutex_init(&mc->slot_lock);
1009 os_thread_helper_init(&mc->wait_thread.oth);
1010
1011 // Passthrough our formats from the native compositor to the client.
1012 mc->base.base.info = msc->xcn->base.info;
1013
1014 // Used in wait frame.
1015 os_precise_sleeper_init(&mc->frame_sleeper);
1016
1017 // Used in scheduled waiting function.
1018 os_precise_sleeper_init(&mc->scheduled_sleeper);
1019
1020 // This is safe to do without a lock since we are not on the list yet.
1021 u_paf_create(msc->upaf, &mc->upa);
1022
1023 os_mutex_lock(&msc->list_and_timing_lock);
1024
1025 // If we have too many clients, just ignore it.
1026 for (size_t i = 0; i < MULTI_MAX_CLIENTS; i++) {
1027 if (mc->msc->clients[i] != NULL) {
1028 continue;
1029 }
1030 mc->msc->clients[i] = mc;
1031 break;
1032 }
1033
1034 u_pa_info( //
1035 mc->upa, //
1036 msc->last_timings.predicted_display_time_ns, //
1037 msc->last_timings.predicted_display_period_ns, //
1038 msc->last_timings.diff_ns); //
1039
1040 os_mutex_unlock(&msc->list_and_timing_lock);
1041
1042 // Last start the wait thread.
1043 os_thread_helper_start(&mc->wait_thread.oth, run_func, mc);
1044
1045 os_thread_helper_lock(&mc->wait_thread.oth);
1046
1047 // Wait for the wait thread to fully start.
1048 while (!mc->wait_thread.alive) {
1049 os_thread_helper_wait_locked(&mc->wait_thread.oth);
1050 }
1051
1052 os_thread_helper_unlock(&mc->wait_thread.oth);
1053
1054#ifdef XRT_OS_ANDROID
1055 mc->current_refresh_rate_hz =
1056 android_custom_surface_get_display_refresh_rate(android_globals_get_vm(), android_globals_get_context());
1057#endif
1058
1059 *out_xcn = &mc->base;
1060
1061 return XRT_SUCCESS;
1062}