// The open source OpenXR runtime.
1// Copyright 2019-2024, Collabora, Ltd.
2// Copyright 2025, NVIDIA CORPORATION.
3// SPDX-License-Identifier: BSL-1.0
4/*!
5 * @file
6 * @brief Multi client wrapper compositor.
7 * @author Pete Black <pblack@collabora.com>
8 * @author Jakob Bornecrantz <jakob@collabora.com>
9 * @author Korcan Hussein <korcan.hussein@collabora.com>
10 * @ingroup comp_multi
11 */
12
13#include "xrt/xrt_config_os.h"
14#include "xrt/xrt_session.h"
15
16#include "os/os_time.h"
17#include "os/os_threading.h"
18
19#include "util/u_var.h"
20#include "util/u_misc.h"
21#include "util/u_time.h"
22#include "util/u_wait.h"
23#include "util/u_debug.h"
24#include "util/u_trace_marker.h"
25#include "util/u_distortion_mesh.h"
26
27#ifdef XRT_OS_LINUX
28#include "util/u_linux.h"
29#endif
30
31#include "multi/comp_multi_private.h"
32#include "multi/comp_multi_interface.h"
33
34#include <math.h>
35#include <stdio.h>
36#include <assert.h>
37#include <stdarg.h>
38#include <stdlib.h>
39#include <string.h>
40
41#ifdef XRT_GRAPHICS_SYNC_HANDLE_IS_FD
42#include <unistd.h>
43#endif
44
45
46/*
47 *
48 * Render thread.
49 *
50 */
51
52static void
53do_projection_layer(struct xrt_compositor *xc, struct multi_compositor *mc, struct multi_layer_entry *layer, uint32_t i)
54{
55 struct xrt_device *xdev = layer->xdev;
56
57 // Cast away
58 struct xrt_layer_data *data = (struct xrt_layer_data *)&layer->data;
59
60 // Do not need to copy the reference, but should verify the pointers for consistency
61 for (uint32_t j = 0; j < data->view_count; j++) {
62 if (layer->xscs[j] == NULL) {
63 U_LOG_E("Invalid swap chain for projection layer #%u!", i);
64 return;
65 }
66 }
67
68 if (xdev == NULL) {
69 U_LOG_E("Invalid xdev for projection layer #%u!", i);
70 return;
71 }
72
73 xrt_comp_layer_projection(xc, xdev, layer->xscs, data);
74}
75
76static void
77do_projection_layer_depth(struct xrt_compositor *xc,
78 struct multi_compositor *mc,
79 struct multi_layer_entry *layer,
80 uint32_t i)
81{
82 struct xrt_device *xdev = layer->xdev;
83
84 struct xrt_swapchain *xsc[XRT_MAX_VIEWS];
85 struct xrt_swapchain *d_xsc[XRT_MAX_VIEWS];
86 // Cast away
87 struct xrt_layer_data *data = (struct xrt_layer_data *)&layer->data;
88
89 for (uint32_t j = 0; j < data->view_count; j++) {
90 xsc[j] = layer->xscs[j];
91 d_xsc[j] = layer->xscs[j + data->view_count];
92
93 if (xsc[j] == NULL || d_xsc[j] == NULL) {
94 U_LOG_E("Invalid swap chain for projection layer #%u!", i);
95 return;
96 }
97 }
98
99 if (xdev == NULL) {
100 U_LOG_E("Invalid xdev for projection layer #%u!", i);
101 return;
102 }
103
104
105 xrt_comp_layer_projection_depth(xc, xdev, xsc, d_xsc, data);
106}
107
108static bool
109do_single(struct xrt_compositor *xc,
110 struct multi_compositor *mc,
111 struct multi_layer_entry *layer,
112 uint32_t i,
113 const char *name,
114 struct xrt_device **out_xdev,
115 struct xrt_swapchain **out_xcs,
116 struct xrt_layer_data **out_data)
117{
118 struct xrt_device *xdev = layer->xdev;
119 struct xrt_swapchain *xcs = layer->xscs[0];
120
121 if (xcs == NULL) {
122 U_LOG_E("Invalid swapchain for layer #%u '%s'!", i, name);
123 return false;
124 }
125
126 if (xdev == NULL) {
127 U_LOG_E("Invalid xdev for layer #%u '%s'!", i, name);
128 return false;
129 }
130
131 // Cast away
132 struct xrt_layer_data *data = (struct xrt_layer_data *)&layer->data;
133
134 *out_xdev = xdev;
135 *out_xcs = xcs;
136 *out_data = data;
137
138 return true;
139}
140
141static void
142do_quad_layer(struct xrt_compositor *xc, struct multi_compositor *mc, struct multi_layer_entry *layer, uint32_t i)
143{
144 struct xrt_device *xdev = NULL;
145 struct xrt_swapchain *xcs = NULL;
146 struct xrt_layer_data *data = NULL;
147
148 if (!do_single(xc, mc, layer, i, "quad", &xdev, &xcs, &data)) {
149 return;
150 }
151
152 xrt_comp_layer_quad(xc, xdev, xcs, data);
153}
154
155static void
156do_cube_layer(struct xrt_compositor *xc, struct multi_compositor *mc, struct multi_layer_entry *layer, uint32_t i)
157{
158 struct xrt_device *xdev = NULL;
159 struct xrt_swapchain *xcs = NULL;
160 struct xrt_layer_data *data = NULL;
161
162 if (!do_single(xc, mc, layer, i, "cube", &xdev, &xcs, &data)) {
163 return;
164 }
165
166 xrt_comp_layer_cube(xc, xdev, xcs, data);
167}
168
169static void
170do_cylinder_layer(struct xrt_compositor *xc, struct multi_compositor *mc, struct multi_layer_entry *layer, uint32_t i)
171{
172 struct xrt_device *xdev = NULL;
173 struct xrt_swapchain *xcs = NULL;
174 struct xrt_layer_data *data = NULL;
175
176 if (!do_single(xc, mc, layer, i, "cylinder", &xdev, &xcs, &data)) {
177 return;
178 }
179
180 xrt_comp_layer_cylinder(xc, xdev, xcs, data);
181}
182
183static void
184do_equirect1_layer(struct xrt_compositor *xc, struct multi_compositor *mc, struct multi_layer_entry *layer, uint32_t i)
185{
186 struct xrt_device *xdev = NULL;
187 struct xrt_swapchain *xcs = NULL;
188 struct xrt_layer_data *data = NULL;
189
190 if (!do_single(xc, mc, layer, i, "equirect1", &xdev, &xcs, &data)) {
191 return;
192 }
193
194 xrt_comp_layer_equirect1(xc, xdev, xcs, data);
195}
196
197static void
198do_equirect2_layer(struct xrt_compositor *xc, struct multi_compositor *mc, struct multi_layer_entry *layer, uint32_t i)
199{
200 struct xrt_device *xdev = NULL;
201 struct xrt_swapchain *xcs = NULL;
202 struct xrt_layer_data *data = NULL;
203
204 if (!do_single(xc, mc, layer, i, "equirect2", &xdev, &xcs, &data)) {
205 return;
206 }
207
208 xrt_comp_layer_equirect2(xc, xdev, xcs, data);
209}
210
211static int
212overlay_sort_func(const void *a, const void *b)
213{
214 struct multi_compositor *mc_a = *(struct multi_compositor **)a;
215 struct multi_compositor *mc_b = *(struct multi_compositor **)b;
216
217 if (mc_a->state.z_order < mc_b->state.z_order) {
218 return -1;
219 }
220
221 if (mc_a->state.z_order > mc_b->state.z_order) {
222 return 1;
223 }
224
225 return 0;
226}
227
228static enum xrt_blend_mode
229find_active_blend_mode(struct multi_compositor **overlay_sorted_clients, size_t size)
230{
231 if (overlay_sorted_clients == NULL)
232 return XRT_BLEND_MODE_OPAQUE;
233
234 const struct multi_compositor *first_visible = NULL;
235 for (size_t k = 0; k < size; ++k) {
236 const struct multi_compositor *mc = overlay_sorted_clients[k];
237 assert(mc != NULL);
238
239 // if a focused client is found just return, "first_visible" has lower priority and can be ignored.
240 if (mc->state.focused) {
241 assert(mc->state.visible);
242 return mc->delivered.data.env_blend_mode;
243 }
244
245 if (first_visible == NULL && mc->state.visible) {
246 first_visible = mc;
247 }
248 }
249 if (first_visible != NULL)
250 return first_visible->delivered.data.env_blend_mode;
251 return XRT_BLEND_MODE_OPAQUE;
252}
253
/*!
 * Deliver, latch and submit the layers of all visible clients to the native
 * compositor for this frame, bottom to top by z-order.
 *
 * Must be called with msc->list_and_timing_lock held (hence the _locked
 * suffix); the latch and retire helpers below rely on it.
 */
static void
transfer_layers_locked(struct multi_system_compositor *msc, int64_t display_time_ns, int64_t system_frame_id)
{
	COMP_TRACE_MARKER();

	struct xrt_compositor *xc = &msc->xcn->base;

	struct multi_compositor *array[MULTI_MAX_CLIENTS] = {0};

	// To mark latching.
	int64_t now_ns = os_monotonic_get_ns();

	// Collect the clients that should contribute layers this frame.
	size_t count = 0;
	for (size_t k = 0; k < ARRAY_SIZE(array); k++) {
		struct multi_compositor *mc = msc->clients[k];

		// Array can be empty.
		if (mc == NULL) {
			continue;
		}

		// Even if it's not shown, make sure that frames are delivered.
		multi_compositor_deliver_any_frames(mc, display_time_ns);

		// None of the data in this slot is valid, don't access it.
		if (!mc->delivered.active) {
			continue;
		}

		// The client isn't visible, do not submit its layers.
		if (!mc->state.visible) {
			// Need to drop delivered frame as it shouldn't be reused.
			multi_compositor_retire_delivered_locked(mc, now_ns);
			continue;
		}

		// Just in case.
		if (!mc->state.session_active) {
			U_LOG_W("Session is visible but not active.");

			// Need to drop delivered frame as it shouldn't be reused.
			multi_compositor_retire_delivered_locked(mc, now_ns);
			continue;
		}

		// The list_and_timing_lock is held when calling this function.
		multi_compositor_latch_frame_locked(mc, now_ns, system_frame_id);

		array[count++] = msc->clients[k];
	}

	// Sort the stack array by z-order, bottom first.
	qsort(array, count, sizeof(struct multi_compositor *), overlay_sort_func);

	// find first (ordered by bottom to top) active client to retrieve xrt_layer_frame_data
	const enum xrt_blend_mode blend_mode = find_active_blend_mode(array, count);

	const struct xrt_layer_frame_data data = {
	    .frame_id = system_frame_id,
	    .display_time_ns = display_time_ns,
	    .env_blend_mode = blend_mode,
	};
	xrt_comp_layer_begin(xc, &data);

	// Copy all active layers.
	for (size_t k = 0; k < count; k++) {
		struct multi_compositor *mc = array[k];
		assert(mc != NULL);

		for (uint32_t i = 0; i < mc->delivered.layer_count; i++) {
			struct multi_layer_entry *layer = &mc->delivered.layers[i];

			switch (layer->data.type) {
			case XRT_LAYER_PROJECTION: do_projection_layer(xc, mc, layer, i); break;
			case XRT_LAYER_PROJECTION_DEPTH: do_projection_layer_depth(xc, mc, layer, i); break;
			case XRT_LAYER_QUAD: do_quad_layer(xc, mc, layer, i); break;
			case XRT_LAYER_CUBE: do_cube_layer(xc, mc, layer, i); break;
			case XRT_LAYER_CYLINDER: do_cylinder_layer(xc, mc, layer, i); break;
			case XRT_LAYER_EQUIRECT1: do_equirect1_layer(xc, mc, layer, i); break;
			case XRT_LAYER_EQUIRECT2: do_equirect2_layer(xc, mc, layer, i); break;
			default: U_LOG_E("Unhandled layer type '%i'!", layer->data.type); break;
			}
		}
	}
}
339
340static void
341broadcast_timings_to_clients(struct multi_system_compositor *msc, int64_t predicted_display_time_ns)
342{
343 COMP_TRACE_MARKER();
344
345 os_mutex_lock(&msc->list_and_timing_lock);
346
347 for (size_t i = 0; i < ARRAY_SIZE(msc->clients); i++) {
348 struct multi_compositor *mc = msc->clients[i];
349 if (mc == NULL) {
350 continue;
351 }
352
353 os_mutex_lock(&mc->slot_lock);
354 mc->slot_next_frame_display = predicted_display_time_ns;
355 os_mutex_unlock(&mc->slot_lock);
356 }
357
358 os_mutex_unlock(&msc->list_and_timing_lock);
359}
360
361static void
362broadcast_timings_to_pacers(struct multi_system_compositor *msc,
363 int64_t predicted_display_time_ns,
364 int64_t predicted_display_period_ns,
365 int64_t diff_ns)
366{
367 COMP_TRACE_MARKER();
368
369 os_mutex_lock(&msc->list_and_timing_lock);
370
371 for (size_t i = 0; i < ARRAY_SIZE(msc->clients); i++) {
372 struct multi_compositor *mc = msc->clients[i];
373 if (mc == NULL) {
374 continue;
375 }
376
377 u_pa_info( //
378 mc->upa, //
379 predicted_display_time_ns, //
380 predicted_display_period_ns, //
381 diff_ns); //
382
383 os_mutex_lock(&mc->slot_lock);
384 mc->slot_next_frame_display = predicted_display_time_ns;
385 os_mutex_unlock(&mc->slot_lock);
386 }
387
388 msc->last_timings.predicted_display_time_ns = predicted_display_time_ns;
389 msc->last_timings.predicted_display_period_ns = predicted_display_period_ns;
390 msc->last_timings.diff_ns = diff_ns;
391
392 os_mutex_unlock(&msc->list_and_timing_lock);
393}
394
395static void
396wait_frame(struct os_precise_sleeper *sleeper, struct xrt_compositor *xc, int64_t frame_id, int64_t wake_up_time_ns)
397{
398 COMP_TRACE_MARKER();
399
400 // Wait until the given wake up time.
401 u_wait_until(sleeper, wake_up_time_ns);
402
403 int64_t now_ns = os_monotonic_get_ns();
404
405 // Signal that we woke up.
406 xrt_comp_mark_frame(xc, frame_id, XRT_COMPOSITOR_FRAME_POINT_WOKE, now_ns);
407}
408
/*!
 * Advance the native-session state machine one step, starting or stopping the
 * native compositor session based on the number of active app sessions.
 *
 * Must be called with msc->oth locked (the caller in the render loop holds
 * it); hence the _locked suffix.
 */
static void
update_session_state_locked(struct multi_system_compositor *msc)
{
	struct xrt_compositor *xc = &msc->xcn->base;

	//! @todo Make this not be hardcoded.
	const struct xrt_begin_session_info begin_session_info = {
	    .view_type = XRT_VIEW_TYPE_STEREO,
	    .ext_hand_tracking_enabled = false,
	    .ext_hand_tracking_data_source_enabled = false,
	    .ext_eye_gaze_interaction_enabled = false,
	    .ext_hand_interaction_enabled = false,
	    .htc_facial_tracking_enabled = false,
	    .fb_body_tracking_enabled = false,
	    .fb_face_tracking2_enabled = false,
	    .meta_body_tracking_full_body_enabled = false,
	    .meta_body_tracking_calibration_enabled = false,
	};

	switch (msc->sessions.state) {
	case MULTI_SYSTEM_STATE_INIT_WARM_START:
		// Produce at least one frame on init: begin the session but
		// immediately move towards STOPPING so it winds down unless
		// a client shows up.
		msc->sessions.state = MULTI_SYSTEM_STATE_STOPPING;
		xrt_comp_begin_session(xc, &begin_session_info);
		U_LOG_I("Doing warm start, %u active app session(s).", (uint32_t)msc->sessions.active_count);
		break;

	case MULTI_SYSTEM_STATE_STOPPED:
		// Stay stopped until at least one app session is active.
		if (msc->sessions.active_count == 0) {
			break;
		}

		msc->sessions.state = MULTI_SYSTEM_STATE_RUNNING;
		xrt_comp_begin_session(xc, &begin_session_info);
		U_LOG_I("Started native session, %u active app session(s).", (uint32_t)msc->sessions.active_count);
		break;

	case MULTI_SYSTEM_STATE_RUNNING:
		// Keep running while any app session is active.
		if (msc->sessions.active_count > 0) {
			break;
		}

		// No clients left: wind down (end_session happens in STOPPING).
		msc->sessions.state = MULTI_SYSTEM_STATE_STOPPING;
		U_LOG_D("Stopping native session, %u active app session(s).", (uint32_t)msc->sessions.active_count);
		break;

	case MULTI_SYSTEM_STATE_STOPPING:
		// Just in case a client appeared while we were stopping.
		if (msc->sessions.active_count > 0) {
			msc->sessions.state = MULTI_SYSTEM_STATE_RUNNING;
			U_LOG_D("Restarting native session, %u active app session(s).",
			        (uint32_t)msc->sessions.active_count);
			break;
		}

		msc->sessions.state = MULTI_SYSTEM_STATE_STOPPED;
		xrt_comp_end_session(xc);
		U_LOG_I("Stopped native session, %u active app session(s).", (uint32_t)msc->sessions.active_count);
		break;

	case MULTI_SYSTEM_STATE_INVALID:
	default:
		// Should never happen; recover towards STOPPING in release builds.
		U_LOG_E("Got invalid state %u", msc->sessions.state);
		msc->sessions.state = MULTI_SYSTEM_STATE_STOPPING;
		assert(false);
	}
}
476
/*!
 * Render-thread main loop: paces frames with the native compositor, fans
 * timing out to the clients and their pacers, and submits the combined layer
 * stack each frame. Runs until the thread helper is told to stop.
 *
 * @return 0 always; the value becomes the thread's exit value via thread_func.
 */
static int
multi_main_loop(struct multi_system_compositor *msc)
{
	U_TRACE_SET_THREAD_NAME("Multi Client Module");
	os_thread_helper_name(&msc->oth, "Multi Client Module");

#ifdef XRT_OS_LINUX
	// Try to raise priority of this thread.
	u_linux_try_to_set_realtime_priority_on_thread(U_LOGGING_INFO, "Multi Client Module");
#endif

	struct xrt_compositor *xc = &msc->xcn->base;

	// For wait frame.
	struct os_precise_sleeper sleeper = {0};
	os_precise_sleeper_init(&sleeper);

	// Protect the thread state and the sessions state.
	os_thread_helper_lock(&msc->oth);

	while (os_thread_helper_is_running_locked(&msc->oth)) {

		// Updates msc->sessions.active depending on active client sessions.
		update_session_state_locked(msc);

		if (msc->sessions.state == MULTI_SYSTEM_STATE_STOPPED) {
			// Sleep and wait to be signaled.
			os_thread_helper_wait_locked(&msc->oth);

			// Loop back to running and session check.
			continue;
		}

		// Unlock the thread after the checks have been done.
		os_thread_helper_unlock(&msc->oth);

		int64_t frame_id = -1;
		int64_t wake_up_time_ns = 0;
		int64_t predicted_gpu_time_ns = 0;
		int64_t predicted_display_time_ns = 0;
		int64_t predicted_display_period_ns = 0;

		// Get the information for the next frame.
		xrt_comp_predict_frame( //
		    xc,                 //
		    &frame_id,          //
		    &wake_up_time_ns,   //
		    &predicted_gpu_time_ns,       //
		    &predicted_display_time_ns,   //
		    &predicted_display_period_ns); //

		// Do this as soon as we have the new display time.
		broadcast_timings_to_clients(msc, predicted_display_time_ns);

		// Now we can wait.
		wait_frame(&sleeper, xc, frame_id, wake_up_time_ns);

		int64_t now_ns = os_monotonic_get_ns();
		int64_t diff_ns = predicted_display_time_ns - now_ns;

		// Now we know the diff, broadcast to pacers.
		broadcast_timings_to_pacers(msc, predicted_display_time_ns, predicted_display_period_ns, diff_ns);

		xrt_comp_begin_frame(xc, frame_id);

		// Make sure that the clients don't go away while we transfer layers.
		os_mutex_lock(&msc->list_and_timing_lock);
		transfer_layers_locked(msc, predicted_display_time_ns, frame_id);
		os_mutex_unlock(&msc->list_and_timing_lock);

		xrt_comp_layer_commit(xc, XRT_GRAPHICS_SYNC_HANDLE_INVALID);

		// Re-lock the thread for check in while statement.
		os_thread_helper_lock(&msc->oth);
	}

	// Clean up the sessions state; end the native session if still begun.
	switch (msc->sessions.state) {
	case MULTI_SYSTEM_STATE_RUNNING:
	case MULTI_SYSTEM_STATE_STOPPING:
		U_LOG_I("Stopped native session, shutting down.");
		xrt_comp_end_session(xc);
		break;
	case MULTI_SYSTEM_STATE_STOPPED: U_LOG_I("Already stopped, nothing to clean up."); break;
	case MULTI_SYSTEM_STATE_INIT_WARM_START:
		U_LOG_I("Cleaning up from warm start state.");
		xrt_comp_end_session(xc);
		break;
	case MULTI_SYSTEM_STATE_INVALID:
		U_LOG_W("Cleaning up from invalid state.");
		// Best effort cleanup
		xrt_comp_end_session(xc);
		break;
	default: U_LOG_E("Unknown session state during cleanup: %d", msc->sessions.state); assert(false);
	}

	os_thread_helper_unlock(&msc->oth);

	os_precise_sleeper_deinit(&sleeper);

	return 0;
}
579
// pthread-style trampoline: the argument is the multi_system_compositor and
// the loop's int result is smuggled out through the void pointer.
static void *
thread_func(void *ptr)
{
	struct multi_system_compositor *msc = (struct multi_system_compositor *)ptr;
	int ret = multi_main_loop(msc);
	return (void *)(intptr_t)ret;
}
585
586
587/*
588 *
589 * System multi compositor functions.
590 *
591 */
592
593static xrt_result_t
594system_compositor_set_state(struct xrt_system_compositor *xsc, struct xrt_compositor *xc, bool visible, bool focused)
595{
596 struct multi_system_compositor *msc = multi_system_compositor(xsc);
597 struct multi_compositor *mc = multi_compositor(xc);
598 (void)msc;
599
600 //! @todo Locking?
601 if (mc->state.visible != visible || mc->state.focused != focused) {
602 mc->state.visible = visible;
603 mc->state.focused = focused;
604
605 union xrt_session_event xse = XRT_STRUCT_INIT;
606 xse.type = XRT_SESSION_EVENT_STATE_CHANGE;
607 xse.state.visible = visible;
608 xse.state.focused = focused;
609
610 return multi_compositor_push_event(mc, &xse);
611 }
612
613 return XRT_SUCCESS;
614}
615
616static xrt_result_t
617system_compositor_set_z_order(struct xrt_system_compositor *xsc, struct xrt_compositor *xc, int64_t z_order)
618{
619 struct multi_system_compositor *msc = multi_system_compositor(xsc);
620 struct multi_compositor *mc = multi_compositor(xc);
621 (void)msc;
622
623 //! @todo Locking?
624 mc->state.z_order = z_order;
625
626 return XRT_SUCCESS;
627}
628
629static xrt_result_t
630system_compositor_set_main_app_visibility(struct xrt_system_compositor *xsc, struct xrt_compositor *xc, bool visible)
631{
632 struct multi_system_compositor *msc = multi_system_compositor(xsc);
633 struct multi_compositor *mc = multi_compositor(xc);
634 (void)msc;
635
636 union xrt_session_event xse = XRT_STRUCT_INIT;
637 xse.type = XRT_SESSION_EVENT_OVERLAY_CHANGE;
638 xse.overlay.visible = visible;
639
640 return multi_compositor_push_event(mc, &xse);
641}
642
643static xrt_result_t
644system_compositor_notify_loss_pending(struct xrt_system_compositor *xsc,
645 struct xrt_compositor *xc,
646 int64_t loss_time_ns)
647{
648 struct multi_system_compositor *msc = multi_system_compositor(xsc);
649 struct multi_compositor *mc = multi_compositor(xc);
650 (void)msc;
651
652 union xrt_session_event xse = XRT_STRUCT_INIT;
653 xse.type = XRT_SESSION_EVENT_LOSS_PENDING;
654 xse.loss_pending.loss_time_ns = loss_time_ns;
655
656 return multi_compositor_push_event(mc, &xse);
657}
658
659static xrt_result_t
660system_compositor_notify_lost(struct xrt_system_compositor *xsc, struct xrt_compositor *xc)
661{
662 struct multi_system_compositor *msc = multi_system_compositor(xsc);
663 struct multi_compositor *mc = multi_compositor(xc);
664 (void)msc;
665
666 union xrt_session_event xse = XRT_STRUCT_INIT;
667 xse.type = XRT_SESSION_EVENT_LOST;
668
669 return multi_compositor_push_event(mc, &xse);
670}
671
672static xrt_result_t
673system_compositor_notify_display_refresh_changed(struct xrt_system_compositor *xsc,
674 struct xrt_compositor *xc,
675 float from_display_refresh_rate_hz,
676 float to_display_refresh_rate_hz)
677{
678 struct multi_system_compositor *msc = multi_system_compositor(xsc);
679 struct multi_compositor *mc = multi_compositor(xc);
680 (void)msc;
681
682 union xrt_session_event xse = XRT_STRUCT_INIT;
683 xse.type = XRT_SESSION_EVENT_DISPLAY_REFRESH_RATE_CHANGE;
684 xse.display.from_display_refresh_rate_hz = from_display_refresh_rate_hz;
685 xse.display.to_display_refresh_rate_hz = to_display_refresh_rate_hz;
686
687 return multi_compositor_push_event(mc, &xse);
688}
689
690
691/*
692 *
693 * System compositor functions.
694 *
695 */
696
697static xrt_result_t
698system_compositor_create_native_compositor(struct xrt_system_compositor *xsc,
699 const struct xrt_session_info *xsi,
700 struct xrt_session_event_sink *xses,
701 struct xrt_compositor_native **out_xcn)
702{
703 struct multi_system_compositor *msc = multi_system_compositor(xsc);
704
705 return multi_compositor_create(msc, xsi, xses, out_xcn);
706}
707
/*!
 * Tear down the multi system compositor.
 *
 * Order matters: the render thread is stopped first so nothing touches the
 * native compositor or the client list while they are being destroyed.
 */
static void
system_compositor_destroy(struct xrt_system_compositor *xsc)
{
	struct multi_system_compositor *msc = multi_system_compositor(xsc);

	// Destroy the render thread first, destroy also stops the thread.
	os_thread_helper_destroy(&msc->oth);

	// The app pacing factory was owned by us, see create.
	u_paf_destroy(&msc->upaf);

	xrt_comp_native_destroy(&msc->xcn);

	os_mutex_destroy(&msc->list_and_timing_lock);

	free(msc);
}
724
725
726/*
727 *
728 * 'Exported' functions.
729 *
730 */
731
732void
733multi_system_compositor_update_session_status(struct multi_system_compositor *msc, bool active)
734{
735 os_thread_helper_lock(&msc->oth);
736
737 if (active) {
738 assert(msc->sessions.active_count < UINT32_MAX);
739 msc->sessions.active_count++;
740
741 // If the thread is sleeping wake it up.
742 os_thread_helper_signal_locked(&msc->oth);
743 } else {
744 assert(msc->sessions.active_count > 0);
745 msc->sessions.active_count--;
746 }
747
748 os_thread_helper_unlock(&msc->oth);
749}
750
751xrt_result_t
752comp_multi_create_system_compositor(struct xrt_compositor_native *xcn,
753 struct u_pacing_app_factory *upaf,
754 const struct xrt_system_compositor_info *xsci,
755 bool do_warm_start,
756 struct xrt_system_compositor **out_xsysc)
757{
758 struct multi_system_compositor *msc = U_TYPED_CALLOC(struct multi_system_compositor);
759 msc->base.create_native_compositor = system_compositor_create_native_compositor;
760 msc->base.destroy = system_compositor_destroy;
761 msc->xmcc.set_state = system_compositor_set_state;
762 msc->xmcc.set_z_order = system_compositor_set_z_order;
763 msc->xmcc.set_main_app_visibility = system_compositor_set_main_app_visibility;
764 msc->xmcc.notify_loss_pending = system_compositor_notify_loss_pending;
765 msc->xmcc.notify_lost = system_compositor_notify_lost;
766 msc->xmcc.notify_display_refresh_changed = system_compositor_notify_display_refresh_changed;
767 msc->base.xmcc = &msc->xmcc;
768 msc->base.info = *xsci;
769 msc->upaf = upaf;
770 msc->xcn = xcn;
771 msc->sessions.active_count = 0;
772 msc->sessions.state = do_warm_start ? MULTI_SYSTEM_STATE_INIT_WARM_START : MULTI_SYSTEM_STATE_STOPPED;
773
774 os_mutex_init(&msc->list_and_timing_lock);
775
776 //! @todo Make the clients not go from IDLE to READY before we have completed a first frame.
777 // Make sure there is at least some sort of valid frame data here.
778 msc->last_timings.predicted_display_time_ns = os_monotonic_get_ns(); // As good as any time.
779 msc->last_timings.predicted_display_period_ns = U_TIME_1MS_IN_NS * 16; // Just a wild guess.
780 msc->last_timings.diff_ns = U_TIME_1MS_IN_NS * 5; // Make sure it's not zero at least.
781
782 int ret = os_thread_helper_init(&msc->oth);
783 if (ret < 0) {
784 return XRT_ERROR_THREADING_INIT_FAILURE;
785 }
786
787 os_thread_helper_start(&msc->oth, thread_func, msc);
788
789 *out_xsysc = &msc->base;
790
791 return XRT_SUCCESS;
792}