// Monado - the open source OpenXR runtime.
1// Copyright 2019-2024, Collabora, Ltd.
2// Copyright 2025, NVIDIA CORPORATION.
3// SPDX-License-Identifier: BSL-1.0
4/*!
5 * @file
6 * @brief Main compositor written using Vulkan implementation.
7 * @author Jakob Bornecrantz <jakob@collabora.com>
8 * @author Lubosz Sarnecki <lubosz.sarnecki@collabora.com>
9 * @author Rylie Pavlik <rylie.pavlik@collabora.com>
10 * @author Moshi Turner <moshiturner@protonmail.com>
11 * @ingroup comp_main
12 *
13 *
14 * begin_frame and end_frame delimit the application's work on graphics for a
15 * single frame. end_frame updates our estimate of the current estimated app
16 * graphics duration, as well as the "swap interval" for scheduling the
17 * application.
18 *
19 * We have some known overhead work required to composite a frame: eventually
20 * this may be measured as well. Overhead plus the estimated app render duration
21 * is compared to the frame duration: if it's longer, then we go to a "swap
22 * interval" of 2.
23 *
24 * wait_frame must be the one to produce the next predicted display time,
25 * because we cannot distinguish two sequential wait_frame calls (an app
26 * skipping a frame) from an OS scheduling blip causing the second wait_frame to
27 * happen before the first begin_frame actually gets executed. It cannot use the
28 * last display time in this computation for this reason. (Except perhaps to
29 * align the period at a sub-frame level? e.g. should be a multiple of the frame
30 * duration after the last displayed time).
31 *
32 * wait_frame should not actually produce the predicted display time until it's
33 * done waiting: it should wake up once a frame and see what the current swap
34 * interval suggests: this handles the case where end_frame changes the swap
35 * interval from 2 to 1 during a wait_frame call. (That is, we should wait until
36 * whichever is closer of the next vsync or the time we currently predict we
37 * should release the app.)
38 *
39 * Sleeping can be a bit hairy: in general right now we'll use a combination of
40 * operating system sleeps and busy-waits (for fine-grained waiting). Some
41 * platforms provide vsync-related sync primitives that may get us closer to our
42 * desired time. This is also convenient for the "wait until next frame"
43 * behavior.
44 */
45
46#include "xrt/xrt_compiler.h"
47#include "xrt/xrt_compositor.h"
48#include "xrt/xrt_config_have.h"
49#include "xrt/xrt_results.h"
50
51#include "math/m_api.h"
52
53#include "os/os_time.h"
54
55#include "util/u_var.h"
56#include "util/u_misc.h"
57#include "util/u_time.h"
58#include "util/u_debug.h"
59#include "util/u_pacing.h"
60#include "util/u_handles.h"
61#include "util/u_trace_marker.h"
62#include "util/u_pretty_print.h"
63#include "util/u_distortion_mesh.h"
64#include "util/u_verify.h"
65
66#include "util/comp_vulkan.h"
67#include "main/comp_compositor.h"
68#include "main/comp_frame.h"
69
70#ifdef XRT_FEATURE_WINDOW_PEEK
71#include "main/comp_window_peek.h"
72#endif
73
74#include "multi/comp_multi_interface.h"
75
76#include <math.h>
77#include <stdio.h>
78#include <assert.h>
79#include <stdarg.h>
80#include <stdlib.h>
81#include <string.h>
82
83#ifdef XRT_GRAPHICS_SYNC_HANDLE_IS_FD
84#include <unistd.h>
85#endif
86
87#ifdef XRT_OS_ANDROID
88#include "android/android_custom_surface.h"
89#include "android/android_globals.h"
90#include <dlfcn.h>
91#endif
92
93#define WINDOW_TITLE "Monado"
94
95DEBUG_GET_ONCE_BOOL_OPTION(disable_deferred, "XRT_COMPOSITOR_DISABLE_DEFERRED", false)
96
97
98/*
99 *
100 * Helper functions.
101 *
102 */
103
static double
ns_to_ms(int64_t ns)
{
	// Nanoseconds to fractional milliseconds (two exact divisions by 1000).
	return ((double)ns) / 1000. / 1000.;
}
110
static double
ts_ms(void)
{
	// Current monotonic clock timestamp, expressed in milliseconds.
	return ns_to_ms(os_monotonic_get_ns());
}
117
118static struct vk_bundle *
119get_vk(struct comp_compositor *c)
120{
121 return &c->base.vk;
122}
123
124
125/*
126 *
127 * Compositor functions.
128 *
129 */
130
131static bool
132compositor_init_window_post_vulkan(struct comp_compositor *c);
133static bool
134compositor_init_swapchain(struct comp_compositor *c);
135static bool
136compositor_init_renderer(struct comp_compositor *c);
137
138static xrt_result_t
139compositor_begin_session(struct xrt_compositor *xc, const struct xrt_begin_session_info *info)
140{
141 struct comp_compositor *c = comp_compositor(xc);
142 COMP_DEBUG(c, "BEGIN_SESSION");
143
144 // clang-format off
145 if (c->deferred_surface) {
146 if (!compositor_init_window_post_vulkan(c) ||
147 !compositor_init_swapchain(c) ||
148 !compositor_init_renderer(c)) {
149 COMP_ERROR(c, "Failed to init compositor %p", (void *)c);
150 c->base.base.base.destroy(&c->base.base.base);
151
152 return XRT_ERROR_VULKAN;
153 }
154 comp_target_set_title(c->target, WINDOW_TITLE);
155 comp_renderer_add_debug_vars(c->r);
156 }
157 // clang-format on
158
159 return XRT_SUCCESS;
160}
161
162static xrt_result_t
163compositor_end_session(struct xrt_compositor *xc)
164{
165 struct comp_compositor *c = comp_compositor(xc);
166 COMP_DEBUG(c, "END_SESSION");
167
168 if (c->deferred_surface) {
169 // Make sure we don't have anything to destroy.
170 comp_swapchain_shared_garbage_collect(&c->base.cscs);
171 comp_renderer_destroy(&c->r);
172#ifdef XRT_FEATURE_WINDOW_PEEK
173 comp_window_peek_destroy(&c->peek);
174#endif
175 comp_target_destroy(&c->target);
176 }
177
178 return XRT_SUCCESS;
179}
180
/*!
 * Produce pacing information for the next frame: wake-up time, desired
 * present time and predicted display time, and stash it on the waited
 * frame slot for later stages of the frame lifecycle.
 */
static xrt_result_t
compositor_predict_frame(struct xrt_compositor *xc,
                         int64_t *out_frame_id,
                         int64_t *out_wake_time_ns,
                         int64_t *out_predicted_gpu_time_ns,
                         int64_t *out_predicted_display_time_ns,
                         int64_t *out_predicted_display_period_ns)
{
	COMP_TRACE_MARKER();

	struct comp_compositor *c = comp_compositor(xc);

	COMP_SPEW(c, "PREDICT_FRAME");

	// Refresh the target's timing data before asking it to predict.
	comp_target_update_timings(c->target);

	// There must not already be a waited-but-unconsumed frame.
	assert(comp_frame_is_invalid_locked(&c->frame.waited));

	int64_t frame_id = -1;
	int64_t wake_up_time_ns = 0;
	int64_t present_slop_ns = 0;
	int64_t desired_present_time_ns = 0;
	int64_t predicted_display_time_ns = 0;
	comp_target_calc_frame_pacing( //
	    c->target, //
	    &frame_id, //
	    &wake_up_time_ns, //
	    &desired_present_time_ns, //
	    &present_slop_ns, //
	    &predicted_display_time_ns); //

	// Remember the pacing values on the waited frame slot.
	c->frame.waited.id = frame_id;
	c->frame.waited.desired_present_time_ns = desired_present_time_ns;
	c->frame.waited.present_slop_ns = present_slop_ns;
	c->frame.waited.predicted_display_time_ns = predicted_display_time_ns;

	*out_frame_id = frame_id;
	*out_wake_time_ns = wake_up_time_ns;
	*out_predicted_gpu_time_ns = desired_present_time_ns; // Not quite right but close enough.
	*out_predicted_display_time_ns = predicted_display_time_ns;
	*out_predicted_display_period_ns = c->frame_interval_ns;

	return XRT_SUCCESS;
}
225
226static xrt_result_t
227compositor_mark_frame(struct xrt_compositor *xc,
228 int64_t frame_id,
229 enum xrt_compositor_frame_point point,
230 int64_t when_ns)
231{
232 COMP_TRACE_MARKER();
233
234 struct comp_compositor *c = comp_compositor(xc);
235
236 COMP_SPEW(c, "MARK_FRAME %i", point);
237
238 switch (point) {
239 case XRT_COMPOSITOR_FRAME_POINT_WOKE:
240 comp_target_mark_wake_up(c->target, frame_id, when_ns);
241 return XRT_SUCCESS;
242 default: assert(false);
243 }
244 return XRT_ERROR_VULKAN;
245}
246
247static xrt_result_t
248compositor_begin_frame(struct xrt_compositor *xc, int64_t frame_id)
249{
250 struct comp_compositor *c = comp_compositor(xc);
251 COMP_SPEW(c, "BEGIN_FRAME");
252 c->app_profiling.last_begin = os_monotonic_get_ns();
253 return XRT_SUCCESS;
254}
255
256static xrt_result_t
257compositor_discard_frame(struct xrt_compositor *xc, int64_t frame_id)
258{
259 struct comp_compositor *c = comp_compositor(xc);
260 COMP_SPEW(c, "DISCARD_FRAME at %8.3fms", ts_ms());
261 return XRT_SUCCESS;
262}
263
264/*!
265 * We have a fast path for single projection layer that goes directly
266 * to the distortion shader, so no need to use the layer renderer.
267 */
268static bool
269can_do_one_projection_layer_fast_path(struct comp_compositor *c)
270{
271 if (c->base.layer_accum.layer_count != 1) {
272 return false;
273 }
274
275 struct comp_layer *layer = &c->base.layer_accum.layers[0];
276 enum xrt_layer_type type = layer->data.type;
277
278 // Handled by the distortion shader.
279 return type == XRT_LAYER_PROJECTION || //
280 type == XRT_LAYER_PROJECTION_DEPTH;
281}
282
283static XRT_CHECK_RESULT xrt_result_t
284compositor_layer_commit(struct xrt_compositor *xc, xrt_graphics_sync_handle_t sync_handle)
285{
286 COMP_TRACE_MARKER();
287
288 struct comp_compositor *c = comp_compositor(xc);
289
290 COMP_SPEW(c, "LAYER_COMMIT at %8.3fms", ts_ms());
291
292 /*
293 * We have a fast path for single projection layer that goes directly
294 * to the distortion shader, so no need to use the layer renderer.
295 */
296 bool fast_path = //
297 !c->peek && //
298 !c->mirroring_to_debug_gui && //
299 !c->debug.disable_fast_path && //
300 can_do_one_projection_layer_fast_path(c); //
301 c->base.frame_params.one_projection_layer_fast_path = fast_path;
302
303
304 u_graphics_sync_unref(&sync_handle);
305
306 // Do the drawing
307 xrt_result_t xret = comp_renderer_draw(c->r);
308 if (xret != XRT_SUCCESS) {
309 return xret;
310 }
311
312 u_frame_times_widget_push_sample(&c->compositor_frame_times, os_monotonic_get_ns());
313
314 // Record the time of this frame.
315 c->last_frame_time_ns = os_monotonic_get_ns();
316 c->app_profiling.last_end = c->last_frame_time_ns;
317
318
319 COMP_SPEW(c, "LAYER_COMMIT finished drawing at %8.3fms", ns_to_ms(c->last_frame_time_ns));
320
321 // Now is a good point to garbage collect.
322 comp_swapchain_shared_garbage_collect(&c->base.cscs);
323
324 return XRT_SUCCESS;
325}
326
/*!
 * Report the display's current refresh rate in Hz.
 *
 * On Android the rate comes from the OS surface; elsewhere it is queried
 * from the target when supported, otherwise derived from the nominal
 * frame interval.
 */
static xrt_result_t
compositor_get_display_refresh_rate(struct xrt_compositor *xc, float *out_display_refresh_rate_hz)
{
#ifdef XRT_OS_ANDROID
	*out_display_refresh_rate_hz =
	    android_custom_surface_get_display_refresh_rate(android_globals_get_vm(), android_globals_get_context());
#else
	struct comp_compositor *c = comp_compositor(xc);

	if (c->target && c->target->get_current_refresh_rate) {
		return comp_target_get_current_refresh_rate(c->target, out_display_refresh_rate_hz);
	} else {
		// Fallback: derive the rate from the frame interval.
		*out_display_refresh_rate_hz = (float)(1. / time_ns_to_s(c->frame_interval_ns));
	}
#endif

	return XRT_SUCCESS;
}
345
346static xrt_result_t
347compositor_request_display_refresh_rate(struct xrt_compositor *xc, float display_refresh_rate_hz)
348{
349#ifdef XRT_OS_ANDROID
350 typedef int32_t (*PF_SETFRAMERATE)(ANativeWindow * window, float frameRate, int8_t compatibility);
351
352 // Note that this will just increment the reference count, rather than actually load it again,
353 // since we are linked for other symbols too.
354 void *android_handle = dlopen("libandroid.so", RTLD_NOW);
355 PF_SETFRAMERATE set_frame_rate = (PF_SETFRAMERATE)dlsym(android_handle, "ANativeWindow_setFrameRate");
356 if (!set_frame_rate) {
357 U_LOG_E("ANativeWindow_setFrameRate not found");
358 dlclose(android_handle);
359 return XRT_SUCCESS;
360 }
361 struct ANativeWindow *window = (struct ANativeWindow *)android_globals_get_window();
362 if (window == NULL || (set_frame_rate(window, display_refresh_rate_hz, 1) != 0)) {
363 U_LOG_E("set_frame_rate error");
364 }
365 dlclose(android_handle);
366#else
367 struct comp_compositor *c = comp_compositor(xc);
368 if (c->target && c->target->request_refresh_rate) {
369 xrt_result_t result = comp_target_request_refresh_rate(c->target, display_refresh_rate_hz);
370 // Assume refresh rate change is immediate
371 if (result == XRT_SUCCESS)
372 c->frame_interval_ns = U_TIME_1S_IN_NS / display_refresh_rate_hz;
373 return result;
374 }
375#endif
376 return XRT_SUCCESS;
377}
378
/*!
 * Destroy the compositor and free all of its resources.
 *
 * The tear-down order below is significant: debug state is removed first,
 * then everything that owns Vulkan objects is destroyed before the Vulkan
 * device, and the device before the instance.
 */
static void
compositor_destroy(struct xrt_compositor *xc)
{
	struct comp_compositor *c = comp_compositor(xc);
	struct vk_bundle *vk = get_vk(c);

	COMP_DEBUG(c, "COMP_DESTROY");

	// Need to do this as early as possible.
	u_var_remove_root(c);

	// Destroy any Vulkan resources, even if not used.
	chl_scratch_free_resources(&c->scratch, &c->nr);

	// Destroy the scratch images fully, we initialized all of them.
	chl_scratch_fini(&c->scratch);

	// Make sure we are not holding onto any swapchains.
	u_swapchain_debug_destroy(&c->debug.sc);

	// Make sure we don't have anything to destroy.
	comp_swapchain_shared_garbage_collect(&c->base.cscs);

	// Must be destroyed before Vulkan.
	comp_swapchain_shared_destroy(&c->base.cscs, vk);

	comp_renderer_destroy(&c->r);

#ifdef XRT_FEATURE_WINDOW_PEEK
	comp_window_peek_destroy(&c->peek);
#endif

	// Does NULL checking.
	comp_target_destroy(&c->target);

	// Only depends on vk_bundle and shaders.
	render_resources_fini(&c->nr);

	// As long as vk_bundle is valid it's safe to call this function.
	render_shaders_fini(&c->shaders, vk);

	// Device goes before the instance, both are guarded against
	// never having been created (deferred/failed init paths).
	if (vk->device != VK_NULL_HANDLE) {
		vk->vkDestroyDevice(vk->device, NULL);
		vk->device = VK_NULL_HANDLE;
	}

	vk_deinit_mutex(vk);

	if (vk->instance != VK_NULL_HANDLE) {
		vk->vkDestroyInstance(vk->instance, NULL);
		vk->instance = VK_NULL_HANDLE;
	}

	// Can do this now.
	u_frame_times_widget_teardown(&c->compositor_frame_times);

	comp_base_fini(&c->base);

	free(c);
}
439
440
441/*
442 *
443 * xdev functions.
444 *
445 */
446
447static bool
448compositor_check_and_prepare_xdev(struct comp_compositor *c, struct xrt_device *xdev)
449{
450 COMP_TRACE_MARKER();
451
452 // clang-format off
453 bool has_none = (xdev->hmd->distortion.models & XRT_DISTORTION_MODEL_NONE) != 0;
454 bool has_meshuv = (xdev->hmd->distortion.models & XRT_DISTORTION_MODEL_MESHUV) != 0;
455 bool has_compute = (xdev->hmd->distortion.models & XRT_DISTORTION_MODEL_COMPUTE) != 0;
456 // clang-format on
457
458 // Everything is okay! :D
459 if (has_meshuv) {
460 return true;
461 }
462
463 if (!has_none && !has_compute) {
464 COMP_ERROR(c, "The xdev '%s' didn't have none nor compute distortion.", xdev->str);
465 return false;
466 }
467
468 COMP_WARN(c,
469 "Had to fill in meshuv on xdev '%s', "
470 "this should be done in the driver.",
471 xdev->str);
472
473 u_distortion_mesh_fill_in_compute(xdev);
474
475 // clang-format off
476 has_meshuv = (xdev->hmd->distortion.models & XRT_DISTORTION_MODEL_MESHUV) != 0;
477 // clang-format on
478
479 if (has_meshuv) {
480 return true;
481 }
482
483 COMP_ERROR(c, "Failed to fill in meshuv on the xdev '%s'.", xdev->str);
484
485 return false;
486}
487
488
489/*
490 *
491 * Vulkan functions.
492 *
493 */
494
495// If any of these lists are updated, please also update the appropriate column
496// in `vulkan-extensions.md`
497
// Instance extensions that every target backend needs.
static const char *instance_extensions_common[] = {
    COMP_INSTANCE_EXTENSIONS_COMMON,
};
501
// Instance extensions enabled opportunistically when the loader offers them.
static const char *optional_instance_extensions[] = {
#ifdef VK_EXT_swapchain_colorspace
    VK_EXT_SWAPCHAIN_COLORSPACE_EXTENSION_NAME,
#endif
#ifdef VK_EXT_display_surface_counter
    VK_EXT_DISPLAY_SURFACE_COUNTER_EXTENSION_NAME,
#endif
#if defined VK_EXT_debug_utils && !defined NDEBUG
    // Debug builds only: object naming and debug messages.
    VK_EXT_DEBUG_UTILS_EXTENSION_NAME,
#endif
};
513
514// Note: Keep synchronized with comp_vk_glue - we should have everything they
515// do, plus VK_KHR_SWAPCHAIN_EXTENSION_NAME
// Device extensions the compositor cannot run without.
static const char *required_device_extensions[] = {
    VK_KHR_SWAPCHAIN_EXTENSION_NAME,                 //
    VK_KHR_DEDICATED_ALLOCATION_EXTENSION_NAME,      //
    VK_KHR_EXTERNAL_FENCE_EXTENSION_NAME,            //
    VK_KHR_EXTERNAL_MEMORY_EXTENSION_NAME,           //
    VK_KHR_EXTERNAL_SEMAPHORE_EXTENSION_NAME,        //
    VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME, //

// Platform version of "external_memory"
#if defined(XRT_GRAPHICS_BUFFER_HANDLE_IS_FD)
    VK_KHR_EXTERNAL_MEMORY_FD_EXTENSION_NAME,

#elif defined(XRT_GRAPHICS_BUFFER_HANDLE_IS_AHARDWAREBUFFER)
    VK_ANDROID_EXTERNAL_MEMORY_ANDROID_HARDWARE_BUFFER_EXTENSION_NAME,
    VK_KHR_SAMPLER_YCBCR_CONVERSION_EXTENSION_NAME,
    VK_KHR_MAINTENANCE_1_EXTENSION_NAME,
    VK_KHR_BIND_MEMORY_2_EXTENSION_NAME,
    VK_EXT_QUEUE_FAMILY_FOREIGN_EXTENSION_NAME,

#elif defined(XRT_GRAPHICS_BUFFER_HANDLE_IS_WIN32_HANDLE)
    VK_KHR_EXTERNAL_MEMORY_WIN32_EXTENSION_NAME,

#else
#error "Need port!"
#endif

// Platform version of "external_fence" and "external_semaphore"
#if defined(XRT_GRAPHICS_SYNC_HANDLE_IS_FD) // Optional

#elif defined(XRT_GRAPHICS_SYNC_HANDLE_IS_WIN32_HANDLE)
    VK_KHR_EXTERNAL_SEMAPHORE_WIN32_EXTENSION_NAME,
    VK_KHR_EXTERNAL_FENCE_WIN32_EXTENSION_NAME,

#else
#error "Need port!"
#endif
};
553
554static const char *optional_device_extensions[] = {
555 VK_GOOGLE_DISPLAY_TIMING_EXTENSION_NAME, //
556 VK_EXT_GLOBAL_PRIORITY_EXTENSION_NAME, //
557
558// Platform version of "external_fence" and "external_semaphore"
559#if defined(XRT_GRAPHICS_SYNC_HANDLE_IS_FD)
560 VK_KHR_EXTERNAL_SEMAPHORE_FD_EXTENSION_NAME, //
561 VK_KHR_EXTERNAL_FENCE_FD_EXTENSION_NAME, //
562
563#elif defined(XRT_GRAPHICS_SYNC_HANDLE_IS_WIN32_HANDLE) // Not optional
564#elif defined(XRT_GRAPHICS_BUFFER_HANDLE_IS_AHARDWAREBUFFER)
565#if defined(VK_ANDROID_external_format_resolve)
566 // Requires Vulkan 1.3.268.1
567 VK_ANDROID_EXTERNAL_FORMAT_RESOLVE_EXTENSION_NAME //
568#endif
569#else
570#error "Need port!"
571#endif
572
573#ifdef VK_KHR_present_id
574 VK_KHR_PRESENT_ID_EXTENSION_NAME,
575#endif
576#ifdef VK_KHR_present_wait
577 VK_KHR_PRESENT_WAIT_EXTENSION_NAME,
578#endif
579#ifdef VK_KHR_format_feature_flags2
580 VK_KHR_FORMAT_FEATURE_FLAGS_2_EXTENSION_NAME,
581#endif
582#ifdef VK_KHR_global_priority
583 VK_KHR_GLOBAL_PRIORITY_EXTENSION_NAME,
584#endif
585#ifdef VK_KHR_image_format_list
586 VK_KHR_IMAGE_FORMAT_LIST_EXTENSION_NAME,
587#endif
588#ifdef VK_KHR_maintenance1
589 VK_KHR_MAINTENANCE_1_EXTENSION_NAME,
590#endif
591#ifdef VK_KHR_maintenance2
592 VK_KHR_MAINTENANCE_2_EXTENSION_NAME,
593#endif
594#ifdef VK_KHR_timeline_semaphore
595 VK_KHR_TIMELINE_SEMAPHORE_EXTENSION_NAME,
596#endif
597#ifdef VK_EXT_calibrated_timestamps
598 VK_EXT_CALIBRATED_TIMESTAMPS_EXTENSION_NAME,
599#endif
600#ifdef VK_EXT_robustness2
601 VK_EXT_ROBUSTNESS_2_EXTENSION_NAME,
602#endif
603#ifdef VK_EXT_display_control
604 VK_EXT_DISPLAY_CONTROL_EXTENSION_NAME,
605#endif
606#ifdef VK_KHR_synchronization2
607 VK_KHR_SYNCHRONIZATION_2_EXTENSION_NAME,
608#endif
609};
610
/*!
 * Hook for adding extra (e.g. peek-window) instance extensions to the
 * required/optional lists.
 */
static bool
select_instances_extensions(struct comp_compositor *c, struct u_string_list *required, struct u_string_list *optional)
{
	bool success = true;

#ifdef XRT_FEATURE_WINDOW_PEEK
	if (!comp_window_peek_get_vk_instance_exts(required)) {
		COMP_ERROR(c, "Failed to get required vulkan instance extensions for peek window.");
		success = false;
	}
#endif

	return success;
}
622
/*!
 * Create the compositor's Vulkan instance and device.
 *
 * Gathers required/optional instance and device extensions (the common
 * sets plus the ones the selected target factory declares), creates the
 * whole Vulkan bundle via comp_vulkan_init_bundle(), copies the selected
 * GPU identification into the settings and finally ties the shared
 * swapchain state to the new bundle.
 */
static bool
compositor_init_vulkan(struct comp_compositor *c)
{
	COMP_TRACE_MARKER();

	// A target factory must have been selected before Vulkan init.
	assert(c->target_factory != NULL);

	struct vk_bundle *vk = get_vk(c);


	/*
	 * Instance extensions.
	 */

	struct u_string_list *required_instance_ext_list = u_string_list_create();
	struct u_string_list *optional_instance_ext_list = u_string_list_create();

	// Every backend needs at least the common extensions.
	u_string_list_append_array( //
	    required_instance_ext_list, //
	    instance_extensions_common, //
	    ARRAY_SIZE(instance_extensions_common)); //

	// Add per target required extensions.
	u_string_list_append_array( //
	    required_instance_ext_list, //
	    c->target_factory->required_instance_extensions, //
	    c->target_factory->required_instance_extension_count); //

	// Optional instance extensions.
	u_string_list_append_array( //
	    optional_instance_ext_list, //
	    optional_instance_extensions, //
	    ARRAY_SIZE(optional_instance_extensions)); //

	if (!select_instances_extensions(c, required_instance_ext_list, optional_instance_ext_list)) {
		COMP_ERROR(c, "Failed to select additional instance extensions.");
		u_string_list_destroy(&required_instance_ext_list);
		u_string_list_destroy(&optional_instance_ext_list);
		return false;
	}

	/*
	 * Device extensions.
	 */

	struct u_string_list *required_device_extension_list = u_string_list_create();
	struct u_string_list *optional_device_extension_list = u_string_list_create();

	// Required device extensions.
	u_string_list_append_array( //
	    required_device_extension_list, //
	    required_device_extensions, //
	    ARRAY_SIZE(required_device_extensions)); //

	// Optional device extensions.
	u_string_list_append_array( //
	    optional_device_extension_list, //
	    optional_device_extensions, //
	    ARRAY_SIZE(optional_device_extensions)); //

	// Add per target optional device extensions.
	u_string_list_append_array( //
	    optional_device_extension_list, //
	    c->target_factory->optional_device_extensions, //
	    c->target_factory->optional_device_extension_count); //

	// Select required Vulkan version, suitable for both compositor and target
	uint32_t required_instance_version = MAX(c->target_factory->required_instance_version, VK_API_VERSION_1_0);

	/*
	 * Create the device.
	 */

	struct comp_vulkan_arguments vk_args = {
	    .get_instance_proc_address = vkGetInstanceProcAddr,
	    .required_instance_version = required_instance_version,
	    .required_instance_extensions = required_instance_ext_list,
	    .optional_instance_extensions = optional_instance_ext_list,
	    .required_device_extensions = required_device_extension_list,
	    .optional_device_extensions = optional_device_extension_list,
	    .log_level = c->settings.log_level,
	    .only_compute_queue = c->settings.use_compute,
	    .selected_gpu_index = c->settings.selected_gpu_index,
	    .client_gpu_index = c->settings.client_gpu_index,
	    .timeline_semaphore = true, // Flag is optional, not a hard requirement.
	};

	struct comp_vulkan_results vk_res = {0};
	bool bundle_ret = comp_vulkan_init_bundle(vk, &vk_args, &vk_res);

	// Done with the lists now, destroyed regardless of success.
	u_string_list_destroy(&required_instance_ext_list);
	u_string_list_destroy(&optional_instance_ext_list);
	u_string_list_destroy(&required_device_extension_list);
	u_string_list_destroy(&optional_device_extension_list);

	if (!bundle_ret) {
		return false;
	}

	// clang-format off
	static_assert(ARRAY_SIZE(vk_res.client_gpu_deviceUUID.data) == XRT_UUID_SIZE, "array size mismatch");
	static_assert(ARRAY_SIZE(vk_res.selected_gpu_deviceUUID.data) == XRT_UUID_SIZE, "array size mismatch");
	static_assert(ARRAY_SIZE(vk_res.client_gpu_deviceUUID.data) == ARRAY_SIZE(c->settings.client_gpu_deviceUUID.data), "array size mismatch");
	static_assert(ARRAY_SIZE(vk_res.selected_gpu_deviceUUID.data) == ARRAY_SIZE(c->settings.selected_gpu_deviceUUID.data), "array size mismatch");
	static_assert(ARRAY_SIZE(vk_res.client_gpu_deviceLUID.data) == XRT_LUID_SIZE, "array size mismatch");
	static_assert(ARRAY_SIZE(vk_res.client_gpu_deviceLUID.data) == ARRAY_SIZE(c->settings.client_gpu_deviceLUID.data), "array size mismatch");
	// clang-format on

	// Store the GPU identification results on the settings.
	c->settings.client_gpu_deviceUUID = vk_res.client_gpu_deviceUUID;
	c->settings.selected_gpu_deviceUUID = vk_res.selected_gpu_deviceUUID;
	c->settings.client_gpu_index = vk_res.client_gpu_index;
	c->settings.selected_gpu_index = vk_res.selected_gpu_index;
	c->settings.client_gpu_deviceLUID = vk_res.client_gpu_deviceLUID;
	c->settings.client_gpu_deviceLUID_valid = vk_res.client_gpu_deviceLUID_valid;

	// Tie the lifetimes of swapchains to Vulkan.
	xrt_result_t xret = comp_swapchain_shared_init(&c->base.cscs, vk);
	if (xret != XRT_SUCCESS) {
		return false;
	}

	return true;
}
747
748
749/*
750 *
751 * Other functions.
752 *
753 */
754
/*!
 * All compiled-in target factories, tried in the order listed here when
 * auto-detecting a backend; the debug-image target is the final fallback.
 */
const struct comp_target_factory *ctfs[] = {
#if defined VK_USE_PLATFORM_WAYLAND_KHR && defined XRT_HAVE_WAYLAND_DIRECT
    &comp_target_factory_direct_wayland,
#endif
#ifdef VK_USE_PLATFORM_WAYLAND_KHR
    &comp_target_factory_wayland,
#endif
#ifdef VK_USE_PLATFORM_XLIB_XRANDR_EXT
    &comp_target_factory_direct_randr,
#endif
#ifdef VK_USE_PLATFORM_XCB_KHR
    &comp_target_factory_xcb,
#endif
#ifdef XRT_OS_ANDROID
    &comp_target_factory_android,
#endif
#ifdef XRT_OS_WINDOWS
    &comp_target_factory_mswin,
#endif
#ifdef VK_USE_PLATFORM_XLIB_XRANDR_EXT
    &comp_target_factory_direct_nvidia,
#endif
#ifdef VK_USE_PLATFORM_DISPLAY_KHR
    &comp_target_factory_vk_display,
#endif
    &comp_target_factory_debug_image,
};
782
783static void
784error_msg_with_list(struct comp_compositor *c, const char *msg)
785{
786 struct u_pp_sink_stack_only sink;
787 u_pp_delegate_t dg = u_pp_sink_stack_only_init(&sink);
788 u_pp(dg, "%s, available targets:", msg);
789 for (size_t i = 0; i < ARRAY_SIZE(ctfs); i++) {
790 u_pp(dg, "\n\t%s: %s", ctfs[i]->identifier, ctfs[i]->name);
791 }
792
793 COMP_ERROR(c, "%s", sink.buffer);
794}
795
796static bool
797compositor_check_deferred(struct comp_compositor *c, const struct comp_target_factory *ctf)
798{
799 if (debug_get_bool_option_disable_deferred()) {
800 COMP_DEBUG(c, "Deferred window initialization globally disabled!");
801 return false;
802 }
803
804 if (!ctf->is_deferred) {
805 return false; // It is not deferred but that's okay.
806 }
807
808 COMP_DEBUG(c, "Deferred target backend %s selected!", ctf->name);
809
810 c->target_factory = ctf;
811 c->deferred_surface = true;
812
813 return true;
814}
815
816static bool
817compositor_try_window(struct comp_compositor *c, const struct comp_target_factory *ctf)
818{
819 COMP_TRACE_MARKER();
820
821 struct comp_target *ct = NULL;
822
823 if (!ctf->create_target(ctf, c, &ct)) {
824 return false;
825 }
826
827 if (!comp_target_init_pre_vulkan(ct)) {
828 ct->destroy(ct);
829 return false;
830 }
831
832 COMP_DEBUG(c, "Target backend %s initialized!", ct->name);
833
834 c->target_factory = ctf;
835 c->target = ct;
836
837 return true;
838}
839
840static bool
841select_target_factory_from_settings(struct comp_compositor *c, const struct comp_target_factory **out_ctf)
842{
843 const char *identifier = c->settings.target_identifier;
844
845 if (identifier == NULL) {
846 return true; // Didn't ask for a target, all ok.
847 }
848
849 for (size_t i = 0; i < ARRAY_SIZE(ctfs); i++) {
850 const struct comp_target_factory *ctf = ctfs[i];
851
852 if (strcmp(ctf->identifier, identifier) == 0) {
853 *out_ctf = ctf;
854 return true;
855 }
856 }
857
858 char buffer[256];
859 snprintf(buffer, ARRAY_SIZE(buffer), "Could not find target factory with identifier '%s'", identifier);
860 error_msg_with_list(c, buffer);
861
862 return false; // User asked for a target that we couldn't find, error.
863}
864
865static bool
866select_target_factory_by_detecting(struct comp_compositor *c, const struct comp_target_factory **out_ctf)
867{
868 for (size_t i = 0; i < ARRAY_SIZE(ctfs); i++) {
869 const struct comp_target_factory *ctf = ctfs[i];
870
871 if (comp_target_factory_detect(ctf, c)) {
872 *out_ctf = ctf;
873 return true;
874 }
875 }
876
877 return true; // Didn't detect a target, but that's ok.
878}
879
880static bool
881compositor_init_window_pre_vulkan(struct comp_compositor *c, const struct comp_target_factory *selected_ctf)
882{
883 COMP_TRACE_MARKER();
884
885 if (selected_ctf == NULL && !select_target_factory_from_settings(c, &selected_ctf)) {
886 return false; // Error!
887 }
888
889 if (selected_ctf == NULL && !select_target_factory_by_detecting(c, &selected_ctf)) {
890 return false; // Error!
891 }
892
893 if (selected_ctf != NULL) {
894 // We have selected a target factory, but it needs Vulkan.
895 if (selected_ctf->requires_vulkan_for_create) {
896 COMP_INFO(c, "Selected %s backend!", selected_ctf->name);
897 c->target_factory = selected_ctf;
898 return true;
899 }
900
901 if (compositor_check_deferred(c, selected_ctf)) {
902 return true;
903 }
904
905 if (!compositor_try_window(c, selected_ctf)) {
906 COMP_ERROR(c, "Failed to init %s backend!", selected_ctf->name);
907 return false;
908 }
909
910 return true;
911 }
912
913 for (size_t i = 0; i < ARRAY_SIZE(ctfs); i++) {
914 const struct comp_target_factory *ctf = ctfs[i];
915
916 // Skip targets that requires Vulkan.
917 if (ctf->requires_vulkan_for_create) {
918 continue;
919 }
920
921 if (compositor_check_deferred(c, ctf)) {
922 return true;
923 }
924
925 if (compositor_try_window(c, ctf)) {
926 return true;
927 }
928 }
929
930 // Nothing worked, giving up.
931 error_msg_with_list(c, "Failed to create any target");
932
933 return false;
934}
935
936static bool
937compositor_init_window_post_vulkan(struct comp_compositor *c)
938{
939 COMP_TRACE_MARKER();
940
941 assert(c->target_factory != NULL);
942
943 if (c->target != NULL) {
944 return true;
945 }
946
947 return compositor_try_window(c, c->target_factory);
948}
949
950static bool
951compositor_init_swapchain(struct comp_compositor *c)
952{
953 COMP_TRACE_MARKER();
954
955 assert(c->target != NULL);
956 assert(c->target_factory != NULL);
957
958 if (comp_target_init_post_vulkan(c->target, //
959 c->settings.preferred.width, //
960 c->settings.preferred.height)) {
961 return true;
962 }
963
964 COMP_ERROR(c, "Window init_swapchain failed!");
965
966 comp_target_destroy(&c->target);
967
968 return false;
969}
970
971static bool
972compositor_init_render_resources(struct comp_compositor *c)
973{
974 COMP_TRACE_MARKER();
975
976 struct vk_bundle *vk = get_vk(c);
977
978 if (!render_shaders_load(&c->shaders, vk)) {
979 return false;
980 }
981
982 if (!render_resources_init(&c->nr, &c->shaders, get_vk(c), c->xdev)) {
983 return false;
984 }
985
986 return true;
987}
988
989static bool
990compositor_init_renderer(struct comp_compositor *c)
991{
992 COMP_TRACE_MARKER();
993
994 c->r = comp_renderer_create(c, c->view_extents);
995
996#ifdef XRT_FEATURE_WINDOW_PEEK
997 c->peek = comp_window_peek_create(c);
998#else
999 c->peek = NULL;
1000#endif
1001
1002 return c->r != NULL;
1003}
1004
1005xrt_result_t
1006comp_main_create_system_compositor(struct xrt_device *xdev,
1007 const struct comp_target_factory *ctf,
1008 struct u_pacing_app_factory *upaf,
1009 struct xrt_system_compositor **out_xsysc)
1010{
1011 COMP_TRACE_MARKER();
1012
1013 struct comp_compositor *c = U_TYPED_CALLOC(struct comp_compositor);
1014
1015 struct xrt_compositor *iface = &c->base.base.base;
1016 iface->begin_session = compositor_begin_session;
1017 iface->end_session = compositor_end_session;
1018 iface->predict_frame = compositor_predict_frame;
1019 iface->mark_frame = compositor_mark_frame;
1020 iface->begin_frame = compositor_begin_frame;
1021 iface->discard_frame = compositor_discard_frame;
1022 iface->layer_commit = compositor_layer_commit;
1023 iface->get_display_refresh_rate = compositor_get_display_refresh_rate;
1024 iface->request_display_refresh_rate = compositor_request_display_refresh_rate;
1025 iface->destroy = compositor_destroy;
1026 c->frame.waited.id = -1;
1027 c->frame.rendering.id = -1;
1028 c->xdev = xdev;
1029
1030 xrt_result_t xret = XRT_SUCCESS;
1031
1032 COMP_DEBUG(c, "Doing init %p", (void *)c);
1033
1034 uint32_t view_count = xdev->hmd->view_count;
1035 enum xrt_view_type view_type = 0; // Invalid
1036
1037 switch (view_count) {
1038 case 0:
1039 U_LOG_E("Bug detected: HMD \"%s\" xdev->hmd.view_count must be > 0!", xdev->str);
1040 assert(xdev->hmd->view_count > 0);
1041 break;
1042 case 1: view_type = XRT_VIEW_TYPE_MONO; break;
1043 case 2: view_type = XRT_VIEW_TYPE_STEREO; break;
1044 default:
1045 U_LOG_E("Bug detected: HMD \"%s\" xdev->hmd.view_count must be 1 or 2, not %u!", xdev->str, view_count);
1046 assert(view_count == 1 && view_count == 2);
1047 break;
1048 }
1049
1050 // Do this as early as possible.
1051 comp_base_init(&c->base);
1052
1053 // Init the settings to default.
1054 comp_settings_init(&c->settings, xdev);
1055
1056 // Init this before the renderer.
1057 u_swapchain_debug_init(&c->debug.sc);
1058
1059 // Init these before the renderer, not all might be used.
1060 chl_scratch_init(&c->scratch);
1061
1062 c->frame_interval_ns = c->settings.nominal_frame_interval_ns;
1063
1064 c->last_frame_time_ns = os_monotonic_get_ns();
1065
1066 double scale = c->settings.viewport_scale;
1067
1068 if (scale > 2.0) {
1069 scale = 2.0;
1070 COMP_DEBUG(c, "Clamped scale to 200%%\n");
1071 }
1072
1073 uint32_t w0 = (uint32_t)(xdev->hmd->views[0].display.w_pixels * scale);
1074 uint32_t h0 = (uint32_t)(xdev->hmd->views[0].display.h_pixels * scale);
1075
1076 c->view_extents.width = w0;
1077 c->view_extents.height = h0;
1078
1079 // Need to select window backend before creating Vulkan, then
1080 // swapchain will initialize the window fully and the swapchain,
1081 // and finally the renderer is created which renders to
1082 // window/swapchain.
1083
1084 // clang-format off
1085 if (!compositor_check_and_prepare_xdev(c, xdev) ||
1086 !compositor_init_window_pre_vulkan(c, ctf) ||
1087 !compositor_init_vulkan(c) ||
1088 !compositor_init_render_resources(c)) {
1089 COMP_ERROR(c, "Failed to init compositor %p", (void *)c);
1090
1091 xret = XRT_ERROR_VULKAN;
1092 goto error;
1093 }
1094
1095 if (!c->deferred_surface) {
1096 if (!compositor_init_window_post_vulkan(c) ||
1097 !compositor_init_swapchain(c) ||
1098 !compositor_init_renderer(c)) {
1099 COMP_ERROR(c, "Failed to init compositor %p", (void*)c);
1100
1101 xret = XRT_ERROR_VULKAN;
1102 goto error;
1103 }
1104 comp_target_set_title(c->target, WINDOW_TITLE);
1105 }
1106 // clang-format on
1107
1108 COMP_DEBUG(c, "Done %p", (void *)c);
1109
1110 /*!
1111 * @todo Support more like, depth/float formats etc,
1112 * remember to update the GL client as well.
1113 */
1114
1115 struct xrt_compositor_info *info = &c->base.base.base.info;
1116
1117
1118 /*
1119 * Formats.
1120 */
1121
1122 struct comp_vulkan_formats formats = {0};
1123 comp_vulkan_formats_check(get_vk(c), &formats);
1124 comp_vulkan_formats_copy_to_info(&formats, info);
1125 comp_vulkan_formats_log(c->settings.log_level, &formats);
1126
1127
1128 /*
1129 * Rest of info.
1130 */
1131
1132 struct xrt_system_compositor_info sys_info_storage = {0};
1133 struct xrt_system_compositor_info *sys_info = &sys_info_storage;
1134
1135 // Required by OpenXR spec.
1136 sys_info->max_layers = XRT_MAX_LAYERS;
1137 sys_info->compositor_vk_deviceUUID = c->settings.selected_gpu_deviceUUID;
1138 sys_info->client_vk_deviceUUID = c->settings.client_gpu_deviceUUID;
1139 sys_info->client_d3d_deviceLUID = c->settings.client_gpu_deviceLUID;
1140 sys_info->client_d3d_deviceLUID_valid = c->settings.client_gpu_deviceLUID_valid;
1141 // @note If timewarp is disabled this is not supported.
1142 sys_info->supports_fov_mutable = true;
1143
1144 // clang-format off
1145 for (uint32_t i = 0; i < view_count; ++i) {
1146 uint32_t w = (uint32_t)(xdev->hmd->views[i].display.w_pixels * scale);
1147 uint32_t h = (uint32_t)(xdev->hmd->views[i].display.h_pixels * scale);
1148 uint32_t w_2 = xdev->hmd->views[i].display.w_pixels * 2;
1149 uint32_t h_2 = xdev->hmd->views[i].display.h_pixels * 2;
1150
1151 sys_info->view_configs[0].views[i].recommended.width_pixels = w;
1152 sys_info->view_configs[0].views[i].recommended.height_pixels = h;
1153 sys_info->view_configs[0].views[i].recommended.sample_count = 1;
1154 sys_info->view_configs[0].views[i].max.width_pixels = w_2;
1155 sys_info->view_configs[0].views[i].max.height_pixels = h_2;
1156 sys_info->view_configs[0].views[i].max.sample_count = 1;
1157 }
1158 // clang-format on
1159 sys_info->view_configs[0].view_type = view_type;
1160 sys_info->view_configs[0].view_count = view_count;
1161 sys_info->view_config_count = 1; // Only one view config for now.
1162
1163 // If we can add e.g. video pass-through capabilities, we may need to change (augment) this list.
1164 // Just copying it directly right now.
1165 assert(xdev->hmd->blend_mode_count <= XRT_MAX_DEVICE_BLEND_MODES);
1166 assert(xdev->hmd->blend_mode_count != 0);
1167 assert(xdev->hmd->blend_mode_count <= ARRAY_SIZE(sys_info->supported_blend_modes));
1168 for (size_t i = 0; i < xdev->hmd->blend_mode_count; ++i) {
1169 assert(u_verify_blend_mode_valid(xdev->hmd->blend_modes[i]));
1170 sys_info->supported_blend_modes[i] = xdev->hmd->blend_modes[i];
1171 }
1172 sys_info->supported_blend_mode_count = (uint8_t)xdev->hmd->blend_mode_count;
1173
1174 u_var_add_root(c, "Compositor", true);
1175
1176 float target_frame_time_ms = (float)ns_to_ms(c->frame_interval_ns);
1177 u_frame_times_widget_init(&c->compositor_frame_times, target_frame_time_ms, 10.f);
1178
1179 u_var_add_ro_f32(c, &c->compositor_frame_times.fps, "FPS (Compositor)");
1180 u_var_add_bool(c, &c->debug.atw_off, "Debug: ATW OFF");
1181 u_var_add_bool(c, &c->debug.disable_fast_path, "Debug: Disable fast path");
1182 u_var_add_f32_timing(c, c->compositor_frame_times.debug_var, "Frame Times (Compositor)");
1183
1184 // Only add active views.
1185 for (uint32_t i = 0; i < view_count; i++) {
1186 char tmp[64] = {0};
1187 snprintf(tmp, sizeof(tmp), "View[%u]", i);
1188 u_var_add_native_images_debug(c, &c->scratch.views[i].cssi.unid, tmp);
1189 }
1190
1191#ifdef XRT_OS_ANDROID
1192 // Get info about display.
1193 struct xrt_android_display_metrics metrics;
1194 if (!android_custom_surface_get_display_metrics(android_globals_get_vm(), android_globals_get_context(),
1195 &metrics)) {
1196 U_LOG_E("Could not get Android display metrics.");
1197 /* Fallback to default values */
1198 metrics.refresh_rates[0] = 60.0f;
1199 metrics.refresh_rate_count = 1;
1200 metrics.refresh_rate = metrics.refresh_rates[0];
1201 }
1202
1203 // Copy data to info.
1204 sys_info->refresh_rate_count = metrics.refresh_rate_count;
1205 for (size_t i = 0; i < sys_info->refresh_rate_count; ++i) {
1206 sys_info->refresh_rates_hz[i] = metrics.refresh_rates[i];
1207 }
1208#else
1209 if (c->target && c->target->get_refresh_rates) {
1210 comp_target_get_refresh_rates(c->target, &sys_info->refresh_rate_count, sys_info->refresh_rates_hz);
1211 } else {
1212 //! @todo: Query all supported refresh rates of the current mode
1213 sys_info->refresh_rate_count = 1;
1214 sys_info->refresh_rates_hz[0] = (float)(1. / time_ns_to_s(c->frame_interval_ns));
1215 }
1216#endif // XRT_OS_ANDROID
1217
1218 // Needs to be delayed until after compositor's u_var has been setup.
1219 if (!c->deferred_surface) {
1220 comp_renderer_add_debug_vars(c->r);
1221 }
1222
1223 // Standard app pacer.
1224 if (upaf == NULL) {
1225 xret = u_pa_factory_create(&upaf);
1226 if (xret != XRT_SUCCESS || upaf == NULL) {
1227 COMP_ERROR(c, "Failed to create app pacing factory");
1228 goto error;
1229 }
1230 }
1231
1232 xret = comp_multi_create_system_compositor(&c->base.base, upaf, sys_info, !c->deferred_surface, out_xsysc);
1233 if (xret == XRT_SUCCESS) {
1234 return xret;
1235 }
1236
1237error:
1238 if (c != NULL) {
1239 c->base.base.base.destroy(&c->base.base.base);
1240 }
1241 u_paf_destroy(&upaf);
1242 return xret;
1243}