// Copyright 2019-2024, Collabora, Ltd. // Copyright 2025, NVIDIA CORPORATION. // SPDX-License-Identifier: BSL-1.0 /*! * @file * @brief Main compositor written using Vulkan implementation. * @author Jakob Bornecrantz * @author Lubosz Sarnecki * @author Rylie Pavlik * @author Moshi Turner * @ingroup comp_main * * * begin_frame and end_frame delimit the application's work on graphics for a * single frame. end_frame updates our estimate of the current estimated app * graphics duration, as well as the "swap interval" for scheduling the * application. * * We have some known overhead work required to composite a frame: eventually * this may be measured as well. Overhead plus the estimated app render duration * is compared to the frame duration: if it's longer, then we go to a "swap * interval" of 2. * * wait_frame must be the one to produce the next predicted display time, * because we cannot distinguish two sequential wait_frame calls (an app * skipping a frame) from an OS scheduling blip causing the second wait_frame to * happen before the first begin_frame actually gets executed. It cannot use the * last display time in this computation for this reason. (Except perhaps to * align the period at a sub-frame level? e.g. should be a multiple of the frame * duration after the last displayed time). * * wait_frame should not actually produce the predicted display time until it's * done waiting: it should wake up once a frame and see what the current swap * interval suggests: this handles the case where end_frame changes the swap * interval from 2 to 1 during a wait_frame call. (That is, we should wait until * whichever is closer of the next vsync or the time we currently predict we * should release the app.) * * Sleeping can be a bit hairy: in general right now we'll use a combination of * operating system sleeps and busy-waits (for fine-grained waiting). Some * platforms provide vsync-related sync primitives that may get us closer to our * desired time. 
 * This is also convenient for the "wait until next frame"
 * behavior.
 */

#include "xrt/xrt_compiler.h"
#include "xrt/xrt_compositor.h"
#include "xrt/xrt_config_have.h"
#include "xrt/xrt_results.h"

#include "math/m_api.h"

#include "os/os_time.h"

#include "util/u_var.h"
#include "util/u_misc.h"
#include "util/u_time.h"
#include "util/u_debug.h"
#include "util/u_pacing.h"
#include "util/u_handles.h"
#include "util/u_trace_marker.h"
#include "util/u_pretty_print.h"
#include "util/u_distortion_mesh.h"
#include "util/u_verify.h"
#include "util/comp_vulkan.h"

#include "main/comp_compositor.h"
#include "main/comp_frame.h"

#ifdef XRT_FEATURE_WINDOW_PEEK
#include "main/comp_window_peek.h"
#endif

#include "multi/comp_multi_interface.h"

/*
 * NOTE(review): the targets of the following #include directives have been
 * stripped (they were presumably angle-bracketed system headers, e.g. the C
 * standard library headers this file uses: assert/stdio/stdlib/string/etc.).
 * As written these lines do not preprocess — restore the header names from
 * upstream history; do not guess them here.
 */
#include
#include
#include
#include
#include
#include

#ifdef XRT_GRAPHICS_SYNC_HANDLE_IS_FD
// NOTE(review): stripped system header — likely <unistd.h>; confirm upstream.
#include
#endif

#ifdef XRT_OS_ANDROID
#include "android/android_custom_surface.h"
#include "android/android_globals.h"
// NOTE(review): stripped system header — likely an Android NDK header; confirm upstream.
#include
#endif

// Window title used for all swapchain/window targets.
#define WINDOW_TITLE "Monado"

// Env var to globally disable deferred (surface-less) window initialization.
DEBUG_GET_ONCE_BOOL_OPTION(disable_deferred, "XRT_COMPOSITOR_DISABLE_DEFERRED", false)


/*
 *
 * Helper functions.
 *
 */

//! Convert a nanosecond count to fractional milliseconds.
static double
ns_to_ms(int64_t ns)
{
	double ms = ((double)ns) * 1. / 1000. * 1. / 1000.;
	return ms;
}

//! Current monotonic time in fractional milliseconds, for log messages.
static double
ts_ms(void)
{
	int64_t monotonic = os_monotonic_get_ns();
	return ns_to_ms(monotonic);
}

//! Accessor for the compositor's Vulkan bundle.
static struct vk_bundle *
get_vk(struct comp_compositor *c)
{
	return &c->base.vk;
}


/*
 *
 * Compositor functions.
* */ static bool compositor_init_window_post_vulkan(struct comp_compositor *c); static bool compositor_init_swapchain(struct comp_compositor *c); static bool compositor_init_renderer(struct comp_compositor *c); static xrt_result_t compositor_begin_session(struct xrt_compositor *xc, const struct xrt_begin_session_info *info) { struct comp_compositor *c = comp_compositor(xc); COMP_DEBUG(c, "BEGIN_SESSION"); // clang-format off if (c->deferred_surface) { if (!compositor_init_window_post_vulkan(c) || !compositor_init_swapchain(c) || !compositor_init_renderer(c)) { COMP_ERROR(c, "Failed to init compositor %p", (void *)c); c->base.base.base.destroy(&c->base.base.base); return XRT_ERROR_VULKAN; } comp_target_set_title(c->target, WINDOW_TITLE); comp_renderer_add_debug_vars(c->r); } // clang-format on return XRT_SUCCESS; } static xrt_result_t compositor_end_session(struct xrt_compositor *xc) { struct comp_compositor *c = comp_compositor(xc); COMP_DEBUG(c, "END_SESSION"); if (c->deferred_surface) { // Make sure we don't have anything to destroy. 
comp_swapchain_shared_garbage_collect(&c->base.cscs); comp_renderer_destroy(&c->r); #ifdef XRT_FEATURE_WINDOW_PEEK comp_window_peek_destroy(&c->peek); #endif comp_target_destroy(&c->target); } return XRT_SUCCESS; } static xrt_result_t compositor_predict_frame(struct xrt_compositor *xc, int64_t *out_frame_id, int64_t *out_wake_time_ns, int64_t *out_predicted_gpu_time_ns, int64_t *out_predicted_display_time_ns, int64_t *out_predicted_display_period_ns) { COMP_TRACE_MARKER(); struct comp_compositor *c = comp_compositor(xc); COMP_SPEW(c, "PREDICT_FRAME"); comp_target_update_timings(c->target); assert(comp_frame_is_invalid_locked(&c->frame.waited)); int64_t frame_id = -1; int64_t wake_up_time_ns = 0; int64_t present_slop_ns = 0; int64_t desired_present_time_ns = 0; int64_t predicted_display_time_ns = 0; comp_target_calc_frame_pacing( // c->target, // &frame_id, // &wake_up_time_ns, // &desired_present_time_ns, // &present_slop_ns, // &predicted_display_time_ns); // c->frame.waited.id = frame_id; c->frame.waited.desired_present_time_ns = desired_present_time_ns; c->frame.waited.present_slop_ns = present_slop_ns; c->frame.waited.predicted_display_time_ns = predicted_display_time_ns; *out_frame_id = frame_id; *out_wake_time_ns = wake_up_time_ns; *out_predicted_gpu_time_ns = desired_present_time_ns; // Not quite right but close enough. 
*out_predicted_display_time_ns = predicted_display_time_ns; *out_predicted_display_period_ns = c->frame_interval_ns; return XRT_SUCCESS; } static xrt_result_t compositor_mark_frame(struct xrt_compositor *xc, int64_t frame_id, enum xrt_compositor_frame_point point, int64_t when_ns) { COMP_TRACE_MARKER(); struct comp_compositor *c = comp_compositor(xc); COMP_SPEW(c, "MARK_FRAME %i", point); switch (point) { case XRT_COMPOSITOR_FRAME_POINT_WOKE: comp_target_mark_wake_up(c->target, frame_id, when_ns); return XRT_SUCCESS; default: assert(false); } return XRT_ERROR_VULKAN; } static xrt_result_t compositor_begin_frame(struct xrt_compositor *xc, int64_t frame_id) { struct comp_compositor *c = comp_compositor(xc); COMP_SPEW(c, "BEGIN_FRAME"); c->app_profiling.last_begin = os_monotonic_get_ns(); return XRT_SUCCESS; } static xrt_result_t compositor_discard_frame(struct xrt_compositor *xc, int64_t frame_id) { struct comp_compositor *c = comp_compositor(xc); COMP_SPEW(c, "DISCARD_FRAME at %8.3fms", ts_ms()); return XRT_SUCCESS; } /*! * We have a fast path for single projection layer that goes directly * to the distortion shader, so no need to use the layer renderer. */ static bool can_do_one_projection_layer_fast_path(struct comp_compositor *c) { if (c->base.layer_accum.layer_count != 1) { return false; } struct comp_layer *layer = &c->base.layer_accum.layers[0]; enum xrt_layer_type type = layer->data.type; // Handled by the distortion shader. return type == XRT_LAYER_PROJECTION || // type == XRT_LAYER_PROJECTION_DEPTH; } static XRT_CHECK_RESULT xrt_result_t compositor_layer_commit(struct xrt_compositor *xc, xrt_graphics_sync_handle_t sync_handle) { COMP_TRACE_MARKER(); struct comp_compositor *c = comp_compositor(xc); COMP_SPEW(c, "LAYER_COMMIT at %8.3fms", ts_ms()); /* * We have a fast path for single projection layer that goes directly * to the distortion shader, so no need to use the layer renderer. 
*/ bool fast_path = // !c->peek && // !c->mirroring_to_debug_gui && // !c->debug.disable_fast_path && // can_do_one_projection_layer_fast_path(c); // c->base.frame_params.one_projection_layer_fast_path = fast_path; u_graphics_sync_unref(&sync_handle); // Do the drawing xrt_result_t xret = comp_renderer_draw(c->r); if (xret != XRT_SUCCESS) { return xret; } u_frame_times_widget_push_sample(&c->compositor_frame_times, os_monotonic_get_ns()); // Record the time of this frame. c->last_frame_time_ns = os_monotonic_get_ns(); c->app_profiling.last_end = c->last_frame_time_ns; COMP_SPEW(c, "LAYER_COMMIT finished drawing at %8.3fms", ns_to_ms(c->last_frame_time_ns)); // Now is a good point to garbage collect. comp_swapchain_shared_garbage_collect(&c->base.cscs); return XRT_SUCCESS; } static xrt_result_t compositor_get_display_refresh_rate(struct xrt_compositor *xc, float *out_display_refresh_rate_hz) { #ifdef XRT_OS_ANDROID *out_display_refresh_rate_hz = android_custom_surface_get_display_refresh_rate(android_globals_get_vm(), android_globals_get_context()); #else struct comp_compositor *c = comp_compositor(xc); if (c->target && c->target->get_current_refresh_rate) { return comp_target_get_current_refresh_rate(c->target, out_display_refresh_rate_hz); } else { *out_display_refresh_rate_hz = (float)(1. / time_ns_to_s(c->frame_interval_ns)); } #endif return XRT_SUCCESS; } static xrt_result_t compositor_request_display_refresh_rate(struct xrt_compositor *xc, float display_refresh_rate_hz) { #ifdef XRT_OS_ANDROID typedef int32_t (*PF_SETFRAMERATE)(ANativeWindow * window, float frameRate, int8_t compatibility); // Note that this will just increment the reference count, rather than actually load it again, // since we are linked for other symbols too. 
void *android_handle = dlopen("libandroid.so", RTLD_NOW); PF_SETFRAMERATE set_frame_rate = (PF_SETFRAMERATE)dlsym(android_handle, "ANativeWindow_setFrameRate"); if (!set_frame_rate) { U_LOG_E("ANativeWindow_setFrameRate not found"); dlclose(android_handle); return XRT_SUCCESS; } struct ANativeWindow *window = (struct ANativeWindow *)android_globals_get_window(); if (window == NULL || (set_frame_rate(window, display_refresh_rate_hz, 1) != 0)) { U_LOG_E("set_frame_rate error"); } dlclose(android_handle); #else struct comp_compositor *c = comp_compositor(xc); if (c->target && c->target->request_refresh_rate) { xrt_result_t result = comp_target_request_refresh_rate(c->target, display_refresh_rate_hz); // Assume refresh rate change is immediate if (result == XRT_SUCCESS) c->frame_interval_ns = U_TIME_1S_IN_NS / display_refresh_rate_hz; return result; } #endif return XRT_SUCCESS; } static void compositor_destroy(struct xrt_compositor *xc) { struct comp_compositor *c = comp_compositor(xc); struct vk_bundle *vk = get_vk(c); COMP_DEBUG(c, "COMP_DESTROY"); // Need to do this as early as possible. u_var_remove_root(c); // Destroy any Vulkan resources, even if not used. chl_scratch_free_resources(&c->scratch, &c->nr); // Destroy the scratch images fully, we initialized all of them. chl_scratch_fini(&c->scratch); // Make sure we are not holding onto any swapchains. u_swapchain_debug_destroy(&c->debug.sc); // Make sure we don't have anything to destroy. comp_swapchain_shared_garbage_collect(&c->base.cscs); // Must be destroyed before Vulkan. comp_swapchain_shared_destroy(&c->base.cscs, vk); comp_renderer_destroy(&c->r); #ifdef XRT_FEATURE_WINDOW_PEEK comp_window_peek_destroy(&c->peek); #endif // Does NULL checking. comp_target_destroy(&c->target); // Only depends on vk_bundle and shaders. render_resources_fini(&c->nr); // As long as vk_bundle is valid it's safe to call this function. 
render_shaders_fini(&c->shaders, vk); if (vk->device != VK_NULL_HANDLE) { vk->vkDestroyDevice(vk->device, NULL); vk->device = VK_NULL_HANDLE; } vk_deinit_mutex(vk); if (vk->instance != VK_NULL_HANDLE) { vk->vkDestroyInstance(vk->instance, NULL); vk->instance = VK_NULL_HANDLE; } // Can do this now. u_frame_times_widget_teardown(&c->compositor_frame_times); comp_base_fini(&c->base); free(c); } /* * * xdev functions. * */ static bool compositor_check_and_prepare_xdev(struct comp_compositor *c, struct xrt_device *xdev) { COMP_TRACE_MARKER(); // clang-format off bool has_none = (xdev->hmd->distortion.models & XRT_DISTORTION_MODEL_NONE) != 0; bool has_meshuv = (xdev->hmd->distortion.models & XRT_DISTORTION_MODEL_MESHUV) != 0; bool has_compute = (xdev->hmd->distortion.models & XRT_DISTORTION_MODEL_COMPUTE) != 0; // clang-format on // Everything is okay! :D if (has_meshuv) { return true; } if (!has_none && !has_compute) { COMP_ERROR(c, "The xdev '%s' didn't have none nor compute distortion.", xdev->str); return false; } COMP_WARN(c, "Had to fill in meshuv on xdev '%s', " "this should be done in the driver.", xdev->str); u_distortion_mesh_fill_in_compute(xdev); // clang-format off has_meshuv = (xdev->hmd->distortion.models & XRT_DISTORTION_MODEL_MESHUV) != 0; // clang-format on if (has_meshuv) { return true; } COMP_ERROR(c, "Failed to fill in meshuv on the xdev '%s'.", xdev->str); return false; } /* * * Vulkan functions. 
* */ // If any of these lists are updated, please also update the appropriate column // in `vulkan-extensions.md` static const char *instance_extensions_common[] = { COMP_INSTANCE_EXTENSIONS_COMMON, }; static const char *optional_instance_extensions[] = { #ifdef VK_EXT_swapchain_colorspace VK_EXT_SWAPCHAIN_COLORSPACE_EXTENSION_NAME, #endif #ifdef VK_EXT_display_surface_counter VK_EXT_DISPLAY_SURFACE_COUNTER_EXTENSION_NAME, #endif #if defined VK_EXT_debug_utils && !defined NDEBUG VK_EXT_DEBUG_UTILS_EXTENSION_NAME, #endif }; // Note: Keep synchronized with comp_vk_glue - we should have everything they // do, plus VK_KHR_SWAPCHAIN_EXTENSION_NAME static const char *required_device_extensions[] = { VK_KHR_SWAPCHAIN_EXTENSION_NAME, // VK_KHR_DEDICATED_ALLOCATION_EXTENSION_NAME, // VK_KHR_EXTERNAL_FENCE_EXTENSION_NAME, // VK_KHR_EXTERNAL_MEMORY_EXTENSION_NAME, // VK_KHR_EXTERNAL_SEMAPHORE_EXTENSION_NAME, // VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME, // // Platform version of "external_memory" #if defined(XRT_GRAPHICS_BUFFER_HANDLE_IS_FD) VK_KHR_EXTERNAL_MEMORY_FD_EXTENSION_NAME, #elif defined(XRT_GRAPHICS_BUFFER_HANDLE_IS_AHARDWAREBUFFER) VK_ANDROID_EXTERNAL_MEMORY_ANDROID_HARDWARE_BUFFER_EXTENSION_NAME, VK_KHR_SAMPLER_YCBCR_CONVERSION_EXTENSION_NAME, VK_KHR_MAINTENANCE_1_EXTENSION_NAME, VK_KHR_BIND_MEMORY_2_EXTENSION_NAME, VK_EXT_QUEUE_FAMILY_FOREIGN_EXTENSION_NAME, #elif defined(XRT_GRAPHICS_BUFFER_HANDLE_IS_WIN32_HANDLE) VK_KHR_EXTERNAL_MEMORY_WIN32_EXTENSION_NAME, #else #error "Need port!" #endif // Platform version of "external_fence" and "external_semaphore" #if defined(XRT_GRAPHICS_SYNC_HANDLE_IS_FD) // Optional #elif defined(XRT_GRAPHICS_SYNC_HANDLE_IS_WIN32_HANDLE) VK_KHR_EXTERNAL_SEMAPHORE_WIN32_EXTENSION_NAME, VK_KHR_EXTERNAL_FENCE_WIN32_EXTENSION_NAME, #else #error "Need port!" 
#endif }; static const char *optional_device_extensions[] = { VK_GOOGLE_DISPLAY_TIMING_EXTENSION_NAME, // VK_EXT_GLOBAL_PRIORITY_EXTENSION_NAME, // // Platform version of "external_fence" and "external_semaphore" #if defined(XRT_GRAPHICS_SYNC_HANDLE_IS_FD) VK_KHR_EXTERNAL_SEMAPHORE_FD_EXTENSION_NAME, // VK_KHR_EXTERNAL_FENCE_FD_EXTENSION_NAME, // #elif defined(XRT_GRAPHICS_SYNC_HANDLE_IS_WIN32_HANDLE) // Not optional #elif defined(XRT_GRAPHICS_BUFFER_HANDLE_IS_AHARDWAREBUFFER) #if defined(VK_ANDROID_external_format_resolve) // Requires Vulkan 1.3.268.1 VK_ANDROID_EXTERNAL_FORMAT_RESOLVE_EXTENSION_NAME // #endif #else #error "Need port!" #endif #ifdef VK_KHR_present_id VK_KHR_PRESENT_ID_EXTENSION_NAME, #endif #ifdef VK_KHR_present_wait VK_KHR_PRESENT_WAIT_EXTENSION_NAME, #endif #ifdef VK_KHR_format_feature_flags2 VK_KHR_FORMAT_FEATURE_FLAGS_2_EXTENSION_NAME, #endif #ifdef VK_KHR_global_priority VK_KHR_GLOBAL_PRIORITY_EXTENSION_NAME, #endif #ifdef VK_KHR_image_format_list VK_KHR_IMAGE_FORMAT_LIST_EXTENSION_NAME, #endif #ifdef VK_KHR_maintenance1 VK_KHR_MAINTENANCE_1_EXTENSION_NAME, #endif #ifdef VK_KHR_maintenance2 VK_KHR_MAINTENANCE_2_EXTENSION_NAME, #endif #ifdef VK_KHR_timeline_semaphore VK_KHR_TIMELINE_SEMAPHORE_EXTENSION_NAME, #endif #ifdef VK_EXT_calibrated_timestamps VK_EXT_CALIBRATED_TIMESTAMPS_EXTENSION_NAME, #endif #ifdef VK_EXT_robustness2 VK_EXT_ROBUSTNESS_2_EXTENSION_NAME, #endif #ifdef VK_EXT_display_control VK_EXT_DISPLAY_CONTROL_EXTENSION_NAME, #endif #ifdef VK_KHR_synchronization2 VK_KHR_SYNCHRONIZATION_2_EXTENSION_NAME, #endif }; static bool select_instances_extensions(struct comp_compositor *c, struct u_string_list *required, struct u_string_list *optional) { #ifdef XRT_FEATURE_WINDOW_PEEK if (!comp_window_peek_get_vk_instance_exts(required)) { COMP_ERROR(c, "Failed to get required vulkan instance extensions for peek window."); return false; } #endif return true; } static bool compositor_init_vulkan(struct comp_compositor *c) { COMP_TRACE_MARKER(); 
assert(c->target_factory != NULL); struct vk_bundle *vk = get_vk(c); /* * Instance extensions. */ struct u_string_list *required_instance_ext_list = u_string_list_create(); struct u_string_list *optional_instance_ext_list = u_string_list_create(); // Every backend needs at least the common extensions. u_string_list_append_array( // required_instance_ext_list, // instance_extensions_common, // ARRAY_SIZE(instance_extensions_common)); // // Add per target required extensions. u_string_list_append_array( // required_instance_ext_list, // c->target_factory->required_instance_extensions, // c->target_factory->required_instance_extension_count); // // Optional instance extensions. u_string_list_append_array( // optional_instance_ext_list, // optional_instance_extensions, // ARRAY_SIZE(optional_instance_extensions)); // if (!select_instances_extensions(c, required_instance_ext_list, optional_instance_ext_list)) { COMP_ERROR(c, "Failed to select additional instance extensions."); u_string_list_destroy(&required_instance_ext_list); u_string_list_destroy(&optional_instance_ext_list); return false; } /* * Device extensions. */ struct u_string_list *required_device_extension_list = u_string_list_create(); struct u_string_list *optional_device_extension_list = u_string_list_create(); // Required device extensions. u_string_list_append_array( // required_device_extension_list, // required_device_extensions, // ARRAY_SIZE(required_device_extensions)); // // Optional device extensions. u_string_list_append_array( // optional_device_extension_list, // optional_device_extensions, // ARRAY_SIZE(optional_device_extensions)); // // Add per target optional device extensions. 
u_string_list_append_array( // optional_device_extension_list, // c->target_factory->optional_device_extensions, // c->target_factory->optional_device_extension_count); // // Select required Vulkan version, suitable for both compositor and target uint32_t required_instance_version = MAX(c->target_factory->required_instance_version, VK_API_VERSION_1_0); /* * Create the device. */ struct comp_vulkan_arguments vk_args = { .get_instance_proc_address = vkGetInstanceProcAddr, .required_instance_version = required_instance_version, .required_instance_extensions = required_instance_ext_list, .optional_instance_extensions = optional_instance_ext_list, .required_device_extensions = required_device_extension_list, .optional_device_extensions = optional_device_extension_list, .log_level = c->settings.log_level, .only_compute_queue = c->settings.use_compute, .selected_gpu_index = c->settings.selected_gpu_index, .client_gpu_index = c->settings.client_gpu_index, .timeline_semaphore = true, // Flag is optional, not a hard requirement. 
}; struct comp_vulkan_results vk_res = {0}; bool bundle_ret = comp_vulkan_init_bundle(vk, &vk_args, &vk_res); u_string_list_destroy(&required_instance_ext_list); u_string_list_destroy(&optional_instance_ext_list); u_string_list_destroy(&required_device_extension_list); u_string_list_destroy(&optional_device_extension_list); if (!bundle_ret) { return false; } // clang-format off static_assert(ARRAY_SIZE(vk_res.client_gpu_deviceUUID.data) == XRT_UUID_SIZE, "array size mismatch"); static_assert(ARRAY_SIZE(vk_res.selected_gpu_deviceUUID.data) == XRT_UUID_SIZE, "array size mismatch"); static_assert(ARRAY_SIZE(vk_res.client_gpu_deviceUUID.data) == ARRAY_SIZE(c->settings.client_gpu_deviceUUID.data), "array size mismatch"); static_assert(ARRAY_SIZE(vk_res.selected_gpu_deviceUUID.data) == ARRAY_SIZE(c->settings.selected_gpu_deviceUUID.data), "array size mismatch"); static_assert(ARRAY_SIZE(vk_res.client_gpu_deviceLUID.data) == XRT_LUID_SIZE, "array size mismatch"); static_assert(ARRAY_SIZE(vk_res.client_gpu_deviceLUID.data) == ARRAY_SIZE(c->settings.client_gpu_deviceLUID.data), "array size mismatch"); // clang-format on c->settings.client_gpu_deviceUUID = vk_res.client_gpu_deviceUUID; c->settings.selected_gpu_deviceUUID = vk_res.selected_gpu_deviceUUID; c->settings.client_gpu_index = vk_res.client_gpu_index; c->settings.selected_gpu_index = vk_res.selected_gpu_index; c->settings.client_gpu_deviceLUID = vk_res.client_gpu_deviceLUID; c->settings.client_gpu_deviceLUID_valid = vk_res.client_gpu_deviceLUID_valid; // Tie the lifetimes of swapchains to Vulkan. xrt_result_t xret = comp_swapchain_shared_init(&c->base.cscs, vk); if (xret != XRT_SUCCESS) { return false; } return true; } /* * * Other functions. 
* */ const struct comp_target_factory *ctfs[] = { #if defined VK_USE_PLATFORM_WAYLAND_KHR && defined XRT_HAVE_WAYLAND_DIRECT &comp_target_factory_direct_wayland, #endif #ifdef VK_USE_PLATFORM_WAYLAND_KHR &comp_target_factory_wayland, #endif #ifdef VK_USE_PLATFORM_XLIB_XRANDR_EXT &comp_target_factory_direct_randr, #endif #ifdef VK_USE_PLATFORM_XCB_KHR &comp_target_factory_xcb, #endif #ifdef XRT_OS_ANDROID &comp_target_factory_android, #endif #ifdef XRT_OS_WINDOWS &comp_target_factory_mswin, #endif #ifdef VK_USE_PLATFORM_XLIB_XRANDR_EXT &comp_target_factory_direct_nvidia, #endif #ifdef VK_USE_PLATFORM_DISPLAY_KHR &comp_target_factory_vk_display, #endif &comp_target_factory_debug_image, }; static void error_msg_with_list(struct comp_compositor *c, const char *msg) { struct u_pp_sink_stack_only sink; u_pp_delegate_t dg = u_pp_sink_stack_only_init(&sink); u_pp(dg, "%s, available targets:", msg); for (size_t i = 0; i < ARRAY_SIZE(ctfs); i++) { u_pp(dg, "\n\t%s: %s", ctfs[i]->identifier, ctfs[i]->name); } COMP_ERROR(c, "%s", sink.buffer); } static bool compositor_check_deferred(struct comp_compositor *c, const struct comp_target_factory *ctf) { if (debug_get_bool_option_disable_deferred()) { COMP_DEBUG(c, "Deferred window initialization globally disabled!"); return false; } if (!ctf->is_deferred) { return false; // It is not deferred but that's okay. 
} COMP_DEBUG(c, "Deferred target backend %s selected!", ctf->name); c->target_factory = ctf; c->deferred_surface = true; return true; } static bool compositor_try_window(struct comp_compositor *c, const struct comp_target_factory *ctf) { COMP_TRACE_MARKER(); struct comp_target *ct = NULL; if (!ctf->create_target(ctf, c, &ct)) { return false; } if (!comp_target_init_pre_vulkan(ct)) { ct->destroy(ct); return false; } COMP_DEBUG(c, "Target backend %s initialized!", ct->name); c->target_factory = ctf; c->target = ct; return true; } static bool select_target_factory_from_settings(struct comp_compositor *c, const struct comp_target_factory **out_ctf) { const char *identifier = c->settings.target_identifier; if (identifier == NULL) { return true; // Didn't ask for a target, all ok. } for (size_t i = 0; i < ARRAY_SIZE(ctfs); i++) { const struct comp_target_factory *ctf = ctfs[i]; if (strcmp(ctf->identifier, identifier) == 0) { *out_ctf = ctf; return true; } } char buffer[256]; snprintf(buffer, ARRAY_SIZE(buffer), "Could not find target factory with identifier '%s'", identifier); error_msg_with_list(c, buffer); return false; // User asked for a target that we couldn't find, error. } static bool select_target_factory_by_detecting(struct comp_compositor *c, const struct comp_target_factory **out_ctf) { for (size_t i = 0; i < ARRAY_SIZE(ctfs); i++) { const struct comp_target_factory *ctf = ctfs[i]; if (comp_target_factory_detect(ctf, c)) { *out_ctf = ctf; return true; } } return true; // Didn't detect a target, but that's ok. } static bool compositor_init_window_pre_vulkan(struct comp_compositor *c, const struct comp_target_factory *selected_ctf) { COMP_TRACE_MARKER(); if (selected_ctf == NULL && !select_target_factory_from_settings(c, &selected_ctf)) { return false; // Error! } if (selected_ctf == NULL && !select_target_factory_by_detecting(c, &selected_ctf)) { return false; // Error! } if (selected_ctf != NULL) { // We have selected a target factory, but it needs Vulkan. 
if (selected_ctf->requires_vulkan_for_create) { COMP_INFO(c, "Selected %s backend!", selected_ctf->name); c->target_factory = selected_ctf; return true; } if (compositor_check_deferred(c, selected_ctf)) { return true; } if (!compositor_try_window(c, selected_ctf)) { COMP_ERROR(c, "Failed to init %s backend!", selected_ctf->name); return false; } return true; } for (size_t i = 0; i < ARRAY_SIZE(ctfs); i++) { const struct comp_target_factory *ctf = ctfs[i]; // Skip targets that requires Vulkan. if (ctf->requires_vulkan_for_create) { continue; } if (compositor_check_deferred(c, ctf)) { return true; } if (compositor_try_window(c, ctf)) { return true; } } // Nothing worked, giving up. error_msg_with_list(c, "Failed to create any target"); return false; } static bool compositor_init_window_post_vulkan(struct comp_compositor *c) { COMP_TRACE_MARKER(); assert(c->target_factory != NULL); if (c->target != NULL) { return true; } return compositor_try_window(c, c->target_factory); } static bool compositor_init_swapchain(struct comp_compositor *c) { COMP_TRACE_MARKER(); assert(c->target != NULL); assert(c->target_factory != NULL); if (comp_target_init_post_vulkan(c->target, // c->settings.preferred.width, // c->settings.preferred.height)) { return true; } COMP_ERROR(c, "Window init_swapchain failed!"); comp_target_destroy(&c->target); return false; } static bool compositor_init_render_resources(struct comp_compositor *c) { COMP_TRACE_MARKER(); struct vk_bundle *vk = get_vk(c); if (!render_shaders_load(&c->shaders, vk)) { return false; } if (!render_resources_init(&c->nr, &c->shaders, get_vk(c), c->xdev)) { return false; } return true; } static bool compositor_init_renderer(struct comp_compositor *c) { COMP_TRACE_MARKER(); c->r = comp_renderer_create(c, c->view_extents); #ifdef XRT_FEATURE_WINDOW_PEEK c->peek = comp_window_peek_create(c); #else c->peek = NULL; #endif return c->r != NULL; } xrt_result_t comp_main_create_system_compositor(struct xrt_device *xdev, const struct 
comp_target_factory *ctf, struct u_pacing_app_factory *upaf, struct xrt_system_compositor **out_xsysc) { COMP_TRACE_MARKER(); struct comp_compositor *c = U_TYPED_CALLOC(struct comp_compositor); struct xrt_compositor *iface = &c->base.base.base; iface->begin_session = compositor_begin_session; iface->end_session = compositor_end_session; iface->predict_frame = compositor_predict_frame; iface->mark_frame = compositor_mark_frame; iface->begin_frame = compositor_begin_frame; iface->discard_frame = compositor_discard_frame; iface->layer_commit = compositor_layer_commit; iface->get_display_refresh_rate = compositor_get_display_refresh_rate; iface->request_display_refresh_rate = compositor_request_display_refresh_rate; iface->destroy = compositor_destroy; c->frame.waited.id = -1; c->frame.rendering.id = -1; c->xdev = xdev; xrt_result_t xret = XRT_SUCCESS; COMP_DEBUG(c, "Doing init %p", (void *)c); uint32_t view_count = xdev->hmd->view_count; enum xrt_view_type view_type = 0; // Invalid switch (view_count) { case 0: U_LOG_E("Bug detected: HMD \"%s\" xdev->hmd.view_count must be > 0!", xdev->str); assert(xdev->hmd->view_count > 0); break; case 1: view_type = XRT_VIEW_TYPE_MONO; break; case 2: view_type = XRT_VIEW_TYPE_STEREO; break; default: U_LOG_E("Bug detected: HMD \"%s\" xdev->hmd.view_count must be 1 or 2, not %u!", xdev->str, view_count); assert(view_count == 1 && view_count == 2); break; } // Do this as early as possible. comp_base_init(&c->base); // Init the settings to default. comp_settings_init(&c->settings, xdev); // Init this before the renderer. u_swapchain_debug_init(&c->debug.sc); // Init these before the renderer, not all might be used. 
chl_scratch_init(&c->scratch); c->frame_interval_ns = c->settings.nominal_frame_interval_ns; c->last_frame_time_ns = os_monotonic_get_ns(); double scale = c->settings.viewport_scale; if (scale > 2.0) { scale = 2.0; COMP_DEBUG(c, "Clamped scale to 200%%\n"); } uint32_t w0 = (uint32_t)(xdev->hmd->views[0].display.w_pixels * scale); uint32_t h0 = (uint32_t)(xdev->hmd->views[0].display.h_pixels * scale); c->view_extents.width = w0; c->view_extents.height = h0; // Need to select window backend before creating Vulkan, then // swapchain will initialize the window fully and the swapchain, // and finally the renderer is created which renders to // window/swapchain. // clang-format off if (!compositor_check_and_prepare_xdev(c, xdev) || !compositor_init_window_pre_vulkan(c, ctf) || !compositor_init_vulkan(c) || !compositor_init_render_resources(c)) { COMP_ERROR(c, "Failed to init compositor %p", (void *)c); xret = XRT_ERROR_VULKAN; goto error; } if (!c->deferred_surface) { if (!compositor_init_window_post_vulkan(c) || !compositor_init_swapchain(c) || !compositor_init_renderer(c)) { COMP_ERROR(c, "Failed to init compositor %p", (void*)c); xret = XRT_ERROR_VULKAN; goto error; } comp_target_set_title(c->target, WINDOW_TITLE); } // clang-format on COMP_DEBUG(c, "Done %p", (void *)c); /*! * @todo Support more like, depth/float formats etc, * remember to update the GL client as well. */ struct xrt_compositor_info *info = &c->base.base.base.info; /* * Formats. */ struct comp_vulkan_formats formats = {0}; comp_vulkan_formats_check(get_vk(c), &formats); comp_vulkan_formats_copy_to_info(&formats, info); comp_vulkan_formats_log(c->settings.log_level, &formats); /* * Rest of info. */ struct xrt_system_compositor_info sys_info_storage = {0}; struct xrt_system_compositor_info *sys_info = &sys_info_storage; // Required by OpenXR spec. 
sys_info->max_layers = XRT_MAX_LAYERS; sys_info->compositor_vk_deviceUUID = c->settings.selected_gpu_deviceUUID; sys_info->client_vk_deviceUUID = c->settings.client_gpu_deviceUUID; sys_info->client_d3d_deviceLUID = c->settings.client_gpu_deviceLUID; sys_info->client_d3d_deviceLUID_valid = c->settings.client_gpu_deviceLUID_valid; // @note If timewarp is disabled this is not supported. sys_info->supports_fov_mutable = true; // clang-format off for (uint32_t i = 0; i < view_count; ++i) { uint32_t w = (uint32_t)(xdev->hmd->views[i].display.w_pixels * scale); uint32_t h = (uint32_t)(xdev->hmd->views[i].display.h_pixels * scale); uint32_t w_2 = xdev->hmd->views[i].display.w_pixels * 2; uint32_t h_2 = xdev->hmd->views[i].display.h_pixels * 2; sys_info->view_configs[0].views[i].recommended.width_pixels = w; sys_info->view_configs[0].views[i].recommended.height_pixels = h; sys_info->view_configs[0].views[i].recommended.sample_count = 1; sys_info->view_configs[0].views[i].max.width_pixels = w_2; sys_info->view_configs[0].views[i].max.height_pixels = h_2; sys_info->view_configs[0].views[i].max.sample_count = 1; } // clang-format on sys_info->view_configs[0].view_type = view_type; sys_info->view_configs[0].view_count = view_count; sys_info->view_config_count = 1; // Only one view config for now. // If we can add e.g. video pass-through capabilities, we may need to change (augment) this list. // Just copying it directly right now. 
assert(xdev->hmd->blend_mode_count <= XRT_MAX_DEVICE_BLEND_MODES); assert(xdev->hmd->blend_mode_count != 0); assert(xdev->hmd->blend_mode_count <= ARRAY_SIZE(sys_info->supported_blend_modes)); for (size_t i = 0; i < xdev->hmd->blend_mode_count; ++i) { assert(u_verify_blend_mode_valid(xdev->hmd->blend_modes[i])); sys_info->supported_blend_modes[i] = xdev->hmd->blend_modes[i]; } sys_info->supported_blend_mode_count = (uint8_t)xdev->hmd->blend_mode_count; u_var_add_root(c, "Compositor", true); float target_frame_time_ms = (float)ns_to_ms(c->frame_interval_ns); u_frame_times_widget_init(&c->compositor_frame_times, target_frame_time_ms, 10.f); u_var_add_ro_f32(c, &c->compositor_frame_times.fps, "FPS (Compositor)"); u_var_add_bool(c, &c->debug.atw_off, "Debug: ATW OFF"); u_var_add_bool(c, &c->debug.disable_fast_path, "Debug: Disable fast path"); u_var_add_f32_timing(c, c->compositor_frame_times.debug_var, "Frame Times (Compositor)"); // Only add active views. for (uint32_t i = 0; i < view_count; i++) { char tmp[64] = {0}; snprintf(tmp, sizeof(tmp), "View[%u]", i); u_var_add_native_images_debug(c, &c->scratch.views[i].cssi.unid, tmp); } #ifdef XRT_OS_ANDROID // Get info about display. struct xrt_android_display_metrics metrics; if (!android_custom_surface_get_display_metrics(android_globals_get_vm(), android_globals_get_context(), &metrics)) { U_LOG_E("Could not get Android display metrics."); /* Fallback to default values */ metrics.refresh_rates[0] = 60.0f; metrics.refresh_rate_count = 1; metrics.refresh_rate = metrics.refresh_rates[0]; } // Copy data to info. sys_info->refresh_rate_count = metrics.refresh_rate_count; for (size_t i = 0; i < sys_info->refresh_rate_count; ++i) { sys_info->refresh_rates_hz[i] = metrics.refresh_rates[i]; } #else if (c->target && c->target->get_refresh_rates) { comp_target_get_refresh_rates(c->target, &sys_info->refresh_rate_count, sys_info->refresh_rates_hz); } else { //! 
@todo: Query all supported refresh rates of the current mode sys_info->refresh_rate_count = 1; sys_info->refresh_rates_hz[0] = (float)(1. / time_ns_to_s(c->frame_interval_ns)); } #endif // XRT_OS_ANDROID // Needs to be delayed until after compositor's u_var has been setup. if (!c->deferred_surface) { comp_renderer_add_debug_vars(c->r); } // Standard app pacer. if (upaf == NULL) { xret = u_pa_factory_create(&upaf); if (xret != XRT_SUCCESS || upaf == NULL) { COMP_ERROR(c, "Failed to create app pacing factory"); goto error; } } xret = comp_multi_create_system_compositor(&c->base.base, upaf, sys_info, !c->deferred_surface, out_xsysc); if (xret == XRT_SUCCESS) { return xret; } error: if (c != NULL) { c->base.base.base.destroy(&c->base.base.base); } u_paf_destroy(&upaf); return xret; }