The open source OpenXR runtime

c/util: Add format parameters to scratch image functions

Co-authored-by: Andrei Aristarkhov <aaristarkhov@nvidia.com>
Co-authored-by: Jakob Bornecrantz <tbornecrantz@nvidia.com>
Part-of: <https://gitlab.freedesktop.org/monado/monado/-/merge_requests/2502>

+187 -57
+1
src/xrt/compositor/CMakeLists.txt
··· 182 main/comp_compositor.c 183 main/comp_compositor.h 184 main/comp_documentation.h 185 main/comp_renderer.c 186 main/comp_renderer.h 187 main/comp_settings.c
··· 182 main/comp_compositor.c 183 main/comp_compositor.h 184 main/comp_documentation.h 185 + main/comp_main_interface.h 186 main/comp_renderer.c 187 main/comp_renderer.h 188 main/comp_settings.c
+10 -4
src/xrt/compositor/main/comp_renderer.c
··· 582 VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL); // final_layout 583 584 for (uint32_t i = 0; i < c->nr.view_count; i++) { 585 - bret = comp_scratch_single_images_ensure(&r->c->scratch.views[i], &r->c->base.vk, scratch_extent); 586 if (!bret) { 587 COMP_ERROR(c, "comp_scratch_single_images_ensure: false"); 588 assert(false && "Whelp, can't return an error. But should never really fail."); ··· 937 // Scratch image covers the whole image. 938 struct xrt_normalized_rect layer_norm_rect = {.x = 0.0f, .y = 0.0f, .w = 1.0f, .h = 1.0f}; 939 940 comp_render_gfx_add_view( // 941 &data, // 942 &world_poses[i], // ··· 946 &layer_viewport_data, // 947 &layer_norm_rect, // 948 rsci->image, // 949 - rsci->srgb_view, // 950 &vertex_rots[i], // 951 &viewport_datas[i]); // target_viewport_data 952 ··· 1055 // Scratch image covers the whole image. 1056 struct xrt_normalized_rect layer_norm_rect = {.x = 0.0f, .y = 0.0f, .w = 1.0f, .h = 1.0f}; 1057 1058 comp_render_cs_add_view( // 1059 &data, // 1060 &world_poses[i], // ··· 1063 &layer_viewport_data, // 1064 &layer_norm_rect, // 1065 rsci->image, // 1066 - rsci->srgb_view, // 1067 - rsci->unorm_view, // 1068 &views[i]); // target_viewport_data 1069 1070 if (layer_count == 0) {
··· 582 VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL); // final_layout 583 584 for (uint32_t i = 0; i < c->nr.view_count; i++) { 585 + bret = 586 + comp_scratch_single_images_ensure_mutable(&r->c->scratch.views[i], &r->c->base.vk, scratch_extent); 587 if (!bret) { 588 COMP_ERROR(c, "comp_scratch_single_images_ensure: false"); 589 assert(false && "Whelp, can't return an error. But should never really fail."); ··· 938 // Scratch image covers the whole image. 939 struct xrt_normalized_rect layer_norm_rect = {.x = 0.0f, .y = 0.0f, .w = 1.0f, .h = 1.0f}; 940 941 + VkImageView sample_view = comp_scratch_single_images_get_sample_view(scratch_view, scratch_index); 942 + 943 comp_render_gfx_add_view( // 944 &data, // 945 &world_poses[i], // ··· 949 &layer_viewport_data, // 950 &layer_norm_rect, // 951 rsci->image, // 952 + sample_view, // 953 &vertex_rots[i], // 954 &viewport_datas[i]); // target_viewport_data 955 ··· 1058 // Scratch image covers the whole image. 1059 struct xrt_normalized_rect layer_norm_rect = {.x = 0.0f, .y = 0.0f, .w = 1.0f, .h = 1.0f}; 1060 1061 + VkImageView sample_view = comp_scratch_single_images_get_sample_view(scratch_view, scratch_index); 1062 + VkImageView storage_view = comp_scratch_single_images_get_storage_view(scratch_view, scratch_index); 1063 + 1064 comp_render_cs_add_view( // 1065 &data, // 1066 &world_poses[i], // ··· 1069 &layer_viewport_data, // 1070 &layer_norm_rect, // 1071 rsci->image, // 1072 + sample_view, // 1073 + storage_view, // 1074 &views[i]); // target_viewport_data 1075 1076 if (layer_count == 0) {
+1 -1
src/xrt/compositor/main/comp_window_debug_image.c
··· 136 } 137 138 // Do the allocation. 139 - comp_scratch_single_images_ensure(&dit->target, vk, create_info->extent); 140 141 // Share the Vulkan handles of images and image views. 142 for (uint32_t i = 0; i < COMP_SCRATCH_NUM_IMAGES; i++) {
··· 136 } 137 138 // Do the allocation. 139 + comp_scratch_single_images_ensure_mutable(&dit->target, vk, create_info->extent); 140 141 // Share the Vulkan handles of images and image views. 142 for (uint32_t i = 0; i < COMP_SCRATCH_NUM_IMAGES; i++) {
+103 -50
src/xrt/compositor/util/comp_scratch.c
··· 1 // Copyright 2019-2024, Collabora, Ltd. 2 // SPDX-License-Identifier: BSL-1.0 3 /*! 4 * @file ··· 25 */ 26 27 static inline void 28 - fill_info(VkExtent2D extent, struct xrt_swapchain_create_info *out_info) 29 { 30 enum xrt_swapchain_create_flags create = 0; 31 32 - enum xrt_swapchain_usage_bits bits = // 33 - XRT_SWAPCHAIN_USAGE_COLOR | // 34 - XRT_SWAPCHAIN_USAGE_SAMPLED | // 35 - XRT_SWAPCHAIN_USAGE_TRANSFER_SRC | // 36 - XRT_SWAPCHAIN_USAGE_TRANSFER_DST | // 37 - XRT_SWAPCHAIN_USAGE_UNORDERED_ACCESS | // 38 - XRT_SWAPCHAIN_USAGE_MUTABLE_FORMAT; // 39 40 struct xrt_swapchain_create_info info = { 41 .create = create, 42 .bits = bits, 43 - .format = VK_FORMAT_R8G8B8A8_UNORM, 44 .sample_count = 1, 45 .width = extent.width, 46 .height = extent.height, ··· 49 .mip_count = 1, 50 }; 51 52 - // Use format list to get good performance everywhere. 53 - info.formats[info.format_count++] = VK_FORMAT_R8G8B8A8_UNORM; 54 - info.formats[info.format_count++] = VK_FORMAT_R8G8B8A8_SRGB; 55 56 *out_info = info; 57 } ··· 126 //! Handles retrieved. 127 xrt_graphics_buffer_handle_t handles[COMP_SCRATCH_NUM_IMAGES]; 128 129 - //! For automatic conversion to linear. 130 VkImageView srgb_views[COMP_SCRATCH_NUM_IMAGES]; 131 132 //! For storage operations in compute shaders. ··· 134 }; 135 136 static inline bool 137 - tmp_init_and_create(struct tmp *t, struct vk_bundle *vk, const struct xrt_swapchain_create_info *info) 138 { 139 VkResult ret; 140 ··· 157 */ 158 159 // Base info. 160 - const VkFormat srgb_format = VK_FORMAT_R8G8B8A8_SRGB; 161 - const VkFormat unorm_format = VK_FORMAT_R8G8B8A8_UNORM; 162 const VkImageViewType view_type = VK_IMAGE_VIEW_TYPE_2D; 163 164 // Both usages are common. 
··· 178 for (uint32_t i = 0; i < COMP_SCRATCH_NUM_IMAGES; i++) { 179 VkImage image = t->vkic.images[i].handle; 180 181 - ret = vk_create_view_usage( // 182 - vk, // vk_bundle 183 - image, // image 184 - view_type, // type 185 - srgb_format, // format 186 - srgb_usage, // image_usage 187 - subresource_range, // subresource_range 188 - &t->srgb_views[i]); // out_image_view 189 - VK_CHK_WITH_GOTO(ret, "vk_create_view_usage(srgb)", err_destroy_views); 190 191 - VK_NAME_IMAGE_VIEW(vk, t->srgb_views[i], "comp_scratch_image_view(srgb)"); 192 193 ret = vk_create_view_usage( // 194 vk, // vk_bundle ··· 255 256 /* 257 * 258 - * 'Exported' single functions. 259 * 260 */ 261 262 - void 263 - comp_scratch_single_images_init(struct comp_scratch_single_images *cssi) 264 - { 265 - // Just to be sure. 266 - U_ZERO(cssi); 267 - 268 - indices_init(&cssi->indices); 269 - 270 - u_native_images_debug_init(&cssi->unid); 271 - 272 - // Invalid handle may be different to zero. 273 - for (uint32_t i = 0; i < COMP_SCRATCH_NUM_IMAGES; i++) { 274 - cssi->native_images[i].handle = XRT_GRAPHICS_BUFFER_HANDLE_INVALID; 275 - } 276 - } 277 - 278 bool 279 - comp_scratch_single_images_ensure(struct comp_scratch_single_images *cssi, struct vk_bundle *vk, VkExtent2D extent) 280 { 281 - if (cssi->info.width == extent.width && cssi->info.height == extent.height) { 282 // Our work here is done! 283 return true; 284 } 285 286 struct xrt_swapchain_create_info info = XRT_STRUCT_INIT; 287 - fill_info(extent, &info); 288 289 struct tmp t; // Is initialized in function. 
290 - if (!tmp_init_and_create(&t, vk, &info)) { 291 VK_ERROR(vk, "Failed to allocate images"); 292 return false; 293 } ··· 305 return true; 306 } 307 308 void 309 comp_scratch_single_images_free(struct comp_scratch_single_images *cssi, struct vk_bundle *vk) 310 { ··· 402 bool 403 comp_scratch_stereo_images_ensure(struct comp_scratch_stereo_images *cssi, struct vk_bundle *vk, VkExtent2D extent) 404 { 405 if (cssi->info.width == extent.width && cssi->info.height == extent.height) { 406 // Our work here is done! 407 return true; ··· 409 410 // Get info we need to share with. 411 struct xrt_swapchain_create_info info = XRT_STRUCT_INIT; 412 - fill_info(extent, &info); 413 414 struct tmp ts[2]; // Is initialized in function. 415 - if (!tmp_init_and_create(&ts[0], vk, &info)) { 416 VK_ERROR(vk, "Failed to allocate images for view 0"); 417 return false; 418 } 419 420 - if (!tmp_init_and_create(&ts[1], vk, &info)) { 421 VK_ERROR(vk, "Failed to allocate images for view 1"); 422 goto err_destroy; 423 }
··· 1 // Copyright 2019-2024, Collabora, Ltd. 2 + // Copyright 2024-2025, NVIDIA CORPORATION. 3 // SPDX-License-Identifier: BSL-1.0 4 /*! 5 * @file ··· 26 */ 27 28 static inline void 29 + fill_info(VkExtent2D extent, VkFormat srgb_format, VkFormat unorm_format, struct xrt_swapchain_create_info *out_info) 30 { 31 + // Must be true. 32 + assert(unorm_format != VK_FORMAT_UNDEFINED); 33 + 34 enum xrt_swapchain_create_flags create = 0; 35 36 + enum xrt_swapchain_usage_bits bits = // 37 + XRT_SWAPCHAIN_USAGE_COLOR | // 38 + XRT_SWAPCHAIN_USAGE_SAMPLED | // 39 + XRT_SWAPCHAIN_USAGE_TRANSFER_SRC | // 40 + XRT_SWAPCHAIN_USAGE_TRANSFER_DST | // 41 + XRT_SWAPCHAIN_USAGE_UNORDERED_ACCESS; // 42 43 struct xrt_swapchain_create_info info = { 44 .create = create, 45 .bits = bits, 46 + .format = unorm_format, 47 .sample_count = 1, 48 .width = extent.width, 49 .height = extent.height, ··· 52 .mip_count = 1, 53 }; 54 55 + if (srgb_format != VK_FORMAT_UNDEFINED) { 56 + // Use format list to get good performance everywhere. 57 + info.bits |= XRT_SWAPCHAIN_USAGE_MUTABLE_FORMAT; 58 + info.formats[info.format_count++] = unorm_format; 59 + info.formats[info.format_count++] = srgb_format; 60 + } else { 61 + assert(info.format_count == 0); 62 + } 63 64 *out_info = info; 65 } ··· 134 //! Handles retrieved. 135 xrt_graphics_buffer_handle_t handles[COMP_SCRATCH_NUM_IMAGES]; 136 137 + //! For automatic conversion to linear, only populated on mutable. 138 VkImageView srgb_views[COMP_SCRATCH_NUM_IMAGES]; 139 140 //! For storage operations in compute shaders. ··· 142 }; 143 144 static inline bool 145 + tmp_init_and_create(struct tmp *t, 146 + struct vk_bundle *vk, 147 + const struct xrt_swapchain_create_info *info, 148 + const VkFormat srgb_format, 149 + const VkFormat unorm_format) 150 { 151 VkResult ret; 152 ··· 169 */ 170 171 // Base info. 172 const VkImageViewType view_type = VK_IMAGE_VIEW_TYPE_2D; 173 174 // Both usages are common. 
··· 188 for (uint32_t i = 0; i < COMP_SCRATCH_NUM_IMAGES; i++) { 189 VkImage image = t->vkic.images[i].handle; 190 191 + if (srgb_format != VK_FORMAT_UNDEFINED) { 192 + ret = vk_create_view_usage( // 193 + vk, // vk_bundle 194 + image, // image 195 + view_type, // type 196 + srgb_format, // format 197 + srgb_usage, // image_usage 198 + subresource_range, // subresource_range 199 + &t->srgb_views[i]); // out_image_view 200 + VK_CHK_WITH_GOTO(ret, "vk_create_view_usage(srgb)", err_destroy_views); 201 202 + VK_NAME_IMAGE_VIEW(vk, t->srgb_views[i], "comp_scratch_image_view(srgb)"); 203 + } 204 205 ret = vk_create_view_usage( // 206 vk, // vk_bundle ··· 267 268 /* 269 * 270 + * Helper single functions. 271 * 272 */ 273 274 bool 275 + ensure(struct comp_scratch_single_images *cssi, 276 + struct vk_bundle *vk, 277 + VkExtent2D extent, 278 + const VkFormat srgb_format, 279 + const VkFormat unorm_format) 280 { 281 + if (cssi->info.width == extent.width && // 282 + cssi->info.height == extent.height && // 283 + cssi->info.formats[0] == unorm_format && // 284 + cssi->info.formats[1] == srgb_format) { // 285 // Our work here is done! 286 return true; 287 } 288 289 struct xrt_swapchain_create_info info = XRT_STRUCT_INIT; 290 + fill_info(extent, srgb_format, unorm_format, &info); 291 292 struct tmp t; // Is initialized in function. 293 + if (!tmp_init_and_create(&t, vk, &info, srgb_format, unorm_format)) { 294 VK_ERROR(vk, "Failed to allocate images"); 295 return false; 296 } ··· 308 return true; 309 } 310 311 + 312 + /* 313 + * 314 + * 'Exported' single functions. 315 + * 316 + */ 317 + 318 + void 319 + comp_scratch_single_images_init(struct comp_scratch_single_images *cssi) 320 + { 321 + // Just to be sure. 322 + U_ZERO(cssi); 323 + 324 + indices_init(&cssi->indices); 325 + 326 + u_native_images_debug_init(&cssi->unid); 327 + 328 + // Invalid handle may be different to zero. 
329 + for (uint32_t i = 0; i < COMP_SCRATCH_NUM_IMAGES; i++) { 330 + cssi->native_images[i].handle = XRT_GRAPHICS_BUFFER_HANDLE_INVALID; 331 + } 332 + } 333 + 334 + bool 335 + comp_scratch_single_images_ensure(struct comp_scratch_single_images *cssi, 336 + struct vk_bundle *vk, 337 + VkExtent2D extent, 338 + const VkFormat format) 339 + { 340 + return ensure(cssi, vk, extent, VK_FORMAT_UNDEFINED, format); 341 + } 342 + 343 + /*! 344 + * Ensure that the scratch images are allocated and match @p extent size, using mutable 8-bit sRGB/UNORM formats. 345 + * 346 + * @public @memberof comp_scratch_single_images 347 + * 348 + * @ingroup comp_util 349 + */ 350 + bool 351 + comp_scratch_single_images_ensure_mutable(struct comp_scratch_single_images *cssi, 352 + struct vk_bundle *vk, 353 + VkExtent2D extent) 354 + { 355 + return ensure(cssi, vk, extent, VK_FORMAT_R8G8B8A8_SRGB, VK_FORMAT_R8G8B8A8_UNORM); 356 + } 357 + 358 void 359 comp_scratch_single_images_free(struct comp_scratch_single_images *cssi, struct vk_bundle *vk) 360 { ··· 452 bool 453 comp_scratch_stereo_images_ensure(struct comp_scratch_stereo_images *cssi, struct vk_bundle *vk, VkExtent2D extent) 454 { 455 + const VkFormat srgb_format = VK_FORMAT_R8G8B8A8_SRGB; 456 + const VkFormat unorm_format = VK_FORMAT_R8G8B8A8_UNORM; 457 + 458 if (cssi->info.width == extent.width && cssi->info.height == extent.height) { 459 // Our work here is done! 460 return true; ··· 462 463 // Get info we need to share with. 464 struct xrt_swapchain_create_info info = XRT_STRUCT_INIT; 465 + fill_info(extent, srgb_format, unorm_format, &info); 466 467 struct tmp ts[2]; // Is initialized in function. 468 + if (!tmp_init_and_create(&ts[0], vk, &info, srgb_format, unorm_format)) { 469 VK_ERROR(vk, "Failed to allocate images for view 0"); 470 return false; 471 } 472 473 + if (!tmp_init_and_create(&ts[1], vk, &info, srgb_format, unorm_format)) { 474 VK_ERROR(vk, "Failed to allocate images for view 1"); 475 goto err_destroy; 476 }
+72 -2
src/xrt/compositor/util/comp_scratch.h
··· 1 // Copyright 2019-2024, Collabora, Ltd. 2 // SPDX-License-Identifier: BSL-1.0 3 /*! 4 * @file ··· 99 comp_scratch_single_images_init(struct comp_scratch_single_images *cssi); 100 101 /*! 102 - * Ensure that the scratch images are allocated and match @p extent size. 103 * 104 * @public @memberof comp_scratch_single_images 105 * 106 * @ingroup comp_util 107 */ 108 bool 109 - comp_scratch_single_images_ensure(struct comp_scratch_single_images *cssi, struct vk_bundle *vk, VkExtent2D extent); 110 111 /*! 112 * Free all images allocated, @p init must be called before calling this ··· 129 */ 130 void 131 comp_scratch_single_images_get(struct comp_scratch_single_images *cssi, uint32_t *out_index); 132 133 /*! 134 * After calling @p get and rendering to the image you call this function to
··· 1 // Copyright 2019-2024, Collabora, Ltd. 2 + // Copyright 2024-2025, NVIDIA CORPORATION. 3 // SPDX-License-Identifier: BSL-1.0 4 /*! 5 * @file ··· 100 comp_scratch_single_images_init(struct comp_scratch_single_images *cssi); 101 102 /*! 103 + * Ensure that the scratch images are allocated and match @p extent size, and 104 + * @p format. 105 + * 106 + * @public @memberof comp_scratch_single_images 107 + * 108 + * @ingroup comp_util 109 + */ 110 + bool 111 + comp_scratch_single_images_ensure(struct comp_scratch_single_images *cssi, 112 + struct vk_bundle *vk, 113 + VkExtent2D extent, 114 + const VkFormat format); 115 + 116 + /*! 117 + * Ensure that the scratch images are allocated and match @p extent size, 118 + * the formats it will get are 8-bit sRGB formats. 119 * 120 * @public @memberof comp_scratch_single_images 121 * 122 * @ingroup comp_util 123 */ 124 bool 125 + comp_scratch_single_images_ensure_mutable(struct comp_scratch_single_images *cssi, 126 + struct vk_bundle *vk, 127 + VkExtent2D extent); 128 129 /*! 130 * Free all images allocated, @p init must be called before calling this ··· 147 */ 148 void 149 comp_scratch_single_images_get(struct comp_scratch_single_images *cssi, uint32_t *out_index); 150 + 151 + /*! 152 + * Get the image for the given index. 153 + * 154 + * @public @memberof comp_scratch_single_images 155 + * 156 + * @ingroup comp_util 157 + */ 158 + static inline VkImage 159 + comp_scratch_single_images_get_image(struct comp_scratch_single_images *cssi, uint32_t index) 160 + { 161 + return cssi->images[index].image; 162 + } 163 + 164 + /*! 165 + * Get the image view for sampling; it will apply any automatic linearization, 166 + * aka sRGB gamma curve correction. 
167 + * 168 + * @public @memberof comp_scratch_single_images 169 + * 170 + * @ingroup comp_util 171 + */ 172 + static inline VkImageView 173 + comp_scratch_single_images_get_sample_view(struct comp_scratch_single_images *cssi, uint32_t index) 174 + { 175 + struct render_scratch_color_image *rsci = &cssi->images[index]; 176 + 177 + VkImageView view = rsci->srgb_view; 178 + if (view != VK_NULL_HANDLE) { 179 + return view; 180 + } 181 + 182 + // Fallback to unorm view. 183 + return rsci->unorm_view; 184 + } 185 + 186 + /*! 187 + * Get the image view for storage or direct value, no linearization will be 188 + * done. 189 + * 190 + * @public @memberof comp_scratch_single_images 191 + * 192 + * @ingroup comp_util 193 + */ 194 + static inline VkImageView 195 + comp_scratch_single_images_get_storage_view(struct comp_scratch_single_images *cssi, uint32_t index) 196 + { 197 + struct render_scratch_color_image *rsci = &cssi->images[index]; 198 + 199 + // Always the storage view. 200 + return rsci->unorm_view; 201 + } 202 203 /*! 204 * After calling @p get and rendering to the image you call this function to