// Copyright 2019-2023, Collabora, Ltd.
// SPDX-License-Identifier: BSL-1.0
/*!
 * @file
 * @brief Shared resources for rendering.
 * @author Lubosz Sarnecki <lubosz.sarnecki@collabora.com>
 * @author Jakob Bornecrantz <jakob@collabora.com>
 * @ingroup comp_render
 */

#include "xrt/xrt_device.h"

#include "math/m_api.h"
#include "math/m_matrix_2x2.h"
#include "math/m_vec2.h"

#include "vk/vk_mini_helpers.h"

#include "render/render_interface.h"


#include <stdio.h>


/*
 *
 * Gfx shared
 *
 */

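/*
 * Creates the descriptor set layout shared by the graphics paths: one
 * combined image sampler for the source texture (fragment stage) and one
 * uniform buffer visible to both the vertex and fragment stages. The
 * caller supplies the binding numbers.
 */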
XRT_CHECK_RESULT static VkResult
create_gfx_ubo_and_src_descriptor_set_layout(struct vk_bundle *vk,
                                             uint32_t ubo_binding,
                                             uint32_t src_binding,
                                             VkDescriptorSetLayout *out_descriptor_set_layout)
{
	VkResult ret;

	VkDescriptorSetLayoutBinding set_layout_bindings[2] = {
	    {
	        .binding = src_binding,
	        .descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
	        .descriptorCount = 1,
	        .stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT,
	    },
	    {
	        .binding = ubo_binding,
	        .descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
	        .descriptorCount = 1,
	        .stageFlags = VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_FRAGMENT_BIT,
	    },
	};

	VkDescriptorSetLayoutCreateInfo set_layout_info = {
	    .sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO,
	    .bindingCount = ARRAY_SIZE(set_layout_bindings),
	    .pBindings = set_layout_bindings,
	};

	VkDescriptorSetLayout descriptor_set_layout = VK_NULL_HANDLE;
	ret = vk->vkCreateDescriptorSetLayout(vk->device, //
	                                      &set_layout_info, //
	                                      NULL, //
	                                      &descriptor_set_layout); //
	VK_CHK_AND_RET(ret, "vkCreateDescriptorSetLayout");

	*out_descriptor_set_layout = descriptor_set_layout;

	return VK_SUCCESS;
}


/*
 *
 * Mesh
 *
 */

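/*
 * Uploads the distortion mesh data into host-visible, host-coherent vertex
 * and index buffers. A zero-sized vertex or index buffer is treated as
 * success, so meshes without data (or without indices) are handled
 * gracefully.
 */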
XRT_CHECK_RESULT static bool
init_mesh_vertex_buffers(struct vk_bundle *vk,
                         struct render_buffer *vbo,
                         struct render_buffer *ibo,
                         uint32_t vertex_count,
                         uint32_t stride,
                         void *vertices,
                         uint32_t index_counts,
                         void *indices)
{
	VkResult ret;

	// Using the same flags for all vbos.
	VkBufferUsageFlags vbo_usage_flags = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT;
	VkBufferUsageFlags ibo_usage_flags = VK_BUFFER_USAGE_INDEX_BUFFER_BIT;
	VkMemoryPropertyFlags memory_property_flags =
	    VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;

	// Distortion vbo and ibo sizes.
	VkDeviceSize vbo_size = stride * vertex_count;
	VkDeviceSize ibo_size = sizeof(int) * index_counts;


	// Don't create vbo if size is zero.
	if (vbo_size == 0) {
		return true;
	}

	ret = render_buffer_init( //
	    vk, // vk_bundle
	    vbo, // buffer
	    vbo_usage_flags, // usage_flags
	    memory_property_flags, // memory_property_flags
	    vbo_size); // size
	VK_CHK_WITH_RET(ret, "render_buffer_init", false);
	VK_NAME_BUFFER(vk, vbo->buffer, "mesh vbo");

	ret = render_buffer_write( //
	    vk, // vk_bundle
	    vbo, // buffer
	    vertices, // data
	    vbo_size); // size
	VK_CHK_WITH_RET(ret, "render_buffer_write", false);


	// Don't create index buffer if size is zero.
	if (ibo_size == 0) {
		return true;
	}

	ret = render_buffer_init( //
	    vk, // vk_bundle
	    ibo, // buffer
	    ibo_usage_flags, // usage_flags
	    memory_property_flags, // memory_property_flags
	    ibo_size); // size
	VK_CHK_WITH_RET(ret, "render_buffer_init", false);
	VK_NAME_BUFFER(vk, ibo->buffer, "mesh ibo");

	ret = render_buffer_write( //
	    vk, // vk_bundle
	    ibo, // buffer
	    indices, // data
	    ibo_size); // size
	VK_CHK_WITH_RET(ret, "render_buffer_write", false);

	return true;
}

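/*
 * Creates one uniform buffer per view for the mesh distortion shader and
 * leaves each buffer mapped, so per-frame data can be written directly.
 */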
XRT_CHECK_RESULT static bool
init_mesh_ubo_buffers(struct vk_bundle *vk, struct render_buffer ubo[XRT_MAX_VIEWS], uint32_t view_count)
{
	VkResult ret;

	// Using the same flags for all ubos.
	VkBufferUsageFlags ubo_usage_flags = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT;
	VkMemoryPropertyFlags memory_property_flags =
	    VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;

	// Distortion ubo size.
	VkDeviceSize ubo_size = sizeof(struct render_gfx_mesh_ubo_data);
	for (uint32_t i = 0; i < view_count; ++i) {
		ret = render_buffer_init(vk, //
		                         &ubo[i], //
		                         ubo_usage_flags, //
		                         memory_property_flags, //
		                         ubo_size); // size
		VK_CHK_WITH_RET(ret, "render_buffer_init", false);
		char name[20];
		snprintf(name, sizeof(name), "mesh ubo %u", i);
		VK_NAME_BUFFER(vk, ubo[i].buffer, name);

		ret = render_buffer_map(vk, &ubo[i]);
		VK_CHK_WITH_RET(ret, "render_buffer_map", false);
	}
	return true;
}


/*
 *
 * Compute
 *
 */

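/*
 * Descriptor set layout for the compute layer shader: an array of combined
 * image samplers for the source layer images (sized by source_images_count),
 * one storage image target and one UBO, all accessed from the compute stage.
 */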
XRT_CHECK_RESULT static VkResult
create_compute_layer_descriptor_set_layout(struct vk_bundle *vk,
                                           uint32_t src_binding,
                                           uint32_t target_binding,
                                           uint32_t ubo_binding,
                                           uint32_t source_images_count,
                                           VkDescriptorSetLayout *out_descriptor_set_layout)
{
	VkResult ret;

	VkDescriptorSetLayoutBinding set_layout_bindings[3] = {
	    {
	        .binding = src_binding,
	        .descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
	        .descriptorCount = source_images_count,
	        .stageFlags = VK_SHADER_STAGE_COMPUTE_BIT,
	    },
	    {
	        .binding = target_binding,
	        .descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_IMAGE,
	        .descriptorCount = 1,
	        .stageFlags = VK_SHADER_STAGE_COMPUTE_BIT,
	    },
	    {
	        .binding = ubo_binding,
	        .descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
	        .descriptorCount = 1,
	        .stageFlags = VK_SHADER_STAGE_COMPUTE_BIT,
	    },
	};

	VkDescriptorSetLayoutCreateInfo set_layout_info = {
	    .sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO,
	    .bindingCount = ARRAY_SIZE(set_layout_bindings),
	    .pBindings = set_layout_bindings,
	};

	VkDescriptorSetLayout descriptor_set_layout = VK_NULL_HANDLE;
	ret = vk->vkCreateDescriptorSetLayout( //
	    vk->device, //
	    &set_layout_info, //
	    NULL, //
	    &descriptor_set_layout); //
	VK_CHK_AND_RET(ret, "vkCreateDescriptorSetLayout");

	*out_descriptor_set_layout = descriptor_set_layout;

	return VK_SUCCESS;
}

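/*
 * Descriptor set layout shared by the compute distortion and clear
 * pipelines: two sampled source images, six distortion images, one storage
 * image target and one UBO, all accessed from the compute stage.
 */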
XRT_CHECK_RESULT static VkResult
create_compute_distortion_descriptor_set_layout(struct vk_bundle *vk,
                                                uint32_t src_binding,
                                                uint32_t distortion_binding,
                                                uint32_t target_binding,
                                                uint32_t ubo_binding,
                                                VkDescriptorSetLayout *out_descriptor_set_layout)
{
	VkResult ret;

	VkDescriptorSetLayoutBinding set_layout_bindings[4] = {
	    {
	        .binding = src_binding,
	        .descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
	        .descriptorCount = 2,
	        .stageFlags = VK_SHADER_STAGE_COMPUTE_BIT,
	    },
	    {
	        .binding = distortion_binding,
	        .descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
	        .descriptorCount = 6,
	        .stageFlags = VK_SHADER_STAGE_COMPUTE_BIT,
	    },
	    {
	        .binding = target_binding,
	        .descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_IMAGE,
	        .descriptorCount = 1,
	        .stageFlags = VK_SHADER_STAGE_COMPUTE_BIT,
	    },
	    {
	        .binding = ubo_binding,
	        .descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
	        .descriptorCount = 1,
	        .stageFlags = VK_SHADER_STAGE_COMPUTE_BIT,
	    },
	};

	VkDescriptorSetLayoutCreateInfo set_layout_info = {
	    .sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO,
	    .bindingCount = ARRAY_SIZE(set_layout_bindings),
	    .pBindings = set_layout_bindings,
	};

	VkDescriptorSetLayout descriptor_set_layout = VK_NULL_HANDLE;
	ret = vk->vkCreateDescriptorSetLayout( //
	    vk->device, //
	    &set_layout_info, //
	    NULL, //
	    &descriptor_set_layout); //
	VK_CHK_AND_RET(ret, "vkCreateDescriptorSetLayout");

	*out_descriptor_set_layout = descriptor_set_layout;

	return VK_SUCCESS;
}

struct compute_layer_params
{
	VkBool32 do_timewarp;
	VkBool32 do_color_correction;
	uint32_t max_layers;
	uint32_t image_array_size;
};

struct compute_distortion_params
{
	uint32_t distortion_texel_count;
	VkBool32 do_timewarp;
};

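/*
 * The compute pipelines are specialised at creation time: the ENTRY macros
 * below map a shader constant_id to the offset and size of a field in the
 * params struct, and the VkSpecializationInfo passes the whole struct as
 * the data blob. This lets one shader module provide the timewarp and
 * non-timewarp variants, with the remaining options baked in as
 * compile-time constants.
 */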
XRT_CHECK_RESULT static VkResult
create_compute_layer_pipeline(struct vk_bundle *vk,
                              VkPipelineCache pipeline_cache,
                              VkShaderModule shader,
                              VkPipelineLayout pipeline_layout,
                              const struct compute_layer_params *params,
                              VkPipeline *out_compute_pipeline)
{
#define ENTRY(ID, FIELD) \
	{ \
	    .constantID = ID, \
	    .offset = offsetof(struct compute_layer_params, FIELD), \
	    .size = sizeof(params->FIELD), \
	}

	VkSpecializationMapEntry entries[] = {
	    ENTRY(1, do_timewarp), //
	    ENTRY(2, do_color_correction), //
	    ENTRY(3, max_layers), //
	    ENTRY(4, image_array_size), //
	};
#undef ENTRY

	VkSpecializationInfo specialization_info = {
	    .mapEntryCount = ARRAY_SIZE(entries),
	    .pMapEntries = entries,
	    .dataSize = sizeof(*params),
	    .pData = params,
	};

	return vk_create_compute_pipeline( //
	    vk, // vk_bundle
	    pipeline_cache, // pipeline_cache
	    shader, // shader
	    pipeline_layout, // pipeline_layout
	    &specialization_info, // specialization_info
	    out_compute_pipeline); // out_compute_pipeline
}

XRT_CHECK_RESULT static VkResult
create_compute_distortion_pipeline(struct vk_bundle *vk,
                                   VkPipelineCache pipeline_cache,
                                   VkShaderModule shader,
                                   VkPipelineLayout pipeline_layout,
                                   const struct compute_distortion_params *params,
                                   VkPipeline *out_compute_pipeline)
{
#define ENTRY(ID, FIELD) \
	{ \
	    .constantID = ID, \
	    .offset = offsetof(struct compute_distortion_params, FIELD), \
	    .size = sizeof(params->FIELD), \
	}

	VkSpecializationMapEntry entries[2] = {
	    ENTRY(0, distortion_texel_count),
	    ENTRY(1, do_timewarp),
	};
#undef ENTRY

	VkSpecializationInfo specialization_info = {
	    .mapEntryCount = ARRAY_SIZE(entries),
	    .pMapEntries = entries,
	    .dataSize = sizeof(*params),
	    .pData = params,
	};

	return vk_create_compute_pipeline( //
	    vk, // vk_bundle
	    pipeline_cache, // pipeline_cache
	    shader, // shader
	    pipeline_layout, // pipeline_layout
	    &specialization_info, // specialization_info
	    out_compute_pipeline); // out_compute_pipeline
}


/*
 *
 * Mock image.
 *
 */

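/*
 * Transitions the mock image from UNDEFINED to SHADER_READ_ONLY_OPTIMAL so
 * it can be bound as a placeholder sampled image in descriptor sets before
 * any real content exists.
 */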
XRT_CHECK_RESULT static VkResult
prepare_mock_image_locked(struct vk_bundle *vk, VkCommandBuffer cmd, VkImage dst)
{
	VkImageSubresourceRange subresource_range = {
	    .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
	    .baseMipLevel = 0,
	    .levelCount = VK_REMAINING_MIP_LEVELS,
	    .baseArrayLayer = 0,
	    .layerCount = VK_REMAINING_ARRAY_LAYERS,
	};

	vk_cmd_image_barrier_gpu_locked( //
	    vk, //
	    cmd, //
	    dst, //
	    0, //
	    VK_ACCESS_TRANSFER_WRITE_BIT, //
	    VK_IMAGE_LAYOUT_UNDEFINED, //
	    VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, //
	    subresource_range); //

	return VK_SUCCESS;
}


/*
 *
 * Scratch image.
 *
 */

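/*
 * The scratch image is created with a mutable RGBA format so that two views
 * can alias the same memory: an sRGB view for sampling and rendering, and a
 * UNORM view for compute storage, since sRGB storage images are poorly
 * supported (see the usage flags below).
 */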
XRT_CHECK_RESULT static bool
create_scratch_image_and_view(struct vk_bundle *vk, VkExtent2D extent, struct render_scratch_color_image *rsci)
{
	VkResult ret;

	VkFormat srgb_format = VK_FORMAT_R8G8B8A8_SRGB;
	VkFormat unorm_format = VK_FORMAT_R8G8B8A8_UNORM;
	VkImageViewType view_type = VK_IMAGE_VIEW_TYPE_2D;

	VkDeviceMemory device_memory = VK_NULL_HANDLE;
	VkImage image = VK_NULL_HANDLE;
	VkImageView srgb_view = VK_NULL_HANDLE;
	VkImageView unorm_view = VK_NULL_HANDLE;

	// Both usages are common.
	VkImageUsageFlags unorm_usage = VK_IMAGE_USAGE_STORAGE_BIT | VK_IMAGE_USAGE_SAMPLED_BIT;

	// Very few cards support SRGB storage.
	VkImageUsageFlags srgb_usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;

	// Combination of both.
	VkImageUsageFlags image_usage = unorm_usage | srgb_usage;

	ret = vk_create_image_mutable_rgba( //
	    vk, // vk_bundle
	    extent, // extent
	    image_usage, // usage
	    &device_memory, // out_device_memory
	    &image); // out_image
	VK_CHK_WITH_RET(ret, "vk_create_image_mutable_rgba", false);

	VK_NAME_DEVICE_MEMORY(vk, device_memory, "render_scratch_color_image device_memory");
	VK_NAME_IMAGE(vk, image, "render_scratch_color_image image");

	VkImageSubresourceRange subresource_range = {
	    .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
	    .baseMipLevel = 0,
	    .levelCount = VK_REMAINING_MIP_LEVELS,
	    .baseArrayLayer = 0,
	    .layerCount = VK_REMAINING_ARRAY_LAYERS,
	};

	ret = vk_create_view_usage( //
	    vk, // vk_bundle
	    image, // image
	    view_type, // type
	    srgb_format, // format
	    srgb_usage, // image_usage
	    subresource_range, // subresource_range
	    &srgb_view); // out_image_view
	VK_CHK_WITH_RET(ret, "vk_create_view_usage", false);

	VK_NAME_IMAGE_VIEW(vk, srgb_view, "render_scratch_color_image image view srgb");

	ret = vk_create_view_usage( //
	    vk, // vk_bundle
	    image, // image
	    view_type, // type
	    unorm_format, // format
	    unorm_usage, // image_usage
	    subresource_range, // subresource_range
	    &unorm_view); // out_image_view
	VK_CHK_WITH_RET(ret, "vk_create_view_usage", false);

	VK_NAME_IMAGE_VIEW(vk, unorm_view, "render_scratch_color_image image view unorm");

	rsci->device_memory = device_memory;
	rsci->image = image;
	rsci->srgb_view = srgb_view;
	rsci->unorm_view = unorm_view;

	return true;
}

static void
teardown_scratch_color_image(struct vk_bundle *vk, struct render_scratch_color_image *rsci)
{
	D(ImageView, rsci->unorm_view);
	D(ImageView, rsci->srgb_view);
	D(Image, rsci->image);
	DF(Memory, rsci->device_memory);
}


/*
 *
 * 'Exported' renderer functions.
 *
 */

bool
render_resources_init(struct render_resources *r,
                      struct render_shaders *shaders,
                      struct vk_bundle *vk,
                      struct xrt_device *xdev)
{
	VkResult ret;
	bool bret;

	/*
	 * Main pointers.
	 */

	r->vk = vk;
	r->shaders = shaders;


	/*
	 * Constants
	 */

	r->view_count = xdev->hmd->view_count;
	r->mesh.src_binding = 0;
	r->mesh.ubo_binding = 1;
	struct xrt_hmd_parts *parts = xdev->hmd;
	r->mesh.vertex_count = parts->distortion.mesh.vertex_count;
	r->mesh.stride = parts->distortion.mesh.stride;
	r->mesh.index_count_total = parts->distortion.mesh.index_count_total;
	for (uint32_t i = 0; i < r->view_count; ++i) {
		r->mesh.index_counts[i] = parts->distortion.mesh.index_counts[i];
		r->mesh.index_offsets[i] = parts->distortion.mesh.index_offsets[i];
	}
	r->compute.src_binding = 0;
	r->compute.distortion_binding = 1;
	r->compute.target_binding = 2;
	r->compute.ubo_binding = 3;

	r->compute.layer.image_array_size =
	    MIN(vk->limits.max_per_stage_descriptor_sampled_images, RENDER_MAX_IMAGES_SIZE);


	/*
	 * Common samplers.
	 */

	ret = vk_create_sampler( //
	    vk, // vk_bundle
	    VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE, // clamp_mode
	    &r->samplers.mock); // out_sampler
	VK_CHK_WITH_RET(ret, "vk_create_sampler", false);

	VK_NAME_SAMPLER(vk, r->samplers.mock, "render_resources sampler mock");

	ret = vk_create_sampler( //
	    vk, // vk_bundle
	    VK_SAMPLER_ADDRESS_MODE_REPEAT, // clamp_mode
	    &r->samplers.repeat); // out_sampler
	VK_CHK_WITH_RET(ret, "vk_create_sampler", false);

	VK_NAME_SAMPLER(vk, r->samplers.repeat, "render_resources sampler repeat");

	ret = vk_create_sampler( //
	    vk, // vk_bundle
	    VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE, // clamp_mode
	    &r->samplers.clamp_to_edge); // out_sampler
	VK_CHK_WITH_RET(ret, "vk_create_sampler", false);

	VK_NAME_SAMPLER(vk, r->samplers.clamp_to_edge, "render_resources sampler clamp_to_edge");

	ret = vk_create_sampler( //
	    vk, // vk_bundle
	    VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER, // clamp_mode
	    &r->samplers.clamp_to_border_black); // out_sampler
	VK_CHK_WITH_RET(ret, "vk_create_sampler", false);

	VK_NAME_SAMPLER(vk, r->samplers.clamp_to_border_black, "render_resources sampler clamp_to_border_black");


	/*
	 * Command buffer pool, needs to go first.
	 */

	ret = vk_cmd_pool_init(vk, &r->distortion_pool, VK_COMMAND_POOL_CREATE_TRANSIENT_BIT);
	VK_CHK_WITH_RET(ret, "vk_cmd_pool_init", false);

	VK_NAME_COMMAND_POOL(vk, r->distortion_pool.pool, "render_resources distortion command pool");

	VkCommandPoolCreateInfo command_pool_info = {
	    .sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO,
	    .flags = VK_COMMAND_POOL_CREATE_TRANSIENT_BIT,
	    .queueFamilyIndex = vk->main_queue->family_index,
	};

	ret = vk->vkCreateCommandPool(vk->device, &command_pool_info, NULL, &r->cmd_pool);
	VK_CHK_WITH_RET(ret, "vkCreateCommandPool", false);

	VK_NAME_COMMAND_POOL(vk, r->cmd_pool, "render_resources command pool");


	/*
	 * Mock, used as a default empty image.
	 */

	{
		VkFormat format = VK_FORMAT_R8G8B8A8_UNORM;
		VkImageUsageFlags usage = VK_IMAGE_USAGE_SAMPLED_BIT;
		VkExtent2D extent = {1, 1};

		VkImageSubresourceRange subresource_range = {
		    .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
		    .baseMipLevel = 0,
		    .levelCount = 1,
		    .baseArrayLayer = 0,
		    .layerCount = 1,
		};

		ret = vk_create_image_simple( //
		    vk, // vk_bundle
		    extent, // extent
		    format, // format
		    usage, // usage
		    &r->mock.color.memory, // out_mem
		    &r->mock.color.image); // out_image
		VK_CHK_WITH_RET(ret, "vk_create_image_simple", false);

		VK_NAME_DEVICE_MEMORY(vk, r->mock.color.memory, "render_resources mock color device memory");
		VK_NAME_IMAGE(vk, r->mock.color.image, "render_resources mock color image");

		ret = vk_create_view( //
		    vk, // vk_bundle
		    r->mock.color.image, // image
		    VK_IMAGE_VIEW_TYPE_2D, // type
		    format, // format
		    subresource_range, // subresource_range
		    &r->mock.color.image_view); // out_view
		VK_CHK_WITH_RET(ret, "vk_create_view", false);

		VK_NAME_IMAGE_VIEW(vk, r->mock.color.image_view, "render_resources mock color image view");


		VkCommandBuffer cmd = VK_NULL_HANDLE;
		ret = vk_cmd_create_and_begin_cmd_buffer_locked(vk, r->cmd_pool, 0, &cmd);
		VK_CHK_WITH_RET(ret, "vk_cmd_create_and_begin_cmd_buffer_locked", false);

		VK_NAME_COMMAND_BUFFER(vk, cmd, "render_resources mock command buffer");

		ret = prepare_mock_image_locked( //
		    vk, // vk_bundle
		    cmd, // cmd
		    r->mock.color.image); // dst
		VK_CHK_WITH_RET(ret, "prepare_mock_image_locked", false);

		ret = vk_cmd_end_submit_wait_and_free_cmd_buffer_locked(vk, vk->main_queue, r->cmd_pool, cmd);
		VK_CHK_WITH_RET(ret, "vk_cmd_end_submit_wait_and_free_cmd_buffer_locked", false);

		// No need to wait, submit waits on the fence.
	}


	/*
	 * Shared
	 */

	ret = vk_create_pipeline_cache(vk, &r->pipeline_cache);
	VK_CHK_WITH_RET(ret, "vk_create_pipeline_cache", false);

	VK_NAME_PIPELINE_CACHE(vk, r->pipeline_cache, "render_resources pipeline cache");

	VkCommandBufferAllocateInfo cmd_buffer_info = {
	    .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO,
	    .commandPool = r->cmd_pool,
	    .level = VK_COMMAND_BUFFER_LEVEL_PRIMARY,
	    .commandBufferCount = 1,
	};

	ret = vk->vkAllocateCommandBuffers( //
	    vk->device, // device
	    &cmd_buffer_info, // pAllocateInfo
	    &r->cmd); // pCommandBuffers
	VK_CHK_WITH_RET(ret, "vkAllocateCommandBuffers", false);

	VK_NAME_COMMAND_BUFFER(vk, r->cmd, "render_resources command buffer");


	/*
	 * Gfx.
	 */

	{
		// Number of layer shader runs (views) times number of layers.
		const uint32_t layer_shader_count = RENDER_MAX_LAYER_RUNS_COUNT(r) * RENDER_MAX_LAYERS;

		// One mesh distortion run per view.
		const uint32_t mesh_shader_count = RENDER_MAX_LAYER_RUNS_COUNT(r);

		struct vk_descriptor_pool_info mesh_pool_info = {
		    .uniform_per_descriptor_count = 1,
		    .sampler_per_descriptor_count = 1,
		    .storage_image_per_descriptor_count = 0,
		    .storage_buffer_per_descriptor_count = 0,
		    .descriptor_count = layer_shader_count + mesh_shader_count,
		    .freeable = false,
		};

		ret = vk_create_descriptor_pool( //
		    vk, // vk_bundle
		    &mesh_pool_info, // info
		    &r->gfx.ubo_and_src_descriptor_pool); // out_descriptor_pool
		VK_CHK_WITH_RET(ret, "vk_create_descriptor_pool", false);

		VK_NAME_DESCRIPTOR_POOL(vk, r->gfx.ubo_and_src_descriptor_pool,
		                        "render_resources ubo and src descriptor pool");

		VkBufferUsageFlags usage_flags = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT;
		VkMemoryPropertyFlags memory_property_flags = //
		    VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | //
		    VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT; //

		uint32_t buffer_count = 0;

		// One UBO per layer shader.
		buffer_count += layer_shader_count;

		// One UBO per mesh shader.
		buffer_count += RENDER_MAX_LAYER_RUNS_COUNT(r);

		// We currently use the alignment as the max UBO size.
		static_assert(sizeof(struct render_gfx_mesh_ubo_data) <= RENDER_ALWAYS_SAFE_UBO_ALIGNMENT, "MAX");

		// Calculate size.
		VkDeviceSize size = buffer_count * RENDER_ALWAYS_SAFE_UBO_ALIGNMENT;

		ret = render_buffer_init( //
		    vk, // vk_bundle
		    &r->gfx.shared_ubo, // buffer
		    usage_flags, // usage_flags
		    memory_property_flags, // memory_property_flags
		    size); // size
		VK_CHK_WITH_RET(ret, "render_buffer_init", false);
		VK_NAME_BUFFER(vk, r->gfx.shared_ubo.buffer, "render_resources gfx shared ubo");

		ret = render_buffer_map( //
		    vk, // vk_bundle
		    &r->gfx.shared_ubo); // buffer
		VK_CHK_WITH_RET(ret, "render_buffer_map", false);
	}


	/*
	 * Gfx layer.
	 */

	ret = create_gfx_ubo_and_src_descriptor_set_layout( //
	    vk, // vk_bundle
	    RENDER_BINDING_LAYER_SHARED_UBO, // ubo_binding
	    RENDER_BINDING_LAYER_SHARED_SRC, // src_binding
	    &r->gfx.layer.shared.descriptor_set_layout); // out_descriptor_set_layout
	VK_CHK_WITH_RET(ret, "create_gfx_ubo_and_src_descriptor_set_layout", false);

	VK_NAME_DESCRIPTOR_SET_LAYOUT(vk, r->gfx.layer.shared.descriptor_set_layout,
	                              "render_resources gfx layer shared descriptor set layout");

	ret = vk_create_pipeline_layout( //
	    vk, // vk_bundle
	    r->gfx.layer.shared.descriptor_set_layout, // descriptor_set_layout
	    &r->gfx.layer.shared.pipeline_layout); // out_pipeline_layout
	VK_CHK_WITH_RET(ret, "vk_create_pipeline_layout", false);

	VK_NAME_PIPELINE_LAYOUT(vk, r->gfx.layer.shared.pipeline_layout,
	                        "render_resources gfx layer shared pipeline layout");


	/*
	 * Mesh static.
	 */

	ret = create_gfx_ubo_and_src_descriptor_set_layout( //
	    vk, // vk_bundle
	    r->mesh.ubo_binding, // ubo_binding
	    r->mesh.src_binding, // src_binding
	    &r->mesh.descriptor_set_layout); // out_mesh_descriptor_set_layout
	VK_CHK_WITH_RET(ret, "create_gfx_ubo_and_src_descriptor_set_layout", false);

	VK_NAME_DESCRIPTOR_SET_LAYOUT(vk, r->mesh.descriptor_set_layout, "render_resources mesh descriptor set layout");

	ret = vk_create_pipeline_layout( //
	    vk, // vk_bundle
	    r->mesh.descriptor_set_layout, // descriptor_set_layout
	    &r->mesh.pipeline_layout); // out_pipeline_layout
	VK_CHK_WITH_RET(ret, "vk_create_pipeline_layout", false);

	VK_NAME_PIPELINE_LAYOUT(vk, r->mesh.pipeline_layout, "render_resources mesh pipeline layout");

	bret = init_mesh_vertex_buffers( //
	    vk, //
	    &r->mesh.vbo, //
	    &r->mesh.ibo, //
	    r->mesh.vertex_count, //
	    r->mesh.stride, //
	    parts->distortion.mesh.vertices, //
	    r->mesh.index_count_total, //
	    parts->distortion.mesh.indices); //
	if (!bret) {
		return false;
	}

	bret = init_mesh_ubo_buffers( //
	    vk, //
	    r->mesh.ubos, r->view_count); //
	if (!bret) {
		return false;
	}


	/*
	 * Compute static.
	 */

	VkBufferUsageFlags ubo_usage_flags = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT;
	VkMemoryPropertyFlags memory_property_flags =
	    VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;

	const uint32_t compute_descriptor_count = //
	    1 + // Shared/distortion run(s).
	    RENDER_MAX_LAYER_RUNS_COUNT(r); // Layer shader run(s).

	struct vk_descriptor_pool_info compute_pool_info = {
	    .uniform_per_descriptor_count = 1,
	    // layer images
	    .sampler_per_descriptor_count = r->compute.layer.image_array_size + RENDER_DISTORTION_IMAGES_COUNT(r),
	    .storage_image_per_descriptor_count = 1,
	    .storage_buffer_per_descriptor_count = 0,
	    .descriptor_count = compute_descriptor_count,
	    .freeable = false,
	};

	ret = vk_create_descriptor_pool( //
	    vk, // vk_bundle
	    &compute_pool_info, // info
	    &r->compute.descriptor_pool); // out_descriptor_pool
	VK_CHK_WITH_RET(ret, "vk_create_descriptor_pool", false);

	VK_NAME_DESCRIPTOR_POOL(vk, r->compute.descriptor_pool, "render_resources compute descriptor pool");

	/*
	 * Layer pipeline
	 */

	ret = create_compute_layer_descriptor_set_layout( //
	    vk, // vk_bundle
	    r->compute.src_binding, // src_binding,
	    r->compute.target_binding, // target_binding,
	    r->compute.ubo_binding, // ubo_binding,
	    r->compute.layer.image_array_size, // source_images_count,
	    &r->compute.layer.descriptor_set_layout); // out_descriptor_set_layout
	VK_CHK_WITH_RET(ret, "create_compute_layer_descriptor_set_layout", false);

	VK_NAME_DESCRIPTOR_SET_LAYOUT(vk, r->compute.layer.descriptor_set_layout,
	                              "render_resources compute layer descriptor set layout");

	ret = vk_create_pipeline_layout( //
	    vk, // vk_bundle
	    r->compute.layer.descriptor_set_layout, // descriptor_set_layout
	    &r->compute.layer.pipeline_layout); // out_pipeline_layout
	VK_CHK_WITH_RET(ret, "vk_create_pipeline_layout", false);

	VK_NAME_PIPELINE_LAYOUT(vk, r->compute.layer.pipeline_layout, "render_resources compute layer pipeline layout");

	struct compute_layer_params layer_params = {
	    .do_timewarp = false,
	    .do_color_correction = true,
	    .max_layers = RENDER_MAX_LAYERS,
	    .image_array_size = r->compute.layer.image_array_size,
	};

	ret = create_compute_layer_pipeline( //
	    vk, // vk_bundle
	    r->pipeline_cache, // pipeline_cache
	    r->shaders->layer_comp, // shader
	    r->compute.layer.pipeline_layout, // pipeline_layout
	    &layer_params, // params
	    &r->compute.layer.non_timewarp_pipeline); // out_compute_pipeline
	VK_CHK_WITH_RET(ret, "create_compute_layer_pipeline", false);

	VK_NAME_PIPELINE(vk, r->compute.layer.non_timewarp_pipeline,
	                 "render_resources compute layer non timewarp pipeline");

	struct compute_layer_params layer_timewarp_params = {
	    .do_timewarp = true,
	    .do_color_correction = true,
	    .max_layers = RENDER_MAX_LAYERS,
	    .image_array_size = r->compute.layer.image_array_size,
	};

	ret = create_compute_layer_pipeline( //
	    vk, // vk_bundle
	    r->pipeline_cache, // pipeline_cache
	    r->shaders->layer_comp, // shader
	    r->compute.layer.pipeline_layout, // pipeline_layout
	    &layer_timewarp_params, // params
	    &r->compute.layer.timewarp_pipeline); // out_compute_pipeline
	VK_CHK_WITH_RET(ret, "create_compute_layer_pipeline", false);

	VK_NAME_PIPELINE(vk, r->compute.layer.timewarp_pipeline, "render_resources compute layer timewarp pipeline");

	size_t layer_ubo_size = sizeof(struct render_compute_layer_ubo_data);

	for (uint32_t i = 0; i < r->view_count; i++) {
		ret = render_buffer_init( //
		    vk, // vk_bundle
		    &r->compute.layer.ubos[i], // buffer
		    ubo_usage_flags, // usage_flags
		    memory_property_flags, // memory_property_flags
		    layer_ubo_size); // size
		VK_CHK_WITH_RET(ret, "render_buffer_init", false);
		VK_NAME_BUFFER(vk, r->compute.layer.ubos[i].buffer, "render_resources compute layer ubo");

		ret = render_buffer_map( //
		    vk, // vk_bundle
		    &r->compute.layer.ubos[i]); // buffer
		VK_CHK_WITH_RET(ret, "render_buffer_map", false);
	}


	/*
	 * Distortion pipeline
	 */

	ret = create_compute_distortion_descriptor_set_layout( //
	    vk, // vk_bundle
	    r->compute.src_binding, // src_binding,
	    r->compute.distortion_binding, // distortion_binding,
	    r->compute.target_binding, // target_binding,
	    r->compute.ubo_binding, // ubo_binding,
	    &r->compute.distortion.descriptor_set_layout); // out_descriptor_set_layout
	VK_CHK_WITH_RET(ret, "create_compute_distortion_descriptor_set_layout", false);

	VK_NAME_DESCRIPTOR_SET_LAYOUT(vk, r->compute.distortion.descriptor_set_layout,
	                              "render_resources compute distortion descriptor set layout");

	ret = vk_create_pipeline_layout( //
	    vk, // vk_bundle
	    r->compute.distortion.descriptor_set_layout, // descriptor_set_layout
	    &r->compute.distortion.pipeline_layout); // out_pipeline_layout
	VK_CHK_WITH_RET(ret, "vk_create_pipeline_layout", false);

	VK_NAME_PIPELINE_LAYOUT(vk, r->compute.distortion.pipeline_layout,
	                        "render_resources compute distortion pipeline layout");

	struct compute_distortion_params distortion_params = {
	    .distortion_texel_count = RENDER_DISTORTION_IMAGE_DIMENSIONS,
	    .do_timewarp = false,
	};

	ret = create_compute_distortion_pipeline( //
	    vk, // vk_bundle
	    r->pipeline_cache, // pipeline_cache
	    r->shaders->distortion_comp, // shader
	    r->compute.distortion.pipeline_layout, // pipeline_layout
	    &distortion_params, // params
	    &r->compute.distortion.pipeline); // out_compute_pipeline
	VK_CHK_WITH_RET(ret, "create_compute_distortion_pipeline", false);

	VK_NAME_PIPELINE(vk, r->compute.distortion.pipeline, "render_resources compute distortion pipeline");

	struct compute_distortion_params distortion_timewarp_params = {
	    .distortion_texel_count = RENDER_DISTORTION_IMAGE_DIMENSIONS,
	    .do_timewarp = true,
	};

	ret = create_compute_distortion_pipeline( //
	    vk, // vk_bundle
	    r->pipeline_cache, // pipeline_cache
	    r->shaders->distortion_comp, // shader
	    r->compute.distortion.pipeline_layout, // pipeline_layout
	    &distortion_timewarp_params, // params
	    &r->compute.distortion.timewarp_pipeline); // out_compute_pipeline
	VK_CHK_WITH_RET(ret, "create_compute_distortion_pipeline", false);

	VK_NAME_PIPELINE(vk, r->compute.distortion.timewarp_pipeline,
	                 "render_resources compute distortion timewarp pipeline");

	size_t distortion_ubo_size = sizeof(struct render_compute_distortion_ubo_data);

	ret = render_buffer_init( //
	    vk, // vk_bundle
	    &r->compute.distortion.ubo, // buffer
	    ubo_usage_flags, // usage_flags
	    memory_property_flags, // memory_property_flags
	    distortion_ubo_size); // size
	VK_CHK_WITH_RET(ret, "render_buffer_init", false);
	VK_NAME_BUFFER(vk, r->compute.distortion.ubo.buffer, "render_resources compute distortion ubo");
	ret = render_buffer_map( //
	    vk, // vk_bundle
	    &r->compute.distortion.ubo); // buffer
	VK_CHK_WITH_RET(ret, "render_buffer_map", false);


	/*
	 * Clear pipeline.
	 */

	ret = vk_create_compute_pipeline( //
	    vk, // vk_bundle
	    r->pipeline_cache, // pipeline_cache
	    r->shaders->clear_comp, // shader
	    r->compute.distortion.pipeline_layout, // pipeline_layout
	    NULL, // specialization_info
	    &r->compute.clear.pipeline); // out_compute_pipeline
	VK_CHK_WITH_RET(ret, "vk_create_compute_pipeline", false);

	VK_NAME_PIPELINE(vk, r->compute.clear.pipeline, "render_resources compute clear pipeline");

	size_t clear_ubo_size = sizeof(struct render_compute_distortion_ubo_data);

	ret = render_buffer_init( //
	    vk, // vk_bundle
	    &r->compute.clear.ubo, // buffer
	    ubo_usage_flags, // usage_flags
	    memory_property_flags, // memory_property_flags
	    clear_ubo_size); // size
	VK_CHK_WITH_RET(ret, "render_buffer_init", false);
	VK_NAME_BUFFER(vk, r->compute.clear.ubo.buffer, "render_resources compute clear ubo");

	ret = render_buffer_map( //
	    vk, // vk_bundle
	    &r->compute.clear.ubo); // buffer
	VK_CHK_WITH_RET(ret, "render_buffer_map", false);


	/*
	 * Compute distortion textures, not created until later.
	 */

	for (uint32_t i = 0; i < RENDER_DISTORTION_IMAGES_COUNT(r); i++) {
		r->distortion.image_views[i] = VK_NULL_HANDLE;
	}
	for (uint32_t i = 0; i < RENDER_DISTORTION_IMAGES_COUNT(r); i++) {
		r->distortion.images[i] = VK_NULL_HANDLE;
	}
	for (uint32_t i = 0; i < RENDER_DISTORTION_IMAGES_COUNT(r); i++) {
		r->distortion.device_memories[i] = VK_NULL_HANDLE;
	}


	/*
	 * Timestamp pool.
	 */

	VkQueryPoolCreateInfo poolInfo = {
	    .sType = VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO,
	    .pNext = NULL,
	    .flags = 0, // Reserved.
	    .queryType = VK_QUERY_TYPE_TIMESTAMP,
	    .queryCount = 2, // Start & end
	    .pipelineStatistics = 0, // Not used.
	};

	vk->vkCreateQueryPool( //
	    vk->device, // device
	    &poolInfo, // pCreateInfo
	    NULL, // pAllocator
	    &r->query_pool); // pQueryPool

	VK_NAME_QUERY_POOL(vk, r->query_pool, "render_resources query pool");

	/*
	 * Done
	 */

	U_LOG_I("New renderer initialized!");

	return true;
}

void
render_resources_fini(struct render_resources *r)
{
	// If we were never initialised, or have already been closed, it is always safe to call this function.
	if (r->vk == NULL) {
		return;
	}

	struct vk_bundle *vk = r->vk;

	D(Sampler, r->samplers.mock);
	D(Sampler, r->samplers.repeat);
	D(Sampler, r->samplers.clamp_to_edge);
	D(Sampler, r->samplers.clamp_to_border_black);

	D(ImageView, r->mock.color.image_view);
	D(Image, r->mock.color.image);
	DF(Memory, r->mock.color.memory);

	render_buffer_fini(vk, &r->gfx.shared_ubo);
	D(DescriptorPool, r->gfx.ubo_and_src_descriptor_pool);

	D(DescriptorSetLayout, r->gfx.layer.shared.descriptor_set_layout);
	D(PipelineLayout, r->gfx.layer.shared.pipeline_layout);

	D(DescriptorSetLayout, r->mesh.descriptor_set_layout);
	D(PipelineLayout, r->mesh.pipeline_layout);
	D(PipelineCache, r->pipeline_cache);
	D(QueryPool, r->query_pool);
	render_buffer_fini(vk, &r->mesh.vbo);
	render_buffer_fini(vk, &r->mesh.ibo);
	for (uint32_t i = 0; i < r->view_count; ++i) {
		render_buffer_fini(vk, &r->mesh.ubos[i]);
	}

	D(DescriptorPool, r->compute.descriptor_pool);

	D(DescriptorSetLayout, r->compute.layer.descriptor_set_layout);
	D(Pipeline, r->compute.layer.non_timewarp_pipeline);
	D(Pipeline, r->compute.layer.timewarp_pipeline);
	D(PipelineLayout, r->compute.layer.pipeline_layout);

	D(DescriptorSetLayout, r->compute.distortion.descriptor_set_layout);
	D(Pipeline, r->compute.distortion.pipeline);
	D(Pipeline, r->compute.distortion.timewarp_pipeline);
	D(PipelineLayout, r->compute.distortion.pipeline_layout);

	D(Pipeline, r->compute.clear.pipeline);

	render_distortion_images_fini(r);
	render_buffer_fini(vk, &r->compute.clear.ubo);
	for (uint32_t i = 0; i < r->view_count; i++) {
		render_buffer_fini(vk, &r->compute.layer.ubos[i]);
	}
	render_buffer_fini(vk, &r->compute.distortion.ubo);

	vk_cmd_pool_destroy(vk, &r->distortion_pool);
	D(CommandPool, r->cmd_pool);

	// Finally forget about the vk bundle. We do not own it!
	r->vk = NULL;
}

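/*
 * Reads back the two timestamps written around the GPU work and converts
 * them to host-clock nanoseconds; requires VK_EXT_calibrated_timestamps,
 * hence the early-out below.
 */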
bool
render_resources_get_timestamps(struct render_resources *r, uint64_t *out_gpu_start_ns, uint64_t *out_gpu_end_ns)
{
	struct vk_bundle *vk = r->vk;
	VkResult ret = VK_SUCCESS;

	// Simple pre-check, needed by vk_convert_timestamps_to_host_ns.
	if (!vk->has_EXT_calibrated_timestamps) {
		return false;
	}


	/*
	 * Query how long things took.
	 */

	VkQueryResultFlags flags = VK_QUERY_RESULT_64_BIT | VK_QUERY_RESULT_WAIT_BIT;
	uint64_t timestamps[2] = {0};

	vk->vkGetQueryPoolResults( //
	    vk->device, // device
	    r->query_pool, // queryPool
	    0, // firstQuery
	    2, // queryCount
	    sizeof(uint64_t) * 2, // dataSize
	    timestamps, // pData
	    sizeof(uint64_t), // stride
	    flags); // flags


	/*
	 * Convert from the GPU context to the CPU context; this has to be
	 * done fairly soon after the timestamps have been written.
	 */
	ret = vk_convert_timestamps_to_host_ns(vk, 2, timestamps);
	if (ret != VK_SUCCESS) {
		return false;
	}

	uint64_t gpu_start_ns = timestamps[0];
	uint64_t gpu_end_ns = timestamps[1];


	/*
	 * Done
	 */

	*out_gpu_start_ns = gpu_start_ns;
	*out_gpu_end_ns = gpu_end_ns;

	return true;
}

bool
render_resources_get_duration(struct render_resources *r, uint64_t *out_gpu_duration_ns)
{
	struct vk_bundle *vk = r->vk;
	VkResult ret = VK_SUCCESS;

	/*
	 * Query how long things took.
	 */

	VkQueryResultFlags flags = VK_QUERY_RESULT_64_BIT | VK_QUERY_RESULT_WAIT_BIT;
	uint64_t timestamps[2] = {0};

	ret = vk->vkGetQueryPoolResults( //
	    vk->device, // device
	    r->query_pool, // queryPool
	    0, // firstQuery
	    2, // queryCount
	    sizeof(uint64_t) * 2, // dataSize
	    timestamps, // pData
	    sizeof(uint64_t), // stride
	    flags); // flags

	if (ret != VK_SUCCESS) {
		return false;
	}


	/*
	 * Convert from ticks to nanoseconds
	 */

	double duration_ticks = (double)(timestamps[1] - timestamps[0]);
	*out_gpu_duration_ns = (uint64_t)(duration_ticks * vk->features.timestamp_period);

	return true;
}


/*
 *
 * 'Exported' scratch functions.
 *
 */

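/*
 * (Re)creates the per-view scratch images only when the requested extent
 * changes or the views are missing, so callers can invoke it every frame.
 */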
bool
render_scratch_images_ensure(struct render_resources *r, struct render_scratch_images *rsi, VkExtent2D extent)
{
	bool bret;

	if (rsi->extent.width == extent.width && //
	    rsi->extent.height == extent.height && //
	    rsi->color[0].srgb_view != VK_NULL_HANDLE && //
	    rsi->color[0].unorm_view != VK_NULL_HANDLE) {
		return true;
	}

	render_scratch_images_fini(r, rsi);

	for (uint32_t i = 0; i < r->view_count; i++) {
		bret = create_scratch_image_and_view( //
		    r->vk, //
		    extent, //
		    &rsi->color[i]); //
		if (!bret) {
			break;
		}
	}

	if (!bret) {
		render_scratch_images_fini(r, rsi);
		return false;
	}

	rsi->extent = extent;

	return true;
}

void
render_scratch_images_fini(struct render_resources *r, struct render_scratch_images *rsi)
{
	struct vk_bundle *vk = r->vk;

	for (uint32_t i = 0; i < r->view_count; i++) {
		teardown_scratch_color_image(vk, &rsi->color[i]);
	}

	U_ZERO(&rsi->extent);
}