// The open source OpenXR runtime.
1// Copyright 2023-2024, Collabora, Ltd.
2// Copyright 2025, NVIDIA CORPORATION.
3// SPDX-License-Identifier: BSL-1.0
4/*!
5 * @file
6 * @brief Compositor (gfx - graphics shader) rendering code.
7 * @author Jakob Bornecrantz <jakob@collabora.com>
8 * @author Rylie Pavlik <rylie.pavlik@collabora.com>
9 * @ingroup comp_util
10 */
11
12#include "xrt/xrt_compositor.h"
13#include "util/comp_swapchain.h"
14
15#include "math/m_api.h"
16#include "math/m_mathinclude.h"
17
18#include "util/u_trace_marker.h"
19
20#include "vk/vk_helpers.h"
21
22#include "render/render_interface.h"
23
24#include "util/comp_render.h"
25#include "util/comp_render_helpers.h"
26
27
28/*
29 *
30 * Internal structs.
31 *
32 */
33
/**
 * Internal per-view state for the layer squashing render step.
 *
 * Holds the per-layer GPU resources (descriptor sets, layer type and blend
 * mode) together with the per-view matrices used to build each layer's MVP
 * transform.
 */
struct gfx_layer_view_state
{
	/// Filled out descriptor sets, one per squashed layer.
	VkDescriptorSet descriptor_sets[RENDER_MAX_LAYERS];

	/// The type of each layer, selects the draw call at submit time.
	enum xrt_layer_type types[RENDER_MAX_LAYERS];

	/// Is the alpha premultiplied, false means unpremultiplied.
	bool premultiplied_alphas[RENDER_MAX_LAYERS];

	/// To go to this view's tangent lengths (UV -> tangent space).
	struct xrt_normalized_rect to_tangent;

	/// Number of layers filled in.
	/// TODO move to parent struct
	uint32_t layer_count;

	/// Full rotation and translation VP matrix, in world space.
	struct xrt_matrix_4x4 world_vp_full;
	/// Full rotation and translation VP matrix, in view space.
	struct xrt_matrix_4x4 eye_vp_full;

	/// Full rotation and translation inverse V matrix, in world space.
	struct xrt_matrix_4x4 world_v_inv_full;
	/// Full rotation and translation inverse V matrix, in view space.
	struct xrt_matrix_4x4 eye_v_inv_full;

	/// Rotation-only (translation zeroed) VP matrix, in world space.
	struct xrt_matrix_4x4 world_vp_rot_only;
	/// Rotation-only (translation zeroed) VP matrix, in view space.
	struct xrt_matrix_4x4 eye_vp_rot_only;
};
70
/**
 * Internal state for the layer squashing render step,
 * contains all per-view state.
 */
struct gfx_layer_state
{
	/// One state per squash view, indexed by view index.
	struct gfx_layer_view_state views[XRT_MAX_VIEWS];
};
78
/*
 * Internal state for the mesh (distortion) rendering step,
 * one descriptor set per view.
 */
struct gfx_mesh_state
{
	VkDescriptorSet descriptor_sets[XRT_MAX_VIEWS];
};
86
/*
 * Per-view input data for the mesh (distortion) rendering step.
 */
struct gfx_mesh_view_data
{
	// Pose the source image was rendered at; used for timewarp.
	struct xrt_pose src_pose;
	// Field of view the source image was rendered with.
	struct xrt_fov src_fov;
	// Normalized sub-rect of the source image to sample.
	struct xrt_normalized_rect src_norm_rect;
	// Sampler used to read the source image.
	VkSampler src_sampler;
	// View into the source image (squash target or app-submitted layer).
	VkImageView src_image_view;
};
98
/*
 * Input data for the mesh (distortion) rendering step,
 * combined with comp_render_dispatch_data.
 */
struct gfx_mesh_data
{
	struct gfx_mesh_view_data views[XRT_MAX_VIEWS];
};
107
108
109/*
110 *
111 * Static data.
112 *
113 */
114
/// Clear color used when there are no layers to composite: opaque black.
static const VkClearColorValue background_color_idle = {
    .float32 = {0.0f, 0.0f, 0.0f, 1.0f},
};

/// Clear color used when layers are being composited: opaque black.
/// NOTE(review): currently identical to the idle color; kept separate so the
/// two cases can be tuned independently.
static const VkClearColorValue background_color_active = {
    .float32 = {0.0f, 0.0f, 0.0f, 1.0f},
};
122
123
124/*
125 *
126 * Input builder functions.
127 *
128 */
129
130inline static void
131gfx_mesh_add_view(struct gfx_mesh_data *md,
132 uint32_t view_index,
133 const struct xrt_pose *src_pose,
134 const struct xrt_fov *src_fov,
135 const struct xrt_normalized_rect *src_norm_rect,
136 VkSampler src_sampler,
137 VkImageView src_image_view)
138{
139 md->views[view_index].src_pose = *src_pose;
140 md->views[view_index].src_fov = *src_fov;
141 md->views[view_index].src_norm_rect = *src_norm_rect;
142 md->views[view_index].src_sampler = src_sampler;
143 md->views[view_index].src_image_view = src_image_view;
144}
145
146
147
148/*
149 *
150 * Model view projection helper functions.
151 *
152 */
153
154static inline void
155calc_mvp_full(struct gfx_layer_view_state *state,
156 const struct xrt_layer_data *layer_data,
157 const struct xrt_pose *pose,
158 const struct xrt_vec3 *scale,
159 struct xrt_matrix_4x4 *result)
160{
161 struct xrt_matrix_4x4 model;
162 math_matrix_4x4_model(pose, scale, &model);
163
164 if (is_layer_view_space(layer_data)) {
165 math_matrix_4x4_multiply(&state->eye_vp_full, &model, result);
166 } else {
167 math_matrix_4x4_multiply(&state->world_vp_full, &model, result);
168 }
169}
170
171static inline void
172calc_mv_inv_full(struct gfx_layer_view_state *state,
173 const struct xrt_layer_data *layer_data,
174 const struct xrt_pose *pose,
175 const struct xrt_vec3 *scale,
176 struct xrt_matrix_4x4 *result)
177{
178 struct xrt_matrix_4x4 model;
179 math_matrix_4x4_model(pose, scale, &model);
180
181 struct xrt_matrix_4x4 model_inv;
182 math_matrix_4x4_inverse(&model, &model_inv);
183
184 struct xrt_matrix_4x4 *v;
185 if (is_layer_view_space(layer_data)) {
186 v = &state->eye_v_inv_full;
187 } else {
188 v = &state->world_v_inv_full;
189 }
190
191 math_matrix_4x4_multiply(&model_inv, v, result);
192}
193
194static inline void
195calc_mvp_rot_only(struct gfx_layer_view_state *state,
196 const struct xrt_layer_data *data,
197 const struct xrt_pose *pose,
198 const struct xrt_vec3 *scale,
199 struct xrt_matrix_4x4 *result)
200{
201 struct xrt_matrix_4x4 model;
202 struct xrt_pose rot_only = {
203 .orientation = pose->orientation,
204 .position = XRT_VEC3_ZERO,
205 };
206 math_matrix_4x4_model(&rot_only, scale, &model);
207
208 if (is_layer_view_space(data)) {
209 math_matrix_4x4_multiply(&state->eye_vp_rot_only, &model, result);
210 } else {
211 math_matrix_4x4_multiply(&state->world_vp_rot_only, &model, result);
212 }
213}
214
215
216/*
217 *
218 * Graphics layer data builders.
219 *
220 */
221
222static inline const struct comp_swapchain_image *
223get_layer_image(const struct comp_layer *layer, uint32_t swapchain_index, uint32_t image_index)
224{
225
226 const struct comp_swapchain *sc = (struct comp_swapchain *)(comp_layer_get_swapchain(layer, swapchain_index));
227 return &sc->images[image_index];
228}
229
230static inline void
231add_layer(struct gfx_layer_view_state *state, const struct xrt_layer_data *data, VkDescriptorSet descriptor_set)
232{
233 uint32_t cur_layer = state->layer_count++;
234 state->descriptor_sets[cur_layer] = descriptor_set;
235 state->types[cur_layer] = data->type;
236 state->premultiplied_alphas[cur_layer] = !is_layer_unpremultiplied(data);
237}
238
239/// Data setup for a cylinder layer
240/// Also allocates and writes a descriptor set!
241static VkResult
242do_cylinder_layer(struct render_gfx *render,
243 const struct comp_layer *layer,
244 uint32_t view_index,
245 VkSampler clamp_to_edge,
246 VkSampler clamp_to_border_black,
247 struct gfx_layer_view_state *state)
248{
249 const struct xrt_layer_data *layer_data = &layer->data;
250 const struct xrt_layer_cylinder_data *c = &layer_data->cylinder;
251 const uint32_t array_index = c->sub.array_index;
252 const struct comp_swapchain_image *image = get_layer_image(layer, 0, c->sub.image_index);
253
254 struct vk_bundle *vk = render->r->vk;
255 VkResult ret;
256
257 // Color
258 VkSampler src_sampler = clamp_to_edge; // WIP: Is this correct?
259 VkImageView src_image_view = get_image_view(image, layer_data->flags, array_index);
260
261 // Fully initialised below.
262 struct render_gfx_layer_cylinder_data data;
263
264 // Used for Subimage and OpenGL flip.
265 set_post_transform_rect( //
266 layer_data, // data
267 &c->sub.norm_rect, // src_norm_rect
268 false, // invert_flip
269 &data.post_transform); // out_norm_rect
270
271 // Shared scale for all paths.
272 struct xrt_vec3 scale = {1, 1, 1};
273
274 // Handle infinite radius.
275 if (c->radius == 0 || c->radius == INFINITY) {
276 // Use rotation only to center the cylinder on the eye.
277 calc_mvp_rot_only(state, layer_data, &c->pose, &scale, &data.mvp);
278 data.radius = 1.0; // Fixed radius at one.
279 data.central_angle = c->central_angle;
280 data.aspect_ratio = c->aspect_ratio;
281 } else {
282 calc_mvp_full(state, layer_data, &c->pose, &scale, &data.mvp);
283 data.radius = c->radius;
284 data.central_angle = c->central_angle;
285 data.aspect_ratio = c->aspect_ratio;
286 }
287
288 // Can fail if we have too many layers.
289 VkDescriptorSet descriptor_set = VK_NULL_HANDLE;
290 ret = render_gfx_layer_cylinder_alloc_and_write( //
291 render, //
292 &data, //
293 src_sampler, //
294 src_image_view, //
295 &descriptor_set); // out_descriptor_set
296 VK_CHK_AND_RET(ret, "render_gfx_layer_quad_alloc_and_write");
297
298 VK_NAME_DESCRIPTOR_SET(vk, descriptor_set, "render_gfx layer quad descriptor set");
299
300 add_layer(state, layer_data, descriptor_set);
301
302 return VK_SUCCESS;
303}
304
305/// Data setup for an "equirect2" layer
306/// Also allocates and writes a descriptor set!
307static VkResult
308do_equirect2_layer(struct render_gfx *render,
309 const struct comp_layer *layer,
310 uint32_t view_index,
311 VkSampler clamp_to_edge,
312 VkSampler clamp_to_border_black,
313 struct gfx_layer_view_state *state)
314{
315 const struct xrt_layer_data *layer_data = &layer->data;
316 const struct xrt_layer_equirect2_data *eq2 = &layer_data->equirect2;
317 const uint32_t array_index = eq2->sub.array_index;
318 const struct comp_swapchain_image *image = get_layer_image(layer, 0, eq2->sub.image_index);
319
320 struct vk_bundle *vk = render->r->vk;
321 VkResult ret;
322
323 // Color
324 VkSampler src_sampler = clamp_to_edge;
325 VkImageView src_image_view = get_image_view(image, layer_data->flags, array_index);
326
327 // Fully initialised below.
328 struct render_gfx_layer_equirect2_data data;
329
330 // Used for Subimage and OpenGL flip.
331 set_post_transform_rect( //
332 layer_data, // data
333 &eq2->sub.norm_rect, // src_norm_rect
334 false, // invert_flip
335 &data.post_transform); // out_norm_rect
336
337 struct xrt_vec3 scale = {1.f, 1.f, 1.f};
338 calc_mv_inv_full(state, layer_data, &eq2->pose, &scale, &data.mv_inverse);
339
340 // Make it possible to go tangent lengths.
341 data.to_tangent = state->to_tangent;
342
343 // Simplifies the shader.
344 if (eq2->radius >= INFINITY) {
345 data.radius = 0.0;
346 } else {
347 data.radius = eq2->radius;
348 }
349
350 data.central_horizontal_angle = eq2->central_horizontal_angle;
351 data.upper_vertical_angle = eq2->upper_vertical_angle;
352 data.lower_vertical_angle = eq2->lower_vertical_angle;
353
354 // Can fail if we have too many layers.
355 VkDescriptorSet descriptor_set = VK_NULL_HANDLE;
356 ret = render_gfx_layer_equirect2_alloc_and_write( //
357 render, //
358 &data, //
359 src_sampler, //
360 src_image_view, //
361 &descriptor_set); // out_descriptor_set
362 VK_CHK_AND_RET(ret, "render_gfx_layer_quad_alloc_and_write");
363
364 VK_NAME_DESCRIPTOR_SET(vk, descriptor_set, "render_gfx layer quad descriptor set");
365
366 add_layer(state, layer_data, descriptor_set);
367
368 return VK_SUCCESS;
369}
370
/// Data setup for a projection layer (with or without depth).
/// Also allocates and writes a descriptor set!
static VkResult
do_projection_layer(struct render_gfx *render,
                    const struct comp_layer *layer,
                    uint32_t view_index,
                    VkSampler clamp_to_edge,
                    VkSampler clamp_to_border_black,
                    struct gfx_layer_view_state *state)
{
	const struct xrt_layer_data *layer_data = &layer->data;
	const struct xrt_layer_projection_view_data *vd = NULL;
	const struct xrt_layer_depth_data *dvd = NULL;

	// Depth layers carry the same view data plus a depth sub-image; the
	// depth data (dvd) is currently unused by this gfx path.
	if (layer_data->type == XRT_LAYER_PROJECTION) {
		view_index_to_projection_data(view_index, layer_data, &vd);
	} else {
		view_index_to_depth_data(view_index, layer_data, &vd, &dvd);
	}

	// Projection layers use one swapchain per eye.
	uint32_t sc_array_index = is_view_index_right(view_index) ? 1 : 0;
	uint32_t array_index = vd->sub.array_index;
	const struct comp_swapchain_image *image = get_layer_image(layer, sc_array_index, vd->sub.image_index);

	struct vk_bundle *vk = render->r->vk;
	VkResult ret;
	// Color
	VkSampler src_sampler = clamp_to_border_black;
	VkImageView src_image_view = get_image_view(image, layer_data->flags, array_index);

	// Fully initialised below.
	struct render_gfx_layer_projection_data data;

	// Used for Subimage and OpenGL flip.
	set_post_transform_rect(   //
	    layer_data,            // data
	    &vd->sub.norm_rect,    // src_norm_rect
	    false,                 // invert_flip
	    &data.post_transform); // out_norm_rect

	// Used to go from UV to tangent space.
	// NOTE(review): `to_tanget` spelling comes from the field as declared
	// in render_gfx_layer_projection_data; fix there first if renaming.
	render_calc_uv_to_tangent_lengths_rect(&vd->fov, &data.to_tanget);

	// Create MVP matrix, rotation only so we get 3dof timewarp.
	struct xrt_vec3 scale = {1, 1, 1};
	calc_mvp_rot_only(state, layer_data, &vd->pose, &scale, &data.mvp);

	// Can fail if we have too many layers.
	VkDescriptorSet descriptor_set = VK_NULL_HANDLE;
	ret = render_gfx_layer_projection_alloc_and_write( //
	    render,                                        //
	    &data,                                         //
	    src_sampler,                                   //
	    src_image_view,                                //
	    &descriptor_set);                              // out_descriptor_set
	VK_CHK_AND_RET(ret, "render_gfx_layer_projection_alloc_and_write");

	VK_NAME_DESCRIPTOR_SET(vk, descriptor_set, "render_gfx layer proj descriptor set");

	add_layer(state, layer_data, descriptor_set);

	return VK_SUCCESS;
}
434
435/// Data setup for a quad layer
436/// Also allocates and writes a descriptor set!
437static VkResult
438do_quad_layer(struct render_gfx *render,
439 const struct comp_layer *layer,
440 uint32_t view_index,
441 VkSampler clamp_to_edge,
442 VkSampler clamp_to_border_black,
443 struct gfx_layer_view_state *state)
444{
445 const struct xrt_layer_data *layer_data = &layer->data;
446 const struct xrt_layer_quad_data *q = &layer_data->quad;
447 const uint32_t array_index = q->sub.array_index;
448 const struct comp_swapchain_image *image = get_layer_image(layer, 0, q->sub.image_index);
449
450 struct vk_bundle *vk = render->r->vk;
451 VkResult ret;
452
453 // Color
454 VkSampler src_sampler = clamp_to_edge;
455 VkImageView src_image_view = get_image_view(image, layer_data->flags, array_index);
456
457 // Fully initialised below.
458 struct render_gfx_layer_quad_data data;
459
460 // Used for Subimage and OpenGL flip.
461 set_post_transform_rect( //
462 layer_data, // data
463 &q->sub.norm_rect, // src_norm_rect
464 false, // invert_flip
465 &data.post_transform); // out_norm_rect
466
467 // Create MVP matrix, full 6dof mvp needed.
468 struct xrt_vec3 scale = {q->size.x, q->size.y, 1};
469 calc_mvp_full(state, layer_data, &q->pose, &scale, &data.mvp);
470
471 // Can fail if we have too many layers.
472 VkDescriptorSet descriptor_set = VK_NULL_HANDLE;
473 ret = render_gfx_layer_quad_alloc_and_write( //
474 render, //
475 &data, //
476 src_sampler, //
477 src_image_view, //
478 &descriptor_set); // out_descriptor_set
479 VK_CHK_AND_RET(ret, "render_gfx_layer_quad_alloc_and_write");
480
481 VK_NAME_DESCRIPTOR_SET(vk, descriptor_set, "render_gfx layer quad descriptor set");
482
483 add_layer(state, layer_data, descriptor_set);
484
485 return VK_SUCCESS;
486}
487
488static void
489crg_clear_output(struct render_gfx *render, const struct comp_render_dispatch_data *d)
490{
491 render_gfx_begin_target( //
492 render, //
493 d->target.gfx.rtr, //
494 &background_color_idle); //
495
496 render_gfx_end_target(render);
497}
498
499/*
500 *
501 * Graphics distortion helpers.
502 *
503 */
504
/// Used in both fast-path and layer-squashed routes.
///
/// Phase one allocates and writes one mesh UBO + descriptor set per view
/// (optionally including timewarp matrices); phase two records the render
/// pass that draws the distortion mesh for every view into the target.
static void
crg_distortion_common(struct render_gfx *render,
                      bool do_timewarp,
                      const struct gfx_mesh_data *md,
                      const struct comp_render_dispatch_data *d)
{
	struct vk_bundle *vk = render->r->vk;
	VkResult ret;

	/*
	 * Reserve UBOs, create descriptor sets, and fill in any data ahead of
	 * time. If we ever want to copy UBO data this lets us do that easily:
	 * write a copy command before the other gfx commands.
	 */

	struct gfx_mesh_state ms = XRT_STRUCT_INIT;

	for (uint32_t i = 0; i < d->target.view_count; i++) {

		struct render_gfx_mesh_ubo_data data = {
		    .vertex_rot = d->views[i].target.gfx.vertex_rot,
		    .post_transform = md->views[i].src_norm_rect,
		};

		// Extra arguments for timewarp.
		if (do_timewarp) {
			data.pre_transform = d->views[i].pre_transform;

			// Reprojects from the pose the source image was
			// rendered at to the predicted scanout-begin pose.
			render_calc_time_warp_matrix(              //
			    &md->views[i].src_pose,                //
			    &md->views[i].src_fov,                 //
			    &d->views[i].world_pose_scanout_begin, //
			    &data.transform);                      //
		}

		ret = render_gfx_mesh_alloc_and_write( //
		    render,                            //
		    &data,                             //
		    md->views[i].src_sampler,          //
		    md->views[i].src_image_view,       //
		    &ms.descriptor_sets[i]);           //
		VK_CHK_WITH_GOTO(ret, "render_gfx_mesh_alloc", err_no_memory);

		VK_NAME_DESCRIPTOR_SET(vk, ms.descriptor_sets[i], "render_gfx mesh descriptor sets");
	}


	/*
	 * Do command writing here.
	 */

	render_gfx_begin_target(        //
	    render,                     //
	    d->target.gfx.rtr,          //
	    &background_color_active);  //

	for (uint32_t i = 0; i < d->target.view_count; i++) {
		// Convenience.
		const struct render_viewport_data *viewport_data = &d->views[i].target.viewport_data;

		render_gfx_begin_view( //
		    render,            //
		    i,                 // view_index
		    viewport_data);    //

		render_gfx_mesh_draw(      //
		    render,                //
		    i,                     // mesh_index
		    ms.descriptor_sets[i], //
		    do_timewarp);          //

		render_gfx_end_view(render);
	}

	render_gfx_end_target(render);

	return;

err_no_memory:
	// Allocator reset at end of frame, nothing to clean up.
	VK_ERROR(vk, "Could not allocate all UBOs for frame, that's really strange and shouldn't happen!");
}
588
589/// For use after squashing layers
590static void
591crg_distortion_after_squash(struct render_gfx *render, const struct comp_render_dispatch_data *d)
592{
593
594 // Shared between all views.
595 VkSampler clamp_to_border_black = render->r->samplers.clamp_to_border_black;
596
597 struct gfx_mesh_data md = XRT_STRUCT_INIT;
598 for (uint32_t i = 0; i < d->target.view_count; i++) {
599 struct xrt_pose src_pose = d->views[i].world_pose_scanout_begin;
600 struct xrt_fov src_fov = d->views[i].fov;
601 VkImageView src_image_view = d->views[i].squash_as_src.sample_view;
602 struct xrt_normalized_rect src_norm_rect = d->views[i].squash_as_src.norm_rect;
603
604 gfx_mesh_add_view( //
605 &md, //
606 i, // view_index
607 &src_pose, //
608 &src_fov, //
609 &src_norm_rect, //
610 clamp_to_border_black, // src_sampler
611 src_image_view); //
612 }
613
614 // We are passing in the same old and new poses.
615 crg_distortion_common( //
616 render, //
617 false, // do_timewarp
618 &md, //
619 d); //
620}
621
622/// Fast path
623static void
624crg_distortion_fast_path(struct render_gfx *render,
625 const struct comp_render_dispatch_data *d,
626 const struct comp_layer *layer,
627 const struct xrt_layer_projection_view_data *vds[XRT_MAX_VIEWS])
628{
629 const struct xrt_layer_data *data = &layer->data;
630
631 const VkSampler clamp_to_border_black = render->r->samplers.clamp_to_border_black;
632
633 struct gfx_mesh_data md = XRT_STRUCT_INIT;
634 for (uint32_t i = 0; i < d->target.view_count; i++) {
635 const uint32_t array_index = vds[i]->sub.array_index;
636
637 const struct comp_swapchain_image *image = get_layer_image(layer, i, vds[i]->sub.image_index);
638
639 struct xrt_pose src_pose;
640 struct xrt_fov src_fov;
641 struct xrt_normalized_rect src_norm_rect;
642
643 src_pose = vds[i]->pose;
644 src_fov = vds[i]->fov;
645 src_norm_rect = vds[i]->sub.norm_rect;
646 const VkImageView src_image_view = get_image_view(image, data->flags, array_index);
647
648 if (data->flip_y) {
649 src_norm_rect.y += src_norm_rect.h;
650 src_norm_rect.h = -src_norm_rect.h;
651 }
652
653 gfx_mesh_add_view( //
654 &md, // md
655 i, // view_index
656 &src_pose, // src_pose
657 &src_fov, // src_fov
658 &src_norm_rect, // src_norm_rect
659 clamp_to_border_black, // src_sampler
660 src_image_view); // src_image_view
661 }
662
663 crg_distortion_common( //
664 render, //
665 d->do_timewarp, //
666 &md, //
667 d); //
668}
669
670
671/*
672 *
673 * 'Exported' function(s).
674 *
675 */
676
/*!
 * Squash all given layers into per-view images.
 *
 * Runs in three passes: (1) compute the per-view matrices, (2) allocate and
 * write one descriptor set per visible (layer, view) pair, (3) record the
 * draw commands. Finally the squash images are transitioned to
 * @p transition_to so a later pass can sample them.
 */
void
comp_render_gfx_layers(struct render_gfx *render,
                       const struct comp_layer *layers,
                       uint32_t layer_count,
                       const struct comp_render_dispatch_data *d,
                       VkImageLayout transition_to)
{
	COMP_TRACE_MARKER();

	struct vk_bundle *vk = render->r->vk;
	VkResult ret;

	struct gfx_layer_state ls = XRT_STRUCT_INIT;

	// Compute MVP matrices per eye: populates gfx_layer_view_state elements in `ls`
	// from `comp_render_dispatch_data *d`
	for (uint32_t view = 0; view < d->squash_view_count; view++) {

		// Data for this view, convenience.
		const struct xrt_pose world_pose = d->views[view].world_pose_scanout_begin;
		const struct xrt_pose eye_pose = d->views[view].eye_pose;
		const struct xrt_fov new_fov = d->views[view].fov;

		// Current state we are writing to.
		struct gfx_layer_view_state *state = &ls.views[view];

		// Used to go from UV to tangent space.
		render_calc_uv_to_tangent_lengths_rect(&new_fov, &state->to_tangent);

		// Projection
		struct xrt_matrix_4x4 p;
		math_matrix_4x4_projection_vulkan_infinite_reverse(&new_fov, 0.1, &p);

		// Reused view matrix.
		struct xrt_matrix_4x4 v;

		// World
		math_matrix_4x4_view_from_pose(&world_pose, &v);
		math_matrix_4x4_multiply(&p, &v, &state->world_vp_full);
		math_matrix_4x4_inverse(&v, &state->world_v_inv_full);

		// Rotation-only variant, used by layers wanting 3dof behavior.
		struct xrt_pose world_rot_only = {world_pose.orientation, XRT_VEC3_ZERO};
		math_matrix_4x4_view_from_pose(&world_rot_only, &v);
		math_matrix_4x4_multiply(&p, &v, &state->world_vp_rot_only);

		// Eye
		math_matrix_4x4_view_from_pose(&eye_pose, &v);
		math_matrix_4x4_multiply(&p, &v, &state->eye_vp_full);
		math_matrix_4x4_inverse(&v, &state->eye_v_inv_full);

		struct xrt_pose eye_rot_only = {eye_pose.orientation, XRT_VEC3_ZERO};
		math_matrix_4x4_view_from_pose(&eye_rot_only, &v);
		math_matrix_4x4_multiply(&p, &v, &state->eye_vp_rot_only);
	}

	/*
	 * Reserve UBOs, create descriptor sets, and fill in any data ahead of
	 * time. If we ever want to copy UBO data this lets us do that easily:
	 * write a copy command before the other gfx commands.
	 */

	assert(layer_count <= RENDER_MAX_LAYERS && "Too many layers");

	VkSampler clamp_to_edge = render->r->samplers.clamp_to_edge;
	VkSampler clamp_to_border_black = render->r->samplers.clamp_to_border_black;

	for (uint32_t view = 0; view < d->squash_view_count; view++) {

		// Source for data and written to as well, read and write.
		struct gfx_layer_view_state *state = &ls.views[view];

		for (uint32_t i = 0; i < layer_count; i++) {
			const struct xrt_layer_data *data = &layers[i].data;
			if (!is_layer_view_visible(data, view)) {
				continue;
			}

			// Each do_* helper writes one descriptor set and
			// appends the layer to `state` via add_layer().
			switch (data->type) {
			case XRT_LAYER_CYLINDER:
				ret = do_cylinder_layer(       //
				    render,                    //
				    &layers[i],                //
				    view,                      // view_index
				    clamp_to_edge,             //
				    clamp_to_border_black,     //
				    state);                    //
				VK_CHK_WITH_GOTO(ret, "do_cylinder_layer", err_layer);
				break;
			case XRT_LAYER_EQUIRECT2:
				ret = do_equirect2_layer(      //
				    render,                    //
				    &layers[i],                //
				    view,                      // view_index
				    clamp_to_edge,             //
				    clamp_to_border_black,     //
				    state);                    //
				VK_CHK_WITH_GOTO(ret, "do_equirect2_layer", err_layer);
				break;
			case XRT_LAYER_PROJECTION:
			case XRT_LAYER_PROJECTION_DEPTH:
				ret = do_projection_layer(     //
				    render,                    //
				    &layers[i],                //
				    view,                      // view_index
				    clamp_to_edge,             //
				    clamp_to_border_black,     //
				    state);                    //
				VK_CHK_WITH_GOTO(ret, "do_projection_layer", err_layer);
				break;
			case XRT_LAYER_QUAD:
				ret = do_quad_layer(           //
				    render,                    //
				    &layers[i],                //
				    view,                      // view_index
				    clamp_to_edge,             //
				    clamp_to_border_black,     //
				    state);                    //
				VK_CHK_WITH_GOTO(ret, "do_quad_layer", err_layer);
				break;
			// Unsupported layer types are silently skipped.
			default: break;
			}
		}
	}


	/*
	 * Do command writing here.
	 */

	// With no layers we only clear, use the idle color for that case.
	const VkClearColorValue *color = layer_count == 0 ? &background_color_idle : &background_color_active;

	for (uint32_t view = 0; view < d->squash_view_count; view++) {

		// Convenience.
		const struct render_viewport_data *viewport_data = &d->views[view].squash.viewport_data;

		render_gfx_begin_target(           //
		    render,                        //
		    d->views[view].squash.gfx.rtr, //
		    color);                        //

		render_gfx_begin_view( //
		    render,            //
		    view,              // view_index
		    viewport_data);    // viewport_data

		// Only source for data here, read only.
		const struct gfx_layer_view_state *state = &ls.views[view];

		// Draw in submission order, using the sets written above.
		for (uint32_t i = 0; i < state->layer_count; i++) {
			switch (state->types[i]) {
			case XRT_LAYER_CYLINDER:
				render_gfx_layer_cylinder(          //
				    render,                         //
				    state->premultiplied_alphas[i], //
				    state->descriptor_sets[i]);     //
				break;
			case XRT_LAYER_EQUIRECT2:
				render_gfx_layer_equirect2(         //
				    render,                         //
				    state->premultiplied_alphas[i], //
				    state->descriptor_sets[i]);     //
				break;
			case XRT_LAYER_PROJECTION:
			case XRT_LAYER_PROJECTION_DEPTH:
				render_gfx_layer_projection(        //
				    render,                         //
				    state->premultiplied_alphas[i], //
				    state->descriptor_sets[i]);     //
				break;
			case XRT_LAYER_QUAD:
				render_gfx_layer_quad(              //
				    render,                         //
				    state->premultiplied_alphas[i], //
				    state->descriptor_sets[i]);     //
				break;
			default: break;
			}
		}

		render_gfx_end_view(render);

		render_gfx_end_target(render);
	}


	// Make the squash images sampleable by the following pass.
	cmd_barrier_view_squash_images(                   //
	    render->r->vk,                                //
	    d,                                            //
	    render->r->cmd,                               // cmd
	    VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,         // src_access_mask
	    VK_ACCESS_SHADER_READ_BIT,                    // dst_access_mask
	    VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,     // transition_from
	    transition_to,                                //
	    VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, // src_stage_mask
	    VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT);       // dst_stage_mask

	return;

err_layer:
	// Allocator reset at end of frame, nothing to clean up.
	VK_ERROR(vk, "Layer processing failed, that shouldn't happen!");
}
880
881
882
/*!
 * Top-level graphics dispatch: pick the fast path (single projection layer
 * distorted directly), the layer squasher + distortion route, or a plain
 * clear when there is nothing to render.
 */
void
comp_render_gfx_dispatch(struct render_gfx *render,
                         const struct comp_layer *layers,
                         const uint32_t layer_count,
                         const struct comp_render_dispatch_data *d)
{
	if (!d->target.initialized) {
		VK_ERROR(render->r->vk, "Target hasn't been initialized, not rendering anything.");
		assert(d->target.initialized);
		return;
	}

	// Convenience.
	bool fast_path = d->fast_path;

	// Only used if fast_path is true.
	const struct comp_layer *layer = &layers[0];

	// Consistency check.
	assert(!fast_path || layer_count >= 1);

	// We want to read from the images afterwards.
	VkImageLayout transition_to = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;

	if (fast_path && layer->data.type == XRT_LAYER_PROJECTION) {
		// Fast path.
		const struct xrt_layer_projection_data *proj = &layer->data.proj;
		const struct xrt_layer_projection_view_data *vds[XRT_MAX_VIEWS];
		for (uint32_t view = 0; view < d->target.view_count; ++view) {
			vds[view] = &proj->v[view];
		}
		crg_distortion_fast_path( //
		    render,               //
		    d,                    //
		    layer,                //
		    vds);                 //

	} else if (fast_path && layer->data.type == XRT_LAYER_PROJECTION_DEPTH) {
		// Fast path, depth variant carries the same per-view data.
		const struct xrt_layer_projection_depth_data *depth = &layer->data.depth;
		const struct xrt_layer_projection_view_data *vds[XRT_MAX_VIEWS];
		for (uint32_t view = 0; view < d->target.view_count; ++view) {
			vds[view] = &depth->v[view];
		}
		crg_distortion_fast_path( //
		    render,               //
		    d,                    //
		    layer,                //
		    vds);                 //

	} else if (layer_count > 0) {
		// Graphics layer squasher
		if (fast_path) {
			U_LOG_W("Wanted fast path but no projection layer, falling back to layer squasher.");
		}

		/*
		 * Layer squashing.
		 */
		comp_render_gfx_layers( //
		    render,             //
		    layers,             //
		    layer_count,        //
		    d,                  //
		    transition_to);     //

		/*
		 * Distortion.
		 */
		crg_distortion_after_squash( //
		    render,                  //
		    d);

	} else {
		// Just clear the screen
		crg_clear_output( //
		    render,       //
		    d);           //
	}
}