// The open source OpenXR runtime.
1// Copyright 2020, Collabora, Ltd.
2// Copyright 2024-2025, NVIDIA CORPORATION.
3// SPDX-License-Identifier: BSL-1.0
4/*!
5 * @file
6 * @brief Abstracted compositor rendering target.
7 * @author Jakob Bornecrantz <jakob@collabora.com>
8 * @ingroup comp_main
9 */
10
11#pragma once
12
13#include "xrt/xrt_compiler.h"
14#include "xrt/xrt_defines.h"
15
16#include "vk/vk_helpers.h"
17
18#include "util/u_trace_marker.h"
19
20
21#ifdef __cplusplus
22extern "C" {
23#endif
24
25
/*!
 * For marking timepoints on a frame's lifetime, not an async event.
 *
 * @ingroup comp_main
 */
enum comp_target_timing_point
{
	//! Woke up after sleeping in wait frame.
	COMP_TARGET_TIMING_POINT_WAKE_UP,

	//! Began CPU side work for GPU.
	COMP_TARGET_TIMING_POINT_BEGIN,

	//! Just before submitting work to the GPU.
	COMP_TARGET_TIMING_POINT_SUBMIT_BEGIN,

	//! Just after submitting work to the GPU.
	COMP_TARGET_TIMING_POINT_SUBMIT_END,
};
45
/*!
 * If the target should use the display timing information.
 *
 * @ingroup comp_main
 */
enum comp_target_display_timing_usage
{
	//! Never use real display timing information, always use the fake/fallback timing code.
	COMP_TARGET_FORCE_FAKE_DISPLAY_TIMING = 0,

	//! Use display timing information if the target makes it available.
	COMP_TARGET_USE_DISPLAY_IF_AVAILABLE = 1,
};
56
/*!
 * Image and view pair for @ref comp_target.
 *
 * @ingroup comp_main
 */
struct comp_target_image
{
	//! Vulkan image backing this target image.
	VkImage handle;

	//! Image view into @ref handle, used for rendering.
	VkImageView view;
};
67
/*!
 * Information given when creating the swapchain images,
 * argument to @ref comp_target_create_images.
 *
 * @ingroup comp_main
 */
struct comp_target_create_images_info
{
	//! Image usage for the images, must be followed.
	VkImageUsageFlags image_usage;

	//! Acceptable formats for the images, must be followed.
	VkFormat formats[XRT_MAX_SWAPCHAIN_FORMATS];

	//! Number of valid entries in @ref formats.
	uint32_t format_count;

	//! Preferred extent, can be ignored by the target.
	VkExtent2D extent;

	//! Preferred color space, can be ignored by the target.
	VkColorSpaceKHR color_space;

	//! Preferred present mode, can be ignored by the target.
	VkPresentModeKHR present_mode;
};
94
/*!
 * Collection of semaphores needed for a target.
 *
 * @ingroup comp_main
 */
struct comp_target_semaphores
{
	/*!
	 * Optional semaphore the target should signal when present is complete.
	 */
	VkSemaphore present_complete;

	/*!
	 * Semaphore the renderer (consuming this target)
	 * should signal when rendering is complete.
	 */
	VkSemaphore render_complete;

	/*!
	 * If true, @ref render_complete is a timeline
	 * semaphore instead of a binary semaphore.
	 */
	bool render_complete_is_timeline;
};
119
/*!
 * @brief A compositor target: where the compositor renders to.
 *
 * A target is essentially a swapchain, but it is such an overloaded term so
 * we are differentiating swapchains that the compositor provides to clients and
 * swapchains that the compositor renders by naming the latter to target.
 *
 * For design purposes, when amending this interface, remember that targets may not necessarily be backed by a
 * swapchain in all cases, for instance with remote rendering.
 *
 * @ingroup comp_main
 */
struct comp_target
{
	//! Owning compositor.
	struct comp_compositor *c;

	//! Name of the backing system.
	const char *name;

	//! Current dimensions of the target.
	uint32_t width, height;

	//! The format that the renderpass targeting this target should use.
	VkFormat format;

	//! The final layout that the renderpass should leave this target in.
	VkImageLayout final_layout;

	//! Number of images that this target has.
	uint32_t image_count;
	//! Array of images and image views for rendering.
	struct comp_target_image *images;

	//! Transformation of the current surface, required for pre-rotation.
	VkSurfaceTransformFlagBitsKHR surface_transform;

	//! Holds semaphore information.
	struct comp_target_semaphores semaphores;

	//! Whether wait_for_present is supported by this comp_target.
	bool wait_for_present_supported;

	/*
	 *
	 * Vulkan functions.
	 *
	 */

	/*!
	 * Do any initialization that is required to happen before Vulkan has
	 * been loaded.
	 */
	bool (*init_pre_vulkan)(struct comp_target *ct);

	/*!
	 * Do any initialization that requires Vulkan to be loaded, you need to
	 * call @ref create_images after calling this function.
	 */
	bool (*init_post_vulkan)(struct comp_target *ct, uint32_t preferred_width, uint32_t preferred_height);

	/*!
	 * Is this target ready for image creation?
	 *
	 * Call before calling @ref create_images
	 */
	bool (*check_ready)(struct comp_target *ct);

	/*!
	 * Create or recreate the image(s) of the target, for swapchain based
	 * targets this will (re)create the swapchain.
	 *
	 * @pre @ref check_ready returns true
	 */
	void (*create_images)(struct comp_target *ct, const struct comp_target_create_images_info *create_info);

	/*!
	 * Has this target successfully had images created?
	 *
	 * Call before calling @ref acquire - if false but @ref check_ready is
	 * true, you'll need to call @ref create_images.
	 */
	bool (*has_images)(struct comp_target *ct);

	/*!
	 * Acquire the next image for rendering.
	 *
	 * If @ref comp_target_semaphores::present_complete is not null,
	 * your use of this image should wait on it.
	 *
	 * @pre @ref has_images() returns true
	 */
	VkResult (*acquire)(struct comp_target *ct, uint32_t *out_index);

	/*!
	 * Present the image at index to the screen.
	 *
	 * @pre @ref acquire succeeded for the same @p semaphore and @p index you are passing
	 *
	 * @param ct self
	 * @param queue The Vulkan queue being used
	 * @param index The swapchain image index to present
	 * @param timeline_semaphore_value The value to await on @ref comp_target_semaphores::render_complete
	 * if @ref comp_target_semaphores::render_complete_is_timeline is true.
	 * @param desired_present_time_ns The timestamp to present at, ideally.
	 * @param present_slop_ns TODO
	 */
	VkResult (*present)(struct comp_target *ct,
	                    VkQueue queue,
	                    uint32_t index,
	                    uint64_t timeline_semaphore_value,
	                    int64_t desired_present_time_ns,
	                    int64_t present_slop_ns);

	/*!
	 * Wait for the latest presented image to be displayed to the user.
	 *
	 * @param ct self
	 * @param timeout_ns The amount of time to wait for presentation to succeed.
	 */
	VkResult (*wait_for_present)(struct comp_target *ct, time_duration_ns timeout_ns);

	/*!
	 * Flush any WSI state before rendering.
	 */
	void (*flush)(struct comp_target *ct);


	/*
	 *
	 * Timing functions.
	 *
	 */

	/*!
	 * Predict when the next frame should be started and when it will be
	 * turned into photons by the hardware.
	 */
	void (*calc_frame_pacing)(struct comp_target *ct,
	                          int64_t *out_frame_id,
	                          int64_t *out_wake_up_time_ns,
	                          int64_t *out_desired_present_time_ns,
	                          int64_t *out_present_slop_ns,
	                          int64_t *out_predicted_display_time_ns);

	/*!
	 * The compositor tells the target timing information about a single
	 * timing point on the frame's lifecycle.
	 */
	void (*mark_timing_point)(struct comp_target *ct,
	                          enum comp_target_timing_point point,
	                          int64_t frame_id,
	                          int64_t when_ns);

	/*!
	 * Update timing information for this target, this function should be
	 * lightweight and is called multiple times during a frame to make sure
	 * that we get the timing data as soon as possible.
	 */
	VkResult (*update_timings)(struct comp_target *ct);

	/*!
	 * Provide frame timing information about GPU start and stop time.
	 *
	 * Depending on when the information is delivered this can be called at
	 * any point of the following frames.
	 *
	 * @param[in] ct The compositor target.
	 * @param[in] frame_id The frame ID to record for.
	 * @param[in] gpu_start_ns When the GPU work started.
	 * @param[in] gpu_end_ns When the GPU work stopped.
	 * @param[in] when_ns When the information was collected, nominally
	 * from @ref os_monotonic_get_ns.
	 *
	 * @see @ref frame-pacing.
	 */
	void (*info_gpu)(
	    struct comp_target *ct, int64_t frame_id, int64_t gpu_start_ns, int64_t gpu_end_ns, int64_t when_ns);

	/*
	 *
	 * Misc functions.
	 *
	 */

	/*!
	 * If the target can show a title (like a window) set the title.
	 */
	void (*set_title)(struct comp_target *ct, const char *title);

	/*!
	 * Get the available refresh rates for the compositor target.
	 *
	 * @param ct The compositor target.
	 * @param out_count The number of refresh rates.
	 * @param out_display_refresh_rates_hz The refresh rates, in Hz. Must be allocated by caller, and have at
	 * least XRT_MAX_SUPPORTED_REFRESH_RATES elements.
	 */
	xrt_result_t (*get_refresh_rates)(struct comp_target *ct,
	                                  uint32_t *out_count,
	                                  float *out_display_refresh_rates_hz);

	/*!
	 * Get the current refresh rate for the compositor target.
	 *
	 * @param ct The compositor target.
	 * @param out_display_refresh_rate_hz The current refresh rate, in Hz.
	 */
	xrt_result_t (*get_current_refresh_rate)(struct comp_target *ct, float *out_display_refresh_rate_hz);

	/*!
	 * Request a refresh rate for the compositor target.
	 *
	 * @param ct The compositor target.
	 * @param display_refresh_rate_hz The requested refresh rate, in Hz.
	 */
	xrt_result_t (*request_refresh_rate)(struct comp_target *ct, float display_refresh_rate_hz);


	/*!
	 * Destroys this target.
	 */
	void (*destroy)(struct comp_target *ct);
};
344
345/*!
346 * @copydoc comp_target::init_pre_vulkan
347 *
348 * @public @memberof comp_target
349 * @ingroup comp_main
350 */
351static inline bool
352comp_target_init_pre_vulkan(struct comp_target *ct)
353{
354 COMP_TRACE_MARKER();
355
356 return ct->init_pre_vulkan(ct);
357}
358
359/*!
360 * @copydoc comp_target::init_post_vulkan
361 *
362 * @public @memberof comp_target
363 * @ingroup comp_main
364 */
365static inline bool
366comp_target_init_post_vulkan(struct comp_target *ct, uint32_t preferred_width, uint32_t preferred_height)
367{
368 COMP_TRACE_MARKER();
369
370 return ct->init_post_vulkan(ct, preferred_width, preferred_height);
371}
372
373/*!
374 * @copydoc comp_target::check_ready
375 *
376 * @public @memberof comp_target
377 * @ingroup comp_main
378 */
379static inline bool
380comp_target_check_ready(struct comp_target *ct)
381{
382 COMP_TRACE_MARKER();
383
384 return ct->check_ready(ct);
385}
386
387/*!
388 * @copydoc comp_target::create_images
389 *
390 * @public @memberof comp_target
391 * @ingroup comp_main
392 */
393static inline void
394comp_target_create_images(struct comp_target *ct, const struct comp_target_create_images_info *create_info)
395{
396 COMP_TRACE_MARKER();
397
398 ct->create_images(ct, create_info);
399}
400
401/*!
402 * @copydoc comp_target::has_images
403 *
404 * @public @memberof comp_target
405 * @ingroup comp_main
406 */
407static inline bool
408comp_target_has_images(struct comp_target *ct)
409{
410 COMP_TRACE_MARKER();
411
412 return ct->has_images(ct);
413}
414
415/*!
416 * @copydoc comp_target::acquire
417 *
418 * @public @memberof comp_target
419 * @ingroup comp_main
420 */
421static inline VkResult
422comp_target_acquire(struct comp_target *ct, uint32_t *out_index)
423{
424 COMP_TRACE_MARKER();
425
426 return ct->acquire(ct, out_index);
427}
428
429/*!
430 * @copydoc comp_target::present
431 *
432 * @public @memberof comp_target
433 * @ingroup comp_main
434 */
435static inline VkResult
436comp_target_present(struct comp_target *ct,
437 VkQueue queue,
438 uint32_t index,
439 uint64_t timeline_semaphore_value,
440 int64_t desired_present_time_ns,
441 int64_t present_slop_ns)
442
443{
444 COMP_TRACE_MARKER();
445
446 return ct->present( //
447 ct, //
448 queue, //
449 index, //
450 timeline_semaphore_value, //
451 desired_present_time_ns, //
452 present_slop_ns); //
453}
454
455/*!
456 * @copydoc comp_target::wait_for_present
457 *
458 * @public @memberof comp_target
459 * @ingroup comp_main
460 */
461static inline VkResult
462comp_target_wait_for_present(struct comp_target *ct, time_duration_ns timeout)
463{
464 COMP_TRACE_MARKER();
465
466 return ct->wait_for_present( //
467 ct, //
468 timeout); //
469}
470
471/*!
472 * @copydoc comp_target::flush
473 *
474 * @public @memberof comp_target
475 * @ingroup comp_main
476 */
477static inline void
478comp_target_flush(struct comp_target *ct)
479{
480 COMP_TRACE_MARKER();
481
482 ct->flush(ct);
483}
484
485/*!
486 * @copydoc comp_target::calc_frame_pacing
487 *
488 * @public @memberof comp_target
489 * @ingroup comp_main
490 */
491static inline void
492comp_target_calc_frame_pacing(struct comp_target *ct,
493 int64_t *out_frame_id,
494 int64_t *out_wake_up_time_ns,
495 int64_t *out_desired_present_time_ns,
496 int64_t *out_present_slop_ns,
497 int64_t *out_predicted_display_time_ns)
498{
499 COMP_TRACE_MARKER();
500
501 ct->calc_frame_pacing( //
502 ct, //
503 out_frame_id, //
504 out_wake_up_time_ns, //
505 out_desired_present_time_ns, //
506 out_present_slop_ns, //
507 out_predicted_display_time_ns); //
508}
509
510/*!
511 * Quick helper for marking wake up.
512 * @copydoc comp_target::mark_timing_point
513 *
514 * @public @memberof comp_target
515 * @ingroup comp_main
516 */
517static inline void
518comp_target_mark_wake_up(struct comp_target *ct, int64_t frame_id, int64_t when_woke_ns)
519{
520 COMP_TRACE_MARKER();
521
522 ct->mark_timing_point(ct, COMP_TARGET_TIMING_POINT_WAKE_UP, frame_id, when_woke_ns);
523}
524
525/*!
526 * Quick helper for marking begin.
527 * @copydoc comp_target::mark_timing_point
528 *
529 * @public @memberof comp_target
530 * @ingroup comp_main
531 */
532static inline void
533comp_target_mark_begin(struct comp_target *ct, int64_t frame_id, int64_t when_began_ns)
534{
535 COMP_TRACE_MARKER();
536
537 ct->mark_timing_point(ct, COMP_TARGET_TIMING_POINT_BEGIN, frame_id, when_began_ns);
538}
539
540/*!
541 * Quick helper for marking submit began.
542 * @copydoc comp_target::mark_timing_point
543 *
544 * @public @memberof comp_target
545 * @ingroup comp_main
546 */
547static inline void
548comp_target_mark_submit_begin(struct comp_target *ct, int64_t frame_id, int64_t when_submit_began_ns)
549{
550 COMP_TRACE_MARKER();
551
552 ct->mark_timing_point(ct, COMP_TARGET_TIMING_POINT_SUBMIT_BEGIN, frame_id, when_submit_began_ns);
553}
554
555/*!
556 * Quick helper for marking submit end.
557 * @copydoc comp_target::mark_timing_point
558 *
559 * @public @memberof comp_target
560 * @ingroup comp_main
561 */
562static inline void
563comp_target_mark_submit_end(struct comp_target *ct, int64_t frame_id, int64_t when_submit_end_ns)
564{
565 COMP_TRACE_MARKER();
566
567 ct->mark_timing_point(ct, COMP_TARGET_TIMING_POINT_SUBMIT_END, frame_id, when_submit_end_ns);
568}
569
570/*!
571 * @copydoc comp_target::update_timings
572 *
573 * @public @memberof comp_target
574 * @ingroup comp_main
575 */
576static inline VkResult
577comp_target_update_timings(struct comp_target *ct)
578{
579 COMP_TRACE_MARKER();
580
581 return ct->update_timings(ct);
582}
583
584/*!
585 * @copydoc comp_target::info_gpu
586 *
587 * @public @memberof comp_target
588 * @ingroup comp_main
589 */
590static inline void
591comp_target_info_gpu(
592 struct comp_target *ct, int64_t frame_id, int64_t gpu_start_ns, int64_t gpu_end_ns, int64_t when_ns)
593{
594 COMP_TRACE_MARKER();
595
596 ct->info_gpu(ct, frame_id, gpu_start_ns, gpu_end_ns, when_ns);
597}
598
599/*!
600 * @copydoc comp_target::set_title
601 *
602 * @public @memberof comp_target
603 * @ingroup comp_main
604 */
605static inline void
606comp_target_set_title(struct comp_target *ct, const char *title)
607{
608 COMP_TRACE_MARKER();
609
610 ct->set_title(ct, title);
611}
612
613/*!
614 * @copydoc comp_target::get_refresh_rates
615 *
616 * @public @memberof comp_target
617 * @ingroup comp_main
618 */
619static inline xrt_result_t
620comp_target_get_refresh_rates(struct comp_target *ct, uint32_t *count, float *rates)
621{
622 COMP_TRACE_MARKER();
623
624 return ct->get_refresh_rates(ct, count, rates);
625}
626
627/*!
628 * @copydoc comp_target::get_current_refresh_rate
629 *
630 * @public @memberof comp_target
631 * @ingroup comp_main
632 */
633static inline xrt_result_t
634comp_target_get_current_refresh_rate(struct comp_target *ct, float *out_display_refresh_rate_hz)
635{
636 COMP_TRACE_MARKER();
637
638 return ct->get_current_refresh_rate(ct, out_display_refresh_rate_hz);
639}
640
641/*!
642 * @copydoc comp_target::request_refresh_rate
643 *
644 * @public @memberof comp_target
645 * @ingroup comp_main
646 */
647static inline xrt_result_t
648comp_target_request_refresh_rate(struct comp_target *ct, float ratedisplay_refresh_rate_hz)
649{
650 COMP_TRACE_MARKER();
651
652 return ct->request_refresh_rate(ct, ratedisplay_refresh_rate_hz);
653}
654
655/*!
656 * @copydoc comp_target::destroy
657 *
658 * Helper for calling through the function pointer: does a null check and sets
659 * ct_ptr to null if freed.
660 *
661 * @public @memberof comp_target
662 * @ingroup comp_main
663 */
664static inline void
665comp_target_destroy(struct comp_target **ct_ptr)
666{
667 struct comp_target *ct = *ct_ptr;
668 if (ct == NULL) {
669 return;
670 }
671
672 ct->destroy(ct);
673 *ct_ptr = NULL;
674}
675
/*!
 * A factory of targets.
 *
 * @ingroup comp_main
 */
struct comp_target_factory
{
	//! Pretty loggable name of target type.
	const char *name;

	//! Short all lowercase identifier for target type.
	const char *identifier;

	//! Does this factory require Vulkan to have been initialized.
	bool requires_vulkan_for_create;

	/*!
	 * Is this a deferred target that can have its creation
	 * delayed even further than after Vulkan initialization.
	 */
	bool is_deferred;

	/*!
	 * Vulkan version that is required, or 0 if no specific
	 * requirement, equivalent to VK_MAKE_VERSION(1, 0, 0).
	 */
	uint32_t required_instance_version;

	//! Required instance extensions.
	const char **required_instance_extensions;

	//! Required instance extension count.
	size_t required_instance_extension_count;

	//! Optional device extensions.
	const char **optional_device_extensions;

	//! Optional device extension count.
	size_t optional_device_extension_count;

	/*!
	 * Checks if this target can be detected, is the preferred target or
	 * some other special consideration that this target should be used over
	 * all other targets.
	 *
	 * This is needed for NVIDIA direct mode, whose window must be created
	 * after Vulkan has been initialized.
	 */
	bool (*detect)(const struct comp_target_factory *ctf, struct comp_compositor *c);

	/*!
	 * Create a target from this factory, some targets require Vulkan to
	 * have been initialised, see @ref requires_vulkan_for_create.
	 */
	bool (*create_target)(const struct comp_target_factory *ctf,
	                      struct comp_compositor *c,
	                      struct comp_target **out_ct);
};
734
735/*!
736 * @copydoc comp_target_factory::detect
737 *
738 * @public @memberof comp_target_factory
739 * @ingroup comp_main
740 */
741static inline bool
742comp_target_factory_detect(const struct comp_target_factory *ctf, struct comp_compositor *c)
743{
744 COMP_TRACE_MARKER();
745
746 return ctf->detect(ctf, c);
747}
748
749/*!
750 * @copydoc comp_target_factory::create_target
751 *
752 * @public @memberof comp_target_factory
753 * @ingroup comp_main
754 */
755static inline bool
756comp_target_factory_create_target(const struct comp_target_factory *ctf,
757 struct comp_compositor *c,
758 struct comp_target **out_ct)
759{
760 COMP_TRACE_MARKER();
761
762 return ctf->create_target(ctf, c, out_ct);
763}
764
765
766#ifdef __cplusplus
767}
768#endif