// The open source OpenXR runtime.
1// Copyright 2019-2023, Collabora, Ltd.
2// SPDX-License-Identifier: BSL-1.0
3/*!
4 * @file
5 * @brief Direct mode window code.
6 * @author Lubosz Sarnecki <lubosz.sarnecki@collabora.com>
7 * @author Jakob Bornecrantz <jakob@collabora.com>
8 * @ingroup comp_main
9 */
10
11#include "util/u_misc.h"
12#include "util/u_pretty_print.h"
13
14#include "main/comp_window_direct.h"
15
16
17/*
18 *
19 * Private structs and defines.
20 *
21 */
22
23//! NVIDIA Vendor ID.
24#define NVIDIA_VENDOR_ID (0x10DE)
25
26/*!
27 * Probed display.
28 */
29struct comp_window_direct_nvidia_display
30{
31 char *name;
32 VkDisplayPropertiesKHR display_properties;
33 VkDisplayKHR display;
34};
35
36/*!
37 * Direct mode "window" into a device, using Vulkan direct mode extension
38 * and xcb.
39 *
40 * @implements comp_target_swapchain
41 */
42struct comp_window_direct_nvidia
43{
44 struct comp_target_swapchain base;
45
46 Display *dpy;
47 struct comp_window_direct_nvidia_display *displays;
48 uint16_t display_count;
49};
50
51
52/*
53 *
54 * Forward declare functions
55 *
56 */
57
58static void
59comp_window_direct_nvidia_destroy(struct comp_target *ct);
60
61static bool
62comp_window_direct_nvidia_init(struct comp_target *ct);
63
64static struct comp_window_direct_nvidia_display *
65comp_window_direct_nvidia_current_display(struct comp_window_direct_nvidia *w);
66
67static bool
68comp_window_direct_nvidia_init_swapchain(struct comp_target *ct, uint32_t width, uint32_t height);
69
70
71/*
72 *
73 * Functions.
74 *
75 */
76
77static inline struct vk_bundle *
78get_vk(struct comp_target *ct)
79{
80 return &ct->c->base.vk;
81}
82
/*!
 * No-op flush, direct mode has nothing to flush.
 */
static void
_flush(struct comp_target *ct)
{
	(void)ct; // Unused.
}
88
/*!
 * No-op title setter, a direct mode display has no window title.
 */
static void
_update_window_title(struct comp_target *ct, const char *title)
{
	// Unused, nothing to do in direct mode.
	(void)ct;
	(void)title;
}
95
96struct comp_target *
97comp_window_direct_nvidia_create(struct comp_compositor *c)
98{
99 struct comp_window_direct_nvidia *w = U_TYPED_CALLOC(struct comp_window_direct_nvidia);
100
101 // The display timing code hasn't been tested on nVidia and may be broken.
102 comp_target_swapchain_init_and_set_fnptrs(&w->base, COMP_TARGET_FORCE_FAKE_DISPLAY_TIMING);
103
104 w->base.base.name = "direct";
105 w->base.display = VK_NULL_HANDLE;
106 w->base.base.destroy = comp_window_direct_nvidia_destroy;
107 w->base.base.flush = _flush;
108 w->base.base.init_pre_vulkan = comp_window_direct_nvidia_init;
109 w->base.base.init_post_vulkan = comp_window_direct_nvidia_init_swapchain;
110 w->base.base.set_title = _update_window_title;
111 w->base.base.c = c;
112
113 return &w->base.base;
114}
115
116static void
117comp_window_direct_nvidia_destroy(struct comp_target *ct)
118{
119 struct comp_window_direct_nvidia *w_direct = (struct comp_window_direct_nvidia *)ct;
120
121 comp_target_swapchain_cleanup(&w_direct->base);
122
123 for (uint32_t i = 0; i < w_direct->display_count; i++) {
124 struct comp_window_direct_nvidia_display *d = &w_direct->displays[i];
125 d->display = VK_NULL_HANDLE;
126 free(d->name);
127 }
128
129 if (w_direct->displays != NULL)
130 free(w_direct->displays);
131
132 if (w_direct->dpy) {
133 XCloseDisplay(w_direct->dpy);
134 w_direct->dpy = NULL;
135 }
136
137 free(ct);
138}
139
140static bool
141append_nvidia_entry_on_match(struct comp_window_direct_nvidia *w,
142 const char *wl_entry,
143 struct VkDisplayPropertiesKHR *disp)
144{
145 unsigned long wl_entry_length = strlen(wl_entry);
146 unsigned long disp_entry_length = strlen(disp->displayName);
147
148 // If the entry is shorter then it will never match.
149 if (disp_entry_length < wl_entry_length) {
150 return false;
151 }
152
153 // We only check the first part of the string, extra characters ignored.
154 if (strncmp(wl_entry, disp->displayName, wl_entry_length) != 0) {
155 return false;
156 }
157
158 /*
159 * We have a match with this allow list entry.
160 */
161
162 // Make the compositor use this size.
163 comp_target_swapchain_override_extents(&w->base, disp->physicalResolution);
164
165 // Create the entry.
166 struct comp_window_direct_nvidia_display d = {
167 .name = U_TYPED_ARRAY_CALLOC(char, disp_entry_length + 1),
168 .display_properties = *disp,
169 .display = disp->display,
170 };
171
172 memcpy(d.name, disp->displayName, disp_entry_length);
173 d.name[disp_entry_length] = '\0';
174
175 w->display_count += 1;
176
177 U_ARRAY_REALLOC_OR_FREE(w->displays, struct comp_window_direct_nvidia_display, w->display_count);
178
179 if (w->displays == NULL) {
180 COMP_ERROR(w->base.base.c, "Unable to reallocate NVIDIA displays");
181
182 // Reset the count.
183 w->display_count = 0;
184 return false;
185 }
186
187 w->displays[w->display_count - 1] = d;
188
189 return true;
190}
191
192static bool
193comp_window_direct_nvidia_init(struct comp_target *ct)
194{
195 struct comp_window_direct_nvidia *w_direct = (struct comp_window_direct_nvidia *)ct;
196 struct vk_bundle *vk = get_vk(ct);
197 VkDisplayPropertiesKHR *display_props = NULL;
198 uint32_t display_count = 0;
199 VkResult ret;
200
201 if (vk->instance == VK_NULL_HANDLE) {
202 COMP_ERROR(ct->c, "Vulkan not initialized before NVIDIA init!");
203 return false;
204 }
205
206 if (!comp_window_direct_connect(&w_direct->base, &w_direct->dpy)) {
207 return false;
208 }
209
210 // find our display using nvidia allowlist, enumerate its modes, and
211 // pick the best one get a list of attached displays
212
213 ret = vk_enumerate_physical_device_display_properties( //
214 vk, //
215 vk->physical_device, //
216 &display_count, //
217 &display_props); //
218 if (ret != VK_SUCCESS) {
219 COMP_ERROR(ct->c, "vk_enumerate_physical_device_display_properties: %s", vk_result_string(ret));
220 return false;
221 }
222
223 if (display_count == 0) {
224 COMP_ERROR(ct->c, "NVIDIA: No Vulkan displays found.");
225 return false;
226 }
227
228 /// @todo what if we have multiple allowlisted HMD displays connected?
229 for (uint32_t i = 0; i < display_count; i++) {
230 struct VkDisplayPropertiesKHR disp = *(display_props + i);
231
232 if (ct->c->settings.nvidia_display) {
233 append_nvidia_entry_on_match(w_direct, ct->c->settings.nvidia_display, &disp);
234 }
235
236 // check this display against our allowlist
237 for (uint32_t j = 0; j < ARRAY_SIZE(NV_DIRECT_ALLOWLIST); j++)
238 if (append_nvidia_entry_on_match(w_direct, NV_DIRECT_ALLOWLIST[j], &disp))
239 break;
240 }
241
242 free(display_props);
243
244 return true;
245}
246
247static struct comp_window_direct_nvidia_display *
248comp_window_direct_nvidia_current_display(struct comp_window_direct_nvidia *w)
249{
250 int index = w->base.base.c->settings.display;
251 if (index == -1)
252 index = 0;
253
254 if (w->display_count <= (uint32_t)index)
255 return NULL;
256
257 return &w->displays[index];
258}
259
260static bool
261comp_window_direct_nvidia_init_swapchain(struct comp_target *ct, uint32_t width, uint32_t height)
262{
263 struct comp_window_direct_nvidia *w_direct = (struct comp_window_direct_nvidia *)ct;
264
265 struct comp_window_direct_nvidia_display *d = comp_window_direct_nvidia_current_display(w_direct);
266 if (!d) {
267 COMP_ERROR(ct->c, "NVIDIA could not find any HMDs.");
268 return false;
269 }
270
271 COMP_DEBUG(ct->c, "Will use display: %s", d->name);
272 struct comp_target_swapchain *cts = (struct comp_target_swapchain *)ct;
273 cts->display = d->display;
274
275 return comp_window_direct_init_swapchain(&w_direct->base, w_direct->dpy, d->display, width, height);
276}
277
278
279/*
280 *
281 * Factory
282 *
283 */
284
//! Vulkan instance extensions this target requires, referenced by the factory.
static const char *instance_extensions[] = {
    VK_KHR_DISPLAY_EXTENSION_NAME,
    VK_EXT_DIRECT_MODE_DISPLAY_EXTENSION_NAME,
    VK_EXT_ACQUIRE_XLIB_DISPLAY_EXTENSION_NAME,
};
290
291static bool
292_match_allowlist_entry(const char *al_entry, VkDisplayPropertiesKHR *disp)
293{
294 unsigned long al_entry_length = strlen(al_entry);
295 unsigned long disp_entry_length = strlen(disp->displayName);
296 if (disp_entry_length < al_entry_length)
297 return false;
298
299 // we have a match with this allowlist entry.
300 if (strncmp(al_entry, disp->displayName, al_entry_length) == 0)
301 return true;
302
303 return false;
304}
305
306/*
307 * our physical device is an nvidia card, we can potentially select
308 * nvidia-specific direct mode.
309 *
310 * we need to also check if we are confident that we can create a direct mode
311 * display, if not we need to abandon the attempt here, and allow desktop-window
312 * fallback to occur.
313 */
314
static bool
_test_for_nvidia(struct comp_compositor *c, struct vk_bundle *vk)
{
	// Both filled in by vk_enumerate_physical_device_display_properties below.
	VkDisplayPropertiesKHR *display_props;
	uint32_t display_count;
	VkResult ret;

	VkPhysicalDeviceProperties physical_device_properties;
	vk->vkGetPhysicalDeviceProperties(vk->physical_device, &physical_device_properties);

	// Only run this code on NVIDIA hardware.
	if (physical_device_properties.vendorID != NVIDIA_VENDOR_ID) {
		return false;
	}

	// Get a list of attached displays.
	ret = vk_enumerate_physical_device_display_properties( //
	    vk,                                                //
	    vk->physical_device,                               //
	    &display_count,                                    //
	    &display_props);                                   //
	if (ret != VK_SUCCESS) {
		CVK_ERROR(c, "vk_enumerate_physical_device_display_properties", "Failed to get display properties ",
		          ret);
		return false;
	}

	// First match wins: free the array and report success immediately.
	for (uint32_t i = 0; i < display_count; i++) {
		VkDisplayPropertiesKHR *disp = display_props + i;

		// Check this display against our allowlist.
		for (uint32_t j = 0; j < ARRAY_SIZE(NV_DIRECT_ALLOWLIST); j++) {
			if (_match_allowlist_entry(NV_DIRECT_ALLOWLIST[j], disp)) {
				free(display_props);
				return true;
			}
		}

		// Also check against any extra displays given by the user.
		if (c->settings.nvidia_display && _match_allowlist_entry(c->settings.nvidia_display, disp)) {
			free(display_props);
			return true;
		}
	}

	// No match: build a single multi-line error message on a stack sink
	// listing the allowlist and what was actually found, then log it once.
	struct u_pp_sink_stack_only sink;
	u_pp_delegate_t dg = u_pp_sink_stack_only_init(&sink);

	u_pp(dg, "NVIDIA: No allowlisted displays found!");

	u_pp(dg, "\n\t== Current Allowlist (%u) ==", (uint32_t)ARRAY_SIZE(NV_DIRECT_ALLOWLIST));
	for (uint32_t i = 0; i < ARRAY_SIZE(NV_DIRECT_ALLOWLIST); i++) {
		u_pp(dg, "\n\t\t%s", NV_DIRECT_ALLOWLIST[i]);
	}

	if (c->settings.nvidia_display != NULL) {
		u_pp(dg, "\n\t\t%s (extra)", c->settings.nvidia_display);
	}

	u_pp(dg, "\n\t== Found Displays (%u) ==", display_count);
	for (uint32_t i = 0; i < display_count; i++) {
		u_pp(dg, "\n\t\t%s", display_props[i].displayName);
	}

	COMP_ERROR(c, "%s", sink.buffer);

	free(display_props);

	return false;
}
385
386static bool
387check_vulkan_caps(struct comp_compositor *c, bool *out_detected)
388{
389 VkResult ret;
390
391 *out_detected = false;
392
393 // this is duplicative, but seems to be the easiest way to
394 // 'pre-check' capabilities when window creation precedes vulkan
395 // instance creation. we also need to load the VK_KHR_DISPLAY
396 // extension.
397
398 COMP_DEBUG(c, "Checking for NVIDIA vulkan driver.");
399
400 struct vk_bundle temp_vk_storage = {0};
401 struct vk_bundle *temp_vk = &temp_vk_storage;
402 temp_vk->log_level = U_LOGGING_WARN;
403
404 ret = vk_get_loader_functions(temp_vk, vkGetInstanceProcAddr);
405 if (ret != VK_SUCCESS) {
406 CVK_ERROR(c, "vk_get_loader_functions", "Failed to get loader functions.", ret);
407 return false;
408 }
409
410 const char *extension_names[] = {
411 COMP_INSTANCE_EXTENSIONS_COMMON,
412 VK_KHR_DISPLAY_EXTENSION_NAME,
413 };
414
415 VkInstanceCreateInfo instance_create_info = {
416 .sType = VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO,
417 .enabledExtensionCount = ARRAY_SIZE(extension_names),
418 .ppEnabledExtensionNames = extension_names,
419 };
420
421 ret = temp_vk->vkCreateInstance(&instance_create_info, NULL, &(temp_vk->instance));
422 if (ret != VK_SUCCESS) {
423 CVK_ERROR(c, "vkCreateInstance", "Failed to create VkInstance.", ret);
424 return false;
425 }
426
427 ret = vk_get_instance_functions(temp_vk);
428 if (ret != VK_SUCCESS) {
429 CVK_ERROR(c, "vk_get_instance_functions", "Failed to get Vulkan instance functions.", ret);
430 return false;
431 }
432
433 ret = vk_select_physical_device(temp_vk, c->settings.selected_gpu_index);
434 if (ret != VK_SUCCESS) {
435 CVK_ERROR(c, "vk_select_physical_device", "Failed to select physical device.", ret);
436 return false;
437 }
438
439 if (_test_for_nvidia(c, temp_vk)) {
440 *out_detected = true;
441 COMP_DEBUG(c, "Selecting direct NVIDIA window type!");
442 }
443
444 temp_vk->vkDestroyInstance(temp_vk->instance, NULL);
445
446 return true;
447}
448
449static bool
450detect(const struct comp_target_factory *ctf, struct comp_compositor *c)
451{
452 bool detected = false;
453
454 if (!check_vulkan_caps(c, &detected)) {
455 return false;
456 }
457
458 return detected;
459}
460
461static bool
462create_target(const struct comp_target_factory *ctf, struct comp_compositor *c, struct comp_target **out_ct)
463{
464 struct comp_target *ct = comp_window_direct_nvidia_create(c);
465 if (ct == NULL) {
466 return false;
467 }
468
469 *out_ct = ct;
470
471 return true;
472}
473
//! Factory for the NVIDIA X11 direct mode target.
const struct comp_target_factory comp_target_factory_direct_nvidia = {
    .name = "NVIDIA Direct-Mode",
    .identifier = "x11_direct_nvidia",
    .requires_vulkan_for_create = true,
    .is_deferred = false,
    .required_instance_version = 0,
    .required_instance_extensions = instance_extensions,
    .required_instance_extension_count = ARRAY_SIZE(instance_extensions),
    .optional_device_extensions = NULL,
    .optional_device_extension_count = 0,
    .detect = detect,
    .create_target = create_target,
};