// The open source OpenXR runtime.
1// Copyright 2019-2023, Collabora, Ltd.
2// SPDX-License-Identifier: BSL-1.0
3/*!
4 * @file
5 * @brief OpenGL client side glue to compositor implementation.
6 * @author Jakob Bornecrantz <jakob@collabora.com>
7 * @ingroup comp_client
8 */
9
10#include <stdio.h>
11#include <string.h>
12#include <assert.h>
13#include <stdlib.h>
14
15#include "xrt/xrt_config_os.h"
16#include "util/u_misc.h"
17
18#include <xrt/xrt_config_have.h>
19#if defined(XRT_HAVE_EGL)
20#include "ogl/egl_api.h"
21#endif
22#if defined(XRT_HAVE_OPENGL) || defined(XRT_HAVE_OPENGLES)
23#include "ogl/ogl_api.h"
24#endif
25
26#include "ogl/ogl_helpers.h"
27
28#include "client/comp_gl_client.h"
29
30#include "util/u_logging.h"
31#include "util/u_trace_marker.h"
32
33
34
35/*
36 *
37 * Helpers.
38 *
39 */
40
41/*!
42 * Down-cast helper.
43 * @private @memberof client_gl_swapchain
44 */
45static inline struct client_gl_swapchain *
46client_gl_swapchain(struct xrt_swapchain *xsc)
47{
48 return (struct client_gl_swapchain *)xsc;
49}
50
51static inline struct xrt_swapchain *
52to_native_swapchain(struct xrt_swapchain *xsc)
53{
54 return &client_gl_swapchain(xsc)->xscn->base;
55}
56
57static inline struct xrt_compositor *
58to_native_compositor(struct xrt_compositor *xc)
59{
60 return &client_gl_compositor(xc)->xcn->base;
61}
62
63static int64_t
64gl_format_to_vk(int64_t format)
65{
66 switch (format) {
67 case GL_RGB8: return 23 /*VK_FORMAT_R8G8B8_UNORM*/; // Should not be used, colour precision.
68 case GL_SRGB8: return 29 /*VK_FORMAT_R8G8B8_SRGB*/;
69 case GL_RGBA8: return 37 /*VK_FORMAT_R8G8B8A8_UNORM*/; // Should not be used, colour precision.
70 case GL_SRGB8_ALPHA8: return 43 /*VK_FORMAT_R8G8B8A8_SRGB*/;
71 case GL_RGB10_A2: return 64 /*VK_FORMAT_A2B10G10R10_UNORM_PACK32*/;
72 case GL_RGB16: return 84 /*VK_FORMAT_R16G16B16_UNORM*/;
73 case GL_RGB16F: return 90 /*VK_FORMAT_R16G16B16_SFLOAT*/;
74 case GL_RGBA16: return 91 /*VK_FORMAT_R16G16B16A16_UNORM*/;
75 case GL_RGBA16F: return 97 /*VK_FORMAT_R16G16B16A16_SFLOAT*/;
76 case GL_DEPTH_COMPONENT16: return 124 /*VK_FORMAT_D16_UNORM*/;
77 case GL_DEPTH_COMPONENT32F: return 126 /*VK_FORMAT_D32_SFLOAT*/;
78 case GL_DEPTH24_STENCIL8: return 129 /*VK_FORMAT_D24_UNORM_S8_UINT*/;
79 case GL_DEPTH32F_STENCIL8: return 130 /*VK_FORMAT_D32_SFLOAT_S8_UINT*/;
80 default: U_LOG_W("Cannot convert GL format %" PRIu64 " to VK format!", format); return 0;
81 }
82}
83
84static int64_t
85vk_format_to_gl(int64_t format)
86{
87 switch (format) {
88 case 4 /* VK_FORMAT_R5G6B5_UNORM_PACK16 */: return 0; // GL_RGB565?
89 case 23 /* VK_FORMAT_R8G8B8_UNORM */: return GL_RGB8; // Should not be used, colour precision.
90 case 29 /* VK_FORMAT_R8G8B8_SRGB */: return GL_SRGB8;
91 case 30 /* VK_FORMAT_B8G8R8_UNORM */: return 0;
92 case 37 /* VK_FORMAT_R8G8B8A8_UNORM */: return GL_RGBA8; // Should not be used, colour precision.
93 case 43 /* VK_FORMAT_R8G8B8A8_SRGB */: return GL_SRGB8_ALPHA8;
94 case 44 /* VK_FORMAT_B8G8R8A8_UNORM */: return 0;
95 case 50 /* VK_FORMAT_B8G8R8A8_SRGB */: return 0;
96 case 64 /* VK_FORMAT_A2B10G10R10_UNORM_PACK32 */: return GL_RGB10_A2;
97 case 84 /* VK_FORMAT_R16G16B16_UNORM */: return GL_RGB16;
98 case 90 /* VK_FORMAT_R16G16B16_SFLOAT */: return GL_RGB16F;
99 case 91 /* VK_FORMAT_R16G16B16A16_UNORM */: return GL_RGBA16;
100 case 97 /* VK_FORMAT_R16G16B16A16_SFLOAT */: return GL_RGBA16F;
101 case 100 /* VK_FORMAT_R32_SFLOAT */: return 0;
102 case 124 /* VK_FORMAT_D16_UNORM */: return GL_DEPTH_COMPONENT16;
103 case 125 /* VK_FORMAT_X8_D24_UNORM_PACK32 */: return 0; // GL_DEPTH_COMPONENT24?
104 case 126 /* VK_FORMAT_D32_SFLOAT */: return GL_DEPTH_COMPONENT32F;
105 case 127 /* VK_FORMAT_S8_UINT */: return 0; // GL_STENCIL_INDEX8?
106 case 129 /* VK_FORMAT_D24_UNORM_S8_UINT */: return GL_DEPTH24_STENCIL8;
107 case 130 /* VK_FORMAT_D32_SFLOAT_S8_UINT */: return GL_DEPTH32F_STENCIL8;
108 default: U_LOG_W("Cannot convert VK format %" PRIu64 " to GL format!", format); return 0;
109 }
110}
111
112/*!
113 * Called with the right context made current.
114 */
115static xrt_graphics_sync_handle_t
116handle_fencing_or_finish(struct client_gl_compositor *c)
117{
118 xrt_graphics_sync_handle_t sync_handle = XRT_GRAPHICS_SYNC_HANDLE_INVALID;
119 xrt_result_t xret = XRT_SUCCESS;
120
121 if (c->insert_fence != NULL) {
122 COMP_TRACE_IDENT(insert_fence);
123
124 xret = c->insert_fence(&c->base.base, &sync_handle);
125 if (xret != XRT_SUCCESS) {
126 U_LOG_E("Failed to insert a fence");
127 }
128 }
129
130 // Fallback to glFinish if we haven't inserted a fence.
131 if (sync_handle == XRT_GRAPHICS_SYNC_HANDLE_INVALID) {
132 COMP_TRACE_IDENT(glFinish);
133
134 glFinish();
135 }
136
137 return sync_handle;
138}
139
140
141/*
142 *
143 * Swapchain functions.
144 *
145 */
146
147static xrt_result_t
148client_gl_swapchain_acquire_image(struct xrt_swapchain *xsc, uint32_t *out_index)
149{
150 // Pipe down call into native swapchain.
151 return xrt_swapchain_acquire_image(to_native_swapchain(xsc), out_index);
152}
153
154static xrt_result_t
155client_gl_swapchain_wait_image(struct xrt_swapchain *xsc, int64_t timeout_ns, uint32_t index)
156{
157 // Pipe down call into native swapchain.
158 return xrt_swapchain_wait_image(to_native_swapchain(xsc), timeout_ns, index);
159}
160
static xrt_result_t
client_gl_swapchain_barrier_image(struct xrt_swapchain *xsc, enum xrt_barrier_direction direction, uint32_t index)
{
	// Intentional no-op: nothing is piped down to the native swapchain here,
	// the GL client does not perform explicit image barriers.
	return XRT_SUCCESS;
}
166
167static xrt_result_t
168client_gl_swapchain_release_image(struct xrt_swapchain *xsc, uint32_t index)
169{
170 // Pipe down call into native swapchain.
171 return xrt_swapchain_release_image(to_native_swapchain(xsc), index);
172}
173
174
175/*
176 *
177 * Compositor functions.
178 *
179 */
180
181static xrt_result_t
182client_gl_compositor_begin_session(struct xrt_compositor *xc, const struct xrt_begin_session_info *info)
183{
184 // Pipe down call into native compositor.
185 return xrt_comp_begin_session(to_native_compositor(xc), info);
186}
187
188static xrt_result_t
189client_gl_compositor_end_session(struct xrt_compositor *xc)
190{
191 // Pipe down call into native compositor.
192 return xrt_comp_end_session(to_native_compositor(xc));
193}
194
195static xrt_result_t
196client_gl_compositor_wait_frame(struct xrt_compositor *xc,
197 int64_t *out_frame_id,
198 int64_t *predicted_display_time,
199 int64_t *predicted_display_period)
200{
201 // Pipe down call into native compositor.
202 return xrt_comp_wait_frame( //
203 to_native_compositor(xc), //
204 out_frame_id, //
205 predicted_display_time, //
206 predicted_display_period); //
207}
208
209static xrt_result_t
210client_gl_compositor_begin_frame(struct xrt_compositor *xc, int64_t frame_id)
211{
212 // Pipe down call into native compositor.
213 return xrt_comp_begin_frame(to_native_compositor(xc), frame_id);
214}
215
216static xrt_result_t
217client_gl_compositor_discard_frame(struct xrt_compositor *xc, int64_t frame_id)
218{
219 // Pipe down call into native compositor.
220 return xrt_comp_discard_frame(to_native_compositor(xc), frame_id);
221}
222
223static xrt_result_t
224client_gl_compositor_layer_begin(struct xrt_compositor *xc, const struct xrt_layer_frame_data *data)
225{
226 // Pipe down call into native compositor.
227 return xrt_comp_layer_begin(to_native_compositor(xc), data);
228}
229
230static xrt_result_t
231client_gl_compositor_layer_projection(struct xrt_compositor *xc,
232 struct xrt_device *xdev,
233 struct xrt_swapchain *xsc[XRT_MAX_VIEWS],
234 const struct xrt_layer_data *data)
235{
236 struct xrt_compositor *xcn;
237 struct xrt_swapchain *xscn[XRT_MAX_VIEWS];
238
239 xcn = to_native_compositor(xc);
240 assert(data->type == XRT_LAYER_PROJECTION);
241 for (uint32_t i = 0; i < data->view_count; ++i) {
242 xscn[i] = &client_gl_swapchain(xsc[i])->xscn->base;
243 }
244 struct xrt_layer_data d = *data;
245 d.flip_y = !d.flip_y;
246
247 return xrt_comp_layer_projection(xcn, xdev, xscn, &d);
248}
249
250static xrt_result_t
251client_gl_compositor_layer_projection_depth(struct xrt_compositor *xc,
252 struct xrt_device *xdev,
253 struct xrt_swapchain *xsc[XRT_MAX_VIEWS],
254 struct xrt_swapchain *d_xsc[XRT_MAX_VIEWS],
255 const struct xrt_layer_data *data)
256{
257 struct xrt_compositor *xcn;
258 struct xrt_swapchain *xscn[XRT_MAX_VIEWS];
259 struct xrt_swapchain *d_xscn[XRT_MAX_VIEWS];
260
261 assert(data->type == XRT_LAYER_PROJECTION_DEPTH);
262
263 xcn = to_native_compositor(xc);
264 for (uint32_t i = 0; i < data->view_count; ++i) {
265 xscn[i] = to_native_swapchain(xsc[i]);
266 d_xscn[i] = to_native_swapchain(d_xsc[i]);
267 }
268
269 struct xrt_layer_data d = *data;
270 d.flip_y = !d.flip_y;
271
272 return xrt_comp_layer_projection_depth(xcn, xdev, xscn, d_xscn, &d);
273}
274
275static xrt_result_t
276client_gl_compositor_layer_quad(struct xrt_compositor *xc,
277 struct xrt_device *xdev,
278 struct xrt_swapchain *xsc,
279 const struct xrt_layer_data *data)
280{
281 struct xrt_compositor *xcn;
282 struct xrt_swapchain *xscfb;
283
284 assert(data->type == XRT_LAYER_QUAD);
285
286 xcn = to_native_compositor(xc);
287 xscfb = to_native_swapchain(xsc);
288
289 struct xrt_layer_data d = *data;
290 d.flip_y = !d.flip_y;
291
292 return xrt_comp_layer_quad(xcn, xdev, xscfb, &d);
293}
294
295static xrt_result_t
296client_gl_compositor_layer_cube(struct xrt_compositor *xc,
297 struct xrt_device *xdev,
298 struct xrt_swapchain *xsc,
299 const struct xrt_layer_data *data)
300{
301 struct xrt_compositor *xcn;
302 struct xrt_swapchain *xscfb;
303
304 assert(data->type == XRT_LAYER_CUBE);
305
306 xcn = to_native_compositor(xc);
307 xscfb = to_native_swapchain(xsc);
308
309 struct xrt_layer_data d = *data;
310 d.flip_y = !d.flip_y;
311
312 return xrt_comp_layer_cube(xcn, xdev, xscfb, &d);
313}
314
315static xrt_result_t
316client_gl_compositor_layer_cylinder(struct xrt_compositor *xc,
317 struct xrt_device *xdev,
318 struct xrt_swapchain *xsc,
319 const struct xrt_layer_data *data)
320{
321 struct xrt_compositor *xcn;
322 struct xrt_swapchain *xscfb;
323
324 assert(data->type == XRT_LAYER_CYLINDER);
325
326 xcn = to_native_compositor(xc);
327 xscfb = to_native_swapchain(xsc);
328
329 struct xrt_layer_data d = *data;
330 d.flip_y = !d.flip_y;
331
332 return xrt_comp_layer_cylinder(xcn, xdev, xscfb, &d);
333}
334
335static xrt_result_t
336client_gl_compositor_layer_equirect1(struct xrt_compositor *xc,
337 struct xrt_device *xdev,
338 struct xrt_swapchain *xsc,
339 const struct xrt_layer_data *data)
340{
341 struct xrt_compositor *xcn;
342 struct xrt_swapchain *xscfb;
343
344 assert(data->type == XRT_LAYER_EQUIRECT1);
345
346 xcn = to_native_compositor(xc);
347 xscfb = to_native_swapchain(xsc);
348
349 struct xrt_layer_data d = *data;
350 d.flip_y = !d.flip_y;
351
352 return xrt_comp_layer_equirect1(xcn, xdev, xscfb, &d);
353}
354
355static xrt_result_t
356client_gl_compositor_layer_equirect2(struct xrt_compositor *xc,
357 struct xrt_device *xdev,
358 struct xrt_swapchain *xsc,
359 const struct xrt_layer_data *data)
360{
361 struct xrt_compositor *xcn;
362 struct xrt_swapchain *xscfb;
363
364 assert(data->type == XRT_LAYER_EQUIRECT2);
365
366 xcn = to_native_compositor(xc);
367 xscfb = to_native_swapchain(xsc);
368
369 struct xrt_layer_data d = *data;
370 d.flip_y = !d.flip_y;
371
372 return xrt_comp_layer_equirect2(xcn, xdev, xscfb, &d);
373}
374
375static xrt_result_t
376client_gl_compositor_layer_passthrough(struct xrt_compositor *xc,
377 struct xrt_device *xdev,
378 const struct xrt_layer_data *data)
379{
380 struct client_gl_compositor *c = client_gl_compositor(xc);
381
382 assert(data->type == XRT_LAYER_PASSTHROUGH);
383
384 struct xrt_layer_data d = *data;
385 d.flip_y = !d.flip_y;
386
387 return xrt_comp_layer_passthrough(&c->xcn->base, xdev, &d);
388}
389
/*
 * Commit the frame's layers: create the GPU sync object ourselves (GL side),
 * then hand it down to the native compositor.
 */
static xrt_result_t
client_gl_compositor_layer_commit(struct xrt_compositor *xc, xrt_graphics_sync_handle_t sync_handle)
{
	COMP_TRACE_MARKER();

	struct client_gl_compositor *c = client_gl_compositor(xc);

	if (c->renderdoc_enabled) {
		// Emit the marker string RenderDoc watches for to delimit an application frame.
		glDebugMessageInsert(GL_DEBUG_SOURCE_THIRD_PARTY, GL_DEBUG_TYPE_MARKER, 1,
		                     GL_DEBUG_SEVERITY_NOTIFICATION, -1, "vr-marker,frame_end,type,application");
	}

	// We make the sync object, not st/oxr which is our user.
	assert(!xrt_graphics_sync_handle_is_valid(sync_handle));

	sync_handle = XRT_GRAPHICS_SYNC_HANDLE_INVALID;

	// The GL context must be current to insert a fence (or glFinish).
	xrt_result_t xret = client_gl_compositor_context_begin(xc, CLIENT_GL_CONTEXT_REASON_SYNCHRONIZE);
	if (xret == XRT_SUCCESS) {
		sync_handle = handle_fencing_or_finish(c);
		client_gl_compositor_context_end(xc, CLIENT_GL_CONTEXT_REASON_SYNCHRONIZE);
	}
	// NOTE(review): if context_begin fails we still commit, just with an invalid
	// sync handle — presumably a deliberate best-effort; confirm with native side.

	COMP_TRACE_IDENT(layer_commit);

	return xrt_comp_layer_commit(&c->xcn->base, sync_handle);
}
417
418static xrt_result_t
419client_gl_compositor_get_swapchain_create_properties(struct xrt_compositor *xc,
420 const struct xrt_swapchain_create_info *info,
421 struct xrt_swapchain_create_properties *xsccp)
422{
423 struct client_gl_compositor *c = client_gl_compositor(xc);
424
425 int64_t vk_format = gl_format_to_vk(info->format);
426 if (vk_format == 0) {
427 U_LOG_E("Invalid format!");
428 return XRT_ERROR_SWAPCHAIN_FORMAT_UNSUPPORTED;
429 }
430
431 struct xrt_swapchain_create_info vkinfo = *info;
432 vkinfo.format = vk_format;
433
434 return xrt_comp_get_swapchain_create_properties(&c->xcn->base, &vkinfo, xsccp);
435}
436
/*
 * Create a GL client swapchain: allocate the native (Vulkan-format) swapchain
 * through the native compositor, then wrap it with the backend-provided GL
 * swapchain via c->create_swapchain. The GL context is made current for the
 * duration of the GL work and always released before returning.
 */
static xrt_result_t
client_gl_swapchain_create(struct xrt_compositor *xc,
                           const struct xrt_swapchain_create_info *info,
                           struct xrt_swapchain **out_xsc)
{
	struct client_gl_compositor *c = client_gl_compositor(xc);
	struct xrt_swapchain_create_properties xsccp = {0};
	xrt_result_t xret = XRT_SUCCESS;

	// Do before getting the context, not using ourselves.
	xret = xrt_comp_get_swapchain_create_properties(xc, info, &xsccp);
	if (xret != XRT_SUCCESS) {
		U_LOG_E("Failed to get create properties: %u", xret);
		return xret;
	}

	// Check before setting the context.
	int64_t vk_format = gl_format_to_vk(info->format);
	if (vk_format == 0) {
		U_LOG_E("Invalid format!");
		return XRT_ERROR_SWAPCHAIN_FORMAT_UNSUPPORTED;
	}

	// From here on every error path must call client_gl_compositor_context_end.
	xret = client_gl_compositor_context_begin(xc, CLIENT_GL_CONTEXT_REASON_OTHER);
	if (xret != XRT_SUCCESS) {
		return xret;
	}

	if (info->array_size > 1) {
		// GLES 2 has no array textures, reject multi-layer swapchains there.
		const char *version_str = (const char *)glGetString(GL_VERSION);
		if (strstr(version_str, "OpenGL ES 2.") == version_str) {
			U_LOG_E("Only one array layer is supported with OpenGL ES 2");
			client_gl_compositor_context_end(xc, CLIENT_GL_CONTEXT_REASON_OTHER);
			return XRT_ERROR_SWAPCHAIN_FLAG_VALID_BUT_UNSUPPORTED;
		}
	}

	// xinfo keeps the GL format for the client wrapper, vkinfo gets the VK format.
	struct xrt_swapchain_create_info xinfo = *info;
	struct xrt_swapchain_create_info vkinfo = *info;

	// Update the create info.
	xinfo.bits |= xsccp.extra_bits;
	vkinfo.format = vk_format;
	vkinfo.bits |= xsccp.extra_bits;

	struct xrt_swapchain_native *xscn = NULL; // Has to be NULL.
	xret = xrt_comp_native_create_swapchain(c->xcn, &vkinfo, &xscn);

	if (xret != XRT_SUCCESS) {
		client_gl_compositor_context_end(xc, CLIENT_GL_CONTEXT_REASON_OTHER);
		return xret;
	}
	assert(xscn != NULL);

	// Save texture binding so the backend's texture setup doesn't clobber app state.
	GLint prev_texture = 0;
	GLuint binding_enum = 0;
	GLuint tex_target = 0;
	ogl_texture_target_for_swapchain_info(&xinfo, &tex_target, &binding_enum);

	glGetIntegerv(binding_enum, &prev_texture);

	struct xrt_swapchain *xsc = &xscn->base;

	// Backend-specific wrapping of the native swapchain into a GL one.
	struct client_gl_swapchain *sc = NULL;
	if (NULL == c->create_swapchain(xc, &xinfo, xscn, &sc)) {
		// Drop our reference, does NULL checking.
		xrt_swapchain_reference(&xsc, NULL);
		client_gl_compositor_context_end(xc, CLIENT_GL_CONTEXT_REASON_OTHER);
		return XRT_ERROR_OPENGL;
	}

	if (sc == NULL) {
		U_LOG_E("Could not create OpenGL swapchain.");
		client_gl_compositor_context_end(xc, CLIENT_GL_CONTEXT_REASON_OTHER);
		return XRT_ERROR_OPENGL;
	}

	// Fill in default function pointers only where the backend left them unset.
	if (NULL == sc->base.base.acquire_image) {
		sc->base.base.acquire_image = client_gl_swapchain_acquire_image;
	}
	if (NULL == sc->base.base.wait_image) {
		sc->base.base.wait_image = client_gl_swapchain_wait_image;
	}
	if (NULL == sc->base.base.barrier_image) {
		sc->base.base.barrier_image = client_gl_swapchain_barrier_image;
	}
	if (NULL == sc->base.base.release_image) {
		sc->base.base.release_image = client_gl_swapchain_release_image;
	}
	// Fetch the number of images from the native swapchain.
	sc->base.base.image_count = xsc->image_count;
	sc->xscn = xscn;

	// Restore the texture binding saved above.
	glBindTexture(tex_target, prev_texture);

	client_gl_compositor_context_end(xc, CLIENT_GL_CONTEXT_REASON_OTHER);

	*out_xsc = &sc->base.base;
	return XRT_SUCCESS;
}
538
539static xrt_result_t
540client_gl_compositor_passthrough_create(struct xrt_compositor *xc, const struct xrt_passthrough_create_info *info)
541{
542 struct client_gl_compositor *c = client_gl_compositor(xc);
543
544 // Pipe down call into native compositor.
545 return xrt_comp_create_passthrough(&c->xcn->base, info);
546}
547
548static xrt_result_t
549client_gl_compositor_passthrough_layer_create(struct xrt_compositor *xc,
550 const struct xrt_passthrough_layer_create_info *info)
551{
552 struct client_gl_compositor *c = client_gl_compositor(xc);
553
554 // Pipe down call into native compositor.
555 return xrt_comp_create_passthrough_layer(&c->xcn->base, info);
556}
557
558static xrt_result_t
559client_gl_compositor_passthrough_destroy(struct xrt_compositor *xc)
560{
561 struct client_gl_compositor *c = client_gl_compositor(xc);
562
563 // Pipe down call into native compositor.
564 return xrt_comp_destroy_passthrough(&c->xcn->base);
565}
566
static void
client_gl_compositor_destroy(struct xrt_compositor *xc)
{
	// Placeholder that must never run: the winsys layer that embeds this
	// compositor overrides the destroy function pointer with its own.
	assert(!"Destroy should be implemented by the winsys code that uses the GL code.");
}
572
573
574/*
575 *
576 * 'Exported' functions.
577 *
578 */
579
void
client_gl_compositor_fini(struct client_gl_compositor *c)
{
	// Tear down the context mutex set up by client_gl_compositor_init.
	os_mutex_destroy(&c->context_mutex);
}
585
586bool
587client_gl_compositor_init(struct client_gl_compositor *c,
588 struct xrt_compositor_native *xcn,
589 client_gl_context_begin_locked_func_t context_begin_locked,
590 client_gl_context_end_locked_func_t context_end_locked,
591 client_gl_swapchain_create_func_t create_swapchain,
592 client_gl_insert_fence_func_t insert_fence)
593{
594 assert(context_begin_locked != NULL);
595 assert(context_end_locked != NULL);
596
597 c->base.base.get_swapchain_create_properties = client_gl_compositor_get_swapchain_create_properties;
598 c->base.base.create_swapchain = client_gl_swapchain_create;
599 c->base.base.create_passthrough = client_gl_compositor_passthrough_create;
600 c->base.base.create_passthrough_layer = client_gl_compositor_passthrough_layer_create;
601 c->base.base.destroy_passthrough = client_gl_compositor_passthrough_destroy;
602 c->base.base.begin_session = client_gl_compositor_begin_session;
603 c->base.base.end_session = client_gl_compositor_end_session;
604 c->base.base.wait_frame = client_gl_compositor_wait_frame;
605 c->base.base.begin_frame = client_gl_compositor_begin_frame;
606 c->base.base.discard_frame = client_gl_compositor_discard_frame;
607 c->base.base.layer_begin = client_gl_compositor_layer_begin;
608 c->base.base.layer_projection = client_gl_compositor_layer_projection;
609 c->base.base.layer_projection_depth = client_gl_compositor_layer_projection_depth;
610 c->base.base.layer_quad = client_gl_compositor_layer_quad;
611 c->base.base.layer_cube = client_gl_compositor_layer_cube;
612 c->base.base.layer_cylinder = client_gl_compositor_layer_cylinder;
613 c->base.base.layer_equirect1 = client_gl_compositor_layer_equirect1;
614 c->base.base.layer_equirect2 = client_gl_compositor_layer_equirect2;
615 c->base.base.layer_passthrough = client_gl_compositor_layer_passthrough;
616 c->base.base.layer_commit = client_gl_compositor_layer_commit;
617 c->base.base.destroy = client_gl_compositor_destroy;
618 c->context_begin_locked = context_begin_locked;
619 c->context_end_locked = context_end_locked;
620 c->create_swapchain = create_swapchain;
621 c->insert_fence = insert_fence;
622 c->xcn = xcn;
623
624 // Passthrough our formats from the native compositor to the client.
625 uint32_t count = 0;
626
627 // Make sure that we can fit all formats in the destination.
628 static_assert(ARRAY_SIZE(xcn->base.info.formats) == ARRAY_SIZE(c->base.base.info.formats), "mismatch");
629
630 for (uint32_t i = 0; i < xcn->base.info.format_count; i++) {
631 int64_t f = vk_format_to_gl(xcn->base.info.formats[i]);
632 if (f == 0) {
633 continue;
634 }
635
636 c->base.base.info.formats[count++] = f;
637 }
638 c->base.base.info.format_count = count;
639
640 // Get max texture size.
641 GLint max_texture_size = 0;
642 glGetIntegerv(GL_MAX_TEXTURE_SIZE, &max_texture_size);
643 if (max_texture_size > 0) {
644 c->base.base.info.max_texture_size = (uint32_t)max_texture_size;
645 }
646
647 os_mutex_init(&c->context_mutex);
648
649 return true;
650}