// The open source OpenXR runtime.
1// Copyright 2020-2024, Collabora, Ltd.
2// Copyright 2024-2025, NVIDIA CORPORATION.
3// SPDX-License-Identifier: BSL-1.0
4/*!
5 * @file
6 * @brief Server process functions.
7 * @author Pete Black <pblack@collabora.com>
8 * @author Jakob Bornecrantz <jakob@collabora.com>
9 * @author Rylie Pavlik <rylie.pavlik@collabora.com>
10 * @author Korcan Hussein <korcan.hussein@collabora.com>
11 * @ingroup ipc_server
12 */
13
14#include "xrt/xrt_device.h"
15#include "xrt/xrt_system.h"
16#include "xrt/xrt_instance.h"
17#include "xrt/xrt_compositor.h"
18#include "xrt/xrt_config_have.h"
19#include "xrt/xrt_config_os.h"
20
21#include "os/os_time.h"
22#include "util/u_var.h"
23#include "util/u_misc.h"
24#include "util/u_debug.h"
25#include "util/u_trace_marker.h"
26#include "util/u_verify.h"
27#include "util/u_process.h"
28#include "util/u_debug_gui.h"
29#include "util/u_pretty_print.h"
30
31#include "util/u_git_tag.h"
32
33#include "shared/ipc_protocol.h"
34#include "shared/ipc_shmem.h"
35#include "server/ipc_server.h"
36#include "server/ipc_server_interface.h"
37
38#include <stdlib.h>
39#include <stdbool.h>
40#include <sys/types.h>
41#include <sys/stat.h>
42#include <fcntl.h>
43#include <errno.h>
44#include <stdio.h>
45#include <string.h>
46#include <assert.h>
47#include <limits.h>
48
49#if defined(XRT_OS_WINDOWS)
50#include <timeapi.h>
51#endif
52
53
54/*
55 *
56 * Defines and helpers.
57 *
58 */
59
// Environment-variable backed options, each read once on first use.
DEBUG_GET_ONCE_BOOL_OPTION(exit_on_disconnect, "IPC_EXIT_ON_DISCONNECT", false)
DEBUG_GET_ONCE_BOOL_OPTION(exit_when_idle, "IPC_EXIT_WHEN_IDLE", false)
DEBUG_GET_ONCE_NUM_OPTION(exit_when_idle_delay_ms, "IPC_EXIT_WHEN_IDLE_DELAY_MS", 5000)
DEBUG_GET_ONCE_LOG_OPTION(ipc_log, "IPC_LOG", U_LOGGING_INFO)
64
65
66/*
67 *
68 * Idev functions.
69 *
70 */
71
72static int32_t
73find_xdev_index(struct ipc_server *s, struct xrt_device *xdev)
74{
75 if (xdev == NULL) {
76 return -1;
77 }
78
79 for (int32_t i = 0; i < XRT_SYSTEM_MAX_DEVICES; i++) {
80 if (s->xsysd->xdevs[i] == xdev) {
81 return i;
82 }
83 }
84
85 IPC_WARN(s, "Could not find index for xdev: '%s'", xdev->str);
86
87 return -1;
88}
89
90static void
91init_idev(struct ipc_device *idev, struct xrt_device *xdev)
92{
93 if (xdev != NULL) {
94 idev->io_active = true;
95 idev->xdev = xdev;
96 } else {
97 idev->io_active = false;
98 }
99}
100
/*!
 * Mark the given IPC device wrapper as no longer accepting input/output.
 */
static void
teardown_idev(struct ipc_device *idev)
{
	idev->io_active = false;
}
106
107static void
108init_idevs(struct ipc_server *s)
109{
110 // Copy the devices over into the idevs array.
111 for (size_t i = 0; i < XRT_SYSTEM_MAX_DEVICES; i++) {
112 if (s->xsysd->xdevs[i] == NULL) {
113 continue;
114 }
115
116 init_idev(&s->idevs[i], s->xsysd->xdevs[i]);
117 }
118}
119
120static void
121teardown_idevs(struct ipc_server *s)
122{
123 for (size_t i = 0; i < XRT_SYSTEM_MAX_DEVICES; i++) {
124 teardown_idev(&s->idevs[i]);
125 }
126}
127
128
129/*
130 *
131 * Static functions.
132 *
133 */
134
/*!
 * Print a boxed, end-user friendly message explaining that the service
 * failed to start and how to capture logs for a bug report.
 *
 * Also defines the PN* print-helper macros, which are reused by
 * print_linux_end_user_started_information below and #undef'd there.
 */
XRT_MAYBE_UNUSED static void
print_linux_end_user_failed_information(enum u_logging_level log_level)
{
	struct u_pp_sink_stack_only sink;
	u_pp_delegate_t dg = u_pp_sink_stack_only_init(&sink);

	// Print Newline
#define PN() u_pp(dg, "\n")
	// Print Newline, Hash
#define PNH() u_pp(dg, "\n#")
	// Print Newline, Hash, Space
#define PNHS(...) u_pp(dg, "\n# "__VA_ARGS__)
	// Print Newline, 80 Hashes (8 groups of 10)
#define PN80H() \
	do { \
		PN(); \
		for (uint32_t i = 0; i < 8; i++) { \
			u_pp(dg, "##########"); \
		} \
	} while (false)

	PN80H();
	PNHS(" #");
	PNHS(" The Monado service has failed to start. #");
	PNHS(" #");
	PNHS("If you want to report please upload the logs of the service as a text file. #");
	PNHS("You can also capture the output the monado-cli info command to provide more #");
	PNHS("information about your system, that will help diagnosing your problem. The #");
	PNHS("below commands is how you best capture the information from the commands. #");
	PNHS(" #");
	PNHS(" monado-cli info 2>&1 | tee info.txt #");
	PNHS(" monado-service 2>&1 | tee logs.txt #");
	PNHS(" #");
	PN80H();

	U_LOG_IFL_I(log_level, "%s", sink.buffer);
}
172
/*!
 * Print a boxed, end-user friendly message that the service has started.
 *
 * Uses the PN* helper macros defined in
 * print_linux_end_user_failed_information above, and #undef's them here as
 * this is their last use.
 */
XRT_MAYBE_UNUSED static void
print_linux_end_user_started_information(enum u_logging_level log_level)
{
	struct u_pp_sink_stack_only sink;
	u_pp_delegate_t dg = u_pp_sink_stack_only_init(&sink);


	PN80H();
	PNHS(" #");
	PNHS(" The Monado service has started. #");
	PNHS(" #");
	PN80H();

#undef PN
#undef PNH
#undef PNHS
#undef PN80H

	U_LOG_IFL_I(log_level, "%s", sink.buffer);
}
193
/*!
 * Tear down everything the server owns, roughly in reverse order of
 * creation. Also called on partial-init error paths from init_all, so
 * members may still be NULL here — NOTE(review): this relies on the
 * xrt_*_destroy helpers being NULL-safe, confirm against their contracts.
 */
static void
teardown_all(struct ipc_server *s)
{
	// Remove debug-UI variables first, they reference server members.
	u_var_remove_root(s);

	xrt_syscomp_destroy(&s->xsysc);

	teardown_idevs(s);

	// Devices and spaces before the instance that created them.
	xrt_space_overseer_destroy(&s->xso);
	xrt_system_devices_destroy(&s->xsysd);
	xrt_system_destroy(&s->xsys);

	xrt_instance_destroy(&s->xinst);

	ipc_server_mainloop_deinit(&s->ml);

	u_process_destroy(s->process);

	// Destroyed last.
	os_mutex_destroy(&s->global_state.lock);
}
216
217static void
218init_tracking_origins(struct ipc_server *s)
219{
220 for (size_t i = 0; i < XRT_SYSTEM_MAX_DEVICES; i++) {
221 struct xrt_device *xdev = s->idevs[i].xdev;
222 if (xdev == NULL) {
223 continue;
224 }
225
226 struct xrt_tracking_origin *xtrack = xdev->tracking_origin;
227 assert(xtrack != NULL);
228 size_t index = 0;
229
230 for (; index < XRT_SYSTEM_MAX_DEVICES; index++) {
231 if (s->xtracks[index] == NULL) {
232 s->xtracks[index] = xtrack;
233 break;
234 }
235 if (s->xtracks[index] == xtrack) {
236 break;
237 }
238 }
239 }
240}
241
242static void
243handle_binding(struct ipc_shared_memory *ism,
244 struct xrt_binding_profile *xbp,
245 struct ipc_shared_binding_profile *isbp,
246 uint32_t *input_pair_index_ptr,
247 uint32_t *output_pair_index_ptr)
248{
249 uint32_t input_pair_index = *input_pair_index_ptr;
250 uint32_t output_pair_index = *output_pair_index_ptr;
251
252 isbp->name = xbp->name;
253
254 // Copy the initial state and also count the number in input_pairs.
255 uint32_t input_pair_start = input_pair_index;
256 for (size_t k = 0; k < xbp->input_count; k++) {
257 ism->input_pairs[input_pair_index++] = xbp->inputs[k];
258 }
259
260 // Setup the 'offsets' and number of input_pairs.
261 if (input_pair_start != input_pair_index) {
262 isbp->input_count = input_pair_index - input_pair_start;
263 isbp->first_input_index = input_pair_start;
264 }
265
266 // Copy the initial state and also count the number in outputs.
267 uint32_t output_pair_start = output_pair_index;
268 for (size_t k = 0; k < xbp->output_count; k++) {
269 ism->output_pairs[output_pair_index++] = xbp->outputs[k];
270 }
271
272 // Setup the 'offsets' and number of output_pairs.
273 if (output_pair_start != output_pair_index) {
274 isbp->output_count = output_pair_index - output_pair_start;
275 isbp->first_output_index = output_pair_start;
276 }
277
278 *input_pair_index_ptr = input_pair_index;
279 *output_pair_index_ptr = output_pair_index;
280}
281
/*!
 * Create the shared memory segment for a newly connected client and fill
 * it with a snapshot of the server's state: tracking origins, devices
 * (with their bindings, inputs and outputs), HMD views and blend modes,
 * device roles, and the git version tag.
 *
 * The shmem handle is stored on @p cs so it can be passed to the client.
 *
 * @return XRT_SUCCESS, or the error from ipc_shmem_create.
 */
XRT_CHECK_RESULT static xrt_result_t
init_shm(struct ipc_server *s, volatile struct ipc_client_state *cs)
{
	const size_t size = sizeof(struct ipc_shared_memory);
	xrt_shmem_handle_t handle;

	xrt_result_t xret = ipc_shmem_create(size, &handle, (void **)&s->isms[cs->server_thread_index]);
	IPC_CHK_AND_RET(s, xret, "ipc_shmem_create");

	// we have a filehandle, we will pass this to our client
	cs->ism_handle = handle;


	/*
	 *
	 * Setup the shared memory state.
	 *
	 */

	uint32_t count = 0;
	struct ipc_shared_memory *ism = s->isms[cs->server_thread_index];

	ism->startup_timestamp = os_monotonic_get_ns();

	// Setup the tracking origins.
	count = 0;
	for (size_t i = 0; i < XRT_SYSTEM_MAX_DEVICES; i++) {
		struct xrt_tracking_origin *xtrack = s->xtracks[i];
		if (xtrack == NULL) {
			continue;
		}

		// The position of the tracking origin matches that in the
		// server's memory.
		assert(i < XRT_SYSTEM_MAX_DEVICES);

		struct ipc_shared_tracking_origin *itrack = &ism->itracks[count++];
		memcpy(itrack->name, xtrack->name, sizeof(itrack->name));
		itrack->type = xtrack->type;
		itrack->offset = xtrack->initial_offset;
	}

	ism->itrack_count = count;

	// Running cursors into the flat shared arrays; each device records
	// the region it used via first_*_index/
	count = 0;
	uint32_t input_index = 0;
	uint32_t output_index = 0;
	uint32_t binding_index = 0;
	uint32_t input_pair_index = 0;
	uint32_t output_pair_index = 0;

	for (size_t i = 0; i < XRT_SYSTEM_MAX_DEVICES; i++) {
		struct xrt_device *xdev = s->idevs[i].xdev;
		if (xdev == NULL) {
			continue;
		}

		struct ipc_shared_device *isdev = &ism->isdevs[count++];

		isdev->name = xdev->name;
		memcpy(isdev->str, xdev->str, sizeof(isdev->str));
		memcpy(isdev->serial, xdev->serial, sizeof(isdev->serial));

		// Copy information.
		isdev->device_type = xdev->device_type;
		isdev->supported = xdev->supported;

		// Setup the tracking origin.
		isdev->tracking_origin_index = (uint32_t)-1;
		for (uint32_t k = 0; k < XRT_SYSTEM_MAX_DEVICES; k++) {
			if (xdev->tracking_origin != s->xtracks[k]) {
				continue;
			}

			isdev->tracking_origin_index = k;
			break;
		}

		// Every device must reference an origin registered in
		// init_tracking_origins.
		assert(isdev->tracking_origin_index != (uint32_t)-1);

		// Initial update.
		xrt_device_update_inputs(xdev);

		// Bindings
		uint32_t binding_start = binding_index;
		for (size_t k = 0; k < xdev->binding_profile_count; k++) {
			handle_binding(ism, &xdev->binding_profiles[k], &ism->binding_profiles[binding_index++],
			 &input_pair_index, &output_pair_index);
		}

		// Setup the 'offsets' and number of bindings.
		if (binding_start != binding_index) {
			isdev->binding_profile_count = binding_index - binding_start;
			isdev->first_binding_profile_index = binding_start;
		}

		// Copy the initial state and also count the number in inputs.
		uint32_t input_start = input_index;
		for (size_t k = 0; k < xdev->input_count; k++) {
			ism->inputs[input_index++] = xdev->inputs[k];
		}

		// Setup the 'offsets' and number of inputs.
		if (input_start != input_index) {
			isdev->input_count = input_index - input_start;
			isdev->first_input_index = input_start;
		}

		// Copy the initial state and also count the number in outputs.
		uint32_t output_start = output_index;
		for (size_t k = 0; k < xdev->output_count; k++) {
			ism->outputs[output_index++] = xdev->outputs[k];
		}

		// Setup the 'offsets' and number of outputs.
		if (output_start != output_index) {
			isdev->output_count = output_index - output_start;
			isdev->first_output_index = output_start;
		}
	}

	// Setup the HMD
	// set view count
	assert(s->xsysd->static_roles.head->hmd);
	ism->hmd.view_count = s->xsysd->static_roles.head->hmd->view_count;
	for (uint32_t view = 0; view < s->xsysd->static_roles.head->hmd->view_count; ++view) {
		ism->hmd.views[view].display.w_pixels = s->xsysd->static_roles.head->hmd->views[view].display.w_pixels;
		ism->hmd.views[view].display.h_pixels = s->xsysd->static_roles.head->hmd->views[view].display.h_pixels;
	}

	for (size_t i = 0; i < s->xsysd->static_roles.head->hmd->blend_mode_count; i++) {
		// Not super necessary, we also do this assert in oxr_system.c
		assert(u_verify_blend_mode_valid(s->xsysd->static_roles.head->hmd->blend_modes[i]));
		ism->hmd.blend_modes[i] = s->xsysd->static_roles.head->hmd->blend_modes[i];
	}
	ism->hmd.blend_mode_count = s->xsysd->static_roles.head->hmd->blend_mode_count;

	// Finally tell the client how many devices we have.
	ism->isdev_count = count;

	// Assign all of the roles.
	ism->roles.head = find_xdev_index(s, s->xsysd->static_roles.head);
	ism->roles.eyes = find_xdev_index(s, s->xsysd->static_roles.eyes);
	ism->roles.face = find_xdev_index(s, s->xsysd->static_roles.face);
	ism->roles.body = find_xdev_index(s, s->xsysd->static_roles.body);
#define SET_HT_ROLE(SRC) \
	ism->roles.hand_tracking.SRC.left = find_xdev_index(s, s->xsysd->static_roles.hand_tracking.SRC.left); \
	ism->roles.hand_tracking.SRC.right = find_xdev_index(s, s->xsysd->static_roles.hand_tracking.SRC.right);
	SET_HT_ROLE(unobstructed)
	SET_HT_ROLE(conforming)
#undef SET_HT_ROLE

	// Fill out git version info.
	snprintf(ism->u_git_tag, IPC_VERSION_NAME_LEN, "%s", u_git_tag);

	return XRT_SUCCESS;
}
439
440static void
441init_server_state(struct ipc_server *s)
442{
443 // set up initial state for global vars, and each client state
444
445 s->global_state.active_client_index = -1; // we start off with no active client.
446 s->global_state.last_active_client_index = -1;
447 s->global_state.connected_client_count = 0; // No clients connected initially
448 s->current_slot_index = 0;
449
450 for (uint32_t i = 0; i < IPC_MAX_CLIENTS; i++) {
451 volatile struct ipc_client_state *ics = &s->threads[i].ics;
452 ics->server = s;
453 ics->server_thread_index = -1;
454 }
455}
456
/*!
 * Initialize the whole server: lock, singleton process guard, xrt
 * instance/system, device wrappers, mainloop and debug variables.
 *
 * On any failure after the global lock is created, teardown_all is run
 * and the error is returned.
 *
 * @return XRT_SUCCESS or an error describing the failed step.
 */
static xrt_result_t
init_all(struct ipc_server *s, enum u_logging_level log_level)
{
	xrt_result_t xret = XRT_SUCCESS;
	int ret;

	// First order of business set the log level.
	s->log_level = log_level;

	// This should never fail.
	ret = os_mutex_init(&s->global_state.lock);
	if (ret < 0) {
		IPC_ERROR(s, "Global state lock mutex failed to init!");
		// Do not call teardown_all here, os_mutex_destroy will assert.
		return XRT_ERROR_SYNC_PRIMITIVE_CREATION_FAILED;
	}

	// Singleton guard: fails when another monado-service owns the lock.
	s->process = u_process_create_if_not_running();
	if (!s->process) {
		IPC_ERROR(s, "monado-service is already running! Use XRT_LOG=trace for more information.");
		xret = XRT_ERROR_IPC_SERVICE_ALREADY_RUNNING;
	}
	IPC_CHK_WITH_GOTO(s, xret, "u_process_create_if_not_running", error);

	// Yes we should be running.
	s->running = true;
	s->exit_on_disconnect = debug_get_bool_option_exit_on_disconnect();
	s->exit_when_idle = debug_get_bool_option_exit_when_idle();
	s->last_client_disconnect_ns = 0;
	uint64_t delay_ms = debug_get_num_option_exit_when_idle_delay_ms();
	s->exit_when_idle_delay_ns = delay_ms * U_TIME_1MS_IN_NS;

	xret = xrt_instance_create(NULL, &s->xinst);
	IPC_CHK_WITH_GOTO(s, xret, "xrt_instance_create", error);

	xret = xrt_instance_create_system(s->xinst, &s->xsys, &s->xsysd, &s->xso, &s->xsysc);
	IPC_CHK_WITH_GOTO(s, xret, "xrt_instance_create_system", error);

	// Always succeeds.
	init_idevs(s);
	init_tracking_origins(s);

	ret = ipc_server_mainloop_init(&s->ml);
	if (ret < 0) {
		xret = XRT_ERROR_IPC_MAINLOOP_FAILED_TO_INIT;
	}
	IPC_CHK_WITH_GOTO(s, xret, "ipc_server_mainloop_init", error);

	// Never fails, do this second last.
	init_server_state(s);

	// Expose server state in the debug UI.
	u_var_add_root(s, "IPC Server", false);
	u_var_add_log_level(s, &s->log_level, "Log level");
	u_var_add_bool(s, &s->exit_on_disconnect, "exit_on_disconnect");
	u_var_add_bool(s, &s->exit_when_idle, "exit_when_idle");
	u_var_add_u64(s, &s->exit_when_idle_delay_ns, "exit_when_idle_delay_ns");
	u_var_add_bool(s, (bool *)&s->running, "running");

	return XRT_SUCCESS;

error:
	teardown_all(s);

	return xret;
}
522
523static int
524main_loop(struct ipc_server *s)
525{
526 while (s->running) {
527 os_nanosleep(U_TIME_1S_IN_NS / 20);
528
529 // Check polling.
530 ipc_server_mainloop_poll(s, &s->ml);
531 }
532
533 return 0;
534}
535
536
537/*
538 *
539 * Client management functions.
540 *
541 */
542
/*!
 * Update main-app visibility on the compositor for one overlay session,
 * based on the transition between the previous and current active client.
 *
 * No-op for non-overlay sessions or sessions without a compositor yet.
 */
static void
handle_overlay_client_events(volatile struct ipc_client_state *ics, int active_id, int prev_active_id)
{
	// Is an overlay session?
	if (!ics->client_state.session_overlay) {
		return;
	}

	// Does this client have a compositor yet, if not return?
	if (ics->xc == NULL) {
		return;
	}

	// Switch between main applications
	// NOTE(review): visibility is set false then immediately true here,
	// presumably to force the compositor to re-notify — confirm this
	// toggle is intentional.
	if (active_id >= 0 && prev_active_id >= 0) {
		xrt_syscomp_set_main_app_visibility(ics->server->xsysc, ics->xc, false);
		xrt_syscomp_set_main_app_visibility(ics->server->xsysc, ics->xc, true);
	}

	// Switch from idle to active application
	if (active_id >= 0 && prev_active_id < 0) {
		xrt_syscomp_set_main_app_visibility(ics->server->xsysc, ics->xc, true);
	}

	// Switch from active application to idle
	if (active_id < 0 && prev_active_id >= 0) {
		xrt_syscomp_set_main_app_visibility(ics->server->xsysc, ics->xc, false);
	}
}
572
573static void
574handle_focused_client_events(volatile struct ipc_client_state *ics, int active_id, int prev_active_id)
575{
576 // Set start z_order at the bottom.
577 int64_t z_order = INT64_MIN;
578
579 // Set visibility/focus to false on all applications.
580 bool focused = false;
581 bool visible = false;
582
583 // Set visible + focused if we are the primary application
584 if (ics->server_thread_index == active_id) {
585 visible = true;
586 focused = true;
587 z_order = INT64_MIN;
588 }
589
590 // Set all overlays to always active and focused.
591 if (ics->client_state.session_overlay) {
592 visible = true;
593 focused = true;
594 z_order = ics->client_state.z_order;
595 }
596
597 ics->client_state.session_visible = visible;
598 ics->client_state.session_focused = focused;
599 ics->client_state.z_order = z_order;
600
601 if (ics->xc != NULL) {
602 xrt_syscomp_set_state(ics->server->xsysc, ics->xc, visible, focused);
603 xrt_syscomp_set_z_order(ics->server->xsysc, ics->xc, z_order);
604 }
605}
606
607static void
608flush_state_to_all_clients_locked(struct ipc_server *s)
609{
610 for (uint32_t i = 0; i < IPC_MAX_CLIENTS; i++) {
611 volatile struct ipc_client_state *ics = &s->threads[i].ics;
612
613 // Not running?
614 if (ics->server_thread_index < 0) {
615 continue;
616 }
617
618 handle_focused_client_events(ics, s->global_state.active_client_index,
619 s->global_state.last_active_client_index);
620 handle_overlay_client_events(ics, s->global_state.active_client_index,
621 s->global_state.last_active_client_index);
622 }
623}
624
/*!
 * Re-evaluate which client is the active (primary) application and notify
 * all clients of the result. Caller must hold the global state lock.
 */
static void
update_server_state_locked(struct ipc_server *s)
{
	// if our client that is set to active is still active,
	// and it is the same as our last active client, we can
	// early-out, as no events need to be sent

	if (s->global_state.active_client_index >= 0) {

		volatile struct ipc_client_state *ics = &s->threads[s->global_state.active_client_index].ics;

		if (ics->client_state.session_active &&
		 s->global_state.active_client_index == s->global_state.last_active_client_index) {
			return;
		}
	}


	// our active application has changed - this would typically be
	// switched by the monado-ctl application or other app making a
	// 'set active application' ipc call, or it could be a
	// connection loss resulting in us needing to 'fall through' to
	// the first active application
	//, or finally to the idle 'wallpaper' images.


	bool set_idle = true;
	int fallback_active_application = -1;

	// do we have a fallback application?
	// (The last matching slot wins, as the loop does not break.)
	for (uint32_t i = 0; i < IPC_MAX_CLIENTS; i++) {
		volatile struct ipc_client_state *ics = &s->threads[i].ics;
		if (ics->client_state.session_overlay == false && ics->server_thread_index >= 0 &&
		 ics->client_state.session_active) {
			fallback_active_application = i;
			set_idle = false;
		}
	}

	// if there is a currently-set active primary application and it is not
	// actually active/displayable, use the fallback application
	// instead.
	if (s->global_state.active_client_index >= 0) {
		volatile struct ipc_client_state *ics = &s->threads[s->global_state.active_client_index].ics;
		if (!(ics->client_state.session_overlay == false && ics->client_state.session_active)) {
			s->global_state.active_client_index = fallback_active_application;
		}
	}


	// if we have no applications to fallback to, enable the idle
	// wallpaper.
	if (set_idle) {
		s->global_state.active_client_index = -1;
	}

	// Tell every running client about the (possibly) new active client.
	flush_state_to_all_clients_locked(s);

	s->global_state.last_active_client_index = s->global_state.active_client_index;
}
685
686static volatile struct ipc_client_state *
687find_client_locked(struct ipc_server *s, uint32_t client_id)
688{
689 // Check for invalid IDs.
690 if (client_id == 0 || client_id > INT_MAX) {
691 IPC_WARN(s, "Invalid ID '%u', failing operation.", client_id);
692 return NULL;
693 }
694
695 for (uint32_t i = 0; i < IPC_MAX_CLIENTS; i++) {
696 volatile struct ipc_client_state *ics = &s->threads[i].ics;
697
698 // Is this the client we are looking for?
699 if (ics->client_state.id != client_id) {
700 continue;
701 }
702
703 // Just in case of state data.
704 if (!xrt_ipc_handle_is_valid(ics->imc.ipc_handle)) {
705 IPC_WARN(s, "Encountered invalid state while searching for client with ID '%d'", client_id);
706 return NULL;
707 }
708
709 return ics;
710 }
711
712 IPC_WARN(s, "No client with ID '%u', failing operation.", client_id);
713
714 return NULL;
715}
716
717static xrt_result_t
718get_client_app_state_locked(struct ipc_server *s, uint32_t client_id, struct ipc_app_state *out_ias)
719{
720 volatile struct ipc_client_state *ics = find_client_locked(s, client_id);
721 if (ics == NULL) {
722 return XRT_ERROR_IPC_FAILURE;
723 }
724
725 struct ipc_app_state ias = ics->client_state;
726 ias.io_active = ics->io_active;
727
728 // @todo: track this data in the ipc_client_state struct
729 ias.primary_application = false;
730
731 // The active client is decided by index, so get that from the ics.
732 int index = ics->server_thread_index;
733
734 if (s->global_state.active_client_index == index) {
735 ias.primary_application = true;
736 }
737
738 *out_ias = ias;
739
740 return XRT_SUCCESS;
741}
742
743static xrt_result_t
744set_active_client_locked(struct ipc_server *s, uint32_t client_id)
745{
746 volatile struct ipc_client_state *ics = find_client_locked(s, client_id);
747 if (ics == NULL) {
748 return XRT_ERROR_IPC_FAILURE;
749 }
750
751 // The active client is decided by index, so get that from the ics.
752 int index = ics->server_thread_index;
753
754 if (index != s->global_state.active_client_index) {
755 s->global_state.active_client_index = index;
756 }
757
758 return XRT_SUCCESS;
759}
760
761static xrt_result_t
762toggle_io_client_locked(struct ipc_server *s, uint32_t client_id)
763{
764 volatile struct ipc_client_state *ics = find_client_locked(s, client_id);
765 if (ics == NULL) {
766 return XRT_ERROR_IPC_FAILURE;
767 }
768
769 ics->io_active = !ics->io_active;
770
771 return XRT_SUCCESS;
772}
773
774
775/*
776 *
777 * Exported functions.
778 *
779 */
780
/*!
 * Thread-safe wrapper: fetch the app state for @p client_id while holding
 * the global state lock.
 */
xrt_result_t
ipc_server_get_client_app_state(struct ipc_server *s, uint32_t client_id, struct ipc_app_state *out_ias)
{
	os_mutex_lock(&s->global_state.lock);
	xrt_result_t xret = get_client_app_state_locked(s, client_id, out_ias);
	os_mutex_unlock(&s->global_state.lock);

	return xret;
}
790
/*!
 * Thread-safe wrapper: set the active client while holding the global
 * state lock. Does not flush the state change to other clients.
 */
xrt_result_t
ipc_server_set_active_client(struct ipc_server *s, uint32_t client_id)
{
	os_mutex_lock(&s->global_state.lock);
	xrt_result_t xret = set_active_client_locked(s, client_id);
	os_mutex_unlock(&s->global_state.lock);

	return xret;
}
800
/*!
 * Thread-safe wrapper: toggle input/output for @p client_id while holding
 * the global state lock.
 */
xrt_result_t
ipc_server_toggle_io_client(struct ipc_server *s, uint32_t client_id)
{
	os_mutex_lock(&s->global_state.lock);
	xrt_result_t xret = toggle_io_client_locked(s, client_id);
	os_mutex_unlock(&s->global_state.lock);

	return xret;
}
810
/*!
 * Mark the session for @p ics as active. Overlay sessions only update
 * their own visibility/focus; regular sessions become the active client
 * and trigger a full state update for all clients.
 */
void
ipc_server_activate_session(volatile struct ipc_client_state *ics)
{
	struct ipc_server *s = ics->server;

	// Already active, noop.
	if (ics->client_state.session_active) {
		return;
	}

	// Must be a connected client slot.
	assert(ics->server_thread_index >= 0);

	// Multiple threads could call this at the same time.
	os_mutex_lock(&s->global_state.lock);

	ics->client_state.session_active = true;

	if (ics->client_state.session_overlay) {
		// For new active overlay sessions only update this session.
		handle_focused_client_events(ics, s->global_state.active_client_index,
		 s->global_state.last_active_client_index);
		handle_overlay_client_events(ics, s->global_state.active_client_index,
		 s->global_state.last_active_client_index);
	} else {
		// Update active client
		set_active_client_locked(s, ics->client_state.id);

		// For new active regular sessions update all clients.
		update_server_state_locked(s);
	}

	os_mutex_unlock(&s->global_state.lock);
}
844
/*!
 * Mark the session for @p ics as no longer active and re-evaluate which
 * client should be the active application.
 */
void
ipc_server_deactivate_session(volatile struct ipc_client_state *ics)
{
	struct ipc_server *s = ics->server;

	// Multiple threads could call this at the same time.
	os_mutex_lock(&s->global_state.lock);

	ics->client_state.session_active = false;

	update_server_state_locked(s);

	os_mutex_unlock(&s->global_state.lock);
}
859
/*!
 * Thread-safe wrapper: re-evaluate the active client and notify all
 * sessions while holding the global state lock.
 */
void
ipc_server_update_state(struct ipc_server *s)
{
	// Multiple threads could call this at the same time.
	os_mutex_lock(&s->global_state.lock);

	update_server_state_locked(s);

	os_mutex_unlock(&s->global_state.lock);
}
870
/*!
 * React to an unrecoverable server failure by requesting the main loop to
 * stop.
 */
void
ipc_server_handle_failure(struct ipc_server *vs)
{
	// Right now handled just the same as a graceful shutdown.
	vs->running = false;
}
877
/*!
 * Request a graceful shutdown; the main loop exits on its next iteration.
 */
void
ipc_server_handle_shutdown_signal(struct ipc_server *vs)
{
	vs->running = false;
}
883
884void
885ipc_server_handle_client_connected(struct ipc_server *vs, xrt_ipc_handle_t ipc_handle)
886{
887 volatile struct ipc_client_state *ics = NULL;
888 int32_t cs_index = -1;
889
890 os_mutex_lock(&vs->global_state.lock);
891
892 // Increment the connected client counter
893 vs->global_state.connected_client_count++;
894
895 // A client connected, so we're no longer in a delayed exit state
896 // (The delay thread will still check the client count before exiting)
897 vs->last_client_disconnect_ns = 0;
898
899 // find the next free thread in our array (server_thread_index is -1)
900 // and have it handle this connection
901 for (uint32_t i = 0; i < IPC_MAX_CLIENTS; i++) {
902 volatile struct ipc_client_state *_cs = &vs->threads[i].ics;
903 if (_cs->server_thread_index < 0) {
904 ics = _cs;
905 cs_index = i;
906 break;
907 }
908 }
909 if (ics == NULL) {
910 xrt_ipc_handle_close(ipc_handle);
911
912 // Unlock when we are done.
913 os_mutex_unlock(&vs->global_state.lock);
914
915 U_LOG_E("Max client count reached!");
916 return;
917 }
918
919 struct ipc_thread *it = &vs->threads[cs_index];
920 if (it->state != IPC_THREAD_READY && it->state != IPC_THREAD_STOPPING) {
921 // we should not get here
922 xrt_ipc_handle_close(ipc_handle);
923
924 // Unlock when we are done.
925 os_mutex_unlock(&vs->global_state.lock);
926
927 U_LOG_E("Client state management error!");
928 return;
929 }
930
931 if (it->state != IPC_THREAD_READY) {
932 os_thread_join(&it->thread);
933 os_thread_destroy(&it->thread);
934 it->state = IPC_THREAD_READY;
935 }
936
937 it->state = IPC_THREAD_STARTING;
938
939 // Allocate a new ID, avoid zero.
940 //! @todo validate ID.
941 uint32_t id = ++vs->id_generator;
942
943 // Reset everything.
944 U_ZERO((struct ipc_client_state *)ics);
945
946 // Set state.
947 ics->client_state.id = id;
948 ics->imc.ipc_handle = ipc_handle;
949 ics->server = vs;
950 ics->server_thread_index = cs_index;
951 ics->io_active = true;
952
953 ics->plane_detection_size = 0;
954 ics->plane_detection_count = 0;
955 ics->plane_detection_ids = NULL;
956 ics->plane_detection_xdev = NULL;
957
958 xrt_result_t xret = init_shm(vs, ics);
959 if (xret != XRT_SUCCESS) {
960
961 // Unlock when we are done.
962 os_mutex_unlock(&vs->global_state.lock);
963
964 U_LOG_E("Failed to allocate shared memory!");
965 return;
966 }
967
968 os_thread_start(&it->thread, ipc_server_client_thread, (void *)ics);
969
970 // Unlock when we are done.
971 os_mutex_unlock(&vs->global_state.lock);
972}
973
974xrt_result_t
975ipc_server_get_system_properties(struct ipc_server *vs, struct xrt_system_properties *out_properties)
976{
977 memcpy(out_properties, &vs->xsys->properties, sizeof(*out_properties));
978 return XRT_SUCCESS;
979}
980
/*!
 * Shared entry point for the service: allocate the server, initialize all
 * subsystems, run the main loop, and tear everything down again.
 *
 * The callbacks let platform-specific frontends hook init failure and
 * main-loop entry/exit.
 *
 * @return The main-loop result, or -1 on initialization failure.
 */
int
ipc_server_main_common(const struct ipc_server_main_info *ismi,
 const struct ipc_server_callbacks *callbacks,
 void *data)
{
	xrt_result_t xret = XRT_SUCCESS;
	int ret = -1;

	// Get log level first.
	enum u_logging_level log_level = debug_get_log_option_ipc_log();

	// Log very early who we are.
	U_LOG_IFL_I(log_level, "%s '%s' starting up...", u_runtime_description, u_git_tag);

	// Allocate the server itself.
	// NOTE(review): the calloc result is not checked; a NULL here would
	// crash in u_debug_gui_create below — confirm whether OOM is handled
	// elsewhere or should be checked here.
	struct ipc_server *s = U_TYPED_CALLOC(struct ipc_server);

#ifdef XRT_OS_WINDOWS
	// Raise timer resolution for the lifetime of the process.
	timeBeginPeriod(1);
#endif

	/*
	 * Need to create early before any vars are added. Not created in
	 * init_all since that function is shared with Android and the debug
	 * GUI isn't supported on Android.
	 */
	u_debug_gui_create(&ismi->udgci, &s->debug_gui);

	xret = init_all(s, log_level);
	U_LOG_CHK_ONLY_PRINT(log_level, xret, "init_all");
	if (xret != XRT_SUCCESS) {
		// Propagate the failure.
		callbacks->init_failed(xret, data);
		u_debug_gui_stop(&s->debug_gui);
		free(s);
		return -1;
	}

	// Start the debug UI now (if enabled).
	u_debug_gui_start(s->debug_gui, s->xinst, s->xsysd);

	// Tell the callbacks we are entering the main-loop.
	callbacks->mainloop_entering(s, s->xinst, data);

	// Main loop.
	ret = main_loop(s);

	// Tell the callbacks we are leaving the main-loop.
	callbacks->mainloop_leaving(s, s->xinst, data);

	// Stop the UI before tearing everything down.
	u_debug_gui_stop(&s->debug_gui);

	// Done after UI stopped.
	teardown_all(s);
	free(s);

#ifdef XRT_OS_WINDOWS
	timeEndPeriod(1);
#endif

	U_LOG_IFL_I(log_level, "Server exiting: '%i'", ret);

	return ret;
}
1046
1047
1048#ifndef XRT_OS_ANDROID
1049
/*!
 * Default init-failure callback: print end-user troubleshooting help on
 * Linux. @p xret and @p data are unused here but part of the callback
 * signature.
 */
static void
init_failed(xrt_result_t xret, void *data)
{
#ifdef XRT_OS_LINUX
	// Print information how to debug issues.
	print_linux_end_user_failed_information(debug_get_log_option_ipc_log());
#endif
}
1058
/*!
 * Default main-loop-entering callback: announce a successful start on
 * Linux. @p xinst and @p data are unused here but part of the signature.
 */
static void
mainloop_entering(struct ipc_server *s, struct xrt_instance *xinst, void *data)
{
#ifdef XRT_OS_LINUX
	// Print a very clear service started message.
	print_linux_end_user_started_information(s->log_level);
#endif
}
1067
/*!
 * Default main-loop-leaving callback: nothing to do.
 */
static void
mainloop_leaving(struct ipc_server *s, struct xrt_instance *xinst, void *data)
{
	// No-op
}
1073
/*!
 * Non-Android entry point: run the server with the default callbacks
 * defined above. @p argc and @p argv are currently unused.
 */
int
ipc_server_main(int argc, char **argv, const struct ipc_server_main_info *ismi)
{
	const struct ipc_server_callbacks callbacks = {
	 .init_failed = init_failed,
	 .mainloop_entering = mainloop_entering,
	 .mainloop_leaving = mainloop_leaving,
	};

	return ipc_server_main_common(ismi, &callbacks, NULL);
}
1085
1086#endif // !XRT_OS_ANDROID