Monado — the open source OpenXR runtime.
Source listing: IPC server process functions (1203 lines, 33 kB).
// Copyright 2020-2024, Collabora, Ltd.
// Copyright 2024-2025, NVIDIA CORPORATION.
// SPDX-License-Identifier: BSL-1.0
/*!
 * @file
 * @brief Server process functions.
 * @author Pete Black <pblack@collabora.com>
 * @author Jakob Bornecrantz <jakob@collabora.com>
 * @author Rylie Pavlik <rylie.pavlik@collabora.com>
 * @author Korcan Hussein <korcan.hussein@collabora.com>
 * @ingroup ipc_server
 */

#include "xrt/xrt_device.h"
#include "xrt/xrt_system.h"
#include "xrt/xrt_instance.h"
#include "xrt/xrt_compositor.h"
#include "xrt/xrt_config_have.h"
#include "xrt/xrt_config_os.h"

#include "os/os_time.h"
#include "util/u_var.h"
#include "util/u_misc.h"
#include "util/u_debug.h"
#include "util/u_trace_marker.h"
#include "util/u_verify.h"
#include "util/u_process.h"
#include "util/u_debug_gui.h"
#include "util/u_pretty_print.h"

#include "util/u_git_tag.h"

#include "shared/ipc_protocol.h"
#include "shared/ipc_shmem.h"
#include "server/ipc_server.h"
#include "server/ipc_server_interface.h"

#include <stdlib.h>
#include <stdbool.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <assert.h>
#include <limits.h>

#if defined(XRT_OS_WINDOWS)
#include <timeapi.h>
#endif


/*
 *
 * Defines and helpers.
 *
 */

// Environment tunables: whether to exit when no clients are connected, how
// long to wait before doing so, and the IPC log level.
DEBUG_GET_ONCE_BOOL_OPTION(exit_when_idle, "IPC_EXIT_WHEN_IDLE", false)
DEBUG_GET_ONCE_NUM_OPTION(exit_when_idle_delay_ms, "IPC_EXIT_WHEN_IDLE_DELAY_MS", 5000)
DEBUG_GET_ONCE_LOG_OPTION(ipc_log, "IPC_LOG", U_LOGGING_INFO)

/*
 * "XRT_NO_STDIN" option disables stdin and prevents monado-service from terminating.
 * This could be useful for situations where there is no proper stdin, or when
 * running in a non-interactive shell.
 * Two example scenarios are:
 * * IDE terminals,
 * * Some scripting environments where monado-service is spawned in the background
 */
DEBUG_GET_ONCE_BOOL_OPTION(no_stdin, "XRT_NO_STDIN", false)


/*
 *
 * Idev functions.
 *
 */

// Returns the index of @p xdev in the system devices array, or -1 if NULL or
// not found (the latter also logs a warning).
static int32_t
find_xdev_index(struct ipc_server *s, struct xrt_device *xdev)
{
	if (xdev == NULL) {
		return -1;
	}

	for (int32_t i = 0; i < XRT_SYSTEM_MAX_DEVICES; i++) {
		if (s->xsysd->xdevs[i] == xdev) {
			return i;
		}
	}

	IPC_WARN(s, "Could not find index for xdev: '%s'", xdev->str);

	return -1;
}

// Binds one ipc_device wrapper to a xrt_device; IO starts out enabled only
// when there is an actual device.
static void
init_idev(struct ipc_device *idev, struct xrt_device *xdev)
{
	if (xdev != NULL) {
		idev->io_active = true;
		idev->xdev = xdev;
	} else {
		idev->io_active = false;
	}
}

// Marks the wrapper inactive; note the xdev pointer itself is not cleared here.
static void
teardown_idev(struct ipc_device *idev)
{
	idev->io_active = false;
}

static void
init_idevs(struct ipc_server *s)
{
	// Copy the devices over into the idevs array.
	for (size_t i = 0; i < XRT_SYSTEM_MAX_DEVICES; i++) {
		if (s->xsysd->xdevs[i] == NULL) {
			continue;
		}

		init_idev(&s->idevs[i], s->xsysd->xdevs[i]);
	}
}

static void
teardown_idevs(struct ipc_server *s)
{
	for (size_t i = 0; i < XRT_SYSTEM_MAX_DEVICES; i++) {
		teardown_idev(&s->idevs[i]);
	}
}


/*
 *
 * Static functions.
 *
 */

// Prints a large banner (via the log) telling an end user how to capture logs
// when the service failed to start. Linux-only call sites.
XRT_MAYBE_UNUSED static void
print_linux_end_user_failed_information(enum u_logging_level log_level)
{
	struct u_pp_sink_stack_only sink;
	u_pp_delegate_t dg = u_pp_sink_stack_only_init(&sink);

	// Print Newline
#define PN() u_pp(dg, "\n")
	// Print Newline, Hash
#define PNH() u_pp(dg, "\n#")
	// Print Newline, Hash, Space
#define PNHS(...) u_pp(dg, "\n# " __VA_ARGS__)
	// Print Newline, 80 Hashes
#define PN80H()                                                                                                        \
	do {                                                                                                           \
		PN();                                                                                                  \
		for (uint32_t i = 0; i < 8; i++) {                                                                     \
			u_pp(dg, "##########");                                                                        \
		}                                                                                                      \
	} while (false)

	PN80H();
	PNHS("                                                                             #");
	PNHS("                     The Monado service has failed to start.                 #");
	PNHS("                                                                             #");
	PNHS("If you want to report please upload the logs of the service as a text file. #");
	PNHS("You can also capture the output the monado-cli info command to provide more #");
	PNHS("information about your system, that will help diagnosing your problem. The #");
	PNHS("below commands is how you best capture the information from the commands. #");
	PNHS("                                                                             #");
	PNHS("        monado-cli info 2>&1 | tee info.txt                                  #");
	PNHS("        monado-service 2>&1 | tee logs.txt                                   #");
	PNHS("                                                                             #");
	PN80H();

	U_LOG_IFL_I(log_level, "%s", sink.buffer);
}

// Prints a clear "service started" banner; reuses the macros defined in
// print_linux_end_user_failed_information above (and #undef's them here).
XRT_MAYBE_UNUSED static void
print_linux_end_user_started_information(enum u_logging_level log_level)
{
	struct u_pp_sink_stack_only sink;
	u_pp_delegate_t dg = u_pp_sink_stack_only_init(&sink);


	PN80H();
	PNHS("                                                                             #");
	PNHS("                       The Monado service has started.                       #");
	PNHS("                                                                             #");
	PN80H();

#undef PN
#undef PNH
#undef PNHS
#undef PN80H

	U_LOG_IFL_I(log_level, "%s", sink.buffer);
}

// Tears down everything init_all set up, in reverse dependency order.
// Safe to call with partially-initialized state since the destroy helpers
// accept NULL-ed out pointers — except the mutex, which must have been init'ed.
static void
teardown_all(struct ipc_server *s)
{
	u_var_remove_root(s);

	xrt_syscomp_destroy(&s->xsysc);

	teardown_idevs(s);

	xrt_space_overseer_destroy(&s->xso);
	xrt_system_devices_destroy(&s->xsysd);
	xrt_system_destroy(&s->xsys);

	xrt_instance_destroy(&s->xinst);

	ipc_server_mainloop_deinit(&s->ml);

	u_process_destroy(s->process);

	// Destroyed last.
	os_mutex_destroy(&s->global_state.lock);
}

// Collects the distinct tracking origins of all devices into s->xtracks,
// de-duplicating by pointer identity.
static void
init_tracking_origins(struct ipc_server *s)
{
	for (size_t i = 0; i < XRT_SYSTEM_MAX_DEVICES; i++) {
		struct xrt_device *xdev = s->idevs[i].xdev;
		if (xdev == NULL) {
			continue;
		}

		struct xrt_tracking_origin *xtrack = xdev->tracking_origin;
		assert(xtrack != NULL);
		size_t index = 0;

		for (; index < XRT_SYSTEM_MAX_DEVICES; index++) {
			if (s->xtracks[index] == NULL) {
				s->xtracks[index] = xtrack;
				break;
			}
			if (s->xtracks[index] == xtrack) {
				break;
			}
		}
	}
}

// Copies one binding profile's input/output pairs into the shared memory
// arrays, advancing the caller's running pair indices and recording the
// offsets/counts on the shared profile.
static void
handle_binding(struct ipc_shared_memory *ism,
               struct xrt_binding_profile *xbp,
               struct ipc_shared_binding_profile *isbp,
               uint32_t *input_pair_index_ptr,
               uint32_t *output_pair_index_ptr)
{
	uint32_t input_pair_index = *input_pair_index_ptr;
	uint32_t output_pair_index = *output_pair_index_ptr;

	isbp->name = xbp->name;

	// Copy the initial state and also count the number in input_pairs.
	uint32_t input_pair_start = input_pair_index;
	for (size_t k = 0; k < xbp->input_count; k++) {
		ism->input_pairs[input_pair_index++] = xbp->inputs[k];
	}

	// Setup the 'offsets' and number of input_pairs.
	if (input_pair_start != input_pair_index) {
		isbp->input_count = input_pair_index - input_pair_start;
		isbp->first_input_index = input_pair_start;
	}

	// Copy the initial state and also count the number in outputs.
	uint32_t output_pair_start = output_pair_index;
	for (size_t k = 0; k < xbp->output_count; k++) {
		ism->output_pairs[output_pair_index++] = xbp->outputs[k];
	}

	// Setup the 'offsets' and number of output_pairs.
	if (output_pair_start != output_pair_index) {
		isbp->output_count = output_pair_index - output_pair_start;
		isbp->first_output_index = output_pair_start;
	}

	*input_pair_index_ptr = input_pair_index;
	*output_pair_index_ptr = output_pair_index;
}

// Creates the per-client shared memory segment and fills in the fields a
// client expects immediately on connect (git tag, startup timestamp).
// The shmem handle is stored on @p ics for handing over to the client.
XRT_CHECK_RESULT static xrt_result_t
init_shm_and_instance_state(struct ipc_server *s, volatile struct ipc_client_state *ics)
{
	const size_t size = sizeof(struct ipc_shared_memory);
	xrt_shmem_handle_t handle;

	xrt_result_t xret = ipc_shmem_create(size, &handle, (void **)&s->isms[ics->server_thread_index]);
	IPC_CHK_AND_RET(s, xret, "ipc_shmem_create");

	// we have a filehandle, we will pass this to our client
	ics->ism_handle = handle;

	// Convenience
	struct ipc_shared_memory *ism = s->isms[ics->server_thread_index];

	// Clients expect git version info and timestamp available upon connect.
	snprintf(ism->u_git_tag, IPC_VERSION_NAME_LEN, "%s", u_git_tag);

	// Used to synchronize all client's xrt_instance::startup_timestamp.
	ism->startup_timestamp = os_monotonic_get_ns();

	return XRT_SUCCESS;
}

// Populates the system-wide portion of a client's shared memory: tracking
// origins, devices (with bindings, inputs, outputs), HMD views/blend modes
// and the device role indices. Requires the system to have been created.
static void
init_system_shm_state(struct ipc_server *s, volatile struct ipc_client_state *cs)
{
	/*
	 *
	 * Setup the shared memory state.
	 *
	 */

	uint32_t count = 0;
	struct ipc_shared_memory *ism = s->isms[cs->server_thread_index];

	// Setup the tracking origins.
	count = 0;
	for (size_t i = 0; i < XRT_SYSTEM_MAX_DEVICES; i++) {
		struct xrt_tracking_origin *xtrack = s->xtracks[i];
		if (xtrack == NULL) {
			continue;
		}

		// The position of the tracking origin matches that in the
		// server's memory.
		assert(i < XRT_SYSTEM_MAX_DEVICES);

		struct ipc_shared_tracking_origin *itrack = &ism->itracks[count++];
		memcpy(itrack->name, xtrack->name, sizeof(itrack->name));
		itrack->type = xtrack->type;
		itrack->offset = xtrack->initial_offset;
	}

	ism->itrack_count = count;

	count = 0;
	uint32_t input_index = 0;
	uint32_t output_index = 0;
	uint32_t binding_index = 0;
	uint32_t input_pair_index = 0;
	uint32_t output_pair_index = 0;

	for (size_t i = 0; i < XRT_SYSTEM_MAX_DEVICES; i++) {
		struct xrt_device *xdev = s->idevs[i].xdev;
		if (xdev == NULL) {
			continue;
		}

		struct ipc_shared_device *isdev = &ism->isdevs[count++];

		isdev->name = xdev->name;
		memcpy(isdev->str, xdev->str, sizeof(isdev->str));
		memcpy(isdev->serial, xdev->serial, sizeof(isdev->serial));

		// Copy information.
		isdev->device_type = xdev->device_type;
		isdev->supported = xdev->supported;

		// Setup the tracking origin.
		isdev->tracking_origin_index = (uint32_t)-1;
		for (uint32_t k = 0; k < XRT_SYSTEM_MAX_DEVICES; k++) {
			if (xdev->tracking_origin != s->xtracks[k]) {
				continue;
			}

			isdev->tracking_origin_index = k;
			break;
		}

		assert(isdev->tracking_origin_index != (uint32_t)-1);

		// Initial update.
		xrt_device_update_inputs(xdev);

		// Bindings
		uint32_t binding_start = binding_index;
		for (size_t k = 0; k < xdev->binding_profile_count; k++) {
			handle_binding(ism, &xdev->binding_profiles[k], &ism->binding_profiles[binding_index++],
			               &input_pair_index, &output_pair_index);
		}

		// Setup the 'offsets' and number of bindings.
		if (binding_start != binding_index) {
			isdev->binding_profile_count = binding_index - binding_start;
			isdev->first_binding_profile_index = binding_start;
		}

		// Copy the initial state and also count the number in inputs.
		uint32_t input_start = input_index;
		for (size_t k = 0; k < xdev->input_count; k++) {
			ism->inputs[input_index++] = xdev->inputs[k];
		}

		// Setup the 'offsets' and number of inputs.
		if (input_start != input_index) {
			isdev->input_count = input_index - input_start;
			isdev->first_input_index = input_start;
		}

		// Copy the initial state and also count the number in outputs.
		uint32_t output_start = output_index;
		for (size_t k = 0; k < xdev->output_count; k++) {
			ism->outputs[output_index++] = xdev->outputs[k];
		}

		// Setup the 'offsets' and number of outputs.
		if (output_start != output_index) {
			isdev->output_count = output_index - output_start;
			isdev->first_output_index = output_start;
		}
	}

	// Setup the HMD
	// set view count
	assert(s->xsysd->static_roles.head->hmd);
	ism->hmd.view_count = s->xsysd->static_roles.head->hmd->view_count;
	for (uint32_t view = 0; view < s->xsysd->static_roles.head->hmd->view_count; ++view) {
		ism->hmd.views[view].display.w_pixels = s->xsysd->static_roles.head->hmd->views[view].display.w_pixels;
		ism->hmd.views[view].display.h_pixels = s->xsysd->static_roles.head->hmd->views[view].display.h_pixels;
	}

	for (size_t i = 0; i < s->xsysd->static_roles.head->hmd->blend_mode_count; i++) {
		// Not super necessary, we also do this assert in oxr_system.c
		assert(u_verify_blend_mode_valid(s->xsysd->static_roles.head->hmd->blend_modes[i]));
		ism->hmd.blend_modes[i] = s->xsysd->static_roles.head->hmd->blend_modes[i];
	}
	ism->hmd.blend_mode_count = s->xsysd->static_roles.head->hmd->blend_mode_count;

	// Finally tell the client how many devices we have.
	ism->isdev_count = count;

	// Assign all of the roles.
	ism->roles.head = find_xdev_index(s, s->xsysd->static_roles.head);
	ism->roles.eyes = find_xdev_index(s, s->xsysd->static_roles.eyes);
	ism->roles.face = find_xdev_index(s, s->xsysd->static_roles.face);
	ism->roles.body = find_xdev_index(s, s->xsysd->static_roles.body);

#define SET_HT_ROLE(SRC)                                                                                               \
	ism->roles.hand_tracking.SRC.left = find_xdev_index(s, s->xsysd->static_roles.hand_tracking.SRC.left);         \
	ism->roles.hand_tracking.SRC.right = find_xdev_index(s, s->xsysd->static_roles.hand_tracking.SRC.right);
	SET_HT_ROLE(unobstructed)
	SET_HT_ROLE(conforming)
#undef SET_HT_ROLE
}

static void
init_server_state(struct ipc_server *s)
{
	// set up initial state for global vars, and each client state

	s->global_state.active_client_index = -1; // we start off with no active client.
	s->global_state.last_active_client_index = -1;
	s->global_state.connected_client_count = 0; // No clients connected initially
	s->current_slot_index = 0;

	for (uint32_t i = 0; i < IPC_MAX_CLIENTS; i++) {
		volatile struct ipc_client_state *ics = &s->threads[i].ics;
		ics->server = s;
		// -1 marks the thread slot as free; see ipc_server_handle_client_connected.
		ics->server_thread_index = -1;
	}
}

// One-stop initialization of the server: lock, single-instance guard,
// xrt_instance, mainloop and debug vars. On any failure past the mutex init
// everything already set up is torn down and the error is returned.
static xrt_result_t
init_all(struct ipc_server *s,
         enum u_logging_level log_level,
         const struct ipc_server_callbacks *callbacks,
         void *callback_data,
         bool exit_on_disconnect)
{
	xrt_result_t xret = XRT_SUCCESS;
	int ret;

	// First order of business set the log level.
	s->log_level = log_level;

	// Store callbacks and data
	s->callbacks = callbacks;
	s->callback_data = callback_data;

	// This should never fail.
	ret = os_mutex_init(&s->global_state.lock);
	if (ret < 0) {
		IPC_ERROR(s, "Global state lock mutex failed to init!");
		// Do not call teardown_all here, os_mutex_destroy will assert.
		return XRT_ERROR_SYNC_PRIMITIVE_CREATION_FAILED;
	}

	s->process = u_process_create_if_not_running();
	if (!s->process) {
		IPC_ERROR(s, "monado-service is already running! Use XRT_LOG=trace for more information.");
		xret = XRT_ERROR_IPC_SERVICE_ALREADY_RUNNING;
	}
	IPC_CHK_WITH_GOTO(s, xret, "u_process_create_if_not_running", error);

	// Yes we should be running.
	s->running = true;
	s->exit_on_disconnect = exit_on_disconnect;
	s->exit_when_idle = debug_get_bool_option_exit_when_idle();
	s->last_client_disconnect_ns = 0;
	uint64_t delay_ms = debug_get_num_option_exit_when_idle_delay_ms();
	s->exit_when_idle_delay_ns = delay_ms * U_TIME_1MS_IN_NS;

	xret = xrt_instance_create(NULL, &s->xinst);
	IPC_CHK_WITH_GOTO(s, xret, "xrt_instance_create", error);

	ret = ipc_server_mainloop_init(&s->ml, s->no_stdin);
	if (ret < 0) {
		xret = XRT_ERROR_IPC_MAINLOOP_FAILED_TO_INIT;
	}
	IPC_CHK_WITH_GOTO(s, xret, "ipc_server_mainloop_init", error);

	// Never fails, do this second last.
	init_server_state(s);

	u_var_add_root(s, "IPC Server", false);
	u_var_add_log_level(s, &s->log_level, "Log level");
	u_var_add_bool(s, &s->exit_on_disconnect, "exit_on_disconnect");
	u_var_add_bool(s, &s->exit_when_idle, "exit_when_idle");
	u_var_add_u64(s, &s->exit_when_idle_delay_ns, "exit_when_idle_delay_ns");
	u_var_add_bool(s, (bool *)&s->running, "running");

	return XRT_SUCCESS;

error:
	teardown_all(s);

	return xret;
}

// Polls the IPC mainloop at ~20 Hz until s->running is cleared.
static int
main_loop(struct ipc_server *s)
{
	while (s->running) {
		os_nanosleep(U_TIME_1S_IN_NS / 20);

		// Check polling.
		ipc_server_mainloop_poll(s, &s->ml);
	}

	return 0;
}


/*
 *
 * Client management functions.
 *
 */

// Updates main-app visibility for an overlay session when the active primary
// application changes. No-op for non-overlay clients or before a compositor
// exists for this client.
static void
handle_overlay_client_events(volatile struct ipc_client_state *ics, int active_id, int prev_active_id)
{
	// Is an overlay session?
	if (!ics->client_state.session_overlay) {
		return;
	}

	// Does this client have a compositor yet, if not return?
	if (ics->xc == NULL) {
		return;
	}

	// Switch between main applications
	// NOTE(review): toggling false-then-true appears intended to force a
	// refresh of the visibility state — confirm against compositor docs.
	if (active_id >= 0 && prev_active_id >= 0) {
		xrt_syscomp_set_main_app_visibility(ics->server->xsysc, ics->xc, false);
		xrt_syscomp_set_main_app_visibility(ics->server->xsysc, ics->xc, true);
	}

	// Switch from idle to active application
	if (active_id >= 0 && prev_active_id < 0) {
		xrt_syscomp_set_main_app_visibility(ics->server->xsysc, ics->xc, true);
	}

	// Switch from active application to idle
	if (active_id < 0 && prev_active_id >= 0) {
		xrt_syscomp_set_main_app_visibility(ics->server->xsysc, ics->xc, false);
	}
}

// Recomputes this client's visible/focused/z_order flags based on whether it
// is the active primary application or an overlay, then pushes the state to
// the compositor (if the client has one).
static void
handle_focused_client_events(volatile struct ipc_client_state *ics, int active_id, int prev_active_id)
{
	// Set start z_order at the bottom.
	int64_t z_order = INT64_MIN;

	// Set visibility/focus to false on all applications.
	bool focused = false;
	bool visible = false;

	// Set visible + focused if we are the primary application
	if (ics->server_thread_index == active_id) {
		visible = true;
		focused = true;
		z_order = INT64_MIN;
	}

	// Set all overlays to always active and focused.
	if (ics->client_state.session_overlay) {
		visible = true;
		focused = true;
		z_order = ics->client_state.z_order;
	}

	ics->client_state.session_visible = visible;
	ics->client_state.session_focused = focused;
	ics->client_state.z_order = z_order;

	if (ics->xc != NULL) {
		xrt_syscomp_set_state(ics->server->xsysc, ics->xc, visible, focused);
		xrt_syscomp_set_z_order(ics->server->xsysc, ics->xc, z_order);
	}
}

// Broadcasts the current active/last-active client state to every connected
// client. Caller must hold s->global_state.lock.
static void
flush_state_to_all_clients_locked(struct ipc_server *s)
{
	for (uint32_t i = 0; i < IPC_MAX_CLIENTS; i++) {
		volatile struct ipc_client_state *ics = &s->threads[i].ics;

		// Not running?
		if (ics->server_thread_index < 0) {
			continue;
		}

		handle_focused_client_events(ics, s->global_state.active_client_index,
		                             s->global_state.last_active_client_index);
		handle_overlay_client_events(ics, s->global_state.active_client_index,
		                             s->global_state.last_active_client_index);
	}
}

// Re-evaluates which client is the active primary application, falling back
// to another active non-overlay session or the idle state, then flushes the
// result to all clients. Caller must hold s->global_state.lock.
static void
update_server_state_locked(struct ipc_server *s)
{
	// if our client that is set to active is still active,
	// and it is the same as our last active client, we can
	// early-out, as no events need to be sent

	if (s->global_state.active_client_index >= 0) {

		volatile struct ipc_client_state *ics = &s->threads[s->global_state.active_client_index].ics;

		if (ics->client_state.session_active &&
		    s->global_state.active_client_index == s->global_state.last_active_client_index) {
			return;
		}
	}


	// our active application has changed - this would typically be
	// switched by the monado-ctl application or other app making a
	// 'set active application' ipc call, or it could be a
	// connection loss resulting in us needing to 'fall through' to
	// the first active application
	//, or finally to the idle 'wallpaper' images.


	bool set_idle = true;
	int fallback_active_application = -1;

	// do we have a fallback application?
	for (uint32_t i = 0; i < IPC_MAX_CLIENTS; i++) {
		volatile struct ipc_client_state *ics = &s->threads[i].ics;
		if (ics->client_state.session_overlay == false && ics->server_thread_index >= 0 &&
		    ics->client_state.session_active) {
			fallback_active_application = i;
			set_idle = false;
		}
	}

	// if there is a currently-set active primary application and it is not
	// actually active/displayable, use the fallback application
	// instead.
	if (s->global_state.active_client_index >= 0) {
		volatile struct ipc_client_state *ics = &s->threads[s->global_state.active_client_index].ics;
		if (!(ics->client_state.session_overlay == false && ics->client_state.session_active)) {
			s->global_state.active_client_index = fallback_active_application;
		}
	}


	// if we have no applications to fallback to, enable the idle
	// wallpaper.
	if (set_idle) {
		s->global_state.active_client_index = -1;
	}

	flush_state_to_all_clients_locked(s);

	s->global_state.last_active_client_index = s->global_state.active_client_index;
}

// Looks up a connected client by its ID; returns NULL (with a warning) for
// invalid IDs, stale handles or unknown IDs. Caller must hold the lock.
static volatile struct ipc_client_state *
find_client_locked(struct ipc_server *s, uint32_t client_id)
{
	// Check for invalid IDs.
	if (client_id == 0 || client_id > INT_MAX) {
		IPC_WARN(s, "Invalid ID '%u', failing operation.", client_id);
		return NULL;
	}

	for (uint32_t i = 0; i < IPC_MAX_CLIENTS; i++) {
		volatile struct ipc_client_state *ics = &s->threads[i].ics;

		// Is this the client we are looking for?
		if (ics->client_state.id != client_id) {
			continue;
		}

		// Just in case of state data.
		// NOTE(review): '%d' used for a uint32_t here, other messages use '%u'.
		if (!xrt_ipc_handle_is_valid(ics->imc.ipc_handle)) {
			IPC_WARN(s, "Encountered invalid state while searching for client with ID '%d'", client_id);
			return NULL;
		}

		return ics;
	}

	IPC_WARN(s, "No client with ID '%u', failing operation.", client_id);

	return NULL;
}

// Copies a client's app state into @p out_ias, adding the derived
// io_active/primary_application fields. Caller must hold the lock.
static xrt_result_t
get_client_app_state_locked(struct ipc_server *s, uint32_t client_id, struct ipc_app_state *out_ias)
{
	volatile struct ipc_client_state *ics = find_client_locked(s, client_id);
	if (ics == NULL) {
		return XRT_ERROR_IPC_FAILURE;
	}

	struct ipc_app_state ias = ics->client_state;
	ias.io_active = ics->io_active;

	// @todo: track this data in the ipc_client_state struct
	ias.primary_application = false;

	// The active client is decided by index, so get that from the ics.
	int index = ics->server_thread_index;

	if (s->global_state.active_client_index == index) {
		ias.primary_application = true;
	}

	*out_ias = ias;

	return XRT_SUCCESS;
}

// Sets the active primary application to the client with @p client_id.
// Caller must hold the lock; does not flush state to clients by itself.
static xrt_result_t
set_active_client_locked(struct ipc_server *s, uint32_t client_id)
{
	volatile struct ipc_client_state *ics = find_client_locked(s, client_id);
	if (ics == NULL) {
		return XRT_ERROR_IPC_FAILURE;
	}

	// The active client is decided by index, so get that from the ics.
	int index = ics->server_thread_index;

	if (index != s->global_state.active_client_index) {
		s->global_state.active_client_index = index;
	}

	return XRT_SUCCESS;
}

// Toggles input/output routing for one client. Caller must hold the lock.
static xrt_result_t
toggle_io_client_locked(struct ipc_server *s, uint32_t client_id)
{
	volatile struct ipc_client_state *ics = find_client_locked(s, client_id);
	if (ics == NULL) {
		return XRT_ERROR_IPC_FAILURE;
	}

	ics->io_active = !ics->io_active;

	return XRT_SUCCESS;
}

// Allocates the next free non-zero client ID, skipping any value already in
// use by a connected client. Caller must hold the lock.
static uint32_t
allocate_id_locked(struct ipc_server *s)
{
	uint32_t id = 0;
	while (id == 0) {
		// Allocate a new one.
		id = ++s->id_generator;

		for (uint32_t i = 0; i < IPC_MAX_CLIENTS; i++) {
			volatile struct ipc_client_state *ics = &s->threads[i].ics;

			// If we find the ID, get a new one by setting to zero.
			if (ics->client_state.id == id) {
				id = 0;
				break;
			}
		}
	}

	// Paranoia.
	if (id == 0) {
		U_LOG_E("Got app(client) id 0, not allowed!");
		assert(id > 0);
	}

	return id;
}


/*
 *
 * Exported functions.
 *
 */

xrt_result_t
ipc_server_init_system_if_available_locked(struct ipc_server *s,
                                           volatile struct ipc_client_state *ics,
                                           bool *out_available)
{
	xrt_result_t xret = XRT_SUCCESS;

	bool available = false;

	if (s->xsys) {
		// System already created on a previous call.
		available = true;
	} else {
		xret = xrt_instance_is_system_available(s->xinst, &available);
		IPC_CHK_WITH_GOTO(s, xret, "xrt_instance_is_system_available", error);

		if (available) {
			xret = xrt_instance_create_system(s->xinst, &s->xsys, &s->xsysd, &s->xso, &s->xsysc);
			IPC_CHK_WITH_GOTO(s, xret, "xrt_instance_create_system", error);

			// Always succeeds.
			init_idevs(s);
			init_tracking_origins(s);
		}
	}

	// Lazily fill this client's shared memory once the system exists.
	if (available && ics != NULL && !ics->has_init_shm_system) {
		init_system_shm_state(s, ics);
		ics->has_init_shm_system = true;
	}

	if (out_available) {
		*out_available = available;
	}

	return XRT_SUCCESS;

error:
	return xret;
}

xrt_result_t
ipc_server_get_client_app_state(struct ipc_server *s, uint32_t client_id, struct ipc_app_state *out_ias)
{
	os_mutex_lock(&s->global_state.lock);
	xrt_result_t xret = get_client_app_state_locked(s, client_id, out_ias);
	os_mutex_unlock(&s->global_state.lock);

	return xret;
}

xrt_result_t
ipc_server_set_active_client(struct ipc_server *s, uint32_t client_id)
{
	os_mutex_lock(&s->global_state.lock);
	xrt_result_t xret = set_active_client_locked(s, client_id);
	os_mutex_unlock(&s->global_state.lock);

	return xret;
}

xrt_result_t
ipc_server_toggle_io_client(struct ipc_server *s, uint32_t client_id)
{
	os_mutex_lock(&s->global_state.lock);
	xrt_result_t xret = toggle_io_client_locked(s, client_id);
	os_mutex_unlock(&s->global_state.lock);

	return xret;
}

void
ipc_server_activate_session(volatile struct ipc_client_state *ics)
{
	struct ipc_server *s = ics->server;

	// Already active, noop.
	if (ics->client_state.session_active) {
		return;
	}

	assert(ics->server_thread_index >= 0);

	// Multiple threads could call this at the same time.
	os_mutex_lock(&s->global_state.lock);

	ics->client_state.session_active = true;

	if (ics->client_state.session_overlay) {
		// For new active overlay sessions only update this session.
		handle_focused_client_events(ics, s->global_state.active_client_index,
		                             s->global_state.last_active_client_index);
		handle_overlay_client_events(ics, s->global_state.active_client_index,
		                             s->global_state.last_active_client_index);
	} else {
		// Update active client
		set_active_client_locked(s, ics->client_state.id);

		// For new active regular sessions update all clients.
		update_server_state_locked(s);
	}

	os_mutex_unlock(&s->global_state.lock);
}

void
ipc_server_deactivate_session(volatile struct ipc_client_state *ics)
{
	struct ipc_server *s = ics->server;

	// Multiple threads could call this at the same time.
	os_mutex_lock(&s->global_state.lock);

	ics->client_state.session_active = false;

	update_server_state_locked(s);

	os_mutex_unlock(&s->global_state.lock);
}

void
ipc_server_update_state(struct ipc_server *s)
{
	// Multiple threads could call this at the same time.
	os_mutex_lock(&s->global_state.lock);

	update_server_state_locked(s);

	os_mutex_unlock(&s->global_state.lock);
}

void
ipc_server_handle_failure(struct ipc_server *vs)
{
	// Right now handled just the same as a graceful shutdown.
	vs->running = false;
}

void
ipc_server_handle_shutdown_signal(struct ipc_server *vs)
{
	vs->running = false;
}

// Accepts a newly-connected client: finds a free thread slot, allocates an
// ID, creates the shared memory segment and starts the client thread. On any
// failure the handle is closed and the connection dropped.
void
ipc_server_handle_client_connected(struct ipc_server *vs, xrt_ipc_handle_t ipc_handle)
{
	volatile struct ipc_client_state *ics = NULL;
	int32_t cs_index = -1;

	os_mutex_lock(&vs->global_state.lock);

	// Increment the connected client counter
	vs->global_state.connected_client_count++;

	// A client connected, so we're no longer in a delayed exit state
	// (The delay thread will still check the client count before exiting)
	vs->last_client_disconnect_ns = 0;

	// find the next free thread in our array (server_thread_index is -1)
	// and have it handle this connection
	for (uint32_t i = 0; i < IPC_MAX_CLIENTS; i++) {
		volatile struct ipc_client_state *_cs = &vs->threads[i].ics;
		if (_cs->server_thread_index < 0) {
			ics = _cs;
			cs_index = i;
			break;
		}
	}
	if (ics == NULL) {
		xrt_ipc_handle_close(ipc_handle);

		// Unlock when we are done.
		os_mutex_unlock(&vs->global_state.lock);

		U_LOG_E("Max client count reached!");
		return;
	}

	struct ipc_thread *it = &vs->threads[cs_index];
	if (it->state != IPC_THREAD_READY && it->state != IPC_THREAD_STOPPING) {
		// we should not get here
		xrt_ipc_handle_close(ipc_handle);

		// Unlock when we are done.
		os_mutex_unlock(&vs->global_state.lock);

		U_LOG_E("Client state management error!");
		return;
	}

	// Reap a stopping thread before reusing its slot.
	if (it->state != IPC_THREAD_READY) {
		os_thread_join(&it->thread);
		os_thread_destroy(&it->thread);
		it->state = IPC_THREAD_READY;
	}

	it->state = IPC_THREAD_STARTING;

	// Allocate a new ID, avoid zero.
	uint32_t id = allocate_id_locked(vs);

	// Reset everything.
	U_ZERO((struct ipc_client_state *)ics);

	// Set state.
	ics->local_space_overseer_index = UINT32_MAX;
	ics->client_state.id = id;
	ics->imc.ipc_handle = ipc_handle;
	ics->server = vs;
	ics->server_thread_index = cs_index;
	ics->io_active = true;

	ics->plane_detection_size = 0;
	ics->plane_detection_count = 0;
	ics->plane_detection_ids = NULL;
	ics->plane_detection_xdev = NULL;

	xrt_result_t xret = init_shm_and_instance_state(vs, ics);
	if (xret != XRT_SUCCESS) {

		// Unlock when we are done.
		// NOTE(review): ipc_handle is not closed and the thread slot
		// stays IPC_THREAD_STARTING on this path — confirm intended.
		os_mutex_unlock(&vs->global_state.lock);

		U_LOG_E("Failed to allocate shared memory!");
		return;
	}

	os_thread_start(&it->thread, ipc_server_client_thread, (void *)ics);

	// Unlock when we are done.
	os_mutex_unlock(&vs->global_state.lock);
}

// NOTE(review): dereferences vs->xsys — presumably only called after the
// system has been created; confirm callers guarantee that.
xrt_result_t
ipc_server_get_system_properties(struct ipc_server *vs, struct xrt_system_properties *out_properties)
{
	memcpy(out_properties, &vs->xsys->properties, sizeof(*out_properties));
	return XRT_SUCCESS;
}

// Shared entry point for platform-specific mains: initializes everything,
// runs the main loop, invokes the lifecycle callbacks and tears down.
// Returns 0 on clean exit, -1 on initialization failure.
int
ipc_server_main_common(const struct ipc_server_main_info *ismi,
                       const struct ipc_server_callbacks *callbacks,
                       void *data)
{
	xrt_result_t xret = XRT_SUCCESS;
	int ret = -1;

	// Get log level first.
	enum u_logging_level log_level = debug_get_log_option_ipc_log();

	// Log very early who we are.
	U_LOG_IFL_I(log_level, "%s '%s' starting up...", u_runtime_description, u_git_tag);

	// Allocate the server itself.
	// NOTE(review): allocation result is not checked before use.
	struct ipc_server *s = U_TYPED_CALLOC(struct ipc_server);

	// Can be set by either.
	s->no_stdin = ismi->no_stdin || debug_get_bool_option_no_stdin();

#ifdef XRT_OS_WINDOWS
	timeBeginPeriod(1);
#endif

	/*
	 * Need to create early before any vars are added. Not created in
	 * init_all since that function is shared with Android and the debug
	 * GUI isn't supported on Android.
	 */
	u_debug_gui_create(&ismi->udgci, &s->debug_gui);

	xret = init_all(s, log_level, callbacks, data, ismi->exit_on_disconnect);
	U_LOG_CHK_ONLY_PRINT(log_level, xret, "init_all");
	if (xret != XRT_SUCCESS) {
		// Propagate the failure.
		callbacks->init_failed(xret, data);
		u_debug_gui_stop(&s->debug_gui);
		free(s);
		return -1;
	}

	// Start the debug UI now (if enabled).
	u_debug_gui_start(s->debug_gui, s->xinst, s->xsysd);

	// Tell the callbacks we are entering the main-loop.
	callbacks->mainloop_entering(s, s->xinst, data);

	// Early init the system. If not available now, will try again per client request.
	xret = ipc_server_init_system_if_available_locked( //
	    s,                                             //
	    NULL,                                          // optional - ics
	    NULL);                                         // optional - out_available
	if (xret != XRT_SUCCESS) {
		U_LOG_CHK_ONLY_PRINT(log_level, xret, "ipc_server_init_system_if_available_locked");
	}

	// Main loop.
	ret = main_loop(s);

	// Tell the callbacks we are leaving the main-loop.
	callbacks->mainloop_leaving(s, s->xinst, data);

	// Stop the UI before tearing everything down.
	u_debug_gui_stop(&s->debug_gui);

	// Done after UI stopped.
	teardown_all(s);
	free(s);

#ifdef XRT_OS_WINDOWS
	timeEndPeriod(1);
#endif

	U_LOG_IFL_I(log_level, "Server exiting: '%i'", ret);

	return ret;
}

int
ipc_server_stop(struct ipc_server *s)
{
	s->running = false;
	return 0;
}

#ifndef XRT_OS_ANDROID

// Default callback: print end-user help when startup fails (Linux only).
static void
init_failed(xrt_result_t xret, void *data)
{
#ifdef XRT_OS_LINUX
	// Print information how to debug issues.
	print_linux_end_user_failed_information(debug_get_log_option_ipc_log());
#endif
}

// Default callback: print the "service started" banner (Linux only).
static void
mainloop_entering(struct ipc_server *s, struct xrt_instance *xinst, void *data)
{
#ifdef XRT_OS_LINUX
	// Print a very clear service started message.
	print_linux_end_user_started_information(s->log_level);
#endif
}

static void
mainloop_leaving(struct ipc_server *s, struct xrt_instance *xinst, void *data)
{
	// No-op
}

// NOTE(review): not declared static, unlike the sibling callbacks above —
// confirm whether external linkage is intended.
void
client_connected(struct ipc_server *s, uint32_t client_id, void *data)
{
	IPC_INFO(s, "Client %u connected", client_id);
}

// NOTE(review): not declared static, unlike the sibling callbacks above —
// confirm whether external linkage is intended.
void
client_disconnected(struct ipc_server *s, uint32_t client_id, void *data)
{
	IPC_INFO(s, "Client %u disconnected", client_id);
}

// Default (non-Android) main: wires up the default callbacks and runs the
// common server entry point.
int
ipc_server_main(int argc, char **argv, const struct ipc_server_main_info *ismi)
{
	const struct ipc_server_callbacks callbacks = {
	    .init_failed = init_failed,
	    .mainloop_entering = mainloop_entering,
	    .mainloop_leaving = mainloop_leaving,
	    .client_connected = client_connected,
	    .client_disconnected = client_disconnected,
	};

	return ipc_server_main_common(ismi, &callbacks, NULL);
}

#endif // !XRT_OS_ANDROID