The open source OpenXR runtime

xrt/compositor: use int64_t for timestamps

Part-of: <https://gitlab.freedesktop.org/monado/monado/-/merge_requests/2298>

Authored by Simon Zeni and committed by Rylie Pavlik (commits 9f6f6c6d, f88705a3).

+189 -192
+5 -5
src/xrt/compositor/client/comp_d3d11_client.cpp
··· 171 struct client_d3d11_swapchain; 172 173 static inline DWORD 174 - convertTimeoutToWindowsMilliseconds(uint64_t timeout_ns) 175 { 176 - return (timeout_ns == XRT_INFINITE_DURATION) ? INFINITE : (DWORD)(timeout_ns / (uint64_t)U_TIME_1MS_IN_NS); 177 } 178 179 /*! ··· 275 } 276 277 static xrt_result_t 278 - client_d3d11_swapchain_wait_image(struct xrt_swapchain *xsc, uint64_t timeout_ns, uint32_t index) 279 { 280 struct client_d3d11_swapchain *sc = as_client_d3d11_swapchain(xsc); 281 ··· 516 static xrt_result_t 517 client_d3d11_compositor_wait_frame(struct xrt_compositor *xc, 518 int64_t *out_frame_id, 519 - uint64_t *predicted_display_time, 520 - uint64_t *predicted_display_period) 521 { 522 struct client_d3d11_compositor *c = as_client_d3d11_compositor(xc); 523
··· 171 struct client_d3d11_swapchain; 172 173 static inline DWORD 174 + convertTimeoutToWindowsMilliseconds(int64_t timeout_ns) 175 { 176 + return (timeout_ns == XRT_INFINITE_DURATION) ? INFINITE : (DWORD)(timeout_ns / (int64_t)U_TIME_1MS_IN_NS); 177 } 178 179 /*! ··· 275 } 276 277 static xrt_result_t 278 + client_d3d11_swapchain_wait_image(struct xrt_swapchain *xsc, int64_t timeout_ns, uint32_t index) 279 { 280 struct client_d3d11_swapchain *sc = as_client_d3d11_swapchain(xsc); 281 ··· 516 static xrt_result_t 517 client_d3d11_compositor_wait_frame(struct xrt_compositor *xc, 518 int64_t *out_frame_id, 519 + int64_t *predicted_display_time, 520 + int64_t *predicted_display_period) 521 { 522 struct client_d3d11_compositor *c = as_client_d3d11_compositor(xc); 523
+5 -5
src/xrt/compositor/client/comp_d3d12_client.cpp
··· 163 struct client_d3d12_swapchain; 164 165 static inline DWORD 166 - convertTimeoutToWindowsMilliseconds(uint64_t timeout_ns) 167 { 168 - return (timeout_ns == XRT_INFINITE_DURATION) ? INFINITE : (DWORD)(timeout_ns / (uint64_t)U_TIME_1MS_IN_NS); 169 } 170 171 static inline bool ··· 377 } 378 379 static xrt_result_t 380 - client_d3d12_swapchain_wait_image(struct xrt_swapchain *xsc, uint64_t timeout_ns, uint32_t index) 381 { 382 struct client_d3d12_swapchain *sc = as_client_d3d12_swapchain(xsc); 383 ··· 744 static xrt_result_t 745 client_d3d12_compositor_wait_frame(struct xrt_compositor *xc, 746 int64_t *out_frame_id, 747 - uint64_t *predicted_display_time, 748 - uint64_t *predicted_display_period) 749 { 750 struct client_d3d12_compositor *c = as_client_d3d12_compositor(xc); 751
··· 163 struct client_d3d12_swapchain; 164 165 static inline DWORD 166 + convertTimeoutToWindowsMilliseconds(int64_t timeout_ns) 167 { 168 + return (timeout_ns == XRT_INFINITE_DURATION) ? INFINITE : (DWORD)(timeout_ns / (int64_t)U_TIME_1MS_IN_NS); 169 } 170 171 static inline bool ··· 377 } 378 379 static xrt_result_t 380 + client_d3d12_swapchain_wait_image(struct xrt_swapchain *xsc, int64_t timeout_ns, uint32_t index) 381 { 382 struct client_d3d12_swapchain *sc = as_client_d3d12_swapchain(xsc); 383 ··· 744 static xrt_result_t 745 client_d3d12_compositor_wait_frame(struct xrt_compositor *xc, 746 int64_t *out_frame_id, 747 + int64_t *predicted_display_time, 748 + int64_t *predicted_display_period) 749 { 750 struct client_d3d12_compositor *c = as_client_d3d12_compositor(xc); 751
+3 -3
src/xrt/compositor/client/comp_gl_client.c
··· 152 } 153 154 static xrt_result_t 155 - client_gl_swapchain_wait_image(struct xrt_swapchain *xsc, uint64_t timeout_ns, uint32_t index) 156 { 157 // Pipe down call into native swapchain. 158 return xrt_swapchain_wait_image(to_native_swapchain(xsc), timeout_ns, index); ··· 195 static xrt_result_t 196 client_gl_compositor_wait_frame(struct xrt_compositor *xc, 197 int64_t *out_frame_id, 198 - uint64_t *predicted_display_time, 199 - uint64_t *predicted_display_period) 200 { 201 // Pipe down call into native compositor. 202 return xrt_comp_wait_frame( //
··· 152 } 153 154 static xrt_result_t 155 + client_gl_swapchain_wait_image(struct xrt_swapchain *xsc, int64_t timeout_ns, uint32_t index) 156 { 157 // Pipe down call into native swapchain. 158 return xrt_swapchain_wait_image(to_native_swapchain(xsc), timeout_ns, index); ··· 195 static xrt_result_t 196 client_gl_compositor_wait_frame(struct xrt_compositor *xc, 197 int64_t *out_frame_id, 198 + int64_t *predicted_display_time, 199 + int64_t *predicted_display_period) 200 { 201 // Pipe down call into native compositor. 202 return xrt_comp_wait_frame( //
+3 -3
src/xrt/compositor/client/comp_vk_client.c
··· 319 } 320 321 static xrt_result_t 322 - client_vk_swapchain_wait_image(struct xrt_swapchain *xsc, uint64_t timeout_ns, uint32_t index) 323 { 324 COMP_TRACE_MARKER(); 325 ··· 438 static xrt_result_t 439 client_vk_compositor_wait_frame(struct xrt_compositor *xc, 440 int64_t *out_frame_id, 441 - uint64_t *predicted_display_time, 442 - uint64_t *predicted_display_period) 443 { 444 COMP_TRACE_MARKER(); 445
··· 319 } 320 321 static xrt_result_t 322 + client_vk_swapchain_wait_image(struct xrt_swapchain *xsc, int64_t timeout_ns, uint32_t index) 323 { 324 COMP_TRACE_MARKER(); 325 ··· 438 static xrt_result_t 439 client_vk_compositor_wait_frame(struct xrt_compositor *xc, 440 int64_t *out_frame_id, 441 + int64_t *predicted_display_time, 442 + int64_t *predicted_display_period) 443 { 444 COMP_TRACE_MARKER(); 445
+10 -10
src/xrt/compositor/main/comp_compositor.c
··· 174 static xrt_result_t 175 compositor_predict_frame(struct xrt_compositor *xc, 176 int64_t *out_frame_id, 177 - uint64_t *out_wake_time_ns, 178 - uint64_t *out_predicted_gpu_time_ns, 179 - uint64_t *out_predicted_display_time_ns, 180 - uint64_t *out_predicted_display_period_ns) 181 { 182 COMP_TRACE_MARKER(); 183 ··· 186 COMP_SPEW(c, "PREDICT_FRAME"); 187 188 // A little bit easier to read. 189 - uint64_t interval_ns = (int64_t)c->settings.nominal_frame_interval_ns; 190 191 comp_target_update_timings(c->target); 192 193 assert(comp_frame_is_invalid_locked(&c->frame.waited)); 194 195 int64_t frame_id = -1; 196 - uint64_t wake_up_time_ns = 0; 197 - uint64_t present_slop_ns = 0; 198 - uint64_t desired_present_time_ns = 0; 199 - uint64_t predicted_display_time_ns = 0; 200 comp_target_calc_frame_pacing( // 201 c->target, // 202 &frame_id, // ··· 223 compositor_mark_frame(struct xrt_compositor *xc, 224 int64_t frame_id, 225 enum xrt_compositor_frame_point point, 226 - uint64_t when_ns) 227 { 228 COMP_TRACE_MARKER(); 229
··· 174 static xrt_result_t 175 compositor_predict_frame(struct xrt_compositor *xc, 176 int64_t *out_frame_id, 177 + int64_t *out_wake_time_ns, 178 + int64_t *out_predicted_gpu_time_ns, 179 + int64_t *out_predicted_display_time_ns, 180 + int64_t *out_predicted_display_period_ns) 181 { 182 COMP_TRACE_MARKER(); 183 ··· 186 COMP_SPEW(c, "PREDICT_FRAME"); 187 188 // A little bit easier to read. 189 + int64_t interval_ns = (int64_t)c->settings.nominal_frame_interval_ns; 190 191 comp_target_update_timings(c->target); 192 193 assert(comp_frame_is_invalid_locked(&c->frame.waited)); 194 195 int64_t frame_id = -1; 196 + int64_t wake_up_time_ns = 0; 197 + int64_t present_slop_ns = 0; 198 + int64_t desired_present_time_ns = 0; 199 + int64_t predicted_display_time_ns = 0; 200 comp_target_calc_frame_pacing( // 201 c->target, // 202 &frame_id, // ··· 223 compositor_mark_frame(struct xrt_compositor *xc, 224 int64_t frame_id, 225 enum xrt_compositor_frame_point point, 226 + int64_t when_ns) 227 { 228 COMP_TRACE_MARKER(); 229
+1 -1
src/xrt/compositor/main/comp_settings.h
··· 84 bool print_modes; 85 86 //! Nominal frame interval 87 - uint64_t nominal_frame_interval_ns; 88 89 //! Vulkan physical device selected by comp_settings_check_vulkan_caps 90 //! may be forced by user
··· 84 bool print_modes; 85 86 //! Nominal frame interval 87 + int64_t nominal_frame_interval_ns; 88 89 //! Vulkan physical device selected by comp_settings_check_vulkan_caps 90 //! may be forced by user
+19 -19
src/xrt/compositor/main/comp_target.h
··· 221 VkQueue queue, 222 uint32_t index, 223 uint64_t timeline_semaphore_value, 224 - uint64_t desired_present_time_ns, 225 - uint64_t present_slop_ns); 226 227 /*! 228 * Flush any WSI state before rendering. ··· 242 */ 243 void (*calc_frame_pacing)(struct comp_target *ct, 244 int64_t *out_frame_id, 245 - uint64_t *out_wake_up_time_ns, 246 - uint64_t *out_desired_present_time_ns, 247 - uint64_t *out_present_slop_ns, 248 - uint64_t *out_predicted_display_time_ns); 249 250 /*! 251 * The compositor tells the target a timing information about a single ··· 254 void (*mark_timing_point)(struct comp_target *ct, 255 enum comp_target_timing_point point, 256 int64_t frame_id, 257 - uint64_t when_ns); 258 259 /*! 260 * Update timing information for this target, this function should be ··· 279 * @see @ref frame-pacing. 280 */ 281 void (*info_gpu)( 282 - struct comp_target *ct, int64_t frame_id, uint64_t gpu_start_ns, uint64_t gpu_end_ns, uint64_t when_ns); 283 284 /* 285 * ··· 393 VkQueue queue, 394 uint32_t index, 395 uint64_t timeline_semaphore_value, 396 - uint64_t desired_present_time_ns, 397 - uint64_t present_slop_ns) 398 399 { 400 COMP_TRACE_MARKER(); ··· 431 static inline void 432 comp_target_calc_frame_pacing(struct comp_target *ct, 433 int64_t *out_frame_id, 434 - uint64_t *out_wake_up_time_ns, 435 - uint64_t *out_desired_present_time_ns, 436 - uint64_t *out_present_slop_ns, 437 - uint64_t *out_predicted_display_time_ns) 438 { 439 COMP_TRACE_MARKER(); 440 ··· 455 * @ingroup comp_main 456 */ 457 static inline void 458 - comp_target_mark_wake_up(struct comp_target *ct, int64_t frame_id, uint64_t when_woke_ns) 459 { 460 COMP_TRACE_MARKER(); 461 ··· 470 * @ingroup comp_main 471 */ 472 static inline void 473 - comp_target_mark_begin(struct comp_target *ct, int64_t frame_id, uint64_t when_began_ns) 474 { 475 COMP_TRACE_MARKER(); 476 ··· 485 * @ingroup comp_main 486 */ 487 static inline void 488 - comp_target_mark_submit_begin(struct comp_target *ct, int64_t frame_id, uint64_t when_submit_began_ns) 489 { 490 COMP_TRACE_MARKER(); 491 ··· 500 * @ingroup comp_main 501 */ 502 static inline void 503 - comp_target_mark_submit_end(struct comp_target *ct, int64_t frame_id, uint64_t when_submit_end_ns) 504 { 505 COMP_TRACE_MARKER(); 506 ··· 529 */ 530 static inline void 531 comp_target_info_gpu( 532 - struct comp_target *ct, int64_t frame_id, uint64_t gpu_start_ns, uint64_t gpu_end_ns, uint64_t when_ns) 533 { 534 COMP_TRACE_MARKER(); 535
··· 221 VkQueue queue, 222 uint32_t index, 223 uint64_t timeline_semaphore_value, 224 + int64_t desired_present_time_ns, 225 + int64_t present_slop_ns); 226 227 /*! 228 * Flush any WSI state before rendering. ··· 242 */ 243 void (*calc_frame_pacing)(struct comp_target *ct, 244 int64_t *out_frame_id, 245 + int64_t *out_wake_up_time_ns, 246 + int64_t *out_desired_present_time_ns, 247 + int64_t *out_present_slop_ns, 248 + int64_t *out_predicted_display_time_ns); 249 250 /*! 251 * The compositor tells the target a timing information about a single ··· 254 void (*mark_timing_point)(struct comp_target *ct, 255 enum comp_target_timing_point point, 256 int64_t frame_id, 257 + int64_t when_ns); 258 259 /*! 260 * Update timing information for this target, this function should be ··· 279 * @see @ref frame-pacing. 280 */ 281 void (*info_gpu)( 282 + struct comp_target *ct, int64_t frame_id, int64_t gpu_start_ns, int64_t gpu_end_ns, int64_t when_ns); 283 284 /* 285 * ··· 393 VkQueue queue, 394 uint32_t index, 395 uint64_t timeline_semaphore_value, 396 + int64_t desired_present_time_ns, 397 + int64_t present_slop_ns) 398 399 { 400 COMP_TRACE_MARKER(); ··· 431 static inline void 432 comp_target_calc_frame_pacing(struct comp_target *ct, 433 int64_t *out_frame_id, 434 + int64_t *out_wake_up_time_ns, 435 + int64_t *out_desired_present_time_ns, 436 + int64_t *out_present_slop_ns, 437 + int64_t *out_predicted_display_time_ns) 438 { 439 COMP_TRACE_MARKER(); 440 ··· 455 * @ingroup comp_main 456 */ 457 static inline void 458 + comp_target_mark_wake_up(struct comp_target *ct, int64_t frame_id, int64_t when_woke_ns) 459 { 460 COMP_TRACE_MARKER(); 461 ··· 470 * @ingroup comp_main 471 */ 472 static inline void 473 + comp_target_mark_begin(struct comp_target *ct, int64_t frame_id, int64_t when_began_ns) 474 { 475 COMP_TRACE_MARKER(); 476 ··· 485 * @ingroup comp_main 486 */ 487 static inline void 488 + comp_target_mark_submit_begin(struct comp_target *ct, int64_t frame_id, int64_t when_submit_began_ns) 489 { 490 COMP_TRACE_MARKER(); 491 ··· 500 * @ingroup comp_main 501 */ 502 static inline void 503 + comp_target_mark_submit_end(struct comp_target *ct, int64_t frame_id, int64_t when_submit_end_ns) 504 { 505 COMP_TRACE_MARKER(); 506 ··· 529 */ 530 static inline void 531 comp_target_info_gpu( 532 + struct comp_target *ct, int64_t frame_id, int64_t gpu_start_ns, int64_t gpu_end_ns, int64_t when_ns) 533 { 534 COMP_TRACE_MARKER(); 535
+21 -21
src/xrt/compositor/main/comp_target_swapchain.c
··· 329 cts->swapchain.handle, // 330 &count, // 331 timings); // 332 - uint64_t now_ns = os_monotonic_get_ns(); 333 for (uint32_t i = 0; i < count; i++) { 334 u_pc_info(cts->upc, // 335 timings[i].presentID, // ··· 350 return; 351 } 352 353 - uint64_t last_vblank_ns; 354 355 os_thread_helper_lock(&cts->vblank.event_thread); 356 last_vblank_ns = cts->vblank.last_vblank_ns; ··· 416 } 417 418 static bool 419 - vblank_event_func(struct comp_target *ct, uint64_t *out_timestamp_ns) 420 { 421 struct comp_target_swapchain *cts = (struct comp_target_swapchain *)ct; 422 ··· 449 ret = vk->vkWaitForFences(vk->device, 1, &vblank_event_fence, true, time_s_to_ns(1)); 450 451 // As quickly as possible after the fence has fired. 452 - uint64_t now_ns = os_monotonic_get_ns(); 453 454 bool valid = false; 455 if (ret == VK_SUCCESS) { ··· 520 // Unlock while waiting. 521 os_thread_helper_unlock(&cts->vblank.event_thread); 522 523 - uint64_t when_ns = 0; 524 bool valid = vblank_event_func(ct, &when_ns); 525 526 // Just keep swimming. ··· 619 VkBool32 supported; 620 VkResult ret; 621 622 - uint64_t now_ns = os_monotonic_get_ns(); 623 // Some platforms really don't like the pacing_compositor code. 
624 bool use_display_timing_if_available = cts->timing_usage == COMP_TARGET_USE_DISPLAY_IF_AVAILABLE; 625 if (cts->upc == NULL && use_display_timing_if_available && vk->has_GOOGLE_display_timing) { ··· 846 VkQueue queue, 847 uint32_t index, 848 uint64_t timeline_semaphore_value, 849 - uint64_t desired_present_time_ns, 850 - uint64_t present_slop_ns) 851 { 852 struct comp_target_swapchain *cts = (struct comp_target_swapchain *)ct; 853 struct vk_bundle *vk = get_vk(cts); ··· 914 static void 915 comp_target_swapchain_calc_frame_pacing(struct comp_target *ct, 916 int64_t *out_frame_id, 917 - uint64_t *out_wake_up_time_ns, 918 - uint64_t *out_desired_present_time_ns, 919 - uint64_t *out_present_slop_ns, 920 - uint64_t *out_predicted_display_time_ns) 921 { 922 struct comp_target_swapchain *cts = (struct comp_target_swapchain *)ct; 923 924 int64_t frame_id = -1; 925 - uint64_t wake_up_time_ns = 0; 926 - uint64_t desired_present_time_ns = 0; 927 - uint64_t present_slop_ns = 0; 928 - uint64_t predicted_display_time_ns = 0; 929 - uint64_t predicted_display_period_ns = 0; 930 - uint64_t min_display_period_ns = 0; 931 - uint64_t now_ns = os_monotonic_get_ns(); 932 933 u_pc_predict(cts->upc, // 934 now_ns, // ··· 953 comp_target_swapchain_mark_timing_point(struct comp_target *ct, 954 enum comp_target_timing_point point, 955 int64_t frame_id, 956 - uint64_t when_ns) 957 { 958 struct comp_target_swapchain *cts = (struct comp_target_swapchain *)ct; 959 assert(frame_id == cts->current_frame_id); ··· 990 991 static void 992 comp_target_swapchain_info_gpu( 993 - struct comp_target *ct, int64_t frame_id, uint64_t gpu_start_ns, uint64_t gpu_end_ns, uint64_t when_ns) 994 { 995 COMP_TRACE_MARKER(); 996
··· 329 cts->swapchain.handle, // 330 &count, // 331 timings); // 332 + int64_t now_ns = os_monotonic_get_ns(); 333 for (uint32_t i = 0; i < count; i++) { 334 u_pc_info(cts->upc, // 335 timings[i].presentID, // ··· 350 return; 351 } 352 353 + int64_t last_vblank_ns; 354 355 os_thread_helper_lock(&cts->vblank.event_thread); 356 last_vblank_ns = cts->vblank.last_vblank_ns; ··· 416 } 417 418 static bool 419 + vblank_event_func(struct comp_target *ct, int64_t *out_timestamp_ns) 420 { 421 struct comp_target_swapchain *cts = (struct comp_target_swapchain *)ct; 422 ··· 449 ret = vk->vkWaitForFences(vk->device, 1, &vblank_event_fence, true, time_s_to_ns(1)); 450 451 // As quickly as possible after the fence has fired. 452 + int64_t now_ns = os_monotonic_get_ns(); 453 454 bool valid = false; 455 if (ret == VK_SUCCESS) { ··· 520 // Unlock while waiting. 521 os_thread_helper_unlock(&cts->vblank.event_thread); 522 523 + int64_t when_ns = 0; 524 bool valid = vblank_event_func(ct, &when_ns); 525 526 // Just keep swimming. ··· 619 VkBool32 supported; 620 VkResult ret; 621 622 + int64_t now_ns = os_monotonic_get_ns(); 623 // Some platforms really don't like the pacing_compositor code. 
624 bool use_display_timing_if_available = cts->timing_usage == COMP_TARGET_USE_DISPLAY_IF_AVAILABLE; 625 if (cts->upc == NULL && use_display_timing_if_available && vk->has_GOOGLE_display_timing) { ··· 846 VkQueue queue, 847 uint32_t index, 848 uint64_t timeline_semaphore_value, 849 + int64_t desired_present_time_ns, 850 + int64_t present_slop_ns) 851 { 852 struct comp_target_swapchain *cts = (struct comp_target_swapchain *)ct; 853 struct vk_bundle *vk = get_vk(cts); ··· 914 static void 915 comp_target_swapchain_calc_frame_pacing(struct comp_target *ct, 916 int64_t *out_frame_id, 917 + int64_t *out_wake_up_time_ns, 918 + int64_t *out_desired_present_time_ns, 919 + int64_t *out_present_slop_ns, 920 + int64_t *out_predicted_display_time_ns) 921 { 922 struct comp_target_swapchain *cts = (struct comp_target_swapchain *)ct; 923 924 int64_t frame_id = -1; 925 + int64_t wake_up_time_ns = 0; 926 + int64_t desired_present_time_ns = 0; 927 + int64_t present_slop_ns = 0; 928 + int64_t predicted_display_time_ns = 0; 929 + int64_t predicted_display_period_ns = 0; 930 + int64_t min_display_period_ns = 0; 931 + int64_t now_ns = os_monotonic_get_ns(); 932 933 u_pc_predict(cts->upc, // 934 now_ns, // ··· 953 comp_target_swapchain_mark_timing_point(struct comp_target *ct, 954 enum comp_target_timing_point point, 955 int64_t frame_id, 956 + int64_t when_ns) 957 { 958 struct comp_target_swapchain *cts = (struct comp_target_swapchain *)ct; 959 assert(frame_id == cts->current_frame_id); ··· 990 991 static void 992 comp_target_swapchain_info_gpu( 993 + struct comp_target *ct, int64_t frame_id, int64_t gpu_start_ns, int64_t gpu_end_ns, int64_t when_ns) 994 { 995 COMP_TRACE_MARKER(); 996
+1 -1
src/xrt/compositor/mock/mock_compositor.cpp
··· 28 } 29 30 static xrt_result_t 31 - mock_compositor_swapchain_wait_image(struct xrt_swapchain *xsc, uint64_t timeout_ns, uint32_t index) 32 { 33 struct mock_compositor_swapchain *mcsc = mock_compositor_swapchain(xsc); 34 struct mock_compositor *mc = mcsc->mc;
··· 28 } 29 30 static xrt_result_t 31 + mock_compositor_swapchain_wait_image(struct xrt_swapchain *xsc, int64_t timeout_ns, uint32_t index) 32 { 33 struct mock_compositor_swapchain *mcsc = mock_compositor_swapchain(xsc); 34 struct mock_compositor *mc = mcsc->mc;
+25 -25
src/xrt/compositor/multi/comp_multi_compositor.c
··· 49 slot_clear_locked(struct multi_compositor *mc, struct multi_layer_slot *slot) 50 { 51 if (slot->active) { 52 - uint64_t now_ns = os_monotonic_get_ns(); 53 u_pa_retired(mc->upa, slot->data.frame_id, now_ns); 54 } 55 ··· 125 xrt_result_t ret = XRT_SUCCESS; 126 127 // 100ms 128 - uint64_t timeout_ns = 100 * U_TIME_1MS_IN_NS; 129 130 do { 131 ret = xrt_compositor_fence_wait(*xcf_ptr, timeout_ns); ··· 150 xrt_result_t ret = XRT_SUCCESS; 151 152 // 100ms 153 - uint64_t timeout_ns = 100 * U_TIME_1MS_IN_NS; 154 155 do { 156 ret = xrt_compositor_semaphore_wait(*xcsem_ptr, value, timeout_ns); ··· 291 } 292 293 // Sample time outside of lock. 294 - uint64_t now_ns = os_monotonic_get_ns(); 295 296 os_mutex_lock(&mc->msc->list_and_timing_lock); 297 u_pa_mark_gpu_done(mc->upa, frame_id, now_ns); ··· 496 static xrt_result_t 497 multi_compositor_predict_frame(struct xrt_compositor *xc, 498 int64_t *out_frame_id, 499 - uint64_t *out_wake_time_ns, 500 - uint64_t *out_predicted_gpu_time_ns, 501 - uint64_t *out_predicted_display_time_ns, 502 - uint64_t *out_predicted_display_period_ns) 503 { 504 COMP_TRACE_MARKER(); 505 506 struct multi_compositor *mc = multi_compositor(xc); 507 - uint64_t now_ns = os_monotonic_get_ns(); 508 os_mutex_lock(&mc->msc->list_and_timing_lock); 509 510 u_pa_predict( // ··· 526 multi_compositor_mark_frame(struct xrt_compositor *xc, 527 int64_t frame_id, 528 enum xrt_compositor_frame_point point, 529 - uint64_t when_ns) 530 { 531 COMP_TRACE_MARKER(); 532 533 struct multi_compositor *mc = multi_compositor(xc); 534 535 - uint64_t now_ns = os_monotonic_get_ns(); 536 537 switch (point) { 538 case XRT_COMPOSITOR_FRAME_POINT_WOKE: ··· 549 static xrt_result_t 550 multi_compositor_wait_frame(struct xrt_compositor *xc, 551 int64_t *out_frame_id, 552 - uint64_t *out_predicted_display_time_ns, 553 - uint64_t *out_predicted_display_period_ns) 554 { 555 COMP_TRACE_MARKER(); 556 557 struct multi_compositor *mc = multi_compositor(xc); 558 559 int64_t frame_id = -1; 560 - uint64_t wake_up_time_ns = 0; 561 - uint64_t predicted_gpu_time_ns = 0; 562 563 xrt_comp_predict_frame( // 564 xc, // ··· 571 // Wait until the given wake up time. 572 u_wait_until(&mc->frame_sleeper, wake_up_time_ns); 573 574 - uint64_t now_ns = os_monotonic_get_ns(); 575 576 // Signal that we woke up. 577 xrt_comp_mark_frame(xc, frame_id, XRT_COMPOSITOR_FRAME_POINT_WOKE, now_ns); ··· 589 struct multi_compositor *mc = multi_compositor(xc); 590 591 os_mutex_lock(&mc->msc->list_and_timing_lock); 592 - uint64_t now_ns = os_monotonic_get_ns(); 593 u_pa_mark_point(mc->upa, frame_id, U_TIMING_POINT_BEGIN, now_ns); 594 os_mutex_unlock(&mc->msc->list_and_timing_lock); 595 ··· 602 COMP_TRACE_MARKER(); 603 604 struct multi_compositor *mc = multi_compositor(xc); 605 - uint64_t now_ns = os_monotonic_get_ns(); 606 607 os_mutex_lock(&mc->msc->list_and_timing_lock); 608 u_pa_mark_discarded(mc->upa, frame_id, now_ns); ··· 617 struct multi_compositor *mc = multi_compositor(xc); 618 619 // As early as possible. 620 - uint64_t now_ns = os_monotonic_get_ns(); 621 os_mutex_lock(&mc->msc->list_and_timing_lock); 622 u_pa_mark_delivered(mc->upa, data->frame_id, now_ns, data->display_time_ns); 623 os_mutex_unlock(&mc->msc->list_and_timing_lock); ··· 797 push_fence_to_wait_thread(mc, frame_id, xcf); 798 } else { 799 // Assume that the app side compositor waited.
800 - uint64_t now_ns = os_monotonic_get_ns(); 801 802 os_mutex_lock(&mc->msc->list_and_timing_lock); 803 u_pa_mark_gpu_done(mc->upa, frame_id, now_ns); ··· 898 } 899 900 static void 901 - log_frame_time_diff(uint64_t frame_time_ns, uint64_t display_time_ns) 902 { 903 int64_t diff_ns = (int64_t)frame_time_ns - (int64_t)display_time_ns; 904 bool late = false; ··· 911 } 912 913 void 914 - multi_compositor_deliver_any_frames(struct multi_compositor *mc, uint64_t display_time_ns) 915 { 916 os_mutex_lock(&mc->slot_lock); 917 ··· 923 if (time_is_greater_then_or_within_half_ms(display_time_ns, mc->scheduled.data.display_time_ns)) { 924 slot_move_and_clear_locked(mc, &mc->delivered, &mc->scheduled); 925 926 - uint64_t frame_time_ns = mc->delivered.data.display_time_ns; 927 if (!time_is_within_half_ms(frame_time_ns, display_time_ns)) { 928 log_frame_time_diff(frame_time_ns, display_time_ns); 929 } ··· 933 } 934 935 void 936 - multi_compositor_latch_frame_locked(struct multi_compositor *mc, uint64_t when_ns, int64_t system_frame_id) 937 { 938 u_pa_latched(mc->upa, mc->delivered.data.frame_id, when_ns, system_frame_id); 939 } 940 941 void 942 - multi_compositor_retire_delivered_locked(struct multi_compositor *mc, uint64_t when_ns) 943 { 944 slot_clear_locked(mc, &mc->delivered); 945 }
··· 49 slot_clear_locked(struct multi_compositor *mc, struct multi_layer_slot *slot) 50 { 51 if (slot->active) { 52 + int64_t now_ns = os_monotonic_get_ns(); 53 u_pa_retired(mc->upa, slot->data.frame_id, now_ns); 54 } 55 ··· 125 xrt_result_t ret = XRT_SUCCESS; 126 127 // 100ms 128 + int64_t timeout_ns = 100 * U_TIME_1MS_IN_NS; 129 130 do { 131 ret = xrt_compositor_fence_wait(*xcf_ptr, timeout_ns); ··· 150 xrt_result_t ret = XRT_SUCCESS; 151 152 // 100ms 153 + int64_t timeout_ns = 100 * U_TIME_1MS_IN_NS; 154 155 do { 156 ret = xrt_compositor_semaphore_wait(*xcsem_ptr, value, timeout_ns); ··· 291 } 292 293 // Sample time outside of lock. 294 + int64_t now_ns = os_monotonic_get_ns(); 295 296 os_mutex_lock(&mc->msc->list_and_timing_lock); 297 u_pa_mark_gpu_done(mc->upa, frame_id, now_ns); ··· 496 static xrt_result_t 497 multi_compositor_predict_frame(struct xrt_compositor *xc, 498 int64_t *out_frame_id, 499 + int64_t *out_wake_time_ns, 500 + int64_t *out_predicted_gpu_time_ns, 501 + int64_t *out_predicted_display_time_ns, 502 + int64_t *out_predicted_display_period_ns) 503 { 504 COMP_TRACE_MARKER(); 505 506 struct multi_compositor *mc = multi_compositor(xc); 507 + int64_t now_ns = os_monotonic_get_ns(); 508 os_mutex_lock(&mc->msc->list_and_timing_lock); 509 510 u_pa_predict( // ··· 526 multi_compositor_mark_frame(struct xrt_compositor *xc, 527 int64_t frame_id, 528 enum xrt_compositor_frame_point point, 529 + int64_t when_ns) 530 { 531 COMP_TRACE_MARKER(); 532 533 struct multi_compositor *mc = multi_compositor(xc); 534 535 + int64_t now_ns = os_monotonic_get_ns(); 536 537 switch (point) { 538 case XRT_COMPOSITOR_FRAME_POINT_WOKE: ··· 549 static xrt_result_t 550 multi_compositor_wait_frame(struct xrt_compositor *xc, 551 int64_t *out_frame_id, 552 + int64_t *out_predicted_display_time_ns, 553 + int64_t *out_predicted_display_period_ns) 554 { 555 COMP_TRACE_MARKER(); 556 557 struct multi_compositor *mc = multi_compositor(xc); 558 559 int64_t frame_id = -1; 560 + int64_t wake_up_time_ns = 0; 561 + int64_t predicted_gpu_time_ns = 0; 562 563 xrt_comp_predict_frame( // 564 xc, // ··· 571 // Wait until the given wake up time. 572 u_wait_until(&mc->frame_sleeper, wake_up_time_ns); 573 574 + int64_t now_ns = os_monotonic_get_ns(); 575 576 // Signal that we woke up. 577 xrt_comp_mark_frame(xc, frame_id, XRT_COMPOSITOR_FRAME_POINT_WOKE, now_ns); ··· 589 struct multi_compositor *mc = multi_compositor(xc); 590 591 os_mutex_lock(&mc->msc->list_and_timing_lock); 592 + int64_t now_ns = os_monotonic_get_ns(); 593 u_pa_mark_point(mc->upa, frame_id, U_TIMING_POINT_BEGIN, now_ns); 594 os_mutex_unlock(&mc->msc->list_and_timing_lock); 595 ··· 602 COMP_TRACE_MARKER(); 603 604 struct multi_compositor *mc = multi_compositor(xc); 605 + int64_t now_ns = os_monotonic_get_ns(); 606 607 os_mutex_lock(&mc->msc->list_and_timing_lock); 608 u_pa_mark_discarded(mc->upa, frame_id, now_ns); ··· 617 struct multi_compositor *mc = multi_compositor(xc); 618 619 // As early as possible. 620 + int64_t now_ns = os_monotonic_get_ns(); 621 os_mutex_lock(&mc->msc->list_and_timing_lock); 622 u_pa_mark_delivered(mc->upa, data->frame_id, now_ns, data->display_time_ns); 623 os_mutex_unlock(&mc->msc->list_and_timing_lock); ··· 797 push_fence_to_wait_thread(mc, frame_id, xcf); 798 } else { 799 // Assume that the app side compositor waited.
800 + int64_t now_ns = os_monotonic_get_ns(); 801 802 os_mutex_lock(&mc->msc->list_and_timing_lock); 803 u_pa_mark_gpu_done(mc->upa, frame_id, now_ns); ··· 898 } 899 900 static void 901 + log_frame_time_diff(int64_t frame_time_ns, int64_t display_time_ns) 902 { 903 int64_t diff_ns = (int64_t)frame_time_ns - (int64_t)display_time_ns; 904 bool late = false; ··· 911 } 912 913 void 914 + multi_compositor_deliver_any_frames(struct multi_compositor *mc, int64_t display_time_ns) 915 { 916 os_mutex_lock(&mc->slot_lock); 917 ··· 923 if (time_is_greater_then_or_within_half_ms(display_time_ns, mc->scheduled.data.display_time_ns)) { 924 slot_move_and_clear_locked(mc, &mc->delivered, &mc->scheduled); 925 926 + int64_t frame_time_ns = mc->delivered.data.display_time_ns; 927 if (!time_is_within_half_ms(frame_time_ns, display_time_ns)) { 928 log_frame_time_diff(frame_time_ns, display_time_ns); 929 } ··· 933 } 934 935 void 936 + multi_compositor_latch_frame_locked(struct multi_compositor *mc, int64_t when_ns, int64_t system_frame_id) 937 { 938 u_pa_latched(mc->upa, mc->delivered.data.frame_id, when_ns, system_frame_id); 939 } 940 941 void 942 + multi_compositor_retire_delivered_locked(struct multi_compositor *mc, int64_t when_ns) 943 { 944 slot_clear_locked(mc, &mc->delivered); 945 }
+7 -7
src/xrt/compositor/multi/comp_multi_private.h
··· 165 /*! 166 * The next which the next frames to be picked up will be displayed. 167 */ 168 - uint64_t slot_next_frame_display; 169 170 /*! 171 * Currently being transferred or waited on. ··· 225 * @private @memberof multi_compositor 226 */ 227 void 228 - multi_compositor_deliver_any_frames(struct multi_compositor *mc, uint64_t display_time_ns); 229 230 /*! 231 * Makes the current delivered frame as latched, called by the render thread. ··· 235 * @private @memberof multi_compositor 236 */ 237 void 238 - multi_compositor_latch_frame_locked(struct multi_compositor *mc, uint64_t when_ns, int64_t system_frame_id); 239 240 /*! 241 * Clears and retires the delivered frame, called by the render thread. ··· 245 * @private @memberof multi_compositor 246 */ 247 void 248 - multi_compositor_retire_delivered_locked(struct multi_compositor *mc, uint64_t when_ns); 249 250 251 /* ··· 358 359 struct 360 { 361 - uint64_t predicted_display_time_ns; 362 - uint64_t predicted_display_period_ns; 363 - uint64_t diff_ns; 364 } last_timings; 365 366 //! List of active clients.
··· 165 /*! 166 * The next which the next frames to be picked up will be displayed. 167 */ 168 + int64_t slot_next_frame_display; 169 170 /*! 171 * Currently being transferred or waited on. ··· 225 * @private @memberof multi_compositor 226 */ 227 void 228 + multi_compositor_deliver_any_frames(struct multi_compositor *mc, int64_t display_time_ns); 229 230 /*! 231 * Makes the current delivered frame as latched, called by the render thread. ··· 235 * @private @memberof multi_compositor 236 */ 237 void 238 + multi_compositor_latch_frame_locked(struct multi_compositor *mc, int64_t when_ns, int64_t system_frame_id); 239 240 /*! 241 * Clears and retires the delivered frame, called by the render thread. ··· 245 * @private @memberof multi_compositor 246 */ 247 void 248 + multi_compositor_retire_delivered_locked(struct multi_compositor *mc, int64_t when_ns); 249 250 251 /* ··· 358 359 struct 360 { 361 + int64_t predicted_display_time_ns; 362 + int64_t predicted_display_period_ns; 363 + int64_t diff_ns; 364 } last_timings; 365 366 //! List of active clients.
+15 -15
src/xrt/compositor/multi/comp_multi_system.c
··· 251 } 252 253 static void 254 - transfer_layers_locked(struct multi_system_compositor *msc, uint64_t display_time_ns, int64_t system_frame_id) 255 { 256 COMP_TRACE_MARKER(); 257 ··· 260 struct multi_compositor *array[MULTI_MAX_CLIENTS] = {0}; 261 262 // To mark latching. 263 - uint64_t now_ns = os_monotonic_get_ns(); 264 265 size_t count = 0; 266 for (size_t k = 0; k < ARRAY_SIZE(array); k++) { ··· 337 } 338 339 static void 340 - broadcast_timings_to_clients(struct multi_system_compositor *msc, uint64_t predicted_display_time_ns) 341 { 342 COMP_TRACE_MARKER(); 343 ··· 359 360 static void 361 broadcast_timings_to_pacers(struct multi_system_compositor *msc, 362 - uint64_t predicted_display_time_ns, 363 - uint64_t predicted_display_period_ns, 364 - uint64_t diff_ns) 365 { 366 COMP_TRACE_MARKER(); 367 ··· 392 } 393 394 static void 395 - wait_frame(struct os_precise_sleeper *sleeper, struct xrt_compositor *xc, int64_t frame_id, uint64_t wake_up_time_ns) 396 { 397 COMP_TRACE_MARKER(); 398 399 // Wait until the given wake up time. 400 u_wait_until(sleeper, wake_up_time_ns); 401 402 - uint64_t now_ns = os_monotonic_get_ns(); 403 404 // Signal that we woke up. 405 xrt_comp_mark_frame(xc, frame_id, XRT_COMPOSITOR_FRAME_POINT_WOKE, now_ns); ··· 507 os_thread_helper_unlock(&msc->oth); 508 509 int64_t frame_id = -1; 510 - uint64_t wake_up_time_ns = 0; 511 - uint64_t predicted_gpu_time_ns = 0; 512 - uint64_t predicted_display_time_ns = 0; 513 - uint64_t predicted_display_period_ns = 0; 514 515 // Get the information for the next frame. 516 xrt_comp_predict_frame( // ··· 527 // Now we can wait. 528 wait_frame(&sleeper, xc, frame_id, wake_up_time_ns); 529 530 - uint64_t now_ns = os_monotonic_get_ns(); 531 - uint64_t diff_ns = predicted_display_time_ns - now_ns; 532 533 // Now we know the diff, broadcast to pacers. 
534 broadcast_timings_to_pacers(msc, predicted_display_time_ns, predicted_display_period_ns, diff_ns); ··· 630 static xrt_result_t 631 system_compositor_notify_loss_pending(struct xrt_system_compositor *xsc, 632 struct xrt_compositor *xc, 633 - uint64_t loss_time_ns) 634 { 635 struct multi_system_compositor *msc = multi_system_compositor(xsc); 636 struct multi_compositor *mc = multi_compositor(xc);
··· 251 } 252 253 static void 254 + transfer_layers_locked(struct multi_system_compositor *msc, int64_t display_time_ns, int64_t system_frame_id) 255 { 256 COMP_TRACE_MARKER(); 257 ··· 260 struct multi_compositor *array[MULTI_MAX_CLIENTS] = {0}; 261 262 // To mark latching. 263 + int64_t now_ns = os_monotonic_get_ns(); 264 265 size_t count = 0; 266 for (size_t k = 0; k < ARRAY_SIZE(array); k++) { ··· 337 } 338 339 static void 340 + broadcast_timings_to_clients(struct multi_system_compositor *msc, int64_t predicted_display_time_ns) 341 { 342 COMP_TRACE_MARKER(); 343 ··· 359 360 static void 361 broadcast_timings_to_pacers(struct multi_system_compositor *msc, 362 + int64_t predicted_display_time_ns, 363 + int64_t predicted_display_period_ns, 364 + int64_t diff_ns) 365 { 366 COMP_TRACE_MARKER(); 367 ··· 392 } 393 394 static void 395 + wait_frame(struct os_precise_sleeper *sleeper, struct xrt_compositor *xc, int64_t frame_id, int64_t wake_up_time_ns) 396 { 397 COMP_TRACE_MARKER(); 398 399 // Wait until the given wake up time. 400 u_wait_until(sleeper, wake_up_time_ns); 401 402 + int64_t now_ns = os_monotonic_get_ns(); 403 404 // Signal that we woke up. 405 xrt_comp_mark_frame(xc, frame_id, XRT_COMPOSITOR_FRAME_POINT_WOKE, now_ns); ··· 507 os_thread_helper_unlock(&msc->oth); 508 509 int64_t frame_id = -1; 510 + int64_t wake_up_time_ns = 0; 511 + int64_t predicted_gpu_time_ns = 0; 512 + int64_t predicted_display_time_ns = 0; 513 + int64_t predicted_display_period_ns = 0; 514 515 // Get the information for the next frame. 516 xrt_comp_predict_frame( // ··· 527 // Now we can wait. 528 wait_frame(&sleeper, xc, frame_id, wake_up_time_ns); 529 530 + int64_t now_ns = os_monotonic_get_ns(); 531 + int64_t diff_ns = predicted_display_time_ns - now_ns; 532 533 // Now we know the diff, broadcast to pacers. 
534 broadcast_timings_to_pacers(msc, predicted_display_time_ns, predicted_display_period_ns, diff_ns); ··· 630 static xrt_result_t 631 system_compositor_notify_loss_pending(struct xrt_system_compositor *xsc, 632 struct xrt_compositor *xc, 633 + int64_t loss_time_ns) 634 { 635 struct multi_system_compositor *msc = multi_system_compositor(xsc); 636 struct multi_compositor *mc = multi_compositor(xc);
+11 -11
src/xrt/compositor/null/null_compositor.c
··· 341 static xrt_result_t 342 null_compositor_predict_frame(struct xrt_compositor *xc, 343 int64_t *out_frame_id, 344 - uint64_t *out_wake_time_ns, 345 - uint64_t *out_predicted_gpu_time_ns, 346 - uint64_t *out_predicted_display_time_ns, 347 - uint64_t *out_predicted_display_period_ns) 348 { 349 COMP_TRACE_MARKER(); 350 351 struct null_compositor *c = null_compositor(xc); 352 NULL_TRACE(c, "PREDICT_FRAME"); 353 354 - uint64_t now_ns = os_monotonic_get_ns(); 355 - uint64_t null_desired_present_time_ns = 0; 356 - uint64_t null_present_slop_ns = 0; 357 - uint64_t null_min_display_period_ns = 0; 358 359 u_pc_predict( // 360 c->upc, // upc ··· 374 null_compositor_mark_frame(struct xrt_compositor *xc, 375 int64_t frame_id, 376 enum xrt_compositor_frame_point point, 377 - uint64_t when_ns) 378 { 379 COMP_TRACE_MARKER(); 380 ··· 442 443 // When we begin rendering. 444 { 445 - uint64_t now_ns = os_monotonic_get_ns(); 446 u_pc_mark_point(c->upc, U_TIMING_POINT_BEGIN, frame_id, now_ns); 447 } 448 449 // When we are submitting to the GPU. 450 { 451 - uint64_t now_ns = os_monotonic_get_ns(); 452 u_pc_mark_point(c->upc, U_TIMING_POINT_SUBMIT_BEGIN, frame_id, now_ns); 453 454 now_ns = os_monotonic_get_ns();
··· 341 static xrt_result_t 342 null_compositor_predict_frame(struct xrt_compositor *xc, 343 int64_t *out_frame_id, 344 + int64_t *out_wake_time_ns, 345 + int64_t *out_predicted_gpu_time_ns, 346 + int64_t *out_predicted_display_time_ns, 347 + int64_t *out_predicted_display_period_ns) 348 { 349 COMP_TRACE_MARKER(); 350 351 struct null_compositor *c = null_compositor(xc); 352 NULL_TRACE(c, "PREDICT_FRAME"); 353 354 + int64_t now_ns = os_monotonic_get_ns(); 355 + int64_t null_desired_present_time_ns = 0; 356 + int64_t null_present_slop_ns = 0; 357 + int64_t null_min_display_period_ns = 0; 358 359 u_pc_predict( // 360 c->upc, // upc ··· 374 null_compositor_mark_frame(struct xrt_compositor *xc, 375 int64_t frame_id, 376 enum xrt_compositor_frame_point point, 377 + int64_t when_ns) 378 { 379 COMP_TRACE_MARKER(); 380 ··· 442 443 // When we begin rendering. 444 { 445 + int64_t now_ns = os_monotonic_get_ns(); 446 u_pc_mark_point(c->upc, U_TIMING_POINT_BEGIN, frame_id, now_ns); 447 } 448 449 // When we are submitting to the GPU. 450 { 451 + int64_t now_ns = os_monotonic_get_ns(); 452 u_pc_mark_point(c->upc, U_TIMING_POINT_SUBMIT_BEGIN, frame_id, now_ns); 453 454 now_ns = os_monotonic_get_ns();
+5 -5
src/xrt/compositor/util/comp_base.c
··· 207 static xrt_result_t 208 base_wait_frame(struct xrt_compositor *xc, 209 int64_t *out_frame_id, 210 - uint64_t *out_predicted_display_time_ns, 211 - uint64_t *out_predicted_display_period_ns) 212 { 213 COMP_TRACE_MARKER(); 214 215 struct comp_base *cb = comp_base(xc); 216 217 int64_t frame_id = -1; 218 - uint64_t wake_up_time_ns = 0; 219 - uint64_t predicted_gpu_time_ns = 0; 220 221 xrt_comp_predict_frame( // 222 xc, // ··· 229 // Wait until the given wake up time. 230 u_wait_until(&cb->sleeper, wake_up_time_ns); 231 232 - uint64_t now_ns = os_monotonic_get_ns(); 233 234 // Signal that we woke up. 235 xrt_comp_mark_frame(xc, frame_id, XRT_COMPOSITOR_FRAME_POINT_WOKE, now_ns);
··· 207 static xrt_result_t 208 base_wait_frame(struct xrt_compositor *xc, 209 int64_t *out_frame_id, 210 + int64_t *out_predicted_display_time_ns, 211 + int64_t *out_predicted_display_period_ns) 212 { 213 COMP_TRACE_MARKER(); 214 215 struct comp_base *cb = comp_base(xc); 216 217 int64_t frame_id = -1; 218 + int64_t wake_up_time_ns = 0; 219 + int64_t predicted_gpu_time_ns = 0; 220 221 xrt_comp_predict_frame( // 222 xc, // ··· 229 // Wait until the given wake up time. 230 u_wait_until(&cb->sleeper, wake_up_time_ns); 231 232 + int64_t now_ns = os_monotonic_get_ns(); 233 234 // Signal that we woke up. 235 xrt_comp_mark_frame(xc, frame_id, XRT_COMPOSITOR_FRAME_POINT_WOKE, now_ns);
+6 -6
src/xrt/compositor/util/comp_swapchain.c
··· 106 } 107 108 static xrt_result_t 109 - swapchain_wait_image(struct xrt_swapchain *xsc, uint64_t timeout_ns, uint32_t index) 110 { 111 struct comp_swapchain *sc = comp_swapchain(xsc); 112 ··· 124 } 125 126 // on windows pthread_cond_timedwait can not be used with monotonic time 127 - uint64_t start_wait_rt = os_realtime_get_ns(); 128 129 - uint64_t end_wait_rt; 130 // don't wrap on big or indefinite timeout 131 - if (start_wait_rt > UINT64_MAX - timeout_ns) { 132 - end_wait_rt = UINT64_MAX; 133 } else { 134 end_wait_rt = start_wait_rt + timeout_ns; 135 } ··· 145 // use pthread_cond_timedwait to implement timeout behavior 146 ret = pthread_cond_timedwait(&sc->images[index].use_cond, &sc->images[index].use_mutex.mutex, &spec); 147 148 - uint64_t now_rt = os_realtime_get_ns(); 149 double diff = time_ns_to_ms_f(now_rt - start_wait_rt); 150 151 if (ret == 0) {
··· 106 } 107 108 static xrt_result_t 109 + swapchain_wait_image(struct xrt_swapchain *xsc, int64_t timeout_ns, uint32_t index) 110 { 111 struct comp_swapchain *sc = comp_swapchain(xsc); 112 ··· 124 } 125 126 // on windows pthread_cond_timedwait can not be used with monotonic time 127 + int64_t start_wait_rt = os_realtime_get_ns(); 128 129 + int64_t end_wait_rt; 130 // don't wrap on big or indefinite timeout 131 + if (start_wait_rt > INT64_MAX - timeout_ns) { 132 + end_wait_rt = INT64_MAX; 133 } else { 134 end_wait_rt = start_wait_rt + timeout_ns; 135 } ··· 145 // use pthread_cond_timedwait to implement timeout behavior 146 ret = pthread_cond_timedwait(&sc->images[index].use_cond, &sc->images[index].use_mutex.mutex, &spec); 147 148 + int64_t now_rt = os_realtime_get_ns(); 149 double diff = time_ns_to_ms_f(now_rt - start_wait_rt); 150 151 if (ret == 0) {
+19 -22
src/xrt/include/xrt/xrt_compositor.h
··· 589 * @param timeout_ns Timeout in nanoseconds, 590 * @param index Image index to wait for. 591 */ 592 - xrt_result_t (*wait_image)(struct xrt_swapchain *xsc, uint64_t timeout_ns, uint32_t index); 593 594 /*! 595 * Do any barrier transitions to and from the application. ··· 686 * @public @memberof xrt_swapchain 687 */ 688 static inline xrt_result_t 689 - xrt_swapchain_wait_image(struct xrt_swapchain *xsc, uint64_t timeout_ns, uint32_t index) 690 { 691 return xsc->wait_image(xsc, timeout_ns, index); 692 } ··· 725 */ 726 727 /*! 728 - * Compositor fence used for syncornization. 729 */ 730 struct xrt_compositor_fence 731 { ··· 1104 */ 1105 xrt_result_t (*predict_frame)(struct xrt_compositor *xc, 1106 int64_t *out_frame_id, 1107 - uint64_t *out_wake_time_ns, 1108 - uint64_t *out_predicted_gpu_time_ns, 1109 - uint64_t *out_predicted_display_time_ns, 1110 - uint64_t *out_predicted_display_period_ns); 1111 1112 /*! 1113 * This function and @ref predict_frame function calls are a alternative to ··· 1124 xrt_result_t (*mark_frame)(struct xrt_compositor *xc, 1125 int64_t frame_id, 1126 enum xrt_compositor_frame_point point, 1127 - uint64_t when_ns); 1128 1129 /*! 1130 * See xrWaitFrame. ··· 1147 */ 1148 xrt_result_t (*wait_frame)(struct xrt_compositor *xc, 1149 int64_t *out_frame_id, 1150 - uint64_t *out_predicted_display_time, 1151 - uint64_t *out_predicted_display_period); 1152 1153 /*! 1154 * See xrBeginFrame. 
··· 1588 static inline xrt_result_t 1589 xrt_comp_predict_frame(struct xrt_compositor *xc, 1590 int64_t *out_frame_id, 1591 - uint64_t *out_wake_time_ns, 1592 - uint64_t *out_predicted_gpu_time_ns, 1593 - uint64_t *out_predicted_display_time_ns, 1594 - uint64_t *out_predicted_display_period_ns) 1595 { 1596 return xc->predict_frame( // 1597 xc, // ··· 1610 * @public @memberof xrt_compositor 1611 */ 1612 static inline xrt_result_t 1613 - xrt_comp_mark_frame(struct xrt_compositor *xc, 1614 - int64_t frame_id, 1615 - enum xrt_compositor_frame_point point, 1616 - uint64_t when_ns) 1617 { 1618 return xc->mark_frame(xc, frame_id, point, when_ns); 1619 } ··· 1628 static inline xrt_result_t 1629 xrt_comp_wait_frame(struct xrt_compositor *xc, 1630 int64_t *out_frame_id, 1631 - uint64_t *out_predicted_display_time, 1632 - uint64_t *out_predicted_display_period) 1633 { 1634 return xc->wait_frame(xc, out_frame_id, out_predicted_display_time, out_predicted_display_period); 1635 } ··· 2380 */ 2381 xrt_result_t (*notify_loss_pending)(struct xrt_system_compositor *xsc, 2382 struct xrt_compositor *xc, 2383 - uint64_t loss_time_ns); 2384 2385 /*! 2386 * Notify this client/session if the compositor lost the ability of rendering. ··· 2517 * @public @memberof xrt_system_compositor 2518 */ 2519 static inline xrt_result_t 2520 - xrt_syscomp_notify_loss_pending(struct xrt_system_compositor *xsc, struct xrt_compositor *xc, uint64_t loss_time_ns) 2521 { 2522 if (xsc->xmcc == NULL) { 2523 return XRT_ERROR_MULTI_SESSION_NOT_IMPLEMENTED;
··· 589 * @param timeout_ns Timeout in nanoseconds, 590 * @param index Image index to wait for. 591 */ 592 + xrt_result_t (*wait_image)(struct xrt_swapchain *xsc, int64_t timeout_ns, uint32_t index); 593 594 /*! 595 * Do any barrier transitions to and from the application. ··· 686 * @public @memberof xrt_swapchain 687 */ 688 static inline xrt_result_t 689 + xrt_swapchain_wait_image(struct xrt_swapchain *xsc, int64_t timeout_ns, uint32_t index) 690 { 691 return xsc->wait_image(xsc, timeout_ns, index); 692 } ··· 725 */ 726 727 /*! 728 + * Compositor fence used for synchronization. 729 */ 730 struct xrt_compositor_fence 731 { ··· 1104 */ 1105 xrt_result_t (*predict_frame)(struct xrt_compositor *xc, 1106 int64_t *out_frame_id, 1107 + int64_t *out_wake_time_ns, 1108 + int64_t *out_predicted_gpu_time_ns, 1109 + int64_t *out_predicted_display_time_ns, 1110 + int64_t *out_predicted_display_period_ns); 1111 1112 /*! 1113 * This function and @ref predict_frame function calls are a alternative to ··· 1124 xrt_result_t (*mark_frame)(struct xrt_compositor *xc, 1125 int64_t frame_id, 1126 enum xrt_compositor_frame_point point, 1127 + int64_t when_ns); 1128 1129 /*! 1130 * See xrWaitFrame. ··· 1147 */ 1148 xrt_result_t (*wait_frame)(struct xrt_compositor *xc, 1149 int64_t *out_frame_id, 1150 + int64_t *out_predicted_display_time, 1151 + int64_t *out_predicted_display_period); 1152 1153 /*! 1154 * See xrBeginFrame. 
··· 1588 static inline xrt_result_t 1589 xrt_comp_predict_frame(struct xrt_compositor *xc, 1590 int64_t *out_frame_id, 1591 + int64_t *out_wake_time_ns, 1592 + int64_t *out_predicted_gpu_time_ns, 1593 + int64_t *out_predicted_display_time_ns, 1594 + int64_t *out_predicted_display_period_ns) 1595 { 1596 return xc->predict_frame( // 1597 xc, // ··· 1610 * @public @memberof xrt_compositor 1611 */ 1612 static inline xrt_result_t 1613 + xrt_comp_mark_frame(struct xrt_compositor *xc, int64_t frame_id, enum xrt_compositor_frame_point point, int64_t when_ns) 1614 { 1615 return xc->mark_frame(xc, frame_id, point, when_ns); 1616 } ··· 1625 static inline xrt_result_t 1626 xrt_comp_wait_frame(struct xrt_compositor *xc, 1627 int64_t *out_frame_id, 1628 + int64_t *out_predicted_display_time, 1629 + int64_t *out_predicted_display_period) 1630 { 1631 return xc->wait_frame(xc, out_frame_id, out_predicted_display_time, out_predicted_display_period); 1632 } ··· 2377 */ 2378 xrt_result_t (*notify_loss_pending)(struct xrt_system_compositor *xsc, 2379 struct xrt_compositor *xc, 2380 + int64_t loss_time_ns); 2381 2382 /*! 2383 * Notify this client/session if the compositor lost the ability of rendering. ··· 2514 * @public @memberof xrt_system_compositor 2515 */ 2516 static inline xrt_result_t 2517 + xrt_syscomp_notify_loss_pending(struct xrt_system_compositor *xsc, struct xrt_compositor *xc, int64_t loss_time_ns) 2518 { 2519 if (xsc->xmcc == NULL) { 2520 return XRT_ERROR_MULTI_SESSION_NOT_IMPLEMENTED;
+6 -6
src/xrt/ipc/client/ipc_client_compositor.c
··· 183 } 184 185 static xrt_result_t 186 - ipc_compositor_swapchain_wait_image(struct xrt_swapchain *xsc, uint64_t timeout_ns, uint32_t index) 187 { 188 struct ipc_client_swapchain *ics = ipc_client_swapchain(xsc); 189 struct ipc_client_compositor *icc = ics->icc; ··· 522 static xrt_result_t 523 ipc_compositor_wait_frame(struct xrt_compositor *xc, 524 int64_t *out_frame_id, 525 - uint64_t *out_predicted_display_time, 526 - uint64_t *out_predicted_display_period) 527 { 528 IPC_TRACE_MARKER(); 529 struct ipc_client_compositor *icc = ipc_client_compositor(xc); 530 xrt_result_t xret; 531 532 int64_t frame_id = -1; 533 - uint64_t wake_up_time_ns = 0; 534 - uint64_t predicted_display_time = 0; 535 - uint64_t predicted_display_period = 0; 536 537 xret = ipc_call_compositor_predict_frame( // 538 icc->ipc_c, // Connection
··· 183 } 184 185 static xrt_result_t 186 + ipc_compositor_swapchain_wait_image(struct xrt_swapchain *xsc, int64_t timeout_ns, uint32_t index) 187 { 188 struct ipc_client_swapchain *ics = ipc_client_swapchain(xsc); 189 struct ipc_client_compositor *icc = ics->icc; ··· 522 static xrt_result_t 523 ipc_compositor_wait_frame(struct xrt_compositor *xc, 524 int64_t *out_frame_id, 525 + int64_t *out_predicted_display_time, 526 + int64_t *out_predicted_display_period) 527 { 528 IPC_TRACE_MARKER(); 529 struct ipc_client_compositor *icc = ipc_client_compositor(xc); 530 xrt_result_t xret; 531 532 int64_t frame_id = -1; 533 + int64_t wake_up_time_ns = 0; 534 + int64_t predicted_display_time = 0; 535 + int64_t predicted_display_period = 0; 536 537 xret = ipc_call_compositor_predict_frame( // 538 icc->ipc_c, // Connection
+5 -5
src/xrt/ipc/server/ipc_server_handler.c
··· 861 xrt_result_t 862 ipc_handle_compositor_predict_frame(volatile struct ipc_client_state *ics, 863 int64_t *out_frame_id, 864 - uint64_t *out_wake_up_time_ns, 865 - uint64_t *out_predicted_display_time_ns, 866 - uint64_t *out_predicted_display_period_ns) 867 { 868 IPC_TRACE_MARKER(); 869 ··· 877 */ 878 ipc_server_activate_session(ics); 879 880 - uint64_t gpu_time_ns = 0; 881 return xrt_comp_predict_frame( // 882 ics->xc, // 883 out_frame_id, // ··· 1656 } 1657 1658 xrt_result_t 1659 - ipc_handle_swapchain_wait_image(volatile struct ipc_client_state *ics, uint32_t id, uint64_t timeout_ns, uint32_t index) 1660 { 1661 if (ics->xc == NULL) { 1662 return XRT_ERROR_IPC_SESSION_NOT_CREATED;
··· 861 xrt_result_t 862 ipc_handle_compositor_predict_frame(volatile struct ipc_client_state *ics, 863 int64_t *out_frame_id, 864 + int64_t *out_wake_up_time_ns, 865 + int64_t *out_predicted_display_time_ns, 866 + int64_t *out_predicted_display_period_ns) 867 { 868 IPC_TRACE_MARKER(); 869 ··· 877 */ 878 ipc_server_activate_session(ics); 879 880 + int64_t gpu_time_ns = 0; 881 return xrt_comp_predict_frame( // 882 ics->xc, // 883 out_frame_id, // ··· 1656 } 1657 1658 xrt_result_t 1659 + ipc_handle_swapchain_wait_image(volatile struct ipc_client_state *ics, uint32_t id, int64_t timeout_ns, uint32_t index) 1660 { 1661 if (ics->xc == NULL) { 1662 return XRT_ERROR_IPC_SESSION_NOT_CREATED;
+4 -4
src/xrt/ipc/shared/proto.json
··· 215 "compositor_predict_frame": { 216 "out": [ 217 {"name": "frame_id", "type": "int64_t"}, 218 - {"name": "wake_up_time", "type": "uint64_t"}, 219 - {"name": "predicted_display_time", "type": "uint64_t"}, 220 - {"name": "predicted_display_period", "type": "uint64_t"} 221 ] 222 }, 223 ··· 345 "swapchain_wait_image": { 346 "in": [ 347 {"name": "id", "type": "uint32_t"}, 348 - {"name": "timeout_ns", "type": "uint64_t"}, 349 {"name": "index", "type": "uint32_t"} 350 ] 351 },
··· 215 "compositor_predict_frame": { 216 "out": [ 217 {"name": "frame_id", "type": "int64_t"}, 218 + {"name": "wake_up_time", "type": "int64_t"}, 219 + {"name": "predicted_display_time", "type": "int64_t"}, 220 + {"name": "predicted_display_period", "type": "int64_t"} 221 ] 222 }, 223 ··· 345 "swapchain_wait_image": { 346 "in": [ 347 {"name": "id", "type": "uint32_t"}, 348 + {"name": "timeout_ns", "type": "int64_t"}, 349 {"name": "index", "type": "uint32_t"} 350 ] 351 },
+7 -7
src/xrt/state_trackers/oxr/oxr_session.c
··· 695 do_wait_frame_and_checks(struct oxr_logger *log, 696 struct oxr_session *sess, 697 int64_t *out_frame_id, 698 - uint64_t *out_predicted_display_time, 699 - uint64_t *out_predicted_display_period, 700 XrTime *out_converted_time) 701 { 702 assert(sess->compositor != NULL); 703 704 int64_t frame_id = -1; 705 - uint64_t predicted_display_time = 0; 706 - uint64_t predicted_display_period = 0; 707 708 xrt_result_t xret = xrt_comp_wait_frame( // 709 sess->compositor, // compositor ··· 765 } 766 767 int64_t frame_id = -1; 768 - uint64_t predicted_display_time = 0; 769 - uint64_t predicted_display_period = 0; 770 XrTime converted_time = 0; 771 772 XrResult ret = do_wait_frame_and_checks( // ··· 811 } 812 813 if (sess->frame_timing_wait_sleep_ms > 0) { 814 - uint64_t sleep_ns = U_TIME_1MS_IN_NS * sess->frame_timing_wait_sleep_ms; 815 os_precise_sleeper_nanosleep(&sess->sleeper, sleep_ns); 816 } 817
··· 695 do_wait_frame_and_checks(struct oxr_logger *log, 696 struct oxr_session *sess, 697 int64_t *out_frame_id, 698 + int64_t *out_predicted_display_time, 699 + int64_t *out_predicted_display_period, 700 XrTime *out_converted_time) 701 { 702 assert(sess->compositor != NULL); 703 704 int64_t frame_id = -1; 705 + int64_t predicted_display_time = 0; 706 + int64_t predicted_display_period = 0; 707 708 xrt_result_t xret = xrt_comp_wait_frame( // 709 sess->compositor, // compositor ··· 765 } 766 767 int64_t frame_id = -1; 768 + int64_t predicted_display_time = 0; 769 + int64_t predicted_display_period = 0; 770 XrTime converted_time = 0; 771 772 XrResult ret = do_wait_frame_and_checks( // ··· 811 } 812 813 if (sess->frame_timing_wait_sleep_ms > 0) { 814 + int64_t sleep_ns = U_TIME_1MS_IN_NS * sess->frame_timing_wait_sleep_ms; 815 os_precise_sleeper_nanosleep(&sess->sleeper, sleep_ns); 816 } 817
+11 -11
src/xrt/targets/sdl_test/sdl_compositor.c
··· 347 static xrt_result_t 348 sdl_compositor_predict_frame(struct xrt_compositor *xc, 349 int64_t *out_frame_id, 350 - uint64_t *out_wake_time_ns, 351 - uint64_t *out_predicted_gpu_time_ns, 352 - uint64_t *out_predicted_display_time_ns, 353 - uint64_t *out_predicted_display_period_ns) 354 { 355 COMP_TRACE_MARKER(); 356 ··· 358 359 SC_TRACE(c, "PREDICT_FRAME"); 360 361 - uint64_t now_ns = os_monotonic_get_ns(); 362 - uint64_t null_desired_present_time_ns = 0; 363 - uint64_t null_present_slop_ns = 0; 364 - uint64_t null_min_display_period_ns = 0; 365 366 u_pc_predict( // 367 c->upc, // upc ··· 381 sdl_compositor_mark_frame(struct xrt_compositor *xc, 382 int64_t frame_id, 383 enum xrt_compositor_frame_point point, 384 - uint64_t when_ns) 385 { 386 COMP_TRACE_MARKER(); 387 ··· 452 453 // When we begin rendering. 454 { 455 - uint64_t now_ns = os_monotonic_get_ns(); 456 u_pc_mark_point(c->upc, U_TIMING_POINT_BEGIN, frame_id, now_ns); 457 } 458 ··· 461 462 // When we are submitting to the GPU. 463 { 464 - uint64_t now_ns = os_monotonic_get_ns(); 465 u_pc_mark_point(c->upc, U_TIMING_POINT_SUBMIT_BEGIN, frame_id, now_ns); 466 467 now_ns = os_monotonic_get_ns();
··· 347 static xrt_result_t 348 sdl_compositor_predict_frame(struct xrt_compositor *xc, 349 int64_t *out_frame_id, 350 + int64_t *out_wake_time_ns, 351 + int64_t *out_predicted_gpu_time_ns, 352 + int64_t *out_predicted_display_time_ns, 353 + int64_t *out_predicted_display_period_ns) 354 { 355 COMP_TRACE_MARKER(); 356 ··· 358 359 SC_TRACE(c, "PREDICT_FRAME"); 360 361 + int64_t now_ns = os_monotonic_get_ns(); 362 + int64_t null_desired_present_time_ns = 0; 363 + int64_t null_present_slop_ns = 0; 364 + int64_t null_min_display_period_ns = 0; 365 366 u_pc_predict( // 367 c->upc, // upc ··· 381 sdl_compositor_mark_frame(struct xrt_compositor *xc, 382 int64_t frame_id, 383 enum xrt_compositor_frame_point point, 384 + int64_t when_ns) 385 { 386 COMP_TRACE_MARKER(); 387 ··· 452 453 // When we begin rendering. 454 { 455 + int64_t now_ns = os_monotonic_get_ns(); 456 u_pc_mark_point(c->upc, U_TIMING_POINT_BEGIN, frame_id, now_ns); 457 } 458 ··· 461 462 // When we are submitting to the GPU. 463 { 464 + int64_t now_ns = os_monotonic_get_ns(); 465 u_pc_mark_point(c->upc, U_TIMING_POINT_SUBMIT_BEGIN, frame_id, now_ns); 466 467 now_ns = os_monotonic_get_ns();