// The open source OpenXR runtime
1// Copyright 2025, Collabora, Ltd.
2// SPDX-License-Identifier: BSL-1.0
3/*!
4 * @file
5 * @brief Helper to implement @ref xrt_future,
6 * A basic CPU based future implementation.
7 * @author Korcan Hussein <korcan.hussein@collabora.com>
8 * @ingroup aux_util
9 */
#include "u_future.h"

#include "util/u_debug.h"
#include "util/u_logging.h"

#include "os/os_threading.h"

#include <errno.h>

#include <atomic>
#include <cassert>
#include <cstdint>
#include <memory>
20
21DEBUG_GET_ONCE_LOG_OPTION(log_level_future, "U_FUTURE_LOG", U_LOGGING_WARN)
22
23#define UFT_LOG_T(...) U_LOG_IFL_T(debug_get_log_option_log_level_future(), __VA_ARGS__)
24#define UFT_LOG_D(...) U_LOG_IFL_D(debug_get_log_option_log_level_future(), __VA_ARGS__)
25#define UFT_LOG_I(...) U_LOG_IFL_I(debug_get_log_option_log_level_future(), __VA_ARGS__)
26#define UFT_LOG_W(...) U_LOG_IFL_W(debug_get_log_option_log_level_future(), __VA_ARGS__)
27#define UFT_LOG_E(...) U_LOG_IFL_E(debug_get_log_option_log_level_future(), __VA_ARGS__)
28
29#define U_FUTURE_CLEANUP_TIMEOUT_NS (3000000000LL) // 3 seconds
30
31/*!
32 * A helper to implement a @ref xrt_future,
33 * a basic CPU based future implementation
34 *
35 * @ingroup aux_util
36 * @implements xrt_future
37 */
struct u_future
{
	//! Must be the first member: pointers to @ref u_future are cast to/from xrt_future throughout this file.
	struct xrt_future base;
	//! Serializes state transitions and guards @ref value; paired with @ref cv for blocking waits.
	struct os_mutex mtx;
	//! Broadcast whenever the future leaves the pending state (complete or cancel).
	struct os_cond cv;
	//! Lifecycle state; atomic so it can be polled without taking @ref mtx.
	std::atomic<xrt_future_state_t> state{XRT_FUTURE_STATE_PENDING};
	//! Completion result code; only meaningful once @ref state is no longer pending.
	std::atomic<xrt_result_t> result{XRT_SUCCESS};
	//! Completion payload; only valid when state is ready and result is XRT_SUCCESS.
	struct xrt_future_value value = XRT_NULL_FUTURE_VALUE;
};
47
48static inline xrt_result_t
49u_future_get_xrt_result(const struct u_future *uft)
50{
51 assert(uft != NULL);
52 return uft->result.load(std::memory_order::acquire);
53}
54
55static inline xrt_future_state_t
56u_future_get_state_priv(const struct u_future *uft)
57{
58 assert(uft != NULL);
59 return uft->state.load(std::memory_order::acquire);
60}
61
62static inline void
63u_future_set_xrt_result(struct u_future *uft, const xrt_result_t result)
64{
65 assert(uft != NULL);
66 uft->result.store(result, std::memory_order::release);
67}
68
69static inline void
70u_future_set_state(struct u_future *uft, const xrt_future_state_t new_state)
71{
72 assert(uft != NULL);
73 uft->state.store(new_state, std::memory_order::release);
74}
75
76//! internal helper only, does not atomically set both.
77static inline void
78u_future_set_state_and_xrt_result(struct u_future *uft, const xrt_future_state_t new_state, const xrt_result_t result)
79{
80 u_future_set_xrt_result(uft, result);
81 u_future_set_state(uft, new_state);
82}
83
84static inline xrt_result_t
85u_future_get_state(const struct xrt_future *xft, enum xrt_future_state *out_state)
86{
87 const struct u_future *uft = (const struct u_future *)xft;
88 if (uft == NULL || out_state == NULL) {
89 return XRT_ERROR_INVALID_ARGUMENT;
90 }
91 *out_state = u_future_get_state_priv(uft);
92 return XRT_SUCCESS;
93}
94
95static xrt_result_t
96u_future_get_result(const struct xrt_future *xft, struct xrt_future_result *out_result)
97{
98 const struct u_future *uft = (const struct u_future *)xft;
99 if (uft == NULL || out_result == NULL) {
100 return XRT_ERROR_INVALID_ARGUMENT;
101 }
102
103 struct os_mutex *mtx = (struct os_mutex *)&uft->mtx;
104 os_mutex_lock(mtx);
105
106 const xrt_future_state_t curr_state = u_future_get_state_priv(uft);
107 if (curr_state == XRT_FUTURE_STATE_PENDING) {
108 os_mutex_unlock(mtx);
109 return XRT_ERROR_FUTURE_RESULT_NOT_READY;
110 }
111
112 out_result->result = u_future_get_xrt_result(uft);
113 if (out_result->result == XRT_SUCCESS && //
114 curr_state == XRT_FUTURE_STATE_READY) {
115 out_result->value = uft->value;
116 }
117
118 os_mutex_unlock(mtx);
119 return XRT_SUCCESS;
120}
121
122static xrt_result_t
123u_future_cancel(struct xrt_future *xft)
124{
125 struct u_future *uft = (struct u_future *)xft;
126 if (uft == NULL) {
127 return XRT_ERROR_INVALID_ARGUMENT;
128 }
129
130 os_mutex_lock(&uft->mtx);
131 if (u_future_get_state_priv(uft) == XRT_FUTURE_STATE_PENDING) {
132 u_future_set_state_and_xrt_result(uft, XRT_FUTURE_STATE_CANCELLED, XRT_OPERATION_CANCELLED);
133 os_cond_broadcast(&uft->cv);
134 }
135 os_mutex_unlock(&uft->mtx);
136 return XRT_SUCCESS;
137}
138
/*!
 * @ref xrt_future::wait implementation: block until the future leaves the
 * pending state or @p timeout_ns elapses. A negative timeout waits forever.
 * Returns XRT_TIMEOUT if still pending when the deadline passes, otherwise
 * the future's result code.
 */
static xrt_result_t
u_future_wait(struct xrt_future *xft, int64_t timeout_ns)
{
	struct u_future *uft = (struct u_future *)xft;
	if (uft == NULL) {
		return XRT_ERROR_INVALID_ARGUMENT;
	}

	// Negative means "no timeout": clamp to the far future.
	if (timeout_ns < 0) {
		timeout_ns = INT64_MAX;
	}

	// on windows pthread_cond_timedwait can not be used with monotonic time
	const int64_t start_wait_rt = os_realtime_get_ns();
	// Saturate instead of overflowing when computing the absolute deadline.
	const int64_t end_wait_rt =
	    (start_wait_rt > (INT64_MAX - timeout_ns)) ? INT64_MAX : (start_wait_rt + timeout_ns);

	struct timespec ts = {};
	os_ns_to_timespec(end_wait_rt, &ts);

	xrt_future_state_t curr_state = XRT_FUTURE_STATE_PENDING;

	os_mutex_lock(&uft->mtx);

	while ((curr_state = u_future_get_state_priv(uft)) == XRT_FUTURE_STATE_PENDING) {
		// Wait on the raw pthread handles: ts is an absolute realtime deadline.
		const int wait_res = pthread_cond_timedwait(&uft->cv.cond, &uft->mtx.mutex, &ts);
		if (wait_res == ETIMEDOUT) {
			// Only give up once the deadline has genuinely passed,
			// guarding against early/spurious timeout returns.
			if (os_realtime_get_ns() >= end_wait_rt) {
				// final state check - might have completed during timeout handling
				curr_state = u_future_get_state_priv(uft);
				break;
			}
		} else if (wait_res != 0) {
			// Unexpected wait error; bail out. Reported as XRT_TIMEOUT
			// below if the future is still pending.
			break;
		}
	}

	os_mutex_unlock(&uft->mtx);

	if (curr_state == XRT_FUTURE_STATE_PENDING) {
		return XRT_TIMEOUT;
	}
	// State is final at this point, the atomic result is safe to read unlocked.
	return u_future_get_xrt_result(uft);
}
184
185static inline xrt_result_t
186u_future_is_cancel_requested(const struct xrt_future *xft, bool *out_request_cancel)
187{
188 const struct u_future *uft = (const struct u_future *)xft;
189 if (uft == NULL || out_request_cancel == NULL) {
190 return XRT_ERROR_INVALID_ARGUMENT;
191 }
192 const xrt_future_state_t curr_state = u_future_get_state_priv(uft);
193 *out_request_cancel = curr_state == XRT_FUTURE_STATE_CANCELLED;
194 return XRT_SUCCESS;
195}
196
197static xrt_result_t
198u_future_complete(struct xrt_future *xft, const struct xrt_future_result *ft_result)
199{
200 struct u_future *uft = (struct u_future *)xft;
201 if (uft == NULL || ft_result == NULL) {
202 return XRT_ERROR_INVALID_ARGUMENT;
203 }
204
205 os_mutex_lock(&uft->mtx);
206 const xrt_future_state_t curr_state = u_future_get_state_priv(uft);
207 if (curr_state != XRT_FUTURE_STATE_PENDING) {
208 os_mutex_unlock(&uft->mtx);
209 switch (curr_state) {
210 case XRT_FUTURE_STATE_READY: return XRT_ERROR_FUTURE_ALREADY_COMPLETE;
211 case XRT_FUTURE_STATE_CANCELLED:
212 default: return XRT_OPERATION_CANCELLED;
213 }
214 }
215
216 if (ft_result->result == XRT_SUCCESS) {
217 uft->value = ft_result->value;
218 }
219 u_future_set_state_and_xrt_result(uft, XRT_FUTURE_STATE_READY, ft_result->result);
220
221 os_cond_broadcast(&uft->cv);
222 os_mutex_unlock(&uft->mtx);
223 return XRT_SUCCESS;
224}
225
/*!
 * @ref xrt_future::destroy implementation: cancels the future, gives waiters
 * a bounded window to wake up, then tears down the sync primitives and frees
 * the object. NULL is tolerated.
 */
static void
u_future_destroy(struct xrt_future *xft)
{
	struct u_future *uft = (struct u_future *)xft;
	if (uft == NULL) {
		return;
	}

	UFT_LOG_T("destroying u_future:%p", (void *)uft);

	// Force a final state and broadcast, so no waiter stays blocked on cv.
	u_future_cancel(&uft->base);
	// Bounded wait (3s) so destruction cannot hang; after cancel the state
	// is final and this returns promptly in the common case.
	u_future_wait(&uft->base, U_FUTURE_CLEANUP_TIMEOUT_NS);
	// Destroy primitives only after all waits above have returned.
	os_cond_destroy(&uft->cv);
	os_mutex_destroy(&uft->mtx);

	UFT_LOG_T("u_future:%p destroyed", (void *)uft);

	delete uft;
}
245
246struct xrt_future *
247u_future_create(void)
248{
249 std::unique_ptr<struct u_future> uft{new struct u_future()};
250 os_mutex_init(&uft->mtx);
251 os_cond_init(&uft->cv);
252
253 struct xrt_future *xft = &uft->base;
254 xft->reference.count = 1;
255 xft->get_state = u_future_get_state;
256 xft->get_result = u_future_get_result;
257 xft->cancel = u_future_cancel;
258 xft->wait = u_future_wait;
259 xft->is_cancel_requested = u_future_is_cancel_requested;
260 xft->complete = u_future_complete;
261 xft->destroy = u_future_destroy;
262
263 UFT_LOG_T("created u_future:%p", (void *)xft);
264 return &uft.release()->base;
265}