A modern Music Player Daemon based on Rockbox open source high quality audio player
libadwaita audio rust zig deno mpris rockbox mpd
at master 349 lines 11 kB view raw
/***************************************************************************
 *             __________               __   ___.
 *   Open      \______   \ ____   ____ |  | _\_ |__   _______  ___
 *   Source     |       _//  _ \_/ ___\|  |/ /| __ \ /  _ \  \/  /
 *   Jukebox    |    |   (  <_> )  \___|    < | \_\ (  <_> > <  <
 *   Firmware   |____|_  /\____/ \___  >__|_ \|___  /\____/__/\_ \
 *                     \/            \/     \/    \/            \/
 * $Id$
 *
 * Copyright (C) 2014 by Michael Sevakis
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
 * KIND, either express or implied.
 *
 ****************************************************************************/
#include "kernel-internal.h"
#include <string.h>
#include "mrsw_lock.h"

#ifdef HAVE_PRIORITY_SCHEDULING

/* Record 'current' as an owning reader in the splay bitmask. When it is the
   sole reader (count == 1) the blocker tracks it directly; with multiple
   readers no single thread is named as owner. */
static FORCE_INLINE void
mrsw_reader_claim(struct mrsw_lock *mrsw, struct thread_entry *current,
                  int count, unsigned int slotnum)
{
    /* no need to lock this; if a reader can claim, no one is in the queue */
    threadbit_set_bit(&mrsw->splay.mask, slotnum);
    mrsw->splay.blocker.thread = count == 1 ? current : NULL;
}

/* Drop 'current' from the reader set without waking anyone.
   'first' is the head of the wait queue (NULL if nothing is queued);
   'count' is the number of readers remaining after this release. */
static FORCE_INLINE void
mrsw_reader_relinquish(struct mrsw_lock *mrsw, struct thread_entry *current,
                       struct thread_entry *first, int count,
                       unsigned int slotnum)
{
    /* If no writer is queued or has ownership then no one is queued;
       if a writer owns it, then the reader would be blocked instead.
       Therefore, if the queue has threads, then the next after the
       owning readers is a writer and this is not the last reader. */
    if (first)
        corelock_lock(&mrsw->splay.cl);

    threadbit_clear_bit(&mrsw->splay.mask, slotnum);

    if (count == 0)
    {
        /* There is no one waiting; we'd be calling mrsw_wakeup_writer()
           at this time instead */
        mrsw->splay.blocker.thread = NULL;
        return;
    }

    if (count == 1)
    {
        KERNEL_ASSERT(threadbit_popcount(&mrsw->splay.mask) == 1,
                      "mrsw_reader_relinquish() - "
                      "threadbits has wrong popcount: %d\n",
                      threadbit_popcount(&mrsw->splay.mask));
        /* switch owner to sole remaining reader */
        slotnum = threadbit_ffs(&mrsw->splay.mask);
        mrsw->splay.blocker.thread = __thread_slot_entry(slotnum);
    }

    if (first)
    {
        /* a writer is queued; hand back any priority we inherited from it */
        priority_disinherit(current, &mrsw->splay.blocker);
        corelock_unlock(&mrsw->splay.cl);
    }
}

/* Last reader out: clear our reader bit and transfer ownership to the
   queued writer. */
static FORCE_INLINE unsigned int
mrsw_reader_wakeup_writer(struct mrsw_lock *mrsw, struct thread_entry *thread,
                          unsigned int slotnum)
{
    threadbit_clear_bit(&mrsw->splay.mask, slotnum);
    return wakeup_thread(thread, WAKEUP_TRANSFER);
}

/* Departing writer hands the lock directly to the next queued writer. */
static FORCE_INLINE unsigned int
mrsw_writer_wakeup_writer(struct mrsw_lock *mrsw, struct thread_entry *thread)
{
    return wakeup_thread(thread, WAKEUP_TRANSFER);
    (void)mrsw; /* unreachable; keeps 'mrsw' referenced in this variant */
}

/* Departing writer wakes the run of queued readers in one multi-transfer.
   NOTE(review): relies on wakeup_thread() leaving the number of woken
   readers in the caller's retval — confirm against the thread API. */
static FORCE_INLINE unsigned int
mrsw_writer_wakeup_readers(struct mrsw_lock *mrsw, struct thread_entry *first)
{
    unsigned int result = wakeup_thread(first, WAKEUP_TRANSFER_MULTI);
    mrsw->count = __running_self_entry()->retval;
    return result;
}

#else /* !HAVE_PRIORITY_SCHEDULING */

/* Without priority scheduling there is no per-reader owner tracking, so
   claim/relinquish are no-ops. The unused macro arguments are never
   expanded, so 'slotnum' need not even exist in this build. */
#define mrsw_reader_claim(mrsw, current, count, slotnum) \
    do {} while (0)

#define mrsw_reader_relinquish(mrsw, current, first, count, slotnum) \
    do {} while (0)

/* Last reader out: make the queued writer the owner and wake it. */
static FORCE_INLINE unsigned int
mrsw_reader_wakeup_writer(struct mrsw_lock *mrsw, struct thread_entry *thread)
{
    mrsw->splay.blocker.thread = thread;
    return wakeup_thread(thread);
}

/* Departing writer passes ownership to the next queued writer. */
static FORCE_INLINE unsigned int
mrsw_writer_wakeup_writer(struct mrsw_lock *mrsw, struct thread_entry *thread)
{
    mrsw->splay.blocker.thread = thread;
    return wakeup_thread(thread);
}

/* Departing writer wakes every queued reader up to (but not including)
   the next queued writer; retval == 0 marks a writer (single-wake). */
static FORCE_INLINE unsigned int
mrsw_writer_wakeup_readers(struct mrsw_lock *mrsw, struct thread_entry *first)
{
    mrsw->splay.blocker.thread = NULL;
    int count = 1;

    while (1)
    {
        wakeup_thread(first);

        if (!(first = WQ_THREAD_FIRST(&mrsw->queue)) || first->retval == 0)
            break;

        count++;
    }

    mrsw->count = count;
    return THREAD_OK;
}

#endif /* HAVE_PRIORITY_SCHEDULING */

/** Public interface **/

/* Initializes a multi-reader, single-writer object */
void mrsw_init(struct mrsw_lock *mrsw)
{
    mrsw->count = 0;
    wait_queue_init(&mrsw->queue);
    blocker_splay_init(&mrsw->splay);
#ifdef HAVE_PRIORITY_SCHEDULING
    /* per-thread read-recursion counters, indexed by thread slot */
    memset(mrsw->rdrecursion, 0, sizeof (mrsw->rdrecursion));
#endif
    corelock_init(&mrsw->cl);
}

/* Request reader thread lock. Any number of reader threads may enter which
 * also locks-out all writer threads. Same thread may safely acquire read
 * access recursively. The current writer is ignored and gets access.
 */
void mrsw_read_acquire(struct mrsw_lock *mrsw)
{
    ASSERT_CPU_MODE(CPU_MODE_THREAD_CONTEXT);

    struct thread_entry *current = __running_self_entry();

    /* mrsw->count < 0 means a writer owns the lock (priority builds only
       need the extra check; elsewhere blocker.thread identifies the writer) */
    if (current == mrsw->splay.blocker.thread IF_PRIO( && mrsw->count < 0 ))
        return; /* Read request while holding write access; pass */

#ifdef HAVE_PRIORITY_SCHEDULING
    /* Track recursion counts for each thread:
       IF_PRIO, mrsw->count just tracks unique owners */
    unsigned int slotnum = THREAD_ID_SLOT(current->id);
    if (threadbit_test_bit(&mrsw->splay.mask, slotnum))
    {
        /* already a reader; just bump this thread's recursion counter */
        KERNEL_ASSERT(mrsw->rdrecursion[slotnum] < UINT8_MAX,
                      "mrsw_read_acquire() - "
                      "Thread %s did too many claims\n",
                      current->name);
        mrsw->rdrecursion[slotnum]++;
        return;
    }
#endif /* HAVE_PRIORITY_SCHEDULING */

    corelock_lock(&mrsw->cl);

    int count = mrsw->count;

    /* fast path: no writer owns it and nothing is queued */
    if (LIKELY(count >= 0 && mrsw->queue.head == NULL))
    {
        /* Lock open to readers:
           IFN_PRIO, mrsw->count tracks reader recursion */
        mrsw->count = ++count;
        mrsw_reader_claim(mrsw, current, count, slotnum);
        corelock_unlock(&mrsw->cl);
        return;
    }

    /* A writer owns it or is waiting; block... */
    current->retval = 1; /* indicate multi-wake candidate */

    disable_irq();
    block_thread(current, TIMEOUT_BLOCK, &mrsw->queue, &mrsw->splay.blocker);

    corelock_unlock(&mrsw->cl);

    /* ...and turn control over to next thread */
    switch_thread();
}

/* Release reader thread lockout of writer thread. The last reader to
 * leave opens up access to writer threads. The current writer is ignored.
 */
void mrsw_read_release(struct mrsw_lock *mrsw)
{
    struct thread_entry *current = __running_self_entry();

    if (current == mrsw->splay.blocker.thread IF_PRIO( && mrsw->count < 0 ))
        return; /* Read release while holding write access; ignore */

#ifdef HAVE_PRIORITY_SCHEDULING
    unsigned int slotnum = THREAD_ID_SLOT(current->id);
    KERNEL_ASSERT(threadbit_test_bit(&mrsw->splay.mask, slotnum),
                  "mrsw_read_release() -"
                  " thread '%s' not reader\n",
                  current->name);

    uint8_t *rdcountp = &mrsw->rdrecursion[slotnum];
    unsigned int rdcount = *rdcountp;
    if (rdcount > 0)
    {
        /* Reader is releasing recursive claim */
        *rdcountp = rdcount - 1;
        return;
    }
#endif /* HAVE_PRIORITY_SCHEDULING */

    corelock_lock(&mrsw->cl);
    int count = mrsw->count;

    KERNEL_ASSERT(count > 0, "mrsw_read_release() - no readers!\n");

    unsigned int result = THREAD_NONE;
    const int oldlevel = disable_irq_save();

    struct thread_entry *thread = WQ_THREAD_FIRST(&mrsw->queue);
    if (--count == 0 && thread != NULL)
    {
        /* No readers remain and a writer is waiting */
        mrsw->count = -1; /* writer now owns the lock */
        result = mrsw_reader_wakeup_writer(mrsw, thread IF_PRIO(, slotnum));
    }
    else
    {
        /* Giving up readership; we may be the last, or not */
        mrsw->count = count;
        mrsw_reader_relinquish(mrsw, current, thread, count, slotnum);
    }

    restore_irq(oldlevel);
    corelock_unlock(&mrsw->cl);

#ifdef HAVE_PRIORITY_SCHEDULING
    /* if the wakeup transferred priority such that a reschedule is due,
       yield now rather than waiting for the next natural switch */
    if (result & THREAD_SWITCH)
        switch_thread();
#endif /* HAVE_PRIORITY_SCHEDULING */
    (void)result;
}

/* Acquire writer thread lock which provides exclusive access. If a thread
 * that is holding read access calls this it will deadlock. The writer may
 * safely call recursively.
 */
void mrsw_write_acquire(struct mrsw_lock *mrsw)
{
    ASSERT_CPU_MODE(CPU_MODE_THREAD_CONTEXT);

    struct thread_entry *current = __running_self_entry();

    if (current == mrsw->splay.blocker.thread)
    {
        /* Current thread already has write access */
        mrsw->count--; /* count < -1 encodes write-recursion depth */
        return;
    }

    corelock_lock(&mrsw->cl);

    int count = mrsw->count;

    if (LIKELY(count == 0))
    {
        /* Lock is open to a writer */
        mrsw->count = -1;
        mrsw->splay.blocker.thread = current;
        corelock_unlock(&mrsw->cl);
        return;
    }

    /* Readers present or a writer owns it - block... */
    current->retval = 0; /* indicate single-wake candidate */

    disable_irq();
    block_thread(current, TIMEOUT_BLOCK, &mrsw->queue, &mrsw->splay.blocker);

    corelock_unlock(&mrsw->cl);

    /* ...and turn control over to next thread */
    switch_thread();
}

/* Release writer thread lock and open the lock to readers and writers */
void mrsw_write_release(struct mrsw_lock *mrsw)
{
    KERNEL_ASSERT(__running_self_entry() == mrsw->splay.blocker.thread,
                  "mrsw_write_release->wrong thread (%s != %s)\n",
                  __running_self_entry()->name,
                  mrsw->splay.blocker.thread->name);

    int count = mrsw->count;
    if (count < -1)
    {
        /* This thread still owns write lock */
        mrsw->count = count + 1; /* unwind one level of write recursion */
        return;
    }

    unsigned int result = THREAD_NONE;

    corelock_lock(&mrsw->cl);
    const int oldlevel = disable_irq_save();

    /* hand off to the queue head: writer (retval == 0), reader run
       (retval != 0), or nothing */
    struct thread_entry *thread = WQ_THREAD_FIRST(&mrsw->queue);
    if (thread == NULL) /* 'count' becomes zero */
    {
        mrsw->splay.blocker.thread = NULL;
        mrsw->count = 0;
    }
    else if (thread->retval == 0) /* 'count' stays -1 */
        result = mrsw_writer_wakeup_writer(mrsw, thread);
    else /* 'count' becomes # of readers */
        result = mrsw_writer_wakeup_readers(mrsw, thread);

    restore_irq(oldlevel);
    corelock_unlock(&mrsw->cl);

#ifdef HAVE_PRIORITY_SCHEDULING
    if (result & THREAD_SWITCH)
        switch_thread();
#endif /* HAVE_PRIORITY_SCHEDULING */
    (void)result;
}