A modern Music Player Daemon based on Rockbox, the open-source high-quality audio player
libadwaita audio rust zig deno mpris rockbox mpd

Do some kernel cleanup

* Seal away private thread and kernel definitions and declarations
into the internal headers in order to better hide internal structure.

* Add a thread-common.c file that keeps shared functions together.
List functions aren't touched, since those are about to be replaced
with different ones.

* It is necessary to modify some ARM/PP stuff since GCC was complaining
about constant pool distance and I would rather not force a constant
pool dump. Just bl the cache calls in the startup and exit code and let
the assembler use veneers if it must.

* Clean up redundant #includes in relevant areas and reorganize them.

* Expunge useless and dangerous stuff like remove_thread().

Change-Id: I6e22932fad61a9fac30fd1363c071074ee7ab382

+363 -983
+15 -53
apps/debug_menu.c
··· 133 133 134 134 #include "talk.h" 135 135 136 - /*---------------------------------------------------*/ 137 - /* SPECIAL DEBUG STUFF */ 138 - /*---------------------------------------------------*/ 139 - extern struct thread_entry threads[MAXTHREADS]; 140 - 141 - static char thread_status_char(unsigned status) 142 - { 143 - static const char thread_status_chars[THREAD_NUM_STATES+1] = 144 - { 145 - [0 ... THREAD_NUM_STATES] = '?', 146 - [STATE_RUNNING] = 'R', 147 - [STATE_BLOCKED] = 'B', 148 - [STATE_SLEEPING] = 'S', 149 - [STATE_BLOCKED_W_TMO] = 'T', 150 - [STATE_FROZEN] = 'F', 151 - [STATE_KILLED] = 'K', 152 - }; 153 - 154 - if (status > THREAD_NUM_STATES) 155 - status = THREAD_NUM_STATES; 156 - 157 - return thread_status_chars[status]; 158 - } 159 - 160 136 static const char* threads_getname(int selected_item, void *data, 161 137 char *buffer, size_t buffer_len) 162 138 { 163 139 (void)data; 164 - struct thread_entry *thread; 165 - char name[32]; 166 140 167 141 #if NUM_CORES > 1 168 142 if (selected_item < (int)NUM_CORES) 169 143 { 144 + struct core_debug_info coreinfo; 145 + core_get_debug_info(selected_item, &coreinfo); 170 146 snprintf(buffer, buffer_len, "Idle (%d): %2d%%", selected_item, 171 - idle_stack_usage(selected_item)); 147 + coreinfo.idle_stack_usage); 172 148 return buffer; 173 149 } 174 150 175 151 selected_item -= NUM_CORES; 176 152 #endif 177 153 178 - thread = &threads[selected_item]; 179 - 180 - if (thread->state == STATE_KILLED) 154 + struct thread_debug_info threadinfo; 155 + if (thread_get_debug_info(selected_item, &threadinfo) <= 0) 181 156 { 182 157 snprintf(buffer, buffer_len, "%2d: ---", selected_item); 183 158 return buffer; 184 159 } 185 160 186 - thread_get_name(name, 32, thread); 187 - 188 161 snprintf(buffer, buffer_len, 189 - "%2d: " IF_COP("(%d) ") "%c%c " IF_PRIO("%d %d ") "%2d%% %s", 162 + "%2d: " IF_COP("(%d) ") "%s " IF_PRIO("%d %d ") "%2d%% %s", 190 163 selected_item, 191 - IF_COP(thread->core,) 192 - #ifdef 
HAVE_SCHEDULER_BOOSTCTRL 193 - (thread->cpu_boost) ? '+' : 164 + #if NUM_CORES > 1 165 + threadinfo.core, 194 166 #endif 195 - ((thread->state == STATE_RUNNING) ? '*' : ' '), 196 - thread_status_char(thread->state), 197 - IF_PRIO(thread->base_priority, thread->priority, ) 198 - thread_stack_usage(thread), name); 167 + threadinfo.statusstr, 168 + #ifdef HAVE_PRIORITY_SCHEDULING 169 + threadinfo.base_priority, 170 + threadinfo.current_priority, 171 + #endif 172 + threadinfo.stack_usage, 173 + threadinfo.name); 199 174 200 175 return buffer; 201 176 } ··· 203 178 static int dbg_threads_action_callback(int action, struct gui_synclist *lists) 204 179 { 205 180 (void)lists; 206 - #ifdef ROCKBOX_HAS_LOGF 207 - if (action == ACTION_STD_OK) 208 - { 209 - int selpos = gui_synclist_get_sel_pos(lists); 210 - #if NUM_CORES > 1 211 - if (selpos >= NUM_CORES) 212 - remove_thread(threads[selpos - NUM_CORES].id); 213 - #else 214 - remove_thread(threads[selpos].id); 215 - #endif 216 - return ACTION_REDRAW; 217 - } 218 - #endif /* ROCKBOX_HAS_LOGF */ 219 181 if (action == ACTION_NONE) 220 182 action = ACTION_REDRAW; 221 183 return action;
+1 -2
apps/main.c
··· 28 28 #include "rtc.h" 29 29 #include "debug.h" 30 30 #include "led.h" 31 - #include "kernel.h" 31 + #include "../kernel-internal.h" 32 32 #include "button.h" 33 33 #include "tree.h" 34 34 #include "filetypes.h" ··· 44 44 #endif 45 45 #include "audio.h" 46 46 #include "mp3_playback.h" 47 - #include "thread.h" 48 47 #include "settings.h" 49 48 #include "backlight.h" 50 49 #include "status.h"
+1
firmware/SOURCES
··· 1838 1838 #else 1839 1839 kernel/thread.c 1840 1840 #endif 1841 + kernel/thread-common.c 1841 1842 kernel/tick.c 1842 1843 #ifdef INCLUDE_TIMEOUT_API 1843 1844 kernel/timeout.c
+1 -3
firmware/asm/arm/thread.c
··· 34 34 "mov r1, #0 \n" /* Mark thread as running */ 35 35 "str r1, [r0, #40] \n" 36 36 #if NUM_CORES > 1 37 - "ldr r0, =commit_discard_idcache \n" /* Invalidate this core's cache. */ 38 - "mov lr, pc \n" /* This could be the first entry into */ 39 - "bx r0 \n" /* plugin or codec code for this core. */ 37 + "bl commit_discard_idcache \n" /* Invalidate this core's cache. */ 40 38 #endif 41 39 "mov lr, pc \n" /* Call thread function */ 42 40 "bx r4 \n"
+8 -4
firmware/kernel/include/corelock.h
··· 28 28 #ifndef HAVE_CORELOCK_OBJECT 29 29 30 30 /* No atomic corelock op needed or just none defined */ 31 - #define corelock_init(cl) 32 - #define corelock_lock(cl) 33 - #define corelock_try_lock(cl) 34 - #define corelock_unlock(cl) 31 + #define corelock_init(cl) \ 32 + do {} while (0) 33 + #define corelock_lock(cl) \ 34 + do {} while (0) 35 + #define corelock_try_lock(cl) \ 36 + do {} while (0) 37 + #define corelock_unlock(cl) \ 38 + do {} while (0) 35 39 36 40 #else 37 41
-19
firmware/kernel/include/kernel.h
··· 48 48 #define TIMEOUT_BLOCK -1 49 49 #define TIMEOUT_NOBLOCK 0 50 50 51 - static inline void kernel_init(void) 52 - { 53 - /* Init the threading API */ 54 - init_threads(); 55 - 56 - /* Other processors will not reach this point in a multicore build. 57 - * In a single-core build with multiple cores they fall-through and 58 - * sleep in cop_main without returning. */ 59 - if (CURRENT_CORE == CPU) 60 - { 61 - init_queues(); 62 - init_tick(); 63 - #ifdef KDEV_INIT 64 - kernel_device_init(); 65 - #endif 66 - } 67 - } 68 - 69 - 70 51 #endif /* KERNEL_H */
+2
firmware/kernel/include/mrsw_lock.h
··· 21 21 #ifndef MRSW_LOCK_H 22 22 #define MRSW_LOCK_H 23 23 24 + #include "thread.h" 25 + 24 26 /* Multi-reader, single-writer object that allows mutltiple readers or a 25 27 * single writer thread access to a critical section. 26 28 *
-2
firmware/kernel/include/mutex.h
··· 22 22 #ifndef MUTEX_H 23 23 #define MUTEX_H 24 24 25 - #include <stdbool.h> 26 - #include "config.h" 27 25 #include "thread.h" 28 26 29 27 struct mutex
-1
firmware/kernel/include/semaphore.h
··· 22 22 #ifndef SEMAPHORE_H 23 23 #define SEMAPHORE_H 24 24 25 - #include "config.h" 26 25 #include "thread.h" 27 26 28 27 struct semaphore
+35 -244
firmware/kernel/include/thread.h
··· 18 18 * KIND, either express or implied. 19 19 * 20 20 ****************************************************************************/ 21 - 22 21 #ifndef THREAD_H 23 22 #define THREAD_H 24 23 25 - #include "config.h" 26 24 #include <inttypes.h> 27 25 #include <stddef.h> 28 26 #include <stdbool.h> 27 + #include "config.h" 29 28 #include "gcc_extensions.h" 30 - #include "corelock.h" 31 29 #include "bitarray.h" 30 + #include "corelock.h" 32 31 33 32 /* Priority scheduling (when enabled with HAVE_PRIORITY_SCHEDULING) works 34 33 * by giving high priority threads more CPU time than lower priority threads ··· 65 64 #define IO_PRIORITY_IMMEDIATE 0 66 65 #define IO_PRIORITY_BACKGROUND 32 67 66 68 - 69 67 #if CONFIG_CODEC == SWCODEC 70 68 # ifdef HAVE_HARDWARE_CLICK 71 69 # define BASETHREADS 17 ··· 85 83 BITARRAY_TYPE_DECLARE(threadbit_t, threadbit, MAXTHREADS) 86 84 BITARRAY_TYPE_DECLARE(priobit_t, priobit, NUM_PRIORITIES) 87 85 86 + struct thread_entry; 87 + 88 88 /* 89 89 * We need more stack when we run under a host 90 90 * maybe more expensive C lib functions? ··· 92 92 * simulator (possibly) doesn't simulate stack usage anyway but well ... 
*/ 93 93 94 94 #if defined(HAVE_SDL_THREADS) || defined(__PCTOOL__) 95 - struct regs 96 - { 97 - void *t; /* OS thread */ 98 - void *told; /* Last thread in slot (explained in thead-sdl.c) */ 99 - void *s; /* Semaphore for blocking and wakeup */ 100 - void (*start)(void); /* Start function */ 101 - }; 102 - 103 95 #define DEFAULT_STACK_SIZE 0x100 /* tiny, ignored anyway */ 104 96 #else 105 97 #include "asm/thread.h" 106 98 #endif /* HAVE_SDL_THREADS */ 107 99 108 - /* NOTE: The use of the word "queue" may also refer to a linked list of 109 - threads being maintained that are normally dealt with in FIFO order 110 - and not necessarily kernel event_queue */ 111 - enum 112 - { 113 - /* States without a timeout must be first */ 114 - STATE_KILLED = 0, /* Thread is killed (default) */ 115 - STATE_RUNNING, /* Thread is currently running */ 116 - STATE_BLOCKED, /* Thread is indefinitely blocked on a queue */ 117 - /* These states involve adding the thread to the tmo list */ 118 - STATE_SLEEPING, /* Thread is sleeping with a timeout */ 119 - STATE_BLOCKED_W_TMO, /* Thread is blocked on a queue with a timeout */ 120 - /* Miscellaneous states */ 121 - STATE_FROZEN, /* Thread is suspended and will not run until 122 - thread_thaw is called with its ID */ 123 - THREAD_NUM_STATES, 124 - TIMEOUT_STATE_FIRST = STATE_SLEEPING, 125 - }; 100 + extern void yield(void); 101 + extern unsigned sleep(unsigned ticks); 126 102 127 - #if NUM_CORES > 1 128 - /* Pointer value for name field to indicate thread is being killed. Using 129 - * an alternate STATE_* won't work since that would interfere with operation 130 - * while the thread is still running. */ 131 - #define THREAD_DESTRUCT ((const char *)~(intptr_t)0) 103 + #ifdef HAVE_PRIORITY_SCHEDULING 104 + #define IF_PRIO(...) __VA_ARGS__ 105 + #define IFN_PRIO(...) 106 + #else 107 + #define IF_PRIO(...) 108 + #define IFN_PRIO(...) 
__VA_ARGS__ 132 109 #endif 133 110 134 - /* Link information for lists thread is in */ 135 - struct thread_entry; /* forward */ 136 - struct thread_list 137 - { 138 - struct thread_entry *prev; /* Previous thread in a list */ 139 - struct thread_entry *next; /* Next thread in a list */ 140 - }; 141 - 142 111 /* Basic structure describing the owner of an object */ 143 112 struct blocker 144 113 { ··· 163 132 #endif /* HAVE_PRIORITY_SCHEDULING */ 164 133 }; 165 134 166 - #ifdef HAVE_PRIORITY_SCHEDULING 167 - 168 - /* Quick-disinherit of priority elevation. Must be a running thread. */ 169 - void priority_disinherit(struct thread_entry *thread, struct blocker *bl); 170 - 171 - struct priority_distribution 172 - { 173 - uint8_t hist[NUM_PRIORITIES]; /* Histogram: Frequency for each priority */ 174 - priobit_t mask; /* Bitmask of hist entries that are not zero */ 175 - }; 176 - 177 - #endif /* HAVE_PRIORITY_SCHEDULING */ 178 - 179 - /* Information kept in each thread slot 180 - * members are arranged according to size - largest first - in order 181 - * to ensure both alignment and packing at the same time. 
182 - */ 183 - struct thread_entry 184 - { 185 - struct regs context; /* Register context at switch - 186 - _must_ be first member */ 187 - uintptr_t *stack; /* Pointer to top of stack */ 188 - const char *name; /* Thread name */ 189 - long tmo_tick; /* Tick when thread should be woken from 190 - timeout - 191 - states: STATE_SLEEPING/STATE_BLOCKED_W_TMO */ 192 - struct thread_list l; /* Links for blocked/waking/running - 193 - circular linkage in both directions */ 194 - struct thread_list tmo; /* Links for timeout list - 195 - Circular in reverse direction, NULL-terminated in 196 - forward direction - 197 - states: STATE_SLEEPING/STATE_BLOCKED_W_TMO */ 198 - struct thread_entry **bqp; /* Pointer to list variable in kernel 199 - object where thread is blocked - used 200 - for implicit unblock and explicit wake 201 - states: STATE_BLOCKED/STATE_BLOCKED_W_TMO */ 202 - #ifdef HAVE_CORELOCK_OBJECT 203 - struct corelock *obj_cl; /* Object corelock where thead is blocked - 204 - states: STATE_BLOCKED/STATE_BLOCKED_W_TMO */ 205 - struct corelock waiter_cl; /* Corelock for thread_wait */ 206 - struct corelock slot_cl; /* Corelock to lock thread slot */ 207 - unsigned char core; /* The core to which thread belongs */ 208 - #endif 209 - struct thread_entry *queue; /* List of threads waiting for thread to be 210 - removed */ 211 - #ifdef HAVE_WAKEUP_EXT_CB 212 - void (*wakeup_ext_cb)(struct thread_entry *thread); /* Callback that 213 - performs special steps needed when being 214 - forced off of an object's wait queue that 215 - go beyond the standard wait queue removal 216 - and priority disinheritance */ 217 - /* Only enabled when using queue_send for now */ 218 - #endif 219 - #if defined(HAVE_SEMAPHORE_OBJECTS) || \ 220 - defined(HAVE_EXTENDED_MESSAGING_AND_NAME) || \ 221 - NUM_CORES > 1 222 - volatile intptr_t retval; /* Return value from a blocked operation/ 223 - misc. 
use */ 224 - #endif 225 - uint32_t id; /* Current slot id */ 226 - int __errno; /* Thread error number (errno tls) */ 227 - #ifdef HAVE_PRIORITY_SCHEDULING 228 - /* Priority summary of owned objects that support inheritance */ 229 - struct blocker *blocker; /* Pointer to blocker when this thread is blocked 230 - on an object that supports PIP - 231 - states: STATE_BLOCKED/STATE_BLOCKED_W_TMO */ 232 - struct priority_distribution pdist; /* Priority summary of owned objects 233 - that have blocked threads and thread's own 234 - base priority */ 235 - int skip_count; /* Number of times skipped if higher priority 236 - thread was running */ 237 - unsigned char base_priority; /* Base priority (set explicitly during 238 - creation or thread_set_priority) */ 239 - unsigned char priority; /* Scheduled priority (higher of base or 240 - all threads blocked by this one) */ 241 - #endif 242 - unsigned short stack_size; /* Size of stack in bytes */ 243 - unsigned char state; /* Thread slot state (STATE_*) */ 244 - #ifdef HAVE_SCHEDULER_BOOSTCTRL 245 - unsigned char cpu_boost; /* CPU frequency boost flag */ 246 - #endif 247 - #ifdef HAVE_IO_PRIORITY 248 - unsigned char io_priority; 249 - #endif 250 - }; 251 - 252 - /*** Macros for internal use ***/ 253 - /* Thread ID, 32 bits = |VVVVVVVV|VVVVVVVV|VVVVVVVV|SSSSSSSS| */ 254 - #define THREAD_ID_VERSION_SHIFT 8 255 - #define THREAD_ID_VERSION_MASK 0xffffff00 256 - #define THREAD_ID_SLOT_MASK 0x000000ff 257 - #define THREAD_ID_INIT(n) ((1u << THREAD_ID_VERSION_SHIFT) | (n)) 258 - #define THREAD_ID_SLOT(id) ((id) & THREAD_ID_SLOT_MASK) 259 - 260 - #ifdef HAVE_CORELOCK_OBJECT 261 - /* Operations to be performed just before stopping a thread and starting 262 - a new one if specified before calling switch_thread */ 263 - enum 264 - { 265 - TBOP_CLEAR = 0, /* No operation to do */ 266 - TBOP_UNLOCK_CORELOCK, /* Unlock a corelock variable */ 267 - TBOP_SWITCH_CORE, /* Call the core switch preparation routine */ 268 - }; 269 - 270 - struct 
thread_blk_ops 271 - { 272 - struct corelock *cl_p; /* pointer to corelock */ 273 - unsigned char flags; /* TBOP_* flags */ 274 - }; 275 - #endif /* NUM_CORES > 1 */ 276 - 277 - /* Information kept for each core 278 - * Members are arranged for the same reason as in thread_entry 279 - */ 280 - struct core_entry 281 - { 282 - /* "Active" lists - core is constantly active on these and are never 283 - locked and interrupts do not access them */ 284 - struct thread_entry *running; /* threads that are running (RTR) */ 285 - struct thread_entry *timeout; /* threads that are on a timeout before 286 - running again */ 287 - struct thread_entry *block_task; /* Task going off running list */ 288 - #ifdef HAVE_PRIORITY_SCHEDULING 289 - struct priority_distribution rtr; /* Summary of running and ready-to-run 290 - threads */ 291 - #endif 292 - long next_tmo_check; /* soonest time to check tmo threads */ 293 - #ifdef HAVE_CORELOCK_OBJECT 294 - struct thread_blk_ops blk_ops; /* operations to perform when 295 - blocking a thread */ 296 - struct corelock rtr_cl; /* Lock for rtr list */ 297 - #endif /* NUM_CORES */ 298 - }; 299 - 300 - extern void yield(void); 301 - extern unsigned sleep(unsigned ticks); 302 - 303 - #ifdef HAVE_PRIORITY_SCHEDULING 304 - #define IF_PRIO(...) __VA_ARGS__ 305 - #define IFN_PRIO(...) 306 - #else 307 - #define IF_PRIO(...) 308 - #define IFN_PRIO(...) 
__VA_ARGS__ 309 - #endif 310 - 311 135 void core_idle(void); 312 136 void core_wake(IF_COP_VOID(unsigned int core)); 313 137 314 - /* Initialize the scheduler */ 315 - void init_threads(void) INIT_ATTR; 316 - 317 138 /* Allocate a thread in the scheduler */ 318 139 #define CREATE_THREAD_FROZEN 0x00000001 /* Thread is frozen at create time */ 319 140 unsigned int create_thread(void (*function)(void), ··· 330 151 #define trigger_cpu_boost() do { } while(0) 331 152 #define cancel_cpu_boost() do { } while(0) 332 153 #endif 333 - /* Return thread entry from id */ 334 - struct thread_entry *thread_id_entry(unsigned int thread_id); 335 - /* Make a frozed thread runnable (when started with CREATE_THREAD_FROZEN). 154 + /* Make a frozen thread runnable (when started with CREATE_THREAD_FROZEN). 336 155 * Has no effect on a thread not frozen. */ 337 156 void thread_thaw(unsigned int thread_id); 338 157 /* Wait for a thread to exit */ 339 158 void thread_wait(unsigned int thread_id); 340 159 /* Exit the current thread */ 341 160 void thread_exit(void) NORETURN_ATTR; 342 - #if defined(DEBUG) || defined(ROCKBOX_HAS_LOGF) 343 - #define ALLOW_REMOVE_THREAD 344 - /* Remove a thread from the scheduler */ 345 - void remove_thread(unsigned int thread_id); 346 - #endif 347 - 348 - /* Switch to next runnable thread */ 349 - void switch_thread(void); 350 - /* Blocks a thread for at least the specified number of ticks (0 = wait until 351 - * next tick) */ 352 - void sleep_thread(int ticks); 353 - /* Blocks the current thread on a thread queue (< 0 == infinite) */ 354 - void block_thread(struct thread_entry *current, int timeout); 355 - 356 - /* Return bit flags for thread wakeup */ 357 - #define THREAD_NONE 0x0 /* No thread woken up (exclusive) */ 358 - #define THREAD_OK 0x1 /* A thread was woken up */ 359 - #define THREAD_SWITCH 0x2 /* Task switch recommended (one or more of 360 - higher priority than current were woken) */ 361 - 362 - /* A convenience function for waking an entire queue 
of threads. */ 363 - unsigned int thread_queue_wake(struct thread_entry **list); 364 - 365 - /* Wakeup a thread at the head of a list */ 366 - enum wakeup_thread_protocol 367 - { 368 - WAKEUP_DEFAULT, 369 - WAKEUP_TRANSFER, 370 - WAKEUP_RELEASE, 371 - WAKEUP_TRANSFER_MULTI, 372 - }; 373 - 374 - unsigned int wakeup_thread_(struct thread_entry **list 375 - IF_PRIO(, enum wakeup_thread_protocol proto)); 376 161 377 162 #ifdef HAVE_PRIORITY_SCHEDULING 378 - #define wakeup_thread(list, proto) \ 379 - wakeup_thread_((list), (proto)) 380 - 381 163 int thread_set_priority(unsigned int thread_id, int priority); 382 164 int thread_get_priority(unsigned int thread_id); 383 - #else /* !HAVE_PRIORITY_SCHEDULING */ 384 - #define wakeup_thread(list, proto...) \ 385 - wakeup_thread_((list)); 386 165 #endif /* HAVE_PRIORITY_SCHEDULING */ 387 166 388 167 #ifdef HAVE_IO_PRIORITY ··· 396 175 /* Return the id of the calling thread. */ 397 176 unsigned int thread_self(void); 398 177 399 - /* Return the thread_entry for the calling thread. 400 - * INTERNAL: Intended for use by kernel and not for programs. */ 401 - struct thread_entry* thread_self_entry(void); 402 - 403 178 /* Debugging info - only! 
*/ 404 - int thread_stack_usage(const struct thread_entry *thread); 405 179 #if NUM_CORES > 1 406 - int idle_stack_usage(unsigned int core); 180 + struct core_debug_info 181 + { 182 + unsigned int idle_stack_usage; 183 + }; 184 + 185 + int core_get_debug_info(unsigned int core, struct core_debug_info *infop); 186 + 187 + #endif /* NUM_CORES */ 188 + 189 + struct thread_debug_info 190 + { 191 + char statusstr[4]; 192 + char name[32]; 193 + unsigned int stack_usage; 194 + #if NUM_CORES > 1 195 + unsigned int core; 407 196 #endif 408 - void thread_get_name(char *buffer, int size, 409 - struct thread_entry *thread); 410 - #ifdef RB_PROFILE 411 - void profile_thread(void); 197 + #ifdef HAVE_PRIORITY_SCHEDULING 198 + int base_priority; 199 + int current_priority; 412 200 #endif 201 + }; 202 + int thread_get_debug_info(unsigned int thread_id, 203 + struct thread_debug_info *infop); 413 204 414 205 #endif /* THREAD_H */
+20 -2
firmware/kernel/kernel-internal.h
··· 22 22 #ifndef KERNEL_INTERNAL_H 23 23 #define KERNEL_INTERNAL_H 24 24 25 - #include "config.h" 26 - #include "debug.h" 25 + #include "thread-internal.h" 26 + #include "kernel.h" 27 27 28 28 /* Make this nonzero to enable more elaborate checks on objects */ 29 29 #if defined(DEBUG) || defined(SIMULATOR) ··· 45 45 #define KERNEL_ASSERT(exp, msg...) ({}) 46 46 #endif 47 47 48 + static inline void kernel_init(void) 49 + { 50 + /* Init the threading API */ 51 + extern void init_threads(void); 52 + init_threads(); 53 + 54 + /* Other processors will not reach this point in a multicore build. 55 + * In a single-core build with multiple cores they fall-through and 56 + * sleep in cop_main without returning. */ 57 + if (CURRENT_CORE == CPU) 58 + { 59 + init_queues(); 60 + init_tick(); 61 + #ifdef KDEV_INIT 62 + kernel_device_init(); 63 + #endif 64 + } 65 + } 48 66 49 67 #endif /* KERNEL_INTERNAL_H */
+1 -7
firmware/kernel/mrsw_lock.c
··· 18 18 * KIND, either express or implied. 19 19 * 20 20 ****************************************************************************/ 21 - #include <string.h> 22 - #include "config.h" 23 - #include "system.h" 24 - #include "thread.h" 25 - #include "kernel.h" 26 21 #include "kernel-internal.h" 22 + #include "mrsw-lock.h" 27 23 28 24 #ifdef HAVE_PRIORITY_SCHEDULING 29 25 ··· 45 41 Therefore, if the queue has threads, then the next after the 46 42 owning readers is a writer and this is not the last reader. */ 47 43 if (mrsw->queue) 48 - { 49 44 corelock_lock(&mrsw->splay.cl); 50 - } 51 45 52 46 threadbit_clear_bit(&mrsw->splay.mask, slotnum); 53 47
+1 -6
firmware/kernel/mutex.c
··· 23 23 /**************************************************************************** 24 24 * Simple mutex functions ;) 25 25 ****************************************************************************/ 26 - 27 - #include <stdbool.h> 28 - #include "config.h" 29 - #include "system.h" 30 - #include "kernel.h" 31 - #include "thread-internal.h" 32 26 #include "kernel-internal.h" 27 + #include "mutex.h" 33 28 34 29 /* Initialize a mutex object - call before any use and do not call again once 35 30 * the object is available to other threads */
-17
firmware/kernel/pthread/thread.c
··· 194 194 thread->l.next->l.prev = thread->l.prev; 195 195 } 196 196 197 - unsigned int thread_queue_wake(struct thread_entry **list) 198 - { 199 - unsigned int result = THREAD_NONE; 200 - 201 - for (;;) 202 - { 203 - unsigned int rc = wakeup_thread(list); 204 - 205 - if (rc == THREAD_NONE) 206 - break; 207 - 208 - result |= rc; 209 - } 210 - 211 - return result; 212 - } 213 - 214 197 /* for block_thread(), _w_tmp() and wakeup_thread() t->lock must point 215 198 * to a corelock instance, and this corelock must be held by the caller */ 216 199 void block_thread_switch(struct thread_entry *t, struct corelock *cl)
+1 -7
firmware/kernel/queue.c
··· 18 18 * KIND, either express or implied. 19 19 * 20 20 ****************************************************************************/ 21 - 22 21 #include <string.h> 23 - #include "config.h" 24 - #include "kernel.h" 25 - #include "system.h" 26 - #include "queue.h" 27 - #include "corelock.h" 28 22 #include "kernel-internal.h" 23 + #include "queue.h" 29 24 #include "general.h" 30 - #include "panic.h" 31 25 32 26 /* This array holds all queues that are initiated. It is used for broadcast. */ 33 27 static struct
+1 -11
firmware/kernel/semaphore.c
··· 18 18 * KIND, either express or implied. 19 19 * 20 20 ****************************************************************************/ 21 - 22 - 23 - /**************************************************************************** 24 - * Simple mutex functions ;) 25 - ****************************************************************************/ 26 - 27 - #include <stdbool.h> 28 - #include "config.h" 29 - #include "kernel.h" 30 - #include "semaphore.h" 31 21 #include "kernel-internal.h" 32 - #include "thread-internal.h" 22 + #include "semaphore.h" 33 23 34 24 /**************************************************************************** 35 25 * Simple semaphore functions ;)
+152
firmware/kernel/thread-common.c
··· 1 + /*************************************************************************** 2 + * __________ __ ___. 3 + * Open \______ \ ____ ____ | | _\_ |__ _______ ___ 4 + * Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ / 5 + * Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < < 6 + * Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \ 7 + * \/ \/ \/ \/ \/ 8 + * $Id$ 9 + * 10 + * Copyright (C) 2002 by Ulf Ralberg 11 + * 12 + * This program is free software; you can redistribute it and/or 13 + * modify it under the terms of the GNU General Public License 14 + * as published by the Free Software Foundation; either version 2 15 + * of the License, or (at your option) any later version. 16 + * 17 + * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY 18 + * KIND, either express or implied. 19 + * 20 + ****************************************************************************/ 21 + #include "thread-internal.h" 22 + #include "system.h" 23 + 24 + /*--------------------------------------------------------------------------- 25 + * Wakeup an entire queue of threads - returns bitwise-or of return bitmask 26 + * from each operation or THREAD_NONE of nothing was awakened. Object owning 27 + * the queue must be locked first. 28 + * 29 + * INTERNAL: Intended for use by kernel objects and not for programs. 
30 + *--------------------------------------------------------------------------- 31 + */ 32 + unsigned int thread_queue_wake(struct thread_entry **list) 33 + { 34 + unsigned result = THREAD_NONE; 35 + 36 + for (;;) 37 + { 38 + unsigned int rc = wakeup_thread(list, WAKEUP_DEFAULT); 39 + 40 + if (rc == THREAD_NONE) 41 + break; /* No more threads */ 42 + 43 + result |= rc; 44 + } 45 + 46 + return result; 47 + } 48 + 49 + 50 + /** Debug screen stuff **/ 51 + 52 + /*--------------------------------------------------------------------------- 53 + * returns the stack space used in bytes 54 + *--------------------------------------------------------------------------- 55 + */ 56 + static unsigned int stack_usage(uintptr_t *stackptr, size_t stack_size) 57 + { 58 + unsigned int usage = 0; 59 + unsigned int stack_words = stack_size / sizeof (uintptr_t); 60 + 61 + for (unsigned int i = 0; i < stack_words; i++) 62 + { 63 + if (stackptr[i] != DEADBEEF) 64 + { 65 + usage = (stack_words - i) * 100 / stack_words; 66 + break; 67 + } 68 + } 69 + 70 + return usage; 71 + } 72 + 73 + #if NUM_CORES > 1 74 + /*--------------------------------------------------------------------------- 75 + * Returns the maximum percentage of the core's idle stack ever used during 76 + * runtime. 77 + *--------------------------------------------------------------------------- 78 + */ 79 + int core_get_debug_info(unsigned int core, struct core_debug_info *infop) 80 + { 81 + extern uintptr_t * const idle_stacks[NUM_CORES]; 82 + 83 + if (core >= NUM_CORES || !infop) 84 + return -1; 85 + 86 + infop->idle_stack_usage = stack_usage(idle_stacks[core], IDLE_STACK_SIZE); 87 + return 1; 88 + } 89 + #endif /* NUM_CORES > 1 */ 90 + 91 + int thread_get_debug_info(unsigned int thread_id, 92 + struct thread_debug_info *infop) 93 + { 94 + static const char status_chars[THREAD_NUM_STATES+1] = 95 + { 96 + [0 ... 
THREAD_NUM_STATES] = '?', 97 + [STATE_RUNNING] = 'R', 98 + [STATE_BLOCKED] = 'B', 99 + [STATE_SLEEPING] = 'S', 100 + [STATE_BLOCKED_W_TMO] = 'T', 101 + [STATE_FROZEN] = 'F', 102 + [STATE_KILLED] = 'K', 103 + }; 104 + 105 + if (!infop) 106 + return -1; 107 + 108 + unsigned int slot = THREAD_ID_SLOT(thread_id); 109 + if (slot >= MAXTHREADS) 110 + return -1; 111 + 112 + extern struct thread_entry threads[MAXTHREADS]; 113 + struct thread_entry *thread = &threads[slot]; 114 + 115 + int oldlevel = disable_irq_save(); 116 + LOCK_THREAD(thread); 117 + 118 + unsigned int state = thread->state; 119 + 120 + if (state != STATE_KILLED) 121 + { 122 + const char *name = thread->name; 123 + if (!name) 124 + name = ""; 125 + 126 + bool cpu_boost = false; 127 + #ifdef HAVE_SCHEDULER_BOOSTCTRL 128 + cpu_boost = thread->cpu_boost; 129 + #endif 130 + infop->stack_usage = stack_usage(thread->stack, thread->stack_size); 131 + #if NUM_CORES > 1 132 + infop->core = thread->core; 133 + #endif 134 + #ifdef HAVE_PRIORITY_SCHEDULING 135 + infop->base_priority = thread->base_priority; 136 + infop->current_priority = thread->priority; 137 + #endif 138 + 139 + snprintf(infop->statusstr, sizeof (infop->statusstr), "%c%c", 140 + cpu_boost ? '+' : (state == STATE_RUNNING ? '*' : ' '), 141 + status_chars[state]); 142 + 143 + const char *fmt = *name ? "%s" : "%s%08lX"; 144 + snprintf(infop->name, sizeof (infop->name), fmt, name, 145 + thread->id); 146 + } 147 + 148 + UNLOCK_THREAD(thread); 149 + restore_irq(oldlevel); 150 + 151 + return state == STATE_KILLED ? 0 : 1; 152 + }
+86 -151
firmware/kernel/thread-internal.h
··· 18 18 * KIND, either express or implied. 19 19 * 20 20 ****************************************************************************/ 21 + #ifndef THREAD_INTERNAL_H 22 + #define THREAD_INTERNAL_H 21 23 22 - #ifndef THREAD_H 23 - #define THREAD_H 24 - 25 - #include "config.h" 26 - #include <inttypes.h> 27 - #include <stddef.h> 28 - #include <stdbool.h> 29 - #include "gcc_extensions.h" 24 + #include "thread.h" 25 + #include <stdio.h> 26 + #include "panic.h" 27 + #include "debug.h" 30 28 31 29 /* 32 30 * We need more stack when we run under a host ··· 48 46 #include "asm/thread.h" 49 47 #endif /* HAVE_SDL_THREADS */ 50 48 51 - #ifdef CPU_PP 52 - #ifdef HAVE_CORELOCK_OBJECT 53 - /* No reliable atomic instruction available - use Peterson's algorithm */ 54 - struct corelock 55 - { 56 - volatile unsigned char myl[NUM_CORES]; 57 - volatile unsigned char turn; 58 - } __attribute__((packed)); 59 - 60 - /* Too big to inline everywhere */ 61 - void corelock_init(struct corelock *cl); 62 - void corelock_lock(struct corelock *cl); 63 - int corelock_try_lock(struct corelock *cl); 64 - void corelock_unlock(struct corelock *cl); 65 - #endif /* HAVE_CORELOCK_OBJECT */ 66 - #endif /* CPU_PP */ 67 - 68 49 /* NOTE: The use of the word "queue" may also refer to a linked list of 69 50 threads being maintained that are normally dealt with in FIFO order 70 51 and not necessarily kernel event_queue */ ··· 84 65 TIMEOUT_STATE_FIRST = STATE_SLEEPING, 85 66 }; 86 67 87 - #if NUM_CORES > 1 88 - /* Pointer value for name field to indicate thread is being killed. Using 89 - * an alternate STATE_* won't work since that would interfere with operation 90 - * while the thread is still running. */ 91 - #define THREAD_DESTRUCT ((const char *)~(intptr_t)0) 92 - #endif 68 + #ifdef HAVE_PRIORITY_SCHEDULING 93 69 94 - /* Link information for lists thread is in */ 95 - struct thread_entry; /* forward */ 96 - struct thread_list 70 + /* Quick-disinherit of priority elevation. Must be a running thread. 
*/ 71 + void priority_disinherit(struct thread_entry *thread, struct blocker *bl); 72 + 73 + struct priority_distribution 97 74 { 98 - struct thread_entry *prev; /* Previous thread in a list */ 99 - struct thread_entry *next; /* Next thread in a list */ 75 + uint8_t hist[NUM_PRIORITIES]; /* Histogram: Frequency for each priority */ 76 + priobit_t mask; /* Bitmask of hist entries that are not zero */ 100 77 }; 101 78 102 - #ifndef HAVE_CORELOCK_OBJECT 103 - /* No atomic corelock op needed or just none defined */ 104 - #define corelock_init(cl) 105 - #define corelock_lock(cl) 106 - #define corelock_try_lock(cl) 107 - #define corelock_unlock(cl) 108 - #endif /* HAVE_CORELOCK_OBJECT */ 79 + #endif /* HAVE_PRIORITY_SCHEDULING */ 109 80 110 - #ifdef HAVE_PRIORITY_SCHEDULING 111 - struct blocker 81 + #ifdef HAVE_CORELOCK_OBJECT 82 + /* Operations to be performed just before stopping a thread and starting 83 + a new one if specified before calling switch_thread */ 84 + enum 112 85 { 113 - struct thread_entry * volatile thread; /* thread blocking other threads 114 - (aka. object owner) */ 115 - int priority; /* highest priority waiter */ 116 - struct thread_entry * (*wakeup_protocol)(struct thread_entry *thread); 86 + TBOP_CLEAR = 0, /* No operation to do */ 87 + TBOP_UNLOCK_CORELOCK, /* Unlock a corelock variable */ 88 + TBOP_SWITCH_CORE, /* Call the core switch preparation routine */ 117 89 }; 118 90 119 - /* Choices of wakeup protocol */ 120 - 121 - /* For transfer of object ownership by one thread to another thread by 122 - * the owning thread itself (mutexes) */ 123 - struct thread_entry * 124 - wakeup_priority_protocol_transfer(struct thread_entry *thread); 91 + struct thread_blk_ops 92 + { 93 + struct corelock *cl_p; /* pointer to corelock */ 94 + unsigned char flags; /* TBOP_* flags */ 95 + }; 96 + #endif /* NUM_CORES > 1 */ 125 97 126 - /* For release by owner where ownership doesn't change - other threads, 127 - * interrupts, timeouts, etc. 
(mutex timeout, queues) */ 128 - struct thread_entry * 129 - wakeup_priority_protocol_release(struct thread_entry *thread); 130 - 131 - 132 - struct priority_distribution 98 + /* Link information for lists thread is in */ 99 + struct thread_entry; /* forward */ 100 + struct thread_list 133 101 { 134 - uint8_t hist[NUM_PRIORITIES]; /* Histogram: Frequency for each priority */ 135 - uint32_t mask; /* Bitmask of hist entries that are not zero */ 102 + struct thread_entry *prev; /* Previous thread in a list */ 103 + struct thread_entry *next; /* Next thread in a list */ 136 104 }; 137 - 138 - #endif /* HAVE_PRIORITY_SCHEDULING */ 139 105 140 106 /* Information kept in each thread slot 141 107 * members are arranged according to size - largest first - in order ··· 183 149 volatile intptr_t retval; /* Return value from a blocked operation/ 184 150 misc. use */ 185 151 #endif 152 + uint32_t id; /* Current slot id */ 153 + int __errno; /* Thread error number (errno tls) */ 186 154 #ifdef HAVE_PRIORITY_SCHEDULING 187 155 /* Priority summary of owned objects that support inheritance */ 188 156 struct blocker *blocker; /* Pointer to blocker when this thread is blocked ··· 198 166 unsigned char priority; /* Scheduled priority (higher of base or 199 167 all threads blocked by this one) */ 200 168 #endif 201 - uint16_t id; /* Current slot id */ 202 169 unsigned short stack_size; /* Size of stack in bytes */ 203 170 unsigned char state; /* Thread slot state (STATE_*) */ 204 171 #ifdef HAVE_SCHEDULER_BOOSTCTRL ··· 209 176 #endif 210 177 }; 211 178 212 - /*** Macros for internal use ***/ 213 - /* Thread ID, 16 bits = |VVVVVVVV|SSSSSSSS| */ 214 - #define THREAD_ID_VERSION_SHIFT 8 215 - #define THREAD_ID_VERSION_MASK 0xff00 216 - #define THREAD_ID_SLOT_MASK 0x00ff 217 - #define THREAD_ID_INIT(n) ((1u << THREAD_ID_VERSION_SHIFT) | (n)) 218 - 219 - #ifdef HAVE_CORELOCK_OBJECT 220 - /* Operations to be performed just before stopping a thread and starting 221 - a new one if specified 
before calling switch_thread */ 222 - enum 223 - { 224 - TBOP_CLEAR = 0, /* No operation to do */ 225 - TBOP_UNLOCK_CORELOCK, /* Unlock a corelock variable */ 226 - TBOP_SWITCH_CORE, /* Call the core switch preparation routine */ 227 - }; 228 - 229 - struct thread_blk_ops 230 - { 231 - struct corelock *cl_p; /* pointer to corelock */ 232 - unsigned char flags; /* TBOP_* flags */ 233 - }; 234 - #endif /* NUM_CORES > 1 */ 235 - 236 179 /* Information kept for each core 237 180 * Members are arranged for the same reason as in thread_entry 238 181 */ ··· 256 199 #endif /* NUM_CORES */ 257 200 }; 258 201 259 - #ifdef HAVE_PRIORITY_SCHEDULING 260 - #define IF_PRIO(...) __VA_ARGS__ 261 - #define IFN_PRIO(...) 262 - #else 263 - #define IF_PRIO(...) 264 - #define IFN_PRIO(...) __VA_ARGS__ 265 - #endif 202 + /* Thread ID, 32 bits = |VVVVVVVV|VVVVVVVV|VVVVVVVV|SSSSSSSS| */ 203 + #define THREAD_ID_VERSION_SHIFT 8 204 + #define THREAD_ID_VERSION_MASK 0xffffff00 205 + #define THREAD_ID_SLOT_MASK 0x000000ff 206 + #define THREAD_ID_INIT(n) ((1u << THREAD_ID_VERSION_SHIFT) | (n)) 207 + #define THREAD_ID_SLOT(id) ((id) & THREAD_ID_SLOT_MASK) 266 208 267 - void core_idle(void); 268 - void core_wake(IF_COP_VOID(unsigned int core)); 209 + /* Thread locking */ 210 + #if NUM_CORES > 1 211 + #define LOCK_THREAD(thread) \ 212 + ({ corelock_lock(&(thread)->slot_cl); }) 213 + #define TRY_LOCK_THREAD(thread) \ 214 + ({ corelock_try_lock(&(thread)->slot_cl); }) 215 + #define UNLOCK_THREAD(thread) \ 216 + ({ corelock_unlock(&(thread)->slot_cl); }) 217 + #define UNLOCK_THREAD_AT_TASK_SWITCH(thread) \ 218 + ({ unsigned int _core = (thread)->core; \ 219 + cores[_core].blk_ops.flags |= TBOP_UNLOCK_CORELOCK; \ 220 + cores[_core].blk_ops.cl_p = &(thread)->slot_cl; }) 221 + #else /* NUM_CORES == 1*/ 222 + #define LOCK_THREAD(thread) \ 223 + ({ (void)(thread); }) 224 + #define TRY_LOCK_THREAD(thread) \ 225 + ({ (void)(thread); }) 226 + #define UNLOCK_THREAD(thread) \ 227 + ({ (void)(thread); }) 228 + 
#define UNLOCK_THREAD_AT_TASK_SWITCH(thread) \ 229 + ({ (void)(thread); }) 230 + #endif /* NUM_CORES */ 269 231 270 - /* Initialize the scheduler */ 271 - void init_threads(void) INIT_ATTR; 272 - 273 - /* Allocate a thread in the scheduler */ 274 - #define CREATE_THREAD_FROZEN 0x00000001 /* Thread is frozen at create time */ 275 - unsigned int create_thread(void (*function)(void), 276 - void* stack, size_t stack_size, 277 - unsigned flags, const char *name 278 - IF_PRIO(, int priority) 279 - IF_COP(, unsigned int core)); 280 - 281 - /* Set and clear the CPU frequency boost flag for the calling thread */ 282 - #ifdef HAVE_SCHEDULER_BOOSTCTRL 283 - void trigger_cpu_boost(void); 284 - void cancel_cpu_boost(void); 285 - #else 286 - #define trigger_cpu_boost() do { } while(0) 287 - #define cancel_cpu_boost() do { } while(0) 288 - #endif 289 - /* Return thread entry from id */ 290 - struct thread_entry *thread_id_entry(unsigned int thread_id); 291 - /* Make a frozed thread runnable (when started with CREATE_THREAD_FROZEN). 292 - * Has no effect on a thread not frozen. 
*/ 293 - void thread_thaw(unsigned int thread_id); 294 - /* Wait for a thread to exit */ 295 - void thread_wait(unsigned int thread_id); 296 - /* Exit the current thread */ 297 - void thread_exit(void) NORETURN_ATTR; 298 - #if defined(DEBUG) || defined(ROCKBOX_HAS_LOGF) 299 - #define ALLOW_REMOVE_THREAD 300 - /* Remove a thread from the scheduler */ 301 - void remove_thread(unsigned int thread_id); 302 - #endif 232 + #define DEADBEEF ((uintptr_t)0xdeadbeefdeadbeefull) 303 233 304 234 /* Switch to next runnable thread */ 305 235 void switch_thread(void); 306 236 /* Blocks a thread for at least the specified number of ticks (0 = wait until 307 237 * next tick) */ 308 238 void sleep_thread(int ticks); 309 - /* Indefinitely blocks the current thread on a thread queue */ 310 - void block_thread(struct thread_entry *current); 311 - /* Blocks the current thread on a thread queue until explicitely woken or 312 - * the timeout is reached */ 313 - void block_thread_w_tmo(struct thread_entry *current, int timeout); 239 + /* Blocks the current thread on a thread queue (< 0 == infinite) */ 240 + void block_thread(struct thread_entry *current, int timeout); 314 241 315 242 /* Return bit flags for thread wakeup */ 316 243 #define THREAD_NONE 0x0 /* No thread woken up (exclusive) */ ··· 322 249 unsigned int thread_queue_wake(struct thread_entry **list); 323 250 324 251 /* Wakeup a thread at the head of a list */ 325 - unsigned int wakeup_thread(struct thread_entry **list); 252 + enum wakeup_thread_protocol 253 + { 254 + WAKEUP_DEFAULT, 255 + WAKEUP_TRANSFER, 256 + WAKEUP_RELEASE, 257 + WAKEUP_TRANSFER_MULTI, 258 + }; 259 + 260 + unsigned int wakeup_thread_(struct thread_entry **list 261 + IF_PRIO(, enum wakeup_thread_protocol proto)); 326 262 327 263 #ifdef HAVE_PRIORITY_SCHEDULING 328 - int thread_set_priority(unsigned int thread_id, int priority); 329 - int thread_get_priority(unsigned int thread_id); 264 + #define wakeup_thread(list, proto) \ 265 + wakeup_thread_((list), 
(proto)) 266 + #else /* !HAVE_PRIORITY_SCHEDULING */ 267 + #define wakeup_thread(list, proto...) \ 268 + wakeup_thread_((list)); 330 269 #endif /* HAVE_PRIORITY_SCHEDULING */ 270 + 331 271 #ifdef HAVE_IO_PRIORITY 332 272 void thread_set_io_priority(unsigned int thread_id, int io_priority); 333 273 int thread_get_io_priority(unsigned int thread_id); ··· 339 279 /* Return the id of the calling thread. */ 340 280 unsigned int thread_self(void); 341 281 342 - /* Return the thread_entry for the calling thread. 343 - * INTERNAL: Intended for use by kernel and not for programs. */ 282 + /* Return the thread_entry for the calling thread */ 344 283 struct thread_entry* thread_self_entry(void); 345 284 346 - /* Debugging info - only! */ 347 - int thread_stack_usage(const struct thread_entry *thread); 348 - #if NUM_CORES > 1 349 - int idle_stack_usage(unsigned int core); 350 - #endif 351 - void thread_get_name(char *buffer, int size, 352 - struct thread_entry *thread); 285 + /* Return thread entry from id */ 286 + struct thread_entry *thread_id_entry(unsigned int thread_id); 287 + 353 288 #ifdef RB_PROFILE 354 289 void profile_thread(void); 355 290 #endif 356 291 357 - #endif /* THREAD_H */ 292 + #endif /* THREAD_INTERNAL_H */
+26 -374
firmware/kernel/thread.c
··· 28 28 #undef _FORTIFY_SOURCE 29 29 #endif 30 30 31 - #include <stdbool.h> 32 - #include <stdio.h> 33 - #include "thread.h" 34 - #include "panic.h" 35 - #include "system.h" 31 + #include "thread-internal.h" 36 32 #include "kernel.h" 37 33 #include "cpu.h" 38 34 #include "string.h" ··· 40 36 #include <profile.h> 41 37 #endif 42 38 #include "core_alloc.h" 43 - #include "gcc_extensions.h" 44 - #include "corelock.h" 45 39 46 40 /**************************************************************************** 47 41 * ATTENTION!! * ··· 131 125 132 126 /* Cast to the the machine pointer size, whose size could be < 4 or > 32 133 127 * (someday :). */ 134 - #define DEADBEEF ((uintptr_t)0xdeadbeefdeadbeefull) 135 128 static struct core_entry cores[NUM_CORES] IBSS_ATTR; 136 129 struct thread_entry threads[MAXTHREADS] IBSS_ATTR; 137 130 ··· 204 197 * End Processor-specific section 205 198 ***************************************************************************/ 206 199 207 - #if THREAD_EXTRA_CHECKS 208 - static void thread_panicf(const char *msg, struct thread_entry *thread) 200 + static NO_INLINE 201 + void thread_panicf(const char *msg, struct thread_entry *thread) 209 202 { 210 203 IF_COP( const unsigned int core = thread->core; ) 211 - static char name[32]; 212 - thread_get_name(name, 32, thread); 204 + static char namebuf[sizeof (((struct thread_debug_info *)0)->name)]; 205 + const char *name = thread->name; 206 + if (!name) 207 + name = ""; 208 + snprintf(namebuf, sizeof (namebuf), *name ? 
"%s" : "%s%08lX", 209 + name, (unsigned long)thread->id); 213 210 panicf ("%s %s" IF_COP(" (%d)"), msg, name IF_COP(, core)); 214 211 } 212 + 215 213 static void thread_stkov(struct thread_entry *thread) 216 214 { 217 215 thread_panicf("Stkov", thread); 218 216 } 217 + 218 + #if THREAD_EXTRA_CHECKS 219 219 #define THREAD_PANICF(msg, thread) \ 220 220 thread_panicf(msg, thread) 221 221 #define THREAD_ASSERT(exp, msg, thread) \ 222 222 ({ if (!({ exp; })) thread_panicf((msg), (thread)); }) 223 223 #else 224 - static void thread_stkov(struct thread_entry *thread) 225 - { 226 - IF_COP( const unsigned int core = thread->core; ) 227 - static char name[32]; 228 - thread_get_name(name, 32, thread); 229 - panicf("Stkov %s" IF_COP(" (%d)"), name IF_COP(, core)); 230 - } 231 - #define THREAD_PANICF(msg, thread) 232 - #define THREAD_ASSERT(exp, msg, thread) 224 + #define THREAD_PANICF(msg, thread) \ 225 + do {} while (0) 226 + #define THREAD_ASSERT(exp, msg, thread) \ 227 + do {} while (0) 233 228 #endif /* THREAD_EXTRA_CHECKS */ 234 229 235 - /* Thread locking */ 236 - #if NUM_CORES > 1 237 - #define LOCK_THREAD(thread) \ 238 - ({ corelock_lock(&(thread)->slot_cl); }) 239 - #define TRY_LOCK_THREAD(thread) \ 240 - ({ corelock_try_lock(&(thread)->slot_cl); }) 241 - #define UNLOCK_THREAD(thread) \ 242 - ({ corelock_unlock(&(thread)->slot_cl); }) 243 - #define UNLOCK_THREAD_AT_TASK_SWITCH(thread) \ 244 - ({ unsigned int _core = (thread)->core; \ 245 - cores[_core].blk_ops.flags |= TBOP_UNLOCK_CORELOCK; \ 246 - cores[_core].blk_ops.cl_p = &(thread)->slot_cl; }) 247 - #else 248 - #define LOCK_THREAD(thread) \ 249 - ({ (void)(thread); }) 250 - #define TRY_LOCK_THREAD(thread) \ 251 - ({ (void)(thread); }) 252 - #define UNLOCK_THREAD(thread) \ 253 - ({ (void)(thread); }) 254 - #define UNLOCK_THREAD_AT_TASK_SWITCH(thread) \ 255 - ({ (void)(thread); }) 256 - #endif 257 - 258 230 /* RTR list */ 259 231 #define RTR_LOCK(core) \ 260 232 ({ corelock_lock(&cores[core].rtr_cl); }) ··· 993 965 
inherit_priority(bl, bl, blt, newblpr); 994 966 } 995 967 996 - /*--------------------------------------------------------------------------- 997 - * No threads must be blocked waiting for this thread except for it to exit. 998 - * The alternative is more elaborate cleanup and object registration code. 999 - * Check this for risk of silent data corruption when objects with 1000 - * inheritable blocking are abandoned by the owner - not precise but may 1001 - * catch something. 1002 - *--------------------------------------------------------------------------- 1003 - */ 1004 - static void __attribute__((noinline)) check_for_obj_waiters( 1005 - const char *function, struct thread_entry *thread) 1006 - { 1007 - /* Only one bit in the mask should be set with a frequency on 1 which 1008 - * represents the thread's own base priority */ 1009 - if (priobit_popcount(&thread->pdist.mask) != 1 || 1010 - thread->pdist.hist[priobit_ffs(&thread->pdist.mask)] > 1) 1011 - { 1012 - unsigned char name[32]; 1013 - thread_get_name(name, 32, thread); 1014 - panicf("%s->%s with obj. waiters", function, name); 1015 - } 1016 - } 1017 968 #endif /* HAVE_PRIORITY_SCHEDULING */ 1018 969 1019 970 /*--------------------------------------------------------------------------- ··· 1520 1471 } 1521 1472 1522 1473 /*--------------------------------------------------------------------------- 1523 - * Wakeup an entire queue of threads - returns bitwise-or of return bitmask 1524 - * from each operation or THREAD_NONE of nothing was awakened. Object owning 1525 - * the queue must be locked first. 1526 - * 1527 - * INTERNAL: Intended for use by kernel objects and not for programs. 
1528 - *--------------------------------------------------------------------------- 1529 - */ 1530 - unsigned int thread_queue_wake(struct thread_entry **list) 1531 - { 1532 - unsigned result = THREAD_NONE; 1533 - 1534 - for (;;) 1535 - { 1536 - unsigned int rc = wakeup_thread(list, WAKEUP_DEFAULT); 1537 - 1538 - if (rc == THREAD_NONE) 1539 - break; /* No more threads */ 1540 - 1541 - result |= rc; 1542 - } 1543 - 1544 - return result; 1545 - } 1546 - 1547 - /*--------------------------------------------------------------------------- 1548 1474 * Assign the thread slot a new ID. Version is 0x00000100..0xffffff00. 1549 1475 *--------------------------------------------------------------------------- 1550 1476 */ ··· 1580 1506 struct thread_entry *t = &threads[n]; 1581 1507 LOCK_THREAD(t); 1582 1508 1583 - if (t->state == STATE_KILLED IF_COP( && t->name != THREAD_DESTRUCT )) 1509 + if (t->state == STATE_KILLED) 1584 1510 { 1585 1511 /* Slot is empty - leave it locked and caller will unlock */ 1586 1512 thread = t; ··· 1836 1762 corelock_lock(&current->waiter_cl); 1837 1763 LOCK_THREAD(current); 1838 1764 1839 - #if defined (ALLOW_REMOVE_THREAD) && NUM_CORES > 1 1840 - if (current->name == THREAD_DESTRUCT) 1841 - { 1842 - /* Thread being killed - become a waiter */ 1843 - unsigned int id = current->id; 1844 - UNLOCK_THREAD(current); 1845 - corelock_unlock(&current->waiter_cl); 1846 - thread_wait(id); 1847 - THREAD_PANICF("thread_exit->WK:*R", current); 1848 - } 1849 - #endif 1850 - 1851 1765 #ifdef HAVE_PRIORITY_SCHEDULING 1852 - check_for_obj_waiters("thread_exit", current); 1853 - #endif 1766 + /* Only one bit in the mask should be set with a frequency on 1 which 1767 + * represents the thread's own base priority otherwise threads are waiting 1768 + * on an abandoned object */ 1769 + if (priobit_popcount(&current->pdist.mask) != 1 || 1770 + current->pdist.hist[priobit_ffs(&current->pdist.mask)] > 1) 1771 + thread_panicf("abandon ship!", current); 1772 + #endif /* 
HAVE_PRIORITY_SCHEDULING */ 1854 1773 1855 1774 if (current->tmo.prev != NULL) 1856 1775 { ··· 1872 1791 thread_final_exit(current); 1873 1792 } 1874 1793 1875 - #ifdef ALLOW_REMOVE_THREAD 1876 - /*--------------------------------------------------------------------------- 1877 - * Remove a thread from the scheduler. Not The Right Way to Do Things in 1878 - * normal programs. 1879 - * 1880 - * Parameter is the ID as returned from create_thread(). 1881 - * 1882 - * Use with care on threads that are not under careful control as this may 1883 - * leave various objects in an undefined state. 1884 - *--------------------------------------------------------------------------- 1885 - */ 1886 - void remove_thread(unsigned int thread_id) 1887 - { 1888 - #ifdef HAVE_CORELOCK_OBJECT 1889 - /* core is not constant here because of core switching */ 1890 - unsigned int core = CURRENT_CORE; 1891 - unsigned int old_core = NUM_CORES; 1892 - struct corelock *ocl = NULL; 1893 - #else 1894 - const unsigned int core = CURRENT_CORE; 1895 - #endif 1896 - struct thread_entry *current = cores[core].running; 1897 - struct thread_entry *thread = thread_id_entry(thread_id); 1898 - 1899 - unsigned state; 1900 - int oldlevel; 1901 - 1902 - if (thread == current) 1903 - thread_exit(); /* Current thread - do normal exit */ 1904 - 1905 - oldlevel = disable_irq_save(); 1906 - 1907 - corelock_lock(&thread->waiter_cl); 1908 - LOCK_THREAD(thread); 1909 - 1910 - state = thread->state; 1911 - 1912 - if (thread->id != thread_id || state == STATE_KILLED) 1913 - goto thread_killed; 1914 - 1915 - #if NUM_CORES > 1 1916 - if (thread->name == THREAD_DESTRUCT) 1917 - { 1918 - /* Thread being killed - become a waiter */ 1919 - UNLOCK_THREAD(thread); 1920 - corelock_unlock(&thread->waiter_cl); 1921 - restore_irq(oldlevel); 1922 - thread_wait(thread_id); 1923 - return; 1924 - } 1925 - 1926 - thread->name = THREAD_DESTRUCT; /* Slot can't be used for now */ 1927 - 1928 - #ifdef HAVE_PRIORITY_SCHEDULING 1929 - 
check_for_obj_waiters("remove_thread", thread); 1930 - #endif 1931 - 1932 - if (thread->core != core) 1933 - { 1934 - /* Switch cores and safely extract the thread there */ 1935 - /* Slot HAS to be unlocked or a deadlock could occur which means other 1936 - * threads have to be guided into becoming thread waiters if they 1937 - * attempt to remove it. */ 1938 - unsigned int new_core = thread->core; 1939 - 1940 - corelock_unlock(&thread->waiter_cl); 1941 - 1942 - UNLOCK_THREAD(thread); 1943 - restore_irq(oldlevel); 1944 - 1945 - old_core = switch_core(new_core); 1946 - 1947 - oldlevel = disable_irq_save(); 1948 - 1949 - corelock_lock(&thread->waiter_cl); 1950 - LOCK_THREAD(thread); 1951 - 1952 - state = thread->state; 1953 - core = new_core; 1954 - /* Perform the extraction and switch ourselves back to the original 1955 - processor */ 1956 - } 1957 - #endif /* NUM_CORES > 1 */ 1958 - 1959 - if (thread->tmo.prev != NULL) 1960 - { 1961 - /* Clean thread off the timeout list if a timeout check hasn't 1962 - * run yet */ 1963 - remove_from_list_tmo(thread); 1964 - } 1965 - 1966 - #ifdef HAVE_SCHEDULER_BOOSTCTRL 1967 - /* Cancel CPU boost if any */ 1968 - boost_thread(thread, false); 1969 - #endif 1970 - 1971 - IF_COP( retry_state: ) 1972 - 1973 - switch (state) 1974 - { 1975 - case STATE_RUNNING: 1976 - RTR_LOCK(core); 1977 - /* Remove thread from ready to run tasks */ 1978 - remove_from_list_l(&cores[core].running, thread); 1979 - rtr_subtract_entry(core, thread->priority); 1980 - RTR_UNLOCK(core); 1981 - break; 1982 - case STATE_BLOCKED: 1983 - case STATE_BLOCKED_W_TMO: 1984 - /* Remove thread from the queue it's blocked on - including its 1985 - * own if waiting there */ 1986 - #if NUM_CORES > 1 1987 - if (&thread->waiter_cl != thread->obj_cl) 1988 - { 1989 - ocl = thread->obj_cl; 1990 - 1991 - if (UNLIKELY(corelock_try_lock(ocl) == 0)) 1992 - { 1993 - UNLOCK_THREAD(thread); 1994 - corelock_lock(ocl); 1995 - LOCK_THREAD(thread); 1996 - 1997 - if 
(UNLIKELY(thread->state != state)) 1998 - { 1999 - /* Something woke the thread */ 2000 - state = thread->state; 2001 - corelock_unlock(ocl); 2002 - goto retry_state; 2003 - } 2004 - } 2005 - } 2006 - #endif 2007 - #ifdef HAVE_WAKEUP_EXT_CB 2008 - if (thread->wakeup_ext_cb != NULL) 2009 - thread->wakeup_ext_cb(thread); 2010 - #endif 2011 - 2012 - #ifdef HAVE_PRIORITY_SCHEDULING 2013 - /* Remove thread's priority influence from its chain if needed */ 2014 - if (thread->blocker != NULL) 2015 - wakeup_priority_protocol_release(thread); 2016 - else 2017 - #endif 2018 - remove_from_list_l(thread->bqp, thread); 2019 - 2020 - #if NUM_CORES > 1 2021 - if (ocl != NULL) 2022 - corelock_unlock(ocl); 2023 - #endif 2024 - break; 2025 - /* Otherwise thread is frozen and hasn't run yet */ 2026 - } 2027 - 2028 - new_thread_id(thread_id, thread); 2029 - thread->state = STATE_KILLED; 2030 - 2031 - /* If thread was waiting on itself, it will have been removed above. 2032 - * The wrong order would result in waking the thread first and deadlocking 2033 - * since the slot is already locked. */ 2034 - thread_queue_wake(&thread->queue); 2035 - 2036 - thread->name = NULL; 2037 - 2038 - thread_killed: /* Thread was already killed */ 2039 - /* Removal complete - safe to unlock and reenable interrupts */ 2040 - corelock_unlock(&thread->waiter_cl); 2041 - UNLOCK_THREAD(thread); 2042 - restore_irq(oldlevel); 2043 - 2044 - #if NUM_CORES > 1 2045 - if (old_core < NUM_CORES) 2046 - { 2047 - /* Did a removal on another processor's thread - switch back to 2048 - native core */ 2049 - switch_core(old_core); 2050 - } 2051 - #endif 2052 - } 2053 - #endif /* ALLOW_REMOVE_THREAD */ 2054 - 2055 1794 #ifdef HAVE_PRIORITY_SCHEDULING 2056 1795 /*--------------------------------------------------------------------------- 2057 1796 * Sets the thread's relative base priority for the core it runs on. 
Any ··· 2205 1944 return core; 2206 1945 } 2207 1946 2208 - int oldlevel = disable_irq_save(); 1947 + disable_irq(); 2209 1948 LOCK_THREAD(current); 2210 1949 2211 - if (current->name == THREAD_DESTRUCT) 2212 - { 2213 - /* Thread being killed - deactivate and let process complete */ 2214 - unsigned int id = current->id; 2215 - UNLOCK_THREAD(current); 2216 - restore_irq(oldlevel); 2217 - thread_wait(id); 2218 - /* Should never be reached */ 2219 - THREAD_PANICF("switch_core->D:*R", current); 2220 - } 2221 - 2222 1950 /* Get us off the running list for the current core */ 2223 1951 RTR_LOCK(core); 2224 1952 remove_from_list_l(&cores[core].running, current); ··· 2274 2002 * are safe to perform. 2275 2003 *--------------------------------------------------------------------------- 2276 2004 */ 2277 - void init_threads(void) 2005 + void INIT_ATTR init_threads(void) 2278 2006 { 2279 2007 const unsigned int core = CURRENT_CORE; 2280 2008 struct thread_entry *thread; ··· 2351 2079 #ifdef INIT_MAIN_THREAD 2352 2080 init_main_thread(&thread->context); 2353 2081 #endif 2354 - } 2355 - 2356 - /* Shared stack scan helper for thread_stack_usage and idle_stack_usage */ 2357 - #if NUM_CORES == 1 2358 - static inline int stack_usage(uintptr_t *stackptr, size_t stack_size) 2359 - #else 2360 - static int stack_usage(uintptr_t *stackptr, size_t stack_size) 2361 - #endif 2362 - { 2363 - unsigned int stack_words = stack_size / sizeof (uintptr_t); 2364 - unsigned int i; 2365 - int usage = 0; 2366 - 2367 - for (i = 0; i < stack_words; i++) 2368 - { 2369 - if (stackptr[i] != DEADBEEF) 2370 - { 2371 - usage = ((stack_words - i) * 100) / stack_words; 2372 - break; 2373 - } 2374 - } 2375 - 2376 - return usage; 2377 - } 2378 - 2379 - /*--------------------------------------------------------------------------- 2380 - * Returns the maximum percentage of stack a thread ever used while running. 
2381 - * NOTE: Some large buffer allocations that don't use enough the buffer to 2382 - * overwrite stackptr[0] will not be seen. 2383 - *--------------------------------------------------------------------------- 2384 - */ 2385 - int thread_stack_usage(const struct thread_entry *thread) 2386 - { 2387 - if (LIKELY(thread->stack_size > 0)) 2388 - return stack_usage(thread->stack, thread->stack_size); 2389 - return 0; 2390 - } 2391 - 2392 - #if NUM_CORES > 1 2393 - /*--------------------------------------------------------------------------- 2394 - * Returns the maximum percentage of the core's idle stack ever used during 2395 - * runtime. 2396 - *--------------------------------------------------------------------------- 2397 - */ 2398 - int idle_stack_usage(unsigned int core) 2399 - { 2400 - return stack_usage(idle_stacks[core], IDLE_STACK_SIZE); 2401 - } 2402 - #endif 2403 - 2404 - /*--------------------------------------------------------------------------- 2405 - * Fills in the buffer with the specified thread's name. If the name is NULL, 2406 - * empty, or the thread is in destruct state a formatted ID is written 2407 - * instead. 2408 - *--------------------------------------------------------------------------- 2409 - */ 2410 - void thread_get_name(char *buffer, int size, 2411 - struct thread_entry *thread) 2412 - { 2413 - if (size <= 0) 2414 - return; 2415 - 2416 - *buffer = '\0'; 2417 - 2418 - if (thread) 2419 - { 2420 - /* Display thread name if one or ID if none */ 2421 - const char *name = thread->name; 2422 - const char *fmt = "%s"; 2423 - if (name == NULL IF_COP(|| name == THREAD_DESTRUCT) || *name == '\0') 2424 - { 2425 - name = (const char *)(uintptr_t)thread->id; 2426 - fmt = "%04lX"; 2427 - } 2428 - snprintf(buffer, size, fmt, name); 2429 - } 2430 2082 } 2431 2083 2432 2084 /* Unless otherwise defined, do nothing */
+1 -1
firmware/libc/errno.c
··· 1 - #include "thread.h" 1 + #include "../thread-internal.h" 2 2 int * __errno(void) 3 3 { 4 4 return &thread_self_entry()->__errno;
+3 -7
firmware/target/arm/pp/thread-pp.c
··· 45 45 extern uintptr_t cpu_idlestackend[]; 46 46 extern uintptr_t cop_idlestackbegin[]; 47 47 extern uintptr_t cop_idlestackend[]; 48 - static uintptr_t * const idle_stacks[NUM_CORES] = 48 + uintptr_t * const idle_stacks[NUM_CORES] = 49 49 { 50 50 [CPU] = cpu_idlestackbegin, 51 51 [COP] = cop_idlestackbegin ··· 92 92 { 93 93 asm volatile ( 94 94 "cmp %1, #0 \n" /* CPU? */ 95 - "ldrne r0, =commit_dcache \n" /* No? write back data */ 96 - "movne lr, pc \n" 97 - "bxne r0 \n" 95 + "blne commit_dcache \n" 98 96 "mov r0, %0 \n" /* copy thread parameter */ 99 97 "mov sp, %2 \n" /* switch to idle stack */ 100 98 "bl thread_final_exit_do \n" /* finish removal */ ··· 163 161 "ldr sp, [r0, #32] \n" /* Reload original sp from context structure */ 164 162 "mov r1, #0 \n" /* Clear start address */ 165 163 "str r1, [r0, #40] \n" 166 - "ldr r0, =commit_discard_idcache \n" /* Invalidate new core's cache */ 167 - "mov lr, pc \n" 168 - "bx r0 \n" 164 + "bl commit_discard_idcache \n" /* Invalidate new core's cache */ 169 165 "ldmfd sp!, { r4-r11, pc } \n" /* Restore non-volatile context to new core and return */ 170 166 : : "i"(IDLE_STACK_WORDS) 171 167 );
+8 -72
firmware/target/hosted/sdl/thread-sdl.c
··· 28 28 #include <setjmp.h> 29 29 #include "system-sdl.h" 30 30 #include "thread-sdl.h" 31 - #include "system.h" 32 - #include "kernel.h" 33 - #include "thread.h" 34 - #include "debug.h" 31 + #include "../kernel-internal.h" 35 32 #include "core_alloc.h" 36 33 37 34 /* Define this as 1 to show informational messages that are not errors. */ ··· 165 162 /* Initialize SDL threading */ 166 163 void init_threads(void) 167 164 { 165 + static uintptr_t main_stack[] = { DEADBEEF, 0 }; 168 166 struct thread_entry *thread; 169 167 int n; 170 168 ··· 187 185 then create the SDL thread - it is possible to have a quick, early 188 186 shutdown try to access the structure. */ 189 187 thread = &threads[0]; 190 - thread->stack = (uintptr_t *)" "; 191 - thread->stack_size = 8; 188 + thread->stack = main_stack; 189 + thread->stack_size = sizeof (main_stack); 192 190 thread->name = "main"; 193 191 thread->state = STATE_RUNNING; 194 192 thread->context.s = SDL_CreateSemaphore(0); ··· 439 437 return THREAD_NONE; 440 438 } 441 439 442 - unsigned int thread_queue_wake(struct thread_entry **list) 443 - { 444 - unsigned int result = THREAD_NONE; 445 - 446 - for (;;) 447 - { 448 - unsigned int rc = wakeup_thread_(list); 449 - 450 - if (rc == THREAD_NONE) 451 - break; 452 - 453 - result |= rc; 454 - } 455 - 456 - return result; 457 - } 458 - 459 440 void thread_thaw(unsigned int thread_id) 460 441 { 461 442 struct thread_entry *thread = thread_id_entry(thread_id); ··· 542 523 return 0; 543 524 } 544 525 526 + unsigned int stack_words = stack_size / sizeof (uintptr_t); 527 + for (unsigned int i = stack_words; i-- > 0;) 528 + ((uintptr_t *)stack)[i] = DEADBEEF; 529 + 545 530 thread->stack = stack; 546 531 thread->stack_size = stack_size; 547 532 thread->name = name; ··· 557 542 return thread->id; 558 543 } 559 544 560 - #ifndef ALLOW_REMOVE_THREAD 561 545 static void remove_thread(unsigned int thread_id) 562 - #else 563 - void remove_thread(unsigned int thread_id) 564 - #endif 565 546 { 566 
547 struct thread_entry *current = cores[CURRENT_CORE].running; 567 548 struct thread_entry *thread = thread_id_entry(thread_id); ··· 657 638 } 658 639 } 659 640 660 - int thread_stack_usage(const struct thread_entry *thread) 661 - { 662 - return 50; 663 - (void)thread; 664 - } 665 - 666 - /* Return name if one or ID if none */ 667 - void thread_get_name(char *buffer, int size, 668 - struct thread_entry *thread) 669 - { 670 - if (size <= 0) 671 - return; 672 - 673 - *buffer = '\0'; 674 - 675 - if (thread) 676 - { 677 - /* Display thread name if one or ID if none */ 678 - bool named = thread->name && *thread->name; 679 - const char *fmt = named ? "%s" : "%04lX"; 680 - intptr_t name = named ? 681 - (intptr_t)thread->name : (intptr_t)thread->id; 682 - snprintf(buffer, size, fmt, name); 683 - } 684 - } 685 - 686 - /* Unless otherwise defined, do nothing */ 687 - #ifndef YIELD_KERNEL_HOOK 688 - #define YIELD_KERNEL_HOOK() false 689 - #endif 690 - #ifndef SLEEP_KERNEL_HOOK 691 - #define SLEEP_KERNEL_HOOK(ticks) false 692 - #endif 693 - 694 - 695 641 /*--------------------------------------------------------------------------- 696 642 * Suspends a thread's execution for at least the specified number of ticks. 697 643 * ··· 707 653 */ 708 654 unsigned sleep(unsigned ticks) 709 655 { 710 - /* In certain situations, certain bootloaders in particular, a normal 711 - * threading call is inappropriate. */ 712 - if (SLEEP_KERNEL_HOOK(ticks)) 713 - return 0; /* Handled */ 714 - 715 656 disable_irq(); 716 657 sleep_thread(ticks); 717 658 switch_thread(); ··· 725 666 */ 726 667 void yield(void) 727 668 { 728 - /* In certain situations, certain bootloaders in particular, a normal 729 - * threading call is inappropriate. */ 730 - if (YIELD_KERNEL_HOOK()) 731 - return; /* handled */ 732 - 733 669 switch_thread(); 734 670 }