A modern Music Player Daemon based on Rockbox open source high quality audio player
libadwaita audio rust zig deno mpris rockbox mpd

arm: add initial ARM Cortex-M support

M-profile cores manage interrupts differently from classic cores
and lack FIQ entirely. Split the interrupt management parts out into
separate headers, but keep the endian-swapping routines (which are
not profile-dependent) in the common system-arm header.

The initial part of the vector table is common to all Cortex-M
CPUs and is intended to be placed by the target linker script,
with the vendor-specific part of the vector table appended after it.

Change-Id: Ib2ad5b9dc41db27940e39033cfef4308923db66d

+520 -256
+1 -1
apps/main.c
··· 500 500 power_init(); 501 501 502 502 enable_irq(); 503 - #ifdef CPU_ARM 503 + #if defined(CPU_ARM_CLASSIC) 504 504 enable_fiq(); 505 505 #endif 506 506 /* current_tick should be ticking by now */
+8 -2
firmware/SOURCES
··· 609 609 target/arm/bits-armv6.S 610 610 target/arm/mmu-armv6.S 611 611 # endif 612 - target/arm/system-arm.c 612 + 613 + #if defined(CPU_ARM_CLASSIC) 614 + target/arm/system-arm-classic.c 615 + #elif defined(CPU_ARM_MICRO) 616 + target/arm/system-arm-micro.c 617 + target/arm/vectors-arm-micro.S 618 + #endif 613 619 614 620 #if CONFIG_STORAGE & STORAGE_ATA 615 621 # ifdef CPU_PP502x ··· 828 834 target/arm/imx233/crt0.S 829 835 #elif CONFIG_CPU==RK27XX 830 836 target/arm/rk27xx/crt0.S 831 - #elif defined(CPU_ARM) 837 + #elif defined(CPU_ARM_CLASSIC) 832 838 target/arm/crt0.S 833 839 #endif /* defined(CPU_*) */ 834 840
+1 -1
firmware/panic.c
··· 86 86 87 87 #if (CONFIG_PLATFORM & PLATFORM_NATIVE) 88 88 /* Disable interrupts */ 89 - #ifdef CPU_ARM 89 + #if defined(CPU_ARM_CLASSIC) 90 90 disable_interrupt(IRQ_FIQ_STATUS); 91 91 #else 92 92 set_irq_level(DISABLE_INTERRUPTS);
+1 -1
firmware/rolo.c
··· 332 332 #endif 333 333 334 334 #if CONFIG_CPU != IMX31L /* We're not finished yet */ 335 - #ifdef CPU_ARM 335 + #if defined(CPU_ARM_CLASSIC) 336 336 /* Should do these together since some ARM version should never have 337 337 * FIQ disabled and not IRQ (imx31 errata). */ 338 338 disable_interrupt(IRQ_FIQ_STATUS);
+280
firmware/target/arm/system-arm-classic.h
··· 1 + /*************************************************************************** 2 + * __________ __ ___. 3 + * Open \______ \ ____ ____ | | _\_ |__ _______ ___ 4 + * Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ / 5 + * Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < < 6 + * Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \ 7 + * \/ \/ \/ \/ \/ 8 + * $Id$ 9 + * 10 + * Copyright (C) 2002 by Alan Korr 11 + * 12 + * This program is free software; you can redistribute it and/or 13 + * modify it under the terms of the GNU General Public License 14 + * as published by the Free Software Foundation; either version 2 15 + * of the License, or (at your option) any later version. 16 + * 17 + * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY 18 + * KIND, either express or implied. 19 + * 20 + ****************************************************************************/ 21 + #ifndef SYSTEM_ARM_CLASSIC_H 22 + #define SYSTEM_ARM_CLASSIC_H 23 + 24 + #define IRQ_ENABLED 0x00 25 + #define IRQ_DISABLED 0x80 26 + #define IRQ_STATUS 0x80 27 + #define FIQ_ENABLED 0x00 28 + #define FIQ_DISABLED 0x40 29 + #define FIQ_STATUS 0x40 30 + #define IRQ_FIQ_ENABLED 0x00 31 + #define IRQ_FIQ_DISABLED 0xc0 32 + #define IRQ_FIQ_STATUS 0xc0 33 + #define HIGHEST_IRQ_LEVEL IRQ_DISABLED 34 + 35 + #define set_irq_level(status) \ 36 + set_interrupt_status((status), IRQ_STATUS) 37 + #define set_fiq_status(status) \ 38 + set_interrupt_status((status), FIQ_STATUS) 39 + 40 + #define disable_irq_save() \ 41 + disable_interrupt_save(IRQ_STATUS) 42 + #define disable_fiq_save() \ 43 + disable_interrupt_save(FIQ_STATUS) 44 + 45 + #define restore_irq(cpsr) \ 46 + restore_interrupt(cpsr) 47 + #define restore_fiq(cpsr) \ 48 + restore_interrupt(cpsr) 49 + 50 + #define disable_irq() \ 51 + disable_interrupt(IRQ_STATUS) 52 + #define enable_irq() \ 53 + enable_interrupt(IRQ_STATUS) 54 + #define disable_fiq() \ 55 + disable_interrupt(FIQ_STATUS) 56 + #define enable_fiq() \ 57 + 
enable_interrupt(FIQ_STATUS) 58 + 59 + #define irq_enabled() \ 60 + interrupt_enabled(IRQ_STATUS) 61 + #define fiq_enabled() \ 62 + interrupt_enabled(FIQ_STATUS) 63 + #define ints_enabled() \ 64 + interrupt_enabled(IRQ_FIQ_STATUS) 65 + 66 + #define irq_enabled_checkval(val) \ 67 + (((val) & IRQ_STATUS) == 0) 68 + #define fiq_enabled_checkval(val) \ 69 + (((val) & FIQ_STATUS) == 0) 70 + #define ints_enabled_checkval(val) \ 71 + (((val) & IRQ_FIQ_STATUS) == 0) 72 + 73 + #define CPU_MODE_USER 0x10 74 + #define CPU_MODE_FIQ 0x11 75 + #define CPU_MODE_IRQ 0x12 76 + #define CPU_MODE_SVC 0x13 77 + #define CPU_MODE_ABT 0x17 78 + #define CPU_MODE_UNDEF 0x1b 79 + #define CPU_MODE_SYS 0x1f 80 + 81 + /* We run in SYS mode */ 82 + #define CPU_MODE_THREAD_CONTEXT CPU_MODE_SYS 83 + 84 + #define is_thread_context() \ 85 + (get_processor_mode() == CPU_MODE_THREAD_CONTEXT) 86 + 87 + /* Assert that the processor is in the desired execution mode 88 + * mode: Processor mode value to test for 89 + * rstatus...: Provide if you already have the value saved, otherwise leave 90 + * blank to get it automatically. 91 + */ 92 + #define ASSERT_CPU_MODE(mode, rstatus...) \ 93 + ({ unsigned long __massert = (mode); \ 94 + unsigned long __mproc = *#rstatus ? \ 95 + ((rstatus +0) & 0x1f) : get_processor_mode(); \ 96 + if (__mproc != __massert) \ 97 + panicf("Incorrect CPU mode in %s (0x%02lx!=0x%02lx)", \ 98 + __func__, __mproc, __massert); }) 99 + 100 + /* Core-level interrupt masking */ 101 + 102 + static inline int set_interrupt_status(int status, int mask) 103 + { 104 + unsigned long cpsr; 105 + int oldstatus; 106 + /* Read the old levels and set the new ones */ 107 + #if (defined(CREATIVE_ZVM) || defined(CREATIVE_ZV)) && defined(BOOTLOADER) 108 + // FIXME: This workaround is for a problem with inlining; 109 + // for some reason 'mask' gets treated as a variable/non-immediate constant 110 + // but only on this build. 
All others (including the nearly-identical mrobe500boot) are fine 111 + asm volatile ( 112 + "mrs %1, cpsr \n" 113 + "bic %0, %1, %[mask] \n" 114 + "orr %0, %0, %2 \n" 115 + "msr cpsr_c, %0 \n" 116 + : "=&r,r"(cpsr), "=&r,r"(oldstatus) 117 + : "r,i"(status & mask), [mask]"r,i"(mask)); 118 + #else 119 + asm volatile ( 120 + "mrs %1, cpsr \n" 121 + "bic %0, %1, %[mask] \n" 122 + "orr %0, %0, %2 \n" 123 + "msr cpsr_c, %0 \n" 124 + : "=&r,r"(cpsr), "=&r,r"(oldstatus) 125 + : "r,i"(status & mask), [mask]"i,i"(mask)); 126 + #endif 127 + return oldstatus; 128 + } 129 + 130 + static inline void restore_interrupt(int cpsr) 131 + { 132 + /* Set cpsr_c from value returned by disable_interrupt_save 133 + * or set_interrupt_status */ 134 + asm volatile ("msr cpsr_c, %0" : : "r"(cpsr)); 135 + } 136 + 137 + static inline bool interrupt_enabled(int status) 138 + { 139 + unsigned long cpsr; 140 + asm ("mrs %0, cpsr" : "=r"(cpsr)); 141 + return (cpsr & status) == 0; 142 + } 143 + 144 + static inline unsigned long get_processor_mode(void) 145 + { 146 + unsigned long cpsr; 147 + asm ("mrs %0, cpsr" : "=r"(cpsr)); 148 + return cpsr & 0x1f; 149 + } 150 + 151 + /* ARM_ARCH version section for architecture*/ 152 + 153 + #if ARM_ARCH >= 6 154 + static inline void enable_interrupt(int mask) 155 + { 156 + /* Clear I and/or F disable bit */ 157 + /* mask is expected to be constant and so only relevent branch 158 + * is preserved */ 159 + switch (mask & IRQ_FIQ_STATUS) 160 + { 161 + case IRQ_STATUS: 162 + asm volatile ("cpsie i"); 163 + break; 164 + case FIQ_STATUS: 165 + asm volatile ("cpsie f"); 166 + break; 167 + case IRQ_FIQ_STATUS: 168 + asm volatile ("cpsie if"); 169 + break; 170 + } 171 + } 172 + 173 + static inline void disable_interrupt(int mask) 174 + { 175 + /* Set I and/or F disable bit */ 176 + /* mask is expected to be constant and so only relevent branch 177 + * is preserved */ 178 + switch (mask & IRQ_FIQ_STATUS) 179 + { 180 + case IRQ_STATUS: 181 + asm volatile ("cpsid i"); 
182 + break; 183 + case FIQ_STATUS: 184 + asm volatile ("cpsid f"); 185 + break; 186 + case IRQ_FIQ_STATUS: 187 + asm volatile ("cpsid if"); 188 + break; 189 + } 190 + } 191 + 192 + static inline int disable_interrupt_save(int mask) 193 + { 194 + /* Set I and/or F disable bit and return old cpsr value */ 195 + int cpsr; 196 + /* mask is expected to be constant and so only relevent branch 197 + * is preserved */ 198 + asm volatile("mrs %0, cpsr" : "=r"(cpsr)); 199 + switch (mask & IRQ_FIQ_STATUS) 200 + { 201 + case IRQ_STATUS: 202 + asm volatile ("cpsid i"); 203 + break; 204 + case FIQ_STATUS: 205 + asm volatile ("cpsid f"); 206 + break; 207 + case IRQ_FIQ_STATUS: 208 + asm volatile ("cpsid if"); 209 + break; 210 + } 211 + return cpsr; 212 + } 213 + 214 + #else /* ARM_ARCH < 6 */ 215 + 216 + static inline void enable_interrupt(int mask) 217 + { 218 + /* Clear I and/or F disable bit */ 219 + int tmp; 220 + asm volatile ( 221 + "mrs %0, cpsr \n" 222 + "bic %0, %0, %1 \n" 223 + "msr cpsr_c, %0 \n" 224 + : "=&r"(tmp) : "i"(mask)); 225 + } 226 + 227 + static inline void disable_interrupt(int mask) 228 + { 229 + /* Set I and/or F disable bit */ 230 + int tmp; 231 + asm volatile ( 232 + "mrs %0, cpsr \n" 233 + "orr %0, %0, %1 \n" 234 + "msr cpsr_c, %0 \n" 235 + : "=&r"(tmp) : "i"(mask)); 236 + } 237 + 238 + static inline int disable_interrupt_save(int mask) 239 + { 240 + /* Set I and/or F disable bit and return old cpsr value */ 241 + int cpsr, tmp; 242 + asm volatile ( 243 + "mrs %1, cpsr \n" 244 + "orr %0, %1, %2 \n" 245 + "msr cpsr_c, %0 \n" 246 + : "=&r"(tmp), "=&r"(cpsr) 247 + : "i"(mask)); 248 + return cpsr; 249 + } 250 + 251 + #endif /* ARM_ARCH */ 252 + 253 + #if defined(CPU_TCC780X) /* Single core only for now */ \ 254 + || CONFIG_CPU == IMX31L || CONFIG_CPU == DM320 || CONFIG_CPU == AS3525 \ 255 + || CONFIG_CPU == S3C2440 || CONFIG_CPU == S5L8701 || CONFIG_CPU == AS3525v2 \ 256 + || CONFIG_CPU == S5L8702 || CONFIG_CPU == S5L8720 257 + /* Use the generic 
ARMv4/v5/v6 wait for IRQ */ 258 + static inline void core_sleep(void) 259 + { 260 + asm volatile ( 261 + "mcr p15, 0, %0, c7, c0, 4 \n" /* Wait for interrupt */ 262 + #if CONFIG_CPU == IMX31L 263 + "nop\n nop\n nop\n nop\n nop\n" /* Clean out the pipes */ 264 + #endif 265 + : : "r"(0) 266 + ); 267 + enable_irq(); 268 + } 269 + #else 270 + /* Skip this if special code is required and implemented */ 271 + #if !(defined(CPU_PP)) && CONFIG_CPU != RK27XX && CONFIG_CPU != IMX233 272 + static inline void core_sleep(void) 273 + { 274 + /* TODO: core_sleep not implemented, battery life will be decreased */ 275 + enable_irq(); 276 + } 277 + #endif /* CPU_PP */ 278 + #endif 279 + 280 + #endif /* SYSTEM_ARM_CLASSIC_H */
+58
firmware/target/arm/system-arm-micro.c
··· 1 + /*************************************************************************** 2 + * __________ __ ___. 3 + * Open \______ \ ____ ____ | | _\_ |__ _______ ___ 4 + * Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ / 5 + * Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < < 6 + * Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \ 7 + * \/ \/ \/ \/ \/ 8 + * $Id$ 9 + * 10 + * Copyright (C) 2025 by Aidan MacDonald 11 + * 12 + * This program is free software; you can redistribute it and/or 13 + * modify it under the terms of the GNU General Public License 14 + * as published by the Free Software Foundation; either version 2 15 + * of the License, or (at your option) any later version. 16 + * 17 + * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY 18 + * KIND, either express or implied. 19 + * 20 + ****************************************************************************/ 21 + #include "config.h" 22 + #include "system.h" 23 + 24 + struct armv7m_exception_frame 25 + { 26 + unsigned long r0; 27 + unsigned long r1; 28 + unsigned long r2; 29 + unsigned long r3; 30 + unsigned long r12; 31 + unsigned long lr; 32 + unsigned long pc; 33 + unsigned long xpsr; 34 + }; 35 + 36 + void UIE(void) 37 + { 38 + while (1); 39 + } 40 + 41 + #define ATTR_IRQ_HANDLER __attribute__((weak, alias("UIE"))) 42 + 43 + void nmi_handler(void) ATTR_IRQ_HANDLER; 44 + void hardfault_handler(void) ATTR_IRQ_HANDLER; 45 + void pendsv_handler(void) ATTR_IRQ_HANDLER; 46 + void svcall_handler(void) ATTR_IRQ_HANDLER; 47 + void systick_handler(void) ATTR_IRQ_HANDLER; 48 + 49 + #if ARCH_VERSION >= 7 50 + void memmanage_handler(void) ATTR_IRQ_HANDLER; 51 + void busfault_handler(void) ATTR_IRQ_HANDLER; 52 + void usagefault_handler(void) ATTR_IRQ_HANDLER; 53 + void debugmonitor_handler(void) ATTR_IRQ_HANDLER; 54 + #endif 55 + 56 + #if ARCH_VERSION >= 8 57 + void securefault_handler(void) ATTR_IRQ_HANDLER; 58 + #endif
+99
firmware/target/arm/system-arm-micro.h
··· 1 + /*************************************************************************** 2 + * __________ __ ___. 3 + * Open \______ \ ____ ____ | | _\_ |__ _______ ___ 4 + * Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ / 5 + * Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < < 6 + * Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \ 7 + * \/ \/ \/ \/ \/ 8 + * $Id$ 9 + * 10 + * Copyright (C) 2025 by Aidan MacDonald 11 + * 12 + * This program is free software; you can redistribute it and/or 13 + * modify it under the terms of the GNU General Public License 14 + * as published by the Free Software Foundation; either version 2 15 + * of the License, or (at your option) any later version. 16 + * 17 + * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY 18 + * KIND, either express or implied. 19 + * 20 + ****************************************************************************/ 21 + #ifndef SYSTEM_ARM_MICRO_H 22 + #define SYSTEM_ARM_MICRO_H 23 + 24 + #define IRQ_ENABLED 0x00 25 + #define IRQ_DISABLED 0x01 26 + #define IRQ_STATUS 0x01 27 + #define HIGHEST_IRQ_LEVEL IRQ_DISABLED 28 + 29 + #define disable_irq_save() \ 30 + set_irq_level(IRQ_DISABLED) 31 + 32 + /* For compatibility with ARM classic */ 33 + #define CPU_MODE_THREAD_CONTEXT 0 34 + 35 + #define is_thread_context() \ 36 + (get_interrupt_number() == CPU_MODE_THREAD_CONTEXT) 37 + 38 + /* Assert that the processor is in the desired execution mode 39 + * mode: Processor mode value to test for 40 + * rstatus...: Ignored; for compatibility with ARM classic only. 41 + */ 42 + #define ASSERT_CPU_MODE(mode, rstatus...) 
\ 43 + ({ unsigned long __massert = (mode); \ 44 + unsigned long __mproc = get_interrupt_number(); \ 45 + if (__mproc != __massert) \ 46 + panicf("Incorrect CPU mode in %s (0x%02lx!=0x%02lx)", \ 47 + __func__, __mproc, __massert); }) 48 + 49 + /* Core-level interrupt masking */ 50 + 51 + static inline int set_irq_level(int primask) 52 + { 53 + int oldvalue; 54 + 55 + asm volatile ("mrs %0, primask\n" 56 + "msr primask, %1\n" 57 + : "=r"(oldvalue) : "r"(primask)); 58 + 59 + return oldvalue; 60 + } 61 + 62 + static inline void restore_irq(int primask) 63 + { 64 + asm volatile ("msr primask, %0" :: "r"(primask)); 65 + } 66 + 67 + static inline void enable_irq(void) 68 + { 69 + asm volatile ("cpsie i"); 70 + } 71 + 72 + static inline void disable_irq(void) 73 + { 74 + asm volatile ("cpsid i"); 75 + } 76 + 77 + static inline bool irq_enabled(void) 78 + { 79 + int primask; 80 + asm volatile ("mrs %0, primask" : "=r"(primask)); 81 + 82 + return !(primask & 1); 83 + } 84 + 85 + static inline unsigned long get_interrupt_number(void) 86 + { 87 + unsigned long ipsr; 88 + asm volatile ("mrs %0, ipsr" : "=r"(ipsr)); 89 + 90 + return ipsr & 0x1ff; 91 + } 92 + 93 + static inline void core_sleep(void) 94 + { 95 + asm volatile ("wfi"); 96 + enable_irq(); 97 + } 98 + 99 + #endif /* SYSTEM_ARM_MICRO_H */
firmware/target/arm/system-arm.c firmware/target/arm/system-arm-classic.c
+8 -251
firmware/target/arm/system-arm.h
··· 21 21 #ifndef SYSTEM_ARM_H 22 22 #define SYSTEM_ARM_H 23 23 24 + #if ARM_PROFILE == ARM_PROFILE_CLASSIC 25 + # include "system-arm-classic.h" 26 + #elif ARM_PROFILE == ARM_PROFILE_MICRO 27 + # include "system-arm-micro.h" 28 + #else 29 + # error "Unknown ARM architecture profile!" 30 + #endif 31 + 24 32 /* Common to all ARM_ARCH */ 25 33 #define nop \ 26 34 asm volatile ("nop") ··· 29 37 void __div0(void); 30 38 #endif 31 39 32 - #define IRQ_ENABLED 0x00 33 - #define IRQ_DISABLED 0x80 34 - #define IRQ_STATUS 0x80 35 - #define FIQ_ENABLED 0x00 36 - #define FIQ_DISABLED 0x40 37 - #define FIQ_STATUS 0x40 38 - #define IRQ_FIQ_ENABLED 0x00 39 - #define IRQ_FIQ_DISABLED 0xc0 40 - #define IRQ_FIQ_STATUS 0xc0 41 - #define HIGHEST_IRQ_LEVEL IRQ_DISABLED 42 - 43 - #define set_irq_level(status) \ 44 - set_interrupt_status((status), IRQ_STATUS) 45 - #define set_fiq_status(status) \ 46 - set_interrupt_status((status), FIQ_STATUS) 47 - 48 - #define disable_irq_save() \ 49 - disable_interrupt_save(IRQ_STATUS) 50 - #define disable_fiq_save() \ 51 - disable_interrupt_save(FIQ_STATUS) 52 - 53 - #define restore_irq(cpsr) \ 54 - restore_interrupt(cpsr) 55 - #define restore_fiq(cpsr) \ 56 - restore_interrupt(cpsr) 57 - 58 - #define disable_irq() \ 59 - disable_interrupt(IRQ_STATUS) 60 - #define enable_irq() \ 61 - enable_interrupt(IRQ_STATUS) 62 - #define disable_fiq() \ 63 - disable_interrupt(FIQ_STATUS) 64 - #define enable_fiq() \ 65 - enable_interrupt(FIQ_STATUS) 66 - 67 - #define irq_enabled() \ 68 - interrupt_enabled(IRQ_STATUS) 69 - #define fiq_enabled() \ 70 - interrupt_enabled(FIQ_STATUS) 71 - #define ints_enabled() \ 72 - interrupt_enabled(IRQ_FIQ_STATUS) 73 - 74 - #define irq_enabled_checkval(val) \ 75 - (((val) & IRQ_STATUS) == 0) 76 - #define fiq_enabled_checkval(val) \ 77 - (((val) & FIQ_STATUS) == 0) 78 - #define ints_enabled_checkval(val) \ 79 - (((val) & IRQ_FIQ_STATUS) == 0) 80 - 81 - #define CPU_MODE_USER 0x10 82 - #define CPU_MODE_FIQ 0x11 83 - #define 
CPU_MODE_IRQ 0x12 84 - #define CPU_MODE_SVC 0x13 85 - #define CPU_MODE_ABT 0x17 86 - #define CPU_MODE_UNDEF 0x1b 87 - #define CPU_MODE_SYS 0x1f 88 - 89 - /* We run in SYS mode */ 90 - #define CPU_MODE_THREAD_CONTEXT CPU_MODE_SYS 91 - 92 - #define is_thread_context() \ 93 - (get_processor_mode() == CPU_MODE_THREAD_CONTEXT) 94 - 95 - /* Assert that the processor is in the desired execution mode 96 - * mode: Processor mode value to test for 97 - * rstatus...: Provide if you already have the value saved, otherwise leave 98 - * blank to get it automatically. 99 - */ 100 - #define ASSERT_CPU_MODE(mode, rstatus...) \ 101 - ({ unsigned long __massert = (mode); \ 102 - unsigned long __mproc = *#rstatus ? \ 103 - ((rstatus +0) & 0x1f) : get_processor_mode(); \ 104 - if (__mproc != __massert) \ 105 - panicf("Incorrect CPU mode in %s (0x%02lx!=0x%02lx)", \ 106 - __func__, __mproc, __massert); }) 107 - 108 - /* Core-level interrupt masking */ 109 - 110 - static inline int set_interrupt_status(int status, int mask) 111 - { 112 - unsigned long cpsr; 113 - int oldstatus; 114 - /* Read the old levels and set the new ones */ 115 - #if (defined(CREATIVE_ZVM) ||defined(CREATIVE_ZV)) && defined(BOOTLOADER) 116 - // FIXME: This workaround is for a problem with inlining; 117 - // for some reason 'mask' gets treated as a variable/non-immediate constant 118 - // but only on this build. 
All others (including the nearly-identical mrobe500boot) are fine 119 - asm volatile ( 120 - "mrs %1, cpsr \n" 121 - "bic %0, %1, %[mask] \n" 122 - "orr %0, %0, %2 \n" 123 - "msr cpsr_c, %0 \n" 124 - : "=&r,r"(cpsr), "=&r,r"(oldstatus) 125 - : "r,i"(status & mask), [mask]"r,i"(mask)); 126 - #else 127 - asm volatile ( 128 - "mrs %1, cpsr \n" 129 - "bic %0, %1, %[mask] \n" 130 - "orr %0, %0, %2 \n" 131 - "msr cpsr_c, %0 \n" 132 - : "=&r,r"(cpsr), "=&r,r"(oldstatus) 133 - : "r,i"(status & mask), [mask]"i,i"(mask)); 134 - #endif 135 - return oldstatus; 136 - } 137 - 138 - static inline void restore_interrupt(int cpsr) 139 - { 140 - /* Set cpsr_c from value returned by disable_interrupt_save 141 - * or set_interrupt_status */ 142 - asm volatile ("msr cpsr_c, %0" : : "r"(cpsr)); 143 - } 144 - 145 - static inline bool interrupt_enabled(int status) 146 - { 147 - unsigned long cpsr; 148 - asm ("mrs %0, cpsr" : "=r"(cpsr)); 149 - return (cpsr & status) == 0; 150 - } 151 - 152 - static inline unsigned long get_processor_mode(void) 153 - { 154 - unsigned long cpsr; 155 - asm ("mrs %0, cpsr" : "=r"(cpsr)); 156 - return cpsr & 0x1f; 157 - } 158 - 159 - /* ARM_ARCH version section for architecture*/ 160 - 161 40 #if ARM_ARCH >= 6 162 41 static inline uint16_t swap16_hw(uint16_t value) 163 42 /* ··· 197 76 return retval; 198 77 } 199 78 200 - static inline void enable_interrupt(int mask) 201 - { 202 - /* Clear I and/or F disable bit */ 203 - /* mask is expected to be constant and so only relevent branch 204 - * is preserved */ 205 - switch (mask & IRQ_FIQ_STATUS) 206 - { 207 - case IRQ_STATUS: 208 - asm volatile ("cpsie i"); 209 - break; 210 - case FIQ_STATUS: 211 - asm volatile ("cpsie f"); 212 - break; 213 - case IRQ_FIQ_STATUS: 214 - asm volatile ("cpsie if"); 215 - break; 216 - } 217 - } 218 - 219 - static inline void disable_interrupt(int mask) 220 - { 221 - /* Set I and/or F disable bit */ 222 - /* mask is expected to be constant and so only relevent branch 223 - * is 
preserved */ 224 - switch (mask & IRQ_FIQ_STATUS) 225 - { 226 - case IRQ_STATUS: 227 - asm volatile ("cpsid i"); 228 - break; 229 - case FIQ_STATUS: 230 - asm volatile ("cpsid f"); 231 - break; 232 - case IRQ_FIQ_STATUS: 233 - asm volatile ("cpsid if"); 234 - break; 235 - } 236 - } 237 - 238 - static inline int disable_interrupt_save(int mask) 239 - { 240 - /* Set I and/or F disable bit and return old cpsr value */ 241 - int cpsr; 242 - /* mask is expected to be constant and so only relevent branch 243 - * is preserved */ 244 - asm volatile("mrs %0, cpsr" : "=r"(cpsr)); 245 - switch (mask & IRQ_FIQ_STATUS) 246 - { 247 - case IRQ_STATUS: 248 - asm volatile ("cpsid i"); 249 - break; 250 - case FIQ_STATUS: 251 - asm volatile ("cpsid f"); 252 - break; 253 - case IRQ_FIQ_STATUS: 254 - asm volatile ("cpsid if"); 255 - break; 256 - } 257 - return cpsr; 258 - } 259 - 260 79 #else /* ARM_ARCH < 6 */ 261 80 262 81 static inline uint16_t swap16_hw(uint16_t value) ··· 326 145 return value; 327 146 } 328 147 329 - static inline void enable_interrupt(int mask) 330 - { 331 - /* Clear I and/or F disable bit */ 332 - int tmp; 333 - asm volatile ( 334 - "mrs %0, cpsr \n" 335 - "bic %0, %0, %1 \n" 336 - "msr cpsr_c, %0 \n" 337 - : "=&r"(tmp) : "i"(mask)); 338 - } 339 - 340 - static inline void disable_interrupt(int mask) 341 - { 342 - /* Set I and/or F disable bit */ 343 - int tmp; 344 - asm volatile ( 345 - "mrs %0, cpsr \n" 346 - "orr %0, %0, %1 \n" 347 - "msr cpsr_c, %0 \n" 348 - : "=&r"(tmp) : "i"(mask)); 349 - } 350 - 351 - static inline int disable_interrupt_save(int mask) 352 - { 353 - /* Set I and/or F disable bit and return old cpsr value */ 354 - int cpsr, tmp; 355 - asm volatile ( 356 - "mrs %1, cpsr \n" 357 - "orr %0, %1, %2 \n" 358 - "msr cpsr_c, %0 \n" 359 - : "=&r"(tmp), "=&r"(cpsr) 360 - : "i"(mask)); 361 - return cpsr; 362 - } 363 - 364 148 #endif /* ARM_ARCH */ 365 149 366 150 static inline uint32_t swaw32_hw(uint32_t value) ··· 383 167 #endif 384 168 385 169 } 386 
- 387 - #if defined(CPU_TCC780X) /* Single core only for now */ \ 388 - || CONFIG_CPU == IMX31L || CONFIG_CPU == DM320 || CONFIG_CPU == AS3525 \ 389 - || CONFIG_CPU == S3C2440 || CONFIG_CPU == S5L8701 || CONFIG_CPU == AS3525v2 \ 390 - || CONFIG_CPU == S5L8702 || CONFIG_CPU == S5L8720 391 - /* Use the generic ARMv4/v5/v6 wait for IRQ */ 392 - static inline void core_sleep(void) 393 - { 394 - asm volatile ( 395 - "mcr p15, 0, %0, c7, c0, 4 \n" /* Wait for interrupt */ 396 - #if CONFIG_CPU == IMX31L 397 - "nop\n nop\n nop\n nop\n nop\n" /* Clean out the pipes */ 398 - #endif 399 - : : "r"(0) 400 - ); 401 - enable_irq(); 402 - } 403 - #else 404 - /* Skip this if special code is required and implemented */ 405 - #if !(defined(CPU_PP)) && CONFIG_CPU != RK27XX && CONFIG_CPU != IMX233 406 - static inline void core_sleep(void) 407 - { 408 - /* TODO: core_sleep not implemented, battery life will be decreased */ 409 - enable_irq(); 410 - } 411 - #endif /* CPU_PP */ 412 - #endif 413 170 414 171 #endif /* SYSTEM_ARM_H */
+64
firmware/target/arm/vectors-arm-micro.S
··· 1 + /*************************************************************************** 2 + * __________ __ ___. 3 + * Open \______ \ ____ ____ | | _\_ |__ _______ ___ 4 + * Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ / 5 + * Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < < 6 + * Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \ 7 + * \/ \/ \/ \/ \/ 8 + * $Id$ 9 + * 10 + * Copyright (C) 2025 by Aidan MacDonald 11 + * 12 + * This program is free software; you can redistribute it and/or 13 + * modify it under the terms of the GNU General Public License 14 + * as published by the Free Software Foundation; either version 2 15 + * of the License, or (at your option) any later version. 16 + * 17 + * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY 18 + * KIND, either express or implied. 19 + * 20 + ****************************************************************************/ 21 + 22 + #include "config.h" 23 + 24 + #if ARCH_VERSION < 7 25 + # define memmanage_handler UIE 26 + # define busfault_handler UIE 27 + # define usagefault_handler UIE 28 + #endif 29 + 30 + #if ARCH_VERSION < 8 31 + # define securefault_handler UIE 32 + #endif 33 + 34 + .syntax unified 35 + .text 36 + 37 + /* 38 + * Architecturally defined exception vectors for ARMv6/7/8-M. 39 + * 40 + * The target linker script is expected to place these in the 41 + * correct location and then append the implementation-specific 42 + * vector table entries, which would normally be defined at the 43 + * manufacturer level of the target tree. 
44 + */ 45 + .section .vectors.arm,"ax",%progbits 46 + 47 + .global __vectors_arm 48 + __vectors_arm: 49 + .word irqstackend /* [ 0] Stack pointer */ 50 + .word reset_handler /* [ 1] Reset */ 51 + .word nmi_handler /* [ 2] Non-maskable interrupt */ 52 + .word hardfault_handler /* [ 3] */ 53 + .word memmanage_handler /* [ 4] (ARMv7-M and later only) */ 54 + .word busfault_handler /* [ 5] (ARMv7-M and later only) */ 55 + .word usagefault_handler /* [ 6] (ARMv7-M and later only) */ 56 + .word securefault_handler /* [ 7] (ARMv8-M only) */ 57 + .word UIE /* [ 8] Reserved */ 58 + .word UIE /* [ 9] Reserved */ 59 + .word UIE /* [10] Reserved */ 60 + .word svcall_handler /* [11] */ 61 + .word debugmonitor_handler /* [12] (ARMv7-M and later only) */ 62 + .word UIE /* [13] Reserved */ 63 + .word pendsv_handler /* [14] */ 64 + .word systick_handler /* [15] (Optional on ARMv6-M) */