qemu with hax to log dma reads & writes jcs.org/2018/11/12/vfio

Merge remote-tracking branch 'remotes/pmaydell/tags/pull-target-arm-20170127' into staging

target-arm queue:
* various minor M profile bugfixes
* aspeed/smc: handle dummy bytes when doing fast reads in command mode
* pflash_cfi01: fix per-device sector length in CFI table
* arm: stellaris: make MII accesses complete immediately
* hw/char/exynos4210_uart: Drop unused local variable frame_size
* arm_gicv3: Fix broken logic in ELRSR calculation
* dma: omap: check dma channel data_type

# gpg: Signature made Fri 27 Jan 2017 15:29:39 GMT
# gpg: using RSA key 0x3C2525ED14360CDE
# gpg: Good signature from "Peter Maydell <peter.maydell@linaro.org>"
# gpg: aka "Peter Maydell <pmaydell@gmail.com>"
# gpg: aka "Peter Maydell <pmaydell@chiark.greenend.org.uk>"
# Primary key fingerprint: E1A5 C593 CD41 9DE2 8E83 15CF 3C25 25ED 1436 0CDE

* remotes/pmaydell/tags/pull-target-arm-20170127: (22 commits)
dma: omap: check dma channel data_type
arm_gicv3: Fix broken logic in ELRSR calculation
hw/char/exynos4210_uart: Drop unused local variable frame_size
arm: stellaris: make MII accesses complete immediately
armv7m: R14 should reset to 0xffffffff
armv7m: FAULTMASK should be 0 on reset
armv7m: Honour CCR.USERSETMPEND
armv7m: Report no-coprocessor faults correctly
armv7m: set CFSR.UNDEFINSTR on undefined instructions
armv7m: honour CCR.STACKALIGN on exception entry
armv7m: implement CCR, CFSR, HFSR, DFSR, BFAR, and MMFAR
armv7m: add state for v7M CCR, CFSR, HFSR, DFSR, MMFAR, BFAR
armv7m_nvic: keep a pointer to the CPU
target/arm: Drop IS_M() macro
pflash_cfi01: fix per-device sector length in CFI table
armv7m: Clear FAULTMASK on return from non-NMI exceptions
armv7m: Fix reads of CONTROL register bit 1
hw/registerfields.h: Pull FIELD etc macros out of hw/register.h
armv7m: Explicit error for bad vector table
armv7m: Replace armv7m.hack with unassigned_access handler
...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>

+386 -169
-8
hw/arm/armv7m.c
··· 180 180 uint64_t entry; 181 181 uint64_t lowaddr; 182 182 int big_endian; 183 - MemoryRegion *hack = g_new(MemoryRegion, 1); 184 183 185 184 if (cpu_model == NULL) { 186 185 cpu_model = "cortex-m3"; ··· 224 223 exit(1); 225 224 } 226 225 } 227 - 228 - /* Hack to map an additional page of ram at the top of the address 229 - space. This stops qemu complaining about executing code outside RAM 230 - when returning from an exception. */ 231 - memory_region_init_ram(hack, NULL, "armv7m.hack", 0x1000, &error_fatal); 232 - vmstate_register_ram_global(hack); 233 - memory_region_add_subregion(system_memory, 0xfffff000, hack); 234 226 235 227 qemu_register_reset(armv7m_reset, cpu); 236 228 return nvic;
+17 -5
hw/block/pflash_cfi01.c
··· 99 99 char *name; 100 100 void *storage; 101 101 VMChangeStateEntry *vmstate; 102 + bool old_multiple_chip_handling; 102 103 }; 103 104 104 105 static int pflash_post_load(void *opaque, int version_id); ··· 703 704 pflash_t *pfl = CFI_PFLASH01(dev); 704 705 uint64_t total_len; 705 706 int ret; 706 - uint64_t blocks_per_device, device_len; 707 + uint64_t blocks_per_device, sector_len_per_device, device_len; 707 708 int num_devices; 708 709 Error *local_err = NULL; 709 710 ··· 726 727 * in the cfi_table[]. 727 728 */ 728 729 num_devices = pfl->device_width ? (pfl->bank_width / pfl->device_width) : 1; 729 - blocks_per_device = pfl->nb_blocs / num_devices; 730 - device_len = pfl->sector_len * blocks_per_device; 730 + if (pfl->old_multiple_chip_handling) { 731 + blocks_per_device = pfl->nb_blocs / num_devices; 732 + sector_len_per_device = pfl->sector_len; 733 + } else { 734 + blocks_per_device = pfl->nb_blocs; 735 + sector_len_per_device = pfl->sector_len / num_devices; 736 + } 737 + device_len = sector_len_per_device * blocks_per_device; 731 738 732 739 /* XXX: to be fixed */ 733 740 #if 0 ··· 832 839 pfl->cfi_table[0x2A] = 0x0B; 833 840 } 834 841 pfl->writeblock_size = 1 << pfl->cfi_table[0x2A]; 842 + if (!pfl->old_multiple_chip_handling && num_devices > 1) { 843 + pfl->writeblock_size *= num_devices; 844 + } 835 845 836 846 pfl->cfi_table[0x2B] = 0x00; 837 847 /* Number of erase block regions (uniform) */ ··· 839 849 /* Erase block region 1 */ 840 850 pfl->cfi_table[0x2D] = blocks_per_device - 1; 841 851 pfl->cfi_table[0x2E] = (blocks_per_device - 1) >> 8; 842 - pfl->cfi_table[0x2F] = pfl->sector_len >> 8; 843 - pfl->cfi_table[0x30] = pfl->sector_len >> 16; 852 + pfl->cfi_table[0x2F] = sector_len_per_device >> 8; 853 + pfl->cfi_table[0x30] = sector_len_per_device >> 16; 844 854 845 855 /* Extended */ 846 856 pfl->cfi_table[0x31] = 'P'; ··· 898 908 DEFINE_PROP_UINT16("id2", struct pflash_t, ident2, 0), 899 909 DEFINE_PROP_UINT16("id3", struct pflash_t, ident3, 
0), 900 910 DEFINE_PROP_STRING("name", struct pflash_t, name), 911 + DEFINE_PROP_BOOL("old-multiple-chip-handling", struct pflash_t, 912 + old_multiple_chip_handling, false), 901 913 DEFINE_PROP_END_OF_LIST(), 902 914 }; 903 915
+1 -5
hw/char/exynos4210_uart.c
··· 306 306 307 307 static void exynos4210_uart_update_parameters(Exynos4210UartState *s) 308 308 { 309 - int speed, parity, data_bits, stop_bits, frame_size; 309 + int speed, parity, data_bits, stop_bits; 310 310 QEMUSerialSetParams ssp; 311 311 uint64_t uclk_rate; 312 312 ··· 314 314 return; 315 315 } 316 316 317 - frame_size = 1; /* start bit */ 318 317 if (s->reg[I_(ULCON)] & 0x20) { 319 - frame_size++; /* parity bit */ 320 318 if (s->reg[I_(ULCON)] & 0x28) { 321 319 parity = 'E'; 322 320 } else { ··· 333 331 } 334 332 335 333 data_bits = (s->reg[I_(ULCON)] & 0x3) + 5; 336 - 337 - frame_size += data_bits + stop_bits; 338 334 339 335 uclk_rate = 24000000; 340 336
+7 -3
hw/dma/omap_dma.c
··· 878 878 ch->burst[0] = (value & 0x0180) >> 7; 879 879 ch->pack[0] = (value & 0x0040) >> 6; 880 880 ch->port[0] = (enum omap_dma_port) ((value & 0x003c) >> 2); 881 - ch->data_type = 1 << (value & 3); 882 881 if (ch->port[0] >= __omap_dma_port_last) 883 882 printf("%s: invalid DMA port %i\n", __FUNCTION__, 884 883 ch->port[0]); 885 884 if (ch->port[1] >= __omap_dma_port_last) 886 885 printf("%s: invalid DMA port %i\n", __FUNCTION__, 887 886 ch->port[1]); 888 - if ((value & 3) == 3) 887 + ch->data_type = 1 << (value & 3); 888 + if ((value & 3) == 3) { 889 889 printf("%s: bad data_type for DMA channel\n", __FUNCTION__); 890 + ch->data_type >>= 1; 891 + } 890 892 break; 891 893 892 894 case 0x02: /* SYS_DMA_CCR_CH0 */ ··· 1988 1990 fprintf(stderr, "%s: bad MReqAddressTranslate sideband signal\n", 1989 1991 __FUNCTION__); 1990 1992 ch->data_type = 1 << (value & 3); 1991 - if ((value & 3) == 3) 1993 + if ((value & 3) == 3) { 1992 1994 printf("%s: bad data_type for DMA channel\n", __FUNCTION__); 1995 + ch->data_type >>= 1; 1996 + } 1993 1997 break; 1994 1998 1995 1999 case 0x14: /* DMA4_CEN */
+1 -1
hw/intc/arm_gicv3_cpuif.c
··· 2430 2430 uint64_t lr = cs->ich_lr_el2[i]; 2431 2431 2432 2432 if ((lr & ICH_LR_EL2_STATE_MASK) == 0 && 2433 - ((lr & ICH_LR_EL2_HW) == 1 || (lr & ICH_LR_EL2_EOI) == 0)) { 2433 + ((lr & ICH_LR_EL2_HW) != 0 || (lr & ICH_LR_EL2_EOI) == 0)) { 2434 2434 value |= (1 << i); 2435 2435 } 2436 2436 }
+41 -17
hw/intc/armv7m_nvic.c
··· 23 23 24 24 typedef struct { 25 25 GICState gic; 26 + ARMCPU *cpu; 26 27 struct { 27 28 uint32_t control; 28 29 uint32_t reload; ··· 155 156 156 157 static uint32_t nvic_readl(nvic_state *s, uint32_t offset) 157 158 { 158 - ARMCPU *cpu; 159 + ARMCPU *cpu = s->cpu; 159 160 uint32_t val; 160 161 int irq; 161 162 ··· 187 188 case 0x1c: /* SysTick Calibration Value. */ 188 189 return 10000; 189 190 case 0xd00: /* CPUID Base. */ 190 - cpu = ARM_CPU(qemu_get_cpu(0)); 191 191 return cpu->midr; 192 192 case 0xd04: /* Interrupt Control State. */ 193 193 /* VECTACTIVE */ 194 - cpu = ARM_CPU(qemu_get_cpu(0)); 195 194 val = cpu->env.v7m.exception; 196 195 if (val == 1023) { 197 196 val = 0; ··· 222 221 val |= (1 << 31); 223 222 return val; 224 223 case 0xd08: /* Vector Table Offset. */ 225 - cpu = ARM_CPU(qemu_get_cpu(0)); 226 224 return cpu->env.v7m.vecbase; 227 225 case 0xd0c: /* Application Interrupt/Reset Control. */ 228 226 return 0xfa050000; ··· 230 228 /* TODO: Implement SLEEPONEXIT. */ 231 229 return 0; 232 230 case 0xd14: /* Configuration Control. */ 233 - /* TODO: Implement Configuration Control bits. */ 234 - return 0; 231 + return cpu->env.v7m.ccr; 235 232 case 0xd24: /* System Handler Status. */ 236 233 val = 0; 237 234 if (s->gic.irq_state[ARMV7M_EXCP_MEM].active) val |= (1 << 0); ··· 250 247 if (s->gic.irq_state[ARMV7M_EXCP_USAGE].enabled) val |= (1 << 18); 251 248 return val; 252 249 case 0xd28: /* Configurable Fault Status. */ 253 - /* TODO: Implement Fault Status. */ 254 - qemu_log_mask(LOG_UNIMP, "Configurable Fault Status unimplemented\n"); 255 - return 0; 250 + return cpu->env.v7m.cfsr; 256 251 case 0xd2c: /* Hard Fault Status. */ 252 + return cpu->env.v7m.hfsr; 257 253 case 0xd30: /* Debug Fault Status. */ 258 - case 0xd34: /* Mem Manage Address. */ 254 + return cpu->env.v7m.dfsr; 255 + case 0xd34: /* MMFAR MemManage Fault Address */ 256 + return cpu->env.v7m.mmfar; 259 257 case 0xd38: /* Bus Fault Address. 
*/ 258 + return cpu->env.v7m.bfar; 260 259 case 0xd3c: /* Aux Fault Status. */ 261 260 /* TODO: Implement fault status registers. */ 262 - qemu_log_mask(LOG_UNIMP, "Fault status registers unimplemented\n"); 261 + qemu_log_mask(LOG_UNIMP, 262 + "Aux Fault status registers unimplemented\n"); 263 263 return 0; 264 264 case 0xd40: /* PFR0. */ 265 265 return 0x00000030; ··· 296 296 297 297 static void nvic_writel(nvic_state *s, uint32_t offset, uint32_t value) 298 298 { 299 - ARMCPU *cpu; 299 + ARMCPU *cpu = s->cpu; 300 300 uint32_t oldval; 301 301 switch (offset) { 302 302 case 0x10: /* SysTick Control and Status. */ ··· 349 349 } 350 350 break; 351 351 case 0xd08: /* Vector Table Offset. */ 352 - cpu = ARM_CPU(qemu_get_cpu(0)); 353 352 cpu->env.v7m.vecbase = value & 0xffffff80; 354 353 break; 355 354 case 0xd0c: /* Application Interrupt/Reset Control. */ ··· 369 368 } 370 369 break; 371 370 case 0xd10: /* System Control. */ 372 - case 0xd14: /* Configuration Control. */ 373 371 /* TODO: Implement control registers. */ 374 - qemu_log_mask(LOG_UNIMP, "NVIC: SCR and CCR unimplemented\n"); 372 + qemu_log_mask(LOG_UNIMP, "NVIC: SCR unimplemented\n"); 373 + break; 374 + case 0xd14: /* Configuration Control. */ 375 + /* Enforce RAZ/WI on reserved and must-RAZ/WI bits */ 376 + value &= (R_V7M_CCR_STKALIGN_MASK | 377 + R_V7M_CCR_BFHFNMIGN_MASK | 378 + R_V7M_CCR_DIV_0_TRP_MASK | 379 + R_V7M_CCR_UNALIGN_TRP_MASK | 380 + R_V7M_CCR_USERSETMPEND_MASK | 381 + R_V7M_CCR_NONBASETHRDENA_MASK); 382 + 383 + cpu->env.v7m.ccr = value; 375 384 break; 376 385 case 0xd24: /* System Handler Control. */ 377 386 /* TODO: Real hardware allows you to set/clear the active bits ··· 381 390 s->gic.irq_state[ARMV7M_EXCP_USAGE].enabled = (value & (1 << 18)) != 0; 382 391 break; 383 392 case 0xd28: /* Configurable Fault Status. */ 393 + cpu->env.v7m.cfsr &= ~value; /* W1C */ 394 + break; 384 395 case 0xd2c: /* Hard Fault Status. 
*/ 396 + cpu->env.v7m.hfsr &= ~value; /* W1C */ 397 + break; 385 398 case 0xd30: /* Debug Fault Status. */ 399 + cpu->env.v7m.dfsr &= ~value; /* W1C */ 400 + break; 386 401 case 0xd34: /* Mem Manage Address. */ 402 + cpu->env.v7m.mmfar = value; 403 + return; 387 404 case 0xd38: /* Bus Fault Address. */ 405 + cpu->env.v7m.bfar = value; 406 + return; 388 407 case 0xd3c: /* Aux Fault Status. */ 389 408 qemu_log_mask(LOG_UNIMP, 390 - "NVIC: fault status registers unimplemented\n"); 409 + "NVIC: Aux fault status registers unimplemented\n"); 391 410 break; 392 411 case 0xf00: /* Software Triggered Interrupt Register */ 393 - if ((value & 0x1ff) < s->num_irq) { 412 + /* user mode can only write to STIR if CCR.USERSETMPEND permits it */ 413 + if ((value & 0x1ff) < s->num_irq && 414 + (arm_current_el(&cpu->env) || 415 + (cpu->env.v7m.ccr & R_V7M_CCR_USERSETMPEND_MASK))) { 394 416 gic_set_pending_private(&s->gic, 0, value & 0x1ff); 395 417 } 396 418 break; ··· 495 517 NVICClass *nc = NVIC_GET_CLASS(s); 496 518 Error *local_err = NULL; 497 519 520 + s->cpu = ARM_CPU(qemu_get_cpu(0)); 521 + assert(s->cpu); 498 522 /* The NVIC always has only one CPU */ 499 523 s->gic.num_cpu = 1; 500 524 /* Tell the common code we're an NVIC */
+4 -1
hw/net/stellaris_enet.c
··· 416 416 s->thr = value; 417 417 break; 418 418 case 0x20: /* MCTL */ 419 - s->mctl = value; 419 + /* TODO: MII registers aren't modelled. 420 + * Clear START, indicating that the operation completes immediately. 421 + */ 422 + s->mctl = value & ~1; 420 423 break; 421 424 case 0x24: /* MDV */ 422 425 s->mdv = value;
+21
hw/ssi/aspeed_smc.c
··· 69 69 #define R_CTRL0 (0x10 / 4) 70 70 #define CTRL_CMD_SHIFT 16 71 71 #define CTRL_CMD_MASK 0xff 72 + #define CTRL_DUMMY_HIGH_SHIFT 14 72 73 #define CTRL_AST2400_SPI_4BYTE (1 << 13) 74 + #define CTRL_DUMMY_LOW_SHIFT 6 /* 2 bits [7:6] */ 73 75 #define CTRL_CE_STOP_ACTIVE (1 << 2) 74 76 #define CTRL_CMD_MODE_MASK 0x3 75 77 #define CTRL_READMODE 0x0 ··· 485 487 return addr; 486 488 } 487 489 490 + static int aspeed_smc_flash_dummies(const AspeedSMCFlash *fl) 491 + { 492 + const AspeedSMCState *s = fl->controller; 493 + uint32_t r_ctrl0 = s->regs[s->r_ctrl0 + fl->id]; 494 + uint32_t dummy_high = (r_ctrl0 >> CTRL_DUMMY_HIGH_SHIFT) & 0x1; 495 + uint32_t dummy_low = (r_ctrl0 >> CTRL_DUMMY_LOW_SHIFT) & 0x3; 496 + 497 + return ((dummy_high << 2) | dummy_low) * 8; 498 + } 499 + 488 500 static void aspeed_smc_flash_send_addr(AspeedSMCFlash *fl, uint32_t addr) 489 501 { 490 502 const AspeedSMCState *s = fl->controller; ··· 520 532 case CTRL_FREADMODE: 521 533 aspeed_smc_flash_select(fl); 522 534 aspeed_smc_flash_send_addr(fl, addr); 535 + 536 + /* 537 + * Use fake transfers to model dummy bytes. The value should 538 + * be configured to some non-zero value in fast read mode and 539 + * zero in read mode. 540 + */ 541 + for (i = 0; i < aspeed_smc_flash_dummies(fl); i++) { 542 + ssi_transfer(fl->controller->spi, 0xFF); 543 + } 523 544 524 545 for (i = 0; i < size; i++) { 525 546 ret |= ssi_transfer(s->spi, 0x0) << (8 * i);
+4
include/hw/compat.h
··· 10 10 .driver = "fw_cfg_io",\ 11 11 .property = "x-file-slots",\ 12 12 .value = stringify(0x10),\ 13 + },{\ 14 + .driver = "pflash_cfi01",\ 15 + .property = "old-multiple-chip-handling",\ 16 + .value = "on",\ 13 17 }, 14 18 15 19 #define HW_COMPAT_2_7 \
+1 -46
include/hw/register.h
··· 13 13 14 14 #include "hw/qdev-core.h" 15 15 #include "exec/memory.h" 16 + #include "hw/registerfields.h" 16 17 17 18 typedef struct RegisterInfo RegisterInfo; 18 19 typedef struct RegisterAccessInfo RegisterAccessInfo; ··· 205 206 */ 206 207 207 208 void register_finalize_block(RegisterInfoArray *r_array); 208 - 209 - /* Define constants for a 32 bit register */ 210 - 211 - /* This macro will define A_FOO, for the byte address of a register 212 - * as well as R_FOO for the uint32_t[] register number (A_FOO / 4). 213 - */ 214 - #define REG32(reg, addr) \ 215 - enum { A_ ## reg = (addr) }; \ 216 - enum { R_ ## reg = (addr) / 4 }; 217 - 218 - /* Define SHIFT, LENGTH and MASK constants for a field within a register */ 219 - 220 - /* This macro will define FOO_BAR_MASK, FOO_BAR_SHIFT and FOO_BAR_LENGTH 221 - * constants for field BAR in register FOO. 222 - */ 223 - #define FIELD(reg, field, shift, length) \ 224 - enum { R_ ## reg ## _ ## field ## _SHIFT = (shift)}; \ 225 - enum { R_ ## reg ## _ ## field ## _LENGTH = (length)}; \ 226 - enum { R_ ## reg ## _ ## field ## _MASK = \ 227 - MAKE_64BIT_MASK(shift, length)}; 228 - 229 - /* Extract a field from a register */ 230 - #define FIELD_EX32(storage, reg, field) \ 231 - extract32((storage), R_ ## reg ## _ ## field ## _SHIFT, \ 232 - R_ ## reg ## _ ## field ## _LENGTH) 233 - 234 - /* Extract a field from an array of registers */ 235 - #define ARRAY_FIELD_EX32(regs, reg, field) \ 236 - FIELD_EX32((regs)[R_ ## reg], reg, field) 237 - 238 - /* Deposit a register field. 239 - * Assigning values larger then the target field will result in 240 - * compilation warnings. 
241 - */ 242 - #define FIELD_DP32(storage, reg, field, val) ({ \ 243 - struct { \ 244 - unsigned int v:R_ ## reg ## _ ## field ## _LENGTH; \ 245 - } v = { .v = val }; \ 246 - uint32_t d; \ 247 - d = deposit32((storage), R_ ## reg ## _ ## field ## _SHIFT, \ 248 - R_ ## reg ## _ ## field ## _LENGTH, v.v); \ 249 - d; }) 250 - 251 - /* Deposit a field to array of registers. */ 252 - #define ARRAY_FIELD_DP32(regs, reg, field, val) \ 253 - (regs)[R_ ## reg] = FIELD_DP32((regs)[R_ ## reg], reg, field, val); 254 209 255 210 #endif
+60
include/hw/registerfields.h
··· 1 + /* 2 + * Register Definition API: field macros 3 + * 4 + * Copyright (c) 2016 Xilinx Inc. 5 + * Copyright (c) 2013 Peter Crosthwaite <peter.crosthwaite@xilinx.com> 6 + * 7 + * This work is licensed under the terms of the GNU GPL, version 2. See 8 + * the COPYING file in the top-level directory. 9 + */ 10 + 11 + #ifndef REGISTERFIELDS_H 12 + #define REGISTERFIELDS_H 13 + 14 + /* Define constants for a 32 bit register */ 15 + 16 + /* This macro will define A_FOO, for the byte address of a register 17 + * as well as R_FOO for the uint32_t[] register number (A_FOO / 4). 18 + */ 19 + #define REG32(reg, addr) \ 20 + enum { A_ ## reg = (addr) }; \ 21 + enum { R_ ## reg = (addr) / 4 }; 22 + 23 + /* Define SHIFT, LENGTH and MASK constants for a field within a register */ 24 + 25 + /* This macro will define FOO_BAR_MASK, FOO_BAR_SHIFT and FOO_BAR_LENGTH 26 + * constants for field BAR in register FOO. 27 + */ 28 + #define FIELD(reg, field, shift, length) \ 29 + enum { R_ ## reg ## _ ## field ## _SHIFT = (shift)}; \ 30 + enum { R_ ## reg ## _ ## field ## _LENGTH = (length)}; \ 31 + enum { R_ ## reg ## _ ## field ## _MASK = \ 32 + MAKE_64BIT_MASK(shift, length)}; 33 + 34 + /* Extract a field from a register */ 35 + #define FIELD_EX32(storage, reg, field) \ 36 + extract32((storage), R_ ## reg ## _ ## field ## _SHIFT, \ 37 + R_ ## reg ## _ ## field ## _LENGTH) 38 + 39 + /* Extract a field from an array of registers */ 40 + #define ARRAY_FIELD_EX32(regs, reg, field) \ 41 + FIELD_EX32((regs)[R_ ## reg], reg, field) 42 + 43 + /* Deposit a register field. 44 + * Assigning values larger then the target field will result in 45 + * compilation warnings. 
46 + */ 47 + #define FIELD_DP32(storage, reg, field, val) ({ \ 48 + struct { \ 49 + unsigned int v:R_ ## reg ## _ ## field ## _LENGTH; \ 50 + } v = { .v = val }; \ 51 + uint32_t d; \ 52 + d = deposit32((storage), R_ ## reg ## _ ## field ## _SHIFT, \ 53 + R_ ## reg ## _ ## field ## _LENGTH, v.v); \ 54 + d; }) 55 + 56 + /* Deposit a field to array of registers. */ 57 + #define ARRAY_FIELD_DP32(regs, reg, field, val) \ 58 + (regs)[R_ ## reg] = FIELD_DP32((regs)[R_ ## reg], reg, field, val); 59 + 60 + #endif
+1
linux-user/main.c
··· 573 573 574 574 switch(trapnr) { 575 575 case EXCP_UDEF: 576 + case EXCP_NOCP: 576 577 { 577 578 TaskState *ts = cs->opaque; 578 579 uint32_t opcode;
+45 -5
target/arm/cpu.c
··· 179 179 /* SVC mode with interrupts disabled. */ 180 180 env->uncached_cpsr = ARM_CPU_MODE_SVC; 181 181 env->daif = PSTATE_D | PSTATE_A | PSTATE_I | PSTATE_F; 182 - /* On ARMv7-M the CPSR_I is the value of the PRIMASK register, and is 183 - * clear at reset. Initial SP and PC are loaded from ROM. 184 - */ 185 - if (IS_M(env)) { 182 + 183 + if (arm_feature(env, ARM_FEATURE_M)) { 186 184 uint32_t initial_msp; /* Loaded from 0x0 */ 187 185 uint32_t initial_pc; /* Loaded from 0x4 */ 188 186 uint8_t *rom; 189 187 190 - env->daif &= ~PSTATE_I; 188 + /* For M profile we store FAULTMASK and PRIMASK in the 189 + * PSTATE F and I bits; these are both clear at reset. 190 + */ 191 + env->daif &= ~(PSTATE_I | PSTATE_F); 192 + 193 + /* The reset value of this bit is IMPDEF, but ARM recommends 194 + * that it resets to 1, so QEMU always does that rather than making 195 + * it dependent on CPU model. 196 + */ 197 + env->v7m.ccr = R_V7M_CCR_STKALIGN_MASK; 198 + 199 + /* Unlike A/R profile, M profile defines the reset LR value */ 200 + env->regs[14] = 0xffffffff; 201 + 202 + /* Load the initial SP and PC from the vector table at address 0 */ 191 203 rom = rom_ptr(0); 192 204 if (rom) { 193 205 /* Address zero is covered by ROM which hasn't yet been ··· 292 304 } 293 305 294 306 #if !defined(CONFIG_USER_ONLY) || !defined(TARGET_AARCH64) 307 + static void arm_v7m_unassigned_access(CPUState *cpu, hwaddr addr, 308 + bool is_write, bool is_exec, int opaque, 309 + unsigned size) 310 + { 311 + ARMCPU *arm = ARM_CPU(cpu); 312 + CPUARMState *env = &arm->env; 313 + 314 + /* ARMv7-M interrupt return works by loading a magic value into the PC. 315 + * On real hardware the load causes the return to occur. The qemu 316 + * implementation performs the jump normally, then does the exception 317 + * return by throwing a special exception when the CPU tries to 318 + * execute code at the magic address. 
319 + */ 320 + if (env->v7m.exception != 0 && addr >= 0xfffffff0 && is_exec) { 321 + cpu->exception_index = EXCP_EXCEPTION_EXIT; 322 + cpu_loop_exit(cpu); 323 + } 324 + 325 + /* In real hardware an attempt to access parts of the address space 326 + * with nothing there will usually cause an external abort. 327 + * However our QEMU board models are often missing device models where 328 + * the guest can boot anyway with the default read-as-zero/writes-ignored 329 + * behaviour that you get without a QEMU unassigned_access hook. 330 + * So just return here to retain that default behaviour. 331 + */ 332 + } 333 + 295 334 static bool arm_v7m_cpu_exec_interrupt(CPUState *cs, int interrupt_request) 296 335 { 297 336 CPUClass *cc = CPU_GET_CLASS(cs); ··· 1016 1055 cc->do_interrupt = arm_v7m_cpu_do_interrupt; 1017 1056 #endif 1018 1057 1058 + cc->do_unassigned_access = arm_v7m_unassigned_access; 1019 1059 cc->cpu_exec_interrupt = arm_v7m_cpu_exec_interrupt; 1020 1060 } 1021 1061
+55 -7
target/arm/cpu.h
··· 21 21 #define ARM_CPU_H 22 22 23 23 #include "kvm-consts.h" 24 + #include "hw/registerfields.h" 24 25 25 26 #if defined(TARGET_AARCH64) 26 27 /* AArch64 definitions */ ··· 52 53 #define EXCP_VIRQ 14 53 54 #define EXCP_VFIQ 15 54 55 #define EXCP_SEMIHOST 16 /* semihosting call */ 56 + #define EXCP_NOCP 17 /* v7M NOCP UsageFault */ 55 57 56 58 #define ARMV7M_EXCP_RESET 1 57 59 #define ARMV7M_EXCP_NMI 2 ··· 405 407 uint32_t vecbase; 406 408 uint32_t basepri; 407 409 uint32_t control; 408 - int current_sp; 410 + uint32_t ccr; /* Configuration and Control */ 411 + uint32_t cfsr; /* Configurable Fault Status */ 412 + uint32_t hfsr; /* HardFault Status */ 413 + uint32_t dfsr; /* Debug Fault Status Register */ 414 + uint32_t mmfar; /* MemManage Fault Address */ 415 + uint32_t bfar; /* BusFault Address */ 409 416 int exception; 410 417 } v7m; 411 418 ··· 1087 1094 #define ARM_IWMMXT_wCGR2 10 1088 1095 #define ARM_IWMMXT_wCGR3 11 1089 1096 1097 + /* V7M CCR bits */ 1098 + FIELD(V7M_CCR, NONBASETHRDENA, 0, 1) 1099 + FIELD(V7M_CCR, USERSETMPEND, 1, 1) 1100 + FIELD(V7M_CCR, UNALIGN_TRP, 3, 1) 1101 + FIELD(V7M_CCR, DIV_0_TRP, 4, 1) 1102 + FIELD(V7M_CCR, BFHFNMIGN, 8, 1) 1103 + FIELD(V7M_CCR, STKALIGN, 9, 1) 1104 + FIELD(V7M_CCR, DC, 16, 1) 1105 + FIELD(V7M_CCR, IC, 17, 1) 1106 + 1107 + /* V7M CFSR bits for MMFSR */ 1108 + FIELD(V7M_CFSR, IACCVIOL, 0, 1) 1109 + FIELD(V7M_CFSR, DACCVIOL, 1, 1) 1110 + FIELD(V7M_CFSR, MUNSTKERR, 3, 1) 1111 + FIELD(V7M_CFSR, MSTKERR, 4, 1) 1112 + FIELD(V7M_CFSR, MLSPERR, 5, 1) 1113 + FIELD(V7M_CFSR, MMARVALID, 7, 1) 1114 + 1115 + /* V7M CFSR bits for BFSR */ 1116 + FIELD(V7M_CFSR, IBUSERR, 8 + 0, 1) 1117 + FIELD(V7M_CFSR, PRECISERR, 8 + 1, 1) 1118 + FIELD(V7M_CFSR, IMPRECISERR, 8 + 2, 1) 1119 + FIELD(V7M_CFSR, UNSTKERR, 8 + 3, 1) 1120 + FIELD(V7M_CFSR, STKERR, 8 + 4, 1) 1121 + FIELD(V7M_CFSR, LSPERR, 8 + 5, 1) 1122 + FIELD(V7M_CFSR, BFARVALID, 8 + 7, 1) 1123 + 1124 + /* V7M CFSR bits for UFSR */ 1125 + FIELD(V7M_CFSR, UNDEFINSTR, 16 + 0, 1) 1126 
+ FIELD(V7M_CFSR, INVSTATE, 16 + 1, 1) 1127 + FIELD(V7M_CFSR, INVPC, 16 + 2, 1) 1128 + FIELD(V7M_CFSR, NOCP, 16 + 3, 1) 1129 + FIELD(V7M_CFSR, UNALIGNED, 16 + 8, 1) 1130 + FIELD(V7M_CFSR, DIVBYZERO, 16 + 9, 1) 1131 + 1132 + /* V7M HFSR bits */ 1133 + FIELD(V7M_HFSR, VECTTBL, 1, 1) 1134 + FIELD(V7M_HFSR, FORCED, 30, 1) 1135 + FIELD(V7M_HFSR, DEBUGEVT, 31, 1) 1136 + 1137 + /* V7M DFSR bits */ 1138 + FIELD(V7M_DFSR, HALTED, 0, 1) 1139 + FIELD(V7M_DFSR, BKPT, 1, 1) 1140 + FIELD(V7M_DFSR, DWTTRAP, 2, 1) 1141 + FIELD(V7M_DFSR, VCATCH, 3, 1) 1142 + FIELD(V7M_DFSR, EXTERNAL, 4, 1) 1143 + 1090 1144 /* If adding a feature bit which corresponds to a Linux ELF 1091 1145 * HWCAP bit, remember to update the feature-bit-to-hwcap 1092 1146 * mapping in linux-user/elfload.c:get_elf_hwcap(). ··· 1762 1816 * reading all registers in the list. 1763 1817 */ 1764 1818 bool write_cpustate_to_list(ARMCPU *cpu); 1765 - 1766 - /* Does the core conform to the "MicroController" profile. e.g. Cortex-M3. 1767 - Note the M in older cores (eg. ARM7TDMI) stands for Multiply. These are 1768 - conventional cores (ie. Application or Realtime profile). */ 1769 - 1770 - #define IS_M(env) arm_feature(env, ARM_FEATURE_M) 1771 1819 1772 1820 #define ARM_CPUID_TI915T 0x54029152 1773 1821 #define ARM_CPUID_TI925T 0x54029252
+99 -61
target/arm/helper.c
··· 5947 5947 } 5948 5948 5949 5949 /* Switch to V7M main or process stack pointer. */ 5950 - static void switch_v7m_sp(CPUARMState *env, int process) 5950 + static void switch_v7m_sp(CPUARMState *env, bool new_spsel) 5951 5951 { 5952 5952 uint32_t tmp; 5953 - if (env->v7m.current_sp != process) { 5953 + bool old_spsel = env->v7m.control & R_V7M_CONTROL_SPSEL_MASK; 5954 + 5955 + if (old_spsel != new_spsel) { 5954 5956 tmp = env->v7m.other_sp; 5955 5957 env->v7m.other_sp = env->regs[13]; 5956 5958 env->regs[13] = tmp; 5957 - env->v7m.current_sp = process; 5959 + 5960 + env->v7m.control = deposit32(env->v7m.control, 5961 + R_V7M_CONTROL_SPSEL_SHIFT, 5962 + R_V7M_CONTROL_SPSEL_LENGTH, new_spsel); 5958 5963 } 5959 5964 } 5960 5965 ··· 5964 5969 uint32_t xpsr; 5965 5970 5966 5971 type = env->regs[15]; 5967 - if (env->v7m.exception != 0) 5972 + if (env->v7m.exception != ARMV7M_EXCP_NMI) { 5973 + /* Auto-clear FAULTMASK on return from other than NMI */ 5974 + env->daif &= ~PSTATE_F; 5975 + } 5976 + if (env->v7m.exception != 0) { 5968 5977 armv7m_nvic_complete_irq(env->nvic, env->v7m.exception); 5978 + } 5969 5979 5970 5980 /* Switch to the target stack. */ 5971 5981 switch_v7m_sp(env, (type & 4) != 0); ··· 6014 6024 } 6015 6025 } 6016 6026 6027 + static uint32_t arm_v7m_load_vector(ARMCPU *cpu) 6028 + 6029 + { 6030 + CPUState *cs = CPU(cpu); 6031 + CPUARMState *env = &cpu->env; 6032 + MemTxResult result; 6033 + hwaddr vec = env->v7m.vecbase + env->v7m.exception * 4; 6034 + uint32_t addr; 6035 + 6036 + addr = address_space_ldl(cs->as, vec, 6037 + MEMTXATTRS_UNSPECIFIED, &result); 6038 + if (result != MEMTX_OK) { 6039 + /* Architecturally this should cause a HardFault setting HSFR.VECTTBL, 6040 + * which would then be immediately followed by our failing to load 6041 + * the entry vector for that HardFault, which is a Lockup case. 6042 + * Since we don't model Lockup, we just report this guest error 6043 + * via cpu_abort(). 
6044 + */ 6045 + cpu_abort(cs, "Failed to read from exception vector table " 6046 + "entry %08x\n", (unsigned)vec); 6047 + } 6048 + return addr; 6049 + } 6050 + 6017 6051 void arm_v7m_cpu_do_interrupt(CPUState *cs) 6018 6052 { 6019 6053 ARMCPU *cpu = ARM_CPU(cs); ··· 6025 6059 arm_log_exception(cs->exception_index); 6026 6060 6027 6061 lr = 0xfffffff1; 6028 - if (env->v7m.current_sp) 6062 + if (env->v7m.control & R_V7M_CONTROL_SPSEL_MASK) { 6029 6063 lr |= 4; 6064 + } 6030 6065 if (env->v7m.exception == 0) 6031 6066 lr |= 8; 6032 6067 ··· 6037 6072 switch (cs->exception_index) { 6038 6073 case EXCP_UDEF: 6039 6074 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE); 6075 + env->v7m.cfsr |= R_V7M_CFSR_UNDEFINSTR_MASK; 6076 + return; 6077 + case EXCP_NOCP: 6078 + armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE); 6079 + env->v7m.cfsr |= R_V7M_CFSR_NOCP_MASK; 6040 6080 return; 6041 6081 case EXCP_SWI: 6042 6082 /* The PC already points to the next instruction. */ ··· 6075 6115 return; /* Never happens. Keep compiler happy. */ 6076 6116 } 6077 6117 6078 - /* Align stack pointer. */ 6079 - /* ??? Should only do this if Configuration Control Register 6080 - STACKALIGN bit is set. 
*/ 6081 - if (env->regs[13] & 4) { 6118 + /* Align stack pointer if the guest wants that */ 6119 + if ((env->regs[13] & 4) && (env->v7m.ccr & R_V7M_CCR_STKALIGN_MASK)) { 6082 6120 env->regs[13] -= 4; 6083 6121 xpsr |= 0x200; 6084 6122 } ··· 6095 6133 /* Clear IT bits */ 6096 6134 env->condexec_bits = 0; 6097 6135 env->regs[14] = lr; 6098 - addr = ldl_phys(cs->as, env->v7m.vecbase + env->v7m.exception * 4); 6136 + addr = arm_v7m_load_vector(cpu); 6099 6137 env->regs[15] = addr & 0xfffffffe; 6100 6138 env->thumb = addr & 1; 6101 6139 } ··· 6660 6698 CPUARMState *env = &cpu->env; 6661 6699 unsigned int new_el = env->exception.target_el; 6662 6700 6663 - assert(!IS_M(env)); 6701 + assert(!arm_feature(env, ARM_FEATURE_M)); 6664 6702 6665 6703 arm_log_exception(cs->exception_index); 6666 6704 qemu_log_mask(CPU_LOG_INT, "...from EL%d to EL%d\n", arm_current_el(env), ··· 8243 8281 8244 8282 uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg) 8245 8283 { 8246 - ARMCPU *cpu = arm_env_get_cpu(env); 8284 + uint32_t mask; 8285 + unsigned el = arm_current_el(env); 8286 + 8287 + /* First handle registers which unprivileged can read */ 8247 8288 8248 8289 switch (reg) { 8249 - case 0: /* APSR */ 8250 - return xpsr_read(env) & 0xf8000000; 8251 - case 1: /* IAPSR */ 8252 - return xpsr_read(env) & 0xf80001ff; 8253 - case 2: /* EAPSR */ 8254 - return xpsr_read(env) & 0xff00fc00; 8255 - case 3: /* xPSR */ 8256 - return xpsr_read(env) & 0xff00fdff; 8257 - case 5: /* IPSR */ 8258 - return xpsr_read(env) & 0x000001ff; 8259 - case 6: /* EPSR */ 8260 - return xpsr_read(env) & 0x0700fc00; 8261 - case 7: /* IEPSR */ 8262 - return xpsr_read(env) & 0x0700edff; 8290 + case 0 ... 7: /* xPSR sub-fields */ 8291 + mask = 0; 8292 + if ((reg & 1) && el) { 8293 + mask |= 0x000001ff; /* IPSR (unpriv. 
reads as zero) */ 8294 + } 8295 + if (!(reg & 4)) { 8296 + mask |= 0xf8000000; /* APSR */ 8297 + } 8298 + /* EPSR reads as zero */ 8299 + return xpsr_read(env) & mask; 8300 + break; 8301 + case 20: /* CONTROL */ 8302 + return env->v7m.control; 8303 + } 8304 + 8305 + if (el == 0) { 8306 + return 0; /* unprivileged reads others as zero */ 8307 + } 8308 + 8309 + switch (reg) { 8263 8310 case 8: /* MSP */ 8264 - return env->v7m.current_sp ? env->v7m.other_sp : env->regs[13]; 8311 + return (env->v7m.control & R_V7M_CONTROL_SPSEL_MASK) ? 8312 + env->v7m.other_sp : env->regs[13]; 8265 8313 case 9: /* PSP */ 8266 - return env->v7m.current_sp ? env->regs[13] : env->v7m.other_sp; 8314 + return (env->v7m.control & R_V7M_CONTROL_SPSEL_MASK) ? 8315 + env->regs[13] : env->v7m.other_sp; 8267 8316 case 16: /* PRIMASK */ 8268 8317 return (env->daif & PSTATE_I) != 0; 8269 8318 case 17: /* BASEPRI */ ··· 8271 8320 return env->v7m.basepri; 8272 8321 case 19: /* FAULTMASK */ 8273 8322 return (env->daif & PSTATE_F) != 0; 8274 - case 20: /* CONTROL */ 8275 - return env->v7m.control; 8276 8323 default: 8277 - /* ??? For debugging only. 
*/ 8278 - cpu_abort(CPU(cpu), "Unimplemented system register read (%d)\n", reg); 8324 + qemu_log_mask(LOG_GUEST_ERROR, "Attempt to read unknown special" 8325 + " register %d\n", reg); 8279 8326 return 0; 8280 8327 } 8281 8328 } 8282 8329 8283 8330 void HELPER(v7m_msr)(CPUARMState *env, uint32_t reg, uint32_t val) 8284 8331 { 8285 - ARMCPU *cpu = arm_env_get_cpu(env); 8332 + if (arm_current_el(env) == 0 && reg > 7) { 8333 + /* only xPSR sub-fields may be written by unprivileged */ 8334 + return; 8335 + } 8286 8336 8287 8337 switch (reg) { 8288 - case 0: /* APSR */ 8289 - xpsr_write(env, val, 0xf8000000); 8290 - break; 8291 - case 1: /* IAPSR */ 8292 - xpsr_write(env, val, 0xf8000000); 8293 - break; 8294 - case 2: /* EAPSR */ 8295 - xpsr_write(env, val, 0xfe00fc00); 8296 - break; 8297 - case 3: /* xPSR */ 8298 - xpsr_write(env, val, 0xfe00fc00); 8299 - break; 8300 - case 5: /* IPSR */ 8301 - /* IPSR bits are readonly. */ 8302 - break; 8303 - case 6: /* EPSR */ 8304 - xpsr_write(env, val, 0x0600fc00); 8305 - break; 8306 - case 7: /* IEPSR */ 8307 - xpsr_write(env, val, 0x0600fc00); 8338 + case 0 ... 
7: /* xPSR sub-fields */ 8339 + /* only APSR is actually writable */ 8340 + if (reg & 4) { 8341 + xpsr_write(env, val, 0xf8000000); /* APSR */ 8342 + } 8308 8343 break; 8309 8344 case 8: /* MSP */ 8310 - if (env->v7m.current_sp) 8345 + if (env->v7m.control & R_V7M_CONTROL_SPSEL_MASK) { 8311 8346 env->v7m.other_sp = val; 8312 - else 8347 + } else { 8313 8348 env->regs[13] = val; 8349 + } 8314 8350 break; 8315 8351 case 9: /* PSP */ 8316 - if (env->v7m.current_sp) 8352 + if (env->v7m.control & R_V7M_CONTROL_SPSEL_MASK) { 8317 8353 env->regs[13] = val; 8318 - else 8354 + } else { 8319 8355 env->v7m.other_sp = val; 8356 + } 8320 8357 break; 8321 8358 case 16: /* PRIMASK */ 8322 8359 if (val & 1) { ··· 8341 8378 } 8342 8379 break; 8343 8380 case 20: /* CONTROL */ 8344 - env->v7m.control = val & 3; 8345 - switch_v7m_sp(env, (val & 2) != 0); 8381 + switch_v7m_sp(env, (val & R_V7M_CONTROL_SPSEL_MASK) != 0); 8382 + env->v7m.control = val & (R_V7M_CONTROL_SPSEL_MASK | 8383 + R_V7M_CONTROL_NPRIV_MASK); 8346 8384 break; 8347 8385 default: 8348 - /* ??? For debugging only. */ 8349 - cpu_abort(CPU(cpu), "Unimplemented system register write (%d)\n", reg); 8386 + qemu_log_mask(LOG_GUEST_ERROR, "Attempt to write unknown special" 8387 + " register %d\n", reg); 8350 8388 return; 8351 8389 } 8352 8390 }
+7
target/arm/internals.h
··· 25 25 #ifndef TARGET_ARM_INTERNALS_H 26 26 #define TARGET_ARM_INTERNALS_H 27 27 28 + #include "hw/registerfields.h" 29 + 28 30 /* register banks for CPU modes */ 29 31 #define BANK_USRSYS 0 30 32 #define BANK_SVC 1 ··· 74 76 * This gives a 62.5MHz timer. 75 77 */ 76 78 #define GTIMER_SCALE 16 79 + 80 + /* Bit definitions for the v7M CONTROL register */ 81 + FIELD(V7M_CONTROL, NPRIV, 0, 1) 82 + FIELD(V7M_CONTROL, SPSEL, 1, 1) 83 + FIELD(V7M_CONTROL, FPCA, 2, 1) 77 84 78 85 /* 79 86 * For AArch64, map a given EL to an index in the banked_spsr array.
+8 -4
target/arm/machine.c
··· 99 99 100 100 static const VMStateDescription vmstate_m = { 101 101 .name = "cpu/m", 102 - .version_id = 1, 103 - .minimum_version_id = 1, 102 + .version_id = 3, 103 + .minimum_version_id = 3, 104 104 .needed = m_needed, 105 105 .fields = (VMStateField[]) { 106 - VMSTATE_UINT32(env.v7m.other_sp, ARMCPU), 107 106 VMSTATE_UINT32(env.v7m.vecbase, ARMCPU), 108 107 VMSTATE_UINT32(env.v7m.basepri, ARMCPU), 109 108 VMSTATE_UINT32(env.v7m.control, ARMCPU), 110 - VMSTATE_INT32(env.v7m.current_sp, ARMCPU), 109 + VMSTATE_UINT32(env.v7m.ccr, ARMCPU), 110 + VMSTATE_UINT32(env.v7m.cfsr, ARMCPU), 111 + VMSTATE_UINT32(env.v7m.hfsr, ARMCPU), 112 + VMSTATE_UINT32(env.v7m.dfsr, ARMCPU), 113 + VMSTATE_UINT32(env.v7m.mmfar, ARMCPU), 114 + VMSTATE_UINT32(env.v7m.bfar, ARMCPU), 111 115 VMSTATE_INT32(env.v7m.exception, ARMCPU), 112 116 VMSTATE_END_OF_LIST() 113 117 }
+14 -6
target/arm/translate.c
··· 10217 10217 break; 10218 10218 case 6: case 7: case 14: case 15: 10219 10219 /* Coprocessor. */ 10220 + if (arm_dc_feature(s, ARM_FEATURE_M)) { 10221 + /* We don't currently implement M profile FP support, 10222 + * so this entire space should give a NOCP fault. 10223 + */ 10224 + gen_exception_insn(s, 4, EXCP_NOCP, syn_uncategorized(), 10225 + default_exception_el(s)); 10226 + break; 10227 + } 10220 10228 if (((insn >> 24) & 3) == 3) { 10221 10229 /* Translate into the equivalent ARM encoding. */ 10222 10230 insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4) | (1 << 28); ··· 11719 11727 break; 11720 11728 } 11721 11729 #else 11722 - if (dc->pc >= 0xfffffff0 && arm_dc_feature(dc, ARM_FEATURE_M)) { 11723 - /* We always get here via a jump, so know we are not in a 11724 - conditional execution block. */ 11725 - gen_exception_internal(EXCP_EXCEPTION_EXIT); 11726 - dc->is_jmp = DISAS_EXC; 11727 - break; 11730 + if (arm_dc_feature(dc, ARM_FEATURE_M)) { 11731 + /* Branches to the magic exception-return addresses should 11732 + * already have been caught via the arm_v7m_unassigned_access hook, 11733 + * and never get here. 11734 + */ 11735 + assert(dc->pc < 0xfffffff0); 11728 11736 } 11729 11737 #endif 11730 11738