QEMU with hacks to log DMA reads & writes — see jcs.org/2018/11/12/vfio

Merge remote-tracking branch 'remotes/pmaydell/tags/pull-target-arm-20171012' into staging

target-arm queue:
* v8M: SG, BLXNS, secure-return
* v8M: fixes for coverity issues in previous patches
* arm: fix armv7m_init() declaration to match definition
* watchdog/aspeed: fix variable type to store reload value

# gpg: Signature made Thu 12 Oct 2017 17:02:49 BST
# gpg: using RSA key 0x3C2525ED14360CDE
# gpg: Good signature from "Peter Maydell <peter.maydell@linaro.org>"
# gpg: aka "Peter Maydell <pmaydell@gmail.com>"
# gpg: aka "Peter Maydell <pmaydell@chiark.greenend.org.uk>"
# Primary key fingerprint: E1A5 C593 CD41 9DE2 8E83 15CF 3C25 25ED 1436 0CDE

* remotes/pmaydell/tags/pull-target-arm-20171012:
nvic: Fix miscalculation of offsets into ITNS array
nvic: Add missing 'break'
target/arm: Implement SG instruction corner cases
target/arm: Support some Thumb insns being always unconditional
target-arm: Simplify insn_crosses_page()
target/arm: Pull Thumb insn word loads up to top level
target-arm: Don't check for "Thumb2 or M profile" for not-Thumb1
target/arm: Implement secure function return
target/arm: Implement BLXNS
target/arm: Implement SG instruction
target/arm: Add M profile secure MMU index values to get_a32_user_mem_index()
arm: fix armv7m_init() declaration to match definition
watchdog/aspeed: fix variable type to store reload value

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>

+521 -115
+3 -2
hw/intc/armv7m_nvic.c
··· 698 698 return ((s->num_irq - NVIC_FIRST_IRQ) / 32) - 1; 699 699 case 0x380 ... 0x3bf: /* NVIC_ITNS<n> */ 700 700 { 701 - int startvec = 32 * (offset - 0x380) + NVIC_FIRST_IRQ; 701 + int startvec = 8 * (offset - 0x380) + NVIC_FIRST_IRQ; 702 702 int i; 703 703 704 704 if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) { ··· 1102 1102 switch (offset) { 1103 1103 case 0x380 ... 0x3bf: /* NVIC_ITNS<n> */ 1104 1104 { 1105 - int startvec = 32 * (offset - 0x380) + NVIC_FIRST_IRQ; 1105 + int startvec = 8 * (offset - 0x380) + NVIC_FIRST_IRQ; 1106 1106 int i; 1107 1107 1108 1108 if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) { ··· 1447 1447 return; 1448 1448 } 1449 1449 cpu->env.sau.ctrl = value & 3; 1450 + break; 1450 1451 case 0xdd4: /* SAU_TYPE */ 1451 1452 if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) { 1452 1453 goto bad_offset;
+2 -2
hw/watchdog/wdt_aspeed.c
··· 100 100 101 101 static void aspeed_wdt_reload(AspeedWDTState *s, bool pclk) 102 102 { 103 - uint32_t reload; 103 + uint64_t reload; 104 104 105 105 if (pclk) { 106 106 reload = muldiv64(s->regs[WDT_RELOAD_VALUE], NANOSECONDS_PER_SECOND, 107 107 s->pclk_freq); 108 108 } else { 109 - reload = s->regs[WDT_RELOAD_VALUE] * 1000; 109 + reload = s->regs[WDT_RELOAD_VALUE] * 1000ULL; 110 110 } 111 111 112 112 if (aspeed_wdt_is_enabled(s)) {
+1 -1
include/hw/arm/arm.h
··· 25 25 26 26 /* armv7m.c */ 27 27 DeviceState *armv7m_init(MemoryRegion *system_memory, int mem_size, int num_irq, 28 - const char *kernel_filename, const char *cpu_model); 28 + const char *kernel_filename, const char *cpu_type); 29 29 /** 30 30 * armv7m_load_kernel: 31 31 * @cpu: CPU
+293 -13
target/arm/helper.c
··· 41 41 bool irvalid; 42 42 } V8M_SAttributes; 43 43 44 + static void v8m_security_lookup(CPUARMState *env, uint32_t address, 45 + MMUAccessType access_type, ARMMMUIdx mmu_idx, 46 + V8M_SAttributes *sattrs); 47 + 44 48 /* Definitions for the PMCCNTR and PMCR registers */ 45 49 #define PMCRD 0x8 46 50 #define PMCRC 0x4 ··· 5893 5897 g_assert_not_reached(); 5894 5898 } 5895 5899 5900 + void HELPER(v7m_blxns)(CPUARMState *env, uint32_t dest) 5901 + { 5902 + /* translate.c should never generate calls here in user-only mode */ 5903 + g_assert_not_reached(); 5904 + } 5905 + 5896 5906 void switch_mode(CPUARMState *env, int mode) 5897 5907 { 5898 5908 ARMCPU *cpu = arm_env_get_cpu(env); ··· 6164 6174 * - if the return value is a magic value, do exception return (like BX) 6165 6175 * - otherwise bit 0 of the return value is the target security state 6166 6176 */ 6167 - if (dest >= 0xff000000) { 6177 + uint32_t min_magic; 6178 + 6179 + if (arm_feature(env, ARM_FEATURE_M_SECURITY)) { 6180 + /* Covers FNC_RETURN and EXC_RETURN magic */ 6181 + min_magic = FNC_RETURN_MIN_MAGIC; 6182 + } else { 6183 + /* EXC_RETURN magic only */ 6184 + min_magic = EXC_RETURN_MIN_MAGIC; 6185 + } 6186 + 6187 + if (dest >= min_magic) { 6168 6188 /* This is an exception return magic value; put it where 6169 6189 * do_v7m_exception_exit() expects and raise EXCEPTION_EXIT. 
6170 6190 * Note that if we ever add gen_ss_advance() singlestep support to ··· 6185 6205 env->regs[15] = dest & ~1; 6186 6206 } 6187 6207 6208 + void HELPER(v7m_blxns)(CPUARMState *env, uint32_t dest) 6209 + { 6210 + /* Handle v7M BLXNS: 6211 + * - bit 0 of the destination address is the target security state 6212 + */ 6213 + 6214 + /* At this point regs[15] is the address just after the BLXNS */ 6215 + uint32_t nextinst = env->regs[15] | 1; 6216 + uint32_t sp = env->regs[13] - 8; 6217 + uint32_t saved_psr; 6218 + 6219 + /* translate.c will have made BLXNS UNDEF unless we're secure */ 6220 + assert(env->v7m.secure); 6221 + 6222 + if (dest & 1) { 6223 + /* target is Secure, so this is just a normal BLX, 6224 + * except that the low bit doesn't indicate Thumb/not. 6225 + */ 6226 + env->regs[14] = nextinst; 6227 + env->thumb = 1; 6228 + env->regs[15] = dest & ~1; 6229 + return; 6230 + } 6231 + 6232 + /* Target is non-secure: first push a stack frame */ 6233 + if (!QEMU_IS_ALIGNED(sp, 8)) { 6234 + qemu_log_mask(LOG_GUEST_ERROR, 6235 + "BLXNS with misaligned SP is UNPREDICTABLE\n"); 6236 + } 6237 + 6238 + saved_psr = env->v7m.exception; 6239 + if (env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK) { 6240 + saved_psr |= XPSR_SFPA; 6241 + } 6242 + 6243 + /* Note that these stores can throw exceptions on MPU faults */ 6244 + cpu_stl_data(env, sp, nextinst); 6245 + cpu_stl_data(env, sp + 4, saved_psr); 6246 + 6247 + env->regs[13] = sp; 6248 + env->regs[14] = 0xfeffffff; 6249 + if (arm_v7m_is_handler_mode(env)) { 6250 + /* Write a dummy value to IPSR, to avoid leaking the current secure 6251 + * exception number to non-secure code. This is guaranteed not 6252 + * to cause write_v7m_exception() to actually change stacks. 
6253 + */ 6254 + write_v7m_exception(env, 1); 6255 + } 6256 + switch_v7m_security_state(env, 0); 6257 + env->thumb = 1; 6258 + env->regs[15] = dest; 6259 + } 6260 + 6188 6261 static uint32_t *get_v7m_sp_ptr(CPUARMState *env, bool secure, bool threadmode, 6189 6262 bool spsel) 6190 6263 { ··· 6407 6480 bool exc_secure = false; 6408 6481 bool return_to_secure; 6409 6482 6410 - /* We can only get here from an EXCP_EXCEPTION_EXIT, and 6411 - * gen_bx_excret() enforces the architectural rule 6412 - * that jumps to magic addresses don't have magic behaviour unless 6413 - * we're in Handler mode (compare pseudocode BXWritePC()). 6483 + /* If we're not in Handler mode then jumps to magic exception-exit 6484 + * addresses don't have magic behaviour. However for the v8M 6485 + * security extensions the magic secure-function-return has to 6486 + * work in thread mode too, so to avoid doing an extra check in 6487 + * the generated code we allow exception-exit magic to also cause the 6488 + * internal exception and bring us here in thread mode. Correct code 6489 + * will never try to do this (the following insn fetch will always 6490 + * fault) so we the overhead of having taken an unnecessary exception 6491 + * doesn't matter. 6414 6492 */ 6415 - assert(arm_v7m_is_handler_mode(env)); 6493 + if (!arm_v7m_is_handler_mode(env)) { 6494 + return; 6495 + } 6416 6496 6417 6497 /* In the spec pseudocode ExceptionReturn() is called directly 6418 6498 * from BXWritePC() and gets the full target PC value including ··· 6702 6782 qemu_log_mask(CPU_LOG_INT, "...successful exception return\n"); 6703 6783 } 6704 6784 6785 + static bool do_v7m_function_return(ARMCPU *cpu) 6786 + { 6787 + /* v8M security extensions magic function return. 
6788 + * We may either: 6789 + * (1) throw an exception (longjump) 6790 + * (2) return true if we successfully handled the function return 6791 + * (3) return false if we failed a consistency check and have 6792 + * pended a UsageFault that needs to be taken now 6793 + * 6794 + * At this point the magic return value is split between env->regs[15] 6795 + * and env->thumb. We don't bother to reconstitute it because we don't 6796 + * need it (all values are handled the same way). 6797 + */ 6798 + CPUARMState *env = &cpu->env; 6799 + uint32_t newpc, newpsr, newpsr_exc; 6800 + 6801 + qemu_log_mask(CPU_LOG_INT, "...really v7M secure function return\n"); 6802 + 6803 + { 6804 + bool threadmode, spsel; 6805 + TCGMemOpIdx oi; 6806 + ARMMMUIdx mmu_idx; 6807 + uint32_t *frame_sp_p; 6808 + uint32_t frameptr; 6809 + 6810 + /* Pull the return address and IPSR from the Secure stack */ 6811 + threadmode = !arm_v7m_is_handler_mode(env); 6812 + spsel = env->v7m.control[M_REG_S] & R_V7M_CONTROL_SPSEL_MASK; 6813 + 6814 + frame_sp_p = get_v7m_sp_ptr(env, true, threadmode, spsel); 6815 + frameptr = *frame_sp_p; 6816 + 6817 + /* These loads may throw an exception (for MPU faults). We want to 6818 + * do them as secure, so work out what MMU index that is. 
6819 + */ 6820 + mmu_idx = arm_v7m_mmu_idx_for_secstate(env, true); 6821 + oi = make_memop_idx(MO_LE, arm_to_core_mmu_idx(mmu_idx)); 6822 + newpc = helper_le_ldul_mmu(env, frameptr, oi, 0); 6823 + newpsr = helper_le_ldul_mmu(env, frameptr + 4, oi, 0); 6824 + 6825 + /* Consistency checks on new IPSR */ 6826 + newpsr_exc = newpsr & XPSR_EXCP; 6827 + if (!((env->v7m.exception == 0 && newpsr_exc == 0) || 6828 + (env->v7m.exception == 1 && newpsr_exc != 0))) { 6829 + /* Pend the fault and tell our caller to take it */ 6830 + env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK; 6831 + armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, 6832 + env->v7m.secure); 6833 + qemu_log_mask(CPU_LOG_INT, 6834 + "...taking INVPC UsageFault: " 6835 + "IPSR consistency check failed\n"); 6836 + return false; 6837 + } 6838 + 6839 + *frame_sp_p = frameptr + 8; 6840 + } 6841 + 6842 + /* This invalidates frame_sp_p */ 6843 + switch_v7m_security_state(env, true); 6844 + env->v7m.exception = newpsr_exc; 6845 + env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_SFPA_MASK; 6846 + if (newpsr & XPSR_SFPA) { 6847 + env->v7m.control[M_REG_S] |= R_V7M_CONTROL_SFPA_MASK; 6848 + } 6849 + xpsr_write(env, 0, XPSR_IT); 6850 + env->thumb = newpc & 1; 6851 + env->regs[15] = newpc & ~1; 6852 + 6853 + qemu_log_mask(CPU_LOG_INT, "...function return successful\n"); 6854 + return true; 6855 + } 6856 + 6705 6857 static void arm_log_exception(int idx) 6706 6858 { 6707 6859 if (qemu_loglevel_mask(CPU_LOG_INT)) { ··· 6736 6888 } 6737 6889 } 6738 6890 6891 + static bool v7m_read_half_insn(ARMCPU *cpu, ARMMMUIdx mmu_idx, 6892 + uint32_t addr, uint16_t *insn) 6893 + { 6894 + /* Load a 16-bit portion of a v7M instruction, returning true on success, 6895 + * or false on failure (in which case we will have pended the appropriate 6896 + * exception). 
6897 + * We need to do the instruction fetch's MPU and SAU checks 6898 + * like this because there is no MMU index that would allow 6899 + * doing the load with a single function call. Instead we must 6900 + * first check that the security attributes permit the load 6901 + * and that they don't mismatch on the two halves of the instruction, 6902 + * and then we do the load as a secure load (ie using the security 6903 + * attributes of the address, not the CPU, as architecturally required). 6904 + */ 6905 + CPUState *cs = CPU(cpu); 6906 + CPUARMState *env = &cpu->env; 6907 + V8M_SAttributes sattrs = {}; 6908 + MemTxAttrs attrs = {}; 6909 + ARMMMUFaultInfo fi = {}; 6910 + MemTxResult txres; 6911 + target_ulong page_size; 6912 + hwaddr physaddr; 6913 + int prot; 6914 + uint32_t fsr; 6915 + 6916 + v8m_security_lookup(env, addr, MMU_INST_FETCH, mmu_idx, &sattrs); 6917 + if (!sattrs.nsc || sattrs.ns) { 6918 + /* This must be the second half of the insn, and it straddles a 6919 + * region boundary with the second half not being S&NSC. 
6920 + */ 6921 + env->v7m.sfsr |= R_V7M_SFSR_INVEP_MASK; 6922 + armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false); 6923 + qemu_log_mask(CPU_LOG_INT, 6924 + "...really SecureFault with SFSR.INVEP\n"); 6925 + return false; 6926 + } 6927 + if (get_phys_addr(env, addr, MMU_INST_FETCH, mmu_idx, 6928 + &physaddr, &attrs, &prot, &page_size, &fsr, &fi)) { 6929 + /* the MPU lookup failed */ 6930 + env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_IACCVIOL_MASK; 6931 + armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_MEM, env->v7m.secure); 6932 + qemu_log_mask(CPU_LOG_INT, "...really MemManage with CFSR.IACCVIOL\n"); 6933 + return false; 6934 + } 6935 + *insn = address_space_lduw_le(arm_addressspace(cs, attrs), physaddr, 6936 + attrs, &txres); 6937 + if (txres != MEMTX_OK) { 6938 + env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_IBUSERR_MASK; 6939 + armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_BUS, false); 6940 + qemu_log_mask(CPU_LOG_INT, "...really BusFault with CFSR.IBUSERR\n"); 6941 + return false; 6942 + } 6943 + return true; 6944 + } 6945 + 6946 + static bool v7m_handle_execute_nsc(ARMCPU *cpu) 6947 + { 6948 + /* Check whether this attempt to execute code in a Secure & NS-Callable 6949 + * memory region is for an SG instruction; if so, then emulate the 6950 + * effect of the SG instruction and return true. Otherwise pend 6951 + * the correct kind of exception and return false. 6952 + */ 6953 + CPUARMState *env = &cpu->env; 6954 + ARMMMUIdx mmu_idx; 6955 + uint16_t insn; 6956 + 6957 + /* We should never get here unless get_phys_addr_pmsav8() caused 6958 + * an exception for NS executing in S&NSC memory. 
6959 + */ 6960 + assert(!env->v7m.secure); 6961 + assert(arm_feature(env, ARM_FEATURE_M_SECURITY)); 6962 + 6963 + /* We want to do the MPU lookup as secure; work out what mmu_idx that is */ 6964 + mmu_idx = arm_v7m_mmu_idx_for_secstate(env, true); 6965 + 6966 + if (!v7m_read_half_insn(cpu, mmu_idx, env->regs[15], &insn)) { 6967 + return false; 6968 + } 6969 + 6970 + if (!env->thumb) { 6971 + goto gen_invep; 6972 + } 6973 + 6974 + if (insn != 0xe97f) { 6975 + /* Not an SG instruction first half (we choose the IMPDEF 6976 + * early-SG-check option). 6977 + */ 6978 + goto gen_invep; 6979 + } 6980 + 6981 + if (!v7m_read_half_insn(cpu, mmu_idx, env->regs[15] + 2, &insn)) { 6982 + return false; 6983 + } 6984 + 6985 + if (insn != 0xe97f) { 6986 + /* Not an SG instruction second half (yes, both halves of the SG 6987 + * insn have the same hex value) 6988 + */ 6989 + goto gen_invep; 6990 + } 6991 + 6992 + /* OK, we have confirmed that we really have an SG instruction. 6993 + * We know we're NS in S memory so don't need to repeat those checks. 6994 + */ 6995 + qemu_log_mask(CPU_LOG_INT, "...really an SG instruction at 0x%08" PRIx32 6996 + ", executing it\n", env->regs[15]); 6997 + env->regs[14] &= ~1; 6998 + switch_v7m_security_state(env, true); 6999 + xpsr_write(env, 0, XPSR_IT); 7000 + env->regs[15] += 4; 7001 + return true; 7002 + 7003 + gen_invep: 7004 + env->v7m.sfsr |= R_V7M_SFSR_INVEP_MASK; 7005 + armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false); 7006 + qemu_log_mask(CPU_LOG_INT, 7007 + "...really SecureFault with SFSR.INVEP\n"); 7008 + return false; 7009 + } 7010 + 6739 7011 void arm_v7m_cpu_do_interrupt(CPUState *cs) 6740 7012 { 6741 7013 ARMCPU *cpu = ARM_CPU(cs); ··· 6778 7050 * the SG instruction have the same security attributes.) 6779 7051 * Everything else must generate an INVEP SecureFault, so we 6780 7052 * emulate the SG instruction here. 6781 - * TODO: actually emulate SG. 
6782 7053 */ 6783 - env->v7m.sfsr |= R_V7M_SFSR_INVEP_MASK; 6784 - armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false); 6785 - qemu_log_mask(CPU_LOG_INT, 6786 - "...really SecureFault with SFSR.INVEP\n"); 7054 + if (v7m_handle_execute_nsc(cpu)) { 7055 + return; 7056 + } 6787 7057 break; 6788 7058 case M_FAKE_FSR_SFAULT: 6789 7059 /* Various flavours of SecureFault for attempts to execute or ··· 6868 7138 case EXCP_IRQ: 6869 7139 break; 6870 7140 case EXCP_EXCEPTION_EXIT: 6871 - do_v7m_exception_exit(cpu); 6872 - return; 7141 + if (env->regs[15] < EXC_RETURN_MIN_MAGIC) { 7142 + /* Must be v8M security extension function return */ 7143 + assert(env->regs[15] >= FNC_RETURN_MIN_MAGIC); 7144 + assert(arm_feature(env, ARM_FEATURE_M_SECURITY)); 7145 + if (do_v7m_function_return(cpu)) { 7146 + return; 7147 + } 7148 + } else { 7149 + do_v7m_exception_exit(cpu); 7150 + return; 7151 + } 7152 + break; 6873 7153 default: 6874 7154 cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index); 6875 7155 return; /* Never happens. Keep compiler happy. */
+1
target/arm/helper.h
··· 64 64 DEF_HELPER_2(v7m_mrs, i32, env, i32) 65 65 66 66 DEF_HELPER_2(v7m_bxns, void, env, i32) 67 + DEF_HELPER_2(v7m_blxns, void, env, i32) 67 68 68 69 DEF_HELPER_4(access_check_cp_reg, void, env, ptr, i32, i32) 69 70 DEF_HELPER_3(set_cp_reg, void, env, ptr, i32)
+8
target/arm/internals.h
··· 60 60 FIELD(V7M_CONTROL, NPRIV, 0, 1) 61 61 FIELD(V7M_CONTROL, SPSEL, 1, 1) 62 62 FIELD(V7M_CONTROL, FPCA, 2, 1) 63 + FIELD(V7M_CONTROL, SFPA, 3, 1) 63 64 64 65 /* Bit definitions for v7M exception return payload */ 65 66 FIELD(V7M_EXCRET, ES, 0, 1) ··· 70 71 FIELD(V7M_EXCRET, DCRS, 5, 1) 71 72 FIELD(V7M_EXCRET, S, 6, 1) 72 73 FIELD(V7M_EXCRET, RES1, 7, 25) /* including the must-be-1 prefix */ 74 + 75 + /* Minimum value which is a magic number for exception return */ 76 + #define EXC_RETURN_MIN_MAGIC 0xff000000 77 + /* Minimum number which is a magic number for function or exception return 78 + * when using v8M security extension 79 + */ 80 + #define FNC_RETURN_MIN_MAGIC 0xfefffffe 73 81 74 82 /* We use a few fake FSR values for internal purposes in M profile. 75 83 * M profile cores don't have A/R format FSRs, but currently our
+213 -97
target/arm/translate.c
··· 165 165 case ARMMMUIdx_MPriv: 166 166 case ARMMMUIdx_MNegPri: 167 167 return arm_to_core_mmu_idx(ARMMMUIdx_MUser); 168 + case ARMMMUIdx_MSUser: 169 + case ARMMMUIdx_MSPriv: 170 + case ARMMMUIdx_MSNegPri: 171 + return arm_to_core_mmu_idx(ARMMMUIdx_MSUser); 168 172 case ARMMMUIdx_S2NS: 169 173 default: 170 174 g_assert_not_reached(); ··· 960 964 * s->base.is_jmp that we need to do the rest of the work later. 961 965 */ 962 966 gen_bx(s, var); 963 - if (s->v7m_handler_mode && arm_dc_feature(s, ARM_FEATURE_M)) { 967 + if (arm_dc_feature(s, ARM_FEATURE_M_SECURITY) || 968 + (s->v7m_handler_mode && arm_dc_feature(s, ARM_FEATURE_M))) { 964 969 s->base.is_jmp = DISAS_BX_EXCRET; 965 970 } 966 971 } ··· 969 974 { 970 975 /* Generate the code to finish possible exception return and end the TB */ 971 976 TCGLabel *excret_label = gen_new_label(); 977 + uint32_t min_magic; 978 + 979 + if (arm_dc_feature(s, ARM_FEATURE_M_SECURITY)) { 980 + /* Covers FNC_RETURN and EXC_RETURN magic */ 981 + min_magic = FNC_RETURN_MIN_MAGIC; 982 + } else { 983 + /* EXC_RETURN magic only */ 984 + min_magic = EXC_RETURN_MIN_MAGIC; 985 + } 972 986 973 987 /* Is the new PC value in the magic range indicating exception return? */ 974 - tcg_gen_brcondi_i32(TCG_COND_GEU, cpu_R[15], 0xff000000, excret_label); 988 + tcg_gen_brcondi_i32(TCG_COND_GEU, cpu_R[15], min_magic, excret_label); 975 989 /* No: end the TB as we would for a DISAS_JMP */ 976 990 if (is_singlestepping(s)) { 977 991 gen_singlestep_exception(s); ··· 1009 1023 * "zeroes the IT bits" as our UNPREDICTABLE behaviour otherwise. 1010 1024 */ 1011 1025 gen_helper_v7m_bxns(cpu_env, var); 1026 + tcg_temp_free_i32(var); 1027 + s->base.is_jmp = DISAS_EXIT; 1028 + } 1029 + 1030 + static inline void gen_blxns(DisasContext *s, int rm) 1031 + { 1032 + TCGv_i32 var = load_reg(s, rm); 1033 + 1034 + /* We don't need to sync condexec state, for the same reason as bxns. 1035 + * We do however need to set the PC, because the blxns helper reads it. 
1036 + * The blxns helper may throw an exception. 1037 + */ 1038 + gen_set_pc_im(s, s->pc); 1039 + gen_helper_v7m_blxns(cpu_env, var); 1012 1040 tcg_temp_free_i32(var); 1013 1041 s->base.is_jmp = DISAS_EXIT; 1014 1042 } ··· 9592 9620 } 9593 9621 } 9594 9622 9623 + static bool thumb_insn_is_16bit(DisasContext *s, uint32_t insn) 9624 + { 9625 + /* Return true if this is a 16 bit instruction. We must be precise 9626 + * about this (matching the decode). We assume that s->pc still 9627 + * points to the first 16 bits of the insn. 9628 + */ 9629 + if ((insn >> 11) < 0x1d) { 9630 + /* Definitely a 16-bit instruction */ 9631 + return true; 9632 + } 9633 + 9634 + /* Top five bits 0b11101 / 0b11110 / 0b11111 : this is the 9635 + * first half of a 32-bit Thumb insn. Thumb-1 cores might 9636 + * end up actually treating this as two 16-bit insns, though, 9637 + * if it's half of a bl/blx pair that might span a page boundary. 9638 + */ 9639 + if (arm_dc_feature(s, ARM_FEATURE_THUMB2)) { 9640 + /* Thumb2 cores (including all M profile ones) always treat 9641 + * 32-bit insns as 32-bit. 9642 + */ 9643 + return false; 9644 + } 9645 + 9646 + if ((insn >> 11) == 0x1e && (s->pc < s->next_page_start - 3)) { 9647 + /* 0b1111_0xxx_xxxx_xxxx : BL/BLX prefix, and the suffix 9648 + * is not on the next page; we merge this into a 32-bit 9649 + * insn. 9650 + */ 9651 + return false; 9652 + } 9653 + /* 0b1110_1xxx_xxxx_xxxx : BLX suffix (or UNDEF); 9654 + * 0b1111_1xxx_xxxx_xxxx : BL suffix; 9655 + * 0b1111_0xxx_xxxx_xxxx : BL/BLX prefix on the end of a page 9656 + * -- handle as single 16 bit insn 9657 + */ 9658 + return true; 9659 + } 9660 + 9595 9661 /* Return true if this is a Thumb-2 logical op. */ 9596 9662 static int 9597 9663 thumb2_logic_op(int op) ··· 9677 9743 9678 9744 /* Translate a 32-bit thumb instruction. Returns nonzero if the instruction 9679 9745 is not legal. 
*/ 9680 - static int disas_thumb2_insn(CPUARMState *env, DisasContext *s, uint16_t insn_hw1) 9746 + static int disas_thumb2_insn(DisasContext *s, uint32_t insn) 9681 9747 { 9682 - uint32_t insn, imm, shift, offset; 9748 + uint32_t imm, shift, offset; 9683 9749 uint32_t rd, rn, rm, rs; 9684 9750 TCGv_i32 tmp; 9685 9751 TCGv_i32 tmp2; ··· 9691 9757 int conds; 9692 9758 int logic_cc; 9693 9759 9694 - if (!(arm_dc_feature(s, ARM_FEATURE_THUMB2) 9695 - || arm_dc_feature(s, ARM_FEATURE_M))) { 9696 - /* Thumb-1 cores may need to treat bl and blx as a pair of 9697 - 16-bit instructions to get correct prefetch abort behavior. */ 9698 - insn = insn_hw1; 9699 - if ((insn & (1 << 12)) == 0) { 9700 - ARCH(5); 9701 - /* Second half of blx. */ 9702 - offset = ((insn & 0x7ff) << 1); 9703 - tmp = load_reg(s, 14); 9704 - tcg_gen_addi_i32(tmp, tmp, offset); 9705 - tcg_gen_andi_i32(tmp, tmp, 0xfffffffc); 9706 - 9707 - tmp2 = tcg_temp_new_i32(); 9708 - tcg_gen_movi_i32(tmp2, s->pc | 1); 9709 - store_reg(s, 14, tmp2); 9710 - gen_bx(s, tmp); 9711 - return 0; 9712 - } 9713 - if (insn & (1 << 11)) { 9714 - /* Second half of bl. */ 9715 - offset = ((insn & 0x7ff) << 1) | 1; 9716 - tmp = load_reg(s, 14); 9717 - tcg_gen_addi_i32(tmp, tmp, offset); 9718 - 9719 - tmp2 = tcg_temp_new_i32(); 9720 - tcg_gen_movi_i32(tmp2, s->pc | 1); 9721 - store_reg(s, 14, tmp2); 9722 - gen_bx(s, tmp); 9723 - return 0; 9724 - } 9725 - if ((s->pc & ~TARGET_PAGE_MASK) == 0) { 9726 - /* Instruction spans a page boundary. Implement it as two 9727 - 16-bit instructions in case the second half causes an 9728 - prefetch abort. */ 9729 - offset = ((int32_t)insn << 21) >> 9; 9730 - tcg_gen_movi_i32(cpu_R[14], s->pc + 2 + offset); 9731 - return 0; 9732 - } 9733 - /* Fall through to 32-bit decode. 
*/ 9734 - } 9735 - 9736 - insn = arm_lduw_code(env, s->pc, s->sctlr_b); 9737 - s->pc += 2; 9738 - insn |= (uint32_t)insn_hw1 << 16; 9739 - 9760 + /* The only 32 bit insn that's allowed for Thumb1 is the combined 9761 + * BL/BLX prefix and suffix. 9762 + */ 9740 9763 if ((insn & 0xf800e800) != 0xf000e800) { 9741 9764 ARCH(6T2); 9742 9765 } ··· 9755 9778 * - load/store doubleword, load/store exclusive, ldacq/strel, 9756 9779 * table branch. 9757 9780 */ 9758 - if (insn & 0x01200000) { 9781 + if (insn == 0xe97fe97f && arm_dc_feature(s, ARM_FEATURE_M) && 9782 + arm_dc_feature(s, ARM_FEATURE_V8)) { 9783 + /* 0b1110_1001_0111_1111_1110_1001_0111_111 9784 + * - SG (v8M only) 9785 + * The bulk of the behaviour for this instruction is implemented 9786 + * in v7m_handle_execute_nsc(), which deals with the insn when 9787 + * it is executed by a CPU in non-secure state from memory 9788 + * which is Secure & NonSecure-Callable. 9789 + * Here we only need to handle the remaining cases: 9790 + * * in NS memory (including the "security extension not 9791 + * implemented" case) : NOP 9792 + * * in S memory but CPU already secure (clear IT bits) 9793 + * We know that the attribute for the memory this insn is 9794 + * in must match the current CPU state, because otherwise 9795 + * get_phys_addr_pmsav8 would have generated an exception. 
9796 + */ 9797 + if (s->v8m_secure) { 9798 + /* Like the IT insn, we don't need to generate any code */ 9799 + s->condexec_cond = 0; 9800 + s->condexec_mask = 0; 9801 + } 9802 + } else if (insn & 0x01200000) { 9759 9803 /* 0b1110_1000_x11x_xxxx_xxxx_xxxx_xxxx_xxxx 9760 9804 * - load/store dual (post-indexed) 9761 9805 * 0b1111_1001_x10x_xxxx_xxxx_xxxx_xxxx_xxxx ··· 11051 11095 return 1; 11052 11096 } 11053 11097 11054 - static void disas_thumb_insn(CPUARMState *env, DisasContext *s) 11098 + static void disas_thumb_insn(DisasContext *s, uint32_t insn) 11055 11099 { 11056 - uint32_t val, insn, op, rm, rn, rd, shift, cond; 11100 + uint32_t val, op, rm, rn, rd, shift, cond; 11057 11101 int32_t offset; 11058 11102 int i; 11059 11103 TCGv_i32 tmp; 11060 11104 TCGv_i32 tmp2; 11061 11105 TCGv_i32 addr; 11062 - 11063 - if (s->condexec_mask) { 11064 - cond = s->condexec_cond; 11065 - if (cond != 0x0e) { /* Skip conditional when condition is AL. */ 11066 - s->condlabel = gen_new_label(); 11067 - arm_gen_test_cc(cond ^ 1, s->condlabel); 11068 - s->condjmp = 1; 11069 - } 11070 - } 11071 - 11072 - insn = arm_lduw_code(env, s->pc, s->sctlr_b); 11073 - s->pc += 2; 11074 11106 11075 11107 switch (insn >> 12) { 11076 11108 case 0: case 1: ··· 11218 11250 goto undef; 11219 11251 } 11220 11252 if (link) { 11221 - /* BLXNS: not yet implemented */ 11222 - goto undef; 11253 + gen_blxns(s, rm); 11223 11254 } else { 11224 11255 gen_bxns(s, rm); 11225 11256 } ··· 11803 11834 11804 11835 case 14: 11805 11836 if (insn & (1 << 11)) { 11806 - if (disas_thumb2_insn(env, s, insn)) 11807 - goto undef32; 11837 + /* thumb_insn_is_16bit() ensures we can't get here for 11838 + * a Thumb2 CPU, so this must be a thumb1 split BL/BLX: 11839 + * 0b1110_1xxx_xxxx_xxxx : BLX suffix (or UNDEF) 11840 + */ 11841 + assert(!arm_dc_feature(s, ARM_FEATURE_THUMB2)); 11842 + ARCH(5); 11843 + offset = ((insn & 0x7ff) << 1); 11844 + tmp = load_reg(s, 14); 11845 + tcg_gen_addi_i32(tmp, tmp, offset); 11846 + 
tcg_gen_andi_i32(tmp, tmp, 0xfffffffc); 11847 + 11848 + tmp2 = tcg_temp_new_i32(); 11849 + tcg_gen_movi_i32(tmp2, s->pc | 1); 11850 + store_reg(s, 14, tmp2); 11851 + gen_bx(s, tmp); 11808 11852 break; 11809 11853 } 11810 11854 /* unconditional branch */ ··· 11815 11859 break; 11816 11860 11817 11861 case 15: 11818 - if (disas_thumb2_insn(env, s, insn)) 11819 - goto undef32; 11862 + /* thumb_insn_is_16bit() ensures we can't get here for 11863 + * a Thumb2 CPU, so this must be a thumb1 split BL/BLX. 11864 + */ 11865 + assert(!arm_dc_feature(s, ARM_FEATURE_THUMB2)); 11866 + 11867 + if (insn & (1 << 11)) { 11868 + /* 0b1111_1xxx_xxxx_xxxx : BL suffix */ 11869 + offset = ((insn & 0x7ff) << 1) | 1; 11870 + tmp = load_reg(s, 14); 11871 + tcg_gen_addi_i32(tmp, tmp, offset); 11872 + 11873 + tmp2 = tcg_temp_new_i32(); 11874 + tcg_gen_movi_i32(tmp2, s->pc | 1); 11875 + store_reg(s, 14, tmp2); 11876 + gen_bx(s, tmp); 11877 + } else { 11878 + /* 0b1111_0xxx_xxxx_xxxx : BL/BLX prefix */ 11879 + uint32_t uoffset = ((int32_t)insn << 21) >> 9; 11880 + 11881 + tcg_gen_movi_i32(cpu_R[14], s->pc + 2 + uoffset); 11882 + } 11820 11883 break; 11821 11884 } 11822 - return; 11823 - undef32: 11824 - gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(), 11825 - default_exception_el(s)); 11826 11885 return; 11827 11886 illegal_op: 11828 11887 undef: ··· 11834 11893 { 11835 11894 /* Return true if the insn at dc->pc might cross a page boundary. 11836 11895 * (False positives are OK, false negatives are not.) 11896 + * We know this is a Thumb insn, and our caller ensures we are 11897 + * only called if dc->pc is less than 4 bytes from the page 11898 + * boundary, so we cross the page if the first 16 bits indicate 11899 + * that this is a 32 bit insn. 
11837 11900 */ 11838 - uint16_t insn; 11839 - 11840 - if ((s->pc & 3) == 0) { 11841 - /* At a 4-aligned address we can't be crossing a page */ 11842 - return false; 11843 - } 11844 - 11845 - /* This must be a Thumb insn */ 11846 - insn = arm_lduw_code(env, s->pc, s->sctlr_b); 11901 + uint16_t insn = arm_lduw_code(env, s->pc, s->sctlr_b); 11847 11902 11848 - if ((insn >> 11) >= 0x1d) { 11849 - /* Top five bits 0b11101 / 0b11110 / 0b11111 : this is the 11850 - * First half of a 32-bit Thumb insn. Thumb-1 cores might 11851 - * end up actually treating this as two 16-bit insns (see the 11852 - * code at the start of disas_thumb2_insn()) but we don't bother 11853 - * to check for that as it is unlikely, and false positives here 11854 - * are harmless. 11855 - */ 11856 - return true; 11857 - } 11858 - /* Definitely a 16-bit insn, can't be crossing a page. */ 11859 - return false; 11903 + return !thumb_insn_is_16bit(s, insn); 11860 11904 } 11861 11905 11862 11906 static int arm_tr_init_disas_context(DisasContextBase *dcbase, ··· 12089 12133 in init_disas_context by adjusting max_insns. */ 12090 12134 } 12091 12135 12136 + static bool thumb_insn_is_unconditional(DisasContext *s, uint32_t insn) 12137 + { 12138 + /* Return true if this Thumb insn is always unconditional, 12139 + * even inside an IT block. This is true of only a very few 12140 + * instructions: BKPT, HLT, and SG. 12141 + * 12142 + * A larger class of instructions are UNPREDICTABLE if used 12143 + * inside an IT block; we do not need to detect those here, because 12144 + * what we do by default (perform the cc check and update the IT 12145 + * bits state machine) is a permitted CONSTRAINED UNPREDICTABLE 12146 + * choice for those situations. 12147 + * 12148 + * insn is either a 16-bit or a 32-bit instruction; the two are 12149 + * distinguishable because for the 16-bit case the top 16 bits 12150 + * are zeroes, and that isn't a valid 32-bit encoding. 
12151 + */ 12152 + if ((insn & 0xffffff00) == 0xbe00) { 12153 + /* BKPT */ 12154 + return true; 12155 + } 12156 + 12157 + if ((insn & 0xffffffc0) == 0xba80 && arm_dc_feature(s, ARM_FEATURE_V8) && 12158 + !arm_dc_feature(s, ARM_FEATURE_M)) { 12159 + /* HLT: v8A only. This is unconditional even when it is going to 12160 + * UNDEF; see the v8A ARM ARM DDI0487B.a H3.3. 12161 + * For v7 cores this was a plain old undefined encoding and so 12162 + * honours its cc check. (We might be using the encoding as 12163 + * a semihosting trap, but we don't change the cc check behaviour 12164 + * on that account, because a debugger connected to a real v7A 12165 + * core and emulating semihosting traps by catching the UNDEF 12166 + * exception would also only see cases where the cc check passed. 12167 + * No guest code should be trying to do a HLT semihosting trap 12168 + * in an IT block anyway. 12169 + */ 12170 + return true; 12171 + } 12172 + 12173 + if (insn == 0xe97fe97f && arm_dc_feature(s, ARM_FEATURE_V8) && 12174 + arm_dc_feature(s, ARM_FEATURE_M)) { 12175 + /* SG: v8M only */ 12176 + return true; 12177 + } 12178 + 12179 + return false; 12180 + } 12181 + 12092 12182 static void thumb_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu) 12093 12183 { 12094 12184 DisasContext *dc = container_of(dcbase, DisasContext, base); 12095 12185 CPUARMState *env = cpu->env_ptr; 12186 + uint32_t insn; 12187 + bool is_16bit; 12096 12188 12097 12189 if (arm_pre_translate_insn(dc)) { 12098 12190 return; 12099 12191 } 12100 12192 12101 - disas_thumb_insn(env, dc); 12193 + insn = arm_lduw_code(env, dc->pc, dc->sctlr_b); 12194 + is_16bit = thumb_insn_is_16bit(dc, insn); 12195 + dc->pc += 2; 12196 + if (!is_16bit) { 12197 + uint32_t insn2 = arm_lduw_code(env, dc->pc, dc->sctlr_b); 12198 + 12199 + insn = insn << 16 | insn2; 12200 + dc->pc += 2; 12201 + } 12202 + 12203 + if (dc->condexec_mask && !thumb_insn_is_unconditional(dc, insn)) { 12204 + uint32_t cond = dc->condexec_cond; 12205 + 
12206 + if (cond != 0x0e) { /* Skip conditional when condition is AL. */ 12207 + dc->condlabel = gen_new_label(); 12208 + arm_gen_test_cc(cond ^ 1, dc->condlabel); 12209 + dc->condjmp = 1; 12210 + } 12211 + } 12212 + 12213 + if (is_16bit) { 12214 + disas_thumb_insn(dc, insn); 12215 + } else { 12216 + disas_thumb2_insn(dc, insn); 12217 + } 12102 12218 12103 12219 /* Advance the Thumb condexec condition. */ 12104 12220 if (dc->condexec_mask) {