qemu with hax to log dma reads & writes jcs.org/2018/11/12/vfio

target/arm: Add MTE bits to tb_flags

Cache the composite ATA setting.

Cache when MTE is fully enabled, i.e. access to tags is enabled
and tag checks affect the PE. Do this for both the normal context
and the UNPRIV context.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20200626033144.790098-9-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>

Authored by Richard Henderson and committed by Peter Maydell
81ae05fa 4b779ceb

+75 -4
+8 -4
target/arm/cpu.h
··· 3187 3187 * | | | TBFLAG_A32 | | 3188 3188 * | | +-----+----------+ TBFLAG_AM32 | 3189 3189 * | TBFLAG_ANY | |TBFLAG_M32| | 3190 - * | | +-+----------+--------------| 3191 - * | | | TBFLAG_A64 | 3192 - * +--------------+---------+---------------------------+ 3193 - * 31 20 15 0 3190 + * | +-----------+----------+--------------| 3191 + * | | TBFLAG_A64 | 3192 + * +--------------+-------------------------------------+ 3193 + * 31 20 0 3194 3194 * 3195 3195 * Unless otherwise noted, these bits are cached in env->hflags. 3196 3196 */ ··· 3257 3257 FIELD(TBFLAG_A64, BTYPE, 10, 2) /* Not cached. */ 3258 3258 FIELD(TBFLAG_A64, TBID, 12, 2) 3259 3259 FIELD(TBFLAG_A64, UNPRIV, 14, 1) 3260 + FIELD(TBFLAG_A64, ATA, 15, 1) 3261 + FIELD(TBFLAG_A64, TCMA, 16, 2) 3262 + FIELD(TBFLAG_A64, MTE_ACTIVE, 18, 1) 3263 + FIELD(TBFLAG_A64, MTE0_ACTIVE, 19, 1) 3260 3264 3261 3265 /** 3262 3266 * cpu_mmu_index:
+40
target/arm/helper.c
··· 10655 10655 } 10656 10656 } 10657 10657 10658 + static int aa64_va_parameter_tcma(uint64_t tcr, ARMMMUIdx mmu_idx) 10659 + { 10660 + if (regime_has_2_ranges(mmu_idx)) { 10661 + return extract64(tcr, 57, 2); 10662 + } else { 10663 + /* Replicate the single TCMA bit so we always have 2 bits. */ 10664 + return extract32(tcr, 30, 1) * 3; 10665 + } 10666 + } 10667 + 10658 10668 ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va, 10659 10669 ARMMMUIdx mmu_idx, bool data) 10660 10670 { ··· 12677 12687 default: 12678 12688 break; 12679 12689 } 12690 + } 12691 + 12692 + if (cpu_isar_feature(aa64_mte, env_archcpu(env))) { 12693 + /* 12694 + * Set MTE_ACTIVE if any access may be Checked, and leave clear 12695 + * if all accesses must be Unchecked: 12696 + * 1) If no TBI, then there are no tags in the address to check, 12697 + * 2) If Tag Check Override, then all accesses are Unchecked, 12698 + * 3) If Tag Check Fail == 0, then Checked access have no effect, 12699 + * 4) If no Allocation Tag Access, then all accesses are Unchecked. 12700 + */ 12701 + if (allocation_tag_access_enabled(env, el, sctlr)) { 12702 + flags = FIELD_DP32(flags, TBFLAG_A64, ATA, 1); 12703 + if (tbid 12704 + && !(env->pstate & PSTATE_TCO) 12705 + && (sctlr & (el == 0 ? SCTLR_TCF0 : SCTLR_TCF))) { 12706 + flags = FIELD_DP32(flags, TBFLAG_A64, MTE_ACTIVE, 1); 12707 + } 12708 + } 12709 + /* And again for unprivileged accesses, if required. */ 12710 + if (FIELD_EX32(flags, TBFLAG_A64, UNPRIV) 12711 + && tbid 12712 + && !(env->pstate & PSTATE_TCO) 12713 + && (sctlr & SCTLR_TCF0) 12714 + && allocation_tag_access_enabled(env, 0, sctlr)) { 12715 + flags = FIELD_DP32(flags, TBFLAG_A64, MTE0_ACTIVE, 1); 12716 + } 12717 + /* Cache TCMA as well as TBI. */ 12718 + flags = FIELD_DP32(flags, TBFLAG_A64, TCMA, 12719 + aa64_va_parameter_tcma(tcr, mmu_idx)); 12680 12720 } 12681 12721 12682 12722 return rebuild_hflags_common(env, fp_el, mmu_idx, flags);
+18
target/arm/internals.h
··· 1198 1198 return target_el; 1199 1199 } 1200 1200 1201 + /* Determine if allocation tags are available. */ 1202 + static inline bool allocation_tag_access_enabled(CPUARMState *env, int el, 1203 + uint64_t sctlr) 1204 + { 1205 + if (el < 3 1206 + && arm_feature(env, ARM_FEATURE_EL3) 1207 + && !(env->cp15.scr_el3 & SCR_ATA)) { 1208 + return false; 1209 + } 1210 + if (el < 2 1211 + && arm_feature(env, ARM_FEATURE_EL2) 1212 + && !(arm_hcr_el2_eff(env) & HCR_ATA)) { 1213 + return false; 1214 + } 1215 + sctlr &= (el == 0 ? SCTLR_ATA0 : SCTLR_ATA); 1216 + return sctlr != 0; 1217 + } 1218 + 1201 1219 #ifndef CONFIG_USER_ONLY 1202 1220 1203 1221 /* Security attributes for an address, as returned by v8m_security_lookup. */
+4
target/arm/translate-a64.c
··· 14171 14171 dc->mmu_idx = core_to_aa64_mmu_idx(core_mmu_idx); 14172 14172 dc->tbii = FIELD_EX32(tb_flags, TBFLAG_A64, TBII); 14173 14173 dc->tbid = FIELD_EX32(tb_flags, TBFLAG_A64, TBID); 14174 + dc->tcma = FIELD_EX32(tb_flags, TBFLAG_A64, TCMA); 14174 14175 dc->current_el = arm_mmu_idx_to_el(dc->mmu_idx); 14175 14176 #if !defined(CONFIG_USER_ONLY) 14176 14177 dc->user = (dc->current_el == 0); ··· 14182 14183 dc->bt = FIELD_EX32(tb_flags, TBFLAG_A64, BT); 14183 14184 dc->btype = FIELD_EX32(tb_flags, TBFLAG_A64, BTYPE); 14184 14185 dc->unpriv = FIELD_EX32(tb_flags, TBFLAG_A64, UNPRIV); 14186 + dc->ata = FIELD_EX32(tb_flags, TBFLAG_A64, ATA); 14187 + dc->mte_active[0] = FIELD_EX32(tb_flags, TBFLAG_A64, MTE_ACTIVE); 14188 + dc->mte_active[1] = FIELD_EX32(tb_flags, TBFLAG_A64, MTE0_ACTIVE); 14185 14189 dc->vec_len = 0; 14186 14190 dc->vec_stride = 0; 14187 14191 dc->cp_regs = arm_cpu->cp_regs;
+5
target/arm/translate.h
··· 30 30 ARMMMUIdx mmu_idx; /* MMU index to use for normal loads/stores */ 31 31 uint8_t tbii; /* TBI1|TBI0 for insns */ 32 32 uint8_t tbid; /* TBI1|TBI0 for data */ 33 + uint8_t tcma; /* TCMA1|TCMA0 for MTE */ 33 34 bool ns; /* Use non-secure CPREG bank on access */ 34 35 int fp_excp_el; /* FP exception EL or 0 if enabled */ 35 36 int sve_excp_el; /* SVE exception EL or 0 if enabled */ ··· 77 78 bool unpriv; 78 79 /* True if v8.3-PAuth is active. */ 79 80 bool pauth_active; 81 + /* True if v8.5-MTE access to tags is enabled. */ 82 + bool ata; 83 + /* True if v8.5-MTE tag checks affect the PE; index with is_unpriv. */ 84 + bool mte_active[2]; 80 85 /* True with v8.5-BTI and SCTLR_ELx.BT* set. */ 81 86 bool bt; 82 87 /* True if any CP15 access is trapped by HSTR_EL2 */