QEMU with hacks to log DMA reads & writes (see jcs.org/2018/11/12/vfio)

target/arm: Always pass cacheattr to get_phys_addr

We need to check the memattr of a page in order to determine
whether it is Tagged for MTE. Between Stage1 and Stage2,
this becomes simpler if we always collect this data, instead
of occasionally being presented with NULL.

Use the nonnull attribute to allow the compiler to check that
all pointer arguments are non-null.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20200626033144.790098-42-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>

Authored by Richard Henderson and committed by Peter Maydell.
7e98e21c 34669338

+42 -36
+30 -30
target/arm/helper.c
··· 44 44 bool s1_is_el0, 45 45 hwaddr *phys_ptr, MemTxAttrs *txattrs, int *prot, 46 46 target_ulong *page_size_ptr, 47 - ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs); 47 + ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs) 48 + __attribute__((nonnull)); 48 49 #endif 49 50 50 51 static void switch_mode(CPUARMState *env, int mode); ··· 11101 11102 arm_tlb_bti_gp(txattrs) = true; 11102 11103 } 11103 11104 11104 - if (cacheattrs != NULL) { 11105 - if (mmu_idx == ARMMMUIdx_Stage2) { 11106 - cacheattrs->attrs = convert_stage2_attrs(env, 11107 - extract32(attrs, 0, 4)); 11108 - } else { 11109 - /* Index into MAIR registers for cache attributes */ 11110 - uint8_t attrindx = extract32(attrs, 0, 3); 11111 - uint64_t mair = env->cp15.mair_el[regime_el(env, mmu_idx)]; 11112 - assert(attrindx <= 7); 11113 - cacheattrs->attrs = extract64(mair, attrindx * 8, 8); 11114 - } 11115 - cacheattrs->shareability = extract32(attrs, 6, 2); 11105 + if (mmu_idx == ARMMMUIdx_Stage2) { 11106 + cacheattrs->attrs = convert_stage2_attrs(env, extract32(attrs, 0, 4)); 11107 + } else { 11108 + /* Index into MAIR registers for cache attributes */ 11109 + uint8_t attrindx = extract32(attrs, 0, 3); 11110 + uint64_t mair = env->cp15.mair_el[regime_el(env, mmu_idx)]; 11111 + assert(attrindx <= 7); 11112 + cacheattrs->attrs = extract64(mair, attrindx * 8, 8); 11116 11113 } 11114 + cacheattrs->shareability = extract32(attrs, 6, 2); 11117 11115 11118 11116 *phys_ptr = descaddr; 11119 11117 *page_size_ptr = page_size; ··· 11948 11946 ret = get_phys_addr_lpae(env, ipa, access_type, ARMMMUIdx_Stage2, 11949 11947 mmu_idx == ARMMMUIdx_E10_0, 11950 11948 phys_ptr, attrs, &s2_prot, 11951 - page_size, fi, 11952 - cacheattrs != NULL ? &cacheattrs2 : NULL); 11949 + page_size, fi, &cacheattrs2); 11953 11950 fi->s2addr = ipa; 11954 11951 /* Combine the S1 and S2 perms. 
*/ 11955 11952 *prot &= s2_prot; 11956 11953 11957 - /* Combine the S1 and S2 cache attributes, if needed */ 11958 - if (!ret && cacheattrs != NULL) { 11959 - if (env->cp15.hcr_el2 & HCR_DC) { 11960 - /* 11961 - * HCR.DC forces the first stage attributes to 11962 - * Normal Non-Shareable, 11963 - * Inner Write-Back Read-Allocate Write-Allocate, 11964 - * Outer Write-Back Read-Allocate Write-Allocate. 11965 - */ 11966 - cacheattrs->attrs = 0xff; 11967 - cacheattrs->shareability = 0; 11968 - } 11969 - *cacheattrs = combine_cacheattrs(*cacheattrs, cacheattrs2); 11954 + /* If S2 fails, return early. */ 11955 + if (ret) { 11956 + return ret; 11970 11957 } 11971 11958 11972 - return ret; 11959 + /* Combine the S1 and S2 cache attributes. */ 11960 + if (env->cp15.hcr_el2 & HCR_DC) { 11961 + /* 11962 + * HCR.DC forces the first stage attributes to 11963 + * Normal Non-Shareable, 11964 + * Inner Write-Back Read-Allocate Write-Allocate, 11965 + * Outer Write-Back Read-Allocate Write-Allocate. 11966 + */ 11967 + cacheattrs->attrs = 0xff; 11968 + cacheattrs->shareability = 0; 11969 + } 11970 + *cacheattrs = combine_cacheattrs(*cacheattrs, cacheattrs2); 11971 + return 0; 11973 11972 } else { 11974 11973 /* 11975 11974 * For non-EL2 CPUs a stage1+stage2 translation is just stage 1. ··· 12094 12093 bool ret; 12095 12094 ARMMMUFaultInfo fi = {}; 12096 12095 ARMMMUIdx mmu_idx = arm_mmu_idx(env); 12096 + ARMCacheAttrs cacheattrs = {}; 12097 12097 12098 12098 *attrs = (MemTxAttrs) {}; 12099 12099 12100 12100 ret = get_phys_addr(env, addr, 0, mmu_idx, &phys_addr, 12101 - attrs, &prot, &page_size, &fi, NULL); 12101 + attrs, &prot, &page_size, &fi, &cacheattrs); 12102 12102 12103 12103 if (ret) { 12104 12104 return -1;
+2 -1
target/arm/internals.h
··· 1294 1294 MMUAccessType access_type, ARMMMUIdx mmu_idx, 1295 1295 hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot, 1296 1296 target_ulong *page_size, 1297 - ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs); 1297 + ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs) 1298 + __attribute__((nonnull)); 1298 1299 1299 1300 void arm_log_exception(int idx); 1300 1301
+7 -4
target/arm/m_helper.c
··· 187 187 hwaddr physaddr; 188 188 int prot; 189 189 ARMMMUFaultInfo fi = {}; 190 + ARMCacheAttrs cacheattrs = {}; 190 191 bool secure = mmu_idx & ARM_MMU_IDX_M_S; 191 192 int exc; 192 193 bool exc_secure; 193 194 194 195 if (get_phys_addr(env, addr, MMU_DATA_STORE, mmu_idx, &physaddr, 195 - &attrs, &prot, &page_size, &fi, NULL)) { 196 + &attrs, &prot, &page_size, &fi, &cacheattrs)) { 196 197 /* MPU/SAU lookup failed */ 197 198 if (fi.type == ARMFault_QEMU_SFault) { 198 199 if (mode == STACK_LAZYFP) { ··· 279 280 hwaddr physaddr; 280 281 int prot; 281 282 ARMMMUFaultInfo fi = {}; 283 + ARMCacheAttrs cacheattrs = {}; 282 284 bool secure = mmu_idx & ARM_MMU_IDX_M_S; 283 285 int exc; 284 286 bool exc_secure; 285 287 uint32_t value; 286 288 287 289 if (get_phys_addr(env, addr, MMU_DATA_LOAD, mmu_idx, &physaddr, 288 - &attrs, &prot, &page_size, &fi, NULL)) { 290 + &attrs, &prot, &page_size, &fi, &cacheattrs)) { 289 291 /* MPU/SAU lookup failed */ 290 292 if (fi.type == ARMFault_QEMU_SFault) { 291 293 qemu_log_mask(CPU_LOG_INT, ··· 1928 1930 V8M_SAttributes sattrs = {}; 1929 1931 MemTxAttrs attrs = {}; 1930 1932 ARMMMUFaultInfo fi = {}; 1933 + ARMCacheAttrs cacheattrs = {}; 1931 1934 MemTxResult txres; 1932 1935 target_ulong page_size; 1933 1936 hwaddr physaddr; ··· 1945 1948 "...really SecureFault with SFSR.INVEP\n"); 1946 1949 return false; 1947 1950 } 1948 - if (get_phys_addr(env, addr, MMU_INST_FETCH, mmu_idx, 1949 - &physaddr, &attrs, &prot, &page_size, &fi, NULL)) { 1951 + if (get_phys_addr(env, addr, MMU_INST_FETCH, mmu_idx, &physaddr, 1952 + &attrs, &prot, &page_size, &fi, &cacheattrs)) { 1950 1953 /* the MPU lookup failed */ 1951 1954 env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_IACCVIOL_MASK; 1952 1955 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_MEM, env->v7m.secure);
+3 -1
target/arm/tlb_helper.c
··· 166 166 int prot, ret; 167 167 MemTxAttrs attrs = {}; 168 168 ARMMMUFaultInfo fi = {}; 169 + ARMCacheAttrs cacheattrs = {}; 169 170 170 171 /* 171 172 * Walk the page table and (if the mapping exists) add the page ··· 175 176 */ 176 177 ret = get_phys_addr(&cpu->env, address, access_type, 177 178 core_to_arm_mmu_idx(&cpu->env, mmu_idx), 178 - &phys_addr, &attrs, &prot, &page_size, &fi, NULL); 179 + &phys_addr, &attrs, &prot, &page_size, 180 + &fi, &cacheattrs); 179 181 if (likely(!ret)) { 180 182 /* 181 183 * Map a single [sub]page. Regions smaller than our declared