qemu with hax to log dma reads & writes jcs.org/2018/11/12/vfio

accel/tcg: add size parameter in tlb_fill()

The MC68040 MMU provides the size of the access that
triggers the page fault.

This size is set in the Special Status Word which
is written in the stack frame of the access fault
exception.

So we need the size in m68k_cpu_unassigned_access() and
m68k_cpu_handle_mmu_fault().

To be able to do that, this patch modifies the prototype of
handle_mmu_fault handler, tlb_fill() and probe_write().
do_unassigned_access() already includes a size parameter.

This patch also updates handle_mmu_fault handlers and
tlb_fill() of all targets (only parameter, no code change).

Signed-off-by: Laurent Vivier <laurent@vivier.eu>
Reviewed-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-Id: <20180118193846.24953-2-laurent@vivier.eu>

+129 -121
+8 -5
accel/tcg/cputlb.c
··· 880 880 if (unlikely(env->tlb_table[mmu_idx][index].addr_code != 881 881 (addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK)))) { 882 882 if (!VICTIM_TLB_HIT(addr_read, addr)) { 883 - tlb_fill(ENV_GET_CPU(env), addr, MMU_INST_FETCH, mmu_idx, 0); 883 + tlb_fill(ENV_GET_CPU(env), addr, 0, MMU_INST_FETCH, mmu_idx, 0); 884 884 } 885 885 } 886 886 iotlbentry = &env->iotlb[mmu_idx][index]; ··· 928 928 * Otherwise the function will return, and there will be a valid 929 929 * entry in the TLB for this access. 930 930 */ 931 - void probe_write(CPUArchState *env, target_ulong addr, int mmu_idx, 931 + void probe_write(CPUArchState *env, target_ulong addr, int size, int mmu_idx, 932 932 uintptr_t retaddr) 933 933 { 934 934 int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); ··· 938 938 != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) { 939 939 /* TLB entry is for a different page */ 940 940 if (!VICTIM_TLB_HIT(addr_write, addr)) { 941 - tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr); 941 + tlb_fill(ENV_GET_CPU(env), addr, size, MMU_DATA_STORE, 942 + mmu_idx, retaddr); 942 943 } 943 944 } 944 945 } ··· 981 982 if ((addr & TARGET_PAGE_MASK) 982 983 != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) { 983 984 if (!VICTIM_TLB_HIT(addr_write, addr)) { 984 - tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr); 985 + tlb_fill(ENV_GET_CPU(env), addr, 1 << s_bits, MMU_DATA_STORE, 986 + mmu_idx, retaddr); 985 987 } 986 988 tlb_addr = tlbe->addr_write & ~TLB_INVALID_MASK; 987 989 } ··· 995 997 996 998 /* Let the guest notice RMW on a write-only page. 
*/ 997 999 if (unlikely(tlbe->addr_read != (tlb_addr & ~TLB_NOTDIRTY))) { 998 - tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_LOAD, mmu_idx, retaddr); 1000 + tlb_fill(ENV_GET_CPU(env), addr, 1 << s_bits, MMU_DATA_LOAD, 1001 + mmu_idx, retaddr); 999 1002 /* Since we don't support reads and writes to different addresses, 1000 1003 and we do have the proper page loaded for write, this shouldn't 1001 1004 ever return. But just in case, handle via stop-the-world. */
+8 -6
accel/tcg/softmmu_template.h
··· 124 124 if ((addr & TARGET_PAGE_MASK) 125 125 != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) { 126 126 if (!VICTIM_TLB_HIT(ADDR_READ, addr)) { 127 - tlb_fill(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE, 127 + tlb_fill(ENV_GET_CPU(env), addr, DATA_SIZE, READ_ACCESS_TYPE, 128 128 mmu_idx, retaddr); 129 129 } 130 130 tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ; ··· 191 191 if ((addr & TARGET_PAGE_MASK) 192 192 != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) { 193 193 if (!VICTIM_TLB_HIT(ADDR_READ, addr)) { 194 - tlb_fill(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE, 194 + tlb_fill(ENV_GET_CPU(env), addr, DATA_SIZE, READ_ACCESS_TYPE, 195 195 mmu_idx, retaddr); 196 196 } 197 197 tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ; ··· 283 283 if ((addr & TARGET_PAGE_MASK) 284 284 != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) { 285 285 if (!VICTIM_TLB_HIT(addr_write, addr)) { 286 - tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr); 286 + tlb_fill(ENV_GET_CPU(env), addr, DATA_SIZE, MMU_DATA_STORE, 287 + mmu_idx, retaddr); 287 288 } 288 289 tlb_addr = env->tlb_table[mmu_idx][index].addr_write & ~TLB_INVALID_MASK; 289 290 } ··· 316 317 tlb_addr2 = env->tlb_table[mmu_idx][index2].addr_write; 317 318 if (page2 != (tlb_addr2 & (TARGET_PAGE_MASK | TLB_INVALID_MASK)) 318 319 && !VICTIM_TLB_HIT(addr_write, page2)) { 319 - tlb_fill(ENV_GET_CPU(env), page2, MMU_DATA_STORE, 320 + tlb_fill(ENV_GET_CPU(env), page2, DATA_SIZE, MMU_DATA_STORE, 320 321 mmu_idx, retaddr); 321 322 } 322 323 ··· 359 360 if ((addr & TARGET_PAGE_MASK) 360 361 != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) { 361 362 if (!VICTIM_TLB_HIT(addr_write, addr)) { 362 - tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr); 363 + tlb_fill(ENV_GET_CPU(env), addr, DATA_SIZE, MMU_DATA_STORE, 364 + mmu_idx, retaddr); 363 365 } 364 366 tlb_addr = env->tlb_table[mmu_idx][index].addr_write & ~TLB_INVALID_MASK; 365 367 } ··· 392 394 tlb_addr2 = 
env->tlb_table[mmu_idx][index2].addr_write; 393 395 if (page2 != (tlb_addr2 & (TARGET_PAGE_MASK | TLB_INVALID_MASK)) 394 396 && !VICTIM_TLB_HIT(addr_write, page2)) { 395 - tlb_fill(ENV_GET_CPU(env), page2, MMU_DATA_STORE, 397 + tlb_fill(ENV_GET_CPU(env), page2, DATA_SIZE, MMU_DATA_STORE, 396 398 mmu_idx, retaddr); 397 399 } 398 400
+1 -1
accel/tcg/user-exec.c
··· 149 149 cc = CPU_GET_CLASS(cpu); 150 150 /* see if it is an MMU fault */ 151 151 g_assert(cc->handle_mmu_fault); 152 - ret = cc->handle_mmu_fault(cpu, address, is_write, MMU_USER_IDX); 152 + ret = cc->handle_mmu_fault(cpu, address, 0, is_write, MMU_USER_IDX); 153 153 154 154 if (ret == 0) { 155 155 /* The MMU fault was handled without causing real CPU fault.
+3 -3
include/exec/exec-all.h
··· 253 253 hwaddr paddr, int prot, 254 254 int mmu_idx, target_ulong size); 255 255 void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr); 256 - void probe_write(CPUArchState *env, target_ulong addr, int mmu_idx, 256 + void probe_write(CPUArchState *env, target_ulong addr, int size, int mmu_idx, 257 257 uintptr_t retaddr); 258 258 #else 259 259 static inline void tlb_flush_page(CPUState *cpu, target_ulong addr) ··· 436 436 struct MemoryRegion *iotlb_to_region(CPUState *cpu, 437 437 hwaddr index, MemTxAttrs attrs); 438 438 439 - void tlb_fill(CPUState *cpu, target_ulong addr, MMUAccessType access_type, 440 - int mmu_idx, uintptr_t retaddr); 439 + void tlb_fill(CPUState *cpu, target_ulong addr, int size, 440 + MMUAccessType access_type, int mmu_idx, uintptr_t retaddr); 441 441 442 442 #endif 443 443
+1 -1
include/qom/cpu.h
··· 174 174 Error **errp); 175 175 void (*set_pc)(CPUState *cpu, vaddr value); 176 176 void (*synchronize_from_tb)(CPUState *cpu, struct TranslationBlock *tb); 177 - int (*handle_mmu_fault)(CPUState *cpu, vaddr address, int rw, 177 + int (*handle_mmu_fault)(CPUState *cpu, vaddr address, int size, int rw, 178 178 int mmu_index); 179 179 hwaddr (*get_phys_page_debug)(CPUState *cpu, vaddr addr); 180 180 hwaddr (*get_phys_page_attrs_debug)(CPUState *cpu, vaddr addr,
+1 -1
target/alpha/cpu.h
··· 479 479 is returned if the signal was handled by the virtual CPU. */ 480 480 int cpu_alpha_signal_handler(int host_signum, void *pinfo, 481 481 void *puc); 482 - int alpha_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int rw, 482 + int alpha_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int size, int rw, 483 483 int mmu_idx); 484 484 void QEMU_NORETURN dynamic_excp(CPUAlphaState *, uintptr_t, int, int); 485 485 void QEMU_NORETURN arith_excp(CPUAlphaState *, uintptr_t, int, uint64_t);
+2 -2
target/alpha/helper.c
··· 103 103 } 104 104 105 105 #if defined(CONFIG_USER_ONLY) 106 - int alpha_cpu_handle_mmu_fault(CPUState *cs, vaddr address, 106 + int alpha_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size, 107 107 int rw, int mmu_idx) 108 108 { 109 109 AlphaCPU *cpu = ALPHA_CPU(cs); ··· 247 247 return (fail >= 0 ? -1 : phys); 248 248 } 249 249 250 - int alpha_cpu_handle_mmu_fault(CPUState *cs, vaddr addr, int rw, 250 + int alpha_cpu_handle_mmu_fault(CPUState *cs, vaddr addr, int size, int rw, 251 251 int mmu_idx) 252 252 { 253 253 AlphaCPU *cpu = ALPHA_CPU(cs);
+3 -3
target/alpha/mem_helper.c
··· 69 69 NULL, it means that the function was called in C code (i.e. not 70 70 from generated code or from helper.c) */ 71 71 /* XXX: fix it to restore all registers */ 72 - void tlb_fill(CPUState *cs, target_ulong addr, MMUAccessType access_type, 73 - int mmu_idx, uintptr_t retaddr) 72 + void tlb_fill(CPUState *cs, target_ulong addr, int size, 73 + MMUAccessType access_type, int mmu_idx, uintptr_t retaddr) 74 74 { 75 75 int ret; 76 76 77 - ret = alpha_cpu_handle_mmu_fault(cs, addr, access_type, mmu_idx); 77 + ret = alpha_cpu_handle_mmu_fault(cs, addr, size, access_type, mmu_idx); 78 78 if (unlikely(ret != 0)) { 79 79 /* Exception index and error code are already set */ 80 80 cpu_loop_exit_restore(cs, retaddr);
+2 -2
target/arm/cpu.c
··· 1689 1689 }; 1690 1690 1691 1691 #ifdef CONFIG_USER_ONLY 1692 - static int arm_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int rw, 1693 - int mmu_idx) 1692 + static int arm_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size, 1693 + int rw, int mmu_idx) 1694 1694 { 1695 1695 ARMCPU *cpu = ARM_CPU(cs); 1696 1696 CPUARMState *env = &cpu->env;
+2 -2
target/arm/op_helper.c
··· 172 172 * NULL, it means that the function was called in C code (i.e. not 173 173 * from generated code or from helper.c) 174 174 */ 175 - void tlb_fill(CPUState *cs, target_ulong addr, MMUAccessType access_type, 176 - int mmu_idx, uintptr_t retaddr) 175 + void tlb_fill(CPUState *cs, target_ulong addr, int size, 176 + MMUAccessType access_type, int mmu_idx, uintptr_t retaddr) 177 177 { 178 178 bool ret; 179 179 ARMMMUFaultInfo fi = {};
+1 -1
target/cris/cpu.h
··· 283 283 return !!(env->pregs[PR_CCS] & U_FLAG); 284 284 } 285 285 286 - int cris_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int rw, 286 + int cris_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int size, int rw, 287 287 int mmu_idx); 288 288 289 289 /* Support function regs. */
+2 -2
target/cris/helper.c
··· 53 53 cris_cpu_do_interrupt(cs); 54 54 } 55 55 56 - int cris_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int rw, 56 + int cris_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size, int rw, 57 57 int mmu_idx) 58 58 { 59 59 CRISCPU *cpu = CRIS_CPU(cs); ··· 76 76 env->pregs[PR_CCS] = ccs; 77 77 } 78 78 79 - int cris_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int rw, 79 + int cris_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size, int rw, 80 80 int mmu_idx) 81 81 { 82 82 CRISCPU *cpu = CRIS_CPU(cs);
+3 -3
target/cris/op_helper.c
··· 41 41 /* Try to fill the TLB and return an exception if error. If retaddr is 42 42 NULL, it means that the function was called in C code (i.e. not 43 43 from generated code or from helper.c) */ 44 - void tlb_fill(CPUState *cs, target_ulong addr, MMUAccessType access_type, 45 - int mmu_idx, uintptr_t retaddr) 44 + void tlb_fill(CPUState *cs, target_ulong addr, int size, 45 + MMUAccessType access_type, int mmu_idx, uintptr_t retaddr) 46 46 { 47 47 CRISCPU *cpu = CRIS_CPU(cs); 48 48 CPUCRISState *env = &cpu->env; ··· 50 50 51 51 D_LOG("%s pc=%x tpc=%x ra=%p\n", __func__, 52 52 env->pc, env->pregs[PR_EDA], (void *)retaddr); 53 - ret = cris_cpu_handle_mmu_fault(cs, addr, access_type, mmu_idx); 53 + ret = cris_cpu_handle_mmu_fault(cs, addr, size, access_type, mmu_idx); 54 54 if (unlikely(ret)) { 55 55 if (retaddr) { 56 56 /* now we have a real cpu fault */
+2 -1
target/hppa/cpu.h
··· 132 132 #define cpu_signal_handler cpu_hppa_signal_handler 133 133 134 134 int cpu_hppa_signal_handler(int host_signum, void *pinfo, void *puc); 135 - int hppa_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int rw, int midx); 135 + int hppa_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int size, 136 + int rw, int midx); 136 137 int hppa_cpu_gdb_read_register(CPUState *cpu, uint8_t *buf, int reg); 137 138 int hppa_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg); 138 139 void hppa_cpu_do_interrupt(CPUState *cpu);
+1 -1
target/hppa/helper.c
··· 65 65 env->psw_cb = cb; 66 66 } 67 67 68 - int hppa_cpu_handle_mmu_fault(CPUState *cs, vaddr address, 68 + int hppa_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size, 69 69 int rw, int mmu_idx) 70 70 { 71 71 HPPACPU *cpu = HPPA_CPU(cs);
+1 -1
target/hppa/op_helper.c
··· 139 139 /* Nothing is stored, but protection is checked and the 140 140 cacheline is marked dirty. */ 141 141 #ifndef CONFIG_USER_ONLY 142 - probe_write(env, addr, cpu_mmu_index(env, 0), ra); 142 + probe_write(env, addr, 0, cpu_mmu_index(env, 0), ra); 143 143 #endif 144 144 break; 145 145 }
+1 -1
target/i386/cpu.h
··· 1504 1504 void host_vendor_fms(char *vendor, int *family, int *model, int *stepping); 1505 1505 1506 1506 /* helper.c */ 1507 - int x86_cpu_handle_mmu_fault(CPUState *cpu, vaddr addr, 1507 + int x86_cpu_handle_mmu_fault(CPUState *cpu, vaddr addr, int size, 1508 1508 int is_write, int mmu_idx); 1509 1509 void x86_cpu_set_a20(X86CPU *cpu, int a20_state); 1510 1510
+2 -2
target/i386/excp_helper.c
··· 138 138 } 139 139 140 140 #if defined(CONFIG_USER_ONLY) 141 - int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr, 141 + int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr, int size, 142 142 int is_write, int mmu_idx) 143 143 { 144 144 X86CPU *cpu = X86_CPU(cs); ··· 162 162 * 0 = nothing more to do 163 163 * 1 = generate PF fault 164 164 */ 165 - int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr, 165 + int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr, int size, 166 166 int is_write1, int mmu_idx) 167 167 { 168 168 X86CPU *cpu = X86_CPU(cs);
+3 -3
target/i386/mem_helper.c
··· 199 199 * from generated code or from helper.c) 200 200 */ 201 201 /* XXX: fix it to restore all registers */ 202 - void tlb_fill(CPUState *cs, target_ulong addr, MMUAccessType access_type, 203 - int mmu_idx, uintptr_t retaddr) 202 + void tlb_fill(CPUState *cs, target_ulong addr, int size, 203 + MMUAccessType access_type, int mmu_idx, uintptr_t retaddr) 204 204 { 205 205 int ret; 206 206 207 - ret = x86_cpu_handle_mmu_fault(cs, addr, access_type, mmu_idx); 207 + ret = x86_cpu_handle_mmu_fault(cs, addr, size, access_type, mmu_idx); 208 208 if (ret) { 209 209 X86CPU *cpu = X86_CPU(cs); 210 210 CPUX86State *env = &cpu->env;
+1 -1
target/lm32/cpu.h
··· 263 263 #define cpu_list lm32_cpu_list 264 264 #define cpu_signal_handler cpu_lm32_signal_handler 265 265 266 - int lm32_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int rw, 266 + int lm32_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int size, int rw, 267 267 int mmu_idx); 268 268 269 269 #include "exec/cpu-all.h"
+1 -1
target/lm32/helper.c
··· 25 25 #include "exec/semihost.h" 26 26 #include "exec/log.h" 27 27 28 - int lm32_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int rw, 28 + int lm32_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size, int rw, 29 29 int mmu_idx) 30 30 { 31 31 LM32CPU *cpu = LM32_CPU(cs);
+3 -3
target/lm32/op_helper.c
··· 144 144 * NULL, it means that the function was called in C code (i.e. not 145 145 * from generated code or from helper.c) 146 146 */ 147 - void tlb_fill(CPUState *cs, target_ulong addr, MMUAccessType access_type, 148 - int mmu_idx, uintptr_t retaddr) 147 + void tlb_fill(CPUState *cs, target_ulong addr, int size, 148 + MMUAccessType access_type, int mmu_idx, uintptr_t retaddr) 149 149 { 150 150 int ret; 151 151 152 - ret = lm32_cpu_handle_mmu_fault(cs, addr, access_type, mmu_idx); 152 + ret = lm32_cpu_handle_mmu_fault(cs, addr, size, access_type, mmu_idx); 153 153 if (unlikely(ret)) { 154 154 /* now we have a real cpu fault */ 155 155 cpu_loop_exit_restore(cs, retaddr);
+1 -1
target/m68k/cpu.h
··· 418 418 return (env->sr & SR_S) == 0 ? 1 : 0; 419 419 } 420 420 421 - int m68k_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int rw, 421 + int m68k_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int size, int rw, 422 422 int mmu_idx); 423 423 424 424 #include "exec/cpu-all.h"
+2 -2
target/m68k/helper.c
··· 308 308 309 309 #if defined(CONFIG_USER_ONLY) 310 310 311 - int m68k_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int rw, 311 + int m68k_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size, int rw, 312 312 int mmu_idx) 313 313 { 314 314 M68kCPU *cpu = M68K_CPU(cs); ··· 328 328 return addr; 329 329 } 330 330 331 - int m68k_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int rw, 331 + int m68k_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size, int rw, 332 332 int mmu_idx) 333 333 { 334 334 int prot;
+3 -3
target/m68k/op_helper.c
··· 39 39 /* Try to fill the TLB and return an exception if error. If retaddr is 40 40 NULL, it means that the function was called in C code (i.e. not 41 41 from generated code or from helper.c) */ 42 - void tlb_fill(CPUState *cs, target_ulong addr, MMUAccessType access_type, 43 - int mmu_idx, uintptr_t retaddr) 42 + void tlb_fill(CPUState *cs, target_ulong addr, int size, 43 + MMUAccessType access_type, int mmu_idx, uintptr_t retaddr) 44 44 { 45 45 int ret; 46 46 47 - ret = m68k_cpu_handle_mmu_fault(cs, addr, access_type, mmu_idx); 47 + ret = m68k_cpu_handle_mmu_fault(cs, addr, size, access_type, mmu_idx); 48 48 if (unlikely(ret)) { 49 49 /* now we have a real cpu fault */ 50 50 cpu_loop_exit_restore(cs, retaddr);
+1 -1
target/microblaze/cpu.h
··· 367 367 return MMU_KERNEL_IDX; 368 368 } 369 369 370 - int mb_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int rw, 370 + int mb_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int size, int rw, 371 371 int mmu_idx); 372 372 373 373 #include "exec/cpu-all.h"
+2 -2
target/microblaze/helper.c
··· 38 38 env->regs[14] = env->sregs[SR_PC]; 39 39 } 40 40 41 - int mb_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int rw, 41 + int mb_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size, int rw, 42 42 int mmu_idx) 43 43 { 44 44 cs->exception_index = 0xaa; ··· 48 48 49 49 #else /* !CONFIG_USER_ONLY */ 50 50 51 - int mb_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int rw, 51 + int mb_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size, int rw, 52 52 int mmu_idx) 53 53 { 54 54 MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
+3 -3
target/microblaze/op_helper.c
··· 33 33 * NULL, it means that the function was called in C code (i.e. not 34 34 * from generated code or from helper.c) 35 35 */ 36 - void tlb_fill(CPUState *cs, target_ulong addr, MMUAccessType access_type, 37 - int mmu_idx, uintptr_t retaddr) 36 + void tlb_fill(CPUState *cs, target_ulong addr, int size, 37 + MMUAccessType access_type, int mmu_idx, uintptr_t retaddr) 38 38 { 39 39 int ret; 40 40 41 - ret = mb_cpu_handle_mmu_fault(cs, addr, access_type, mmu_idx); 41 + ret = mb_cpu_handle_mmu_fault(cs, addr, size, access_type, mmu_idx); 42 42 if (unlikely(ret)) { 43 43 /* now we have a real cpu fault */ 44 44 cpu_loop_exit_restore(cs, retaddr);
+1 -1
target/mips/helper.c
··· 535 535 } 536 536 #endif 537 537 538 - int mips_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int rw, 538 + int mips_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size, int rw, 539 539 int mmu_idx) 540 540 { 541 541 MIPSCPU *cpu = MIPS_CPU(cs);
+1 -1
target/mips/internal.h
··· 202 202 void cpu_mips_stop_count(CPUMIPSState *env); 203 203 204 204 /* helper.c */ 205 - int mips_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int rw, 205 + int mips_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int size, int rw, 206 206 int mmu_idx); 207 207 208 208 /* op_helper.c */
+5 -5
target/mips/op_helper.c
··· 2451 2451 do_raise_exception_err(env, excp, error_code, retaddr); 2452 2452 } 2453 2453 2454 - void tlb_fill(CPUState *cs, target_ulong addr, MMUAccessType access_type, 2455 - int mmu_idx, uintptr_t retaddr) 2454 + void tlb_fill(CPUState *cs, target_ulong addr, int size, 2455 + MMUAccessType access_type, int mmu_idx, uintptr_t retaddr) 2456 2456 { 2457 2457 int ret; 2458 2458 2459 - ret = mips_cpu_handle_mmu_fault(cs, addr, access_type, mmu_idx); 2459 + ret = mips_cpu_handle_mmu_fault(cs, addr, size, access_type, mmu_idx); 2460 2460 if (ret) { 2461 2461 MIPSCPU *cpu = MIPS_CPU(cs); 2462 2462 CPUMIPSState *env = &cpu->env; ··· 4190 4190 target_ulong page_addr; 4191 4191 if (unlikely(MSA_PAGESPAN(addr))) { 4192 4192 /* first page */ 4193 - probe_write(env, addr, mmu_idx, retaddr); 4193 + probe_write(env, addr, 0, mmu_idx, retaddr); 4194 4194 /* second page */ 4195 4195 page_addr = (addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE; 4196 - probe_write(env, page_addr, mmu_idx, retaddr); 4196 + probe_write(env, page_addr, 0, mmu_idx, retaddr); 4197 4197 } 4198 4198 #endif 4199 4199 }
+1 -1
target/moxie/cpu.h
··· 142 142 *flags = 0; 143 143 } 144 144 145 - int moxie_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, 145 + int moxie_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int size, 146 146 int rw, int mmu_idx); 147 147 148 148 #endif /* MOXIE_CPU_H */
+5 -5
target/moxie/helper.c
··· 29 29 /* Try to fill the TLB and return an exception if error. If retaddr is 30 30 NULL, it means that the function was called in C code (i.e. not 31 31 from generated code or from helper.c) */ 32 - void tlb_fill(CPUState *cs, target_ulong addr, MMUAccessType access_type, 33 - int mmu_idx, uintptr_t retaddr) 32 + void tlb_fill(CPUState *cs, target_ulong addr, int size, 33 + MMUAccessType access_type, int mmu_idx, uintptr_t retaddr) 34 34 { 35 35 int ret; 36 36 37 - ret = moxie_cpu_handle_mmu_fault(cs, addr, access_type, mmu_idx); 37 + ret = moxie_cpu_handle_mmu_fault(cs, addr, size, access_type, mmu_idx); 38 38 if (unlikely(ret)) { 39 39 cpu_loop_exit_restore(cs, retaddr); 40 40 } ··· 94 94 cs->exception_index = -1; 95 95 } 96 96 97 - int moxie_cpu_handle_mmu_fault(CPUState *cs, vaddr address, 97 + int moxie_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size, 98 98 int rw, int mmu_idx) 99 99 { 100 100 MoxieCPU *cpu = MOXIE_CPU(cs); ··· 107 107 108 108 #else /* !CONFIG_USER_ONLY */ 109 109 110 - int moxie_cpu_handle_mmu_fault(CPUState *cs, vaddr address, 110 + int moxie_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size, 111 111 int rw, int mmu_idx) 112 112 { 113 113 MoxieCPU *cpu = MOXIE_CPU(cs);
+1 -1
target/nios2/cpu.h
··· 252 252 MMU_SUPERVISOR_IDX; 253 253 } 254 254 255 - int nios2_cpu_handle_mmu_fault(CPUState *env, vaddr address, 255 + int nios2_cpu_handle_mmu_fault(CPUState *env, vaddr address, int size, 256 256 int rw, int mmu_idx); 257 257 258 258 static inline int cpu_interrupts_enabled(CPUNios2State *env)
+4 -2
target/nios2/helper.c
··· 37 37 env->regs[R_EA] = env->regs[R_PC] + 4; 38 38 } 39 39 40 - int nios2_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int rw, int mmu_idx) 40 + int nios2_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size, 41 + int rw, int mmu_idx) 41 42 { 42 43 cs->exception_index = 0xaa; 43 44 /* Page 0x1000 is kuser helper */ ··· 232 233 return 1; 233 234 } 234 235 235 - int nios2_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int rw, int mmu_idx) 236 + int nios2_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size, 237 + int rw, int mmu_idx) 236 238 { 237 239 Nios2CPU *cpu = NIOS2_CPU(cs); 238 240 CPUNios2State *env = &cpu->env;
+3 -3
target/nios2/mmu.c
··· 35 35 #define MMU_LOG(x) 36 36 #endif 37 37 38 - void tlb_fill(CPUState *cs, target_ulong addr, MMUAccessType access_type, 39 - int mmu_idx, uintptr_t retaddr) 38 + void tlb_fill(CPUState *cs, target_ulong addr, int size, 39 + MMUAccessType access_type, int mmu_idx, uintptr_t retaddr) 40 40 { 41 41 int ret; 42 42 43 - ret = nios2_cpu_handle_mmu_fault(cs, addr, access_type, mmu_idx); 43 + ret = nios2_cpu_handle_mmu_fault(cs, addr, size, access_type, mmu_idx); 44 44 if (unlikely(ret)) { 45 45 /* now we have a real cpu fault */ 46 46 cpu_loop_exit_restore(cs, retaddr);
+1 -1
target/openrisc/cpu.h
··· 356 356 int openrisc_cpu_gdb_read_register(CPUState *cpu, uint8_t *buf, int reg); 357 357 int openrisc_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg); 358 358 void openrisc_translate_init(void); 359 - int openrisc_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, 359 + int openrisc_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int size, 360 360 int rw, int mmu_idx); 361 361 int cpu_openrisc_signal_handler(int host_signum, void *pinfo, void *puc); 362 362
+4 -4
target/openrisc/mmu.c
··· 178 178 } 179 179 180 180 #ifndef CONFIG_USER_ONLY 181 - int openrisc_cpu_handle_mmu_fault(CPUState *cs, 182 - vaddr address, int rw, int mmu_idx) 181 + int openrisc_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size, 182 + int rw, int mmu_idx) 183 183 { 184 184 OpenRISCCPU *cpu = OPENRISC_CPU(cs); 185 185 int ret = 0; ··· 202 202 return ret; 203 203 } 204 204 #else 205 - int openrisc_cpu_handle_mmu_fault(CPUState *cs, 206 - vaddr address, int rw, int mmu_idx) 205 + int openrisc_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size, 206 + int rw, int mmu_idx) 207 207 { 208 208 OpenRISCCPU *cpu = OPENRISC_CPU(cs); 209 209 int ret = 0;
+3 -3
target/openrisc/mmu_helper.c
··· 25 25 26 26 #ifndef CONFIG_USER_ONLY 27 27 28 - void tlb_fill(CPUState *cs, target_ulong addr, MMUAccessType access_type, 29 - int mmu_idx, uintptr_t retaddr) 28 + void tlb_fill(CPUState *cs, target_ulong addr, int size, 29 + MMUAccessType access_type, int mmu_idx, uintptr_t retaddr) 30 30 { 31 31 int ret; 32 32 33 - ret = openrisc_cpu_handle_mmu_fault(cs, addr, access_type, mmu_idx); 33 + ret = openrisc_cpu_handle_mmu_fault(cs, addr, size, access_type, mmu_idx); 34 34 35 35 if (ret) { 36 36 /* Raise Exception. */
+1 -1
target/ppc/cpu.h
··· 1308 1308 int cpu_ppc_signal_handler (int host_signum, void *pinfo, 1309 1309 void *puc); 1310 1310 #if defined(CONFIG_USER_ONLY) 1311 - int ppc_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int rw, 1311 + int ppc_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int size, int rw, 1312 1312 int mmu_idx); 1313 1313 #endif 1314 1314
+2 -2
target/ppc/mmu_helper.c
··· 2925 2925 NULL, it means that the function was called in C code (i.e. not 2926 2926 from generated code or from helper.c) */ 2927 2927 /* XXX: fix it to restore all registers */ 2928 - void tlb_fill(CPUState *cs, target_ulong addr, MMUAccessType access_type, 2929 - int mmu_idx, uintptr_t retaddr) 2928 + void tlb_fill(CPUState *cs, target_ulong addr, int size, 2929 + MMUAccessType access_type, int mmu_idx, uintptr_t retaddr) 2930 2930 { 2931 2931 PowerPCCPU *cpu = POWERPC_CPU(cs); 2932 2932 PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cs);
+1 -1
target/ppc/user_only_helper.c
··· 21 21 #include "qemu/osdep.h" 22 22 #include "cpu.h" 23 23 24 - int ppc_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int rw, 24 + int ppc_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size, int rw, 25 25 int mmu_idx) 26 26 { 27 27 PowerPCCPU *cpu = POWERPC_CPU(cs);
+2 -2
target/s390x/excp_helper.c
··· 55 55 cs->exception_index = -1; 56 56 } 57 57 58 - int s390_cpu_handle_mmu_fault(CPUState *cs, vaddr address, 58 + int s390_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size, 59 59 int rw, int mmu_idx) 60 60 { 61 61 S390CPU *cpu = S390_CPU(cs); ··· 83 83 } 84 84 } 85 85 86 - int s390_cpu_handle_mmu_fault(CPUState *cs, vaddr orig_vaddr, 86 + int s390_cpu_handle_mmu_fault(CPUState *cs, vaddr orig_vaddr, int size, 87 87 int rw, int mmu_idx) 88 88 { 89 89 S390CPU *cpu = S390_CPU(cs);
+1 -1
target/s390x/internal.h
··· 323 323 void s390x_cpu_debug_excp_handler(CPUState *cs); 324 324 void s390_cpu_do_interrupt(CPUState *cpu); 325 325 bool s390_cpu_exec_interrupt(CPUState *cpu, int int_req); 326 - int s390_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int rw, 326 + int s390_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int size, int rw, 327 327 int mmu_idx); 328 328 void s390x_cpu_do_unaligned_access(CPUState *cs, vaddr addr, 329 329 MMUAccessType access_type,
+4 -4
target/s390x/mem_helper.c
··· 39 39 NULL, it means that the function was called in C code (i.e. not 40 40 from generated code or from helper.c) */ 41 41 /* XXX: fix it to restore all registers */ 42 - void tlb_fill(CPUState *cs, target_ulong addr, MMUAccessType access_type, 43 - int mmu_idx, uintptr_t retaddr) 42 + void tlb_fill(CPUState *cs, target_ulong addr, int size, 43 + MMUAccessType access_type, int mmu_idx, uintptr_t retaddr) 44 44 { 45 - int ret = s390_cpu_handle_mmu_fault(cs, addr, access_type, mmu_idx); 45 + int ret = s390_cpu_handle_mmu_fault(cs, addr, size, access_type, mmu_idx); 46 46 if (unlikely(ret != 0)) { 47 47 cpu_loop_exit_restore(cs, retaddr); 48 48 } ··· 1440 1440 1441 1441 /* Sanity check writability of the store address. */ 1442 1442 #ifndef CONFIG_USER_ONLY 1443 - probe_write(env, a2, mem_idx, ra); 1443 + probe_write(env, a2, 0, mem_idx, ra); 1444 1444 #endif 1445 1445 1446 1446 /* Note that the compare-and-swap is atomic, and the store is atomic, but
+1 -1
target/sh4/cpu.h
··· 246 246 void sh4_translate_init(void); 247 247 int cpu_sh4_signal_handler(int host_signum, void *pinfo, 248 248 void *puc); 249 - int superh_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int rw, 249 + int superh_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int size, int rw, 250 250 int mmu_idx); 251 251 252 252 void sh4_cpu_list(FILE *f, fprintf_function cpu_fprintf);
+2 -2
target/sh4/helper.c
··· 34 34 cs->exception_index = -1; 35 35 } 36 36 37 - int superh_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int rw, 37 + int superh_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size, int rw, 38 38 int mmu_idx) 39 39 { 40 40 SuperHCPU *cpu = SUPERH_CPU(cs); ··· 458 458 return get_mmu_address(env, physical, prot, address, rw, access_type); 459 459 } 460 460 461 - int superh_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int rw, 461 + int superh_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size, int rw, 462 462 int mmu_idx) 463 463 { 464 464 SuperHCPU *cpu = SUPERH_CPU(cs);
+3 -3
target/sh4/op_helper.c
··· 40 40 cpu_loop_exit_restore(cs, retaddr); 41 41 } 42 42 43 - void tlb_fill(CPUState *cs, target_ulong addr, MMUAccessType access_type, 44 - int mmu_idx, uintptr_t retaddr) 43 + void tlb_fill(CPUState *cs, target_ulong addr, int size, 44 + MMUAccessType access_type, int mmu_idx, uintptr_t retaddr) 45 45 { 46 46 int ret; 47 47 48 - ret = superh_cpu_handle_mmu_fault(cs, addr, access_type, mmu_idx); 48 + ret = superh_cpu_handle_mmu_fault(cs, addr, size, access_type, mmu_idx); 49 49 if (ret) { 50 50 /* now we have a real cpu fault */ 51 51 cpu_loop_exit_restore(cs, retaddr);
+1 -1
target/sparc/cpu.h
··· 582 582 void cpu_sparc_set_id(CPUSPARCState *env, unsigned int cpu); 583 583 void sparc_cpu_list(FILE *f, fprintf_function cpu_fprintf); 584 584 /* mmu_helper.c */ 585 - int sparc_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int rw, 585 + int sparc_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int size, int rw, 586 586 int mmu_idx); 587 587 target_ulong mmu_probe(CPUSPARCState *env, target_ulong address, int mmulev); 588 588 void dump_mmu(FILE *f, fprintf_function cpu_fprintf, CPUSPARCState *env);
+3 -3
target/sparc/ldst_helper.c
··· 1929 1929 NULL, it means that the function was called in C code (i.e. not 1930 1930 from generated code or from helper.c) */ 1931 1931 /* XXX: fix it to restore all registers */ 1932 - void tlb_fill(CPUState *cs, target_ulong addr, MMUAccessType access_type, 1933 - int mmu_idx, uintptr_t retaddr) 1932 + void tlb_fill(CPUState *cs, target_ulong addr, int size, 1933 + MMUAccessType access_type, int mmu_idx, uintptr_t retaddr) 1934 1934 { 1935 1935 int ret; 1936 1936 1937 - ret = sparc_cpu_handle_mmu_fault(cs, addr, access_type, mmu_idx); 1937 + ret = sparc_cpu_handle_mmu_fault(cs, addr, size, access_type, mmu_idx); 1938 1938 if (ret) { 1939 1939 cpu_loop_exit_restore(cs, retaddr); 1940 1940 }
+3 -3
target/sparc/mmu_helper.c
··· 27 27 28 28 #if defined(CONFIG_USER_ONLY) 29 29 30 - int sparc_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int rw, 30 + int sparc_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size, int rw, 31 31 int mmu_idx) 32 32 { 33 33 SPARCCPU *cpu = SPARC_CPU(cs); ··· 208 208 } 209 209 210 210 /* Perform address translation */ 211 - int sparc_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int rw, 211 + int sparc_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size, int rw, 212 212 int mmu_idx) 213 213 { 214 214 SPARCCPU *cpu = SPARC_CPU(cs); ··· 713 713 } 714 714 715 715 /* Perform address translation */ 716 - int sparc_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int rw, 716 + int sparc_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size, int rw, 717 717 int mmu_idx) 718 718 { 719 719 SPARCCPU *cpu = SPARC_CPU(cs);
+2 -2
target/tilegx/cpu.c
··· 112 112 cs->exception_index = -1; 113 113 } 114 114 115 - static int tilegx_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int rw, 116 - int mmu_idx) 115 + static int tilegx_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size, 116 + int rw, int mmu_idx) 117 117 { 118 118 TileGXCPU *cpu = TILEGX_CPU(cs); 119 119
+2 -2
target/tricore/op_helper.c
··· 2806 2806 cpu_loop_exit_restore(cs, pc); 2807 2807 } 2808 2808 2809 - void tlb_fill(CPUState *cs, target_ulong addr, MMUAccessType access_type, 2810 - int mmu_idx, uintptr_t retaddr) 2809 + void tlb_fill(CPUState *cs, target_ulong addr, int size, 2810 + MMUAccessType access_type, int mmu_idx, uintptr_t retaddr) 2811 2811 { 2812 2812 int ret; 2813 2813 ret = cpu_tricore_handle_mmu_fault(cs, addr, access_type, mmu_idx);
+1 -1
target/unicore32/cpu.h
··· 181 181 } 182 182 } 183 183 184 - int uc32_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int rw, 184 + int uc32_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int size, int rw, 185 185 int mmu_idx); 186 186 void uc32_translate_init(void); 187 187 void switch_mode(CPUUniCore32State *, int);
+1 -1
target/unicore32/helper.c
··· 230 230 cpu_abort(cs, "NO interrupt in user mode\n"); 231 231 } 232 232 233 - int uc32_cpu_handle_mmu_fault(CPUState *cs, vaddr address, 233 + int uc32_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size, 234 234 int access_type, int mmu_idx) 235 235 { 236 236 cpu_abort(cs, "NO mmu fault in user mode\n");
+3 -3
target/unicore32/op_helper.c
··· 244 244 } 245 245 246 246 #ifndef CONFIG_USER_ONLY 247 - void tlb_fill(CPUState *cs, target_ulong addr, MMUAccessType access_type, 248 - int mmu_idx, uintptr_t retaddr) 247 + void tlb_fill(CPUState *cs, target_ulong addr, int size, 248 + MMUAccessType access_type, int mmu_idx, uintptr_t retaddr) 249 249 { 250 250 int ret; 251 251 252 - ret = uc32_cpu_handle_mmu_fault(cs, addr, access_type, mmu_idx); 252 + ret = uc32_cpu_handle_mmu_fault(cs, addr, size, access_type, mmu_idx); 253 253 if (unlikely(ret)) { 254 254 /* now we have a real cpu fault */ 255 255 cpu_loop_exit_restore(cs, retaddr);
+1 -1
target/unicore32/softmmu.c
··· 215 215 return code; 216 216 } 217 217 218 - int uc32_cpu_handle_mmu_fault(CPUState *cs, vaddr address, 218 + int uc32_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size, 219 219 int access_type, int mmu_idx) 220 220 { 221 221 UniCore32CPU *cpu = UNICORE32_CPU(cs);
+2 -2
target/xtensa/op_helper.c
··· 50 50 } 51 51 } 52 52 53 - void tlb_fill(CPUState *cs, target_ulong vaddr, MMUAccessType access_type, 54 - int mmu_idx, uintptr_t retaddr) 53 + void tlb_fill(CPUState *cs, target_ulong vaddr, int size, 54 + MMUAccessType access_type, int mmu_idx, uintptr_t retaddr) 55 55 { 56 56 XtensaCPU *cpu = XTENSA_CPU(cs); 57 57 CPUXtensaState *env = &cpu->env;