QEMU with hax to log DMA reads & writes — jcs.org/2018/11/12/vfio

Merge remote-tracking branch 'remotes/rth/tags/pull-tcg-20180411' into staging

Handle read-modify-write i/o with icount

# gpg: Signature made Wed 11 Apr 2018 00:07:23 BST
# gpg: using RSA key 64DF38E8AF7E215F
# gpg: Good signature from "Richard Henderson <richard.henderson@linaro.org>"
# Primary key fingerprint: 7A48 1E78 868B 4DB6 A85A 05C0 64DF 38E8 AF7E 215F

* remotes/rth/tags/pull-tcg-20180411:
icount: fix cpu_restore_state_from_tb for non-tb-exit cases

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>

+45 -45
+5 -5
accel/tcg/cpu-exec-common.c
··· 27 27 /* exit the current TB, but without causing any exception to be raised */ 28 28 void cpu_loop_exit_noexc(CPUState *cpu) 29 29 { 30 - /* XXX: restore cpu registers saved in host registers */ 31 - 32 30 cpu->exception_index = -1; 33 - siglongjmp(cpu->jmp_env, 1); 31 + cpu_loop_exit(cpu); 34 32 } 35 33 36 34 #if defined(CONFIG_SOFTMMU) ··· 65 63 66 64 void cpu_loop_exit(CPUState *cpu) 67 65 { 66 + /* Undo the setting in cpu_tb_exec. */ 67 + cpu->can_do_io = 1; 68 68 siglongjmp(cpu->jmp_env, 1); 69 69 } 70 70 71 71 void cpu_loop_exit_restore(CPUState *cpu, uintptr_t pc) 72 72 { 73 73 if (pc) { 74 - cpu_restore_state(cpu, pc); 74 + cpu_restore_state(cpu, pc, true); 75 75 } 76 - siglongjmp(cpu->jmp_env, 1); 76 + cpu_loop_exit(cpu); 77 77 } 78 78 79 79 void cpu_loop_exit_atomic(CPUState *cpu, uintptr_t pc)
-1
accel/tcg/cpu-exec.c
··· 704 704 g_assert(cpu == current_cpu); 705 705 g_assert(cc == CPU_GET_CLASS(cpu)); 706 706 #endif /* buggy compiler */ 707 - cpu->can_do_io = 1; 708 707 tb_lock_reset(); 709 708 if (qemu_mutex_iothread_locked()) { 710 709 qemu_mutex_unlock_iothread();
+14 -13
accel/tcg/translate-all.c
··· 299 299 300 300 /* The cpu state corresponding to 'searched_pc' is restored. 301 301 * Called with tb_lock held. 302 + * When reset_icount is true, current TB will be interrupted and 303 + * icount should be recalculated. 302 304 */ 303 305 static int cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb, 304 - uintptr_t searched_pc) 306 + uintptr_t searched_pc, bool reset_icount) 305 307 { 306 308 target_ulong data[TARGET_INSN_START_WORDS] = { tb->pc }; 307 309 uintptr_t host_pc = (uintptr_t)tb->tc.ptr; ··· 333 335 return -1; 334 336 335 337 found: 336 - if (tb->cflags & CF_USE_ICOUNT) { 338 + if (reset_icount && (tb->cflags & CF_USE_ICOUNT)) { 337 339 assert(use_icount); 338 - /* Reset the cycle counter to the start of the block. */ 339 - cpu->icount_decr.u16.low += num_insns; 340 - /* Clear the IO flag. */ 341 - cpu->can_do_io = 0; 340 + /* Reset the cycle counter to the start of the block 341 + and shift it to the number of actually executed instructions */ 342 + cpu->icount_decr.u16.low += num_insns - i; 342 343 } 343 - cpu->icount_decr.u16.low -= i; 344 344 restore_state_to_opc(env, tb, data); 345 345 346 346 #ifdef CONFIG_PROFILER ··· 351 351 return 0; 352 352 } 353 353 354 - bool cpu_restore_state(CPUState *cpu, uintptr_t host_pc) 354 + bool cpu_restore_state(CPUState *cpu, uintptr_t host_pc, bool will_exit) 355 355 { 356 356 TranslationBlock *tb; 357 357 bool r = false; ··· 377 377 tb_lock(); 378 378 tb = tb_find_pc(host_pc); 379 379 if (tb) { 380 - cpu_restore_state_from_tb(cpu, tb, host_pc); 380 + cpu_restore_state_from_tb(cpu, tb, host_pc, will_exit); 381 381 if (tb->cflags & CF_NOCACHE) { 382 382 /* one-shot translation, invalidate it immediately */ 383 383 tb_phys_invalidate(tb, -1); ··· 1511 1511 restore the CPU state */ 1512 1512 1513 1513 current_tb_modified = 1; 1514 - cpu_restore_state_from_tb(cpu, current_tb, cpu->mem_io_pc); 1514 + cpu_restore_state_from_tb(cpu, current_tb, 1515 + cpu->mem_io_pc, true); 1515 1516 
cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base, 1516 1517 &current_flags); 1517 1518 } ··· 1634 1635 restore the CPU state */ 1635 1636 1636 1637 current_tb_modified = 1; 1637 - cpu_restore_state_from_tb(cpu, current_tb, pc); 1638 + cpu_restore_state_from_tb(cpu, current_tb, pc, true); 1638 1639 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base, 1639 1640 &current_flags); 1640 1641 } ··· 1700 1701 tb = tb_find_pc(cpu->mem_io_pc); 1701 1702 if (tb) { 1702 1703 /* We can use retranslation to find the PC. */ 1703 - cpu_restore_state_from_tb(cpu, tb, cpu->mem_io_pc); 1704 + cpu_restore_state_from_tb(cpu, tb, cpu->mem_io_pc, true); 1704 1705 tb_phys_invalidate(tb, -1); 1705 1706 } else { 1706 1707 /* The exception probably happened in a helper. The CPU state should ··· 1736 1737 cpu_abort(cpu, "cpu_io_recompile: could not find TB for pc=%p", 1737 1738 (void *)retaddr); 1738 1739 } 1739 - cpu_restore_state_from_tb(cpu, tb, retaddr); 1740 + cpu_restore_state_from_tb(cpu, tb, retaddr, true); 1740 1741 1741 1742 /* On MIPS and SH, delay slot instructions can only be restarted if 1742 1743 they were already the first instruction in the TB. If this is not
+1 -1
accel/tcg/user-exec.c
··· 168 168 } 169 169 170 170 /* Now we have a real cpu fault. */ 171 - cpu_restore_state(cpu, pc); 171 + cpu_restore_state(cpu, pc, true); 172 172 173 173 sigprocmask(SIG_SETMASK, old_set, NULL); 174 174 cpu_loop_exit(cpu);
+1 -2
hw/misc/mips_itu.c
··· 174 174 static void QEMU_NORETURN block_thread_and_exit(ITCStorageCell *c) 175 175 { 176 176 c->blocked_threads |= 1ULL << current_cpu->cpu_index; 177 - cpu_restore_state(current_cpu, current_cpu->mem_io_pc); 178 177 current_cpu->halted = 1; 179 178 current_cpu->exception_index = EXCP_HLT; 180 - cpu_loop_exit(current_cpu); 179 + cpu_loop_exit_restore(current_cpu, current_cpu->mem_io_pc); 181 180 } 182 181 183 182 /* ITC Bypass View */
+4 -1
include/exec/exec-all.h
··· 50 50 * cpu_restore_state: 51 51 * @cpu: the vCPU state is to be restore to 52 52 * @searched_pc: the host PC the fault occurred at 53 + * @will_exit: true if the TB executed will be interrupted after some 54 + cpu adjustments. Required for maintaining the correct 55 + icount values 53 56 * @return: true if state was restored, false otherwise 54 57 * 55 58 * Attempt to restore the state for a fault occurring in translated 56 59 * code. If the searched_pc is not in translated code no state is 57 60 * restored and the function returns false. 58 61 */ 59 62 bool cpu_restore_state(CPUState *cpu, uintptr_t searched_pc, bool will_exit); 60 63 61 64 void QEMU_NORETURN cpu_loop_exit_noexc(CPUState *cpu); 62 65 void QEMU_NORETURN cpu_io_recompile(CPUState *cpu, uintptr_t retaddr);
+1 -1
target/alpha/helper.c
··· 482 482 cs->exception_index = excp; 483 483 env->error_code = error; 484 484 if (retaddr) { 485 - cpu_restore_state(cs, retaddr); 485 + cpu_restore_state(cs, retaddr, true); 486 486 /* Floating-point exceptions (our only users) point to the next PC. */ 487 487 env->pc += 4; 488 488 }
+2 -4
target/alpha/mem_helper.c
··· 34 34 uint64_t pc; 35 35 uint32_t insn; 36 36 37 - cpu_restore_state(cs, retaddr); 37 + cpu_restore_state(cs, retaddr, true); 38 38 39 39 pc = env->pc; 40 40 insn = cpu_ldl_code(env, pc); ··· 56 56 AlphaCPU *cpu = ALPHA_CPU(cs); 57 57 CPUAlphaState *env = &cpu->env; 58 58 59 - cpu_restore_state(cs, retaddr); 60 - 61 59 env->trap_arg0 = addr; 62 60 env->trap_arg1 = access_type == MMU_DATA_STORE ? 1 : 0; 63 61 cs->exception_index = EXCP_MCHK; 64 62 env->error_code = 0; 65 - cpu_loop_exit(cs); 63 + cpu_loop_exit_restore(cs, retaddr); 66 64 } 67 65 68 66 /* try to fill the TLB and return an exception if error. If retaddr is
+3 -3
target/arm/op_helper.c
··· 180 180 ARMCPU *cpu = ARM_CPU(cs); 181 181 182 182 /* now we have a real cpu fault */ 183 - cpu_restore_state(cs, retaddr); 183 + cpu_restore_state(cs, retaddr, true); 184 184 185 185 deliver_fault(cpu, addr, access_type, mmu_idx, &fi); 186 186 } ··· 195 195 ARMMMUFaultInfo fi = {}; 196 196 197 197 /* now we have a real cpu fault */ 198 - cpu_restore_state(cs, retaddr); 198 + cpu_restore_state(cs, retaddr, true); 199 199 200 200 fi.type = ARMFault_Alignment; 201 201 deliver_fault(cpu, vaddr, access_type, mmu_idx, &fi); ··· 215 215 ARMMMUFaultInfo fi = {}; 216 216 217 217 /* now we have a real cpu fault */ 218 - cpu_restore_state(cs, retaddr); 218 + cpu_restore_state(cs, retaddr, true); 219 219 220 220 fi.ea = arm_extabort_type(response); 221 221 fi.type = ARMFault_SyncExternal;
+2 -2
target/cris/op_helper.c
··· 54 54 if (unlikely(ret)) { 55 55 if (retaddr) { 56 56 /* now we have a real cpu fault */ 57 - if (cpu_restore_state(cs, retaddr)) { 58 - /* Evaluate flags after retranslation. */ 57 + if (cpu_restore_state(cs, retaddr, true)) { 58 + /* Evaluate flags after retranslation. */ 59 59 helper_top_evaluate_flags(env); 60 60 } 61 61 }
+1 -1
target/i386/helper.c
··· 991 991 992 992 cpu_interrupt(cs, CPU_INTERRUPT_TPR); 993 993 } else if (tcg_enabled()) { 994 - cpu_restore_state(cs, cs->mem_io_pc); 994 + cpu_restore_state(cs, cs->mem_io_pc, false); 995 995 996 996 apic_handle_tpr_access_report(cpu->apic_state, env->eip, access); 997 997 }
+1 -1
target/i386/svm_helper.c
··· 584 584 { 585 585 CPUState *cs = CPU(x86_env_get_cpu(env)); 586 586 587 - cpu_restore_state(cs, retaddr); 587 + cpu_restore_state(cs, retaddr, true); 588 588 589 589 qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmexit(%08x, %016" PRIx64 ", %016" 590 590 PRIx64 ", " TARGET_FMT_lx ")!\n",
+2 -2
target/m68k/op_helper.c
··· 1056 1056 CPUState *cs = CPU(m68k_env_get_cpu(env)); 1057 1057 1058 1058 /* Recover PC and CC_OP for the beginning of the insn. */ 1059 - cpu_restore_state(cs, GETPC()); 1059 + cpu_restore_state(cs, GETPC(), true); 1060 1060 1061 1061 /* flags have been modified by gen_flush_flags() */ 1062 1062 env->cc_op = CC_OP_FLAGS; ··· 1087 1087 CPUState *cs = CPU(m68k_env_get_cpu(env)); 1088 1088 1089 1089 /* Recover PC and CC_OP for the beginning of the insn. */ 1090 - cpu_restore_state(cs, GETPC()); 1090 + cpu_restore_state(cs, GETPC(), true); 1091 1091 1092 1092 /* flags have been modified by gen_flush_flags() */ 1093 1093 env->cc_op = CC_OP_FLAGS;
+1 -1
target/moxie/helper.c
··· 48 48 /* Stash the exception type. */ 49 49 env->sregs[2] = ex; 50 50 /* Stash the address where the exception occurred. */ 51 - cpu_restore_state(cs, GETPC()); 51 + cpu_restore_state(cs, GETPC(), true); 52 52 env->sregs[5] = env->pc; 53 53 /* Jump to the exception handline routine. */ 54 54 env->pc = env->sregs[1];
+4 -4
target/openrisc/sys_helper.c
··· 46 46 break; 47 47 48 48 case TO_SPR(0, 16): /* NPC */ 49 - cpu_restore_state(cs, GETPC()); 49 + cpu_restore_state(cs, GETPC(), true); 50 50 /* ??? Mirror or1ksim in not trashing delayed branch state 51 51 when "jumping" to the current instruction. */ 52 52 if (env->pc != rb) { ··· 146 146 case TO_SPR(8, 0): /* PMR */ 147 147 env->pmr = rb; 148 148 if (env->pmr & PMR_DME || env->pmr & PMR_SME) { 149 - cpu_restore_state(cs, GETPC()); 149 + cpu_restore_state(cs, GETPC(), true); 150 150 env->pc += 4; 151 151 cs->halted = 1; 152 152 raise_exception(cpu, EXCP_HALTED); ··· 230 230 return env->evbar; 231 231 232 232 case TO_SPR(0, 16): /* NPC (equals PC) */ 233 - cpu_restore_state(cs, GETPC()); 233 + cpu_restore_state(cs, GETPC(), false); 234 234 return env->pc; 235 235 236 236 case TO_SPR(0, 17): /* SR */ 237 237 return cpu_get_sr(env); 238 238 239 239 case TO_SPR(0, 18): /* PPC */ 240 - cpu_restore_state(cs, GETPC()); 240 + cpu_restore_state(cs, GETPC(), false); 241 241 return env->ppc; 242 242 243 243 case TO_SPR(0, 32): /* EPCR */
+1 -1
target/tricore/op_helper.c
··· 31 31 { 32 32 CPUState *cs = CPU(tricore_env_get_cpu(env)); 33 33 /* in case we come from a helper-call we need to restore the PC */ 34 - cpu_restore_state(cs, pc); 34 + cpu_restore_state(cs, pc, true); 35 35 36 36 /* Tin is loaded into d[15] */ 37 37 env->gpr_d[15] = tin;
+2 -2
target/xtensa/op_helper.c
··· 52 52 53 53 if (xtensa_option_enabled(env->config, XTENSA_OPTION_UNALIGNED_EXCEPTION) && 54 54 !xtensa_option_enabled(env->config, XTENSA_OPTION_HW_ALIGNMENT)) { 55 - cpu_restore_state(CPU(cpu), retaddr); 55 + cpu_restore_state(CPU(cpu), retaddr, true); 56 56 HELPER(exception_cause_vaddr)(env, 57 57 env->pc, LOAD_STORE_ALIGNMENT_CAUSE, addr); 58 58 } ··· 78 78 paddr & TARGET_PAGE_MASK, 79 79 access, mmu_idx, page_size); 80 80 } else { 81 - cpu_restore_state(cs, retaddr); 81 + cpu_restore_state(cs, retaddr, true); 82 82 HELPER(exception_cause_vaddr)(env, env->pc, ret, vaddr); 83 83 } 84 84 }