qemu with hax to log dma reads & writes jcs.org/2018/11/12/vfio

tcg: let plugins instrument virtual memory accesses

To capture all memory accesses we need to hook into all the various
helper functions that are involved in memory operations as well as the
injected inline helper calls. A later commit will allow us to resolve
the actual guest HW addresses by replaying the lookup.

Signed-off-by: Emilio G. Cota <cota@braap.org>
[AJB: drop haddr handling, just deal in vaddr]
Signed-off-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>

authored by

Emilio G. Cota and committed by
Alex Bennée
e6d86bed cfec3885

+74 -36
+4
accel/tcg/atomic_common.inc.c
··· 25 25 static inline void 26 26 atomic_trace_rmw_post(CPUArchState *env, target_ulong addr, uint16_t info) 27 27 { 28 + qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, info); 29 + qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, info | TRACE_MEM_ST); 28 30 } 29 31 30 32 static inline ··· 36 38 static inline 37 39 void atomic_trace_ld_post(CPUArchState *env, target_ulong addr, uint16_t info) 38 40 { 41 + qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, info); 39 42 } 40 43 41 44 static inline ··· 47 50 static inline 48 51 void atomic_trace_st_post(CPUArchState *env, target_ulong addr, uint16_t info) 49 52 { 53 + qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, info); 50 54 }
+1
accel/tcg/atomic_template.h
··· 18 18 * License along with this library; if not, see <http://www.gnu.org/licenses/>. 19 19 */ 20 20 21 + #include "qemu/plugin.h" 21 22 #include "trace/mem.h" 22 23 23 24 #if DATA_SIZE == 16
+3
accel/tcg/cpu-exec.c
··· 268 268 qemu_mutex_unlock_iothread(); 269 269 } 270 270 assert_no_pages_locked(); 271 + qemu_plugin_disable_mem_helpers(cpu); 271 272 } 272 273 273 274 if (cpu_in_exclusive_context(cpu)) { ··· 701 702 if (qemu_mutex_iothread_locked()) { 702 703 qemu_mutex_unlock_iothread(); 703 704 } 705 + qemu_plugin_disable_mem_helpers(cpu); 706 + 704 707 assert_no_pages_locked(); 705 708 } 706 709
+1
include/exec/cpu-defs.h
··· 214 214 * Since this is placed within CPUNegativeOffsetState, the smallest 215 215 * negative offsets are at the end of the struct. 216 216 */ 217 + 217 218 typedef struct CPUTLB { 218 219 CPUTLBCommon c; 219 220 CPUTLBDesc d[NB_MMU_MODES];
+16 -12
include/exec/cpu_ldst_template.h
··· 28 28 #include "trace-root.h" 29 29 #endif 30 30 31 + #include "qemu/plugin.h" 31 32 #include "trace/mem.h" 32 33 33 34 #if DATA_SIZE == 8 ··· 86 87 target_ulong addr; 87 88 int mmu_idx = CPU_MMU_INDEX; 88 89 TCGMemOpIdx oi; 89 - 90 90 #if !defined(SOFTMMU_CODE_ACCESS) 91 - trace_guest_mem_before_exec( 92 - env_cpu(env), ptr, 93 - trace_mem_build_info(SHIFT, false, MO_TE, false, mmu_idx)); 91 + uint16_t meminfo = trace_mem_build_info(SHIFT, false, MO_TE, false, mmu_idx); 92 + trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo); 94 93 #endif 95 94 96 95 addr = ptr; ··· 104 103 uintptr_t hostaddr = addr + entry->addend; 105 104 res = glue(glue(ld, USUFFIX), _p)((uint8_t *)hostaddr); 106 105 } 106 + #ifndef SOFTMMU_CODE_ACCESS 107 + qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo); 108 + #endif 107 109 return res; 108 110 } 109 111 ··· 124 126 target_ulong addr; 125 127 int mmu_idx = CPU_MMU_INDEX; 126 128 TCGMemOpIdx oi; 127 - 128 129 #if !defined(SOFTMMU_CODE_ACCESS) 129 - trace_guest_mem_before_exec( 130 - env_cpu(env), ptr, 131 - trace_mem_build_info(SHIFT, true, MO_TE, false, mmu_idx)); 130 + uint16_t meminfo = trace_mem_build_info(SHIFT, true, MO_TE, false, mmu_idx); 131 + trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo); 132 132 #endif 133 133 134 134 addr = ptr; ··· 142 142 uintptr_t hostaddr = addr + entry->addend; 143 143 res = glue(glue(lds, SUFFIX), _p)((uint8_t *)hostaddr); 144 144 } 145 + #ifndef SOFTMMU_CODE_ACCESS 146 + qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo); 147 + #endif 145 148 return res; 146 149 } 147 150 ··· 165 168 target_ulong addr; 166 169 int mmu_idx = CPU_MMU_INDEX; 167 170 TCGMemOpIdx oi; 168 - 169 171 #if !defined(SOFTMMU_CODE_ACCESS) 170 - trace_guest_mem_before_exec( 171 - env_cpu(env), ptr, 172 - trace_mem_build_info(SHIFT, false, MO_TE, true, mmu_idx)); 172 + uint16_t meminfo = trace_mem_build_info(SHIFT, false, MO_TE, true, mmu_idx); 173 + trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo); 173 174 
#endif 174 175 175 176 addr = ptr; ··· 183 184 uintptr_t hostaddr = addr + entry->addend; 184 185 glue(glue(st, SUFFIX), _p)((uint8_t *)hostaddr, v); 185 186 } 187 + #ifndef SOFTMMU_CODE_ACCESS 188 + qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo); 189 + #endif 186 190 } 187 191 188 192 static inline void
+17 -15
include/exec/cpu_ldst_useronly_template.h
··· 64 64 static inline RES_TYPE 65 65 glue(glue(cpu_ld, USUFFIX), MEMSUFFIX)(CPUArchState *env, abi_ptr ptr) 66 66 { 67 - #ifdef CODE_ACCESS 68 67 RES_TYPE ret; 68 + #ifdef CODE_ACCESS 69 69 set_helper_retaddr(1); 70 70 ret = glue(glue(ld, USUFFIX), _p)(g2h(ptr)); 71 71 clear_helper_retaddr(); 72 - return ret; 73 72 #else 74 - trace_guest_mem_before_exec( 75 - env_cpu(env), ptr, 76 - trace_mem_build_info(SHIFT, false, MO_TE, false, MMU_USER_IDX)); 77 - return glue(glue(ld, USUFFIX), _p)(g2h(ptr)); 73 + uint16_t meminfo = trace_mem_build_info(SHIFT, false, MO_TE, false, 74 + MMU_USER_IDX); 75 + trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo); 76 + ret = glue(glue(ld, USUFFIX), _p)(g2h(ptr)); 78 77 #endif 78 + return ret; 79 79 } 80 80 81 81 #ifndef CODE_ACCESS ··· 96 96 static inline int 97 97 glue(glue(cpu_lds, SUFFIX), MEMSUFFIX)(CPUArchState *env, abi_ptr ptr) 98 98 { 99 - #ifdef CODE_ACCESS 100 99 int ret; 100 + #ifdef CODE_ACCESS 101 101 set_helper_retaddr(1); 102 102 ret = glue(glue(lds, SUFFIX), _p)(g2h(ptr)); 103 103 clear_helper_retaddr(); 104 - return ret; 105 104 #else 106 - trace_guest_mem_before_exec( 107 - env_cpu(env), ptr, 108 - trace_mem_build_info(SHIFT, true, MO_TE, false, MMU_USER_IDX)); 109 - return glue(glue(lds, SUFFIX), _p)(g2h(ptr)); 105 + uint16_t meminfo = trace_mem_build_info(SHIFT, true, MO_TE, false, 106 + MMU_USER_IDX); 107 + trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo); 108 + ret = glue(glue(lds, SUFFIX), _p)(g2h(ptr)); 109 + qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo); 110 110 #endif 111 + return ret; 111 112 } 112 113 113 114 #ifndef CODE_ACCESS ··· 130 131 glue(glue(cpu_st, SUFFIX), MEMSUFFIX)(CPUArchState *env, abi_ptr ptr, 131 132 RES_TYPE v) 132 133 { 133 - trace_guest_mem_before_exec( 134 - env_cpu(env), ptr, 135 - trace_mem_build_info(SHIFT, false, MO_TE, true, MMU_USER_IDX)); 134 + uint16_t meminfo = trace_mem_build_info(SHIFT, false, MO_TE, true, 135 + MMU_USER_IDX); 136 + 
trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo); 136 137 glue(glue(st, SUFFIX), _p)(g2h(ptr), v); 138 + qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo); 137 139 } 138 140 139 141 static inline void
+31 -9
tcg/tcg-op.c
··· 30 30 #include "tcg-mo.h" 31 31 #include "trace-tcg.h" 32 32 #include "trace/mem.h" 33 + #include "exec/plugin-gen.h" 33 34 34 35 /* Reduce the number of ifdefs below. This assumes that all uses of 35 36 TCGV_HIGH and TCGV_LOW are properly protected by a conditional that ··· 2684 2685 tcg_debug_assert(idx == TB_EXIT_REQUESTED); 2685 2686 } 2686 2687 2688 + plugin_gen_disable_mem_helpers(); 2687 2689 tcg_gen_op1i(INDEX_op_exit_tb, val); 2688 2690 } 2689 2691 ··· 2696 2698 tcg_debug_assert((tcg_ctx->goto_tb_issue_mask & (1 << idx)) == 0); 2697 2699 tcg_ctx->goto_tb_issue_mask |= 1 << idx; 2698 2700 #endif 2701 + plugin_gen_disable_mem_helpers(); 2699 2702 /* When not chaining, we simply fall through to the "fallback" exit. */ 2700 2703 if (!qemu_loglevel_mask(CPU_LOG_TB_NOCHAIN)) { 2701 2704 tcg_gen_op1i(INDEX_op_goto_tb, idx); ··· 2705 2708 void tcg_gen_lookup_and_goto_ptr(void) 2706 2709 { 2707 2710 if (TCG_TARGET_HAS_goto_ptr && !qemu_loglevel_mask(CPU_LOG_TB_NOCHAIN)) { 2708 - TCGv_ptr ptr = tcg_temp_new_ptr(); 2711 + TCGv_ptr ptr; 2712 + 2713 + plugin_gen_disable_mem_helpers(); 2714 + ptr = tcg_temp_new_ptr(); 2709 2715 gen_helper_lookup_tb_ptr(ptr, cpu_env); 2710 2716 tcg_gen_op1i(INDEX_op_goto_ptr, tcgv_ptr_arg(ptr)); 2711 2717 tcg_temp_free_ptr(ptr); ··· 2788 2794 } 2789 2795 } 2790 2796 2797 + static inline void plugin_gen_mem_callbacks(TCGv vaddr, uint16_t info) 2798 + { 2799 + #ifdef CONFIG_PLUGIN 2800 + if (tcg_ctx->plugin_insn == NULL) { 2801 + return; 2802 + } 2803 + plugin_gen_empty_mem_callback(vaddr, info); 2804 + #endif 2805 + } 2806 + 2791 2807 void tcg_gen_qemu_ld_i32(TCGv_i32 val, TCGv addr, TCGArg idx, MemOp memop) 2792 2808 { 2793 2809 MemOp orig_memop; 2810 + uint16_t info = trace_mem_get_info(memop, idx, 0); 2794 2811 2795 2812 tcg_gen_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD); 2796 2813 memop = tcg_canonicalize_memop(memop, 0, 0); 2797 - trace_guest_mem_before_tcg(tcg_ctx->cpu, cpu_env, 2798 - addr, trace_mem_get_info(memop, idx, 0)); 2814 + 
trace_guest_mem_before_tcg(tcg_ctx->cpu, cpu_env, addr, info); 2799 2815 2800 2816 orig_memop = memop; 2801 2817 if (!TCG_TARGET_HAS_MEMORY_BSWAP && (memop & MO_BSWAP)) { ··· 2807 2823 } 2808 2824 2809 2825 gen_ldst_i32(INDEX_op_qemu_ld_i32, val, addr, memop, idx); 2826 + plugin_gen_mem_callbacks(addr, info); 2810 2827 2811 2828 if ((orig_memop ^ memop) & MO_BSWAP) { 2812 2829 switch (orig_memop & MO_SIZE) { ··· 2828 2845 void tcg_gen_qemu_st_i32(TCGv_i32 val, TCGv addr, TCGArg idx, MemOp memop) 2829 2846 { 2830 2847 TCGv_i32 swap = NULL; 2848 + uint16_t info = trace_mem_get_info(memop, idx, 1); 2831 2849 2832 2850 tcg_gen_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST); 2833 2851 memop = tcg_canonicalize_memop(memop, 0, 1); 2834 - trace_guest_mem_before_tcg(tcg_ctx->cpu, cpu_env, 2835 - addr, trace_mem_get_info(memop, idx, 1)); 2852 + trace_guest_mem_before_tcg(tcg_ctx->cpu, cpu_env, addr, info); 2836 2853 2837 2854 if (!TCG_TARGET_HAS_MEMORY_BSWAP && (memop & MO_BSWAP)) { 2838 2855 swap = tcg_temp_new_i32(); ··· 2852 2869 } 2853 2870 2854 2871 gen_ldst_i32(INDEX_op_qemu_st_i32, val, addr, memop, idx); 2872 + plugin_gen_mem_callbacks(addr, info); 2855 2873 2856 2874 if (swap) { 2857 2875 tcg_temp_free_i32(swap); ··· 2861 2879 void tcg_gen_qemu_ld_i64(TCGv_i64 val, TCGv addr, TCGArg idx, MemOp memop) 2862 2880 { 2863 2881 MemOp orig_memop; 2882 + uint16_t info; 2864 2883 2865 2884 if (TCG_TARGET_REG_BITS == 32 && (memop & MO_SIZE) < MO_64) { 2866 2885 tcg_gen_qemu_ld_i32(TCGV_LOW(val), addr, idx, memop); ··· 2874 2893 2875 2894 tcg_gen_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD); 2876 2895 memop = tcg_canonicalize_memop(memop, 1, 0); 2877 - trace_guest_mem_before_tcg(tcg_ctx->cpu, cpu_env, 2878 - addr, trace_mem_get_info(memop, idx, 0)); 2896 + info = trace_mem_get_info(memop, idx, 0); 2897 + trace_guest_mem_before_tcg(tcg_ctx->cpu, cpu_env, addr, info); 2879 2898 2880 2899 orig_memop = memop; 2881 2900 if (!TCG_TARGET_HAS_MEMORY_BSWAP && (memop & MO_BSWAP)) { ··· 2887 2906 } 2888 
2907 2889 2908 gen_ldst_i64(INDEX_op_qemu_ld_i64, val, addr, memop, idx); 2909 + plugin_gen_mem_callbacks(addr, info); 2890 2910 2891 2911 if ((orig_memop ^ memop) & MO_BSWAP) { 2892 2912 switch (orig_memop & MO_SIZE) { ··· 2914 2934 void tcg_gen_qemu_st_i64(TCGv_i64 val, TCGv addr, TCGArg idx, MemOp memop) 2915 2935 { 2916 2936 TCGv_i64 swap = NULL; 2937 + uint16_t info; 2917 2938 2918 2939 if (TCG_TARGET_REG_BITS == 32 && (memop & MO_SIZE) < MO_64) { 2919 2940 tcg_gen_qemu_st_i32(TCGV_LOW(val), addr, idx, memop); ··· 2922 2943 2923 2944 tcg_gen_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST); 2924 2945 memop = tcg_canonicalize_memop(memop, 1, 1); 2925 - trace_guest_mem_before_tcg(tcg_ctx->cpu, cpu_env, 2926 - addr, trace_mem_get_info(memop, idx, 1)); 2946 + info = trace_mem_get_info(memop, idx, 1); 2947 + trace_guest_mem_before_tcg(tcg_ctx->cpu, cpu_env, addr, info); 2927 2948 2928 2949 if (!TCG_TARGET_HAS_MEMORY_BSWAP && (memop & MO_BSWAP)) { 2929 2950 swap = tcg_temp_new_i64(); ··· 2947 2968 } 2948 2969 2949 2970 gen_ldst_i64(INDEX_op_qemu_st_i64, val, addr, memop, idx); 2971 + plugin_gen_mem_callbacks(addr, info); 2950 2972 2951 2973 if (swap) { 2952 2974 tcg_temp_free_i64(swap);
+1
tcg/tcg.h
··· 29 29 #include "exec/memop.h" 30 30 #include "exec/tb-context.h" 31 31 #include "qemu/bitops.h" 32 + #include "qemu/plugin.h" 32 33 #include "qemu/queue.h" 33 34 #include "tcg-mo.h" 34 35 #include "tcg-target.h"