qemu with hax to log dma reads & writes jcs.org/2018/11/12/vfio

plugin: add core code

Signed-off-by: Emilio G. Cota <cota@braap.org>
[AJB: moved directory and merged various fixes]
Signed-off-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>

Authored by Emilio G. Cota and committed by Alex Bennée (commit 54cb65d8, parent 975c4553).

+1241 -1
+8 -1
Makefile
··· 853 853 854 854 ICON_SIZES=16x16 24x24 32x32 48x48 64x64 128x128 256x256 512x512 855 855 856 - install: all $(if $(BUILD_DOCS),install-doc) install-datadir install-localstatedir \ 856 + install-includedir: 857 + $(INSTALL_DIR) "$(DESTDIR)$(includedir)" 858 + 859 + install: all $(if $(BUILD_DOCS),install-doc) \ 860 + install-datadir install-localstatedir install-includedir \ 857 861 $(if $(INSTALL_BLOBS),$(edk2-decompressed)) \ 858 862 recurse-install 859 863 ifneq ($(TOOLS),) ··· 915 919 "$(DESTDIR)$(qemu_desktopdir)/qemu.desktop" 916 920 ifdef CONFIG_GTK 917 921 $(MAKE) -C po $@ 922 + endif 923 + ifeq ($(CONFIG_PLUGIN),y) 924 + $(INSTALL_DATA) $(SRC_PATH)/include/qemu/qemu-plugin.h "$(DESTDIR)$(includedir)/qemu-plugin.h" 918 925 endif 919 926 $(INSTALL_DIR) "$(DESTDIR)$(qemu_datadir)/keymaps" 920 927 set -e; for x in $(KEYMAPS); do \
+2
Makefile.target
··· 119 119 obj-$(call notempty,$(TARGET_XML_FILES)) += gdbstub-xml.o 120 120 LIBS := $(libs_cpu) $(LIBS) 121 121 122 + obj-$(CONFIG_PLUGIN) += plugins/ 123 + 122 124 ######################################################### 123 125 # Linux user emulator target 124 126
+3
configure
··· 3616 3616 if test "$modules" = yes; then 3617 3617 glib_modules="$glib_modules gmodule-export-2.0" 3618 3618 fi 3619 + if test "$plugins" = yes; then 3620 + glib_modules="$glib_modules gmodule-2.0" 3621 + fi 3619 3622 3620 3623 # This workaround is required due to a bug in pkg-config file for glib as it 3621 3624 # doesn't define GLIB_STATIC_COMPILATION for pkg-config --static
+6
include/hw/core/cpu.h
··· 29 29 #include "qemu/rcu_queue.h" 30 30 #include "qemu/queue.h" 31 31 #include "qemu/thread.h" 32 + #include "qemu/plugin.h" 32 33 33 34 typedef int (*WriteCoreDumpFunction)(const void *buf, size_t size, 34 35 void *opaque); ··· 344 345 * @trace_dstate_delayed: Delayed changes to trace_dstate (includes all changes 345 346 * to @trace_dstate). 346 347 * @trace_dstate: Dynamic tracing state of events for this vCPU (bitmask). 348 + * @plugin_mask: Plugin event bitmap. Modified only via async work. 347 349 * @ignore_memory_transaction_failures: Cached copy of the MachineState 348 350 * flag of the same name: allows the board to suppress calling of the 349 351 * CPU do_transaction_failed hook function. ··· 427 429 /* Used for events with 'vcpu' and *without* the 'disabled' properties */ 428 430 DECLARE_BITMAP(trace_dstate_delayed, CPU_TRACE_DSTATE_MAX_EVENTS); 429 431 DECLARE_BITMAP(trace_dstate, CPU_TRACE_DSTATE_MAX_EVENTS); 432 + 433 + DECLARE_BITMAP(plugin_mask, QEMU_PLUGIN_EV_MAX); 434 + 435 + GArray *plugin_mem_cbs; 430 436 431 437 /* TODO Move common fields from CPUArchState here. */ 432 438 int cpu_index;
+255
include/qemu/plugin.h
··· 1 + /* 2 + * Copyright (C) 2017, Emilio G. Cota <cota@braap.org> 3 + * 4 + * License: GNU GPL, version 2 or later. 5 + * See the COPYING file in the top-level directory. 6 + */ 7 + #ifndef QEMU_PLUGIN_H 8 + #define QEMU_PLUGIN_H 9 + 10 + #include "qemu/config-file.h" 11 + #include "qemu/qemu-plugin.h" 12 + #include "qemu/error-report.h" 13 + #include "qemu/queue.h" 14 + #include "qemu/option.h" 15 + 16 + /* 17 + * Option parsing/processing. 18 + * Note that we can load an arbitrary number of plugins. 19 + */ 20 + struct qemu_plugin_desc; 21 + typedef QTAILQ_HEAD(, qemu_plugin_desc) QemuPluginList; 22 + 23 + #ifdef CONFIG_PLUGIN 24 + extern QemuOptsList qemu_plugin_opts; 25 + 26 + static inline void qemu_plugin_add_opts(void) 27 + { 28 + qemu_add_opts(&qemu_plugin_opts); 29 + } 30 + 31 + void qemu_plugin_opt_parse(const char *optarg, QemuPluginList *head); 32 + int qemu_plugin_load_list(QemuPluginList *head); 33 + #else /* !CONFIG_PLUGIN */ 34 + static inline void qemu_plugin_add_opts(void) 35 + { } 36 + 37 + static inline void qemu_plugin_opt_parse(const char *optarg, 38 + QemuPluginList *head) 39 + { 40 + error_report("plugin interface not enabled in this build"); 41 + exit(1); 42 + } 43 + 44 + static inline int qemu_plugin_load_list(QemuPluginList *head) 45 + { 46 + return 0; 47 + } 48 + #endif /* !CONFIG_PLUGIN */ 49 + 50 + /* 51 + * Events that plugins can subscribe to. 
52 + */ 53 + enum qemu_plugin_event { 54 + QEMU_PLUGIN_EV_VCPU_INIT, 55 + QEMU_PLUGIN_EV_VCPU_EXIT, 56 + QEMU_PLUGIN_EV_VCPU_TB_TRANS, 57 + QEMU_PLUGIN_EV_VCPU_IDLE, 58 + QEMU_PLUGIN_EV_VCPU_RESUME, 59 + QEMU_PLUGIN_EV_VCPU_SYSCALL, 60 + QEMU_PLUGIN_EV_VCPU_SYSCALL_RET, 61 + QEMU_PLUGIN_EV_FLUSH, 62 + QEMU_PLUGIN_EV_ATEXIT, 63 + QEMU_PLUGIN_EV_MAX, /* total number of plugin events we support */ 64 + }; 65 + 66 + union qemu_plugin_cb_sig { 67 + qemu_plugin_simple_cb_t simple; 68 + qemu_plugin_udata_cb_t udata; 69 + qemu_plugin_vcpu_simple_cb_t vcpu_simple; 70 + qemu_plugin_vcpu_udata_cb_t vcpu_udata; 71 + qemu_plugin_vcpu_tb_trans_cb_t vcpu_tb_trans; 72 + qemu_plugin_vcpu_mem_cb_t vcpu_mem; 73 + qemu_plugin_vcpu_syscall_cb_t vcpu_syscall; 74 + qemu_plugin_vcpu_syscall_ret_cb_t vcpu_syscall_ret; 75 + void *generic; 76 + }; 77 + 78 + enum plugin_dyn_cb_type { 79 + PLUGIN_CB_INSN, 80 + PLUGIN_CB_MEM, 81 + PLUGIN_N_CB_TYPES, 82 + }; 83 + 84 + enum plugin_dyn_cb_subtype { 85 + PLUGIN_CB_REGULAR, 86 + PLUGIN_CB_INLINE, 87 + PLUGIN_N_CB_SUBTYPES, 88 + }; 89 + 90 + /* 91 + * A dynamic callback has an insertion point that is determined at run-time. 92 + * Usually the insertion point is somewhere in the code cache; think for 93 + * instance of a callback to be called upon the execution of a particular TB. 
94 + */ 95 + struct qemu_plugin_dyn_cb { 96 + union qemu_plugin_cb_sig f; 97 + void *userp; 98 + unsigned tcg_flags; 99 + enum plugin_dyn_cb_subtype type; 100 + /* @rw applies to mem callbacks only (both regular and inline) */ 101 + enum qemu_plugin_mem_rw rw; 102 + /* fields specific to each dyn_cb type go here */ 103 + union { 104 + struct { 105 + enum qemu_plugin_op op; 106 + uint64_t imm; 107 + } inline_insn; 108 + }; 109 + }; 110 + 111 + struct qemu_plugin_insn { 112 + GByteArray *data; 113 + uint64_t vaddr; 114 + void *haddr; 115 + GArray *cbs[PLUGIN_N_CB_TYPES][PLUGIN_N_CB_SUBTYPES]; 116 + bool calls_helpers; 117 + bool mem_helper; 118 + }; 119 + 120 + /* 121 + * qemu_plugin_insn allocate and cleanup functions. We don't expect to 122 + * cleanup many of these structures. They are reused for each fresh 123 + * translation. 124 + */ 125 + 126 + static inline void qemu_plugin_insn_cleanup_fn(gpointer data) 127 + { 128 + struct qemu_plugin_insn *insn = (struct qemu_plugin_insn *) data; 129 + g_byte_array_free(insn->data, true); 130 + } 131 + 132 + static inline struct qemu_plugin_insn *qemu_plugin_insn_alloc(void) 133 + { 134 + int i, j; 135 + struct qemu_plugin_insn *insn = g_new0(struct qemu_plugin_insn, 1); 136 + insn->data = g_byte_array_sized_new(4); 137 + 138 + for (i = 0; i < PLUGIN_N_CB_TYPES; i++) { 139 + for (j = 0; j < PLUGIN_N_CB_SUBTYPES; j++) { 140 + insn->cbs[i][j] = g_array_new(false, false, 141 + sizeof(struct qemu_plugin_dyn_cb)); 142 + } 143 + } 144 + return insn; 145 + } 146 + 147 + struct qemu_plugin_tb { 148 + GPtrArray *insns; 149 + size_t n; 150 + uint64_t vaddr; 151 + uint64_t vaddr2; 152 + void *haddr1; 153 + void *haddr2; 154 + GArray *cbs[PLUGIN_N_CB_SUBTYPES]; 155 + }; 156 + 157 + /** 158 + * qemu_plugin_tb_insn_get(): get next plugin record for translation. 
159 + * 160 + */ 161 + static inline 162 + struct qemu_plugin_insn *qemu_plugin_tb_insn_get(struct qemu_plugin_tb *tb) 163 + { 164 + struct qemu_plugin_insn *insn; 165 + int i, j; 166 + 167 + if (unlikely(tb->n == tb->insns->len)) { 168 + struct qemu_plugin_insn *new_insn = qemu_plugin_insn_alloc(); 169 + g_ptr_array_add(tb->insns, new_insn); 170 + } 171 + insn = g_ptr_array_index(tb->insns, tb->n++); 172 + g_byte_array_set_size(insn->data, 0); 173 + insn->calls_helpers = false; 174 + insn->mem_helper = false; 175 + 176 + for (i = 0; i < PLUGIN_N_CB_TYPES; i++) { 177 + for (j = 0; j < PLUGIN_N_CB_SUBTYPES; j++) { 178 + g_array_set_size(insn->cbs[i][j], 0); 179 + } 180 + } 181 + 182 + return insn; 183 + } 184 + 185 + #ifdef CONFIG_PLUGIN 186 + 187 + void qemu_plugin_vcpu_init_hook(CPUState *cpu); 188 + void qemu_plugin_vcpu_exit_hook(CPUState *cpu); 189 + void qemu_plugin_tb_trans_cb(CPUState *cpu, struct qemu_plugin_tb *tb); 190 + void qemu_plugin_vcpu_idle_cb(CPUState *cpu); 191 + void qemu_plugin_vcpu_resume_cb(CPUState *cpu); 192 + void 193 + qemu_plugin_vcpu_syscall(CPUState *cpu, int64_t num, uint64_t a1, 194 + uint64_t a2, uint64_t a3, uint64_t a4, uint64_t a5, 195 + uint64_t a6, uint64_t a7, uint64_t a8); 196 + void qemu_plugin_vcpu_syscall_ret(CPUState *cpu, int64_t num, int64_t ret); 197 + 198 + void qemu_plugin_vcpu_mem_cb(CPUState *cpu, uint64_t vaddr, uint32_t meminfo); 199 + 200 + void qemu_plugin_flush_cb(void); 201 + 202 + void qemu_plugin_atexit_cb(void); 203 + 204 + void qemu_plugin_add_dyn_cb_arr(GArray *arr); 205 + 206 + void qemu_plugin_disable_mem_helpers(CPUState *cpu); 207 + 208 + #else /* !CONFIG_PLUGIN */ 209 + 210 + static inline void qemu_plugin_vcpu_init_hook(CPUState *cpu) 211 + { } 212 + 213 + static inline void qemu_plugin_vcpu_exit_hook(CPUState *cpu) 214 + { } 215 + 216 + static inline void qemu_plugin_tb_trans_cb(CPUState *cpu, 217 + struct qemu_plugin_tb *tb) 218 + { } 219 + 220 + static inline void 
qemu_plugin_vcpu_idle_cb(CPUState *cpu) 221 + { } 222 + 223 + static inline void qemu_plugin_vcpu_resume_cb(CPUState *cpu) 224 + { } 225 + 226 + static inline void 227 + qemu_plugin_vcpu_syscall(CPUState *cpu, int64_t num, uint64_t a1, uint64_t a2, 228 + uint64_t a3, uint64_t a4, uint64_t a5, uint64_t a6, 229 + uint64_t a7, uint64_t a8) 230 + { } 231 + 232 + static inline 233 + void qemu_plugin_vcpu_syscall_ret(CPUState *cpu, int64_t num, int64_t ret) 234 + { } 235 + 236 + static inline void qemu_plugin_vcpu_mem_cb(CPUState *cpu, uint64_t vaddr, 237 + uint32_t meminfo) 238 + { } 239 + 240 + static inline void qemu_plugin_flush_cb(void) 241 + { } 242 + 243 + static inline void qemu_plugin_atexit_cb(void) 244 + { } 245 + 246 + static inline 247 + void qemu_plugin_add_dyn_cb_arr(GArray *arr) 248 + { } 249 + 250 + static inline void qemu_plugin_disable_mem_helpers(CPUState *cpu) 251 + { } 252 + 253 + #endif /* !CONFIG_PLUGIN */ 254 + 255 + #endif /* QEMU_PLUGIN_H */
+6
plugins/Makefile.objs
#
# Plugin Support
#

# Command-line/option handling and shared-object loading.
obj-y += loader.o
# Event registration and dispatch runtime.
obj-y += core.o
+502
plugins/core.c
··· 1 + /* 2 + * QEMU Plugin Core code 3 + * 4 + * This is the core code that deals with injecting instrumentation into the code 5 + * 6 + * Copyright (C) 2017, Emilio G. Cota <cota@braap.org> 7 + * Copyright (C) 2019, Linaro 8 + * 9 + * License: GNU GPL, version 2 or later. 10 + * See the COPYING file in the top-level directory. 11 + * 12 + * SPDX-License-Identifier: GPL-2.0-or-later 13 + */ 14 + #include "qemu/osdep.h" 15 + #include "qemu/error-report.h" 16 + #include "qemu/config-file.h" 17 + #include "qapi/error.h" 18 + #include "qemu/option.h" 19 + #include "qemu/rcu_queue.h" 20 + #include "qemu/xxhash.h" 21 + #include "qemu/rcu.h" 22 + #include "hw/core/cpu.h" 23 + #include "exec/cpu-common.h" 24 + 25 + #include "cpu.h" 26 + #include "exec/exec-all.h" 27 + #include "exec/helper-proto.h" 28 + #include "sysemu/sysemu.h" 29 + #include "tcg/tcg.h" 30 + #include "tcg/tcg-op.h" 31 + #include "trace/mem-internal.h" /* mem_info macros */ 32 + #include "plugin.h" 33 + 34 + struct qemu_plugin_cb { 35 + struct qemu_plugin_ctx *ctx; 36 + union qemu_plugin_cb_sig f; 37 + void *udata; 38 + QLIST_ENTRY(qemu_plugin_cb) entry; 39 + }; 40 + 41 + struct qemu_plugin_state plugin; 42 + 43 + struct qemu_plugin_ctx *plugin_id_to_ctx_locked(qemu_plugin_id_t id) 44 + { 45 + struct qemu_plugin_ctx *ctx; 46 + qemu_plugin_id_t *id_p; 47 + 48 + id_p = g_hash_table_lookup(plugin.id_ht, &id); 49 + ctx = container_of(id_p, struct qemu_plugin_ctx, id); 50 + if (ctx == NULL) { 51 + error_report("plugin: invalid plugin id %" PRIu64, id); 52 + abort(); 53 + } 54 + return ctx; 55 + } 56 + 57 + static void plugin_cpu_update__async(CPUState *cpu, run_on_cpu_data data) 58 + { 59 + bitmap_copy(cpu->plugin_mask, &data.host_ulong, QEMU_PLUGIN_EV_MAX); 60 + cpu_tb_jmp_cache_clear(cpu); 61 + } 62 + 63 + static void plugin_cpu_update__locked(gpointer k, gpointer v, gpointer udata) 64 + { 65 + CPUState *cpu = container_of(k, CPUState, cpu_index); 66 + run_on_cpu_data mask = 
RUN_ON_CPU_HOST_ULONG(*plugin.mask); 67 + 68 + if (cpu->created) { 69 + async_run_on_cpu(cpu, plugin_cpu_update__async, mask); 70 + } else { 71 + plugin_cpu_update__async(cpu, mask); 72 + } 73 + } 74 + 75 + void plugin_unregister_cb__locked(struct qemu_plugin_ctx *ctx, 76 + enum qemu_plugin_event ev) 77 + { 78 + struct qemu_plugin_cb *cb = ctx->callbacks[ev]; 79 + 80 + if (cb == NULL) { 81 + return; 82 + } 83 + QLIST_REMOVE_RCU(cb, entry); 84 + g_free(cb); 85 + ctx->callbacks[ev] = NULL; 86 + if (QLIST_EMPTY_RCU(&plugin.cb_lists[ev])) { 87 + clear_bit(ev, plugin.mask); 88 + g_hash_table_foreach(plugin.cpu_ht, plugin_cpu_update__locked, NULL); 89 + } 90 + } 91 + 92 + static void plugin_vcpu_cb__simple(CPUState *cpu, enum qemu_plugin_event ev) 93 + { 94 + struct qemu_plugin_cb *cb, *next; 95 + 96 + switch (ev) { 97 + case QEMU_PLUGIN_EV_VCPU_INIT: 98 + case QEMU_PLUGIN_EV_VCPU_EXIT: 99 + case QEMU_PLUGIN_EV_VCPU_IDLE: 100 + case QEMU_PLUGIN_EV_VCPU_RESUME: 101 + /* iterate safely; plugins might uninstall themselves at any time */ 102 + QLIST_FOREACH_SAFE_RCU(cb, &plugin.cb_lists[ev], entry, next) { 103 + qemu_plugin_vcpu_simple_cb_t func = cb->f.vcpu_simple; 104 + 105 + func(cb->ctx->id, cpu->cpu_index); 106 + } 107 + break; 108 + default: 109 + g_assert_not_reached(); 110 + } 111 + } 112 + 113 + static void plugin_cb__simple(enum qemu_plugin_event ev) 114 + { 115 + struct qemu_plugin_cb *cb, *next; 116 + 117 + switch (ev) { 118 + case QEMU_PLUGIN_EV_FLUSH: 119 + QLIST_FOREACH_SAFE_RCU(cb, &plugin.cb_lists[ev], entry, next) { 120 + qemu_plugin_simple_cb_t func = cb->f.simple; 121 + 122 + func(cb->ctx->id); 123 + } 124 + break; 125 + default: 126 + g_assert_not_reached(); 127 + } 128 + } 129 + 130 + static void plugin_cb__udata(enum qemu_plugin_event ev) 131 + { 132 + struct qemu_plugin_cb *cb, *next; 133 + 134 + switch (ev) { 135 + case QEMU_PLUGIN_EV_ATEXIT: 136 + QLIST_FOREACH_SAFE_RCU(cb, &plugin.cb_lists[ev], entry, next) { 137 + qemu_plugin_udata_cb_t func = 
cb->f.udata; 138 + 139 + func(cb->ctx->id, cb->udata); 140 + } 141 + break; 142 + default: 143 + g_assert_not_reached(); 144 + } 145 + } 146 + 147 + static void 148 + do_plugin_register_cb(qemu_plugin_id_t id, enum qemu_plugin_event ev, 149 + void *func, void *udata) 150 + { 151 + struct qemu_plugin_ctx *ctx; 152 + 153 + qemu_rec_mutex_lock(&plugin.lock); 154 + ctx = plugin_id_to_ctx_locked(id); 155 + /* if the plugin is on its way out, ignore this request */ 156 + if (unlikely(ctx->uninstalling)) { 157 + goto out_unlock; 158 + } 159 + if (func) { 160 + struct qemu_plugin_cb *cb = ctx->callbacks[ev]; 161 + 162 + if (cb) { 163 + cb->f.generic = func; 164 + cb->udata = udata; 165 + } else { 166 + cb = g_new(struct qemu_plugin_cb, 1); 167 + cb->ctx = ctx; 168 + cb->f.generic = func; 169 + cb->udata = udata; 170 + ctx->callbacks[ev] = cb; 171 + QLIST_INSERT_HEAD_RCU(&plugin.cb_lists[ev], cb, entry); 172 + if (!test_bit(ev, plugin.mask)) { 173 + set_bit(ev, plugin.mask); 174 + g_hash_table_foreach(plugin.cpu_ht, plugin_cpu_update__locked, 175 + NULL); 176 + } 177 + } 178 + } else { 179 + plugin_unregister_cb__locked(ctx, ev); 180 + } 181 + out_unlock: 182 + qemu_rec_mutex_unlock(&plugin.lock); 183 + } 184 + 185 + void plugin_register_cb(qemu_plugin_id_t id, enum qemu_plugin_event ev, 186 + void *func) 187 + { 188 + do_plugin_register_cb(id, ev, func, NULL); 189 + } 190 + 191 + void 192 + plugin_register_cb_udata(qemu_plugin_id_t id, enum qemu_plugin_event ev, 193 + void *func, void *udata) 194 + { 195 + do_plugin_register_cb(id, ev, func, udata); 196 + } 197 + 198 + void qemu_plugin_vcpu_init_hook(CPUState *cpu) 199 + { 200 + bool success; 201 + 202 + qemu_rec_mutex_lock(&plugin.lock); 203 + plugin_cpu_update__locked(&cpu->cpu_index, NULL, NULL); 204 + success = g_hash_table_insert(plugin.cpu_ht, &cpu->cpu_index, 205 + &cpu->cpu_index); 206 + g_assert(success); 207 + qemu_rec_mutex_unlock(&plugin.lock); 208 + 209 + plugin_vcpu_cb__simple(cpu, QEMU_PLUGIN_EV_VCPU_INIT); 
210 + } 211 + 212 + void qemu_plugin_vcpu_exit_hook(CPUState *cpu) 213 + { 214 + bool success; 215 + 216 + plugin_vcpu_cb__simple(cpu, QEMU_PLUGIN_EV_VCPU_EXIT); 217 + 218 + qemu_rec_mutex_lock(&plugin.lock); 219 + success = g_hash_table_remove(plugin.cpu_ht, &cpu->cpu_index); 220 + g_assert(success); 221 + qemu_rec_mutex_unlock(&plugin.lock); 222 + } 223 + 224 + struct plugin_for_each_args { 225 + struct qemu_plugin_ctx *ctx; 226 + qemu_plugin_vcpu_simple_cb_t cb; 227 + }; 228 + 229 + static void plugin_vcpu_for_each(gpointer k, gpointer v, gpointer udata) 230 + { 231 + struct plugin_for_each_args *args = udata; 232 + int cpu_index = *(int *)k; 233 + 234 + args->cb(args->ctx->id, cpu_index); 235 + } 236 + 237 + void qemu_plugin_vcpu_for_each(qemu_plugin_id_t id, 238 + qemu_plugin_vcpu_simple_cb_t cb) 239 + { 240 + struct plugin_for_each_args args; 241 + 242 + if (cb == NULL) { 243 + return; 244 + } 245 + qemu_rec_mutex_lock(&plugin.lock); 246 + args.ctx = plugin_id_to_ctx_locked(id); 247 + args.cb = cb; 248 + g_hash_table_foreach(plugin.cpu_ht, plugin_vcpu_for_each, &args); 249 + qemu_rec_mutex_unlock(&plugin.lock); 250 + } 251 + 252 + /* Allocate and return a callback record */ 253 + static struct qemu_plugin_dyn_cb *plugin_get_dyn_cb(GArray **arr) 254 + { 255 + GArray *cbs = *arr; 256 + 257 + if (!cbs) { 258 + cbs = g_array_sized_new(false, false, 259 + sizeof(struct qemu_plugin_dyn_cb), 1); 260 + *arr = cbs; 261 + } 262 + 263 + g_array_set_size(cbs, cbs->len + 1); 264 + return &g_array_index(cbs, struct qemu_plugin_dyn_cb, cbs->len - 1); 265 + } 266 + 267 + void plugin_register_inline_op(GArray **arr, 268 + enum qemu_plugin_mem_rw rw, 269 + enum qemu_plugin_op op, void *ptr, 270 + uint64_t imm) 271 + { 272 + struct qemu_plugin_dyn_cb *dyn_cb; 273 + 274 + dyn_cb = plugin_get_dyn_cb(arr); 275 + dyn_cb->userp = ptr; 276 + dyn_cb->type = PLUGIN_CB_INLINE; 277 + dyn_cb->rw = rw; 278 + dyn_cb->inline_insn.op = op; 279 + dyn_cb->inline_insn.imm = imm; 280 + } 281 + 
282 + static inline uint32_t cb_to_tcg_flags(enum qemu_plugin_cb_flags flags) 283 + { 284 + uint32_t ret; 285 + 286 + switch (flags) { 287 + case QEMU_PLUGIN_CB_RW_REGS: 288 + ret = 0; 289 + case QEMU_PLUGIN_CB_R_REGS: 290 + ret = TCG_CALL_NO_WG; 291 + break; 292 + case QEMU_PLUGIN_CB_NO_REGS: 293 + default: 294 + ret = TCG_CALL_NO_RWG; 295 + } 296 + return ret; 297 + } 298 + 299 + inline void 300 + plugin_register_dyn_cb__udata(GArray **arr, 301 + qemu_plugin_vcpu_udata_cb_t cb, 302 + enum qemu_plugin_cb_flags flags, void *udata) 303 + { 304 + struct qemu_plugin_dyn_cb *dyn_cb = plugin_get_dyn_cb(arr); 305 + 306 + dyn_cb->userp = udata; 307 + dyn_cb->tcg_flags = cb_to_tcg_flags(flags); 308 + dyn_cb->f.vcpu_udata = cb; 309 + dyn_cb->type = PLUGIN_CB_REGULAR; 310 + } 311 + 312 + void plugin_register_vcpu_mem_cb(GArray **arr, 313 + void *cb, 314 + enum qemu_plugin_cb_flags flags, 315 + enum qemu_plugin_mem_rw rw, 316 + void *udata) 317 + { 318 + struct qemu_plugin_dyn_cb *dyn_cb; 319 + 320 + dyn_cb = plugin_get_dyn_cb(arr); 321 + dyn_cb->userp = udata; 322 + dyn_cb->tcg_flags = cb_to_tcg_flags(flags); 323 + dyn_cb->type = PLUGIN_CB_REGULAR; 324 + dyn_cb->rw = rw; 325 + dyn_cb->f.generic = cb; 326 + } 327 + 328 + void qemu_plugin_tb_trans_cb(CPUState *cpu, struct qemu_plugin_tb *tb) 329 + { 330 + struct qemu_plugin_cb *cb, *next; 331 + enum qemu_plugin_event ev = QEMU_PLUGIN_EV_VCPU_TB_TRANS; 332 + 333 + /* no plugin_mask check here; caller should have checked */ 334 + 335 + QLIST_FOREACH_SAFE_RCU(cb, &plugin.cb_lists[ev], entry, next) { 336 + qemu_plugin_vcpu_tb_trans_cb_t func = cb->f.vcpu_tb_trans; 337 + 338 + func(cb->ctx->id, tb); 339 + } 340 + } 341 + 342 + void 343 + qemu_plugin_vcpu_syscall(CPUState *cpu, int64_t num, uint64_t a1, uint64_t a2, 344 + uint64_t a3, uint64_t a4, uint64_t a5, 345 + uint64_t a6, uint64_t a7, uint64_t a8) 346 + { 347 + struct qemu_plugin_cb *cb, *next; 348 + enum qemu_plugin_event ev = QEMU_PLUGIN_EV_VCPU_SYSCALL; 349 + 350 + if 
(!test_bit(ev, cpu->plugin_mask)) { 351 + return; 352 + } 353 + 354 + QLIST_FOREACH_SAFE_RCU(cb, &plugin.cb_lists[ev], entry, next) { 355 + qemu_plugin_vcpu_syscall_cb_t func = cb->f.vcpu_syscall; 356 + 357 + func(cb->ctx->id, cpu->cpu_index, num, a1, a2, a3, a4, a5, a6, a7, a8); 358 + } 359 + } 360 + 361 + void qemu_plugin_vcpu_syscall_ret(CPUState *cpu, int64_t num, int64_t ret) 362 + { 363 + struct qemu_plugin_cb *cb, *next; 364 + enum qemu_plugin_event ev = QEMU_PLUGIN_EV_VCPU_SYSCALL_RET; 365 + 366 + if (!test_bit(ev, cpu->plugin_mask)) { 367 + return; 368 + } 369 + 370 + QLIST_FOREACH_SAFE_RCU(cb, &plugin.cb_lists[ev], entry, next) { 371 + qemu_plugin_vcpu_syscall_ret_cb_t func = cb->f.vcpu_syscall_ret; 372 + 373 + func(cb->ctx->id, cpu->cpu_index, num, ret); 374 + } 375 + } 376 + 377 + void qemu_plugin_vcpu_idle_cb(CPUState *cpu) 378 + { 379 + plugin_vcpu_cb__simple(cpu, QEMU_PLUGIN_EV_VCPU_IDLE); 380 + } 381 + 382 + void qemu_plugin_vcpu_resume_cb(CPUState *cpu) 383 + { 384 + plugin_vcpu_cb__simple(cpu, QEMU_PLUGIN_EV_VCPU_RESUME); 385 + } 386 + 387 + void qemu_plugin_register_vcpu_idle_cb(qemu_plugin_id_t id, 388 + qemu_plugin_vcpu_simple_cb_t cb) 389 + { 390 + plugin_register_cb(id, QEMU_PLUGIN_EV_VCPU_IDLE, cb); 391 + } 392 + 393 + void qemu_plugin_register_vcpu_resume_cb(qemu_plugin_id_t id, 394 + qemu_plugin_vcpu_simple_cb_t cb) 395 + { 396 + plugin_register_cb(id, QEMU_PLUGIN_EV_VCPU_RESUME, cb); 397 + } 398 + 399 + void qemu_plugin_register_flush_cb(qemu_plugin_id_t id, 400 + qemu_plugin_simple_cb_t cb) 401 + { 402 + plugin_register_cb(id, QEMU_PLUGIN_EV_FLUSH, cb); 403 + } 404 + 405 + static bool free_dyn_cb_arr(void *p, uint32_t h, void *userp) 406 + { 407 + g_array_free((GArray *) p, true); 408 + return true; 409 + } 410 + 411 + void qemu_plugin_flush_cb(void) 412 + { 413 + qht_iter_remove(&plugin.dyn_cb_arr_ht, free_dyn_cb_arr, NULL); 414 + qht_reset(&plugin.dyn_cb_arr_ht); 415 + 416 + plugin_cb__simple(QEMU_PLUGIN_EV_FLUSH); 417 + } 418 + 419 + 
void exec_inline_op(struct qemu_plugin_dyn_cb *cb) 420 + { 421 + uint64_t *val = cb->userp; 422 + 423 + switch (cb->inline_insn.op) { 424 + case QEMU_PLUGIN_INLINE_ADD_U64: 425 + *val += cb->inline_insn.imm; 426 + break; 427 + default: 428 + g_assert_not_reached(); 429 + } 430 + } 431 + 432 + void qemu_plugin_vcpu_mem_cb(CPUState *cpu, uint64_t vaddr, uint32_t info) 433 + { 434 + GArray *arr = cpu->plugin_mem_cbs; 435 + size_t i; 436 + 437 + if (arr == NULL) { 438 + return; 439 + } 440 + for (i = 0; i < arr->len; i++) { 441 + struct qemu_plugin_dyn_cb *cb = 442 + &g_array_index(arr, struct qemu_plugin_dyn_cb, i); 443 + int w = !!(info & TRACE_MEM_ST) + 1; 444 + 445 + if (!(w & cb->rw)) { 446 + break; 447 + } 448 + switch (cb->type) { 449 + case PLUGIN_CB_REGULAR: 450 + cb->f.vcpu_mem(cpu->cpu_index, info, vaddr, cb->userp); 451 + break; 452 + case PLUGIN_CB_INLINE: 453 + exec_inline_op(cb); 454 + break; 455 + default: 456 + g_assert_not_reached(); 457 + } 458 + } 459 + } 460 + 461 + void qemu_plugin_atexit_cb(void) 462 + { 463 + plugin_cb__udata(QEMU_PLUGIN_EV_ATEXIT); 464 + } 465 + 466 + void qemu_plugin_register_atexit_cb(qemu_plugin_id_t id, 467 + qemu_plugin_udata_cb_t cb, 468 + void *udata) 469 + { 470 + plugin_register_cb_udata(id, QEMU_PLUGIN_EV_ATEXIT, cb, udata); 471 + } 472 + 473 + /* 474 + * Call this function after longjmp'ing to the main loop. It's possible that the 475 + * last instruction of a TB might have used helpers, and therefore the 476 + * "disable" instruction will never execute because it ended up as dead code. 
477 + */ 478 + void qemu_plugin_disable_mem_helpers(CPUState *cpu) 479 + { 480 + cpu->plugin_mem_cbs = NULL; 481 + } 482 + 483 + static bool plugin_dyn_cb_arr_cmp(const void *ap, const void *bp) 484 + { 485 + return ap == bp; 486 + } 487 + 488 + static void __attribute__((__constructor__)) plugin_init(void) 489 + { 490 + int i; 491 + 492 + for (i = 0; i < QEMU_PLUGIN_EV_MAX; i++) { 493 + QLIST_INIT(&plugin.cb_lists[i]); 494 + } 495 + qemu_rec_mutex_init(&plugin.lock); 496 + plugin.id_ht = g_hash_table_new(g_int64_hash, g_int64_equal); 497 + plugin.cpu_ht = g_hash_table_new(g_int_hash, g_int_equal); 498 + QTAILQ_INIT(&plugin.ctxs); 499 + qht_init(&plugin.dyn_cb_arr_ht, plugin_dyn_cb_arr_cmp, 16, 500 + QHT_MODE_AUTO_RESIZE); 501 + atexit(qemu_plugin_atexit_cb); 502 + }
+362
plugins/loader.c
··· 1 + /* 2 + * QEMU Plugin Core Loader Code 3 + * 4 + * This is the code responsible for loading and unloading the plugins. 5 + * Aside from the basic housekeeping tasks we also need to ensure any 6 + * generated code is flushed when we remove a plugin so we cannot end 7 + * up calling and unloaded helper function. 8 + * 9 + * Copyright (C) 2017, Emilio G. Cota <cota@braap.org> 10 + * Copyright (C) 2019, Linaro 11 + * 12 + * License: GNU GPL, version 2 or later. 13 + * See the COPYING file in the top-level directory. 14 + * 15 + * SPDX-License-Identifier: GPL-2.0-or-later 16 + */ 17 + 18 + #include "qemu/osdep.h" 19 + #include "qemu/error-report.h" 20 + #include "qemu/config-file.h" 21 + #include "qapi/error.h" 22 + #include "qemu/option.h" 23 + #include "qemu/rcu_queue.h" 24 + #include "qemu/qht.h" 25 + #include "qemu/bitmap.h" 26 + #include "qemu/xxhash.h" 27 + #include "qemu/plugin.h" 28 + #include "hw/core/cpu.h" 29 + #include "cpu.h" 30 + #include "exec/exec-all.h" 31 + #include "plugin.h" 32 + 33 + /* 34 + * For convenience we use a bitmap for plugin.mask, but really all we need is a 35 + * u32, which is what we store in TranslationBlock. 
36 + */ 37 + QEMU_BUILD_BUG_ON(QEMU_PLUGIN_EV_MAX > 32); 38 + 39 + struct qemu_plugin_desc { 40 + char *path; 41 + char **argv; 42 + QTAILQ_ENTRY(qemu_plugin_desc) entry; 43 + int argc; 44 + }; 45 + 46 + struct qemu_plugin_parse_arg { 47 + QemuPluginList *head; 48 + struct qemu_plugin_desc *curr; 49 + }; 50 + 51 + QemuOptsList qemu_plugin_opts = { 52 + .name = "plugin", 53 + .implied_opt_name = "file", 54 + .head = QTAILQ_HEAD_INITIALIZER(qemu_plugin_opts.head), 55 + .desc = { 56 + /* do our own parsing to support multiple plugins */ 57 + { /* end of list */ } 58 + }, 59 + }; 60 + 61 + typedef int (*qemu_plugin_install_func_t)(qemu_plugin_id_t, int, char **); 62 + 63 + extern struct qemu_plugin_state plugin; 64 + 65 + void qemu_plugin_add_dyn_cb_arr(GArray *arr) 66 + { 67 + uint32_t hash = qemu_xxhash2((uint64_t)(uintptr_t)arr); 68 + bool inserted; 69 + 70 + inserted = qht_insert(&plugin.dyn_cb_arr_ht, arr, hash, NULL); 71 + g_assert(inserted); 72 + } 73 + 74 + static struct qemu_plugin_desc *plugin_find_desc(QemuPluginList *head, 75 + const char *path) 76 + { 77 + struct qemu_plugin_desc *desc; 78 + 79 + QTAILQ_FOREACH(desc, head, entry) { 80 + if (strcmp(desc->path, path) == 0) { 81 + return desc; 82 + } 83 + } 84 + return NULL; 85 + } 86 + 87 + static int plugin_add(void *opaque, const char *name, const char *value, 88 + Error **errp) 89 + { 90 + struct qemu_plugin_parse_arg *arg = opaque; 91 + struct qemu_plugin_desc *p; 92 + 93 + if (strcmp(name, "file") == 0) { 94 + if (strcmp(value, "") == 0) { 95 + error_setg(errp, "requires a non-empty argument"); 96 + return 1; 97 + } 98 + p = plugin_find_desc(arg->head, value); 99 + if (p == NULL) { 100 + p = g_new0(struct qemu_plugin_desc, 1); 101 + p->path = g_strdup(value); 102 + QTAILQ_INSERT_TAIL(arg->head, p, entry); 103 + } 104 + arg->curr = p; 105 + } else if (strcmp(name, "arg") == 0) { 106 + if (arg->curr == NULL) { 107 + error_setg(errp, "missing earlier '-plugin file=' option"); 108 + return 1; 109 + } 110 + 
p = arg->curr; 111 + p->argc++; 112 + p->argv = g_realloc_n(p->argv, p->argc, sizeof(char *)); 113 + p->argv[p->argc - 1] = g_strdup(value); 114 + } else { 115 + error_setg(errp, "-plugin: unexpected parameter '%s'; ignored", name); 116 + } 117 + return 0; 118 + } 119 + 120 + void qemu_plugin_opt_parse(const char *optarg, QemuPluginList *head) 121 + { 122 + struct qemu_plugin_parse_arg arg; 123 + QemuOpts *opts; 124 + 125 + opts = qemu_opts_parse_noisily(qemu_find_opts("plugin"), optarg, true); 126 + if (opts == NULL) { 127 + exit(1); 128 + } 129 + arg.head = head; 130 + arg.curr = NULL; 131 + qemu_opt_foreach(opts, plugin_add, &arg, &error_fatal); 132 + qemu_opts_del(opts); 133 + } 134 + 135 + /* 136 + * From: https://en.wikipedia.org/wiki/Xorshift 137 + * This is faster than rand_r(), and gives us a wider range (RAND_MAX is only 138 + * guaranteed to be >= INT_MAX). 139 + */ 140 + static uint64_t xorshift64star(uint64_t x) 141 + { 142 + x ^= x >> 12; /* a */ 143 + x ^= x << 25; /* b */ 144 + x ^= x >> 27; /* c */ 145 + return x * UINT64_C(2685821657736338717); 146 + } 147 + 148 + static int plugin_load(struct qemu_plugin_desc *desc) 149 + { 150 + qemu_plugin_install_func_t install; 151 + struct qemu_plugin_ctx *ctx; 152 + gpointer sym; 153 + int rc; 154 + 155 + ctx = qemu_memalign(qemu_dcache_linesize, sizeof(*ctx)); 156 + memset(ctx, 0, sizeof(*ctx)); 157 + ctx->desc = desc; 158 + 159 + ctx->handle = g_module_open(desc->path, G_MODULE_BIND_LOCAL); 160 + if (ctx->handle == NULL) { 161 + error_report("%s: %s", __func__, g_module_error()); 162 + goto err_dlopen; 163 + } 164 + 165 + if (!g_module_symbol(ctx->handle, "qemu_plugin_install", &sym)) { 166 + error_report("%s: %s", __func__, g_module_error()); 167 + goto err_symbol; 168 + } 169 + install = (qemu_plugin_install_func_t) sym; 170 + /* symbol was found; it could be NULL though */ 171 + if (install == NULL) { 172 + error_report("%s: %s: qemu_plugin_install is NULL", 173 + __func__, desc->path); 174 + goto 
err_symbol; 175 + } 176 + 177 + qemu_rec_mutex_lock(&plugin.lock); 178 + 179 + /* find an unused random id with &ctx as the seed */ 180 + ctx->id = (uint64_t)(uintptr_t)ctx; 181 + for (;;) { 182 + void *existing; 183 + 184 + ctx->id = xorshift64star(ctx->id); 185 + existing = g_hash_table_lookup(plugin.id_ht, &ctx->id); 186 + if (likely(existing == NULL)) { 187 + bool success; 188 + 189 + success = g_hash_table_insert(plugin.id_ht, &ctx->id, &ctx->id); 190 + g_assert(success); 191 + break; 192 + } 193 + } 194 + QTAILQ_INSERT_TAIL(&plugin.ctxs, ctx, entry); 195 + ctx->installing = true; 196 + rc = install(ctx->id, desc->argc, desc->argv); 197 + ctx->installing = false; 198 + if (rc) { 199 + error_report("%s: qemu_plugin_install returned error code %d", 200 + __func__, rc); 201 + /* 202 + * we cannot rely on the plugin doing its own cleanup, so 203 + * call a full uninstall if the plugin did not yet call it. 204 + */ 205 + if (!ctx->uninstalling) { 206 + plugin_reset_uninstall(ctx->id, NULL, false); 207 + } 208 + } 209 + 210 + qemu_rec_mutex_unlock(&plugin.lock); 211 + return rc; 212 + 213 + err_symbol: 214 + err_dlopen: 215 + qemu_vfree(ctx); 216 + return 1; 217 + } 218 + 219 + /* call after having removed @desc from the list */ 220 + static void plugin_desc_free(struct qemu_plugin_desc *desc) 221 + { 222 + int i; 223 + 224 + for (i = 0; i < desc->argc; i++) { 225 + g_free(desc->argv[i]); 226 + } 227 + g_free(desc->argv); 228 + g_free(desc->path); 229 + g_free(desc); 230 + } 231 + 232 + /** 233 + * qemu_plugin_load_list - load a list of plugins 234 + * @head: head of the list of descriptors of the plugins to be loaded 235 + * 236 + * Returns 0 if all plugins in the list are installed, !0 otherwise. 237 + * 238 + * Note: the descriptor of each successfully installed plugin is removed 239 + * from the list given by @head. 
240 + */ 241 + int qemu_plugin_load_list(QemuPluginList *head) 242 + { 243 + struct qemu_plugin_desc *desc, *next; 244 + 245 + QTAILQ_FOREACH_SAFE(desc, head, entry, next) { 246 + int err; 247 + 248 + err = plugin_load(desc); 249 + if (err) { 250 + return err; 251 + } 252 + QTAILQ_REMOVE(head, desc, entry); 253 + } 254 + return 0; 255 + } 256 + 257 + struct qemu_plugin_reset_data { 258 + struct qemu_plugin_ctx *ctx; 259 + qemu_plugin_simple_cb_t cb; 260 + bool reset; 261 + }; 262 + 263 + static void plugin_reset_destroy__locked(struct qemu_plugin_reset_data *data) 264 + { 265 + struct qemu_plugin_ctx *ctx = data->ctx; 266 + enum qemu_plugin_event ev; 267 + bool success; 268 + 269 + /* 270 + * After updating the subscription lists there is no need to wait for an RCU 271 + * grace period to elapse, because right now we either are in a "safe async" 272 + * work environment (i.e. all vCPUs are asleep), or no vCPUs have yet been 273 + * created. 274 + */ 275 + for (ev = 0; ev < QEMU_PLUGIN_EV_MAX; ev++) { 276 + plugin_unregister_cb__locked(ctx, ev); 277 + } 278 + 279 + if (data->reset) { 280 + g_assert(ctx->resetting); 281 + if (data->cb) { 282 + data->cb(ctx->id); 283 + } 284 + ctx->resetting = false; 285 + g_free(data); 286 + return; 287 + } 288 + 289 + g_assert(ctx->uninstalling); 290 + /* we cannot dlclose if we are going to return to plugin code */ 291 + if (ctx->installing) { 292 + error_report("Calling qemu_plugin_uninstall from the install function " 293 + "is a bug. 
Instead, return !0 from the install function."); 294 + abort(); 295 + } 296 + 297 + success = g_hash_table_remove(plugin.id_ht, &ctx->id); 298 + g_assert(success); 299 + QTAILQ_REMOVE(&plugin.ctxs, ctx, entry); 300 + if (data->cb) { 301 + data->cb(ctx->id); 302 + } 303 + if (!g_module_close(ctx->handle)) { 304 + warn_report("%s: %s", __func__, g_module_error()); 305 + } 306 + plugin_desc_free(ctx->desc); 307 + qemu_vfree(ctx); 308 + g_free(data); 309 + } 310 + 311 + static void plugin_reset_destroy(struct qemu_plugin_reset_data *data) 312 + { 313 + qemu_rec_mutex_lock(&plugin.lock); 314 + plugin_reset_destroy__locked(data); 315 + qemu_rec_mutex_lock(&plugin.lock); 316 + } 317 + 318 + static void plugin_flush_destroy(CPUState *cpu, run_on_cpu_data arg) 319 + { 320 + struct qemu_plugin_reset_data *data = arg.host_ptr; 321 + 322 + g_assert(cpu_in_exclusive_context(cpu)); 323 + tb_flush(cpu); 324 + plugin_reset_destroy(data); 325 + } 326 + 327 + void plugin_reset_uninstall(qemu_plugin_id_t id, 328 + qemu_plugin_simple_cb_t cb, 329 + bool reset) 330 + { 331 + struct qemu_plugin_reset_data *data; 332 + struct qemu_plugin_ctx *ctx; 333 + 334 + qemu_rec_mutex_lock(&plugin.lock); 335 + ctx = plugin_id_to_ctx_locked(id); 336 + if (ctx->uninstalling || (reset && ctx->resetting)) { 337 + qemu_rec_mutex_unlock(&plugin.lock); 338 + return; 339 + } 340 + ctx->resetting = reset; 341 + ctx->uninstalling = !reset; 342 + qemu_rec_mutex_unlock(&plugin.lock); 343 + 344 + data = g_new(struct qemu_plugin_reset_data, 1); 345 + data->ctx = ctx; 346 + data->cb = cb; 347 + data->reset = reset; 348 + /* 349 + * Only flush the code cache if the vCPUs have been created. If so, 350 + * current_cpu must be non-NULL. 
351 + */ 352 + if (current_cpu) { 353 + async_safe_run_on_cpu(current_cpu, plugin_flush_destroy, 354 + RUN_ON_CPU_HOST_PTR(data)); 355 + } else { 356 + /* 357 + * If current_cpu isn't set, then we don't have yet any vCPU threads 358 + * and we therefore can remove the callbacks synchronously. 359 + */ 360 + plugin_reset_destroy(data); 361 + } 362 + }
+97
plugins/plugin.h
··· 1 + /* 2 + * Plugin Shared Internal Functions 3 + * 4 + * Copyright (C) 2019, Linaro 5 + * 6 + * License: GNU GPL, version 2 or later. 7 + * See the COPYING file in the top-level directory. 8 + * 9 + * SPDX-License-Identifier: GPL-2.0-or-later 10 + */ 11 + 12 + #ifndef _PLUGIN_INTERNAL_H_ 13 + #define _PLUGIN_INTERNAL_H_ 14 + 15 + #include <gmodule.h> 16 + 17 + /* global state */ 18 + struct qemu_plugin_state { 19 + QTAILQ_HEAD(, qemu_plugin_ctx) ctxs; 20 + QLIST_HEAD(, qemu_plugin_cb) cb_lists[QEMU_PLUGIN_EV_MAX]; 21 + /* 22 + * Use the HT as a hash map by inserting k == v, which saves memory as 23 + * documented by GLib. The parent struct is obtained with container_of(). 24 + */ 25 + GHashTable *id_ht; 26 + /* 27 + * Use the HT as a hash map. Note that we could use a list here, 28 + * but with the HT we avoid adding a field to CPUState. 29 + */ 30 + GHashTable *cpu_ht; 31 + DECLARE_BITMAP(mask, QEMU_PLUGIN_EV_MAX); 32 + /* 33 + * @lock protects the struct as well as ctx->uninstalling. 34 + * The lock must be acquired by all API ops. 35 + * The lock is recursive, which greatly simplifies things, e.g. 36 + * callback registration from qemu_plugin_vcpu_for_each(). 37 + */ 38 + QemuRecMutex lock; 39 + /* 40 + * HT of callbacks invoked from helpers. All entries are freed when 41 + * the code cache is flushed. 42 + */ 43 + struct qht dyn_cb_arr_ht; 44 + }; 45 + 46 + 47 + struct qemu_plugin_ctx { 48 + GModule *handle; 49 + qemu_plugin_id_t id; 50 + struct qemu_plugin_cb *callbacks[QEMU_PLUGIN_EV_MAX]; 51 + QTAILQ_ENTRY(qemu_plugin_ctx) entry; 52 + /* 53 + * keep a reference to @desc until uninstall, so that plugins do not have 54 + * to strdup plugin args. 
55 + */ 56 + struct qemu_plugin_desc *desc; 57 + bool installing; 58 + bool uninstalling; 59 + bool resetting; 60 + }; 61 + 62 + struct qemu_plugin_ctx *plugin_id_to_ctx_locked(qemu_plugin_id_t id); 63 + 64 + void plugin_register_inline_op(GArray **arr, 65 + enum qemu_plugin_mem_rw rw, 66 + enum qemu_plugin_op op, void *ptr, 67 + uint64_t imm); 68 + 69 + void plugin_reset_uninstall(qemu_plugin_id_t id, 70 + qemu_plugin_simple_cb_t cb, 71 + bool reset); 72 + 73 + void plugin_register_cb(qemu_plugin_id_t id, enum qemu_plugin_event ev, 74 + void *func); 75 + 76 + void plugin_unregister_cb__locked(struct qemu_plugin_ctx *ctx, 77 + enum qemu_plugin_event ev); 78 + 79 + void 80 + plugin_register_cb_udata(qemu_plugin_id_t id, enum qemu_plugin_event ev, 81 + void *func, void *udata); 82 + 83 + void 84 + plugin_register_dyn_cb__udata(GArray **arr, 85 + qemu_plugin_vcpu_udata_cb_t cb, 86 + enum qemu_plugin_cb_flags flags, void *udata); 87 + 88 + 89 + void plugin_register_vcpu_mem_cb(GArray **arr, 90 + void *cb, 91 + enum qemu_plugin_cb_flags flags, 92 + enum qemu_plugin_mem_rw rw, 93 + void *udata); 94 + 95 + void exec_inline_op(struct qemu_plugin_dyn_cb *cb); 96 + 97 + #endif /* _PLUGIN_INTERNAL_H_ */