QEMU with HAX to log DMA reads & writes — jcs.org/2018/11/12/vfio

Merge remote-tracking branch 'remotes/bonzini/tags/for-upstream' into staging

* socket option parsing fix (Daniel)
* SCSI fixes (Fam)
* Readline double-free fix (Greg)
* More HVF attribution fixes (Izik)
* WHPX (Windows Hypervisor Platform Extensions) support (Justin)
* POLLHUP handler (Klim)
* ivshmem fixes (Ladi)
* memfd memory backend (Marc-André)
* improved error message (Marcelo)
* Memory fixes (Peter Xu, Zhecheng)
* Remove obsolete code and comments (Peter M.)
* qdev API improvements (Philippe)
* Add CONFIG_I2C switch (Thomas)

# gpg: Signature made Wed 07 Feb 2018 15:24:08 GMT
# gpg: using RSA key BFFBD25F78C7AE83
# gpg: Good signature from "Paolo Bonzini <bonzini@gnu.org>"
# gpg: aka "Paolo Bonzini <pbonzini@redhat.com>"
# Primary key fingerprint: 46F5 9FBD 57D6 12E7 BFD4 E2F7 7E15 100C CD36 69B1
# Subkey fingerprint: F133 3857 4B66 2389 866C 7682 BFFB D25F 78C7 AE83

* remotes/bonzini/tags/for-upstream: (47 commits)
Add the WHPX acceleration enlightenments
Introduce the WHPX impl
Add the WHPX vcpu API
Add the Windows Hypervisor Platform accelerator.
tests/test-filter-redirector: move close()
tests: use memfd in vhost-user-test
vhost-user-test: make read-guest-mem setup its own qemu
tests: keep compiling failing vhost-user tests
Add memfd based hostmem
memfd: add hugetlbsize argument
memfd: add hugetlb support
memfd: add error argument, instead of perror()
cpus: join thread when removing a vCPU
cpus: hvf: unregister thread with RCU
cpus: tcg: unregister thread with RCU, fix exiting of loop on unplug
cpus: dummy: unregister thread with RCU, exit loop on unplug
cpus: kvm: unregister thread with RCU
cpus: hax: register/unregister thread with RCU, exit loop on unplug
ivshmem: Disable irqfd on device reset
ivshmem: Improve MSI irqfd error handling
...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>

# Conflicts:
# cpus.c

+2447 -330
+2 -1
.travis.yml
··· 13 13 - libattr1-dev 14 14 - libbrlapi-dev 15 15 - libcap-ng-dev 16 + - libgcc-6-dev 16 17 - libgnutls-dev 17 18 - libgtk-3-dev 18 19 - libiscsi-dev 19 20 - liblttng-ust-dev 20 - - libnfs-dev 21 21 - libncurses5-dev 22 + - libnfs-dev 22 23 - libnss3-dev 23 24 - libpixman-1-dev 24 25 - libpng12-dev
+5 -1
accel/kvm/kvm-all.c
··· 235 235 { 236 236 KVMState *s = kvm_state; 237 237 struct kvm_userspace_memory_region mem; 238 + int ret; 238 239 239 240 mem.slot = slot->slot | (kml->as_id << 16); 240 241 mem.guest_phys_addr = slot->start_addr; ··· 248 249 kvm_vm_ioctl(s, KVM_SET_USER_MEMORY_REGION, &mem); 249 250 } 250 251 mem.memory_size = slot->memory_size; 251 - return kvm_vm_ioctl(s, KVM_SET_USER_MEMORY_REGION, &mem); 252 + ret = kvm_vm_ioctl(s, KVM_SET_USER_MEMORY_REGION, &mem); 253 + trace_kvm_set_user_memory(mem.slot, mem.flags, mem.guest_phys_addr, 254 + mem.memory_size, mem.userspace_addr, ret); 255 + return ret; 252 256 } 253 257 254 258 int kvm_destroy_vcpu(CPUState *cpu)
+1
accel/kvm/trace-events
··· 12 12 kvm_irqchip_add_msi_route(char *name, int vector, int virq) "dev %s vector %d virq %d" 13 13 kvm_irqchip_update_msi_route(int virq) "Updating MSI route virq=%d" 14 14 kvm_irqchip_release_virq(int virq) "virq %d" 15 + kvm_set_user_memory(uint32_t slot, uint32_t flags, uint64_t guest_phys_addr, uint64_t memory_size, uint64_t userspace_addr, int ret) "Slot#%d flags=0x%x gpa=0x%"PRIx64 " size=0x%"PRIx64 " ua=0x%"PRIx64 " ret=%d" 15 16
+5 -4
accel/stubs/Makefile.objs
··· 1 - obj-$(call lnot,$(CONFIG_HAX)) += hax-stub.o 2 - obj-$(call lnot,$(CONFIG_HVF)) += hvf-stub.o 3 - obj-$(call lnot,$(CONFIG_KVM)) += kvm-stub.o 4 - obj-$(call lnot,$(CONFIG_TCG)) += tcg-stub.o 1 + obj-$(call lnot,$(CONFIG_HAX)) += hax-stub.o 2 + obj-$(call lnot,$(CONFIG_HVF)) += hvf-stub.o 3 + obj-$(call lnot,$(CONFIG_WHPX)) += whpx-stub.o 4 + obj-$(call lnot,$(CONFIG_KVM)) += kvm-stub.o 5 + obj-$(call lnot,$(CONFIG_TCG)) += tcg-stub.o
+48
accel/stubs/whpx-stub.c
··· 1 + /* 2 + * QEMU Windows Hypervisor Platform accelerator (WHPX) stub 3 + * 4 + * Copyright Microsoft Corp. 2017 5 + * 6 + * This work is licensed under the terms of the GNU GPL, version 2 or later. 7 + * See the COPYING file in the top-level directory. 8 + * 9 + */ 10 + 11 + #include "qemu/osdep.h" 12 + #include "qemu-common.h" 13 + #include "cpu.h" 14 + #include "sysemu/whpx.h" 15 + 16 + int whpx_init_vcpu(CPUState *cpu) 17 + { 18 + return -1; 19 + } 20 + 21 + int whpx_vcpu_exec(CPUState *cpu) 22 + { 23 + return -1; 24 + } 25 + 26 + void whpx_destroy_vcpu(CPUState *cpu) 27 + { 28 + } 29 + 30 + void whpx_vcpu_kick(CPUState *cpu) 31 + { 32 + } 33 + 34 + void whpx_cpu_synchronize_state(CPUState *cpu) 35 + { 36 + } 37 + 38 + void whpx_cpu_synchronize_post_reset(CPUState *cpu) 39 + { 40 + } 41 + 42 + void whpx_cpu_synchronize_post_init(CPUState *cpu) 43 + { 44 + } 45 + 46 + void whpx_cpu_synchronize_pre_loadvm(CPUState *cpu) 47 + { 48 + }
-33
accel/tcg/user-exec.c
··· 503 503 return handle_cpu_signal(pc, info, is_write, &uc->uc_sigmask); 504 504 } 505 505 506 - #elif defined(__ia64) 507 - 508 - #ifndef __ISR_VALID 509 - /* This ought to be in <bits/siginfo.h>... */ 510 - # define __ISR_VALID 1 511 - #endif 512 - 513 - int cpu_signal_handler(int host_signum, void *pinfo, void *puc) 514 - { 515 - siginfo_t *info = pinfo; 516 - ucontext_t *uc = puc; 517 - unsigned long ip; 518 - int is_write = 0; 519 - 520 - ip = uc->uc_mcontext.sc_ip; 521 - switch (host_signum) { 522 - case SIGILL: 523 - case SIGFPE: 524 - case SIGSEGV: 525 - case SIGBUS: 526 - case SIGTRAP: 527 - if (info->si_code && (info->si_segvflags & __ISR_VALID)) { 528 - /* ISR.W (write-access) is bit 33: */ 529 - is_write = (info->si_isr >> 33) & 1; 530 - } 531 - break; 532 - 533 - default: 534 - break; 535 - } 536 - return handle_cpu_signal(ip, info, is_write, (sigset_t *)&uc->uc_sigmask); 537 - } 538 - 539 506 #elif defined(__s390__) 540 507 541 508 int cpu_signal_handler(int host_signum, void *pinfo,
+2
backends/Makefile.objs
··· 8 8 9 9 common-obj-y += cryptodev.o 10 10 common-obj-y += cryptodev-builtin.o 11 + 12 + common-obj-$(CONFIG_LINUX) += hostmem-memfd.o
+170
backends/hostmem-memfd.c
··· 1 + /* 2 + * QEMU host memfd memory backend 3 + * 4 + * Copyright (C) 2018 Red Hat Inc 5 + * 6 + * Authors: 7 + * Marc-André Lureau <marcandre.lureau@redhat.com> 8 + * 9 + * This work is licensed under the terms of the GNU GPL, version 2 or later. 10 + * See the COPYING file in the top-level directory. 11 + */ 12 + #include "qemu/osdep.h" 13 + #include "qemu-common.h" 14 + #include "sysemu/hostmem.h" 15 + #include "sysemu/sysemu.h" 16 + #include "qom/object_interfaces.h" 17 + #include "qemu/memfd.h" 18 + #include "qapi/error.h" 19 + 20 + #define TYPE_MEMORY_BACKEND_MEMFD "memory-backend-memfd" 21 + 22 + #define MEMORY_BACKEND_MEMFD(obj) \ 23 + OBJECT_CHECK(HostMemoryBackendMemfd, (obj), TYPE_MEMORY_BACKEND_MEMFD) 24 + 25 + typedef struct HostMemoryBackendMemfd HostMemoryBackendMemfd; 26 + 27 + struct HostMemoryBackendMemfd { 28 + HostMemoryBackend parent_obj; 29 + 30 + bool hugetlb; 31 + uint64_t hugetlbsize; 32 + bool seal; 33 + }; 34 + 35 + static void 36 + memfd_backend_memory_alloc(HostMemoryBackend *backend, Error **errp) 37 + { 38 + HostMemoryBackendMemfd *m = MEMORY_BACKEND_MEMFD(backend); 39 + char *name; 40 + int fd; 41 + 42 + if (!backend->size) { 43 + error_setg(errp, "can't create backend with size 0"); 44 + return; 45 + } 46 + 47 + if (host_memory_backend_mr_inited(backend)) { 48 + return; 49 + } 50 + 51 + backend->force_prealloc = mem_prealloc; 52 + fd = qemu_memfd_create(TYPE_MEMORY_BACKEND_MEMFD, backend->size, 53 + m->hugetlb, m->hugetlbsize, m->seal ? 
54 + F_SEAL_GROW | F_SEAL_SHRINK | F_SEAL_SEAL : 0, 55 + errp); 56 + if (fd == -1) { 57 + return; 58 + } 59 + 60 + name = object_get_canonical_path(OBJECT(backend)); 61 + memory_region_init_ram_from_fd(&backend->mr, OBJECT(backend), 62 + name, backend->size, true, fd, errp); 63 + g_free(name); 64 + } 65 + 66 + static bool 67 + memfd_backend_get_hugetlb(Object *o, Error **errp) 68 + { 69 + return MEMORY_BACKEND_MEMFD(o)->hugetlb; 70 + } 71 + 72 + static void 73 + memfd_backend_set_hugetlb(Object *o, bool value, Error **errp) 74 + { 75 + MEMORY_BACKEND_MEMFD(o)->hugetlb = value; 76 + } 77 + 78 + static void 79 + memfd_backend_set_hugetlbsize(Object *obj, Visitor *v, const char *name, 80 + void *opaque, Error **errp) 81 + { 82 + HostMemoryBackendMemfd *m = MEMORY_BACKEND_MEMFD(obj); 83 + Error *local_err = NULL; 84 + uint64_t value; 85 + 86 + if (host_memory_backend_mr_inited(MEMORY_BACKEND(obj))) { 87 + error_setg(&local_err, "cannot change property value"); 88 + goto out; 89 + } 90 + 91 + visit_type_size(v, name, &value, &local_err); 92 + if (local_err) { 93 + goto out; 94 + } 95 + if (!value) { 96 + error_setg(&local_err, "Property '%s.%s' doesn't take value '%" 97 + PRIu64 "'", object_get_typename(obj), name, value); 98 + goto out; 99 + } 100 + m->hugetlbsize = value; 101 + out: 102 + error_propagate(errp, local_err); 103 + } 104 + 105 + static void 106 + memfd_backend_get_hugetlbsize(Object *obj, Visitor *v, const char *name, 107 + void *opaque, Error **errp) 108 + { 109 + HostMemoryBackendMemfd *m = MEMORY_BACKEND_MEMFD(obj); 110 + uint64_t value = m->hugetlbsize; 111 + 112 + visit_type_size(v, name, &value, errp); 113 + } 114 + 115 + static bool 116 + memfd_backend_get_seal(Object *o, Error **errp) 117 + { 118 + return MEMORY_BACKEND_MEMFD(o)->seal; 119 + } 120 + 121 + static void 122 + memfd_backend_set_seal(Object *o, bool value, Error **errp) 123 + { 124 + MEMORY_BACKEND_MEMFD(o)->seal = value; 125 + } 126 + 127 + static void 128 + 
memfd_backend_instance_init(Object *obj) 129 + { 130 + HostMemoryBackendMemfd *m = MEMORY_BACKEND_MEMFD(obj); 131 + 132 + /* default to sealed file */ 133 + m->seal = true; 134 + } 135 + 136 + static void 137 + memfd_backend_class_init(ObjectClass *oc, void *data) 138 + { 139 + HostMemoryBackendClass *bc = MEMORY_BACKEND_CLASS(oc); 140 + 141 + bc->alloc = memfd_backend_memory_alloc; 142 + 143 + object_class_property_add_bool(oc, "hugetlb", 144 + memfd_backend_get_hugetlb, 145 + memfd_backend_set_hugetlb, 146 + &error_abort); 147 + object_class_property_add(oc, "hugetlbsize", "int", 148 + memfd_backend_get_hugetlbsize, 149 + memfd_backend_set_hugetlbsize, 150 + NULL, NULL, &error_abort); 151 + object_class_property_add_bool(oc, "seal", 152 + memfd_backend_get_seal, 153 + memfd_backend_set_seal, 154 + &error_abort); 155 + } 156 + 157 + static const TypeInfo memfd_backend_info = { 158 + .name = TYPE_MEMORY_BACKEND_MEMFD, 159 + .parent = TYPE_MEMORY_BACKEND, 160 + .instance_init = memfd_backend_instance_init, 161 + .class_init = memfd_backend_class_init, 162 + .instance_size = sizeof(HostMemoryBackendMemfd), 163 + }; 164 + 165 + static void register_types(void) 166 + { 167 + type_register_static(&memfd_backend_info); 168 + } 169 + 170 + type_init(register_types);
+21 -17
chardev/char-pty.c
··· 51 51 static void pty_chr_update_read_handler_locked(Chardev *chr); 52 52 static void pty_chr_state(Chardev *chr, int connected); 53 53 54 + static void pty_chr_timer_cancel(PtyChardev *s) 55 + { 56 + if (s->timer_src) { 57 + g_source_destroy(s->timer_src); 58 + g_source_unref(s->timer_src); 59 + s->timer_src = NULL; 60 + } 61 + } 62 + 63 + static void pty_chr_open_src_cancel(PtyChardev *s) 64 + { 65 + if (s->open_source) { 66 + g_source_destroy(s->open_source); 67 + g_source_unref(s->open_source); 68 + s->open_source = NULL; 69 + } 70 + } 71 + 54 72 static gboolean pty_chr_timer(gpointer opaque) 55 73 { 56 74 struct Chardev *chr = CHARDEV(opaque); 57 75 PtyChardev *s = PTY_CHARDEV(opaque); 58 76 59 77 qemu_mutex_lock(&chr->chr_write_lock); 60 - s->timer_src = NULL; 61 - g_source_unref(s->open_source); 62 - s->open_source = NULL; 78 + pty_chr_timer_cancel(s); 79 + pty_chr_open_src_cancel(s); 63 80 if (!s->connected) { 64 81 /* Next poll ... */ 65 82 pty_chr_update_read_handler_locked(chr); 66 83 } 67 84 qemu_mutex_unlock(&chr->chr_write_lock); 68 85 return FALSE; 69 - } 70 - 71 - static void pty_chr_timer_cancel(PtyChardev *s) 72 - { 73 - if (s->timer_src) { 74 - g_source_destroy(s->timer_src); 75 - g_source_unref(s->timer_src); 76 - s->timer_src = NULL; 77 - } 78 86 } 79 87 80 88 /* Called with chr_write_lock held. */ ··· 195 203 PtyChardev *s = PTY_CHARDEV(chr); 196 204 197 205 if (!connected) { 198 - if (s->open_source) { 199 - g_source_destroy(s->open_source); 200 - g_source_unref(s->open_source); 201 - s->open_source = NULL; 202 - } 206 + pty_chr_open_src_cancel(s); 203 207 remove_fd_in_watch(chr); 204 208 s->connected = 0; 205 209 /* (re-)connect poll interval for idle guests: once per second.
+22
chardev/char-socket.c
··· 42 42 QIOChannel *ioc; /* Client I/O channel */ 43 43 QIOChannelSocket *sioc; /* Client master channel */ 44 44 QIONetListener *listener; 45 + GSource *hup_source; 45 46 QCryptoTLSCreds *tls_creds; 46 47 int connected; 47 48 int max_size; ··· 352 353 s->read_msgfds_num = 0; 353 354 } 354 355 356 + if (s->hup_source != NULL) { 357 + g_source_destroy(s->hup_source); 358 + g_source_unref(s->hup_source); 359 + s->hup_source = NULL; 360 + } 361 + 355 362 tcp_set_msgfds(chr, NULL, 0); 356 363 remove_fd_in_watch(chr); 357 364 object_unref(OBJECT(s->sioc)); ··· 455 462 return TRUE; 456 463 } 457 464 465 + static gboolean tcp_chr_hup(QIOChannel *channel, 466 + GIOCondition cond, 467 + void *opaque) 468 + { 469 + Chardev *chr = CHARDEV(opaque); 470 + tcp_chr_disconnect(chr); 471 + return G_SOURCE_REMOVE; 472 + } 473 + 458 474 static int tcp_chr_sync_read(Chardev *chr, const uint8_t *buf, int len) 459 475 { 460 476 SocketChardev *s = SOCKET_CHARDEV(chr); ··· 528 544 tcp_chr_read, 529 545 chr, chr->gcontext); 530 546 } 547 + 548 + s->hup_source = qio_channel_create_watch(s->ioc, G_IO_HUP); 549 + g_source_set_callback(s->hup_source, (GSourceFunc)tcp_chr_hup, 550 + chr, NULL); 551 + g_source_attach(s->hup_source, chr->gcontext); 552 + 531 553 qemu_chr_be_event(chr, CHR_EVENT_OPENED); 532 554 } 533 555
+107 -6
configure
··· 222 222 return 1 223 223 } 224 224 225 + supported_whpx_target() { 226 + test "$whpx" = "yes" || return 1 227 + glob "$1" "*-softmmu" || return 1 228 + case "${1%-softmmu}" in 229 + i386|x86_64) 230 + return 0 231 + ;; 232 + esac 233 + return 1 234 + } 235 + 225 236 supported_target() { 226 237 case "$1" in 227 238 *-softmmu) ··· 248 259 supported_xen_target "$1" && return 0 249 260 supported_hax_target "$1" && return 0 250 261 supported_hvf_target "$1" && return 0 262 + supported_whpx_target "$1" && return 0 251 263 print_error "TCG disabled, but hardware accelerator not available for '$target'" 252 264 return 1 253 265 } ··· 338 350 kvm="no" 339 351 hax="no" 340 352 hvf="no" 353 + whpx="no" 341 354 rdma="" 342 355 gprof="no" 343 356 debug_tcg="no" 344 357 debug="no" 358 + sanitizers="no" 345 359 fortify_source="" 346 360 strip_opt="yes" 347 361 tcg_interpreter="no" ··· 636 650 fi 637 651 elif check_define __mips__ ; then 638 652 cpu="mips" 639 - elif check_define __ia64__ ; then 640 - cpu="ia64" 641 653 elif check_define __s390__ ; then 642 654 if check_define __s390x__ ; then 643 655 cpu="s390x" ··· 995 1007 strip_opt="no" 996 1008 fortify_source="no" 997 1009 ;; 1010 + --enable-sanitizers) sanitizers="yes" 1011 + ;; 1012 + --disable-sanitizers) sanitizers="no" 1013 + ;; 998 1014 --enable-sparse) sparse="yes" 999 1015 ;; 1000 1016 --disable-sparse) sparse="no" ··· 1055 1071 ;; 1056 1072 --enable-hvf) hvf="yes" 1057 1073 ;; 1074 + --disable-whpx) whpx="no" 1075 + ;; 1076 + --enable-whpx) whpx="yes" 1077 + ;; 1058 1078 --disable-tcg-interpreter) tcg_interpreter="no" 1059 1079 ;; 1060 1080 --enable-tcg-interpreter) tcg_interpreter="yes" ··· 1476 1496 --firmwarepath=PATH search PATH for firmware files 1477 1497 --with-confsuffix=SUFFIX suffix for QEMU data inside datadir/libdir/sysconfdir [$confsuffix] 1478 1498 --enable-debug enable common debug build options 1499 + --enable-sanitizers enable default sanitizers 1479 1500 --disable-strip disable stripping 
binaries 1480 1501 --disable-werror disable compilation abort on warning 1481 1502 --disable-stack-protector disable compiler-provided stack protection ··· 1553 1574 kvm KVM acceleration support 1554 1575 hax HAX acceleration support 1555 1576 hvf Hypervisor.framework acceleration support 1577 + whpx Windows Hypervisor Platform acceleration support 1556 1578 rdma RDMA-based migration support 1557 1579 vde support for vde network 1558 1580 netmap support for netmap network ··· 2451 2473 fi 2452 2474 2453 2475 ########################################## 2476 + # Windows Hypervisor Platform accelerator (WHPX) check 2477 + if test "$whpx" != "no" ; then 2478 + cat > $TMPC << EOF 2479 + #include <windows.h> 2480 + #include <winhvplatform.h> 2481 + #include <winhvemulation.h> 2482 + int main(void) { 2483 + WHV_CAPABILITY whpx_cap; 2484 + WHvGetCapability(WHvCapabilityCodeFeatures, &whpx_cap, sizeof(whpx_cap)); 2485 + return 0; 2486 + } 2487 + EOF 2488 + if compile_prog "" "-lwinhvplatform -lwinhvemulation" ; then 2489 + libs_softmmu="$libs_softmmu -lwinhvplatform -lwinhvemulation" 2490 + whpx="yes" 2491 + else 2492 + if test "$whpx" = "yes"; then 2493 + feature_not_found "winhvplatform" "winhvemulation is not installed" 2494 + fi 2495 + whpx="no" 2496 + fi 2497 + fi 2498 + 2499 + ########################################## 2454 2500 # Sparse probe 2455 2501 if test "$sparse" != "no" ; then 2456 2502 if has cgcc; then ··· 4743 4789 fi 4744 4790 4745 4791 if test "$debug_stack_usage" = "yes"; then 4746 - if test "$cpu" = "ia64" -o "$cpu" = "hppa"; then 4747 - error_exit "stack usage debugging is not supported for $cpu" 4748 - fi 4749 4792 if test "$coroutine_pool" = "yes"; then 4750 4793 echo "WARN: disabling coroutine pool for stack usage debugging" 4751 4794 coroutine_pool=no ··· 5205 5248 fi 5206 5249 5207 5250 ########################################## 5251 + # checks for sanitizers 5252 + 5253 + write_c_skeleton 5254 + 5255 + have_asan=no 5256 + have_ubsan=no 5257 + 
have_asan_iface_h=no 5258 + have_asan_iface_fiber=no 5259 + 5260 + if test "$sanitizers" = "yes" ; then 5261 + if compile_prog "$CPU_CFLAGS -Werror -fsanitize=address" ""; then 5262 + have_asan=yes 5263 + fi 5264 + if compile_prog "$CPU_CFLAGS -Werror -fsanitize=undefined" ""; then 5265 + have_ubsan=yes 5266 + fi 5267 + 5268 + if check_include "sanitizer/asan_interface.h" ; then 5269 + have_asan_iface_h=yes 5270 + fi 5271 + 5272 + cat > $TMPC << EOF 5273 + #include <sanitizer/asan_interface.h> 5274 + int main(void) { 5275 + __sanitizer_start_switch_fiber(0, 0, 0); 5276 + return 0; 5277 + } 5278 + EOF 5279 + if compile_prog "$CPU_CFLAGS -Werror -fsanitize=address" "" ; then 5280 + have_asan_iface_fiber=yes 5281 + fi 5282 + fi 5283 + 5284 + ########################################## 5208 5285 # End of CC checks 5209 5286 # After here, no more $cc or $ld runs 5287 + 5288 + write_c_skeleton 5210 5289 5211 5290 if test "$gcov" = "yes" ; then 5212 5291 CFLAGS="-fprofile-arcs -ftest-coverage -g $CFLAGS" ··· 5228 5307 CFLAGS="-O2 $CFLAGS" 5229 5308 fi 5230 5309 5310 + if test "$have_asan" = "yes"; then 5311 + CFLAGS="-fsanitize=address $CFLAGS" 5312 + if test "$have_asan_iface_h" = "no" ; then 5313 + echo "ASAN build enabled, but ASAN header missing." \ 5314 + "Without code annotation, the report may be inferior." 5315 + elif test "$have_asan_iface_fiber" = "no" ; then 5316 + echo "ASAN build enabled, but ASAN header is too old." \ 5317 + "Without code annotation, the report may be inferior." 
5318 + fi 5319 + fi 5320 + if test "$have_ubsan" = "yes"; then 5321 + CFLAGS="-fsanitize=undefined $CFLAGS" 5322 + fi 5323 + 5231 5324 ########################################## 5232 5325 # Do we have libnfs 5233 5326 if test "$libnfs" != "no" ; then ··· 5596 5689 echo "KVM support $kvm" 5597 5690 echo "HAX support $hax" 5598 5691 echo "HVF support $hvf" 5692 + echo "WHPX support $whpx" 5599 5693 echo "TCG support $tcg" 5600 5694 if test "$tcg" = "yes" ; then 5601 5695 echo "TCG debug enabled $debug_tcg" ··· 5763 5857 echo "CONFIG_QGA_NTDDDISK=y" >> $config_host_mak 5764 5858 fi 5765 5859 if test "$guest_agent_msi" = "yes"; then 5766 - echo "QEMU_GA_MSI_ENABLED=yes" >> $config_host_mak 5860 + echo "QEMU_GA_MSI_ENABLED=yes" >> $config_host_mak 5767 5861 echo "QEMU_GA_MSI_MINGW_DLL_PATH=${QEMU_GA_MSI_MINGW_DLL_PATH}" >> $config_host_mak 5768 5862 echo "QEMU_GA_MSI_WITH_VSS=${QEMU_GA_MSI_WITH_VSS}" >> $config_host_mak 5769 5863 echo "QEMU_GA_MSI_ARCH=${QEMU_GA_MSI_ARCH}" >> $config_host_mak ··· 6211 6305 echo "CONFIG_VALGRIND_H=y" >> $config_host_mak 6212 6306 fi 6213 6307 6308 + if test "$have_asan_iface_fiber" = "yes" ; then 6309 + echo "CONFIG_ASAN_IFACE_FIBER=y" >> $config_host_mak 6310 + fi 6311 + 6214 6312 if test "$has_environ" = "yes" ; then 6215 6313 echo "CONFIG_HAS_ENVIRON=y" >> $config_host_mak 6216 6314 fi ··· 6691 6789 fi 6692 6790 if supported_hvf_target $target; then 6693 6791 echo "CONFIG_HVF=y" >> $config_target_mak 6792 + fi 6793 + if supported_whpx_target $target; then 6794 + echo "CONFIG_WHPX=y" >> $config_target_mak 6694 6795 fi 6695 6796 if test "$target_bigendian" = "yes" ; then 6696 6797 echo "TARGET_WORDS_BIGENDIAN=y" >> $config_target_mak
+90 -23
cpus.c
··· 38 38 #include "sysemu/kvm.h" 39 39 #include "sysemu/hax.h" 40 40 #include "sysemu/hvf.h" 41 + #include "sysemu/whpx.h" 41 42 #include "qmp-commands.h" 42 43 #include "exec/exec-all.h" 43 44 ··· 1205 1206 cpu->created = false; 1206 1207 qemu_cond_signal(&qemu_cpu_cond); 1207 1208 qemu_mutex_unlock_iothread(); 1209 + rcu_unregister_thread(); 1208 1210 return NULL; 1209 1211 } 1210 1212 ··· 1233 1235 cpu->created = true; 1234 1236 qemu_cond_signal(&qemu_cpu_cond); 1235 1237 1236 - while (1) { 1238 + do { 1237 1239 qemu_mutex_unlock_iothread(); 1238 1240 do { 1239 1241 int sig; ··· 1245 1247 } 1246 1248 qemu_mutex_lock_iothread(); 1247 1249 qemu_wait_io_event(cpu); 1248 - } 1250 + } while (!cpu->unplug); 1249 1251 1252 + rcu_unregister_thread(); 1250 1253 return NULL; 1251 1254 #endif 1252 1255 } ··· 1465 1468 deal_with_unplugged_cpus(); 1466 1469 } 1467 1470 1471 + rcu_unregister_thread(); 1468 1472 return NULL; 1469 1473 } 1470 1474 ··· 1473 1477 CPUState *cpu = arg; 1474 1478 int r; 1475 1479 1480 + rcu_register_thread(); 1476 1481 qemu_mutex_lock_iothread(); 1477 1482 qemu_thread_get_self(cpu->thread); 1478 1483 ··· 1484 1489 hax_init_vcpu(cpu); 1485 1490 qemu_cond_signal(&qemu_cpu_cond); 1486 1491 1487 - while (1) { 1492 + do { 1488 1493 if (cpu_can_run(cpu)) { 1489 1494 r = hax_smp_cpu_exec(cpu); 1490 1495 if (r == EXCP_DEBUG) { ··· 1493 1498 } 1494 1499 1495 1500 qemu_wait_io_event(cpu); 1496 - } 1501 + } while (!cpu->unplug || cpu_can_run(cpu)); 1502 + rcu_unregister_thread(); 1497 1503 return NULL; 1498 1504 } 1499 1505 ··· 1536 1542 cpu->created = false; 1537 1543 qemu_cond_signal(&qemu_cpu_cond); 1538 1544 qemu_mutex_unlock_iothread(); 1545 + rcu_unregister_thread(); 1546 + return NULL; 1547 + } 1548 + 1549 + static void *qemu_whpx_cpu_thread_fn(void *arg) 1550 + { 1551 + CPUState *cpu = arg; 1552 + int r; 1553 + 1554 + rcu_register_thread(); 1555 + 1556 + qemu_mutex_lock_iothread(); 1557 + qemu_thread_get_self(cpu->thread); 1558 + cpu->thread_id = 
qemu_get_thread_id(); 1559 + current_cpu = cpu; 1560 + 1561 + r = whpx_init_vcpu(cpu); 1562 + if (r < 0) { 1563 + fprintf(stderr, "whpx_init_vcpu failed: %s\n", strerror(-r)); 1564 + exit(1); 1565 + } 1566 + 1567 + /* signal CPU creation */ 1568 + cpu->created = true; 1569 + qemu_cond_signal(&qemu_cpu_cond); 1570 + 1571 + do { 1572 + if (cpu_can_run(cpu)) { 1573 + r = whpx_vcpu_exec(cpu); 1574 + if (r == EXCP_DEBUG) { 1575 + cpu_handle_guest_debug(cpu); 1576 + } 1577 + } 1578 + while (cpu_thread_is_idle(cpu)) { 1579 + qemu_cond_wait(cpu->halt_cond, &qemu_global_mutex); 1580 + } 1581 + qemu_wait_io_event_common(cpu); 1582 + } while (!cpu->unplug || cpu_can_run(cpu)); 1583 + 1584 + whpx_destroy_vcpu(cpu); 1585 + cpu->created = false; 1586 + qemu_cond_signal(&qemu_cpu_cond); 1587 + qemu_mutex_unlock_iothread(); 1588 + rcu_unregister_thread(); 1539 1589 return NULL; 1540 1590 } 1541 1591 ··· 1599 1649 /* Ignore everything else? */ 1600 1650 break; 1601 1651 } 1602 - } else if (cpu->unplug) { 1603 - qemu_tcg_destroy_vcpu(cpu); 1604 - cpu->created = false; 1605 - qemu_cond_signal(&qemu_cpu_cond); 1606 - qemu_mutex_unlock_iothread(); 1607 - return NULL; 1608 1652 } 1609 1653 1610 1654 atomic_mb_set(&cpu->exit_request, 0); 1611 1655 qemu_wait_io_event(cpu); 1612 - } 1656 + } while (!cpu->unplug || cpu_can_run(cpu)); 1613 1657 1658 + qemu_tcg_destroy_vcpu(cpu); 1659 + cpu->created = false; 1660 + qemu_cond_signal(&qemu_cpu_cond); 1661 + qemu_mutex_unlock_iothread(); 1662 + rcu_unregister_thread(); 1614 1663 return NULL; 1615 1664 } 1616 1665 ··· 1630 1679 } 1631 1680 #else /* _WIN32 */ 1632 1681 if (!qemu_cpu_is_self(cpu)) { 1633 - if (!QueueUserAPC(dummy_apc_func, cpu->hThread, 0)) { 1634 - error_report("%s: QueueUserAPC failed with error %lu", __func__, 1635 - GetLastError()); 1682 + if (whpx_enabled()) { 1683 + whpx_vcpu_kick(cpu); 1684 + } else if (!QueueUserAPC(dummy_apc_func, cpu->hThread, 0)) { 1685 + fprintf(stderr, "%s: QueueUserAPC failed with error %lu\n", 1686 + 
__func__, GetLastError()); 1636 1687 exit(1); 1637 1688 } 1638 1689 } ··· 1747 1798 } 1748 1799 } 1749 1800 1750 - void cpu_remove(CPUState *cpu) 1801 + void cpu_remove_sync(CPUState *cpu) 1751 1802 { 1752 1803 cpu->stop = true; 1753 1804 cpu->unplug = true; 1754 1805 qemu_cpu_kick(cpu); 1755 - } 1756 - 1757 - void cpu_remove_sync(CPUState *cpu) 1758 - { 1759 - cpu_remove(cpu); 1760 - while (cpu->created) { 1761 - qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex); 1762 - } 1806 + qemu_mutex_unlock_iothread(); 1807 + qemu_thread_join(cpu->thread); 1808 + qemu_mutex_lock_iothread(); 1763 1809 } 1764 1810 1765 1811 /* For temporary buffers for forming a name */ ··· 1877 1923 } 1878 1924 } 1879 1925 1926 + static void qemu_whpx_start_vcpu(CPUState *cpu) 1927 + { 1928 + char thread_name[VCPU_THREAD_NAME_SIZE]; 1929 + 1930 + cpu->thread = g_malloc0(sizeof(QemuThread)); 1931 + cpu->halt_cond = g_malloc0(sizeof(QemuCond)); 1932 + qemu_cond_init(cpu->halt_cond); 1933 + snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/WHPX", 1934 + cpu->cpu_index); 1935 + qemu_thread_create(cpu->thread, thread_name, qemu_whpx_cpu_thread_fn, 1936 + cpu, QEMU_THREAD_JOINABLE); 1937 + #ifdef _WIN32 1938 + cpu->hThread = qemu_thread_get_handle(cpu->thread); 1939 + #endif 1940 + while (!cpu->created) { 1941 + qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex); 1942 + } 1943 + } 1944 + 1880 1945 static void qemu_dummy_start_vcpu(CPUState *cpu) 1881 1946 { 1882 1947 char thread_name[VCPU_THREAD_NAME_SIZE]; ··· 1915 1980 qemu_hvf_start_vcpu(cpu); 1916 1981 } else if (tcg_enabled()) { 1917 1982 qemu_tcg_init_vcpu(cpu); 1983 + } else if (whpx_enabled()) { 1984 + qemu_whpx_start_vcpu(cpu); 1918 1985 } else { 1919 1986 qemu_dummy_start_vcpu(cpu); 1920 1987 }
+1
default-configs/arm-softmmu.mak
··· 67 67 CONFIG_XGMAC=y 68 68 CONFIG_EXYNOS4=y 69 69 CONFIG_PXA2XX=y 70 + CONFIG_I2C=y 70 71 CONFIG_BITBANG_I2C=y 71 72 CONFIG_FRAMEBUFFER=y 72 73 CONFIG_XILINX_SPIPS=y
+1
default-configs/i386-softmmu.mak
··· 62 62 CONFIG_PXB=y 63 63 CONFIG_ACPI_VMGENID=y 64 64 CONFIG_FW_CFG_DMA=y 65 + CONFIG_I2C=y
+1
default-configs/mips-softmmu-common.mak
··· 34 34 CONFIG_EMPTY_SLOT=y 35 35 CONFIG_MIPS_CPS=y 36 36 CONFIG_MIPS_ITU=y 37 + CONFIG_I2C=y
+1
default-configs/ppc-softmmu.mak
··· 23 23 CONFIG_ETSEC=y 24 24 CONFIG_SM501=y 25 25 CONFIG_IDE_SII3112=y 26 + CONFIG_I2C=y 26 27 27 28 # For Macs 28 29 CONFIG_MAC=y
+1
default-configs/ppcemb-softmmu.mak
··· 17 17 CONFIG_XILINX_ETHLITE=y 18 18 CONFIG_SM501=y 19 19 CONFIG_IDE_SII3112=y 20 + CONFIG_I2C=y
+1
default-configs/x86_64-softmmu.mak
··· 62 62 CONFIG_PXB=y 63 63 CONFIG_ACPI_VMGENID=y 64 64 CONFIG_FW_CFG_DMA=y 65 + CONFIG_I2C=y
+24
hw/core/qdev.c
··· 1075 1075 dc->user_creatable = true; 1076 1076 } 1077 1077 1078 + void device_class_set_parent_reset(DeviceClass *dc, 1079 + DeviceReset dev_reset, 1080 + DeviceReset *parent_reset) 1081 + { 1082 + *parent_reset = dc->reset; 1083 + dc->reset = dev_reset; 1084 + } 1085 + 1086 + void device_class_set_parent_realize(DeviceClass *dc, 1087 + DeviceRealize dev_realize, 1088 + DeviceRealize *parent_realize) 1089 + { 1090 + *parent_realize = dc->realize; 1091 + dc->realize = dev_realize; 1092 + } 1093 + 1094 + void device_class_set_parent_unrealize(DeviceClass *dc, 1095 + DeviceUnrealize dev_unrealize, 1096 + DeviceUnrealize *parent_unrealize) 1097 + { 1098 + *parent_unrealize = dc->unrealize; 1099 + dc->unrealize = dev_unrealize; 1100 + } 1101 + 1078 1102 void device_reset(DeviceState *dev) 1079 1103 { 1080 1104 DeviceClass *klass = DEVICE_GET_CLASS(dev);
+1 -1
hw/display/exynos4210_fimd.c
··· 98 98 #define FIMD_WINCON_BUFSTATUS ((1 << 21) | (1 << 31)) 99 99 #define FIMD_WINCON_BUF0_STAT ((0 << 21) | (0 << 31)) 100 100 #define FIMD_WINCON_BUF1_STAT ((1 << 21) | (0 << 31)) 101 - #define FIMD_WINCON_BUF2_STAT ((0 << 21) | (1 << 31)) 101 + #define FIMD_WINCON_BUF2_STAT ((0 << 21) | (1U << 31)) 102 102 #define FIMD_WINCON_BUFSELECT ((1 << 20) | (1 << 30)) 103 103 #define FIMD_WINCON_BUF0_SEL ((0 << 20) | (0 << 30)) 104 104 #define FIMD_WINCON_BUF1_SEL ((1 << 20) | (0 << 30))
+1 -1
hw/i2c/Makefile.objs
··· 1 - common-obj-y += core.o smbus.o smbus_eeprom.o 1 + common-obj-$(CONFIG_I2C) += core.o smbus.o smbus_eeprom.o 2 2 common-obj-$(CONFIG_DDC) += i2c-ddc.o 3 3 common-obj-$(CONFIG_VERSATILE_I2C) += versatile_i2c.o 4 4 common-obj-$(CONFIG_ACPI_X86) += smbus_ich9.o
+2 -2
hw/i386/kvm/i8254.c
··· 315 315 PITCommonClass *k = PIT_COMMON_CLASS(klass); 316 316 DeviceClass *dc = DEVICE_CLASS(klass); 317 317 318 - kpc->parent_realize = dc->realize; 319 - dc->realize = kvm_pit_realizefn; 318 + device_class_set_parent_realize(dc, kvm_pit_realizefn, 319 + &kpc->parent_realize); 320 320 k->set_channel_gate = kvm_pit_set_gate; 321 321 k->get_channel_info = kvm_pit_get_channel_info; 322 322 dc->reset = kvm_pit_reset;
+1 -2
hw/i386/kvm/i8259.c
··· 142 142 DeviceClass *dc = DEVICE_CLASS(klass); 143 143 144 144 dc->reset = kvm_pic_reset; 145 - kpc->parent_realize = dc->realize; 146 - dc->realize = kvm_pic_realize; 145 + device_class_set_parent_realize(dc, kvm_pic_realize, &kpc->parent_realize); 147 146 k->pre_save = kvm_pic_get; 148 147 k->post_load = kvm_pic_put; 149 148 }
+2 -2
hw/input/adb-kbd.c
··· 374 374 ADBDeviceClass *adc = ADB_DEVICE_CLASS(oc); 375 375 ADBKeyboardClass *akc = ADB_KEYBOARD_CLASS(oc); 376 376 377 - akc->parent_realize = dc->realize; 378 - dc->realize = adb_kbd_realizefn; 377 + device_class_set_parent_realize(dc, adb_kbd_realizefn, 378 + &akc->parent_realize); 379 379 set_bit(DEVICE_CATEGORY_INPUT, dc->categories); 380 380 381 381 adc->devreq = adb_kbd_request;
+2 -2
hw/input/adb-mouse.c
··· 228 228 ADBDeviceClass *adc = ADB_DEVICE_CLASS(oc); 229 229 ADBMouseClass *amc = ADB_MOUSE_CLASS(oc); 230 230 231 - amc->parent_realize = dc->realize; 232 - dc->realize = adb_mouse_realizefn; 231 + device_class_set_parent_realize(dc, adb_mouse_realizefn, 232 + &amc->parent_realize); 233 233 set_bit(DEVICE_CATEGORY_INPUT, dc->categories); 234 234 235 235 adc->devreq = adb_mouse_request;
+1 -2
hw/intc/arm_gic.c
··· 1461 1461 DeviceClass *dc = DEVICE_CLASS(klass); 1462 1462 ARMGICClass *agc = ARM_GIC_CLASS(klass); 1463 1463 1464 - agc->parent_realize = dc->realize; 1465 - dc->realize = arm_gic_realize; 1464 + device_class_set_parent_realize(dc, arm_gic_realize, &agc->parent_realize); 1466 1465 } 1467 1466 1468 1467 static const TypeInfo arm_gic_info = {
+3 -4
hw/intc/arm_gic_kvm.c
··· 591 591 592 592 agcc->pre_save = kvm_arm_gic_get; 593 593 agcc->post_load = kvm_arm_gic_put; 594 - kgc->parent_realize = dc->realize; 595 - kgc->parent_reset = dc->reset; 596 - dc->realize = kvm_arm_gic_realize; 597 - dc->reset = kvm_arm_gic_reset; 594 + device_class_set_parent_realize(dc, kvm_arm_gic_realize, 595 + &kgc->parent_realize); 596 + device_class_set_parent_reset(dc, kvm_arm_gic_reset, &kgc->parent_reset); 598 597 } 599 598 600 599 static const TypeInfo kvm_arm_gic_info = {
+1 -2
hw/intc/arm_gicv3.c
··· 385 385 ARMGICv3Class *agc = ARM_GICV3_CLASS(klass); 386 386 387 387 agcc->post_load = arm_gicv3_post_load; 388 - agc->parent_realize = dc->realize; 389 - dc->realize = arm_gic_realize; 388 + device_class_set_parent_realize(dc, arm_gic_realize, &agc->parent_realize); 390 389 } 391 390 392 391 static const TypeInfo arm_gicv3_info = {
+1 -2
hw/intc/arm_gicv3_its_kvm.c
··· 245 245 246 246 dc->realize = kvm_arm_its_realize; 247 247 dc->props = kvm_arm_its_props; 248 - ic->parent_reset = dc->reset; 248 + device_class_set_parent_reset(dc, kvm_arm_its_reset, &ic->parent_reset); 249 249 icc->send_msi = kvm_its_send_msi; 250 250 icc->pre_save = kvm_arm_its_pre_save; 251 251 icc->post_load = kvm_arm_its_post_load; 252 - dc->reset = kvm_arm_its_reset; 253 252 } 254 253 255 254 static const TypeInfo kvm_arm_its_info = {
+3 -4
hw/intc/arm_gicv3_kvm.c
··· 795 795 796 796 agcc->pre_save = kvm_arm_gicv3_get; 797 797 agcc->post_load = kvm_arm_gicv3_put; 798 - kgc->parent_realize = dc->realize; 799 - kgc->parent_reset = dc->reset; 800 - dc->realize = kvm_arm_gicv3_realize; 801 - dc->reset = kvm_arm_gicv3_reset; 798 + device_class_set_parent_realize(dc, kvm_arm_gicv3_realize, 799 + &kgc->parent_realize); 800 + device_class_set_parent_reset(dc, kvm_arm_gicv3_reset, &kgc->parent_reset); 802 801 } 803 802 804 803 static const TypeInfo kvm_arm_gicv3_info = {
+1 -2
hw/intc/i8259.c
··· 443 443 PICClass *k = PIC_CLASS(klass); 444 444 DeviceClass *dc = DEVICE_CLASS(klass); 445 445 446 - k->parent_realize = dc->realize; 447 - dc->realize = pic_realize; 446 + device_class_set_parent_realize(dc, pic_realize, &k->parent_realize); 448 447 dc->reset = pic_reset; 449 448 } 450 449
+61 -18
hw/misc/ivshmem.c
··· 76 76 typedef struct MSIVector { 77 77 PCIDevice *pdev; 78 78 int virq; 79 + bool unmasked; 79 80 } MSIVector; 80 81 81 82 typedef struct IVShmemState { ··· 316 317 int ret; 317 318 318 319 IVSHMEM_DPRINTF("vector unmask %p %d\n", dev, vector); 320 + if (!v->pdev) { 321 + error_report("ivshmem: vector %d route does not exist", vector); 322 + return -EINVAL; 323 + } 324 + assert(!v->unmasked); 319 325 320 326 ret = kvm_irqchip_update_msi_route(kvm_state, v->virq, msg, dev); 321 327 if (ret < 0) { ··· 323 329 } 324 330 kvm_irqchip_commit_routes(kvm_state); 325 331 326 - return kvm_irqchip_add_irqfd_notifier_gsi(kvm_state, n, NULL, v->virq); 332 + ret = kvm_irqchip_add_irqfd_notifier_gsi(kvm_state, n, NULL, v->virq); 333 + if (ret < 0) { 334 + return ret; 335 + } 336 + v->unmasked = true; 337 + 338 + return 0; 327 339 } 328 340 329 341 static void ivshmem_vector_mask(PCIDevice *dev, unsigned vector) 330 342 { 331 343 IVShmemState *s = IVSHMEM_COMMON(dev); 332 344 EventNotifier *n = &s->peers[s->vm_id].eventfds[vector]; 345 + MSIVector *v = &s->msi_vectors[vector]; 333 346 int ret; 334 347 335 348 IVSHMEM_DPRINTF("vector mask %p %d\n", dev, vector); 349 + if (!v->pdev) { 350 + error_report("ivshmem: vector %d route does not exist", vector); 351 + return; 352 + } 353 + assert(v->unmasked); 336 354 337 - ret = kvm_irqchip_remove_irqfd_notifier_gsi(kvm_state, n, 338 - s->msi_vectors[vector].virq); 339 - if (ret != 0) { 355 + ret = kvm_irqchip_remove_irqfd_notifier_gsi(kvm_state, n, v->virq); 356 + if (ret < 0) { 340 357 error_report("remove_irqfd_notifier_gsi failed"); 358 + return; 341 359 } 360 + v->unmasked = false; 342 361 } 343 362 344 363 static void ivshmem_vector_poll(PCIDevice *dev, ··· 738 757 } 739 758 } 740 759 760 + static void ivshmem_disable_irqfd(IVShmemState *s); 761 + 741 762 static void ivshmem_reset(DeviceState *d) 742 763 { 743 764 IVShmemState *s = IVSHMEM_COMMON(d); 765 + 766 + ivshmem_disable_irqfd(s); 744 767 745 768 s->intrstatus = 0; 746 769 
s->intrmask = 0; ··· 766 789 return 0; 767 790 } 768 791 792 + static void ivshmem_remove_kvm_msi_virq(IVShmemState *s, int vector) 793 + { 794 + IVSHMEM_DPRINTF("ivshmem_remove_kvm_msi_virq vector:%d\n", vector); 795 + 796 + if (s->msi_vectors[vector].pdev == NULL) { 797 + return; 798 + } 799 + 800 + /* it was cleaned when masked in the frontend. */ 801 + kvm_irqchip_release_virq(kvm_state, s->msi_vectors[vector].virq); 802 + 803 + s->msi_vectors[vector].pdev = NULL; 804 + } 805 + 769 806 static void ivshmem_enable_irqfd(IVShmemState *s) 770 807 { 771 808 PCIDevice *pdev = PCI_DEVICE(s); ··· 777 814 ivshmem_add_kvm_msi_virq(s, i, &err); 778 815 if (err) { 779 816 error_report_err(err); 780 - /* TODO do we need to handle the error? */ 817 + goto undo; 781 818 } 782 819 } 783 820 ··· 786 823 ivshmem_vector_mask, 787 824 ivshmem_vector_poll)) { 788 825 error_report("ivshmem: msix_set_vector_notifiers failed"); 826 + goto undo; 789 827 } 790 - } 791 - 792 - static void ivshmem_remove_kvm_msi_virq(IVShmemState *s, int vector) 793 - { 794 - IVSHMEM_DPRINTF("ivshmem_remove_kvm_msi_virq vector:%d\n", vector); 828 + return; 795 829 796 - if (s->msi_vectors[vector].pdev == NULL) { 797 - return; 830 + undo: 831 + while (--i >= 0) { 832 + ivshmem_remove_kvm_msi_virq(s, i); 798 833 } 799 - 800 - /* it was cleaned when masked in the frontend. */ 801 - kvm_irqchip_release_virq(kvm_state, s->msi_vectors[vector].virq); 802 - 803 - s->msi_vectors[vector].pdev = NULL; 804 834 } 805 835 806 836 static void ivshmem_disable_irqfd(IVShmemState *s) ··· 808 838 PCIDevice *pdev = PCI_DEVICE(s); 809 839 int i; 810 840 841 + if (!pdev->msix_vector_use_notifier) { 842 + return; 843 + } 844 + 845 + msix_unset_vector_notifiers(pdev); 846 + 811 847 for (i = 0; i < s->peers[s->vm_id].nb_eventfds; i++) { 848 + /* 849 + * MSI-X is already disabled here so msix_unset_vector_notifiers() 850 + * didn't call our release notifier. Do it now to keep our masks and 851 + * unmasks balanced. 
852 + */ 853 + if (s->msi_vectors[i].unmasked) { 854 + ivshmem_vector_mask(pdev, i); 855 + } 812 856 ivshmem_remove_kvm_msi_virq(s, i); 813 857 } 814 858 815 - msix_unset_vector_notifiers(pdev); 816 859 } 817 860 818 861 static void ivshmem_write_config(PCIDevice *pdev, uint32_t address,
+2 -2
hw/net/vmxnet3.c
··· 2664 2664 c->class_id = PCI_CLASS_NETWORK_ETHERNET; 2665 2665 c->subsystem_vendor_id = PCI_VENDOR_ID_VMWARE; 2666 2666 c->subsystem_id = PCI_DEVICE_ID_VMWARE_VMXNET3; 2667 - vc->parent_dc_realize = dc->realize; 2668 - dc->realize = vmxnet3_realize; 2667 + device_class_set_parent_realize(dc, vmxnet3_realize, 2668 + &vc->parent_dc_realize); 2669 2669 dc->desc = "VMWare Paravirtualized Ethernet v3"; 2670 2670 dc->reset = vmxnet3_qdev_reset; 2671 2671 dc->vmsd = &vmstate_vmxnet3;
+1 -1
hw/nvram/Makefile.objs
··· 1 1 common-obj-$(CONFIG_DS1225Y) += ds1225y.o 2 2 common-obj-y += eeprom93xx.o 3 - common-obj-y += eeprom_at24c.o 3 + common-obj-$(CONFIG_I2C) += eeprom_at24c.o 4 4 common-obj-y += fw_cfg.o 5 5 common-obj-y += chrp_nvram.o 6 6 common-obj-$(CONFIG_MAC_NVRAM) += mac_nvram.o
+1 -2
hw/pci-bridge/gen_pcie_root_port.c
··· 137 137 dc->vmsd = &vmstate_rp_dev; 138 138 dc->props = gen_rp_props; 139 139 140 - rpc->parent_realize = dc->realize; 141 - dc->realize = gen_rp_realize; 140 + device_class_set_parent_realize(dc, gen_rp_realize, &rpc->parent_realize); 142 141 143 142 rpc->aer_vector = gen_rp_aer_vector; 144 143 rpc->interrupts_init = gen_rp_interrupts_init;
+3 -6
hw/scsi/scsi-generic.c
··· 482 482 int rc; 483 483 int sg_version; 484 484 struct sg_scsi_id scsiid; 485 - Error *local_err = NULL; 486 485 487 486 if (!s->conf.blk) { 488 487 error_setg(errp, "drive property not set"); ··· 516 515 error_setg(errp, "SG_GET_SCSI_ID ioctl failed"); 517 516 return; 518 517 } 519 - blkconf_apply_backend_options(&s->conf, 520 - blk_is_read_only(s->conf.blk), 521 - true, &local_err); 522 - if (local_err) { 523 - error_propagate(errp, local_err); 518 + if (!blkconf_apply_backend_options(&s->conf, 519 + blk_is_read_only(s->conf.blk), 520 + true, errp)) { 524 521 return; 525 522 } 526 523
+2 -2
hw/scsi/vmw_pvscsi.c
··· 1284 1284 k->device_id = PCI_DEVICE_ID_VMWARE_PVSCSI; 1285 1285 k->class_id = PCI_CLASS_STORAGE_SCSI; 1286 1286 k->subsystem_id = 0x1000; 1287 - pvs_k->parent_dc_realize = dc->realize; 1288 - dc->realize = pvscsi_realize; 1287 + device_class_set_parent_realize(dc, pvscsi_realize, 1288 + &pvs_k->parent_dc_realize); 1289 1289 dc->reset = pvscsi_reset; 1290 1290 dc->vmsd = &vmstate_pvscsi; 1291 1291 dc->props = pvscsi_properties;
+1 -2
hw/timer/i8254.c
··· 358 358 PITCommonClass *k = PIT_COMMON_CLASS(klass); 359 359 DeviceClass *dc = DEVICE_CLASS(klass); 360 360 361 - pc->parent_realize = dc->realize; 362 - dc->realize = pit_realizefn; 361 + device_class_set_parent_realize(dc, pit_realizefn, &pc->parent_realize); 363 362 k->set_channel_gate = pit_set_channel_gate; 364 363 k->get_channel_info = pit_get_channel_info_common; 365 364 k->post_load = pit_post_load;
+2 -2
hw/vfio/amd-xgbe.c
··· 34 34 DeviceClass *dc = DEVICE_CLASS(klass); 35 35 VFIOAmdXgbeDeviceClass *vcxc = 36 36 VFIO_AMD_XGBE_DEVICE_CLASS(klass); 37 - vcxc->parent_realize = dc->realize; 38 - dc->realize = amd_xgbe_realize; 37 + device_class_set_parent_realize(dc, amd_xgbe_realize, 38 + &vcxc->parent_realize); 39 39 dc->desc = "VFIO AMD XGBE"; 40 40 dc->vmsd = &vfio_platform_amd_xgbe_vmstate; 41 41 /* Supported by TYPE_VIRT_MACHINE */
+2 -2
hw/vfio/calxeda-xgmac.c
··· 34 34 DeviceClass *dc = DEVICE_CLASS(klass); 35 35 VFIOCalxedaXgmacDeviceClass *vcxc = 36 36 VFIO_CALXEDA_XGMAC_DEVICE_CLASS(klass); 37 - vcxc->parent_realize = dc->realize; 38 - dc->realize = calxeda_xgmac_realize; 37 + device_class_set_parent_realize(dc, calxeda_xgmac_realize, 38 + &vcxc->parent_realize); 39 39 dc->desc = "VFIO Calxeda XGMAC"; 40 40 dc->vmsd = &vfio_platform_calxeda_xgmac_vmstate; 41 41 /* Supported by TYPE_VIRT_MACHINE */
+12 -4
hw/vfio/common.c
··· 1187 1187 { 1188 1188 VFIOContainer *container = group->container; 1189 1189 1190 + QLIST_REMOVE(group, container_next); 1191 + group->container = NULL; 1192 + 1193 + /* 1194 + * Explicitly release the listener first before unset container, 1195 + * since unset may destroy the backend container if it's the last 1196 + * group. 1197 + */ 1198 + if (QLIST_EMPTY(&container->group_list)) { 1199 + vfio_listener_release(container); 1200 + } 1201 + 1190 1202 if (ioctl(group->fd, VFIO_GROUP_UNSET_CONTAINER, &container->fd)) { 1191 1203 error_report("vfio: error disconnecting group %d from container", 1192 1204 group->groupid); 1193 1205 } 1194 1206 1195 - QLIST_REMOVE(group, container_next); 1196 - group->container = NULL; 1197 - 1198 1207 if (QLIST_EMPTY(&container->group_list)) { 1199 1208 VFIOAddressSpace *space = container->space; 1200 1209 VFIOGuestIOMMU *giommu, *tmp; 1201 1210 1202 - vfio_listener_release(container); 1203 1211 QLIST_REMOVE(container, next); 1204 1212 1205 1213 QLIST_FOREACH_SAFE(giommu, &container->giommu_list, giommu_next, tmp) {
+6
hw/virtio/trace-events
··· 25 25 virtio_balloon_get_config(uint32_t num_pages, uint32_t actual) "num_pages: %d actual: %d" 26 26 virtio_balloon_set_config(uint32_t actual, uint32_t oldactual) "actual: %d oldactual: %d" 27 27 virtio_balloon_to_target(uint64_t target, uint32_t num_pages) "balloon target: 0x%"PRIx64" num_pages: %d" 28 + 29 + # hw/virtio/vhost.c 30 + vhost_region_add(void *p, const char *mr) "dev %p mr %s" 31 + vhost_region_del(void *p, const char *mr) "dev %p mr %s" 32 + vhost_iommu_region_add(void *p, const char *mr) "dev %p mr %s" 33 + vhost_iommu_region_del(void *p, const char *mr) "dev %p mr %s"
+14 -5
hw/virtio/vhost.c
··· 27 27 #include "hw/virtio/virtio-access.h" 28 28 #include "migration/blocker.h" 29 29 #include "sysemu/dma.h" 30 + #include "trace.h" 30 31 31 32 /* enabled until disconnected backend stabilizes */ 32 33 #define _VHOST_DEBUG 1 ··· 329 330 330 331 static struct vhost_log *vhost_log_alloc(uint64_t size, bool share) 331 332 { 333 + Error *err = NULL; 332 334 struct vhost_log *log; 333 335 uint64_t logsize = size * sizeof(*(log->log)); 334 336 int fd = -1; ··· 337 339 if (share) { 338 340 log->log = qemu_memfd_alloc("vhost-log", logsize, 339 341 F_SEAL_GROW | F_SEAL_SHRINK | F_SEAL_SEAL, 340 - &fd); 342 + &fd, &err); 343 + if (err) { 344 + error_report_err(err); 345 + g_free(log); 346 + return NULL; 347 + } 341 348 memset(log->log, 0, logsize); 342 349 } else { 343 350 log->log = g_malloc0(logsize); ··· 687 694 return; 688 695 } 689 696 697 + trace_vhost_region_add(dev, section->mr->name ?: NULL); 690 698 ++dev->n_mem_sections; 691 699 dev->mem_sections = g_renew(MemoryRegionSection, dev->mem_sections, 692 700 dev->n_mem_sections); ··· 706 714 return; 707 715 } 708 716 717 + trace_vhost_region_del(dev, section->mr->name ?: NULL); 709 718 vhost_set_memory(listener, section, false); 710 719 memory_region_unref(section->mr); 711 720 for (i = 0; i < dev->n_mem_sections; ++i) { ··· 743 752 return; 744 753 } 745 754 755 + trace_vhost_iommu_region_add(dev, section->mr->name ?: NULL); 756 + 746 757 iommu = g_malloc0(sizeof(*iommu)); 747 758 end = int128_add(int128_make64(section->offset_within_region), 748 759 section->size); ··· 770 781 if (!memory_region_is_iommu(section->mr)) { 771 782 return; 772 783 } 784 + 785 + trace_vhost_iommu_region_del(dev, section->mr->name ?: NULL); 773 786 774 787 QLIST_FOREACH(iommu, &dev->iommu_list, iommu_next) { 775 788 if (iommu->mr == section->mr && ··· 1361 1374 if (hdev->mem) { 1362 1375 /* those are only safe after successful init */ 1363 1376 memory_listener_unregister(&hdev->memory_listener); 1364 - for (i = 0; i < 
hdev->n_mem_sections; ++i) { 1365 - MemoryRegionSection *section = &hdev->mem_sections[i]; 1366 - memory_region_unref(section->mr); 1367 - } 1368 1377 QLIST_REMOVE(hdev, entry); 1369 1378 } 1370 1379 if (hdev->migration_blocker) {
+2 -2
hw/virtio/virtio-pci.c
··· 1907 1907 k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET; 1908 1908 k->revision = VIRTIO_PCI_ABI_VERSION; 1909 1909 k->class_id = PCI_CLASS_OTHERS; 1910 - vpciklass->parent_dc_realize = dc->realize; 1911 - dc->realize = virtio_pci_dc_realize; 1910 + device_class_set_parent_realize(dc, virtio_pci_dc_realize, 1911 + &vpciklass->parent_dc_realize); 1912 1912 dc->reset = virtio_pci_reset; 1913 1913 } 1914 1914
+4 -3
include/exec/memory-internal.h
··· 1 1 /* 2 - * Declarations for obsolete exec.c functions 2 + * Declarations for functions which are internal to the memory subsystem. 3 3 * 4 4 * Copyright 2011 Red Hat, Inc. and/or its affiliates 5 5 * ··· 12 12 */ 13 13 14 14 /* 15 - * This header is for use by exec.c and memory.c ONLY. Do not include it. 16 - * The functions declared here will be removed soon. 15 + * This header is for use by exec.c, memory.c and accel/tcg/cputlb.c ONLY, 16 + * for declarations which are shared between the memory subsystem's 17 + * internals and the TCG TLB code. Do not include it from elsewhere. 17 18 */ 18 19 19 20 #ifndef MEMORY_INTERNAL_H
+15 -12
include/exec/memory.h
··· 332 332 * MemoryRegionSection: describes a fragment of a #MemoryRegion 333 333 * 334 334 * @mr: the region, or %NULL if empty 335 - * @address_space: the address space the region is mapped in 335 + * @fv: the flat view of the address space the region is mapped in 336 336 * @offset_within_region: the beginning of the section, relative to @mr's start 337 337 * @size: the size of the section; will not exceed @mr's boundaries 338 338 * @offset_within_address_space: the address of the first byte of the section ··· 618 618 * @mr: the #MemoryRegion to be initialized. 619 619 * @owner: the object that tracks the region's reference count 620 620 * @ops: callbacks for write access handling (must not be NULL). 621 + * @opaque: passed to the read and write callbacks of the @ops structure. 621 622 * @name: Region name, becomes part of RAMBlock name used in migration stream 622 623 * must be unique within any device 623 624 * @size: size of the region. ··· 661 662 * An IOMMU region translates addresses and forwards accesses to a target 662 663 * memory region. 663 664 * 664 - * @typename: QOM class name 665 665 * @_iommu_mr: the #IOMMUMemoryRegion to be initialized 666 666 * @instance_size: the IOMMUMemoryRegion subclass instance size 667 + * @mrtypename: the type name of the #IOMMUMemoryRegion 667 668 * @owner: the object that tracks the region's reference count 668 - * @ops: a function that translates addresses into the @target region 669 669 * @name: used for debugging; not visible to the user or ABI 670 670 * @size: size of the region. 671 671 */ ··· 835 835 * memory_region_get_iommu_class_nocheck: returns iommu memory region class 836 836 * if an iommu or NULL if not 837 837 * 838 - * Returns pointer to IOMMUMemoryRegioniClass if a memory region is an iommu, 839 - * otherwise NULL. This is fast path avoinding QOM checking, use with caution. 838 + * Returns pointer to IOMMUMemoryRegionClass if a memory region is an iommu, 839 + * otherwise NULL. 
This is fast path avoiding QOM checking, use with caution. 840 840 * 841 841 * @mr: the memory region being queried 842 842 */ ··· 1015 1015 * protecting the pointer, such as a reference to the region that includes 1016 1016 * the incoming ram_addr_t. 1017 1017 * 1018 - * @mr: the memory region being queried. 1018 + * @ptr: the host pointer to be converted 1019 + * @offset: the offset within memory region 1019 1020 */ 1020 1021 MemoryRegion *memory_region_from_host(void *ptr, ram_addr_t *offset); 1021 1022 ··· 1292 1293 * @size: the size of the access to trigger the eventfd 1293 1294 * @match_data: whether to match against @data, instead of just @addr 1294 1295 * @data: the data to match against the guest write 1295 - * @fd: the eventfd to be triggered when @addr, @size, and @data all match. 1296 + * @e: event notifier to be triggered when @addr, @size, and @data all match. 1296 1297 **/ 1297 1298 void memory_region_add_eventfd(MemoryRegion *mr, 1298 1299 hwaddr addr, ··· 1312 1313 * @size: the size of the access to trigger the eventfd 1313 1314 * @match_data: whether to match against @data, instead of just @addr 1314 1315 * @data: the data to match against the guest write 1315 - * @fd: the eventfd to be triggered when @addr, @size, and @data all match. 1316 + * @e: event notifier to be triggered when @addr, @size, and @data all match. 1316 1317 */ 1317 1318 void memory_region_del_eventfd(MemoryRegion *mr, 1318 1319 hwaddr addr, ··· 1548 1549 * will need to request the pointer again. 1549 1550 * 1550 1551 * @mr: #MemoryRegion associated to the pointer. 1551 - * @addr: address within that region 1552 + * @offset: offset within the memory region 1552 1553 * @size: size of that area. 
1553 1554 */ 1554 1555 void memory_region_invalidate_mmio_ptr(MemoryRegion *mr, hwaddr offset, ··· 1617 1618 * @addr: address within that address space 1618 1619 * @attrs: memory transaction attributes 1619 1620 * @buf: buffer with the data transferred 1621 + * @len: the number of bytes to read or write 1620 1622 * @is_write: indicates the transfer direction 1621 1623 */ 1622 1624 MemTxResult address_space_rw(AddressSpace *as, hwaddr addr, ··· 1634 1636 * @addr: address within that address space 1635 1637 * @attrs: memory transaction attributes 1636 1638 * @buf: buffer with the data transferred 1639 + * @len: the number of bytes to write 1637 1640 */ 1638 1641 MemTxResult address_space_write(AddressSpace *as, hwaddr addr, 1639 1642 MemTxAttrs attrs, ··· 1832 1835 * called from an RCU critical section, to avoid that the last reference 1833 1836 * to the returned region disappears after address_space_translate returns. 1834 1837 * 1835 - * @as: #AddressSpace to be accessed 1838 + * @fv: #FlatView to be accessed 1836 1839 * @addr: address within that address space 1837 1840 * @xlat: pointer to address within the returned memory region section's 1838 1841 * #MemoryRegion. ··· 1890 1893 * the amount of memory that was actually read or written by the caller. 1891 1894 * 1892 1895 * @as: #AddressSpace used 1893 - * @addr: address within that address space 1896 + * @buffer: host pointer as returned by address_space_map() 1894 1897 * @len: buffer length as returned by address_space_map() 1895 1898 * @access_len: amount of data actually transferred 1896 1899 * @is_write: indicates the transfer direction ··· 1927 1930 * or failed (eg unassigned memory, device rejected the transaction, 1928 1931 * IOMMU fault). 1929 1932 * 1930 - * @as: #AddressSpace to be accessed 1933 + * @fv: #FlatView to be accessed 1931 1934 * @addr: address within that address space 1932 1935 * @attrs: memory transaction attributes 1933 1936 * @buf: buffer with the data transferred
+12 -2
include/hw/qdev-core.h
··· 32 32 33 33 typedef int (*qdev_initfn)(DeviceState *dev); 34 34 typedef int (*qdev_event)(DeviceState *dev); 35 - typedef void (*qdev_resetfn)(DeviceState *dev); 36 35 typedef void (*DeviceRealize)(DeviceState *dev, Error **errp); 37 36 typedef void (*DeviceUnrealize)(DeviceState *dev, Error **errp); 37 + typedef void (*DeviceReset)(DeviceState *dev); 38 38 typedef void (*BusRealize)(BusState *bus, Error **errp); 39 39 typedef void (*BusUnrealize)(BusState *bus, Error **errp); 40 40 ··· 117 117 bool hotpluggable; 118 118 119 119 /* callbacks */ 120 - void (*reset)(DeviceState *dev); 120 + DeviceReset reset; 121 121 DeviceRealize realize; 122 122 DeviceUnrealize unrealize; 123 123 ··· 381 381 * Reset a single device (by calling the reset method). 382 382 */ 383 383 void device_reset(DeviceState *dev); 384 + 385 + void device_class_set_parent_reset(DeviceClass *dc, 386 + DeviceReset dev_reset, 387 + DeviceReset *parent_reset); 388 + void device_class_set_parent_realize(DeviceClass *dc, 389 + DeviceRealize dev_realize, 390 + DeviceRealize *parent_realize); 391 + void device_class_set_parent_unrealize(DeviceClass *dc, 392 + DeviceUnrealize dev_unrealize, 393 + DeviceUnrealize *parent_unrealize); 384 394 385 395 const struct VMStateDescription *qdev_get_vmsd(DeviceState *dev); 386 396
+4
include/qemu/compiler.h
··· 111 111 #define GCC_FMT_ATTR(n, m) 112 112 #endif 113 113 114 + #ifndef __has_feature 115 + #define __has_feature(x) 0 /* compatibility with non-clang compilers */ 116 + #endif 117 + 114 118 #endif /* COMPILER_H */
+3 -2
include/qemu/memfd.h
··· 16 16 #define F_SEAL_WRITE 0x0008 /* prevent writes */ 17 17 #endif 18 18 19 - int qemu_memfd_create(const char *name, size_t size, unsigned int seals); 19 + int qemu_memfd_create(const char *name, size_t size, bool hugetlb, 20 + uint64_t hugetlbsize, unsigned int seals, Error **errp); 20 21 void *qemu_memfd_alloc(const char *name, size_t size, unsigned int seals, 21 - int *fd); 22 + int *fd, Error **errp); 22 23 void qemu_memfd_free(void *ptr, size_t size, int fd); 23 24 bool qemu_memfd_check(void); 24 25
-3
include/qemu/processor.h
··· 12 12 #if defined(__i386__) || defined(__x86_64__) 13 13 # define cpu_relax() asm volatile("rep; nop" ::: "memory") 14 14 15 - #elif defined(__ia64__) 16 - # define cpu_relax() asm volatile("hint @pause" ::: "memory") 17 - 18 15 #elif defined(__aarch64__) 19 16 # define cpu_relax() asm volatile("yield" ::: "memory") 20 17
-9
include/qemu/timer.h
··· 931 931 return val; 932 932 } 933 933 934 - #elif defined(__ia64) 935 - 936 - static inline int64_t cpu_get_host_ticks(void) 937 - { 938 - int64_t val; 939 - asm volatile ("mov %0 = ar.itc" : "=r"(val) :: "memory"); 940 - return val; 941 - } 942 - 943 934 #elif defined(__s390__) 944 935 945 936 static inline int64_t cpu_get_host_ticks(void)
+13
include/sysemu/hw_accel.h
··· 14 14 #include "qom/cpu.h" 15 15 #include "sysemu/hax.h" 16 16 #include "sysemu/kvm.h" 17 + #include "sysemu/whpx.h" 17 18 18 19 static inline void cpu_synchronize_state(CPUState *cpu) 19 20 { ··· 22 23 } 23 24 if (hax_enabled()) { 24 25 hax_cpu_synchronize_state(cpu); 26 + } 27 + if (whpx_enabled()) { 28 + whpx_cpu_synchronize_state(cpu); 25 29 } 26 30 } 27 31 ··· 32 36 } 33 37 if (hax_enabled()) { 34 38 hax_cpu_synchronize_post_reset(cpu); 39 + } 40 + if (whpx_enabled()) { 41 + whpx_cpu_synchronize_post_reset(cpu); 35 42 } 36 43 } 37 44 ··· 43 50 if (hax_enabled()) { 44 51 hax_cpu_synchronize_post_init(cpu); 45 52 } 53 + if (whpx_enabled()) { 54 + whpx_cpu_synchronize_post_init(cpu); 55 + } 46 56 } 47 57 48 58 static inline void cpu_synchronize_pre_loadvm(CPUState *cpu) ··· 52 62 } 53 63 if (hax_enabled()) { 54 64 hax_cpu_synchronize_pre_loadvm(cpu); 65 + } 66 + if (whpx_enabled()) { 67 + whpx_cpu_synchronize_pre_loadvm(cpu); 55 68 } 56 69 } 57 70
+40
include/sysemu/whpx.h
··· 1 + /* 2 + * QEMU Windows Hypervisor Platform accelerator (WHPX) support 3 + * 4 + * Copyright Microsoft, Corp. 2017 5 + * 6 + * Authors: 7 + * 8 + * This work is licensed under the terms of the GNU GPL, version 2 or later. 9 + * See the COPYING file in the top-level directory. 10 + * 11 + */ 12 + 13 + #ifndef QEMU_WHPX_H 14 + #define QEMU_WHPX_H 15 + 16 + #include "config-host.h" 17 + #include "qemu-common.h" 18 + 19 + int whpx_init_vcpu(CPUState *cpu); 20 + int whpx_vcpu_exec(CPUState *cpu); 21 + void whpx_destroy_vcpu(CPUState *cpu); 22 + void whpx_vcpu_kick(CPUState *cpu); 23 + 24 + 25 + void whpx_cpu_synchronize_state(CPUState *cpu); 26 + void whpx_cpu_synchronize_post_reset(CPUState *cpu); 27 + void whpx_cpu_synchronize_post_init(CPUState *cpu); 28 + void whpx_cpu_synchronize_pre_loadvm(CPUState *cpu); 29 + 30 + #ifdef CONFIG_WHPX 31 + 32 + int whpx_enabled(void); 33 + 34 + #else /* CONFIG_WHPX */ 35 + 36 + #define whpx_enabled() (0) 37 + 38 + #endif /* CONFIG_WHPX */ 39 + 40 + #endif /* QEMU_WHPX_H */
+1 -6
linux-user/syscall.c
··· 36 36 #include <linux/capability.h> 37 37 #include <sched.h> 38 38 #include <sys/timex.h> 39 - #ifdef __ia64__ 40 - int __clone2(int (*fn)(void *), void *child_stack_base, 41 - size_t stack_size, int flags, void *arg, ...); 42 - #endif 43 39 #include <sys/socket.h> 44 40 #include <sys/un.h> 45 41 #include <sys/uio.h> ··· 246 242 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch 247 243 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch 248 244 249 - #if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__) || \ 250 - defined(__s390x__) 245 + #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__) 251 246 #define __NR__llseek __NR_lseek 252 247 #endif 253 248
+28
memory.c
··· 1091 1091 address_space_update_ioeventfds(as); 1092 1092 } 1093 1093 memory_region_update_pending = false; 1094 + ioeventfd_update_pending = false; 1094 1095 MEMORY_LISTENER_CALL_GLOBAL(commit, Forward); 1095 1096 } else if (ioeventfd_update_pending) { 1096 1097 QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) { ··· 2624 2625 flatview_unref(view); 2625 2626 } 2626 2627 2628 + static void listener_del_address_space(MemoryListener *listener, 2629 + AddressSpace *as) 2630 + { 2631 + FlatView *view; 2632 + FlatRange *fr; 2633 + 2634 + if (listener->begin) { 2635 + listener->begin(listener); 2636 + } 2637 + view = address_space_get_flatview(as); 2638 + FOR_EACH_FLAT_RANGE(fr, view) { 2639 + MemoryRegionSection section = section_from_flat_range(fr, view); 2640 + 2641 + if (fr->dirty_log_mask && listener->log_stop) { 2642 + listener->log_stop(listener, &section, fr->dirty_log_mask, 0); 2643 + } 2644 + if (listener->region_del) { 2645 + listener->region_del(listener, &section); 2646 + } 2647 + } 2648 + if (listener->commit) { 2649 + listener->commit(listener); 2650 + } 2651 + flatview_unref(view); 2652 + } 2653 + 2627 2654 void memory_listener_register(MemoryListener *listener, AddressSpace *as) 2628 2655 { 2629 2656 MemoryListener *other = NULL; ··· 2664 2691 return; 2665 2692 } 2666 2693 2694 + listener_del_address_space(listener, listener->address_space); 2667 2695 QTAILQ_REMOVE(&memory_listeners, listener, link); 2668 2696 QTAILQ_REMOVE(&listener->address_space->listeners, listener, link_as); 2669 2697 listener->address_space = NULL;
+1
numa.c
··· 463 463 if (mem_prealloc) { 464 464 exit(1); 465 465 } 466 + error_report("falling back to regular RAM allocation."); 466 467 467 468 /* Legacy behavior: if allocation failed, fall back to 468 469 * regular RAM allocation.
+26 -4
qemu-options.hx
··· 31 31 "-machine [type=]name[,prop[=value][,...]]\n" 32 32 " selects emulated machine ('-machine help' for list)\n" 33 33 " property accel=accel1[:accel2[:...]] selects accelerator\n" 34 - " supported accelerators are kvm, xen, hax, hvf or tcg (default: tcg)\n" 34 + " supported accelerators are kvm, xen, hax, hvf, whpx or tcg (default: tcg)\n" 35 35 " kernel_irqchip=on|off|split controls accelerated irqchip support (default=off)\n" 36 36 " vmport=on|off|auto controls emulation of vmport (default: auto)\n" 37 37 " kvm_shadow_mem=size of KVM shadow MMU in bytes\n" ··· 66 66 @table @option 67 67 @item accel=@var{accels1}[:@var{accels2}[:...]] 68 68 This is used to enable an accelerator. Depending on the target architecture, 69 - kvm, xen, hax, hvf or tcg can be available. By default, tcg is used. If there is 69 + kvm, xen, hax, hvf, whpx or tcg can be available. By default, tcg is used. If there is 70 70 more than one accelerator specified, the next one is used if the previous one 71 71 fails to initialize. 72 72 @item kernel_irqchip=on|off ··· 126 126 127 127 DEF("accel", HAS_ARG, QEMU_OPTION_accel, 128 128 "-accel [accel=]accelerator[,thread=single|multi]\n" 129 - " select accelerator (kvm, xen, hax, hvf or tcg; use 'help' for a list)\n" 129 + " select accelerator (kvm, xen, hax, hvf, whpx or tcg; use 'help' for a list)\n" 130 130 " thread=single|multi (enable multi-threaded TCG)", QEMU_ARCH_ALL) 131 131 STEXI 132 132 @item -accel @var{name}[,prop=@var{value}[,...]] 133 133 @findex -accel 134 134 This is used to enable an accelerator. Depending on the target architecture, 135 - kvm, xen, hax, hvf or tcg can be available. By default, tcg is used. If there is 135 + kvm, xen, hax, hvf, whpx or tcg can be available. By default, tcg is used. If there is 136 136 more than one accelerator specified, the next one is used if the previous one 137 137 fails to initialize. 
138 138 @table @option ··· 4023 4023 Memory backend objects offer more control than the @option{-m} option that is 4024 4024 traditionally used to define guest RAM. Please refer to 4025 4025 @option{memory-backend-file} for a description of the options. 4026 + 4027 + @item -object memory-backend-memfd,id=@var{id},merge=@var{on|off},dump=@var{on|off},prealloc=@var{on|off},size=@var{size},host-nodes=@var{host-nodes},policy=@var{default|preferred|bind|interleave},seal=@var{on|off},hugetlb=@var{on|off},hugetlbsize=@var{size} 4028 + 4029 + Creates an anonymous memory file backend object, which allows QEMU to 4030 + share the memory with an external process (e.g. when using 4031 + vhost-user). The memory is allocated with memfd and optional 4032 + sealing. (Linux only) 4033 + 4034 + The @option{seal} option creates a sealed-file, that will block 4035 + further resizing the memory ('on' by default). 4036 + 4037 + The @option{hugetlb} option specify the file to be created resides in 4038 + the hugetlbfs filesystem (since Linux 4.14). Used in conjunction with 4039 + the @option{hugetlb} option, the @option{hugetlbsize} option specify 4040 + the hugetlb page size on systems that support multiple hugetlb page 4041 + sizes (it must be a power of 2 value supported by the system). 4042 + 4043 + In some versions of Linux, the @option{hugetlb} option is incompatible 4044 + with the @option{seal} option (requires at least Linux 4.16). 4045 + 4046 + Please refer to @option{memory-backend-file} for a description of the 4047 + other options. 4026 4048 4027 4049 @item -object rng-random,id=@var{id},filename=@var{/dev/random} 4028 4050
+1
scripts/qemugdb/timers.py
··· 1 1 #!/usr/bin/python 2 + # -*- coding: utf-8 -*- 2 3 # GDB debugging support 3 4 # 4 5 # Copyright 2017 Linaro Ltd
+2 -2
target/alpha/cpu.c
··· 233 233 CPUClass *cc = CPU_CLASS(oc); 234 234 AlphaCPUClass *acc = ALPHA_CPU_CLASS(oc); 235 235 236 - acc->parent_realize = dc->realize; 237 - dc->realize = alpha_cpu_realizefn; 236 + device_class_set_parent_realize(dc, alpha_cpu_realizefn, 237 + &acc->parent_realize); 238 238 239 239 cc->class_by_name = alpha_cpu_class_by_name; 240 240 cc->has_work = alpha_cpu_has_work;
+2 -2
target/arm/cpu.c
··· 1722 1722 CPUClass *cc = CPU_CLASS(acc); 1723 1723 DeviceClass *dc = DEVICE_CLASS(oc); 1724 1724 1725 - acc->parent_realize = dc->realize; 1726 - dc->realize = arm_cpu_realizefn; 1725 + device_class_set_parent_realize(dc, arm_cpu_realizefn, 1726 + &acc->parent_realize); 1727 1727 dc->props = arm_cpu_properties; 1728 1728 1729 1729 acc->parent_reset = cc->reset;
+1 -1
target/arm/kvm.c
··· 266 266 { 267 267 KVMDevice *kd, *tkd; 268 268 269 - memory_listener_unregister(&devlistener); 270 269 QSLIST_FOREACH_SAFE(kd, &kvm_devices_head, entries, tkd) { 271 270 if (kd->kda.addr != -1) { 272 271 kvm_arm_set_device_addr(kd); ··· 274 273 memory_region_unref(kd->mr); 275 274 g_free(kd); 276 275 } 276 + memory_listener_unregister(&devlistener); 277 277 } 278 278 279 279 static Notifier notify = {
+2 -2
target/cris/cpu.c
··· 260 260 CPUClass *cc = CPU_CLASS(oc); 261 261 CRISCPUClass *ccc = CRIS_CPU_CLASS(oc); 262 262 263 - ccc->parent_realize = dc->realize; 264 - dc->realize = cris_cpu_realizefn; 263 + device_class_set_parent_realize(dc, cris_cpu_realizefn, 264 + &ccc->parent_realize); 265 265 266 266 ccc->parent_reset = cc->reset; 267 267 cc->reset = cris_cpu_reset;
+2 -2
target/hppa/cpu.c
··· 168 168 CPUClass *cc = CPU_CLASS(oc); 169 169 HPPACPUClass *acc = HPPA_CPU_CLASS(oc); 170 170 171 - acc->parent_realize = dc->realize; 172 - dc->realize = hppa_cpu_realizefn; 171 + device_class_set_parent_realize(dc, hppa_cpu_realizefn, 172 + &acc->parent_realize); 173 173 174 174 cc->class_by_name = hppa_cpu_class_by_name; 175 175 cc->has_work = hppa_cpu_has_work;
+1
target/i386/Makefile.objs
··· 14 14 obj-$(CONFIG_HAX) += hax-all.o hax-mem.o hax-darwin.o 15 15 obj-$(CONFIG_HVF) += hvf/ 16 16 endif 17 + obj-$(CONFIG_WHPX) += whpx-all.o
+4 -4
target/i386/cpu.c
··· 4705 4705 CPUClass *cc = CPU_CLASS(oc); 4706 4706 DeviceClass *dc = DEVICE_CLASS(oc); 4707 4707 4708 - xcc->parent_realize = dc->realize; 4709 - xcc->parent_unrealize = dc->unrealize; 4710 - dc->realize = x86_cpu_realizefn; 4711 - dc->unrealize = x86_cpu_unrealizefn; 4708 + device_class_set_parent_realize(dc, x86_cpu_realizefn, 4709 + &xcc->parent_realize); 4710 + device_class_set_parent_unrealize(dc, x86_cpu_unrealizefn, 4711 + &xcc->parent_unrealize); 4712 4712 dc->props = x86_cpu_properties; 4713 4713 4714 4714 xcc->parent_reset = cc->reset;
+1 -1
target/i386/helper.c
··· 986 986 X86CPU *cpu = x86_env_get_cpu(env); 987 987 CPUState *cs = CPU(cpu); 988 988 989 - if (kvm_enabled()) { 989 + if (kvm_enabled() || whpx_enabled()) { 990 990 env->tpr_access_type = access; 991 991 992 992 cpu_interrupt(cs, CPU_INTERRUPT_TPR);
+27
target/i386/hvf/hvf.c
··· 17 17 * 18 18 * You should have received a copy of the GNU Lesser General Public 19 19 * License along with this program; if not, see <http://www.gnu.org/licenses/>. 20 + * 21 + * This file contains code under public domain from the hvdos project: 22 + * https://github.com/mist64/hvdos 23 + * 24 + * Parts Copyright (c) 2011 NetApp, Inc. 25 + * All rights reserved. 26 + * 27 + * Redistribution and use in source and binary forms, with or without 28 + * modification, are permitted provided that the following conditions 29 + * are met: 30 + * 1. Redistributions of source code must retain the above copyright 31 + * notice, this list of conditions and the following disclaimer. 32 + * 2. Redistributions in binary form must reproduce the above copyright 33 + * notice, this list of conditions and the following disclaimer in the 34 + * documentation and/or other materials provided with the distribution. 35 + * 36 + * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND 37 + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 38 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 39 + * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE 40 + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 41 + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 42 + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 43 + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 44 + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 45 + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 46 + * SUCH DAMAGE. 20 47 */ 21 48 #include "qemu/osdep.h" 22 49 #include "qemu-common.h"
+3
target/i386/hvf/vmx.h
··· 17 17 * 18 18 * You should have received a copy of the GNU Lesser General Public 19 19 * License along with this program; if not, see <http://www.gnu.org/licenses/>. 20 + * 21 + * This file contains code under public domain from the hvdos project: 22 + * https://github.com/mist64/hvdos 20 23 */ 21 24 22 25 #ifndef VMX_H
+1366
target/i386/whpx-all.c
··· 1 + /* 2 + * QEMU Windows Hypervisor Platform accelerator (WHPX) 3 + * 4 + * Copyright Microsoft Corp. 2017 5 + * 6 + * This work is licensed under the terms of the GNU GPL, version 2 or later. 7 + * See the COPYING file in the top-level directory. 8 + * 9 + */ 10 + 11 + #include "qemu/osdep.h" 12 + #include "cpu.h" 13 + #include "exec/address-spaces.h" 14 + #include "exec/exec-all.h" 15 + #include "exec/ioport.h" 16 + #include "qemu-common.h" 17 + #include "strings.h" 18 + #include "sysemu/accel.h" 19 + #include "sysemu/whpx.h" 20 + #include "sysemu/sysemu.h" 21 + #include "sysemu/cpus.h" 22 + #include "qemu/main-loop.h" 23 + #include "hw/boards.h" 24 + #include "qemu/error-report.h" 25 + #include "qemu/queue.h" 26 + #include "qapi/error.h" 27 + #include "migration/blocker.h" 28 + 29 + #include <winhvplatform.h> 30 + #include <winhvemulation.h> 31 + 32 + struct whpx_state { 33 + uint64_t mem_quota; 34 + WHV_PARTITION_HANDLE partition; 35 + uint32_t exit_ctx_size; 36 + }; 37 + 38 + static const WHV_REGISTER_NAME whpx_register_names[] = { 39 + 40 + /* X64 General purpose registers */ 41 + WHvX64RegisterRax, 42 + WHvX64RegisterRcx, 43 + WHvX64RegisterRdx, 44 + WHvX64RegisterRbx, 45 + WHvX64RegisterRsp, 46 + WHvX64RegisterRbp, 47 + WHvX64RegisterRsi, 48 + WHvX64RegisterRdi, 49 + WHvX64RegisterR8, 50 + WHvX64RegisterR9, 51 + WHvX64RegisterR10, 52 + WHvX64RegisterR11, 53 + WHvX64RegisterR12, 54 + WHvX64RegisterR13, 55 + WHvX64RegisterR14, 56 + WHvX64RegisterR15, 57 + WHvX64RegisterRip, 58 + WHvX64RegisterRflags, 59 + 60 + /* X64 Segment registers */ 61 + WHvX64RegisterEs, 62 + WHvX64RegisterCs, 63 + WHvX64RegisterSs, 64 + WHvX64RegisterDs, 65 + WHvX64RegisterFs, 66 + WHvX64RegisterGs, 67 + WHvX64RegisterLdtr, 68 + WHvX64RegisterTr, 69 + 70 + /* X64 Table registers */ 71 + WHvX64RegisterIdtr, 72 + WHvX64RegisterGdtr, 73 + 74 + /* X64 Control Registers */ 75 + WHvX64RegisterCr0, 76 + WHvX64RegisterCr2, 77 + WHvX64RegisterCr3, 78 + WHvX64RegisterCr4, 79 + 
WHvX64RegisterCr8, 80 + 81 + /* X64 Debug Registers */ 82 + /* 83 + * WHvX64RegisterDr0, 84 + * WHvX64RegisterDr1, 85 + * WHvX64RegisterDr2, 86 + * WHvX64RegisterDr3, 87 + * WHvX64RegisterDr6, 88 + * WHvX64RegisterDr7, 89 + */ 90 + 91 + /* X64 Floating Point and Vector Registers */ 92 + WHvX64RegisterXmm0, 93 + WHvX64RegisterXmm1, 94 + WHvX64RegisterXmm2, 95 + WHvX64RegisterXmm3, 96 + WHvX64RegisterXmm4, 97 + WHvX64RegisterXmm5, 98 + WHvX64RegisterXmm6, 99 + WHvX64RegisterXmm7, 100 + WHvX64RegisterXmm8, 101 + WHvX64RegisterXmm9, 102 + WHvX64RegisterXmm10, 103 + WHvX64RegisterXmm11, 104 + WHvX64RegisterXmm12, 105 + WHvX64RegisterXmm13, 106 + WHvX64RegisterXmm14, 107 + WHvX64RegisterXmm15, 108 + WHvX64RegisterFpMmx0, 109 + WHvX64RegisterFpMmx1, 110 + WHvX64RegisterFpMmx2, 111 + WHvX64RegisterFpMmx3, 112 + WHvX64RegisterFpMmx4, 113 + WHvX64RegisterFpMmx5, 114 + WHvX64RegisterFpMmx6, 115 + WHvX64RegisterFpMmx7, 116 + WHvX64RegisterFpControlStatus, 117 + WHvX64RegisterXmmControlStatus, 118 + 119 + /* X64 MSRs */ 120 + WHvX64RegisterTsc, 121 + WHvX64RegisterEfer, 122 + #ifdef TARGET_X86_64 123 + WHvX64RegisterKernelGsBase, 124 + #endif 125 + WHvX64RegisterApicBase, 126 + /* WHvX64RegisterPat, */ 127 + WHvX64RegisterSysenterCs, 128 + WHvX64RegisterSysenterEip, 129 + WHvX64RegisterSysenterEsp, 130 + WHvX64RegisterStar, 131 + #ifdef TARGET_X86_64 132 + WHvX64RegisterLstar, 133 + WHvX64RegisterCstar, 134 + WHvX64RegisterSfmask, 135 + #endif 136 + 137 + /* Interrupt / Event Registers */ 138 + /* 139 + * WHvRegisterPendingInterruption, 140 + * WHvRegisterInterruptState, 141 + * WHvRegisterPendingEvent0, 142 + * WHvRegisterPendingEvent1 143 + * WHvX64RegisterDeliverabilityNotifications, 144 + */ 145 + }; 146 + 147 + struct whpx_register_set { 148 + WHV_REGISTER_VALUE values[RTL_NUMBER_OF(whpx_register_names)]; 149 + }; 150 + 151 + struct whpx_vcpu { 152 + WHV_EMULATOR_HANDLE emulator; 153 + bool window_registered; 154 + bool interruptable; 155 + uint64_t tpr; 156 + uint64_t 
apic_base; 157 + WHV_X64_PENDING_INTERRUPTION_REGISTER interrupt_in_flight; 158 + 159 + /* Must be the last field as it may have a tail */ 160 + WHV_RUN_VP_EXIT_CONTEXT exit_ctx; 161 + }; 162 + 163 + static bool whpx_allowed; 164 + 165 + struct whpx_state whpx_global; 166 + 167 + 168 + /* 169 + * VP support 170 + */ 171 + 172 + static struct whpx_vcpu *get_whpx_vcpu(CPUState *cpu) 173 + { 174 + return (struct whpx_vcpu *)cpu->hax_vcpu; 175 + } 176 + 177 + static WHV_X64_SEGMENT_REGISTER whpx_seg_q2h(const SegmentCache *qs, int v86, 178 + int r86) 179 + { 180 + WHV_X64_SEGMENT_REGISTER hs; 181 + unsigned flags = qs->flags; 182 + 183 + hs.Base = qs->base; 184 + hs.Limit = qs->limit; 185 + hs.Selector = qs->selector; 186 + 187 + if (v86) { 188 + hs.Attributes = 0; 189 + hs.SegmentType = 3; 190 + hs.Present = 1; 191 + hs.DescriptorPrivilegeLevel = 3; 192 + hs.NonSystemSegment = 1; 193 + 194 + } else { 195 + hs.Attributes = (flags >> DESC_TYPE_SHIFT); 196 + 197 + if (r86) { 198 + /* hs.Base &= 0xfffff; */ 199 + } 200 + } 201 + 202 + return hs; 203 + } 204 + 205 + static SegmentCache whpx_seg_h2q(const WHV_X64_SEGMENT_REGISTER *hs) 206 + { 207 + SegmentCache qs; 208 + 209 + qs.base = hs->Base; 210 + qs.limit = hs->Limit; 211 + qs.selector = hs->Selector; 212 + 213 + qs.flags = ((uint32_t)hs->Attributes) << DESC_TYPE_SHIFT; 214 + 215 + return qs; 216 + } 217 + 218 + static void whpx_set_registers(CPUState *cpu) 219 + { 220 + struct whpx_state *whpx = &whpx_global; 221 + struct whpx_vcpu *vcpu = get_whpx_vcpu(cpu); 222 + struct CPUX86State *env = (CPUArchState *)(cpu->env_ptr); 223 + X86CPU *x86_cpu = X86_CPU(cpu); 224 + struct whpx_register_set vcxt = {0}; 225 + HRESULT hr; 226 + int idx = 0; 227 + int i; 228 + int v86, r86; 229 + 230 + assert(cpu_is_stopped(cpu) || qemu_cpu_is_self(cpu)); 231 + 232 + v86 = (env->eflags & VM_MASK); 233 + r86 = !(env->cr[0] & CR0_PE_MASK); 234 + 235 + vcpu->tpr = cpu_get_apic_tpr(x86_cpu->apic_state); 236 + vcpu->apic_base = 
cpu_get_apic_base(x86_cpu->apic_state); 237 + 238 + /* Indexes for first 16 registers match between HV and QEMU definitions */ 239 + for (idx = 0; idx < CPU_NB_REGS64; idx += 1) { 240 + vcxt.values[idx].Reg64 = env->regs[idx]; 241 + } 242 + 243 + /* Same goes for RIP and RFLAGS */ 244 + assert(whpx_register_names[idx] == WHvX64RegisterRip); 245 + vcxt.values[idx++].Reg64 = env->eip; 246 + 247 + assert(whpx_register_names[idx] == WHvX64RegisterRflags); 248 + vcxt.values[idx++].Reg64 = env->eflags; 249 + 250 + /* Translate 6+4 segment registers. HV and QEMU order matches */ 251 + assert(idx == WHvX64RegisterEs); 252 + for (i = 0; i < 6; i += 1, idx += 1) { 253 + vcxt.values[idx].Segment = whpx_seg_q2h(&env->segs[i], v86, r86); 254 + } 255 + 256 + assert(idx == WHvX64RegisterLdtr); 257 + vcxt.values[idx++].Segment = whpx_seg_q2h(&env->ldt, 0, 0); 258 + 259 + assert(idx == WHvX64RegisterTr); 260 + vcxt.values[idx++].Segment = whpx_seg_q2h(&env->tr, 0, 0); 261 + 262 + assert(idx == WHvX64RegisterIdtr); 263 + vcxt.values[idx].Table.Base = env->idt.base; 264 + vcxt.values[idx].Table.Limit = env->idt.limit; 265 + idx += 1; 266 + 267 + assert(idx == WHvX64RegisterGdtr); 268 + vcxt.values[idx].Table.Base = env->gdt.base; 269 + vcxt.values[idx].Table.Limit = env->gdt.limit; 270 + idx += 1; 271 + 272 + /* CR0, 2, 3, 4, 8 */ 273 + assert(whpx_register_names[idx] == WHvX64RegisterCr0); 274 + vcxt.values[idx++].Reg64 = env->cr[0]; 275 + assert(whpx_register_names[idx] == WHvX64RegisterCr2); 276 + vcxt.values[idx++].Reg64 = env->cr[2]; 277 + assert(whpx_register_names[idx] == WHvX64RegisterCr3); 278 + vcxt.values[idx++].Reg64 = env->cr[3]; 279 + assert(whpx_register_names[idx] == WHvX64RegisterCr4); 280 + vcxt.values[idx++].Reg64 = env->cr[4]; 281 + assert(whpx_register_names[idx] == WHvX64RegisterCr8); 282 + vcxt.values[idx++].Reg64 = vcpu->tpr; 283 + 284 + /* 8 Debug Registers - Skipped */ 285 + 286 + /* 16 XMM registers */ 287 + assert(whpx_register_names[idx] == 
WHvX64RegisterXmm0); 288 + for (i = 0; i < 16; i += 1, idx += 1) { 289 + vcxt.values[idx].Reg128.Low64 = env->xmm_regs[i].ZMM_Q(0); 290 + vcxt.values[idx].Reg128.High64 = env->xmm_regs[i].ZMM_Q(1); 291 + } 292 + 293 + /* 8 FP registers */ 294 + assert(whpx_register_names[idx] == WHvX64RegisterFpMmx0); 295 + for (i = 0; i < 8; i += 1, idx += 1) { 296 + vcxt.values[idx].Fp.AsUINT128.Low64 = env->fpregs[i].mmx.MMX_Q(0); 297 + /* vcxt.values[idx].Fp.AsUINT128.High64 = 298 + env->fpregs[i].mmx.MMX_Q(1); 299 + */ 300 + } 301 + 302 + /* FP control status register */ 303 + assert(whpx_register_names[idx] == WHvX64RegisterFpControlStatus); 304 + vcxt.values[idx].FpControlStatus.FpControl = env->fpuc; 305 + vcxt.values[idx].FpControlStatus.FpStatus = 306 + (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11; 307 + vcxt.values[idx].FpControlStatus.FpTag = 0; 308 + for (i = 0; i < 8; ++i) { 309 + vcxt.values[idx].FpControlStatus.FpTag |= (!env->fptags[i]) << i; 310 + } 311 + vcxt.values[idx].FpControlStatus.Reserved = 0; 312 + vcxt.values[idx].FpControlStatus.LastFpOp = env->fpop; 313 + vcxt.values[idx].FpControlStatus.LastFpRip = env->fpip; 314 + idx += 1; 315 + 316 + /* XMM control status register */ 317 + assert(whpx_register_names[idx] == WHvX64RegisterXmmControlStatus); 318 + vcxt.values[idx].XmmControlStatus.LastFpRdp = 0; 319 + vcxt.values[idx].XmmControlStatus.XmmStatusControl = env->mxcsr; 320 + vcxt.values[idx].XmmControlStatus.XmmStatusControlMask = 0x0000ffff; 321 + idx += 1; 322 + 323 + /* MSRs */ 324 + assert(whpx_register_names[idx] == WHvX64RegisterTsc); 325 + vcxt.values[idx++].Reg64 = env->tsc; 326 + assert(whpx_register_names[idx] == WHvX64RegisterEfer); 327 + vcxt.values[idx++].Reg64 = env->efer; 328 + #ifdef TARGET_X86_64 329 + assert(whpx_register_names[idx] == WHvX64RegisterKernelGsBase); 330 + vcxt.values[idx++].Reg64 = env->kernelgsbase; 331 + #endif 332 + 333 + assert(whpx_register_names[idx] == WHvX64RegisterApicBase); 334 + vcxt.values[idx++].Reg64 = 
vcpu->apic_base; 335 + 336 + /* WHvX64RegisterPat - Skipped */ 337 + 338 + assert(whpx_register_names[idx] == WHvX64RegisterSysenterCs); 339 + vcxt.values[idx++].Reg64 = env->sysenter_cs; 340 + assert(whpx_register_names[idx] == WHvX64RegisterSysenterEip); 341 + vcxt.values[idx++].Reg64 = env->sysenter_eip; 342 + assert(whpx_register_names[idx] == WHvX64RegisterSysenterEsp); 343 + vcxt.values[idx++].Reg64 = env->sysenter_esp; 344 + assert(whpx_register_names[idx] == WHvX64RegisterStar); 345 + vcxt.values[idx++].Reg64 = env->star; 346 + #ifdef TARGET_X86_64 347 + assert(whpx_register_names[idx] == WHvX64RegisterLstar); 348 + vcxt.values[idx++].Reg64 = env->lstar; 349 + assert(whpx_register_names[idx] == WHvX64RegisterCstar); 350 + vcxt.values[idx++].Reg64 = env->cstar; 351 + assert(whpx_register_names[idx] == WHvX64RegisterSfmask); 352 + vcxt.values[idx++].Reg64 = env->fmask; 353 + #endif 354 + 355 + /* Interrupt / Event Registers - Skipped */ 356 + 357 + assert(idx == RTL_NUMBER_OF(whpx_register_names)); 358 + 359 + hr = WHvSetVirtualProcessorRegisters(whpx->partition, cpu->cpu_index, 360 + whpx_register_names, 361 + RTL_NUMBER_OF(whpx_register_names), 362 + &vcxt.values[0]); 363 + 364 + if (FAILED(hr)) { 365 + error_report("WHPX: Failed to set virtual processor context, hr=%08lx", 366 + hr); 367 + __debugbreak(); 368 + } 369 + 370 + return; 371 + } 372 + 373 + static void whpx_get_registers(CPUState *cpu) 374 + { 375 + struct whpx_state *whpx = &whpx_global; 376 + struct whpx_vcpu *vcpu = get_whpx_vcpu(cpu); 377 + struct CPUX86State *env = (CPUArchState *)(cpu->env_ptr); 378 + X86CPU *x86_cpu = X86_CPU(cpu); 379 + struct whpx_register_set vcxt; 380 + uint64_t tpr, apic_base; 381 + HRESULT hr; 382 + int idx = 0; 383 + int i; 384 + 385 + assert(cpu_is_stopped(cpu) || qemu_cpu_is_self(cpu)); 386 + 387 + hr = WHvGetVirtualProcessorRegisters(whpx->partition, cpu->cpu_index, 388 + whpx_register_names, 389 + RTL_NUMBER_OF(whpx_register_names), 390 + &vcxt.values[0]); 391 
+ if (FAILED(hr)) { 392 + error_report("WHPX: Failed to get virtual processor context, hr=%08lx", 393 + hr); 394 + __debugbreak(); 395 + } 396 + 397 + /* Indexes for first 16 registers match between HV and QEMU definitions */ 398 + for (idx = 0; idx < CPU_NB_REGS64; idx += 1) { 399 + env->regs[idx] = vcxt.values[idx].Reg64; 400 + } 401 + 402 + /* Same goes for RIP and RFLAGS */ 403 + assert(whpx_register_names[idx] == WHvX64RegisterRip); 404 + env->eip = vcxt.values[idx++].Reg64; 405 + assert(whpx_register_names[idx] == WHvX64RegisterRflags); 406 + env->eflags = vcxt.values[idx++].Reg64; 407 + 408 + /* Translate 6+4 segment registers. HV and QEMU order matches */ 409 + assert(idx == WHvX64RegisterEs); 410 + for (i = 0; i < 6; i += 1, idx += 1) { 411 + env->segs[i] = whpx_seg_h2q(&vcxt.values[idx].Segment); 412 + } 413 + 414 + assert(idx == WHvX64RegisterLdtr); 415 + env->ldt = whpx_seg_h2q(&vcxt.values[idx++].Segment); 416 + assert(idx == WHvX64RegisterTr); 417 + env->tr = whpx_seg_h2q(&vcxt.values[idx++].Segment); 418 + assert(idx == WHvX64RegisterIdtr); 419 + env->idt.base = vcxt.values[idx].Table.Base; 420 + env->idt.limit = vcxt.values[idx].Table.Limit; 421 + idx += 1; 422 + assert(idx == WHvX64RegisterGdtr); 423 + env->gdt.base = vcxt.values[idx].Table.Base; 424 + env->gdt.limit = vcxt.values[idx].Table.Limit; 425 + idx += 1; 426 + 427 + /* CR0, 2, 3, 4, 8 */ 428 + assert(whpx_register_names[idx] == WHvX64RegisterCr0); 429 + env->cr[0] = vcxt.values[idx++].Reg64; 430 + assert(whpx_register_names[idx] == WHvX64RegisterCr2); 431 + env->cr[2] = vcxt.values[idx++].Reg64; 432 + assert(whpx_register_names[idx] == WHvX64RegisterCr3); 433 + env->cr[3] = vcxt.values[idx++].Reg64; 434 + assert(whpx_register_names[idx] == WHvX64RegisterCr4); 435 + env->cr[4] = vcxt.values[idx++].Reg64; 436 + assert(whpx_register_names[idx] == WHvX64RegisterCr8); 437 + tpr = vcxt.values[idx++].Reg64; 438 + if (tpr != vcpu->tpr) { 439 + vcpu->tpr = tpr; 440 + 
cpu_set_apic_tpr(x86_cpu->apic_state, tpr); 441 + } 442 + 443 + /* 8 Debug Registers - Skipped */ 444 + 445 + /* 16 XMM registers */ 446 + assert(whpx_register_names[idx] == WHvX64RegisterXmm0); 447 + for (i = 0; i < 16; i += 1, idx += 1) { 448 + env->xmm_regs[i].ZMM_Q(0) = vcxt.values[idx].Reg128.Low64; 449 + env->xmm_regs[i].ZMM_Q(1) = vcxt.values[idx].Reg128.High64; 450 + } 451 + 452 + /* 8 FP registers */ 453 + assert(whpx_register_names[idx] == WHvX64RegisterFpMmx0); 454 + for (i = 0; i < 8; i += 1, idx += 1) { 455 + env->fpregs[i].mmx.MMX_Q(0) = vcxt.values[idx].Fp.AsUINT128.Low64; 456 + /* env->fpregs[i].mmx.MMX_Q(1) = 457 + vcxt.values[idx].Fp.AsUINT128.High64; 458 + */ 459 + } 460 + 461 + /* FP control status register */ 462 + assert(whpx_register_names[idx] == WHvX64RegisterFpControlStatus); 463 + env->fpuc = vcxt.values[idx].FpControlStatus.FpControl; 464 + env->fpstt = (vcxt.values[idx].FpControlStatus.FpStatus >> 11) & 0x7; 465 + env->fpus = vcxt.values[idx].FpControlStatus.FpStatus & ~0x3800; 466 + for (i = 0; i < 8; ++i) { 467 + env->fptags[i] = !((vcxt.values[idx].FpControlStatus.FpTag >> i) & 1); 468 + } 469 + env->fpop = vcxt.values[idx].FpControlStatus.LastFpOp; 470 + env->fpip = vcxt.values[idx].FpControlStatus.LastFpRip; 471 + idx += 1; 472 + 473 + /* XMM control status register */ 474 + assert(whpx_register_names[idx] == WHvX64RegisterXmmControlStatus); 475 + env->mxcsr = vcxt.values[idx].XmmControlStatus.XmmStatusControl; 476 + idx += 1; 477 + 478 + /* MSRs */ 479 + assert(whpx_register_names[idx] == WHvX64RegisterTsc); 480 + env->tsc = vcxt.values[idx++].Reg64; 481 + assert(whpx_register_names[idx] == WHvX64RegisterEfer); 482 + env->efer = vcxt.values[idx++].Reg64; 483 + #ifdef TARGET_X86_64 484 + assert(whpx_register_names[idx] == WHvX64RegisterKernelGsBase); 485 + env->kernelgsbase = vcxt.values[idx++].Reg64; 486 + #endif 487 + 488 + assert(whpx_register_names[idx] == WHvX64RegisterApicBase); 489 + apic_base = vcxt.values[idx++].Reg64; 490 
+ if (apic_base != vcpu->apic_base) { 491 + vcpu->apic_base = apic_base; 492 + cpu_set_apic_base(x86_cpu->apic_state, vcpu->apic_base); 493 + } 494 + 495 + /* WHvX64RegisterPat - Skipped */ 496 + 497 + assert(whpx_register_names[idx] == WHvX64RegisterSysenterCs); 498 + env->sysenter_cs = vcxt.values[idx++].Reg64;; 499 + assert(whpx_register_names[idx] == WHvX64RegisterSysenterEip); 500 + env->sysenter_eip = vcxt.values[idx++].Reg64; 501 + assert(whpx_register_names[idx] == WHvX64RegisterSysenterEsp); 502 + env->sysenter_esp = vcxt.values[idx++].Reg64; 503 + assert(whpx_register_names[idx] == WHvX64RegisterStar); 504 + env->star = vcxt.values[idx++].Reg64; 505 + #ifdef TARGET_X86_64 506 + assert(whpx_register_names[idx] == WHvX64RegisterLstar); 507 + env->lstar = vcxt.values[idx++].Reg64; 508 + assert(whpx_register_names[idx] == WHvX64RegisterCstar); 509 + env->cstar = vcxt.values[idx++].Reg64; 510 + assert(whpx_register_names[idx] == WHvX64RegisterSfmask); 511 + env->fmask = vcxt.values[idx++].Reg64; 512 + #endif 513 + 514 + /* Interrupt / Event Registers - Skipped */ 515 + 516 + assert(idx == RTL_NUMBER_OF(whpx_register_names)); 517 + 518 + return; 519 + } 520 + 521 + static HRESULT CALLBACK whpx_emu_ioport_callback( 522 + void *ctx, 523 + WHV_EMULATOR_IO_ACCESS_INFO *IoAccess) 524 + { 525 + MemTxAttrs attrs = { 0 }; 526 + address_space_rw(&address_space_io, IoAccess->Port, attrs, 527 + (uint8_t *)&IoAccess->Data, IoAccess->AccessSize, 528 + IoAccess->Direction); 529 + return S_OK; 530 + } 531 + 532 + static HRESULT CALLBACK whpx_emu_memio_callback( 533 + void *ctx, 534 + WHV_EMULATOR_MEMORY_ACCESS_INFO *ma) 535 + { 536 + cpu_physical_memory_rw(ma->GpaAddress, ma->Data, ma->AccessSize, 537 + ma->Direction); 538 + return S_OK; 539 + } 540 + 541 + static HRESULT CALLBACK whpx_emu_getreg_callback( 542 + void *ctx, 543 + const WHV_REGISTER_NAME *RegisterNames, 544 + UINT32 RegisterCount, 545 + WHV_REGISTER_VALUE *RegisterValues) 546 + { 547 + HRESULT hr; 548 + struct 
whpx_state *whpx = &whpx_global; 549 + CPUState *cpu = (CPUState *)ctx; 550 + 551 + hr = WHvGetVirtualProcessorRegisters(whpx->partition, cpu->cpu_index, 552 + RegisterNames, RegisterCount, 553 + RegisterValues); 554 + if (FAILED(hr)) { 555 + error_report("WHPX: Failed to get virtual processor registers," 556 + " hr=%08lx", hr); 557 + __debugbreak(); 558 + } 559 + 560 + return hr; 561 + } 562 + 563 + static HRESULT CALLBACK whpx_emu_setreg_callback( 564 + void *ctx, 565 + const WHV_REGISTER_NAME *RegisterNames, 566 + UINT32 RegisterCount, 567 + const WHV_REGISTER_VALUE *RegisterValues) 568 + { 569 + HRESULT hr; 570 + struct whpx_state *whpx = &whpx_global; 571 + CPUState *cpu = (CPUState *)ctx; 572 + 573 + hr = WHvSetVirtualProcessorRegisters(whpx->partition, cpu->cpu_index, 574 + RegisterNames, RegisterCount, 575 + RegisterValues); 576 + if (FAILED(hr)) { 577 + error_report("WHPX: Failed to set virtual processor registers," 578 + " hr=%08lx", hr); 579 + __debugbreak(); 580 + } 581 + 582 + /* 583 + * The emulator just successfully wrote the register state. We clear the 584 + * dirty state so we avoid the double write on resume of the VP. 
585 + */ 586 + cpu->vcpu_dirty = false; 587 + 588 + return hr; 589 + } 590 + 591 + static HRESULT CALLBACK whpx_emu_translate_callback( 592 + void *ctx, 593 + WHV_GUEST_VIRTUAL_ADDRESS Gva, 594 + WHV_TRANSLATE_GVA_FLAGS TranslateFlags, 595 + WHV_TRANSLATE_GVA_RESULT_CODE *TranslationResult, 596 + WHV_GUEST_PHYSICAL_ADDRESS *Gpa) 597 + { 598 + HRESULT hr; 599 + struct whpx_state *whpx = &whpx_global; 600 + CPUState *cpu = (CPUState *)ctx; 601 + WHV_TRANSLATE_GVA_RESULT res; 602 + 603 + hr = WHvTranslateGva(whpx->partition, cpu->cpu_index, 604 + Gva, TranslateFlags, &res, Gpa); 605 + if (FAILED(hr)) { 606 + error_report("WHPX: Failed to translate GVA, hr=%08lx", hr); 607 + __debugbreak(); 608 + } else { 609 + *TranslationResult = res.ResultCode; 610 + } 611 + 612 + return hr; 613 + } 614 + 615 + static const WHV_EMULATOR_CALLBACKS whpx_emu_callbacks = { 616 + .WHvEmulatorIoPortCallback = whpx_emu_ioport_callback, 617 + .WHvEmulatorMemoryCallback = whpx_emu_memio_callback, 618 + .WHvEmulatorGetVirtualProcessorRegisters = whpx_emu_getreg_callback, 619 + .WHvEmulatorSetVirtualProcessorRegisters = whpx_emu_setreg_callback, 620 + .WHvEmulatorTranslateGvaPage = whpx_emu_translate_callback, 621 + }; 622 + 623 + static int whpx_handle_mmio(CPUState *cpu, WHV_MEMORY_ACCESS_CONTEXT *ctx) 624 + { 625 + HRESULT hr; 626 + struct whpx_vcpu *vcpu = get_whpx_vcpu(cpu); 627 + WHV_EMULATOR_STATUS emu_status; 628 + 629 + hr = WHvEmulatorTryMmioEmulation(vcpu->emulator, cpu, ctx, &emu_status); 630 + if (FAILED(hr)) { 631 + __debugbreak(); 632 + error_report("WHPX: Failed to parse MMIO access, hr=%08lx", hr); 633 + return -1; 634 + } 635 + 636 + if (!emu_status.EmulationSuccessful) { 637 + __debugbreak(); 638 + error_report("WHPX: Failed to emulate MMIO access"); 639 + return -1; 640 + } 641 + 642 + return 0; 643 + } 644 + 645 + static int whpx_handle_portio(CPUState *cpu, 646 + WHV_X64_IO_PORT_ACCESS_CONTEXT *ctx) 647 + { 648 + HRESULT hr; 649 + struct whpx_vcpu *vcpu = 
get_whpx_vcpu(cpu); 650 + WHV_EMULATOR_STATUS emu_status; 651 + 652 + hr = WHvEmulatorTryIoEmulation(vcpu->emulator, cpu, ctx, &emu_status); 653 + if (FAILED(hr)) { 654 + __debugbreak(); 655 + error_report("WHPX: Failed to parse PortIO access, hr=%08lx", hr); 656 + return -1; 657 + } 658 + 659 + if (!emu_status.EmulationSuccessful) { 660 + __debugbreak(); 661 + error_report("WHPX: Failed to emulate PortMMIO access"); 662 + return -1; 663 + } 664 + 665 + return 0; 666 + } 667 + 668 + static int whpx_handle_halt(CPUState *cpu) 669 + { 670 + struct CPUX86State *env = (CPUArchState *)(cpu->env_ptr); 671 + int ret = 0; 672 + 673 + qemu_mutex_lock_iothread(); 674 + if (!((cpu->interrupt_request & CPU_INTERRUPT_HARD) && 675 + (env->eflags & IF_MASK)) && 676 + !(cpu->interrupt_request & CPU_INTERRUPT_NMI)) { 677 + cpu->exception_index = EXCP_HLT; 678 + cpu->halted = true; 679 + ret = 1; 680 + } 681 + qemu_mutex_unlock_iothread(); 682 + 683 + return ret; 684 + } 685 + 686 + static void whpx_vcpu_pre_run(CPUState *cpu) 687 + { 688 + HRESULT hr; 689 + struct whpx_state *whpx = &whpx_global; 690 + struct whpx_vcpu *vcpu = get_whpx_vcpu(cpu); 691 + struct CPUX86State *env = (CPUArchState *)(cpu->env_ptr); 692 + X86CPU *x86_cpu = X86_CPU(cpu); 693 + int irq; 694 + WHV_X64_PENDING_INTERRUPTION_REGISTER new_int = {0}; 695 + UINT32 reg_count = 0; 696 + WHV_REGISTER_VALUE reg_values[3] = {0}; 697 + WHV_REGISTER_NAME reg_names[3]; 698 + 699 + qemu_mutex_lock_iothread(); 700 + 701 + /* Inject NMI */ 702 + if (!vcpu->interrupt_in_flight.InterruptionPending && 703 + cpu->interrupt_request & (CPU_INTERRUPT_NMI | CPU_INTERRUPT_SMI)) { 704 + if (cpu->interrupt_request & CPU_INTERRUPT_NMI) { 705 + cpu->interrupt_request &= ~CPU_INTERRUPT_NMI; 706 + vcpu->interruptable = false; 707 + new_int.InterruptionType = WHvX64PendingNmi; 708 + new_int.InterruptionPending = 1; 709 + new_int.InterruptionVector = 2; 710 + } 711 + if (cpu->interrupt_request & CPU_INTERRUPT_SMI) { 712 + 
qemu_mutex_lock_iothread(); 713 + cpu->interrupt_request &= ~CPU_INTERRUPT_SMI; 714 + __debugbreak(); 715 + qemu_mutex_unlock_iothread(); 716 + } 717 + } 718 + 719 + /* 720 + * Force the VCPU out of its inner loop to process any INIT requests or 721 + * commit pending TPR access. 722 + */ 723 + if (cpu->interrupt_request & (CPU_INTERRUPT_INIT | CPU_INTERRUPT_TPR)) { 724 + if ((cpu->interrupt_request & CPU_INTERRUPT_INIT) && 725 + !(env->hflags & HF_SMM_MASK)) { 726 + cpu->exit_request = 1; 727 + } 728 + if (cpu->interrupt_request & CPU_INTERRUPT_TPR) { 729 + cpu->exit_request = 1; 730 + } 731 + } 732 + 733 + /* Get pending hard interruption or replay one that was overwritten */ 734 + if (!vcpu->interrupt_in_flight.InterruptionPending && 735 + vcpu->interruptable && (env->eflags & IF_MASK)) { 736 + assert(!new_int.InterruptionPending); 737 + if (cpu->interrupt_request & CPU_INTERRUPT_HARD) { 738 + cpu->interrupt_request &= ~CPU_INTERRUPT_HARD; 739 + irq = cpu_get_pic_interrupt(env); 740 + if (irq >= 0) { 741 + new_int.InterruptionType = WHvX64PendingInterrupt; 742 + new_int.InterruptionPending = 1; 743 + new_int.InterruptionVector = irq; 744 + } 745 + } 746 + } 747 + 748 + /* Setup interrupt state if new one was prepared */ 749 + if (new_int.InterruptionPending) { 750 + reg_values[reg_count].PendingInterruption = new_int; 751 + reg_names[reg_count] = WHvRegisterPendingInterruption; 752 + reg_count += 1; 753 + } 754 + 755 + /* Sync the TPR to the CR8 if was modified during the intercept */ 756 + reg_values[reg_count].Reg64 = cpu_get_apic_tpr(x86_cpu->apic_state); 757 + if (reg_values[reg_count].Reg64 != vcpu->tpr) { 758 + vcpu->tpr = reg_values[reg_count].Reg64; 759 + cpu->exit_request = 1; 760 + reg_names[reg_count] = WHvX64RegisterCr8; 761 + reg_count += 1; 762 + } 763 + 764 + /* Update the state of the interrupt delivery notification */ 765 + if (cpu->interrupt_request & CPU_INTERRUPT_HARD) { 766 + 
reg_values[reg_count].DeliverabilityNotifications.InterruptNotification 767 + = 1; 768 + if (vcpu->window_registered != 1) { 769 + vcpu->window_registered = 1; 770 + } 771 + reg_names[reg_count] = WHvX64RegisterDeliverabilityNotifications; 772 + reg_count += 1; 773 + } 774 + 775 + qemu_mutex_unlock_iothread(); 776 + 777 + if (reg_count) { 778 + hr = WHvSetVirtualProcessorRegisters(whpx->partition, cpu->cpu_index, 779 + reg_names, reg_count, reg_values); 780 + if (FAILED(hr)) { 781 + error_report("WHPX: Failed to set interrupt state registers," 782 + " hr=%08lx", hr); 783 + __debugbreak(); 784 + } 785 + } 786 + 787 + return; 788 + } 789 + 790 + static void whpx_vcpu_post_run(CPUState *cpu) 791 + { 792 + HRESULT hr; 793 + struct whpx_state *whpx = &whpx_global; 794 + struct whpx_vcpu *vcpu = get_whpx_vcpu(cpu); 795 + struct CPUX86State *env = (CPUArchState *)(cpu->env_ptr); 796 + X86CPU *x86_cpu = X86_CPU(cpu); 797 + WHV_REGISTER_VALUE reg_values[4]; 798 + const WHV_REGISTER_NAME reg_names[4] = { 799 + WHvX64RegisterRflags, 800 + WHvX64RegisterCr8, 801 + WHvRegisterPendingInterruption, 802 + WHvRegisterInterruptState, 803 + }; 804 + 805 + hr = WHvGetVirtualProcessorRegisters(whpx->partition, cpu->cpu_index, 806 + reg_names, 4, reg_values); 807 + if (FAILED(hr)) { 808 + error_report("WHPX: Failed to get interrupt state regusters," 809 + " hr=%08lx", hr); 810 + __debugbreak(); 811 + vcpu->interruptable = false; 812 + return; 813 + } 814 + 815 + assert(reg_names[0] == WHvX64RegisterRflags); 816 + env->eflags = reg_values[0].Reg64; 817 + 818 + assert(reg_names[1] == WHvX64RegisterCr8); 819 + if (vcpu->tpr != reg_values[1].Reg64) { 820 + vcpu->tpr = reg_values[1].Reg64; 821 + qemu_mutex_lock_iothread(); 822 + cpu_set_apic_tpr(x86_cpu->apic_state, vcpu->tpr); 823 + qemu_mutex_unlock_iothread(); 824 + } 825 + 826 + assert(reg_names[2] == WHvRegisterPendingInterruption); 827 + vcpu->interrupt_in_flight = reg_values[2].PendingInterruption; 828 + 829 + assert(reg_names[3] == 
WHvRegisterInterruptState); 830 + vcpu->interruptable = !reg_values[3].InterruptState.InterruptShadow; 831 + 832 + return; 833 + } 834 + 835 + static void whpx_vcpu_process_async_events(CPUState *cpu) 836 + { 837 + struct CPUX86State *env = (CPUArchState *)(cpu->env_ptr); 838 + X86CPU *x86_cpu = X86_CPU(cpu); 839 + struct whpx_vcpu *vcpu = get_whpx_vcpu(cpu); 840 + 841 + if ((cpu->interrupt_request & CPU_INTERRUPT_INIT) && 842 + !(env->hflags & HF_SMM_MASK)) { 843 + 844 + do_cpu_init(x86_cpu); 845 + cpu->vcpu_dirty = true; 846 + vcpu->interruptable = true; 847 + } 848 + 849 + if (cpu->interrupt_request & CPU_INTERRUPT_POLL) { 850 + cpu->interrupt_request &= ~CPU_INTERRUPT_POLL; 851 + apic_poll_irq(x86_cpu->apic_state); 852 + } 853 + 854 + if (((cpu->interrupt_request & CPU_INTERRUPT_HARD) && 855 + (env->eflags & IF_MASK)) || 856 + (cpu->interrupt_request & CPU_INTERRUPT_NMI)) { 857 + cpu->halted = false; 858 + } 859 + 860 + if (cpu->interrupt_request & CPU_INTERRUPT_SIPI) { 861 + if (!cpu->vcpu_dirty) { 862 + whpx_get_registers(cpu); 863 + } 864 + do_cpu_sipi(x86_cpu); 865 + } 866 + 867 + if (cpu->interrupt_request & CPU_INTERRUPT_TPR) { 868 + cpu->interrupt_request &= ~CPU_INTERRUPT_TPR; 869 + if (!cpu->vcpu_dirty) { 870 + whpx_get_registers(cpu); 871 + } 872 + apic_handle_tpr_access_report(x86_cpu->apic_state, env->eip, 873 + env->tpr_access_type); 874 + } 875 + 876 + return; 877 + } 878 + 879 + static int whpx_vcpu_run(CPUState *cpu) 880 + { 881 + HRESULT hr; 882 + struct whpx_state *whpx = &whpx_global; 883 + struct whpx_vcpu *vcpu = get_whpx_vcpu(cpu); 884 + int ret; 885 + 886 + whpx_vcpu_process_async_events(cpu); 887 + if (cpu->halted) { 888 + cpu->exception_index = EXCP_HLT; 889 + atomic_set(&cpu->exit_request, false); 890 + return 0; 891 + } 892 + 893 + qemu_mutex_unlock_iothread(); 894 + cpu_exec_start(cpu); 895 + 896 + do { 897 + if (cpu->vcpu_dirty) { 898 + whpx_set_registers(cpu); 899 + cpu->vcpu_dirty = false; 900 + } 901 + 902 + 
whpx_vcpu_pre_run(cpu); 903 + 904 + if (atomic_read(&cpu->exit_request)) { 905 + whpx_vcpu_kick(cpu); 906 + } 907 + 908 + for (;;) { 909 + hr = WHvRunVirtualProcessor(whpx->partition, cpu->cpu_index, 910 + &vcpu->exit_ctx, whpx->exit_ctx_size); 911 + 912 + if (SUCCEEDED(hr) && (vcpu->exit_ctx.ExitReason == 913 + WHvRunVpExitReasonAlerted)) { 914 + WHvCancelRunVirtualProcessor(whpx->partition, cpu->cpu_index, 915 + 0); 916 + } else { 917 + break; 918 + } 919 + } 920 + 921 + if (FAILED(hr)) { 922 + error_report("WHPX: Failed to exec a virtual processor," 923 + " hr=%08lx", hr); 924 + ret = -1; 925 + break; 926 + } 927 + 928 + whpx_vcpu_post_run(cpu); 929 + 930 + switch (vcpu->exit_ctx.ExitReason) { 931 + case WHvRunVpExitReasonMemoryAccess: 932 + ret = whpx_handle_mmio(cpu, &vcpu->exit_ctx.MemoryAccess); 933 + break; 934 + 935 + case WHvRunVpExitReasonX64IoPortAccess: 936 + ret = whpx_handle_portio(cpu, &vcpu->exit_ctx.IoPortAccess); 937 + break; 938 + 939 + case WHvRunVpExitReasonX64InterruptWindow: 940 + vcpu->window_registered = 0; 941 + break; 942 + 943 + case WHvRunVpExitReasonX64Halt: 944 + ret = whpx_handle_halt(cpu); 945 + break; 946 + 947 + case WHvRunVpExitReasonCanceled: 948 + cpu->exception_index = EXCP_INTERRUPT; 949 + ret = 1; 950 + break; 951 + 952 + case WHvRunVpExitReasonNone: 953 + case WHvRunVpExitReasonUnrecoverableException: 954 + case WHvRunVpExitReasonInvalidVpRegisterValue: 955 + case WHvRunVpExitReasonUnsupportedFeature: 956 + case WHvRunVpExitReasonX64MsrAccess: 957 + case WHvRunVpExitReasonX64Cpuid: 958 + case WHvRunVpExitReasonException: 959 + case WHvRunVpExitReasonAlerted: 960 + default: 961 + error_report("WHPX: Unexpected VP exit code %d", 962 + vcpu->exit_ctx.ExitReason); 963 + whpx_get_registers(cpu); 964 + qemu_mutex_lock_iothread(); 965 + qemu_system_guest_panicked(cpu_get_crash_info(cpu)); 966 + qemu_mutex_unlock_iothread(); 967 + break; 968 + } 969 + 970 + } while (!ret); 971 + 972 + cpu_exec_end(cpu); 973 + 
qemu_mutex_lock_iothread(); 974 + current_cpu = cpu; 975 + 976 + atomic_set(&cpu->exit_request, false); 977 + 978 + return ret < 0; 979 + } 980 + 981 + static void do_whpx_cpu_synchronize_state(CPUState *cpu, run_on_cpu_data arg) 982 + { 983 + whpx_get_registers(cpu); 984 + cpu->vcpu_dirty = true; 985 + } 986 + 987 + static void do_whpx_cpu_synchronize_post_reset(CPUState *cpu, 988 + run_on_cpu_data arg) 989 + { 990 + whpx_set_registers(cpu); 991 + cpu->vcpu_dirty = false; 992 + } 993 + 994 + static void do_whpx_cpu_synchronize_post_init(CPUState *cpu, 995 + run_on_cpu_data arg) 996 + { 997 + whpx_set_registers(cpu); 998 + cpu->vcpu_dirty = false; 999 + } 1000 + 1001 + static void do_whpx_cpu_synchronize_pre_loadvm(CPUState *cpu, 1002 + run_on_cpu_data arg) 1003 + { 1004 + cpu->vcpu_dirty = true; 1005 + } 1006 + 1007 + /* 1008 + * CPU support. 1009 + */ 1010 + 1011 + void whpx_cpu_synchronize_state(CPUState *cpu) 1012 + { 1013 + if (!cpu->vcpu_dirty) { 1014 + run_on_cpu(cpu, do_whpx_cpu_synchronize_state, RUN_ON_CPU_NULL); 1015 + } 1016 + } 1017 + 1018 + void whpx_cpu_synchronize_post_reset(CPUState *cpu) 1019 + { 1020 + run_on_cpu(cpu, do_whpx_cpu_synchronize_post_reset, RUN_ON_CPU_NULL); 1021 + } 1022 + 1023 + void whpx_cpu_synchronize_post_init(CPUState *cpu) 1024 + { 1025 + run_on_cpu(cpu, do_whpx_cpu_synchronize_post_init, RUN_ON_CPU_NULL); 1026 + } 1027 + 1028 + void whpx_cpu_synchronize_pre_loadvm(CPUState *cpu) 1029 + { 1030 + run_on_cpu(cpu, do_whpx_cpu_synchronize_pre_loadvm, RUN_ON_CPU_NULL); 1031 + } 1032 + 1033 + /* 1034 + * Vcpu support. 
1035 + */ 1036 + 1037 + static Error *whpx_migration_blocker; 1038 + 1039 + int whpx_init_vcpu(CPUState *cpu) 1040 + { 1041 + HRESULT hr; 1042 + struct whpx_state *whpx = &whpx_global; 1043 + struct whpx_vcpu *vcpu; 1044 + Error *local_error = NULL; 1045 + 1046 + /* Add migration blockers for all unsupported features of the 1047 + * Windows Hypervisor Platform 1048 + */ 1049 + if (whpx_migration_blocker == NULL) { 1050 + error_setg(&whpx_migration_blocker, 1051 + "State blocked due to non-migratable CPUID feature support," 1052 + "dirty memory tracking support, and XSAVE/XRSTOR support"); 1053 + 1054 + (void)migrate_add_blocker(whpx_migration_blocker, &local_error); 1055 + if (local_error) { 1056 + error_report_err(local_error); 1057 + error_free(whpx_migration_blocker); 1058 + migrate_del_blocker(whpx_migration_blocker); 1059 + return -EINVAL; 1060 + } 1061 + } 1062 + 1063 + vcpu = g_malloc0(FIELD_OFFSET(struct whpx_vcpu, exit_ctx) + 1064 + whpx->exit_ctx_size); 1065 + 1066 + if (!vcpu) { 1067 + error_report("WHPX: Failed to allocte VCPU context."); 1068 + return -ENOMEM; 1069 + } 1070 + 1071 + hr = WHvEmulatorCreateEmulator(whpx_emu_callbacks, &vcpu->emulator); 1072 + if (FAILED(hr)) { 1073 + error_report("WHPX: Failed to setup instruction completion support," 1074 + " hr=%08lx", hr); 1075 + g_free(vcpu); 1076 + return -EINVAL; 1077 + } 1078 + 1079 + hr = WHvCreateVirtualProcessor(whpx->partition, cpu->cpu_index, 0); 1080 + if (FAILED(hr)) { 1081 + error_report("WHPX: Failed to create a virtual processor," 1082 + " hr=%08lx", hr); 1083 + WHvEmulatorDestroyEmulator(vcpu->emulator); 1084 + g_free(vcpu); 1085 + return -EINVAL; 1086 + } 1087 + 1088 + vcpu->interruptable = true; 1089 + 1090 + cpu->vcpu_dirty = true; 1091 + cpu->hax_vcpu = (struct hax_vcpu_state *)vcpu; 1092 + 1093 + return 0; 1094 + } 1095 + 1096 + int whpx_vcpu_exec(CPUState *cpu) 1097 + { 1098 + int ret; 1099 + int fatal; 1100 + 1101 + for (;;) { 1102 + if (cpu->exception_index >= EXCP_INTERRUPT) { 
1103 + ret = cpu->exception_index; 1104 + cpu->exception_index = -1; 1105 + break; 1106 + } 1107 + 1108 + fatal = whpx_vcpu_run(cpu); 1109 + 1110 + if (fatal) { 1111 + error_report("WHPX: Failed to exec a virtual processor"); 1112 + abort(); 1113 + } 1114 + } 1115 + 1116 + return ret; 1117 + } 1118 + 1119 + void whpx_destroy_vcpu(CPUState *cpu) 1120 + { 1121 + struct whpx_state *whpx = &whpx_global; 1122 + struct whpx_vcpu *vcpu = get_whpx_vcpu(cpu); 1123 + 1124 + WHvDeleteVirtualProcessor(whpx->partition, cpu->cpu_index); 1125 + WHvEmulatorDestroyEmulator(vcpu->emulator); 1126 + g_free(cpu->hax_vcpu); 1127 + return; 1128 + } 1129 + 1130 + void whpx_vcpu_kick(CPUState *cpu) 1131 + { 1132 + struct whpx_state *whpx = &whpx_global; 1133 + WHvCancelRunVirtualProcessor(whpx->partition, cpu->cpu_index, 0); 1134 + } 1135 + 1136 + /* 1137 + * Memory support. 1138 + */ 1139 + 1140 + static void whpx_update_mapping(hwaddr start_pa, ram_addr_t size, 1141 + void *host_va, int add, int rom, 1142 + const char *name) 1143 + { 1144 + struct whpx_state *whpx = &whpx_global; 1145 + HRESULT hr; 1146 + 1147 + /* 1148 + if (add) { 1149 + printf("WHPX: ADD PA:%p Size:%p, Host:%p, %s, '%s'\n", 1150 + (void*)start_pa, (void*)size, host_va, 1151 + (rom ? "ROM" : "RAM"), name); 1152 + } else { 1153 + printf("WHPX: DEL PA:%p Size:%p, Host:%p, '%s'\n", 1154 + (void*)start_pa, (void*)size, host_va, name); 1155 + } 1156 + */ 1157 + 1158 + if (add) { 1159 + hr = WHvMapGpaRange(whpx->partition, 1160 + host_va, 1161 + start_pa, 1162 + size, 1163 + (WHvMapGpaRangeFlagRead | 1164 + WHvMapGpaRangeFlagExecute | 1165 + (rom ? 0 : WHvMapGpaRangeFlagWrite))); 1166 + } else { 1167 + hr = WHvUnmapGpaRange(whpx->partition, 1168 + start_pa, 1169 + size); 1170 + } 1171 + 1172 + if (FAILED(hr)) { 1173 + error_report("WHPX: Failed to %s GPA range '%s' PA:%p, Size:%p bytes," 1174 + " Host:%p, hr=%08lx", 1175 + (add ? 
"MAP" : "UNMAP"), name, 1176 + (void *)start_pa, (void *)size, host_va, hr); 1177 + } 1178 + } 1179 + 1180 + static void whpx_process_section(MemoryRegionSection *section, int add) 1181 + { 1182 + MemoryRegion *mr = section->mr; 1183 + hwaddr start_pa = section->offset_within_address_space; 1184 + ram_addr_t size = int128_get64(section->size); 1185 + unsigned int delta; 1186 + uint64_t host_va; 1187 + 1188 + if (!memory_region_is_ram(mr)) { 1189 + return; 1190 + } 1191 + 1192 + delta = qemu_real_host_page_size - (start_pa & ~qemu_real_host_page_mask); 1193 + delta &= ~qemu_real_host_page_mask; 1194 + if (delta > size) { 1195 + return; 1196 + } 1197 + start_pa += delta; 1198 + size -= delta; 1199 + size &= qemu_real_host_page_mask; 1200 + if (!size || (start_pa & ~qemu_real_host_page_mask)) { 1201 + return; 1202 + } 1203 + 1204 + host_va = (uintptr_t)memory_region_get_ram_ptr(mr) 1205 + + section->offset_within_region + delta; 1206 + 1207 + whpx_update_mapping(start_pa, size, (void *)host_va, add, 1208 + memory_region_is_rom(mr), mr->name); 1209 + } 1210 + 1211 + static void whpx_region_add(MemoryListener *listener, 1212 + MemoryRegionSection *section) 1213 + { 1214 + memory_region_ref(section->mr); 1215 + whpx_process_section(section, 1); 1216 + } 1217 + 1218 + static void whpx_region_del(MemoryListener *listener, 1219 + MemoryRegionSection *section) 1220 + { 1221 + whpx_process_section(section, 0); 1222 + memory_region_unref(section->mr); 1223 + } 1224 + 1225 + static void whpx_transaction_begin(MemoryListener *listener) 1226 + { 1227 + } 1228 + 1229 + static void whpx_transaction_commit(MemoryListener *listener) 1230 + { 1231 + } 1232 + 1233 + static void whpx_log_sync(MemoryListener *listener, 1234 + MemoryRegionSection *section) 1235 + { 1236 + MemoryRegion *mr = section->mr; 1237 + 1238 + if (!memory_region_is_ram(mr)) { 1239 + return; 1240 + } 1241 + 1242 + memory_region_set_dirty(mr, 0, int128_get64(section->size)); 1243 + } 1244 + 1245 + static 
MemoryListener whpx_memory_listener = { 1246 + .begin = whpx_transaction_begin, 1247 + .commit = whpx_transaction_commit, 1248 + .region_add = whpx_region_add, 1249 + .region_del = whpx_region_del, 1250 + .log_sync = whpx_log_sync, 1251 + .priority = 10, 1252 + }; 1253 + 1254 + static void whpx_memory_init(void) 1255 + { 1256 + memory_listener_register(&whpx_memory_listener, &address_space_memory); 1257 + } 1258 + 1259 + static void whpx_handle_interrupt(CPUState *cpu, int mask) 1260 + { 1261 + cpu->interrupt_request |= mask; 1262 + 1263 + if (!qemu_cpu_is_self(cpu)) { 1264 + qemu_cpu_kick(cpu); 1265 + } 1266 + } 1267 + 1268 + /* 1269 + * Partition support 1270 + */ 1271 + 1272 + static int whpx_accel_init(MachineState *ms) 1273 + { 1274 + struct whpx_state *whpx; 1275 + int ret; 1276 + HRESULT hr; 1277 + WHV_CAPABILITY whpx_cap; 1278 + WHV_PARTITION_PROPERTY prop; 1279 + 1280 + whpx = &whpx_global; 1281 + 1282 + memset(whpx, 0, sizeof(struct whpx_state)); 1283 + whpx->mem_quota = ms->ram_size; 1284 + 1285 + hr = WHvGetCapability(WHvCapabilityCodeHypervisorPresent, &whpx_cap, 1286 + sizeof(whpx_cap)); 1287 + if (FAILED(hr) || !whpx_cap.HypervisorPresent) { 1288 + error_report("WHPX: No accelerator found, hr=%08lx", hr); 1289 + ret = -ENOSPC; 1290 + goto error; 1291 + } 1292 + 1293 + hr = WHvCreatePartition(&whpx->partition); 1294 + if (FAILED(hr)) { 1295 + error_report("WHPX: Failed to create partition, hr=%08lx", hr); 1296 + ret = -EINVAL; 1297 + goto error; 1298 + } 1299 + 1300 + memset(&prop, 0, sizeof(WHV_PARTITION_PROPERTY)); 1301 + prop.PropertyCode = WHvPartitionPropertyCodeProcessorCount; 1302 + prop.ProcessorCount = smp_cpus; 1303 + hr = WHvSetPartitionProperty(whpx->partition, 1304 + &prop, 1305 + sizeof(WHV_PARTITION_PROPERTY)); 1306 + 1307 + if (FAILED(hr)) { 1308 + error_report("WHPX: Failed to set partition core count to %d," 1309 + " hr=%08lx", smp_cores, hr); 1310 + ret = -EINVAL; 1311 + goto error; 1312 + } 1313 + 1314 + hr = 
WHvSetupPartition(whpx->partition); 1315 + if (FAILED(hr)) { 1316 + error_report("WHPX: Failed to setup partition, hr=%08lx", hr); 1317 + ret = -EINVAL; 1318 + goto error; 1319 + } 1320 + 1321 + whpx->exit_ctx_size = WHvGetRunExitContextSize(); 1322 + assert(whpx->exit_ctx_size); 1323 + 1324 + whpx_memory_init(); 1325 + 1326 + cpu_interrupt_handler = whpx_handle_interrupt; 1327 + 1328 + printf("Windows Hypervisor Platform accelerator is operational\n"); 1329 + return 0; 1330 + 1331 + error: 1332 + 1333 + if (NULL != whpx->partition) { 1334 + WHvDeletePartition(whpx->partition); 1335 + whpx->partition = NULL; 1336 + } 1337 + 1338 + 1339 + return ret; 1340 + } 1341 + 1342 + int whpx_enabled(void) 1343 + { 1344 + return whpx_allowed; 1345 + } 1346 + 1347 + static void whpx_accel_class_init(ObjectClass *oc, void *data) 1348 + { 1349 + AccelClass *ac = ACCEL_CLASS(oc); 1350 + ac->name = "WHPX"; 1351 + ac->init_machine = whpx_accel_init; 1352 + ac->allowed = &whpx_allowed; 1353 + } 1354 + 1355 + static const TypeInfo whpx_accel_type = { 1356 + .name = ACCEL_CLASS_NAME("whpx"), 1357 + .parent = TYPE_ACCEL, 1358 + .class_init = whpx_accel_class_init, 1359 + }; 1360 + 1361 + static void whpx_type_init(void) 1362 + { 1363 + type_register_static(&whpx_accel_type); 1364 + } 1365 + 1366 + type_init(whpx_type_init);
+2 -3
target/lm32/cpu.c
··· 236 236 CPUClass *cc = CPU_CLASS(oc); 237 237 DeviceClass *dc = DEVICE_CLASS(oc); 238 238 239 - lcc->parent_realize = dc->realize; 240 - dc->realize = lm32_cpu_realizefn; 241 - 239 + device_class_set_parent_realize(dc, lm32_cpu_realizefn, 240 + &lcc->parent_realize); 242 241 lcc->parent_reset = cc->reset; 243 242 cc->reset = lm32_cpu_reset; 244 243
+2 -3
target/m68k/cpu.c
··· 255 255 CPUClass *cc = CPU_CLASS(c); 256 256 DeviceClass *dc = DEVICE_CLASS(c); 257 257 258 - mcc->parent_realize = dc->realize; 259 - dc->realize = m68k_cpu_realizefn; 260 - 258 + device_class_set_parent_realize(dc, m68k_cpu_realizefn, 259 + &mcc->parent_realize); 261 260 mcc->parent_reset = cc->reset; 262 261 cc->reset = m68k_cpu_reset; 263 262
+2 -3
target/microblaze/cpu.c
··· 258 258 CPUClass *cc = CPU_CLASS(oc); 259 259 MicroBlazeCPUClass *mcc = MICROBLAZE_CPU_CLASS(oc); 260 260 261 - mcc->parent_realize = dc->realize; 262 - dc->realize = mb_cpu_realizefn; 263 - 261 + device_class_set_parent_realize(dc, mb_cpu_realizefn, 262 + &mcc->parent_realize); 264 263 mcc->parent_reset = cc->reset; 265 264 cc->reset = mb_cpu_reset; 266 265
+2 -3
target/mips/cpu.c
··· 174 174 CPUClass *cc = CPU_CLASS(c); 175 175 DeviceClass *dc = DEVICE_CLASS(c); 176 176 177 - mcc->parent_realize = dc->realize; 178 - dc->realize = mips_cpu_realizefn; 179 - 177 + device_class_set_parent_realize(dc, mips_cpu_realizefn, 178 + &mcc->parent_realize); 180 179 mcc->parent_reset = cc->reset; 181 180 cc->reset = mips_cpu_reset; 182 181
+2 -3
target/moxie/cpu.c
··· 102 102 CPUClass *cc = CPU_CLASS(oc); 103 103 MoxieCPUClass *mcc = MOXIE_CPU_CLASS(oc); 104 104 105 - mcc->parent_realize = dc->realize; 106 - dc->realize = moxie_cpu_realizefn; 107 - 105 + device_class_set_parent_realize(dc, moxie_cpu_realizefn, 106 + &mcc->parent_realize); 108 107 mcc->parent_reset = cc->reset; 109 108 cc->reset = moxie_cpu_reset; 110 109
+2 -2
target/nios2/cpu.c
··· 187 187 CPUClass *cc = CPU_CLASS(oc); 188 188 Nios2CPUClass *ncc = NIOS2_CPU_CLASS(oc); 189 189 190 - ncc->parent_realize = dc->realize; 191 - dc->realize = nios2_cpu_realizefn; 190 + device_class_set_parent_realize(dc, nios2_cpu_realizefn, 191 + &ncc->parent_realize); 192 192 dc->props = nios2_properties; 193 193 ncc->parent_reset = cc->reset; 194 194 cc->reset = nios2_cpu_reset;
+2 -3
target/openrisc/cpu.c
··· 132 132 CPUClass *cc = CPU_CLASS(occ); 133 133 DeviceClass *dc = DEVICE_CLASS(oc); 134 134 135 - occ->parent_realize = dc->realize; 136 - dc->realize = openrisc_cpu_realizefn; 137 - 135 + device_class_set_parent_realize(dc, openrisc_cpu_realizefn, 136 + &occ->parent_realize); 138 137 occ->parent_reset = cc->reset; 139 138 cc->reset = openrisc_cpu_reset; 140 139
+4 -4
target/ppc/translate_init.c
··· 10556 10556 CPUClass *cc = CPU_CLASS(oc); 10557 10557 DeviceClass *dc = DEVICE_CLASS(oc); 10558 10558 10559 - pcc->parent_realize = dc->realize; 10560 - pcc->parent_unrealize = dc->unrealize; 10559 + device_class_set_parent_realize(dc, ppc_cpu_realizefn, 10560 + &pcc->parent_realize); 10561 + device_class_set_parent_unrealize(dc, ppc_cpu_unrealizefn, 10562 + &pcc->parent_unrealize); 10561 10563 pcc->pvr_match = ppc_pvr_match_default; 10562 10564 pcc->interrupts_big_endian = ppc_cpu_interrupts_big_endian_always; 10563 - dc->realize = ppc_cpu_realizefn; 10564 - dc->unrealize = ppc_cpu_unrealizefn; 10565 10565 dc->props = ppc_cpu_properties; 10566 10566 10567 10567 pcc->parent_reset = cc->reset;
+2 -2
target/s390x/cpu.c
··· 464 464 CPUClass *cc = CPU_CLASS(scc); 465 465 DeviceClass *dc = DEVICE_CLASS(oc); 466 466 467 - scc->parent_realize = dc->realize; 468 - dc->realize = s390_cpu_realizefn; 467 + device_class_set_parent_realize(dc, s390_cpu_realizefn, 468 + &scc->parent_realize); 469 469 dc->props = s390x_cpu_properties; 470 470 dc->user_creatable = true; 471 471
+2 -2
target/sh4/cpu.c
··· 236 236 CPUClass *cc = CPU_CLASS(oc); 237 237 SuperHCPUClass *scc = SUPERH_CPU_CLASS(oc); 238 238 239 - scc->parent_realize = dc->realize; 240 - dc->realize = superh_cpu_realizefn; 239 + device_class_set_parent_realize(dc, superh_cpu_realizefn, 240 + &scc->parent_realize); 241 241 242 242 scc->parent_reset = cc->reset; 243 243 cc->reset = superh_cpu_reset;
+2 -2
target/sparc/cpu.c
··· 858 858 CPUClass *cc = CPU_CLASS(oc); 859 859 DeviceClass *dc = DEVICE_CLASS(oc); 860 860 861 - scc->parent_realize = dc->realize; 862 - dc->realize = sparc_cpu_realizefn; 861 + device_class_set_parent_realize(dc, sparc_cpu_realizefn, 862 + &scc->parent_realize); 863 863 dc->props = sparc_cpu_properties; 864 864 865 865 scc->parent_reset = cc->reset;
+2 -2
target/tilegx/cpu.c
··· 141 141 CPUClass *cc = CPU_CLASS(oc); 142 142 TileGXCPUClass *tcc = TILEGX_CPU_CLASS(oc); 143 143 144 - tcc->parent_realize = dc->realize; 145 - dc->realize = tilegx_cpu_realizefn; 144 + device_class_set_parent_realize(dc, tilegx_cpu_realizefn, 145 + &tcc->parent_realize); 146 146 147 147 tcc->parent_reset = cc->reset; 148 148 cc->reset = tilegx_cpu_reset;
+2 -2
target/tricore/cpu.c
··· 153 153 CPUClass *cc = CPU_CLASS(c); 154 154 DeviceClass *dc = DEVICE_CLASS(c); 155 155 156 - mcc->parent_realize = dc->realize; 157 - dc->realize = tricore_cpu_realizefn; 156 + device_class_set_parent_realize(dc, tricore_cpu_realizefn, 157 + &mcc->parent_realize); 158 158 159 159 mcc->parent_reset = cc->reset; 160 160 cc->reset = tricore_cpu_reset;
+2 -2
target/unicore32/cpu.c
··· 132 132 CPUClass *cc = CPU_CLASS(oc); 133 133 UniCore32CPUClass *ucc = UNICORE32_CPU_CLASS(oc); 134 134 135 - ucc->parent_realize = dc->realize; 136 - dc->realize = uc32_cpu_realizefn; 135 + device_class_set_parent_realize(dc, uc32_cpu_realizefn, 136 + &ucc->parent_realize); 137 137 138 138 cc->class_by_name = uc32_cpu_class_by_name; 139 139 cc->has_work = uc32_cpu_has_work;
+2 -2
target/xtensa/cpu.c
··· 151 151 CPUClass *cc = CPU_CLASS(oc); 152 152 XtensaCPUClass *xcc = XTENSA_CPU_CLASS(cc); 153 153 154 - xcc->parent_realize = dc->realize; 155 - dc->realize = xtensa_cpu_realizefn; 154 + device_class_set_parent_realize(dc, xtensa_cpu_realizefn, 155 + &xcc->parent_realize); 156 156 157 157 xcc->parent_reset = cc->reset; 158 158 cc->reset = xtensa_cpu_reset;
+1 -1
tests/test-filter-redirector.c
··· 186 186 187 187 ret = iov_send(send_sock, iov, 2, 0, sizeof(size) + sizeof(send_buf)); 188 188 g_assert_cmpint(ret, ==, sizeof(send_buf) + sizeof(size)); 189 - close(send_sock); 190 189 191 190 ret = qemu_recv(backend_sock[0], &len, sizeof(len), 0); 192 191 g_assert_cmpint(ret, ==, sizeof(len)); ··· 197 196 ret = qemu_recv(backend_sock[0], recv_buf, len, 0); 198 197 g_assert_cmpstr(recv_buf, ==, send_buf); 199 198 199 + close(send_sock); 200 200 g_free(recv_buf); 201 201 unlink(sock_path0); 202 202 unlink(sock_path1);
+85 -43
tests/vhost-user-test.c
··· 17 17 #include "qemu/range.h" 18 18 #include "qemu/sockets.h" 19 19 #include "chardev/char-fe.h" 20 + #include "qemu/memfd.h" 20 21 #include "sysemu/sysemu.h" 21 22 #include "libqos/libqos.h" 22 23 #include "libqos/pci-pc.h" ··· 30 31 #include <linux/virtio_net.h> 31 32 #include <sys/vfs.h> 32 33 33 - #define VHOST_USER_NET_TESTS_WORKING 0 /* broken as of 2.10.0 */ 34 - 35 34 /* GLIB version compatibility flags */ 36 35 #if !GLIB_CHECK_VERSION(2, 26, 0) 37 36 #define G_TIME_SPAN_SECOND (G_GINT64_CONSTANT(1000000)) ··· 41 40 #define HAVE_MONOTONIC_TIME 42 41 #endif 43 42 44 - #define QEMU_CMD_MEM " -m %d -object memory-backend-file,id=mem,size=%dM,"\ 43 + #define QEMU_CMD_MEM " -m %d -object memory-backend-file,id=mem,size=%dM," \ 45 44 "mem-path=%s,share=on -numa node,memdev=mem" 45 + #define QEMU_CMD_MEMFD " -m %d -object memory-backend-memfd,id=mem,size=%dM," \ 46 + " -numa node,memdev=mem" 46 47 #define QEMU_CMD_CHR " -chardev socket,id=%s,path=%s%s" 47 48 #define QEMU_CMD_NETDEV " -netdev vhost-user,id=net0,chardev=%s,vhostforce" 48 49 #define QEMU_CMD_NET " -device virtio-net-pci,netdev=net0" 49 - 50 - #define QEMU_CMD QEMU_CMD_MEM QEMU_CMD_CHR \ 51 - QEMU_CMD_NETDEV QEMU_CMD_NET 52 50 53 51 #define HUGETLBFS_MAGIC 0x958458f6 54 52 ··· 161 159 QGuestAllocator *alloc; 162 160 } TestServer; 163 161 162 + static TestServer *test_server_new(const gchar *name); 163 + static void test_server_free(TestServer *server); 164 + static void test_server_listen(TestServer *server); 165 + 164 166 static const char *tmpfs; 165 167 static const char *root; 166 168 169 + enum test_memfd { 170 + TEST_MEMFD_AUTO, 171 + TEST_MEMFD_YES, 172 + TEST_MEMFD_NO, 173 + }; 174 + 175 + static char *get_qemu_cmd(TestServer *s, 176 + int mem, enum test_memfd memfd, const char *mem_path, 177 + const char *chr_opts, const char *extra) 178 + { 179 + if (memfd == TEST_MEMFD_AUTO && qemu_memfd_check()) { 180 + memfd = TEST_MEMFD_YES; 181 + } 182 + 183 + if (memfd == TEST_MEMFD_YES) { 184 + 
return g_strdup_printf(QEMU_CMD_MEMFD QEMU_CMD_CHR 185 + QEMU_CMD_NETDEV QEMU_CMD_NET "%s", mem, mem, 186 + s->chr_name, s->socket_path, 187 + chr_opts, s->chr_name, extra); 188 + } else { 189 + return g_strdup_printf(QEMU_CMD_MEM QEMU_CMD_CHR 190 + QEMU_CMD_NETDEV QEMU_CMD_NET "%s", mem, mem, 191 + mem_path, s->chr_name, s->socket_path, 192 + chr_opts, s->chr_name, extra); 193 + } 194 + } 195 + 167 196 static void init_virtio_dev(TestServer *s, uint32_t features_mask) 168 197 { 169 198 uint32_t features; ··· 227 256 g_mutex_unlock(&s->data_mutex); 228 257 } 229 258 230 - static void read_guest_mem(const void *data) 259 + static void read_guest_mem_server(TestServer *s) 231 260 { 232 - TestServer *s = (void *)data; 233 261 uint32_t *guest_mem; 234 262 int i, j; 235 263 size_t size; ··· 494 522 test_server_create_chr(server, ",server,nowait"); 495 523 } 496 524 497 - #define GET_QEMU_CMD(s) \ 498 - g_strdup_printf(QEMU_CMD, 512, 512, (root), (s)->chr_name, \ 499 - (s)->socket_path, "", (s)->chr_name) 500 - 501 - #define GET_QEMU_CMDE(s, mem, chr_opts, extra, ...) 
\ 502 - g_strdup_printf(QEMU_CMD extra, (mem), (mem), (root), (s)->chr_name, \ 503 - (s)->socket_path, (chr_opts), (s)->chr_name, ##__VA_ARGS__) 504 - 505 525 static gboolean _test_server_free(TestServer *server) 506 526 { 507 527 int i; ··· 638 658 .check = test_migrate_source_check, 639 659 }; 640 660 641 - static void test_read_guest_mem(void) 661 + static void test_read_guest_mem(const void *arg) 642 662 { 663 + enum test_memfd memfd = GPOINTER_TO_INT(arg); 643 664 TestServer *server = NULL; 644 665 char *qemu_cmd = NULL; 645 666 QTestState *s = NULL; ··· 647 668 server = test_server_new("test"); 648 669 test_server_listen(server); 649 670 650 - qemu_cmd = GET_QEMU_CMD(server); 671 + qemu_cmd = get_qemu_cmd(server, 512, memfd, root, "", ""); 651 672 652 673 s = qtest_start(qemu_cmd); 653 674 g_free(qemu_cmd); 654 675 655 676 init_virtio_dev(server, 1u << VIRTIO_NET_F_MAC); 656 677 657 - read_guest_mem(server); 678 + read_guest_mem_server(server); 658 679 659 680 uninit_virtio_dev(server); 660 681 ··· 669 690 char *uri = g_strdup_printf("%s%s", "unix:", dest->mig_path); 670 691 QTestState *global = global_qtest, *from, *to; 671 692 GSource *source; 672 - gchar *cmd; 693 + gchar *cmd, *tmp; 673 694 QDict *rsp; 674 695 guint8 *log; 675 696 guint64 size; ··· 677 698 test_server_listen(s); 678 699 test_server_listen(dest); 679 700 680 - cmd = GET_QEMU_CMDE(s, 2, "", ""); 701 + cmd = get_qemu_cmd(s, 2, TEST_MEMFD_AUTO, root, "", ""); 681 702 from = qtest_start(cmd); 682 703 g_free(cmd); 683 704 ··· 686 707 size = get_log_size(s); 687 708 g_assert_cmpint(size, ==, (2 * 1024 * 1024) / (VHOST_LOG_PAGE * 8)); 688 709 689 - cmd = GET_QEMU_CMDE(dest, 2, "", " -incoming %s", uri); 710 + tmp = g_strdup_printf(" -incoming %s", uri); 711 + cmd = get_qemu_cmd(dest, 2, TEST_MEMFD_AUTO, root, "", tmp); 712 + g_free(tmp); 690 713 to = qtest_init(cmd); 691 714 g_free(cmd); 692 715 ··· 732 755 global_qtest = to; 733 756 qmp_eventwait("RESUME"); 734 757 735 - read_guest_mem(dest); 
758 + read_guest_mem_server(dest); 736 759 737 760 uninit_virtio_dev(s); 738 761 ··· 765 788 g_mutex_unlock(&s->data_mutex); 766 789 } 767 790 768 - #if VHOST_USER_NET_TESTS_WORKING && defined(CONFIG_HAS_GLIB_SUBPROCESS_TESTS) 791 + #if defined(CONFIG_HAS_GLIB_SUBPROCESS_TESTS) 769 792 static inline void test_server_connect(TestServer *server) 770 793 { 771 794 test_server_create_chr(server, ",reconnect=1"); ··· 799 822 char *cmd; 800 823 801 824 g_thread_new("connect", connect_thread, s); 802 - cmd = GET_QEMU_CMDE(s, 2, ",server", ""); 825 + cmd = get_qemu_cmd(s, 2, TEST_MEMFD_AUTO, root, ",server", ""); 803 826 qtest_start(cmd); 804 827 g_free(cmd); 805 828 ··· 837 860 838 861 s->test_fail = true; 839 862 g_thread_new("connect", connect_thread, s); 840 - cmd = GET_QEMU_CMDE(s, 2, ",server", ""); 863 + cmd = get_qemu_cmd(s, 2, TEST_MEMFD_AUTO, root, ",server", ""); 841 864 qtest_start(cmd); 842 865 g_free(cmd); 843 866 ··· 867 890 868 891 s->test_flags = TEST_FLAGS_DISCONNECT; 869 892 g_thread_new("connect", connect_thread, s); 870 - cmd = GET_QEMU_CMDE(s, 2, ",server", ""); 893 + cmd = get_qemu_cmd(s, 2, TEST_MEMFD_AUTO, root, ",server", ""); 871 894 qtest_start(cmd); 872 895 g_free(cmd); 873 896 ··· 902 925 s->queues = 2; 903 926 test_server_listen(s); 904 927 905 - cmd = g_strdup_printf(QEMU_CMD_MEM QEMU_CMD_CHR QEMU_CMD_NETDEV ",queues=%d " 906 - "-device virtio-net-pci,netdev=net0,mq=on,vectors=%d", 907 - 512, 512, root, s->chr_name, 908 - s->socket_path, "", s->chr_name, 909 - s->queues, s->queues * 2 + 2); 928 + if (qemu_memfd_check()) { 929 + cmd = g_strdup_printf( 930 + QEMU_CMD_MEMFD QEMU_CMD_CHR QEMU_CMD_NETDEV ",queues=%d " 931 + "-device virtio-net-pci,netdev=net0,mq=on,vectors=%d", 932 + 512, 512, s->chr_name, 933 + s->socket_path, "", s->chr_name, 934 + s->queues, s->queues * 2 + 2); 935 + } else { 936 + cmd = g_strdup_printf( 937 + QEMU_CMD_MEM QEMU_CMD_CHR QEMU_CMD_NETDEV ",queues=%d " 938 + "-device virtio-net-pci,netdev=net0,mq=on,vectors=%d", 
939 + 512, 512, root, s->chr_name, 940 + s->socket_path, "", s->chr_name, 941 + s->queues, s->queues * 2 + 2); 942 + } 910 943 qtest_start(cmd); 911 944 g_free(cmd); 912 945 ··· 952 985 /* run the main loop thread so the chardev may operate */ 953 986 thread = g_thread_new(NULL, thread_function, loop); 954 987 955 - qtest_add_func("/vhost-user/read-guest-mem", test_read_guest_mem); 988 + if (qemu_memfd_check()) { 989 + qtest_add_data_func("/vhost-user/read-guest-mem/memfd", 990 + GINT_TO_POINTER(TEST_MEMFD_YES), 991 + test_read_guest_mem); 992 + } 993 + qtest_add_data_func("/vhost-user/read-guest-mem/memfile", 994 + GINT_TO_POINTER(TEST_MEMFD_NO), test_read_guest_mem); 956 995 qtest_add_func("/vhost-user/migrate", test_migrate); 957 996 qtest_add_func("/vhost-user/multiqueue", test_multiqueue); 958 997 959 - #if VHOST_USER_NET_TESTS_WORKING && defined(CONFIG_HAS_GLIB_SUBPROCESS_TESTS) 960 - qtest_add_func("/vhost-user/reconnect/subprocess", 961 - test_reconnect_subprocess); 962 - qtest_add_func("/vhost-user/reconnect", test_reconnect); 963 - qtest_add_func("/vhost-user/connect-fail/subprocess", 964 - test_connect_fail_subprocess); 965 - qtest_add_func("/vhost-user/connect-fail", test_connect_fail); 966 - qtest_add_func("/vhost-user/flags-mismatch/subprocess", 967 - test_flags_mismatch_subprocess); 968 - qtest_add_func("/vhost-user/flags-mismatch", test_flags_mismatch); 998 + #if defined(CONFIG_HAS_GLIB_SUBPROCESS_TESTS) 999 + /* keeps failing on build-system since Aug 15 2017 */ 1000 + if (getenv("QTEST_VHOST_USER_FIXME")) { 1001 + qtest_add_func("/vhost-user/reconnect/subprocess", 1002 + test_reconnect_subprocess); 1003 + qtest_add_func("/vhost-user/reconnect", test_reconnect); 1004 + qtest_add_func("/vhost-user/connect-fail/subprocess", 1005 + test_connect_fail_subprocess); 1006 + qtest_add_func("/vhost-user/connect-fail", test_connect_fail); 1007 + qtest_add_func("/vhost-user/flags-mismatch/subprocess", 1008 + test_flags_mismatch_subprocess); 1009 + 
qtest_add_func("/vhost-user/flags-mismatch", test_flags_mismatch); 1010 + } 969 1011 #endif 970 1012 971 1013 ret = g_test_run();
+48
util/coroutine-ucontext.c
··· 31 31 #include <valgrind/valgrind.h> 32 32 #endif 33 33 34 + #if defined(__SANITIZE_ADDRESS__) || __has_feature(address_sanitizer) 35 + #ifdef CONFIG_ASAN_IFACE_FIBER 36 + #define CONFIG_ASAN 1 37 + #include <sanitizer/asan_interface.h> 38 + #endif 39 + #endif 40 + 34 41 typedef struct { 35 42 Coroutine base; 36 43 void *stack; ··· 59 66 int i[2]; 60 67 }; 61 68 69 + static void finish_switch_fiber(void *fake_stack_save) 70 + { 71 + #ifdef CONFIG_ASAN 72 + const void *bottom_old; 73 + size_t size_old; 74 + 75 + __sanitizer_finish_switch_fiber(fake_stack_save, &bottom_old, &size_old); 76 + 77 + if (!leader.stack) { 78 + leader.stack = (void *)bottom_old; 79 + leader.stack_size = size_old; 80 + } 81 + #endif 82 + } 83 + 84 + static void start_switch_fiber(void **fake_stack_save, 85 + const void *bottom, size_t size) 86 + { 87 + #ifdef CONFIG_ASAN 88 + __sanitizer_start_switch_fiber(fake_stack_save, bottom, size); 89 + #endif 90 + } 91 + 62 92 static void coroutine_trampoline(int i0, int i1) 63 93 { 64 94 union cc_arg arg; 65 95 CoroutineUContext *self; 66 96 Coroutine *co; 97 + void *fake_stack_save = NULL; 98 + 99 + finish_switch_fiber(NULL); 67 100 68 101 arg.i[0] = i0; 69 102 arg.i[1] = i1; ··· 72 105 73 106 /* Initialize longjmp environment and switch back the caller */ 74 107 if (!sigsetjmp(self->env, 0)) { 108 + start_switch_fiber(&fake_stack_save, 109 + leader.stack, leader.stack_size); 75 110 siglongjmp(*(sigjmp_buf *)co->entry_arg, 1); 76 111 } 77 112 113 + finish_switch_fiber(fake_stack_save); 114 + 78 115 while (true) { 79 116 co->entry(co->entry_arg); 80 117 qemu_coroutine_switch(co, co->caller, COROUTINE_TERMINATE); ··· 87 124 ucontext_t old_uc, uc; 88 125 sigjmp_buf old_env; 89 126 union cc_arg arg = {0}; 127 + void *fake_stack_save = NULL; 90 128 91 129 /* The ucontext functions preserve signal masks which incurs a 92 130 * system call overhead. 
sigsetjmp(buf, 0)/siglongjmp() does not ··· 122 160 123 161 /* swapcontext() in, siglongjmp() back out */ 124 162 if (!sigsetjmp(old_env, 0)) { 163 + start_switch_fiber(&fake_stack_save, co->stack, co->stack_size); 125 164 swapcontext(&old_uc, &uc); 126 165 } 166 + 167 + finish_switch_fiber(fake_stack_save); 168 + 127 169 return &co->base; 128 170 } 129 171 ··· 169 211 CoroutineUContext *from = DO_UPCAST(CoroutineUContext, base, from_); 170 212 CoroutineUContext *to = DO_UPCAST(CoroutineUContext, base, to_); 171 213 int ret; 214 + void *fake_stack_save = NULL; 172 215 173 216 current = to_; 174 217 175 218 ret = sigsetjmp(from->env, 0); 176 219 if (ret == 0) { 220 + start_switch_fiber(action == COROUTINE_TERMINATE ? 221 + NULL : &fake_stack_save, to->stack, to->stack_size); 177 222 siglongjmp(to->env, action); 178 223 } 224 + 225 + finish_switch_fiber(fake_stack_save); 226 + 179 227 return ret; 180 228 } 181 229
+51 -27
util/memfd.c
··· 27 27 28 28 #include "qemu/osdep.h" 29 29 30 + #include "qapi/error.h" 30 31 #include "qemu/memfd.h" 32 + #include "qemu/host-utils.h" 31 33 32 34 #if defined CONFIG_LINUX && !defined CONFIG_MEMFD 33 35 #include <sys/syscall.h> ··· 51 53 #define MFD_ALLOW_SEALING 0x0002U 52 54 #endif 53 55 54 - int qemu_memfd_create(const char *name, size_t size, unsigned int seals) 56 + #ifndef MFD_HUGETLB 57 + #define MFD_HUGETLB 0x0004U 58 + #endif 59 + 60 + #ifndef MFD_HUGE_SHIFT 61 + #define MFD_HUGE_SHIFT 26 62 + #endif 63 + 64 + int qemu_memfd_create(const char *name, size_t size, bool hugetlb, 65 + uint64_t hugetlbsize, unsigned int seals, Error **errp) 55 66 { 56 - int mfd = -1; 67 + int htsize = hugetlbsize ? ctz64(hugetlbsize) : 0; 68 + 69 + if (htsize && 1 << htsize != hugetlbsize) { 70 + error_setg(errp, "Hugepage size must be a power of 2"); 71 + return -1; 72 + } 73 + 74 + htsize = htsize << MFD_HUGE_SHIFT; 57 75 58 76 #ifdef CONFIG_LINUX 77 + int mfd = -1; 59 78 unsigned int flags = MFD_CLOEXEC; 60 79 61 80 if (seals) { 62 81 flags |= MFD_ALLOW_SEALING; 63 82 } 64 - 83 + if (hugetlb) { 84 + flags |= MFD_HUGETLB; 85 + flags |= htsize; 86 + } 65 87 mfd = memfd_create(name, flags); 66 88 if (mfd < 0) { 67 - return -1; 89 + goto err; 68 90 } 69 91 70 92 if (ftruncate(mfd, size) == -1) { 71 - perror("ftruncate"); 72 - close(mfd); 73 - return -1; 93 + goto err; 74 94 } 75 95 76 96 if (seals && fcntl(mfd, F_ADD_SEALS, seals) == -1) { 77 - perror("fcntl"); 78 - close(mfd); 79 - return -1; 97 + goto err; 80 98 } 81 - #endif 82 99 83 100 return mfd; 101 + 102 + err: 103 + if (mfd >= 0) { 104 + close(mfd); 105 + } 106 + #endif 107 + error_setg_errno(errp, errno, "failed to create memfd"); 108 + return -1; 84 109 } 85 110 86 111 /* ··· 90 115 * sealing. 
91 116 */ 92 117 void *qemu_memfd_alloc(const char *name, size_t size, unsigned int seals, 93 - int *fd) 118 + int *fd, Error **errp) 94 119 { 95 120 void *ptr; 96 - int mfd = qemu_memfd_create(name, size, seals); 121 + int mfd = qemu_memfd_create(name, size, false, 0, seals, NULL); 97 122 98 123 /* some systems have memfd without sealing */ 99 124 if (mfd == -1) { 100 - mfd = qemu_memfd_create(name, size, 0); 125 + mfd = qemu_memfd_create(name, size, false, 0, 0, NULL); 101 126 } 102 127 103 128 if (mfd == -1) { ··· 109 134 unlink(fname); 110 135 g_free(fname); 111 136 112 - if (mfd == -1) { 113 - perror("mkstemp"); 114 - return NULL; 115 - } 116 - 117 - if (ftruncate(mfd, size) == -1) { 118 - perror("ftruncate"); 119 - close(mfd); 120 - return NULL; 137 + if (mfd == -1 || 138 + ftruncate(mfd, size) == -1) { 139 + goto err; 121 140 } 122 141 } 123 142 124 143 ptr = mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, mfd, 0); 125 144 if (ptr == MAP_FAILED) { 126 - perror("mmap"); 127 - close(mfd); 128 - return NULL; 145 + goto err; 129 146 } 130 147 131 148 *fd = mfd; 132 149 return ptr; 150 + 151 + err: 152 + error_setg_errno(errp, errno, "failed to allocate shared memory"); 153 + if (mfd >= 0) { 154 + close(mfd); 155 + } 156 + return NULL; 133 157 } 134 158 135 159 void qemu_memfd_free(void *ptr, size_t size, int fd) ··· 157 181 int fd; 158 182 void *ptr; 159 183 160 - ptr = qemu_memfd_alloc("test", 4096, 0, &fd); 184 + ptr = qemu_memfd_alloc("test", 4096, 0, &fd, NULL); 161 185 memfd_check = ptr ? MEMFD_OK : MEMFD_KO; 162 186 qemu_memfd_free(ptr, 4096, fd); 163 187 }
+40 -4
util/qemu-sockets.c
··· 554 554 } 555 555 556 556 /* compatibility wrapper */ 557 + static int inet_parse_flag(const char *flagname, const char *optstr, bool *val, 558 + Error **errp) 559 + { 560 + char *end; 561 + size_t len; 562 + 563 + end = strstr(optstr, ","); 564 + if (end) { 565 + if (end[1] == ',') { /* Reject 'ipv6=on,,foo' */ 566 + error_setg(errp, "error parsing '%s' flag '%s'", flagname, optstr); 567 + return -1; 568 + } 569 + len = end - optstr; 570 + } else { 571 + len = strlen(optstr); 572 + } 573 + if (len == 0 || (len == 3 && strncmp(optstr, "=on", len) == 0)) { 574 + *val = true; 575 + } else if (len == 4 && strncmp(optstr, "=off", len) == 0) { 576 + *val = false; 577 + } else { 578 + error_setg(errp, "error parsing '%s' flag '%s'", flagname, optstr); 579 + return -1; 580 + } 581 + return 0; 582 + } 583 + 557 584 int inet_parse(InetSocketAddress *addr, const char *str, Error **errp) 558 585 { 559 586 const char *optstr, *h; ··· 561 588 char port[33]; 562 589 int to; 563 590 int pos; 591 + char *begin; 564 592 565 593 memset(addr, 0, sizeof(*addr)); 566 594 ··· 602 630 addr->has_to = true; 603 631 addr->to = to; 604 632 } 605 - if (strstr(optstr, ",ipv4")) { 606 - addr->ipv4 = addr->has_ipv4 = true; 633 + begin = strstr(optstr, ",ipv4"); 634 + if (begin) { 635 + if (inet_parse_flag("ipv4", begin + 5, &addr->ipv4, errp) < 0) { 636 + return -1; 637 + } 638 + addr->has_ipv4 = true; 607 639 } 608 - if (strstr(optstr, ",ipv6")) { 609 - addr->ipv6 = addr->has_ipv6 = true; 640 + begin = strstr(optstr, ",ipv6"); 641 + if (begin) { 642 + if (inet_parse_flag("ipv6", begin + 5, &addr->ipv6, errp) < 0) { 643 + return -1; 644 + } 645 + addr->has_ipv6 = true; 610 646 } 611 647 return 0; 612 648 }
-3
util/readline.c
··· 510 510 for (i = 0; i < READLINE_MAX_CMDS; i++) { 511 511 g_free(rs->history[i]); 512 512 } 513 - for (i = 0; i < READLINE_MAX_COMPLETIONS; i++) { 514 - g_free(rs->completions[i]); 515 - } 516 513 g_free(rs); 517 514 } 518 515