qemu with hax to log dma reads & writes jcs.org/2018/11/12/vfio

Merge remote-tracking branch 'remotes/bonzini/tags/for-upstream' into staging

* Bugfixes all over the place
* get/set_uint cleanups (Felipe)
* Lock guard support (Stefan)
* MemoryRegion ownership cleanup (Philippe)
* AVX512 optimization for buffer_is_zero (Robert)

# gpg: Signature made Tue 17 Mar 2020 15:01:54 GMT
# gpg: using RSA key BFFBD25F78C7AE83
# gpg: Good signature from "Paolo Bonzini <bonzini@gnu.org>" [full]
# gpg: aka "Paolo Bonzini <pbonzini@redhat.com>" [full]
# Primary key fingerprint: 46F5 9FBD 57D6 12E7 BFD4 E2F7 7E15 100C CD36 69B1
# Subkey fingerprint: F133 3857 4B66 2389 866C 7682 BFFB D25F 78C7 AE83

* remotes/bonzini/tags/for-upstream: (62 commits)
hw/arm: Let devices own the MemoryRegion they create
hw/arm: Remove unnecessary memory_region_set_readonly() on ROM alias
hw/ppc/ppc405: Use memory_region_init_rom() with read-only regions
hw/arm/stm32: Use memory_region_init_rom() with read-only regions
hw/char: Let devices own the MemoryRegion they create
hw/riscv: Let devices own the MemoryRegion they create
hw/dma: Let devices own the MemoryRegion they create
hw/display: Let devices own the MemoryRegion they create
hw/core: Let devices own the MemoryRegion they create
scripts/cocci: Patch to let devices own their MemoryRegions
scripts/cocci: Patch to remove unnecessary memory_region_set_readonly()
scripts/cocci: Patch to detect potential use of memory_region_init_rom
hw/sparc: Use memory_region_init_rom() with read-only regions
hw/sh4: Use memory_region_init_rom() with read-only regions
hw/riscv: Use memory_region_init_rom() with read-only regions
hw/ppc: Use memory_region_init_rom() with read-only regions
hw/pci-host: Use memory_region_init_rom() with read-only regions
hw/net: Use memory_region_init_rom() with read-only regions
hw/m68k: Use memory_region_init_rom() with read-only regions
hw/display: Use memory_region_init_rom() with read-only regions
...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>

+1160 -766
+12
MAINTAINERS
··· 437 437 F: include/hw/xen/ 438 438 F: include/sysemu/xen-mapcache.h 439 439 440 + Guest CPU Cores (HAXM) 441 + --------------------- 442 + X86 HAXM CPUs 443 + M: Wenchao Wang <wenchao.wang@intel.com> 444 + M: Colin Xu <colin.xu@intel.com> 445 + L: haxm-team@intel.com 446 + W: https://github.com/intel/haxm/issues 447 + S: Maintained 448 + F: include/sysemu/hax.h 449 + F: target/i386/hax-* 450 + 440 451 Hosts 441 452 ----- 442 453 LINUX ··· 2040 2051 F: memory.c 2041 2052 F: include/exec/memory-internal.h 2042 2053 F: exec.c 2054 + F: scripts/coccinelle/memory-region-housekeeping.cocci 2043 2055 2044 2056 SPICE 2045 2057 M: Gerd Hoffmann <kraxel@redhat.com>
+28 -21
Makefile
··· 1235 1235 include $(SRC_PATH)/tests/docker/Makefile.include 1236 1236 include $(SRC_PATH)/tests/vm/Makefile.include 1237 1237 1238 + print-help-run = printf " %-30s - %s\\n" "$1" "$2" 1239 + print-help = $(quiet-@)$(call print-help-run,$1,$2) 1240 + 1238 1241 .PHONY: help 1239 1242 help: 1240 1243 @echo 'Generic targets:' 1241 - @echo ' all - Build all' 1244 + $(call print-help,all,Build all) 1242 1245 ifdef CONFIG_MODULES 1243 - @echo ' modules - Build all modules' 1246 + $(call print-help,modules,Build all modules) 1244 1247 endif 1245 - @echo ' dir/file.o - Build specified target only' 1246 - @echo ' install - Install QEMU, documentation and tools' 1247 - @echo ' ctags/TAGS - Generate tags file for editors' 1248 - @echo ' cscope - Generate cscope index' 1248 + $(call print-help,dir/file.o,Build specified target only) 1249 + $(call print-help,install,Install QEMU, documentation and tools) 1250 + $(call print-help,ctags/TAGS,Generate tags file for editors) 1251 + $(call print-help,cscope,Generate cscope index) 1249 1252 @echo '' 1250 1253 @$(if $(TARGET_DIRS), \ 1251 1254 echo 'Architecture specific targets:'; \ 1252 1255 $(foreach t, $(TARGET_DIRS), \ 1253 - printf " %-30s - Build for %s\\n" $(t)/all $(t);) \ 1256 + $(call print-help-run,$(t)/all,Build for $(t));) \ 1257 + echo '') 1258 + @$(if $(TOOLS), \ 1259 + echo 'Tools targets:'; \ 1260 + $(foreach t, $(TOOLS), \ 1261 + $(call print-help-run,$(t),Build $(shell basename $(t)) tool);) \ 1254 1262 echo '') 1255 1263 @echo 'Cleaning targets:' 1256 - @echo ' clean - Remove most generated files but keep the config' 1264 + $(call print-help,clean,Remove most generated files but keep the config) 1257 1265 ifdef CONFIG_GCOV 1258 - @echo ' clean-coverage - Remove coverage files' 1266 + $(call print-help,clean-coverage,Remove coverage files) 1259 1267 endif 1260 - @echo ' distclean - Remove all generated files' 1261 - @echo ' dist - Build a distributable tarball' 1268 + $(call print-help,distclean,Remove all 
generated files) 1269 + $(call print-help,dist,Build a distributable tarball) 1262 1270 @echo '' 1263 1271 @echo 'Test targets:' 1264 - @echo ' check - Run all tests (check-help for details)' 1265 - @echo ' docker - Help about targets running tests inside containers' 1266 - @echo ' vm-help - Help about targets running tests inside VM' 1272 + $(call print-help,check,Run all tests (check-help for details)) 1273 + $(call print-help,docker,Help about targets running tests inside containers) 1274 + $(call print-help,vm-help,Help about targets running tests inside VM) 1267 1275 @echo '' 1268 1276 @echo 'Documentation targets:' 1269 - @echo ' html info pdf txt' 1270 - @echo ' - Build documentation in specified format' 1277 + $(call print-help,html info pdf txt,Build documentation in specified format) 1271 1278 ifdef CONFIG_GCOV 1272 - @echo ' coverage-report - Create code coverage report' 1279 + $(call print-help,coverage-report,Create code coverage report) 1273 1280 endif 1274 1281 @echo '' 1275 1282 ifdef CONFIG_WIN32 1276 1283 @echo 'Windows targets:' 1277 - @echo ' installer - Build NSIS-based installer for QEMU' 1284 + $(call print-help,installer,Build NSIS-based installer for QEMU) 1278 1285 ifdef QEMU_GA_MSI_ENABLED 1279 - @echo ' msi - Build MSI-based installer for qemu-ga' 1286 + $(call print-help,msi,Build MSI-based installer for qemu-ga) 1280 1287 endif 1281 1288 @echo '' 1282 1289 endif 1283 - @echo ' $(MAKE) [targets] (quiet build, default)' 1284 - @echo ' $(MAKE) V=1 [targets] (verbose build)' 1290 + $(call print-help,$(MAKE) [targets],(quiet build, default)) 1291 + $(call print-help,$(MAKE) V=1 [targets],(verbose build))
+1 -1
Makefile.target
··· 12 12 13 13 $(call set-vpath, $(SRC_PATH):$(BUILD_DIR)) 14 14 ifdef CONFIG_LINUX 15 - QEMU_CFLAGS += -I../linux-headers 15 + QEMU_CFLAGS += -isystem ../linux-headers 16 16 endif 17 17 QEMU_CFLAGS += -iquote .. -iquote $(SRC_PATH)/target/$(TARGET_BASE_ARCH) -DNEED_CPU_H 18 18
+1 -1
block/linux-aio.c
··· 121 121 unsigned incompat_features; 122 122 unsigned header_length; /* size of aio_ring */ 123 123 124 - struct io_event io_events[0]; 124 + struct io_event io_events[]; 125 125 }; 126 126 127 127 /**
+1 -1
block/vmdk.c
··· 187 187 typedef struct VmdkGrainMarker { 188 188 uint64_t lba; 189 189 uint32_t size; 190 - uint8_t data[0]; 190 + uint8_t data[]; 191 191 } QEMU_PACKED VmdkGrainMarker; 192 192 193 193 enum {
+1 -1
bsd-user/qemu.h
··· 95 95 struct sigqueue *first_free; /* first free siginfo queue entry */ 96 96 int signal_pending; /* non zero if a signal may be pending */ 97 97 98 - uint8_t stack[0]; 98 + uint8_t stack[]; 99 99 } __attribute__((aligned(16))) TaskState; 100 100 101 101 void init_task_state(TaskState *ts);
+60 -2
configure
··· 405 405 DSOSUF=".so" 406 406 LDFLAGS_SHARED="-shared" 407 407 modules="no" 408 + module_upgrades="no" 408 409 prefix="/usr/local" 409 410 mandir="\${prefix}/share/man" 410 411 datadir="\${prefix}/share" ··· 899 900 linux="yes" 900 901 linux_user="yes" 901 902 kvm="yes" 902 - QEMU_INCLUDES="-I\$(SRC_PATH)/linux-headers -I$PWD/linux-headers $QEMU_INCLUDES" 903 + QEMU_INCLUDES="-isystem \$(SRC_PATH)/linux-headers -isystem $PWD/linux-headers $QEMU_INCLUDES" 903 904 supported_os="yes" 904 905 libudev="yes" 905 906 ;; ··· 1032 1033 --disable-modules) 1033 1034 modules="no" 1034 1035 ;; 1036 + --disable-module-upgrades) module_upgrades="no" 1037 + ;; 1038 + --enable-module-upgrades) module_upgrades="yes" 1039 + ;; 1035 1040 --cpu=*) 1036 1041 ;; 1037 1042 --target-list=*) target_list="$optarg" ··· 1421 1426 ;; 1422 1427 --enable-avx2) avx2_opt="yes" 1423 1428 ;; 1429 + --disable-avx512f) avx512f_opt="no" 1430 + ;; 1431 + --enable-avx512f) avx512f_opt="yes" 1432 + ;; 1433 + 1424 1434 --enable-glusterfs) glusterfs="yes" 1425 1435 ;; 1426 1436 --disable-virtio-blk-data-plane|--enable-virtio-blk-data-plane) ··· 1786 1796 guest-agent-msi build guest agent Windows MSI installation package 1787 1797 pie Position Independent Executables 1788 1798 modules modules support (non-Windows) 1799 + module-upgrades try to load modules from alternate paths for upgrades 1789 1800 debug-tcg TCG debugging (default is disabled) 1790 1801 debug-info debugging information 1791 1802 sparse sparse checker ··· 1857 1868 tcmalloc tcmalloc support 1858 1869 jemalloc jemalloc support 1859 1870 avx2 AVX2 optimization support 1871 + avx512f AVX512F optimization support 1860 1872 replication replication support 1861 1873 opengl opengl support 1862 1874 virglrenderer virgl rendering support ··· 2049 2061 error_exit "Modules are not available for Windows" 2050 2062 fi 2051 2063 2064 + # module_upgrades is only reasonable if modules are enabled 2065 + if test "$modules" = "no" && test "$module_upgrades" 
= "yes" ; then 2066 + error_exit "Can't enable module-upgrades as Modules are not enabled" 2067 + fi 2068 + 2052 2069 # Static linking is not possible with modules or PIE 2053 2070 if test "$static" = "yes" ; then 2054 2071 if test "$modules" = "yes" ; then ··· 3350 3367 int main(void) { sasl_server_init(NULL, "qemu"); return 0; } 3351 3368 EOF 3352 3369 # Assuming Cyrus-SASL installed in /usr prefix 3353 - vnc_sasl_cflags="" 3370 + # QEMU defines struct iovec in "qemu/osdep.h", 3371 + # we don't want libsasl to redefine it in <sasl/sasl.h>. 3372 + vnc_sasl_cflags="-DSTRUCT_IOVEC_DEFINED" 3354 3373 vnc_sasl_libs="-lsasl2" 3355 3374 if compile_prog "$vnc_sasl_cflags" "$vnc_sasl_libs" ; then 3356 3375 vnc_sasl=yes ··· 5574 5593 fi 5575 5594 fi 5576 5595 5596 + ########################################## 5597 + # avx512f optimization requirement check 5598 + # 5599 + # There is no point enabling this if cpuid.h is not usable, 5600 + # since we won't be able to select the new routines. 5601 + # by default, it is turned off. 5602 + # if user explicitly want to enable it, check environment 5603 + 5604 + if test "$cpuid_h" = "yes" && test "$avx512f_opt" = "yes"; then 5605 + cat > $TMPC << EOF 5606 + #pragma GCC push_options 5607 + #pragma GCC target("avx512f") 5608 + #include <cpuid.h> 5609 + #include <immintrin.h> 5610 + static int bar(void *a) { 5611 + __m512i x = *(__m512i *)a; 5612 + return _mm512_test_epi64_mask(x, x); 5613 + } 5614 + int main(int argc, char *argv[]) 5615 + { 5616 + return bar(argv[0]); 5617 + } 5618 + EOF 5619 + if ! compile_object "" ; then 5620 + avx512f_opt="no" 5621 + fi 5622 + else 5623 + avx512f_opt="no" 5624 + fi 5625 + 5577 5626 ######################################## 5578 5627 # check if __[u]int128_t is usable. 
5579 5628 ··· 6590 6639 echo "smbd $smbd" 6591 6640 fi 6592 6641 echo "module support $modules" 6642 + echo "alt path mod load $module_upgrades" 6593 6643 echo "host CPU $cpu" 6594 6644 echo "host big endian $bigendian" 6595 6645 echo "target list $target_list" ··· 6717 6767 echo "tcmalloc support $tcmalloc" 6718 6768 echo "jemalloc support $jemalloc" 6719 6769 echo "avx2 optimization $avx2_opt" 6770 + echo "avx512f optimization $avx512f_opt" 6720 6771 echo "replication support $replication" 6721 6772 echo "VxHS block device $vxhs" 6722 6773 echo "bochs support $bochs" ··· 6942 6993 # like as an symbol. So prefix it with an underscore 6943 6994 echo "CONFIG_STAMP=_$( (echo $qemu_version; echo $pkgversion; cat $0) | $shacmd - | cut -f1 -d\ )" >> $config_host_mak 6944 6995 echo "CONFIG_MODULES=y" >> $config_host_mak 6996 + fi 6997 + if test "$module_upgrades" = "yes"; then 6998 + echo "CONFIG_MODULE_UPGRADES=y" >> $config_host_mak 6945 6999 fi 6946 7000 if test "$have_x11" = "yes" && test "$need_x11" = "yes"; then 6947 7001 echo "CONFIG_X11=y" >> $config_host_mak ··· 7266 7320 7267 7321 if test "$avx2_opt" = "yes" ; then 7268 7322 echo "CONFIG_AVX2_OPT=y" >> $config_host_mak 7323 + fi 7324 + 7325 + if test "$avx512f_opt" = "yes" ; then 7326 + echo "CONFIG_AVX512F_OPT=y" >> $config_host_mak 7269 7327 fi 7270 7328 7271 7329 if test "$lzo" = "yes" ; then
+1 -1
contrib/libvhost-user/libvhost-user.h
··· 286 286 uint16_t used_idx; 287 287 288 288 /* Used to track the state of each descriptor in descriptor table */ 289 - VuDescStateSplit desc[0]; 289 + VuDescStateSplit desc[]; 290 290 } VuVirtqInflight; 291 291 292 292 typedef struct VuVirtqInflightDesc {
+3 -3
contrib/vhost-user-gpu/Makefile.objs
··· 1 - vhost-user-gpu-obj-y = main.o virgl.o vugbm.o 1 + vhost-user-gpu-obj-y = vhost-user-gpu.o virgl.o vugbm.o 2 2 3 - main.o-cflags := $(PIXMAN_CFLAGS) $(GBM_CFLAGS) 4 - main.o-libs := $(PIXMAN_LIBS) 3 + vhost-user-gpu.o-cflags := $(PIXMAN_CFLAGS) $(GBM_CFLAGS) 4 + vhost-user-gpu.o-libs := $(PIXMAN_LIBS) 5 5 6 6 virgl.o-cflags := $(VIRGL_CFLAGS) $(GBM_CFLAGS) 7 7 virgl.o-libs := $(VIRGL_LIBS)
contrib/vhost-user-gpu/main.c contrib/vhost-user-gpu/vhost-user-gpu.c
+5 -1
cpus.c
··· 1026 1026 int ret = 0; 1027 1027 1028 1028 if (runstate_is_running()) { 1029 + runstate_set(state); 1029 1030 cpu_disable_ticks(); 1030 1031 pause_all_vcpus(); 1031 - runstate_set(state); 1032 1032 vm_state_notify(0, state); 1033 1033 if (send_stop) { 1034 1034 qapi_event_send_stop(); ··· 1898 1898 void resume_all_vcpus(void) 1899 1899 { 1900 1900 CPUState *cpu; 1901 + 1902 + if (!runstate_is_running()) { 1903 + return; 1904 + } 1901 1905 1902 1906 qemu_clock_enable(QEMU_CLOCK_VIRTUAL, true); 1903 1907 CPU_FOREACH(cpu) {
+2 -2
docs/interop/vhost-user.rst
··· 568 568 uint16_t used_idx; 569 569 570 570 /* Used to track the state of each descriptor in descriptor table */ 571 - DescStateSplit desc[0]; 571 + DescStateSplit desc[]; 572 572 } QueueRegionSplit; 573 573 574 574 To track inflight I/O, the queue region should be processed as follows: ··· 690 690 uint8_t padding[7]; 691 691 692 692 /* Used to track the state of each descriptor fetched from descriptor ring */ 693 - DescStatePacked desc[0]; 693 + DescStatePacked desc[]; 694 694 } QueueRegionPacked; 695 695 696 696 To track inflight I/O, the queue region should be processed as follows:
+55 -2
docs/system/cpu-models-x86.rst.inc
··· 49 49 compatibility is required, use the newest CPU model that is compatible 50 50 across all desired hosts. 51 51 52 - ``Skylake-Server``, ``Skylake-Server-IBRS`` 52 + ``Cascadelake-Server``, ``Cascadelake-Server-noTSX`` 53 + Intel Xeon Processor (Cascade Lake, 2019), with "stepping" levels 6 54 + or 7 only. (The Cascade Lake Xeon processor with *stepping 5 is 55 + vulnerable to MDS variants*.) 56 + 57 + ``Skylake-Server``, ``Skylake-Server-IBRS``, ``Skylake-Server-IBRS-noTSX`` 53 58 Intel Xeon Processor (Skylake, 2016) 54 59 55 - ``Skylake-Client``, ``Skylake-Client-IBRS`` 60 + ``Skylake-Client``, ``Skylake-Client-IBRS``, ``Skylake-Client-noTSX-IBRS}`` 56 61 Intel Core Processor (Skylake, 2015) 57 62 58 63 ``Broadwell``, ``Broadwell-IBRS``, ``Broadwell-noTSX``, ``Broadwell-noTSX-IBRS`` ··· 147 152 148 153 Requires the host CPU microcode to support this feature before it 149 154 can be used for guest CPUs. 155 + 156 + ``mds-no`` 157 + Recommended to inform the guest OS that the host is *not* vulnerable 158 + to any of the MDS variants ([MFBDS] CVE-2018-12130, [MLPDS] 159 + CVE-2018-12127, [MSBDS] CVE-2018-12126). 160 + 161 + This is an MSR (Model-Specific Register) feature rather than a CPUID feature, 162 + so it will not appear in the Linux ``/proc/cpuinfo`` in the host or 163 + guest. Instead, the host kernel uses it to populate the MDS 164 + vulnerability file in ``sysfs``. 165 + 166 + So it should only be enabled for VMs if the host reports @code{Not 167 + affected} in the ``/sys/devices/system/cpu/vulnerabilities/mds`` file. 168 + 169 + ``taa-no`` 170 + Recommended to inform that the guest that the host is ``not`` 171 + vulnerable to CVE-2019-11135, TSX Asynchronous Abort (TAA). 172 + 173 + This too is an MSR feature, so it does not show up in the Linux 174 + ``/proc/cpuinfo`` in the host or guest. 
175 + 176 + It should only be enabled for VMs if the host reports ``Not affected`` 177 + in the ``/sys/devices/system/cpu/vulnerabilities/tsx_async_abort`` 178 + file. 179 + 180 + ``tsx-ctrl`` 181 + Recommended to inform the guest that it can disable the Intel TSX 182 + (Transactional Synchronization Extensions) feature; or, if the 183 + processor is vulnerable, use the Intel VERW instruction (a 184 + processor-level instruction that performs checks on memory access) as 185 + a mitigation for the TAA vulnerability. (For details, refer to 186 + Intel's `deep dive into MDS 187 + <https://software.intel.com/security-software-guidance/insights/deep-dive-intel-analysis-microarchitectural-data-sampling>`_.) 188 + 189 + Expose this to the guest OS if and only if: (a) the host has TSX 190 + enabled; *and* (b) the guest has ``rtm`` CPU flag enabled. 191 + 192 + By disabling TSX, KVM-based guests can avoid paying the price of 193 + mitigating TSX-based attacks. 194 + 195 + Note that ``tsx-ctrl`` too is an MSR feature, so it does not show 196 + up in the Linux ``/proc/cpuinfo`` in the host or guest. 197 + 198 + To validate that Intel TSX is indeed disabled for the guest, there are 199 + two ways: (a) check for the *absence* of ``rtm`` in the guest's 200 + ``/proc/cpuinfo``; or (b) the 201 + ``/sys/devices/system/cpu/vulnerabilities/tsx_async_abort`` file in 202 + the guest should report ``Mitigation: TSX disabled``. 150 203 151 204 152 205 Preferred CPU models for AMD x86 hosts
+5 -4
exec.c
··· 1315 1315 unsigned client) 1316 1316 { 1317 1317 DirtyMemoryBlocks *blocks; 1318 - unsigned long end, page; 1318 + unsigned long end, page, start_page; 1319 1319 bool dirty = false; 1320 1320 RAMBlock *ramblock; 1321 1321 uint64_t mr_offset, mr_size; ··· 1325 1325 } 1326 1326 1327 1327 end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS; 1328 - page = start >> TARGET_PAGE_BITS; 1328 + start_page = start >> TARGET_PAGE_BITS; 1329 + page = start_page; 1329 1330 1330 1331 WITH_RCU_READ_LOCK_GUARD() { 1331 1332 blocks = atomic_rcu_read(&ram_list.dirty_memory[client]); ··· 1345 1346 page += num; 1346 1347 } 1347 1348 1348 - mr_offset = (ram_addr_t)(page << TARGET_PAGE_BITS) - ramblock->offset; 1349 - mr_size = (end - page) << TARGET_PAGE_BITS; 1349 + mr_offset = (ram_addr_t)(start_page << TARGET_PAGE_BITS) - ramblock->offset; 1350 + mr_size = (end - start_page) << TARGET_PAGE_BITS; 1350 1351 memory_region_clear_dirty_bitmap(ramblock->mr, mr_offset, mr_size); 1351 1352 } 1352 1353
+10 -89
hw/acpi/ich9.c
··· 357 357 s->pm.cpu_hotplug_legacy = value; 358 358 } 359 359 360 - static void ich9_pm_get_disable_s3(Object *obj, Visitor *v, const char *name, 361 - void *opaque, Error **errp) 362 - { 363 - ICH9LPCPMRegs *pm = opaque; 364 - uint8_t value = pm->disable_s3; 365 - 366 - visit_type_uint8(v, name, &value, errp); 367 - } 368 - 369 - static void ich9_pm_set_disable_s3(Object *obj, Visitor *v, const char *name, 370 - void *opaque, Error **errp) 371 - { 372 - ICH9LPCPMRegs *pm = opaque; 373 - Error *local_err = NULL; 374 - uint8_t value; 375 - 376 - visit_type_uint8(v, name, &value, &local_err); 377 - if (local_err) { 378 - goto out; 379 - } 380 - pm->disable_s3 = value; 381 - out: 382 - error_propagate(errp, local_err); 383 - } 384 - 385 - static void ich9_pm_get_disable_s4(Object *obj, Visitor *v, const char *name, 386 - void *opaque, Error **errp) 387 - { 388 - ICH9LPCPMRegs *pm = opaque; 389 - uint8_t value = pm->disable_s4; 390 - 391 - visit_type_uint8(v, name, &value, errp); 392 - } 393 - 394 - static void ich9_pm_set_disable_s4(Object *obj, Visitor *v, const char *name, 395 - void *opaque, Error **errp) 396 - { 397 - ICH9LPCPMRegs *pm = opaque; 398 - Error *local_err = NULL; 399 - uint8_t value; 400 - 401 - visit_type_uint8(v, name, &value, &local_err); 402 - if (local_err) { 403 - goto out; 404 - } 405 - pm->disable_s4 = value; 406 - out: 407 - error_propagate(errp, local_err); 408 - } 409 - 410 - static void ich9_pm_get_s4_val(Object *obj, Visitor *v, const char *name, 411 - void *opaque, Error **errp) 412 - { 413 - ICH9LPCPMRegs *pm = opaque; 414 - uint8_t value = pm->s4_val; 415 - 416 - visit_type_uint8(v, name, &value, errp); 417 - } 418 - 419 - static void ich9_pm_set_s4_val(Object *obj, Visitor *v, const char *name, 420 - void *opaque, Error **errp) 421 - { 422 - ICH9LPCPMRegs *pm = opaque; 423 - Error *local_err = NULL; 424 - uint8_t value; 425 - 426 - visit_type_uint8(v, name, &value, &local_err); 427 - if (local_err) { 428 - goto out; 429 - } 430 - 
pm->s4_val = value; 431 - out: 432 - error_propagate(errp, local_err); 433 - } 434 - 435 360 static bool ich9_pm_get_enable_tco(Object *obj, Error **errp) 436 361 { 437 362 ICH9LPCState *s = ICH9_LPC_DEVICE(obj); ··· 454 379 pm->s4_val = 2; 455 380 456 381 object_property_add_uint32_ptr(obj, ACPI_PM_PROP_PM_IO_BASE, 457 - &pm->pm_io_base, errp); 382 + &pm->pm_io_base, OBJ_PROP_FLAG_READ, errp); 458 383 object_property_add(obj, ACPI_PM_PROP_GPE0_BLK, "uint32", 459 384 ich9_pm_get_gpe0_blk, 460 385 NULL, NULL, pm, NULL); 461 386 object_property_add_uint32_ptr(obj, ACPI_PM_PROP_GPE0_BLK_LEN, 462 - &gpe0_len, errp); 387 + &gpe0_len, OBJ_PROP_FLAG_READ, errp); 463 388 object_property_add_bool(obj, "memory-hotplug-support", 464 389 ich9_pm_get_memory_hotplug_support, 465 390 ich9_pm_set_memory_hotplug_support, ··· 468 393 ich9_pm_get_cpu_hotplug_legacy, 469 394 ich9_pm_set_cpu_hotplug_legacy, 470 395 NULL); 471 - object_property_add(obj, ACPI_PM_PROP_S3_DISABLED, "uint8", 472 - ich9_pm_get_disable_s3, 473 - ich9_pm_set_disable_s3, 474 - NULL, pm, NULL); 475 - object_property_add(obj, ACPI_PM_PROP_S4_DISABLED, "uint8", 476 - ich9_pm_get_disable_s4, 477 - ich9_pm_set_disable_s4, 478 - NULL, pm, NULL); 479 - object_property_add(obj, ACPI_PM_PROP_S4_VAL, "uint8", 480 - ich9_pm_get_s4_val, 481 - ich9_pm_set_s4_val, 482 - NULL, pm, NULL); 396 + object_property_add_uint8_ptr(obj, ACPI_PM_PROP_S3_DISABLED, 397 + &pm->disable_s3, OBJ_PROP_FLAG_READWRITE, 398 + NULL); 399 + object_property_add_uint8_ptr(obj, ACPI_PM_PROP_S4_DISABLED, 400 + &pm->disable_s4, OBJ_PROP_FLAG_READWRITE, 401 + NULL); 402 + object_property_add_uint8_ptr(obj, ACPI_PM_PROP_S4_VAL, 403 + &pm->s4_val, OBJ_PROP_FLAG_READWRITE, NULL); 483 404 object_property_add_bool(obj, ACPI_PM_PROP_TCO_ENABLED, 484 405 ich9_pm_get_enable_tco, 485 406 ich9_pm_set_enable_tco,
+3 -3
hw/acpi/nvdimm.c
··· 485 485 /* the size of buffer filled by QEMU. */ 486 486 uint32_t len; 487 487 uint32_t func_ret_status; /* return status code. */ 488 - uint8_t out_buf[0]; /* the data got via Get Namesapce Label function. */ 488 + uint8_t out_buf[]; /* the data got via Get Namesapce Label function. */ 489 489 } QEMU_PACKED; 490 490 typedef struct NvdimmFuncGetLabelDataOut NvdimmFuncGetLabelDataOut; 491 491 QEMU_BUILD_BUG_ON(sizeof(NvdimmFuncGetLabelDataOut) > NVDIMM_DSM_MEMORY_SIZE); ··· 493 493 struct NvdimmFuncSetLabelDataIn { 494 494 uint32_t offset; /* the offset in the namespace label data area. */ 495 495 uint32_t length; /* the size of data is to be written via the function. */ 496 - uint8_t in_buf[0]; /* the data written to label data area. */ 496 + uint8_t in_buf[]; /* the data written to label data area. */ 497 497 } QEMU_PACKED; 498 498 typedef struct NvdimmFuncSetLabelDataIn NvdimmFuncSetLabelDataIn; 499 499 QEMU_BUILD_BUG_ON(sizeof(NvdimmFuncSetLabelDataIn) + ··· 510 510 /* the size of buffer filled by QEMU. */ 511 511 uint32_t len; 512 512 uint32_t func_ret_status; /* return status code. */ 513 - uint8_t fit[0]; /* the FIT data. */ 513 + uint8_t fit[]; /* the FIT data. */ 514 514 } QEMU_PACKED; 515 515 typedef struct NvdimmFuncReadFITOut NvdimmFuncReadFITOut; 516 516 QEMU_BUILD_BUG_ON(sizeof(NvdimmFuncReadFITOut) > NVDIMM_DSM_MEMORY_SIZE);
+4 -3
hw/acpi/pcihp.c
··· 80 80 81 81 *bus_bsel = (*bsel_alloc)++; 82 82 object_property_add_uint32_ptr(OBJECT(bus), ACPI_PCIHP_PROP_BSEL, 83 - bus_bsel, &error_abort); 83 + bus_bsel, OBJ_PROP_FLAG_READ, 84 + &error_abort); 84 85 } 85 86 86 87 return bsel_alloc; ··· 373 374 memory_region_add_subregion(address_space_io, s->io_base, &s->io); 374 375 375 376 object_property_add_uint16_ptr(owner, ACPI_PCIHP_IO_BASE_PROP, &s->io_base, 376 - &error_abort); 377 + OBJ_PROP_FLAG_READ, &error_abort); 377 378 object_property_add_uint16_ptr(owner, ACPI_PCIHP_IO_LEN_PROP, &s->io_len, 378 - &error_abort); 379 + OBJ_PROP_FLAG_READ, &error_abort); 379 380 } 380 381 381 382 const VMStateDescription vmstate_acpi_pcihp_pci_status = {
+6 -6
hw/acpi/piix4.c
··· 444 444 static const uint16_t sci_int = 9; 445 445 446 446 object_property_add_uint8_ptr(OBJECT(s), ACPI_PM_PROP_ACPI_ENABLE_CMD, 447 - &acpi_enable_cmd, NULL); 447 + &acpi_enable_cmd, OBJ_PROP_FLAG_READ, NULL); 448 448 object_property_add_uint8_ptr(OBJECT(s), ACPI_PM_PROP_ACPI_DISABLE_CMD, 449 - &acpi_disable_cmd, NULL); 449 + &acpi_disable_cmd, OBJ_PROP_FLAG_READ, NULL); 450 450 object_property_add_uint32_ptr(OBJECT(s), ACPI_PM_PROP_GPE0_BLK, 451 - &gpe0_blk, NULL); 451 + &gpe0_blk, OBJ_PROP_FLAG_READ, NULL); 452 452 object_property_add_uint32_ptr(OBJECT(s), ACPI_PM_PROP_GPE0_BLK_LEN, 453 - &gpe0_blk_len, NULL); 453 + &gpe0_blk_len, OBJ_PROP_FLAG_READ, NULL); 454 454 object_property_add_uint16_ptr(OBJECT(s), ACPI_PM_PROP_SCI_INT, 455 - &sci_int, NULL); 455 + &sci_int, OBJ_PROP_FLAG_READ, NULL); 456 456 object_property_add_uint32_ptr(OBJECT(s), ACPI_PM_PROP_PM_IO_BASE, 457 - &s->io_base, NULL); 457 + &s->io_base, OBJ_PROP_FLAG_READ, NULL); 458 458 } 459 459 460 460 static void piix4_pm_realize(PCIDevice *dev, Error **errp)
+6 -8
hw/arm/exynos4210.c
··· 305 305 /*** Memory ***/ 306 306 307 307 /* Chip-ID and OMR */ 308 - memory_region_init_io(&s->chipid_mem, NULL, &exynos4210_chipid_and_omr_ops, 309 - NULL, "exynos4210.chipid", sizeof(chipid_and_omr)); 308 + memory_region_init_io(&s->chipid_mem, OBJECT(socdev), 309 + &exynos4210_chipid_and_omr_ops, NULL, 310 + "exynos4210.chipid", sizeof(chipid_and_omr)); 310 311 memory_region_add_subregion(system_mem, EXYNOS4210_CHIPID_ADDR, 311 312 &s->chipid_mem); 312 313 313 314 /* Internal ROM */ 314 - memory_region_init_ram(&s->irom_mem, NULL, "exynos4210.irom", 315 + memory_region_init_rom(&s->irom_mem, OBJECT(socdev), "exynos4210.irom", 315 316 EXYNOS4210_IROM_SIZE, &error_fatal); 316 - memory_region_set_readonly(&s->irom_mem, true); 317 317 memory_region_add_subregion(system_mem, EXYNOS4210_IROM_BASE_ADDR, 318 318 &s->irom_mem); 319 319 /* mirror of iROM */ 320 - memory_region_init_alias(&s->irom_alias_mem, NULL, "exynos4210.irom_alias", 321 - &s->irom_mem, 322 - 0, 320 + memory_region_init_alias(&s->irom_alias_mem, OBJECT(socdev), 321 + "exynos4210.irom_alias", &s->irom_mem, 0, 323 322 EXYNOS4210_IROM_SIZE); 324 - memory_region_set_readonly(&s->irom_alias_mem, true); 325 323 memory_region_add_subregion(system_mem, EXYNOS4210_IROM_MIRROR_BASE_ADDR, 326 324 &s->irom_alias_mem); 327 325
+5 -5
hw/arm/fsl-imx25.c
··· 303 303 } 304 304 305 305 /* initialize 2 x 16 KB ROM */ 306 - memory_region_init_rom(&s->rom[0], NULL, 307 - "imx25.rom0", FSL_IMX25_ROM0_SIZE, &err); 306 + memory_region_init_rom(&s->rom[0], OBJECT(dev), "imx25.rom0", 307 + FSL_IMX25_ROM0_SIZE, &err); 308 308 if (err) { 309 309 error_propagate(errp, err); 310 310 return; 311 311 } 312 312 memory_region_add_subregion(get_system_memory(), FSL_IMX25_ROM0_ADDR, 313 313 &s->rom[0]); 314 - memory_region_init_rom(&s->rom[1], NULL, 315 - "imx25.rom1", FSL_IMX25_ROM1_SIZE, &err); 314 + memory_region_init_rom(&s->rom[1], OBJECT(dev), "imx25.rom1", 315 + FSL_IMX25_ROM1_SIZE, &err); 316 316 if (err) { 317 317 error_propagate(errp, err); 318 318 return; ··· 331 331 &s->iram); 332 332 333 333 /* internal RAM (128 KB) is aliased over 128 MB - 128 KB */ 334 - memory_region_init_alias(&s->iram_alias, NULL, "imx25.iram_alias", 334 + memory_region_init_alias(&s->iram_alias, OBJECT(dev), "imx25.iram_alias", 335 335 &s->iram, 0, FSL_IMX25_IRAM_ALIAS_SIZE); 336 336 memory_region_add_subregion(get_system_memory(), FSL_IMX25_IRAM_ALIAS_ADDR, 337 337 &s->iram_alias);
+3 -3
hw/arm/fsl-imx31.c
··· 206 206 } 207 207 208 208 /* On a real system, the first 16k is a `secure boot rom' */ 209 - memory_region_init_rom(&s->secure_rom, NULL, "imx31.secure_rom", 209 + memory_region_init_rom(&s->secure_rom, OBJECT(dev), "imx31.secure_rom", 210 210 FSL_IMX31_SECURE_ROM_SIZE, &err); 211 211 if (err) { 212 212 error_propagate(errp, err); ··· 216 216 &s->secure_rom); 217 217 218 218 /* There is also a 16k ROM */ 219 - memory_region_init_rom(&s->rom, NULL, "imx31.rom", 219 + memory_region_init_rom(&s->rom, OBJECT(dev), "imx31.rom", 220 220 FSL_IMX31_ROM_SIZE, &err); 221 221 if (err) { 222 222 error_propagate(errp, err); ··· 236 236 &s->iram); 237 237 238 238 /* internal RAM (16 KB) is aliased over 256 MB - 16 KB */ 239 - memory_region_init_alias(&s->iram_alias, NULL, "imx31.iram_alias", 239 + memory_region_init_alias(&s->iram_alias, OBJECT(dev), "imx31.iram_alias", 240 240 &s->iram, 0, FSL_IMX31_IRAM_ALIAS_SIZE); 241 241 memory_region_add_subregion(get_system_memory(), FSL_IMX31_IRAM_ALIAS_ADDR, 242 242 &s->iram_alias);
+3 -3
hw/arm/fsl-imx6.c
··· 441 441 } 442 442 443 443 /* ROM memory */ 444 - memory_region_init_rom(&s->rom, NULL, "imx6.rom", 444 + memory_region_init_rom(&s->rom, OBJECT(dev), "imx6.rom", 445 445 FSL_IMX6_ROM_SIZE, &err); 446 446 if (err) { 447 447 error_propagate(errp, err); ··· 451 451 &s->rom); 452 452 453 453 /* CAAM memory */ 454 - memory_region_init_rom(&s->caam, NULL, "imx6.caam", 454 + memory_region_init_rom(&s->caam, OBJECT(dev), "imx6.caam", 455 455 FSL_IMX6_CAAM_MEM_SIZE, &err); 456 456 if (err) { 457 457 error_propagate(errp, err); ··· 471 471 &s->ocram); 472 472 473 473 /* internal OCRAM (256 KB) is aliased over 1 MB */ 474 - memory_region_init_alias(&s->ocram_alias, NULL, "imx6.ocram_alias", 474 + memory_region_init_alias(&s->ocram_alias, OBJECT(dev), "imx6.ocram_alias", 475 475 &s->ocram, 0, FSL_IMX6_OCRAM_ALIAS_SIZE); 476 476 memory_region_add_subregion(get_system_memory(), FSL_IMX6_OCRAM_ALIAS_ADDR, 477 477 &s->ocram_alias);
+5 -4
hw/arm/fsl-imx6ul.c
··· 592 592 /* 593 593 * ROM memory 594 594 */ 595 - memory_region_init_rom(&s->rom, NULL, "imx6ul.rom", 595 + memory_region_init_rom(&s->rom, OBJECT(dev), "imx6ul.rom", 596 596 FSL_IMX6UL_ROM_SIZE, &error_abort); 597 597 memory_region_add_subregion(get_system_memory(), FSL_IMX6UL_ROM_ADDR, 598 598 &s->rom); ··· 600 600 /* 601 601 * CAAM memory 602 602 */ 603 - memory_region_init_rom(&s->caam, NULL, "imx6ul.caam", 603 + memory_region_init_rom(&s->caam, OBJECT(dev), "imx6ul.caam", 604 604 FSL_IMX6UL_CAAM_MEM_SIZE, &error_abort); 605 605 memory_region_add_subregion(get_system_memory(), FSL_IMX6UL_CAAM_MEM_ADDR, 606 606 &s->caam); ··· 617 617 /* 618 618 * internal OCRAM (128 KB) is aliased over 512 KB 619 619 */ 620 - memory_region_init_alias(&s->ocram_alias, NULL, "imx6ul.ocram_alias", 621 - &s->ocram, 0, FSL_IMX6UL_OCRAM_ALIAS_SIZE); 620 + memory_region_init_alias(&s->ocram_alias, OBJECT(dev), 621 + "imx6ul.ocram_alias", &s->ocram, 0, 622 + FSL_IMX6UL_OCRAM_ALIAS_SIZE); 622 623 memory_region_add_subregion(get_system_memory(), 623 624 FSL_IMX6UL_OCRAM_ALIAS_ADDR, &s->ocram_alias); 624 625 }
+1 -2
hw/arm/mainstone.c
··· 124 124 /* Setup CPU & memory */ 125 125 mpu = pxa270_init(address_space_mem, mainstone_binfo.ram_size, 126 126 machine->cpu_type); 127 - memory_region_init_ram(rom, NULL, "mainstone.rom", MAINSTONE_ROM, 127 + memory_region_init_rom(rom, NULL, "mainstone.rom", MAINSTONE_ROM, 128 128 &error_fatal); 129 - memory_region_set_readonly(rom, true); 130 129 memory_region_add_subregion(address_space_mem, 0, rom); 131 130 132 131 /* There are two 32MiB flash devices on the board */
+3 -3
hw/arm/msf2-soc.c
··· 96 96 MemoryRegion *nvm_alias = g_new(MemoryRegion, 1); 97 97 MemoryRegion *sram = g_new(MemoryRegion, 1); 98 98 99 - memory_region_init_rom(nvm, NULL, "MSF2.eNVM", s->envm_size, 99 + memory_region_init_rom(nvm, OBJECT(dev_soc), "MSF2.eNVM", s->envm_size, 100 100 &error_fatal); 101 101 /* 102 102 * On power-on, the eNVM region 0x60000000 is automatically ··· 104 104 * start address (0x0). We do not support remapping other eNVM, 105 105 * eSRAM and DDR regions by guest(via Sysreg) currently. 106 106 */ 107 - memory_region_init_alias(nvm_alias, NULL, "MSF2.eNVM", 108 - nvm, 0, s->envm_size); 107 + memory_region_init_alias(nvm_alias, OBJECT(dev_soc), "MSF2.eNVM", nvm, 0, 108 + s->envm_size); 109 109 110 110 memory_region_add_subregion(system_memory, ENVM_BASE_ADDRESS, nvm); 111 111 memory_region_add_subregion(system_memory, 0, nvm_alias);
+1 -1
hw/arm/nrf51_soc.c
··· 165 165 } 166 166 167 167 /* STUB Peripherals */ 168 - memory_region_init_io(&s->clock, NULL, &clock_ops, NULL, 168 + memory_region_init_io(&s->clock, OBJECT(dev_soc), &clock_ops, NULL, 169 169 "nrf51_soc.clock", 0x1000); 170 170 memory_region_add_subregion_overlap(&s->container, 171 171 NRF51_IOMEM_BASE, &s->clock, -1);
+2 -4
hw/arm/omap_sx1.c
··· 131 131 mpu = omap310_mpu_init(machine->ram, machine->cpu_type); 132 132 133 133 /* External Flash (EMIFS) */ 134 - memory_region_init_ram(flash, NULL, "omap_sx1.flash0-0", flash_size, 134 + memory_region_init_rom(flash, NULL, "omap_sx1.flash0-0", flash_size, 135 135 &error_fatal); 136 - memory_region_set_readonly(flash, true); 137 136 memory_region_add_subregion(address_space, OMAP_CS0_BASE, flash); 138 137 139 138 memory_region_init_io(&cs[0], NULL, &static_ops, &cs0val, ··· 167 166 if ((version == 1) && 168 167 (dinfo = drive_get(IF_PFLASH, 0, fl_idx)) != NULL) { 169 168 MemoryRegion *flash_1 = g_new(MemoryRegion, 1); 170 - memory_region_init_ram(flash_1, NULL, "omap_sx1.flash1-0", 169 + memory_region_init_rom(flash_1, NULL, "omap_sx1.flash1-0", 171 170 flash1_size, &error_fatal); 172 - memory_region_set_readonly(flash_1, true); 173 171 memory_region_add_subregion(address_space, OMAP_CS1_BASE, flash_1); 174 172 175 173 memory_region_init_io(&cs[1], NULL, &static_ops, &cs1val,
+1 -2
hw/arm/palm.c
··· 213 213 mpu = omap310_mpu_init(machine->ram, machine->cpu_type); 214 214 215 215 /* External Flash (EMIFS) */ 216 - memory_region_init_ram(flash, NULL, "palmte.flash", flash_size, 216 + memory_region_init_rom(flash, NULL, "palmte.flash", flash_size, 217 217 &error_fatal); 218 - memory_region_set_readonly(flash, true); 219 218 memory_region_add_subregion(address_space_mem, OMAP_CS0_BASE, flash); 220 219 221 220 memory_region_init_io(&cs[0], NULL, &static_ops, &cs0val, "palmte-cs0",
+1 -2
hw/arm/spitz.c
··· 929 929 930 930 sl_flash_register(mpu, (model == spitz) ? FLASH_128M : FLASH_1024M); 931 931 932 - memory_region_init_ram(rom, NULL, "spitz.rom", SPITZ_ROM, &error_fatal); 933 - memory_region_set_readonly(rom, true); 932 + memory_region_init_rom(rom, NULL, "spitz.rom", SPITZ_ROM, &error_fatal); 934 933 memory_region_add_subregion(address_space_mem, 0, rom); 935 934 936 935 /* Setup peripherals */
+1 -2
hw/arm/stellaris.c
··· 1300 1300 sram_size = ((board->dc0 >> 18) + 1) * 1024; 1301 1301 1302 1302 /* Flash programming is done via the SCU, so pretend it is ROM. */ 1303 - memory_region_init_ram(flash, NULL, "stellaris.flash", flash_size, 1303 + memory_region_init_rom(flash, NULL, "stellaris.flash", flash_size, 1304 1304 &error_fatal); 1305 - memory_region_set_readonly(flash, true); 1306 1305 memory_region_add_subregion(system_memory, 0, flash); 1307 1306 1308 1307 memory_region_init_ram(sram, NULL, "stellaris.sram", sram_size,
+4 -7
hw/arm/stm32f205_soc.c
··· 93 93 MemoryRegion *flash = g_new(MemoryRegion, 1); 94 94 MemoryRegion *flash_alias = g_new(MemoryRegion, 1); 95 95 96 - memory_region_init_ram(flash, NULL, "STM32F205.flash", FLASH_SIZE, 97 - &error_fatal); 98 - memory_region_init_alias(flash_alias, NULL, "STM32F205.flash.alias", 99 - flash, 0, FLASH_SIZE); 100 - 101 - memory_region_set_readonly(flash, true); 102 - memory_region_set_readonly(flash_alias, true); 96 + memory_region_init_rom(flash, OBJECT(dev_soc), "STM32F205.flash", 97 + FLASH_SIZE, &error_fatal); 98 + memory_region_init_alias(flash_alias, OBJECT(dev_soc), 99 + "STM32F205.flash.alias", flash, 0, FLASH_SIZE); 103 100 104 101 memory_region_add_subregion(system_memory, FLASH_BASE_ADDRESS, flash); 105 102 memory_region_add_subregion(system_memory, 0, flash_alias);
+5 -7
hw/arm/stm32f405_soc.c
··· 95 95 Error *err = NULL; 96 96 int i; 97 97 98 - memory_region_init_ram(&s->flash, NULL, "STM32F405.flash", FLASH_SIZE, 99 - &err); 98 + memory_region_init_rom(&s->flash, OBJECT(dev_soc), "STM32F405.flash", 99 + FLASH_SIZE, &err); 100 100 if (err != NULL) { 101 101 error_propagate(errp, err); 102 102 return; 103 103 } 104 - memory_region_init_alias(&s->flash_alias, NULL, "STM32F405.flash.alias", 105 - &s->flash, 0, FLASH_SIZE); 106 - 107 - memory_region_set_readonly(&s->flash, true); 108 - memory_region_set_readonly(&s->flash_alias, true); 104 + memory_region_init_alias(&s->flash_alias, OBJECT(dev_soc), 105 + "STM32F405.flash.alias", &s->flash, 0, 106 + FLASH_SIZE); 109 107 110 108 memory_region_add_subregion(system_memory, FLASH_BASE_ADDRESS, &s->flash); 111 109 memory_region_add_subregion(system_memory, 0, &s->flash_alias);
+1 -2
hw/arm/tosa.c
··· 226 226 227 227 mpu = pxa255_init(address_space_mem, tosa_binfo.ram_size); 228 228 229 - memory_region_init_ram(rom, NULL, "tosa.rom", TOSA_ROM, &error_fatal); 230 - memory_region_set_readonly(rom, true); 229 + memory_region_init_rom(rom, NULL, "tosa.rom", TOSA_ROM, &error_fatal); 231 230 memory_region_add_subregion(address_space_mem, 0, rom); 232 231 233 232 tmio = tc6393xb_init(address_space_mem, 0x10000000,
+5 -6
hw/arm/xlnx-zynqmp.c
··· 318 318 ddr_low_size = XLNX_ZYNQMP_MAX_LOW_RAM_SIZE; 319 319 ddr_high_size = ram_size - XLNX_ZYNQMP_MAX_LOW_RAM_SIZE; 320 320 321 - memory_region_init_alias(&s->ddr_ram_high, NULL, 322 - "ddr-ram-high", s->ddr_ram, 323 - ddr_low_size, ddr_high_size); 321 + memory_region_init_alias(&s->ddr_ram_high, OBJECT(dev), 322 + "ddr-ram-high", s->ddr_ram, ddr_low_size, 323 + ddr_high_size); 324 324 memory_region_add_subregion(get_system_memory(), 325 325 XLNX_ZYNQMP_HIGH_RAM_START, 326 326 &s->ddr_ram_high); ··· 330 330 ddr_low_size = ram_size; 331 331 } 332 332 333 - memory_region_init_alias(&s->ddr_ram_low, NULL, 334 - "ddr-ram-low", s->ddr_ram, 335 - 0, ddr_low_size); 333 + memory_region_init_alias(&s->ddr_ram_low, OBJECT(dev), "ddr-ram-low", 334 + s->ddr_ram, 0, ddr_low_size); 336 335 memory_region_add_subregion(get_system_memory(), 0, &s->ddr_ram_low); 337 336 338 337 /* Create the four OCM banks */
+3 -1
hw/audio/fmopl.c
··· 186 186 187 187 /* envelope output curve table */ 188 188 /* attack + decay + OFF */ 189 - static int32_t ENV_CURVE[2*EG_ENT+1]; 189 + static int32_t *ENV_CURVE; 190 190 191 191 /* multiple table */ 192 192 #define ML 2 ··· 1090 1090 OPL->clock = clock; 1091 1091 OPL->rate = rate; 1092 1092 OPL->max_ch = max_ch; 1093 + ENV_CURVE = g_new(int32_t, 2 * EG_ENT + 1); 1093 1094 /* init grobal tables */ 1094 1095 OPL_initialize(OPL); 1095 1096 /* reset chip */ ··· 1127 1128 #endif 1128 1129 OPL_UnLockTable(); 1129 1130 free(OPL); 1131 + g_free(ENV_CURVE); 1130 1132 } 1131 1133 1132 1134 /* ---------- Option handlers ---------- */
+10 -14
hw/audio/intel-hda.c
··· 181 181 IntelHDAStream st[8]; 182 182 183 183 /* state */ 184 + MemoryRegion container; 184 185 MemoryRegion mmio; 186 + MemoryRegion alias; 185 187 uint32_t rirb_count; 186 188 int64_t wall_base_ns; 187 189 ··· 670 672 .offset = offsetof(IntelHDAState, wall_clk), 671 673 .rhandler = intel_hda_get_wall_clk, 672 674 }, 673 - [ ICH6_REG_WALLCLK + 0x2000 ] = { 674 - .name = "WALLCLK(alias)", 675 - .size = 4, 676 - .offset = offsetof(IntelHDAState, wall_clk), 677 - .rhandler = intel_hda_get_wall_clk, 678 - }, 679 675 680 676 /* dma engine */ 681 677 [ ICH6_REG_CORBLBASE ] = { ··· 834 830 [ ST_REG(_i, ICH6_REG_SD_LPIB) ] = { \ 835 831 .stream = _i, \ 836 832 .name = _t stringify(_i) " LPIB", \ 837 - .size = 4, \ 838 - .offset = offsetof(IntelHDAState, st[_i].lpib), \ 839 - }, \ 840 - [ ST_REG(_i, ICH6_REG_SD_LPIB) + 0x2000 ] = { \ 841 - .stream = _i, \ 842 - .name = _t stringify(_i) " LPIB(alias)", \ 843 833 .size = 4, \ 844 834 .offset = offsetof(IntelHDAState, st[_i].lpib), \ 845 835 }, \ ··· 1125 1115 error_free(err); 1126 1116 } 1127 1117 1118 + memory_region_init(&d->container, OBJECT(d), 1119 + "intel-hda-container", 0x4000); 1128 1120 memory_region_init_io(&d->mmio, OBJECT(d), &intel_hda_mmio_ops, d, 1129 - "intel-hda", 0x4000); 1130 - pci_register_bar(&d->pci, 0, 0, &d->mmio); 1121 + "intel-hda", 0x2000); 1122 + memory_region_add_subregion(&d->container, 0x0000, &d->mmio); 1123 + memory_region_init_alias(&d->alias, OBJECT(d), "intel-hda-alias", 1124 + &d->mmio, 0, 0x2000); 1125 + memory_region_add_subregion(&d->container, 0x2000, &d->alias); 1126 + pci_register_bar(&d->pci, 0, 0, &d->container); 1131 1127 1132 1128 hda_codec_bus_init(DEVICE(pci), &d->codecs, sizeof(d->codecs), 1133 1129 intel_hda_response, intel_hda_xfer);
+1 -1
hw/char/sclpconsole-lm.c
··· 31 31 typedef struct OprtnsCommand { 32 32 EventBufferHeader header; 33 33 MDMSU message_unit; 34 - char data[0]; 34 + char data[]; 35 35 } QEMU_PACKED OprtnsCommand; 36 36 37 37 /* max size for line-mode data in 4K SCCB page */
+1 -1
hw/char/sclpconsole.c
··· 25 25 26 26 typedef struct ASCIIConsoleData { 27 27 EventBufferHeader ebh; 28 - char data[0]; 28 + char data[]; 29 29 } QEMU_PACKED ASCIIConsoleData; 30 30 31 31 /* max size for ASCII data in 4K SCCB page */
+4 -3
hw/char/serial.c
··· 997 997 return; 998 998 } 999 999 1000 - memory_region_init_io(&s->io, NULL, &serial_io_ops, s, "serial", 8); 1000 + memory_region_init_io(&s->io, OBJECT(dev), &serial_io_ops, s, "serial", 8); 1001 1001 sysbus_init_mmio(SYS_BUS_DEVICE(sio), &s->io); 1002 1002 sysbus_init_irq(SYS_BUS_DEVICE(sio), &s->irq); 1003 1003 } ··· 1106 1106 return; 1107 1107 } 1108 1108 1109 - memory_region_init_io(&s->io, NULL, &serial_mm_ops[smm->endianness], smm, 1110 - "serial", 8 << smm->regshift); 1109 + memory_region_init_io(&s->io, OBJECT(dev), 1110 + &serial_mm_ops[smm->endianness], smm, "serial", 1111 + 8 << smm->regshift); 1111 1112 sysbus_init_mmio(SYS_BUS_DEVICE(smm), &s->io); 1112 1113 sysbus_init_irq(SYS_BUS_DEVICE(smm), &smm->serial.irq); 1113 1114 }
+16 -9
hw/core/loader.c
··· 1119 1119 { 1120 1120 Rom *rom; 1121 1121 1122 - /* 1123 - * We don't need to fill in the RAM with ROM data because we'll fill 1124 - * the data in during the next incoming migration in all cases. Note 1125 - * that some of those RAMs can actually be modified by the guest on ARM 1126 - * so this is probably the only right thing to do here. 1127 - */ 1128 - if (runstate_check(RUN_STATE_INMIGRATE)) 1129 - return; 1130 - 1131 1122 QTAILQ_FOREACH(rom, &roms, next) { 1132 1123 if (rom->fw_file) { 1133 1124 continue; 1134 1125 } 1126 + /* 1127 + * We don't need to fill in the RAM with ROM data because we'll fill 1128 + * the data in during the next incoming migration in all cases. Note 1129 + * that some of those RAMs can actually be modified by the guest. 1130 + */ 1131 + if (runstate_check(RUN_STATE_INMIGRATE)) { 1132 + if (rom->data && rom->isrom) { 1133 + /* 1134 + * Free it so that a rom_reset after migration doesn't 1135 + * overwrite a potentially modified 'rom'. 1136 + */ 1137 + rom_free_data(rom); 1138 + } 1139 + continue; 1140 + } 1141 + 1135 1142 if (rom->data == NULL) { 1136 1143 continue; 1137 1144 }
+2 -1
hw/core/platform-bus.c
··· 187 187 d = SYS_BUS_DEVICE(dev); 188 188 pbus = PLATFORM_BUS_DEVICE(dev); 189 189 190 - memory_region_init(&pbus->mmio, NULL, "platform bus", pbus->mmio_size); 190 + memory_region_init(&pbus->mmio, OBJECT(dev), "platform bus", 191 + pbus->mmio_size); 191 192 sysbus_init_mmio(d, &pbus->mmio); 192 193 193 194 pbus->used_irqs = bitmap_new(pbus->num_irqs);
+2 -3
hw/display/cg3.c
··· 287 287 SysBusDevice *sbd = SYS_BUS_DEVICE(obj); 288 288 CG3State *s = CG3(obj); 289 289 290 - memory_region_init_ram_nomigrate(&s->rom, obj, "cg3.prom", FCODE_MAX_ROM_SIZE, 291 - &error_fatal); 292 - memory_region_set_readonly(&s->rom, true); 290 + memory_region_init_rom_nomigrate(&s->rom, obj, "cg3.prom", 291 + FCODE_MAX_ROM_SIZE, &error_fatal); 293 292 sysbus_init_mmio(sbd, &s->rom); 294 293 295 294 memory_region_init_io(&s->reg, obj, &cg3_reg_ops, s, "cg3.reg",
+2 -1
hw/display/g364fb.c
··· 477 477 478 478 s->con = graphic_console_init(dev, 0, &g364fb_ops, s); 479 479 480 - memory_region_init_io(&s->mem_ctrl, NULL, &g364fb_ctrl_ops, s, "ctrl", 0x180000); 480 + memory_region_init_io(&s->mem_ctrl, OBJECT(dev), &g364fb_ctrl_ops, s, 481 + "ctrl", 0x180000); 481 482 memory_region_init_ram_ptr(&s->mem_vram, NULL, "vram", 482 483 s->vram_size, s->vram); 483 484 vmstate_register_ram(&s->mem_vram, dev);
+2 -2
hw/display/macfb.c
··· 362 362 return; 363 363 } 364 364 365 - memory_region_init_io(&s->mem_ctrl, NULL, &macfb_ctrl_ops, s, "macfb-ctrl", 366 - 0x1000); 365 + memory_region_init_io(&s->mem_ctrl, OBJECT(dev), &macfb_ctrl_ops, s, 366 + "macfb-ctrl", 0x1000); 367 367 368 368 memory_region_init_ram_nomigrate(&s->mem_vram, OBJECT(s), "macfb-vram", 369 369 MACFB_VRAM_SIZE, errp);
+2 -3
hw/display/tcx.c
··· 755 755 SysBusDevice *sbd = SYS_BUS_DEVICE(obj); 756 756 TCXState *s = TCX(obj); 757 757 758 - memory_region_init_ram_nomigrate(&s->rom, obj, "tcx.prom", FCODE_MAX_ROM_SIZE, 759 - &error_fatal); 760 - memory_region_set_readonly(&s->rom, true); 758 + memory_region_init_rom_nomigrate(&s->rom, obj, "tcx.prom", 759 + FCODE_MAX_ROM_SIZE, &error_fatal); 761 760 sysbus_init_mmio(sbd, &s->rom); 762 761 763 762 /* 2/STIP : Stippler */
+1 -1
hw/dma/i8257.c
··· 553 553 I8257State *d = I8257(dev); 554 554 int i; 555 555 556 - memory_region_init_io(&d->channel_io, NULL, &channel_io_ops, d, 556 + memory_region_init_io(&d->channel_io, OBJECT(dev), &channel_io_ops, d, 557 557 "dma-chan", 8 << d->dshift); 558 558 memory_region_add_subregion(isa_address_space_io(isa), 559 559 d->base, &d->channel_io);
+2 -2
hw/dma/rc4030.c
··· 679 679 s->periodic_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, 680 680 rc4030_periodic_timer, s); 681 681 682 - memory_region_init_io(&s->iomem_chipset, NULL, &rc4030_ops, s, 682 + memory_region_init_io(&s->iomem_chipset, o, &rc4030_ops, s, 683 683 "rc4030.chipset", 0x300); 684 - memory_region_init_io(&s->iomem_jazzio, NULL, &jazzio_ops, s, 684 + memory_region_init_io(&s->iomem_jazzio, o, &jazzio_ops, s, 685 685 "rc4030.jazzio", 0x00001000); 686 686 687 687 memory_region_init_iommu(&s->dma_mr, sizeof(s->dma_mr),
+1 -1
hw/dma/soc_dma.c
··· 80 80 } *memmap; 81 81 int memmap_size; 82 82 83 - struct soc_dma_ch_s ch[0]; 83 + struct soc_dma_ch_s ch[]; 84 84 }; 85 85 86 86 static void soc_dma_ch_schedule(struct soc_dma_ch_s *ch, int delay_bytes)
+6
hw/i386/intel_iommu.c
··· 3094 3094 uint16_t mask, source_id; 3095 3095 uint8_t bus, bus_max, bus_min; 3096 3096 3097 + if (index >= iommu->intr_size) { 3098 + error_report_once("%s: index too large: ind=0x%x", 3099 + __func__, index); 3100 + return -VTD_FR_IR_INDEX_OVER; 3101 + } 3102 + 3097 3103 addr = iommu->intr_root + index * sizeof(*entry); 3098 3104 if (dma_memory_read(&address_space_memory, addr, entry, 3099 3105 sizeof(*entry))) {
+1 -1
hw/i386/x86.c
··· 328 328 uint64_t next; 329 329 uint32_t type; 330 330 uint32_t len; 331 - uint8_t data[0]; 331 + uint8_t data[]; 332 332 } __attribute__((packed)); 333 333 334 334
+6 -21
hw/isa/lpc_ich9.c
··· 625 625 .endianness = DEVICE_LITTLE_ENDIAN 626 626 }; 627 627 628 - static void ich9_lpc_get_sci_int(Object *obj, Visitor *v, const char *name, 629 - void *opaque, Error **errp) 628 + static void ich9_lpc_initfn(Object *obj) 630 629 { 631 630 ICH9LPCState *lpc = ICH9_LPC_DEVICE(obj); 632 - uint32_t value = lpc->sci_gsi; 633 - 634 - visit_type_uint32(v, name, &value, errp); 635 - } 636 631 637 - static void ich9_lpc_add_properties(ICH9LPCState *lpc) 638 - { 639 632 static const uint8_t acpi_enable_cmd = ICH9_APM_ACPI_ENABLE; 640 633 static const uint8_t acpi_disable_cmd = ICH9_APM_ACPI_DISABLE; 641 634 642 - object_property_add(OBJECT(lpc), ACPI_PM_PROP_SCI_INT, "uint32", 643 - ich9_lpc_get_sci_int, 644 - NULL, NULL, NULL, NULL); 635 + object_property_add_uint8_ptr(obj, ACPI_PM_PROP_SCI_INT, 636 + &lpc->sci_gsi, OBJ_PROP_FLAG_READ, NULL); 645 637 object_property_add_uint8_ptr(OBJECT(lpc), ACPI_PM_PROP_ACPI_ENABLE_CMD, 646 - &acpi_enable_cmd, NULL); 638 + &acpi_enable_cmd, OBJ_PROP_FLAG_READ, NULL); 647 639 object_property_add_uint8_ptr(OBJECT(lpc), ACPI_PM_PROP_ACPI_DISABLE_CMD, 648 - &acpi_disable_cmd, NULL); 640 + &acpi_disable_cmd, OBJ_PROP_FLAG_READ, NULL); 649 641 650 - ich9_pm_add_properties(OBJECT(lpc), &lpc->pm, NULL); 651 - } 652 - 653 - static void ich9_lpc_initfn(Object *obj) 654 - { 655 - ICH9LPCState *lpc = ICH9_LPC_DEVICE(obj); 656 - 657 - ich9_lpc_add_properties(lpc); 642 + ich9_pm_add_properties(obj, &lpc->pm, NULL); 658 643 } 659 644 660 645 static void ich9_lpc_realize(PCIDevice *d, Error **errp)
+1 -1
hw/m68k/bootinfo.h
··· 14 14 struct bi_record { 15 15 uint16_t tag; /* tag ID */ 16 16 uint16_t size; /* size of record */ 17 - uint32_t data[0]; /* data */ 17 + uint32_t data[]; /* data */ 18 18 }; 19 19 20 20 /* machine independent tags */
+1 -2
hw/m68k/q800.c
··· 399 399 uint8_t *ptr; 400 400 /* allocate and load BIOS */ 401 401 rom = g_malloc(sizeof(*rom)); 402 - memory_region_init_ram(rom, NULL, "m68k_mac.rom", MACROM_SIZE, 402 + memory_region_init_rom(rom, NULL, "m68k_mac.rom", MACROM_SIZE, 403 403 &error_abort); 404 404 if (bios_name == NULL) { 405 405 bios_name = MACROM_FILENAME; 406 406 } 407 407 filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, bios_name); 408 - memory_region_set_readonly(rom, true); 409 408 memory_region_add_subregion(get_system_memory(), MACROM_ADDR, rom); 410 409 411 410 /* Load MacROM binary */
+3 -10
hw/misc/edu.c
··· 396 396 msi_uninit(pdev); 397 397 } 398 398 399 - static void edu_obj_uint64(Object *obj, Visitor *v, const char *name, 400 - void *opaque, Error **errp) 401 - { 402 - uint64_t *val = opaque; 403 - 404 - visit_type_uint64(v, name, val, errp); 405 - } 406 - 407 399 static void edu_instance_init(Object *obj) 408 400 { 409 401 EduState *edu = EDU(obj); 410 402 411 403 edu->dma_mask = (1UL << 28) - 1; 412 - object_property_add(obj, "dma_mask", "uint64", edu_obj_uint64, 413 - edu_obj_uint64, NULL, &edu->dma_mask, NULL); 404 + object_property_add_uint64_ptr(obj, "dma_mask", 405 + &edu->dma_mask, OBJ_PROP_FLAG_READWRITE, 406 + NULL); 414 407 } 415 408 416 409 static void edu_class_init(ObjectClass *class, void *data)
+1 -1
hw/misc/omap_l4.c
··· 24 24 MemoryRegion *address_space; 25 25 hwaddr base; 26 26 int ta_num; 27 - struct omap_target_agent_s ta[0]; 27 + struct omap_target_agent_s ta[]; 28 28 }; 29 29 30 30 struct omap_l4_s *omap_l4_init(MemoryRegion *address_space,
+2 -3
hw/net/dp8393x.c
··· 986 986 987 987 s->watchdog = timer_new_ns(QEMU_CLOCK_VIRTUAL, dp8393x_watchdog, s); 988 988 989 - memory_region_init_ram(&s->prom, OBJECT(dev), 990 - "dp8393x-prom", SONIC_PROM_SIZE, &local_err); 989 + memory_region_init_rom(&s->prom, OBJECT(dev), "dp8393x-prom", 990 + SONIC_PROM_SIZE, &local_err); 991 991 if (local_err) { 992 992 error_propagate(errp, local_err); 993 993 return; 994 994 } 995 - memory_region_set_readonly(&s->prom, true); 996 995 prom = memory_region_get_ram_ptr(&s->prom); 997 996 checksum = 0; 998 997 for (i = 0; i < 6; i++) {
+1 -1
hw/nvram/eeprom93xx.c
··· 86 86 uint8_t addrbits; 87 87 uint16_t size; 88 88 uint16_t data; 89 - uint16_t contents[0]; 89 + uint16_t contents[]; 90 90 }; 91 91 92 92 /* Code for saving and restoring of EEPROM state. */
+2 -3
hw/pci-host/prep.c
··· 325 325 d->config[0x0D] = 0x10; // latency_timer 326 326 d->config[0x34] = 0x00; // capabilities_pointer 327 327 328 - memory_region_init_ram_nomigrate(&s->bios, OBJECT(s), "bios", BIOS_SIZE, 329 - &error_fatal); 330 - memory_region_set_readonly(&s->bios, true); 328 + memory_region_init_rom_nomigrate(&s->bios, OBJECT(s), "bios", BIOS_SIZE, 329 + &error_fatal); 331 330 memory_region_add_subregion(get_system_memory(), (uint32_t)(-BIOS_SIZE), 332 331 &s->bios); 333 332 if (s->bios_name) {
+3 -11
hw/pci-host/q35.c
··· 166 166 visit_type_uint64(v, name, &value, errp); 167 167 } 168 168 169 - static void q35_host_get_mmcfg_size(Object *obj, Visitor *v, const char *name, 170 - void *opaque, Error **errp) 171 - { 172 - PCIExpressHost *e = PCIE_HOST_BRIDGE(obj); 173 - 174 - visit_type_uint64(v, name, &e->size, errp); 175 - } 176 - 177 169 /* 178 170 * NOTE: setting defaults for the mch.* fields in this table 179 171 * doesn't work, because mch is a separate QOM object that is ··· 214 206 { 215 207 Q35PCIHost *s = Q35_HOST_DEVICE(obj); 216 208 PCIHostState *phb = PCI_HOST_BRIDGE(obj); 209 + PCIExpressHost *pehb = PCIE_HOST_BRIDGE(obj); 217 210 218 211 memory_region_init_io(&phb->conf_mem, obj, &pci_host_conf_le_ops, phb, 219 212 "pci-conf-idx", 4); ··· 243 236 q35_host_get_pci_hole64_end, 244 237 NULL, NULL, NULL, NULL); 245 238 246 - object_property_add(obj, PCIE_HOST_MCFG_SIZE, "uint64", 247 - q35_host_get_mmcfg_size, 248 - NULL, NULL, NULL, NULL); 239 + object_property_add_uint64_ptr(obj, PCIE_HOST_MCFG_SIZE, 240 + &pehb->size, OBJ_PROP_FLAG_READ, NULL); 249 241 250 242 object_property_add_link(obj, MCH_HOST_PROP_RAM_MEM, TYPE_MEMORY_REGION, 251 243 (Object **) &s->mch.ram_memory,
+1 -2
hw/ppc/mac_newworld.c
··· 155 155 memory_region_add_subregion(get_system_memory(), 0, machine->ram); 156 156 157 157 /* allocate and load BIOS */ 158 - memory_region_init_ram(bios, NULL, "ppc_core99.bios", BIOS_SIZE, 158 + memory_region_init_rom(bios, NULL, "ppc_core99.bios", BIOS_SIZE, 159 159 &error_fatal); 160 160 161 161 if (bios_name == NULL) 162 162 bios_name = PROM_FILENAME; 163 163 filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, bios_name); 164 - memory_region_set_readonly(bios, true); 165 164 memory_region_add_subregion(get_system_memory(), PROM_ADDR, bios); 166 165 167 166 /* Load OpenBIOS (ELF) */
+1 -2
hw/ppc/mac_oldworld.c
··· 129 129 memory_region_add_subregion(sysmem, 0, machine->ram); 130 130 131 131 /* allocate and load BIOS */ 132 - memory_region_init_ram(bios, NULL, "ppc_heathrow.bios", BIOS_SIZE, 132 + memory_region_init_rom(bios, NULL, "ppc_heathrow.bios", BIOS_SIZE, 133 133 &error_fatal); 134 134 135 135 if (bios_name == NULL) 136 136 bios_name = PROM_FILENAME; 137 137 filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, bios_name); 138 - memory_region_set_readonly(bios, true); 139 138 memory_region_add_subregion(sysmem, PROM_ADDR, bios); 140 139 141 140 /* Load OpenBIOS (ELF) */
+2 -4
hw/ppc/ppc405_boards.c
··· 199 199 #endif 200 200 { 201 201 bios = g_new(MemoryRegion, 1); 202 - memory_region_init_ram(bios, NULL, "ef405ep.bios", BIOS_SIZE, 202 + memory_region_init_rom(bios, NULL, "ef405ep.bios", BIOS_SIZE, 203 203 &error_fatal); 204 204 205 205 if (bios_name == NULL) ··· 223 223 /* Avoid an uninitialized variable warning */ 224 224 bios_size = -1; 225 225 } 226 - memory_region_set_readonly(bios, true); 227 226 } 228 227 /* Register FPGA */ 229 228 ref405ep_fpga_init(sysmem, 0xF0300000); ··· 471 470 if (bios_name == NULL) 472 471 bios_name = BIOS_FILENAME; 473 472 bios = g_new(MemoryRegion, 1); 474 - memory_region_init_ram(bios, NULL, "taihu_405ep.bios", BIOS_SIZE, 473 + memory_region_init_rom(bios, NULL, "taihu_405ep.bios", BIOS_SIZE, 475 474 &error_fatal); 476 475 filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, bios_name); 477 476 if (filename) { ··· 489 488 error_report("Could not load PowerPC BIOS '%s'", bios_name); 490 489 exit(1); 491 490 } 492 - memory_region_set_readonly(bios, true); 493 491 } 494 492 /* Register Linux flash */ 495 493 dinfo = drive_get(IF_PFLASH, 0, fl_idx);
+7 -29
hw/ppc/spapr.c
··· 3223 3223 } 3224 3224 } 3225 3225 3226 - static void spapr_get_vsmt(Object *obj, Visitor *v, const char *name, 3227 - void *opaque, Error **errp) 3228 - { 3229 - visit_type_uint32(v, name, (uint32_t *)opaque, errp); 3230 - } 3231 - 3232 - static void spapr_set_vsmt(Object *obj, Visitor *v, const char *name, 3233 - void *opaque, Error **errp) 3234 - { 3235 - visit_type_uint32(v, name, (uint32_t *)opaque, errp); 3236 - } 3237 - 3238 - static void spapr_get_kernel_addr(Object *obj, Visitor *v, const char *name, 3239 - void *opaque, Error **errp) 3240 - { 3241 - visit_type_uint64(v, name, (uint64_t *)opaque, errp); 3242 - } 3243 - 3244 - static void spapr_set_kernel_addr(Object *obj, Visitor *v, const char *name, 3245 - void *opaque, Error **errp) 3246 - { 3247 - visit_type_uint64(v, name, (uint64_t *)opaque, errp); 3248 - } 3249 - 3250 3226 static char *spapr_get_ic_mode(Object *obj, Error **errp) 3251 3227 { 3252 3228 SpaprMachineState *spapr = SPAPR_MACHINE(obj); ··· 3344 3320 object_property_set_description(obj, "resize-hpt", 3345 3321 "Resizing of the Hash Page Table (enabled, disabled, required)", 3346 3322 NULL); 3347 - object_property_add(obj, "vsmt", "uint32", spapr_get_vsmt, 3348 - spapr_set_vsmt, NULL, &spapr->vsmt, &error_abort); 3323 + object_property_add_uint32_ptr(obj, "vsmt", 3324 + &spapr->vsmt, OBJ_PROP_FLAG_READWRITE, 3325 + &error_abort); 3349 3326 object_property_set_description(obj, "vsmt", 3350 3327 "Virtual SMT: KVM behaves as if this were" 3351 3328 " the host's SMT mode", &error_abort); 3329 + 3352 3330 object_property_add_bool(obj, "vfio-no-msix-emulation", 3353 3331 spapr_get_msix_emulation, NULL, NULL); 3354 3332 3355 - object_property_add(obj, "kernel-addr", "uint64", spapr_get_kernel_addr, 3356 - spapr_set_kernel_addr, NULL, &spapr->kernel_addr, 3357 - &error_abort); 3333 + object_property_add_uint64_ptr(obj, "kernel-addr", 3334 + &spapr->kernel_addr, OBJ_PROP_FLAG_READWRITE, 3335 + &error_abort); 3358 3336 
object_property_set_description(obj, "kernel-addr", 3359 3337 stringify(KERNEL_LOAD_ADDR) 3360 3338 " for -kernel is the default",
+2 -1
hw/ppc/spapr_drc.c
··· 583 583 SpaprDrc *drc = SPAPR_DR_CONNECTOR(obj); 584 584 SpaprDrcClass *drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc); 585 585 586 - object_property_add_uint32_ptr(obj, "id", &drc->id, NULL); 586 + object_property_add_uint32_ptr(obj, "id", &drc->id, OBJ_PROP_FLAG_READ, 587 + NULL); 587 588 object_property_add(obj, "index", "uint32", prop_get_index, 588 589 NULL, NULL, NULL, NULL); 589 590 object_property_add(obj, "fdt", "struct", prop_get_fdt,
+2 -2
hw/rdma/vmw/pvrdma_qp_ops.c
··· 34 34 /* Send Queue WQE */ 35 35 typedef struct PvrdmaSqWqe { 36 36 struct pvrdma_sq_wqe_hdr hdr; 37 - struct pvrdma_sge sge[0]; 37 + struct pvrdma_sge sge[]; 38 38 } PvrdmaSqWqe; 39 39 40 40 /* Recv Queue WQE */ 41 41 typedef struct PvrdmaRqWqe { 42 42 struct pvrdma_rq_wqe_hdr hdr; 43 - struct pvrdma_sge sge[0]; 43 + struct pvrdma_sge sge[]; 44 44 } PvrdmaRqWqe; 45 45 46 46 /*
+4 -5
hw/riscv/sifive_e.c
··· 145 145 &error_abort); 146 146 147 147 /* Mask ROM */ 148 - memory_region_init_rom(&s->mask_rom, NULL, "riscv.sifive.e.mrom", 149 - memmap[SIFIVE_E_MROM].size, &error_fatal); 148 + memory_region_init_rom(&s->mask_rom, OBJECT(dev), "riscv.sifive.e.mrom", 149 + memmap[SIFIVE_E_MROM].size, &error_fatal); 150 150 memory_region_add_subregion(sys_mem, 151 151 memmap[SIFIVE_E_MROM].base, &s->mask_rom); 152 152 ··· 208 208 memmap[SIFIVE_E_PWM2].base, memmap[SIFIVE_E_PWM2].size); 209 209 210 210 /* Flash memory */ 211 - memory_region_init_ram(&s->xip_mem, NULL, "riscv.sifive.e.xip", 212 - memmap[SIFIVE_E_XIP].size, &error_fatal); 213 - memory_region_set_readonly(&s->xip_mem, true); 211 + memory_region_init_rom(&s->xip_mem, OBJECT(dev), "riscv.sifive.e.xip", 212 + memmap[SIFIVE_E_XIP].size, &error_fatal); 214 213 memory_region_add_subregion(sys_mem, memmap[SIFIVE_E_XIP].base, 215 214 &s->xip_mem); 216 215 }
+1 -1
hw/riscv/sifive_u.c
··· 501 501 &error_abort); 502 502 503 503 /* boot rom */ 504 - memory_region_init_rom(mask_rom, NULL, "riscv.sifive.u.mrom", 504 + memory_region_init_rom(mask_rom, OBJECT(dev), "riscv.sifive.u.mrom", 505 505 memmap[SIFIVE_U_MROM].size, &error_fatal); 506 506 memory_region_add_subregion(system_memory, memmap[SIFIVE_U_MROM].base, 507 507 mask_rom);
+1 -1
hw/s390x/virtio-ccw.c
··· 193 193 typedef struct VirtioRevInfo { 194 194 uint16_t revision; 195 195 uint16_t length; 196 - uint8_t data[0]; 196 + uint8_t data[]; 197 197 } QEMU_PACKED VirtioRevInfo; 198 198 199 199 /* Specify where the virtqueues for the subchannel are in guest memory. */
+1 -2
hw/sh4/shix.c
··· 53 53 cpu = SUPERH_CPU(cpu_create(machine->cpu_type)); 54 54 55 55 /* Allocate memory space */ 56 - memory_region_init_ram(rom, NULL, "shix.rom", 0x4000, &error_fatal); 57 - memory_region_set_readonly(rom, true); 56 + memory_region_init_rom(rom, NULL, "shix.rom", 0x4000, &error_fatal); 58 57 memory_region_add_subregion(sysmem, 0x00000000, rom); 59 58 memory_region_init_ram(&sdram[0], NULL, "shix.sdram1", 0x01000000, 60 59 &error_fatal);
+1 -2
hw/sparc/leon3.c
··· 255 255 256 256 /* Allocate BIOS */ 257 257 prom_size = 8 * MiB; 258 - memory_region_init_ram(prom, NULL, "Leon3.bios", prom_size, &error_fatal); 259 - memory_region_set_readonly(prom, true); 258 + memory_region_init_rom(prom, NULL, "Leon3.bios", prom_size, &error_fatal); 260 259 memory_region_add_subregion(address_space_mem, LEON3_PROM_OFFSET, prom); 261 260 262 261 /* Load boot prom */
+1 -1
hw/usb/dev-network.c
··· 626 626 struct rndis_response { 627 627 QTAILQ_ENTRY(rndis_response) entries; 628 628 uint32_t length; 629 - uint8_t buf[0]; 629 + uint8_t buf[]; 630 630 }; 631 631 632 632 typedef struct USBNetState {
+2 -2
hw/usb/dev-smartcard-reader.c
··· 227 227 typedef struct QEMU_PACKED CCID_DataBlock { 228 228 CCID_BULK_IN b; 229 229 uint8_t bChainParameter; 230 - uint8_t abData[0]; 230 + uint8_t abData[]; 231 231 } CCID_DataBlock; 232 232 233 233 /* 6.1.4 PC_to_RDR_XfrBlock */ ··· 235 235 CCID_Header hdr; 236 236 uint8_t bBWI; /* Block Waiting Timeout */ 237 237 uint16_t wLevelParameter; /* XXX currently unused */ 238 - uint8_t abData[0]; 238 + uint8_t abData[]; 239 239 } CCID_XferBlock; 240 240 241 241 typedef struct QEMU_PACKED CCID_IccPowerOn {
+2 -2
hw/usb/quirks.c
··· 22 22 uint8_t interface_protocol) { 23 23 int i; 24 24 25 - for (i = 0; ids[i].vendor_id != -1; i++) { 25 + for (i = 0; ids[i].terminating_entry == 0; i++) { 26 26 if (ids[i].vendor_id == vendor_id && 27 27 ids[i].product_id == product_id && 28 - (ids[i].interface_class == -1 || 28 + (ids[i].interface_protocol_used == 0 || 29 29 (ids[i].interface_class == interface_class && 30 30 ids[i].interface_subclass == interface_subclass && 31 31 ids[i].interface_protocol == interface_protocol))) {
+13 -9
hw/usb/quirks.h
··· 21 21 #include "quirks-pl2303-ids.h" 22 22 23 23 struct usb_device_id { 24 - int vendor_id; 25 - int product_id; 26 - int interface_class; 27 - int interface_subclass; 28 - int interface_protocol; 24 + uint16_t vendor_id; 25 + uint16_t product_id; 26 + uint8_t interface_class; 27 + uint8_t interface_subclass; 28 + uint8_t interface_protocol; 29 + uint8_t interface_protocol_used:1, 30 + terminating_entry:1, 31 + reserved:6; 29 32 }; 30 33 31 34 #define USB_DEVICE(vendor, product) \ 32 - .vendor_id = vendor, .product_id = product, .interface_class = -1, 35 + .vendor_id = vendor, .product_id = product, .interface_protocol_used = 0, 33 36 34 37 #define USB_DEVICE_AND_INTERFACE_INFO(vend, prod, iclass, isubclass, iproto) \ 35 38 .vendor_id = vend, .product_id = prod, .interface_class = iclass, \ 36 - .interface_subclass = isubclass, .interface_protocol = iproto 39 + .interface_subclass = isubclass, .interface_protocol = iproto, \ 40 + .interface_protocol_used = 1 37 41 38 42 static const struct usb_device_id usbredir_raw_serial_ids[] = { 39 43 /* ··· 206 210 { USB_DEVICE(ADLINK_VENDOR_ID, ADLINK_ND6530_PRODUCT_ID) }, 207 211 { USB_DEVICE(SMART_VENDOR_ID, SMART_PRODUCT_ID) }, 208 212 209 - { USB_DEVICE(-1, -1) } /* Terminating Entry */ 213 + { .terminating_entry = 1 } /* Terminating Entry */ 210 214 }; 211 215 212 216 static const struct usb_device_id usbredir_ftdi_serial_ids[] = { ··· 906 910 { USB_DEVICE(FTDI_VID, FTDI_DISTORTEC_JTAG_LOCK_PICK_PID) }, 907 911 { USB_DEVICE(FTDI_VID, FTDI_LUMEL_PD12_PID) }, 908 912 909 - { USB_DEVICE(-1, -1) } /* Terminating Entry */ 913 + { .terminating_entry = 1 } /* Terminating Entry */ 910 914 }; 911 915 912 916 #undef USB_DEVICE
+2 -2
hw/virtio/virtio.c
··· 54 54 { 55 55 uint16_t flags; 56 56 uint16_t idx; 57 - uint16_t ring[0]; 57 + uint16_t ring[]; 58 58 } VRingAvail; 59 59 60 60 typedef struct VRingUsedElem ··· 67 67 { 68 68 uint16_t flags; 69 69 uint16_t idx; 70 - VRingUsedElem ring[0]; 70 + VRingUsedElem ring[]; 71 71 } VRingUsed; 72 72 73 73 typedef struct VRingMemoryRegionCaches {
+1 -1
hw/xen/xen_pt.h
··· 203 203 uint64_t mmio_base_addr; 204 204 MemoryRegion mmio; 205 205 void *phys_iomem_base; 206 - XenPTMSIXEntry msix_entry[0]; 206 + XenPTMSIXEntry msix_entry[]; 207 207 } XenPTMSIX; 208 208 209 209 struct XenPCIPassthroughState {
+8 -8
include/hw/acpi/acpi-defs.h
··· 152 152 */ 153 153 struct AcpiRsdtDescriptorRev1 { 154 154 ACPI_TABLE_HEADER_DEF /* ACPI common table header */ 155 - uint32_t table_offset_entry[0]; /* Array of pointers to other */ 155 + uint32_t table_offset_entry[]; /* Array of pointers to other */ 156 156 /* ACPI tables */ 157 157 } QEMU_PACKED; 158 158 typedef struct AcpiRsdtDescriptorRev1 AcpiRsdtDescriptorRev1; ··· 162 162 */ 163 163 struct AcpiXsdtDescriptorRev2 { 164 164 ACPI_TABLE_HEADER_DEF /* ACPI common table header */ 165 - uint64_t table_offset_entry[0]; /* Array of pointers to other */ 165 + uint64_t table_offset_entry[]; /* Array of pointers to other */ 166 166 /* ACPI tables */ 167 167 } QEMU_PACKED; 168 168 typedef struct AcpiXsdtDescriptorRev2 AcpiXsdtDescriptorRev2; ··· 518 518 struct { 519 519 uint8_t device; 520 520 uint8_t function; 521 - } path[0]; 521 + } path[]; 522 522 } QEMU_PACKED; 523 523 typedef struct AcpiDmarDeviceScope AcpiDmarDeviceScope; 524 524 ··· 530 530 uint8_t reserved; 531 531 uint16_t pci_segment; /* The PCI Segment associated with this unit */ 532 532 uint64_t address; /* Base address of remapping hardware register-set */ 533 - AcpiDmarDeviceScope scope[0]; 533 + AcpiDmarDeviceScope scope[]; 534 534 } QEMU_PACKED; 535 535 typedef struct AcpiDmarHardwareUnit AcpiDmarHardwareUnit; 536 536 ··· 541 541 uint8_t flags; 542 542 uint8_t reserved; 543 543 uint16_t pci_segment; 544 - AcpiDmarDeviceScope scope[0]; 544 + AcpiDmarDeviceScope scope[]; 545 545 } QEMU_PACKED; 546 546 typedef struct AcpiDmarRootPortATS AcpiDmarRootPortATS; 547 547 ··· 604 604 struct AcpiIortItsGroup { 605 605 ACPI_IORT_NODE_HEADER_DEF 606 606 uint32_t its_count; 607 - uint32_t identifiers[0]; 607 + uint32_t identifiers[]; 608 608 } QEMU_PACKED; 609 609 typedef struct AcpiIortItsGroup AcpiIortItsGroup; 610 610 ··· 621 621 uint32_t pri_gsiv; 622 622 uint32_t gerr_gsiv; 623 623 uint32_t sync_gsiv; 624 - AcpiIortIdMapping id_mapping_array[0]; 624 + AcpiIortIdMapping id_mapping_array[]; 625 625 } 
QEMU_PACKED; 626 626 typedef struct AcpiIortSmmu3 AcpiIortSmmu3; 627 627 ··· 630 630 AcpiIortMemoryAccess memory_properties; 631 631 uint32_t ats_attribute; 632 632 uint32_t pci_segment_number; 633 - AcpiIortIdMapping id_mapping_array[0]; 633 + AcpiIortIdMapping id_mapping_array[]; 634 634 } QEMU_PACKED; 635 635 typedef struct AcpiIortRC AcpiIortRC; 636 636
+1 -1
include/hw/arm/smmu-common.h
··· 85 85 86 86 typedef struct SMMUPciBus { 87 87 PCIBus *bus; 88 - SMMUDevice *pbdev[0]; /* Parent array is sparse, so dynamically alloc */ 88 + SMMUDevice *pbdev[]; /* Parent array is sparse, so dynamically alloc */ 89 89 } SMMUPciBus; 90 90 91 91 typedef struct SMMUIOTLBKey {
+1 -1
include/hw/boards.h
··· 71 71 */ 72 72 typedef struct { 73 73 int len; 74 - CPUArchId cpus[0]; 74 + CPUArchId cpus[]; 75 75 } CPUArchIdList; 76 76 77 77 /**
+2 -1
include/hw/i386/intel_iommu.h
··· 114 114 115 115 struct VTDBus { 116 116 PCIBus* bus; /* A reference to the bus to provide translation for */ 117 - VTDAddressSpace *dev_as[0]; /* A table of VTDAddressSpace objects indexed by devfn */ 117 + /* A table of VTDAddressSpace objects indexed by devfn */ 118 + VTDAddressSpace *dev_as[]; 118 119 }; 119 120 120 121 struct VTDIOTLBEntry {
+1 -1
include/hw/s390x/event-facility.h
··· 122 122 123 123 typedef struct MDB { 124 124 MdbHeader header; 125 - MDBO mdbo[0]; 125 + MDBO mdbo[]; 126 126 } QEMU_PACKED MDB; 127 127 128 128 typedef struct SclpMsg {
+4 -4
include/hw/s390x/sclp.h
··· 132 132 uint16_t highest_cpu; 133 133 uint8_t _reserved5[124 - 122]; /* 122-123 */ 134 134 uint32_t hmfai; 135 - struct CPUEntry entries[0]; 135 + struct CPUEntry entries[]; 136 136 } QEMU_PACKED ReadInfo; 137 137 138 138 typedef struct ReadCpuInfo { ··· 142 142 uint16_t nr_standby; /* 12-13 */ 143 143 uint16_t offset_standby; /* 14-15 */ 144 144 uint8_t reserved0[24-16]; /* 16-23 */ 145 - struct CPUEntry entries[0]; 145 + struct CPUEntry entries[]; 146 146 } QEMU_PACKED ReadCpuInfo; 147 147 148 148 typedef struct ReadStorageElementInfo { ··· 151 151 uint16_t assigned; 152 152 uint16_t standby; 153 153 uint8_t _reserved0[16 - 14]; /* 14-15 */ 154 - uint32_t entries[0]; 154 + uint32_t entries[]; 155 155 } QEMU_PACKED ReadStorageElementInfo; 156 156 157 157 typedef struct AttachStorageElement { ··· 159 159 uint8_t _reserved0[10 - 8]; /* 8-9 */ 160 160 uint16_t assigned; 161 161 uint8_t _reserved1[16 - 12]; /* 12-15 */ 162 - uint32_t entries[0]; 162 + uint32_t entries[]; 163 163 } QEMU_PACKED AttachStorageElement; 164 164 165 165 typedef struct AssignStorage {
+1 -1
include/hw/virtio/virtio-iommu.h
··· 41 41 42 42 typedef struct IOMMUPciBus { 43 43 PCIBus *bus; 44 - IOMMUDevice *pbdev[0]; /* Parent array is sparse, so dynamically alloc */ 44 + IOMMUDevice *pbdev[]; /* Parent array is sparse, so dynamically alloc */ 45 45 } IOMMUPciBus; 46 46 47 47 typedef struct VirtIOIOMMU {
+3
include/qemu/cpuid.h
··· 45 45 #ifndef bit_AVX2 46 46 #define bit_AVX2 (1 << 5) 47 47 #endif 48 + #ifndef bit_AVX512F 49 + #define bit_AVX512F (1 << 16) 50 + #endif 48 51 #ifndef bit_BMI2 49 52 #define bit_BMI2 (1 << 8) 50 53 #endif
+83 -3
include/qemu/lockable.h
··· 50 50 #define QEMU_LOCK_FUNC(x) ((QemuLockUnlockFunc *) \ 51 51 QEMU_GENERIC(x, \ 52 52 (QemuMutex *, qemu_mutex_lock), \ 53 + (QemuRecMutex *, qemu_rec_mutex_lock), \ 53 54 (CoMutex *, qemu_co_mutex_lock), \ 54 55 (QemuSpin *, qemu_spin_lock), \ 55 56 unknown_lock_type)) ··· 57 58 #define QEMU_UNLOCK_FUNC(x) ((QemuLockUnlockFunc *) \ 58 59 QEMU_GENERIC(x, \ 59 60 (QemuMutex *, qemu_mutex_unlock), \ 61 + (QemuRecMutex *, qemu_rec_mutex_unlock), \ 60 62 (CoMutex *, qemu_co_mutex_unlock), \ 61 63 (QemuSpin *, qemu_spin_unlock), \ 62 64 unknown_lock_type)) ··· 65 67 * In C++ it would be different, but then C++ wouldn't need QemuLockable 66 68 * either... 67 69 */ 68 - #define QEMU_MAKE_LOCKABLE_(x) qemu_make_lockable((x), &(QemuLockable) { \ 70 + #define QEMU_MAKE_LOCKABLE_(x) (&(QemuLockable) { \ 69 71 .object = (x), \ 70 72 .lock = QEMU_LOCK_FUNC(x), \ 71 73 .unlock = QEMU_UNLOCK_FUNC(x), \ ··· 73 75 74 76 /* QEMU_MAKE_LOCKABLE - Make a polymorphic QemuLockable 75 77 * 76 - * @x: a lock object (currently one of QemuMutex, CoMutex, QemuSpin). 78 + * @x: a lock object (currently one of QemuMutex, QemuRecMutex, CoMutex, QemuSpin). 77 79 * 78 80 * Returns a QemuLockable object that can be passed around 79 - * to a function that can operate with locks of any kind. 81 + * to a function that can operate with locks of any kind, or 82 + * NULL if @x is %NULL. 80 83 */ 81 84 #define QEMU_MAKE_LOCKABLE(x) \ 82 85 QEMU_GENERIC(x, \ 83 86 (QemuLockable *, (x)), \ 87 + qemu_make_lockable((x), QEMU_MAKE_LOCKABLE_(x))) 88 + 89 + /* QEMU_MAKE_LOCKABLE_NONNULL - Make a polymorphic QemuLockable 90 + * 91 + * @x: a lock object (currently one of QemuMutex, QemuRecMutex, CoMutex, QemuSpin). 92 + * 93 + * Returns a QemuLockable object that can be passed around 94 + * to a function that can operate with locks of any kind. 
95 + */ 96 + #define QEMU_MAKE_LOCKABLE_NONNULL(x) \ 97 + QEMU_GENERIC(x, \ 98 + (QemuLockable *, (x)), \ 84 99 QEMU_MAKE_LOCKABLE_(x)) 85 100 86 101 static inline void qemu_lockable_lock(QemuLockable *x) ··· 92 107 { 93 108 x->unlock(x->object); 94 109 } 110 + 111 + static inline QemuLockable *qemu_lockable_auto_lock(QemuLockable *x) 112 + { 113 + qemu_lockable_lock(x); 114 + return x; 115 + } 116 + 117 + static inline void qemu_lockable_auto_unlock(QemuLockable *x) 118 + { 119 + if (x) { 120 + qemu_lockable_unlock(x); 121 + } 122 + } 123 + 124 + G_DEFINE_AUTOPTR_CLEANUP_FUNC(QemuLockable, qemu_lockable_auto_unlock) 125 + 126 + #define WITH_QEMU_LOCK_GUARD_(x, var) \ 127 + for (g_autoptr(QemuLockable) var = \ 128 + qemu_lockable_auto_lock(QEMU_MAKE_LOCKABLE_NONNULL((x))); \ 129 + var; \ 130 + qemu_lockable_auto_unlock(var), var = NULL) 131 + 132 + /** 133 + * WITH_QEMU_LOCK_GUARD - Lock a lock object for scope 134 + * 135 + * @x: a lock object (currently one of QemuMutex, CoMutex, QemuSpin). 136 + * 137 + * This macro defines a lock scope such that entering the scope takes the lock 138 + * and leaving the scope releases the lock. Return statements are allowed 139 + * within the scope and release the lock. Break and continue statements leave 140 + * the scope early and release the lock. 141 + * 142 + * WITH_QEMU_LOCK_GUARD(&mutex) { 143 + * ... 144 + * if (error) { 145 + * return; <-- mutex is automatically unlocked 146 + * } 147 + * 148 + * if (early_exit) { 149 + * break; <-- leave this scope early 150 + * } 151 + * ... 152 + * } 153 + */ 154 + #define WITH_QEMU_LOCK_GUARD(x) \ 155 + WITH_QEMU_LOCK_GUARD_((x), qemu_lockable_auto##__COUNTER__) 156 + 157 + /** 158 + * QEMU_LOCK_GUARD - Lock an object until the end of the scope 159 + * 160 + * @x: a lock object (currently one of QemuMutex, CoMutex, QemuSpin). 161 + * 162 + * This macro takes a lock until the end of the scope. Return statements 163 + * release the lock. 164 + * 165 + * ... 
<-- mutex not locked 166 + * QEMU_LOCK_GUARD(&mutex); <-- mutex locked from here onwards 167 + * ... 168 + * if (error) { 169 + * return; <-- mutex is automatically unlocked 170 + * } 171 + */ 172 + #define QEMU_LOCK_GUARD(x) \ 173 + g_autoptr(QemuLockable) qemu_lockable_auto##__COUNTER__ = \ 174 + qemu_lockable_auto_lock(QEMU_MAKE_LOCKABLE((x))) 95 175 96 176 #endif
+40 -8
include/qom/object.h
··· 1664 1664 void (*get)(Object *, struct tm *, Error **), 1665 1665 Error **errp); 1666 1666 1667 + typedef enum { 1668 + /* Automatically add a getter to the property */ 1669 + OBJ_PROP_FLAG_READ = 1 << 0, 1670 + /* Automatically add a setter to the property */ 1671 + OBJ_PROP_FLAG_WRITE = 1 << 1, 1672 + /* Automatically add a getter and a setter to the property */ 1673 + OBJ_PROP_FLAG_READWRITE = (OBJ_PROP_FLAG_READ | OBJ_PROP_FLAG_WRITE), 1674 + } ObjectPropertyFlags; 1675 + 1667 1676 /** 1668 1677 * object_property_add_uint8_ptr: 1669 1678 * @obj: the object to add a property to 1670 1679 * @name: the name of the property 1671 1680 * @v: pointer to value 1681 + * @flags: bitwise-or'd ObjectPropertyFlags 1672 1682 * @errp: if an error occurs, a pointer to an area to store the error 1673 1683 * 1674 1684 * Add an integer property in memory. This function will add a 1675 1685 * property of type 'uint8'. 1676 1686 */ 1677 1687 void object_property_add_uint8_ptr(Object *obj, const char *name, 1678 - const uint8_t *v, Error **errp); 1688 + const uint8_t *v, ObjectPropertyFlags flags, 1689 + Error **errp); 1690 + 1679 1691 ObjectProperty *object_class_property_add_uint8_ptr(ObjectClass *klass, 1680 1692 const char *name, 1681 - const uint8_t *v, Error **errp); 1693 + const uint8_t *v, 1694 + ObjectPropertyFlags flags, 1695 + Error **errp); 1682 1696 1683 1697 /** 1684 1698 * object_property_add_uint16_ptr: 1685 1699 * @obj: the object to add a property to 1686 1700 * @name: the name of the property 1687 1701 * @v: pointer to value 1702 + * @flags: bitwise-or'd ObjectPropertyFlags 1688 1703 * @errp: if an error occurs, a pointer to an area to store the error 1689 1704 * 1690 1705 * Add an integer property in memory. This function will add a 1691 1706 * property of type 'uint16'. 
1692 1707 */ 1693 1708 void object_property_add_uint16_ptr(Object *obj, const char *name, 1694 - const uint16_t *v, Error **errp); 1709 + const uint16_t *v, 1710 + ObjectPropertyFlags flags, 1711 + Error **errp); 1712 + 1695 1713 ObjectProperty *object_class_property_add_uint16_ptr(ObjectClass *klass, 1696 1714 const char *name, 1697 - const uint16_t *v, Error **errp); 1715 + const uint16_t *v, 1716 + ObjectPropertyFlags flags, 1717 + Error **errp); 1698 1718 1699 1719 /** 1700 1720 * object_property_add_uint32_ptr: 1701 1721 * @obj: the object to add a property to 1702 1722 * @name: the name of the property 1703 1723 * @v: pointer to value 1724 + * @flags: bitwise-or'd ObjectPropertyFlags 1704 1725 * @errp: if an error occurs, a pointer to an area to store the error 1705 1726 * 1706 1727 * Add an integer property in memory. This function will add a 1707 1728 * property of type 'uint32'. 1708 1729 */ 1709 1730 void object_property_add_uint32_ptr(Object *obj, const char *name, 1710 - const uint32_t *v, Error **errp); 1731 + const uint32_t *v, 1732 + ObjectPropertyFlags flags, 1733 + Error **errp); 1734 + 1711 1735 ObjectProperty *object_class_property_add_uint32_ptr(ObjectClass *klass, 1712 1736 const char *name, 1713 - const uint32_t *v, Error **errp); 1737 + const uint32_t *v, 1738 + ObjectPropertyFlags flags, 1739 + Error **errp); 1714 1740 1715 1741 /** 1716 1742 * object_property_add_uint64_ptr: 1717 1743 * @obj: the object to add a property to 1718 1744 * @name: the name of the property 1719 1745 * @v: pointer to value 1746 + * @flags: bitwise-or'd ObjectPropertyFlags 1720 1747 * @errp: if an error occurs, a pointer to an area to store the error 1721 1748 * 1722 1749 * Add an integer property in memory. This function will add a 1723 1750 * property of type 'uint64'. 
1724 1751 */ 1725 1752 void object_property_add_uint64_ptr(Object *obj, const char *name, 1726 - const uint64_t *v, Error **errp); 1753 + const uint64_t *v, 1754 + ObjectPropertyFlags flags, 1755 + Error **errp); 1756 + 1727 1757 ObjectProperty *object_class_property_add_uint64_ptr(ObjectClass *klass, 1728 1758 const char *name, 1729 - const uint64_t *v, Error **errp); 1759 + const uint64_t *v, 1760 + ObjectPropertyFlags flags, 1761 + Error **errp); 1730 1762 1731 1763 /** 1732 1764 * object_property_add_alias:
+1 -1
include/sysemu/cryptodev.h
··· 143 143 uint8_t *dst; 144 144 uint8_t *aad_data; 145 145 uint8_t *digest_result; 146 - uint8_t data[0]; 146 + uint8_t data[]; 147 147 } CryptoDevBackendSymOpInfo; 148 148 149 149 typedef struct CryptoDevBackendClass {
+7
include/sysemu/whpx.h
··· 35 35 36 36 #endif /* CONFIG_WHPX */ 37 37 38 + /* state subset only touched by the VCPU itself during runtime */ 39 + #define WHPX_SET_RUNTIME_STATE 1 40 + /* state subset modified during VCPU reset */ 41 + #define WHPX_SET_RESET_STATE 2 42 + /* full state set, modified during initialization or on vmload */ 43 + #define WHPX_SET_FULL_STATE 3 44 + 38 45 #endif /* QEMU_WHPX_H */
+1 -1
include/tcg/tcg.h
··· 267 267 typedef struct TCGPool { 268 268 struct TCGPool *next; 269 269 int size; 270 - uint8_t data[0] __attribute__ ((aligned)); 270 + uint8_t data[] __attribute__ ((aligned)); 271 271 } TCGPool; 272 272 273 273 #define TCG_POOL_CHUNK_SIZE 32768
+6 -25
memory.c
··· 1170 1170 memory_region_do_init(mr, owner, name, size); 1171 1171 } 1172 1172 1173 - static void memory_region_get_addr(Object *obj, Visitor *v, const char *name, 1174 - void *opaque, Error **errp) 1175 - { 1176 - MemoryRegion *mr = MEMORY_REGION(obj); 1177 - uint64_t value = mr->addr; 1178 - 1179 - visit_type_uint64(v, name, &value, errp); 1180 - } 1181 - 1182 1173 static void memory_region_get_container(Object *obj, Visitor *v, 1183 1174 const char *name, void *opaque, 1184 1175 Error **errp) ··· 1242 1233 NULL, NULL, &error_abort); 1243 1234 op->resolve = memory_region_resolve_container; 1244 1235 1245 - object_property_add(OBJECT(mr), "addr", "uint64", 1246 - memory_region_get_addr, 1247 - NULL, /* memory_region_set_addr */ 1248 - NULL, NULL, &error_abort); 1236 + object_property_add_uint64_ptr(OBJECT(mr), "addr", 1237 + &mr->addr, OBJ_PROP_FLAG_READ, &error_abort); 1249 1238 object_property_add(OBJECT(mr), "priority", "uint32", 1250 1239 memory_region_get_priority, 1251 1240 NULL, /* memory_region_set_priority */ ··· 1671 1660 uint64_t size, 1672 1661 Error **errp) 1673 1662 { 1674 - Error *err = NULL; 1675 - memory_region_init(mr, owner, name, size); 1676 - mr->ram = true; 1663 + memory_region_init_ram_shared_nomigrate(mr, owner, name, size, false, errp); 1677 1664 mr->readonly = true; 1678 - mr->terminates = true; 1679 - mr->destructor = memory_region_destructor_ram; 1680 - mr->ram_block = qemu_ram_alloc(size, false, mr, &err); 1681 - mr->dirty_log_mask = tcg_enabled() ? 
(1 << DIRTY_MEMORY_CODE) : 0; 1682 - if (err) { 1683 - mr->size = int128_zero(); 1684 - object_unparent(OBJECT(mr)); 1685 - error_propagate(errp, err); 1686 - } 1687 1665 } 1688 1666 1689 1667 void memory_region_init_rom_device_nomigrate(MemoryRegion *mr, ··· 2830 2808 2831 2809 static const char *memory_region_type(MemoryRegion *mr) 2832 2810 { 2811 + if (mr->alias) { 2812 + return memory_region_type(mr->alias); 2813 + } 2833 2814 if (memory_region_is_ram_device(mr)) { 2834 2815 return "ramd"; 2835 2816 } else if (memory_region_is_romd(mr)) {
+1 -1
net/queue.c
··· 46 46 unsigned flags; 47 47 int size; 48 48 NetPacketSent *sent_cb; 49 - uint8_t data[0]; 49 + uint8_t data[]; 50 50 }; 51 51 52 52 struct NetQueue {
+1 -1
pc-bios/optionrom/pvh_main.c
··· 29 29 30 30 #define RSDP_SIGNATURE 0x2052545020445352LL /* "RSD PTR " */ 31 31 #define RSDP_AREA_ADDR 0x000E0000 32 - #define RSDP_AREA_SIZE 2048 32 + #define RSDP_AREA_SIZE 0x00020000 33 33 #define EBDA_BASE_ADDR 0x0000040E 34 34 #define EBDA_SIZE 1024 35 35
+1 -1
pc-bios/s390-ccw/bootmap.h
··· 136 136 137 137 typedef struct BootMapScript { 138 138 BootMapScriptHeader header; 139 - BootMapScriptEntry entry[0]; 139 + BootMapScriptEntry entry[]; 140 140 } __attribute__ ((packed)) BootMapScript; 141 141 142 142 /*
+1 -1
pc-bios/s390-ccw/sclp.h
··· 95 95 typedef struct WriteEventData { 96 96 SCCBHeader h; 97 97 EventBufferHeader ebh; 98 - char data[0]; 98 + char data[]; 99 99 } __attribute__((packed)) WriteEventData; 100 100 101 101 typedef struct ReadEventData {
+3 -4
plugins/core.c
··· 15 15 #include "qemu/error-report.h" 16 16 #include "qemu/config-file.h" 17 17 #include "qapi/error.h" 18 + #include "qemu/lockable.h" 18 19 #include "qemu/option.h" 19 20 #include "qemu/rcu_queue.h" 20 21 #include "qemu/xxhash.h" ··· 150 151 { 151 152 struct qemu_plugin_ctx *ctx; 152 153 153 - qemu_rec_mutex_lock(&plugin.lock); 154 + QEMU_LOCK_GUARD(&plugin.lock); 154 155 ctx = plugin_id_to_ctx_locked(id); 155 156 /* if the plugin is on its way out, ignore this request */ 156 157 if (unlikely(ctx->uninstalling)) { 157 - goto out_unlock; 158 + return; 158 159 } 159 160 if (func) { 160 161 struct qemu_plugin_cb *cb = ctx->callbacks[ev]; ··· 178 179 } else { 179 180 plugin_unregister_cb__locked(ctx, ev); 180 181 } 181 - out_unlock: 182 - qemu_rec_mutex_unlock(&plugin.lock); 183 182 } 184 183 185 184 void plugin_register_cb(qemu_plugin_id_t id, enum qemu_plugin_event ev,
+8 -8
plugins/loader.c
··· 19 19 #include "qemu/error-report.h" 20 20 #include "qemu/config-file.h" 21 21 #include "qapi/error.h" 22 + #include "qemu/lockable.h" 22 23 #include "qemu/option.h" 23 24 #include "qemu/rcu_queue.h" 24 25 #include "qemu/qht.h" ··· 367 368 struct qemu_plugin_reset_data *data; 368 369 struct qemu_plugin_ctx *ctx; 369 370 370 - qemu_rec_mutex_lock(&plugin.lock); 371 - ctx = plugin_id_to_ctx_locked(id); 372 - if (ctx->uninstalling || (reset && ctx->resetting)) { 373 - qemu_rec_mutex_unlock(&plugin.lock); 374 - return; 371 + WITH_QEMU_LOCK_GUARD(&plugin.lock) { 372 + ctx = plugin_id_to_ctx_locked(id); 373 + if (ctx->uninstalling || (reset && ctx->resetting)) { 374 + return; 375 + } 376 + ctx->resetting = reset; 377 + ctx->uninstalling = !reset; 375 378 } 376 - ctx->resetting = reset; 377 - ctx->uninstalling = !reset; 378 - qemu_rec_mutex_unlock(&plugin.lock); 379 379 380 380 data = g_new(struct qemu_plugin_reset_data, 1); 381 381 data->ctx = ctx;
+188 -24
qom/object.c
··· 2498 2498 visit_type_uint8(v, name, &value, errp); 2499 2499 } 2500 2500 2501 + static void property_set_uint8_ptr(Object *obj, Visitor *v, const char *name, 2502 + void *opaque, Error **errp) 2503 + { 2504 + uint8_t *field = opaque; 2505 + uint8_t value; 2506 + Error *local_err = NULL; 2507 + 2508 + visit_type_uint8(v, name, &value, &local_err); 2509 + if (local_err) { 2510 + error_propagate(errp, local_err); 2511 + return; 2512 + } 2513 + 2514 + *field = value; 2515 + } 2516 + 2501 2517 static void property_get_uint16_ptr(Object *obj, Visitor *v, const char *name, 2502 2518 void *opaque, Error **errp) 2503 2519 { ··· 2505 2521 visit_type_uint16(v, name, &value, errp); 2506 2522 } 2507 2523 2524 + static void property_set_uint16_ptr(Object *obj, Visitor *v, const char *name, 2525 + void *opaque, Error **errp) 2526 + { 2527 + uint16_t *field = opaque; 2528 + uint16_t value; 2529 + Error *local_err = NULL; 2530 + 2531 + visit_type_uint16(v, name, &value, &local_err); 2532 + if (local_err) { 2533 + error_propagate(errp, local_err); 2534 + return; 2535 + } 2536 + 2537 + *field = value; 2538 + } 2539 + 2508 2540 static void property_get_uint32_ptr(Object *obj, Visitor *v, const char *name, 2509 2541 void *opaque, Error **errp) 2510 2542 { ··· 2512 2544 visit_type_uint32(v, name, &value, errp); 2513 2545 } 2514 2546 2547 + static void property_set_uint32_ptr(Object *obj, Visitor *v, const char *name, 2548 + void *opaque, Error **errp) 2549 + { 2550 + uint32_t *field = opaque; 2551 + uint32_t value; 2552 + Error *local_err = NULL; 2553 + 2554 + visit_type_uint32(v, name, &value, &local_err); 2555 + if (local_err) { 2556 + error_propagate(errp, local_err); 2557 + return; 2558 + } 2559 + 2560 + *field = value; 2561 + } 2562 + 2515 2563 static void property_get_uint64_ptr(Object *obj, Visitor *v, const char *name, 2516 2564 void *opaque, Error **errp) 2517 2565 { ··· 2519 2567 visit_type_uint64(v, name, &value, errp); 2520 2568 } 2521 2569 2570 + static void 
property_set_uint64_ptr(Object *obj, Visitor *v, const char *name, 2571 + void *opaque, Error **errp) 2572 + { 2573 + uint64_t *field = opaque; 2574 + uint64_t value; 2575 + Error *local_err = NULL; 2576 + 2577 + visit_type_uint64(v, name, &value, &local_err); 2578 + if (local_err) { 2579 + error_propagate(errp, local_err); 2580 + return; 2581 + } 2582 + 2583 + *field = value; 2584 + } 2585 + 2522 2586 void object_property_add_uint8_ptr(Object *obj, const char *name, 2523 - const uint8_t *v, Error **errp) 2587 + const uint8_t *v, 2588 + ObjectPropertyFlags flags, 2589 + Error **errp) 2524 2590 { 2525 - object_property_add(obj, name, "uint8", property_get_uint8_ptr, 2526 - NULL, NULL, (void *)v, errp); 2591 + ObjectPropertyAccessor *getter = NULL; 2592 + ObjectPropertyAccessor *setter = NULL; 2593 + 2594 + if ((flags & OBJ_PROP_FLAG_READ) == OBJ_PROP_FLAG_READ) { 2595 + getter = property_get_uint8_ptr; 2596 + } 2597 + 2598 + if ((flags & OBJ_PROP_FLAG_WRITE) == OBJ_PROP_FLAG_WRITE) { 2599 + setter = property_set_uint8_ptr; 2600 + } 2601 + 2602 + object_property_add(obj, name, "uint8", 2603 + getter, setter, NULL, (void *)v, errp); 2527 2604 } 2528 2605 2529 2606 ObjectProperty * 2530 2607 object_class_property_add_uint8_ptr(ObjectClass *klass, const char *name, 2531 - const uint8_t *v, Error **errp) 2608 + const uint8_t *v, 2609 + ObjectPropertyFlags flags, 2610 + Error **errp) 2532 2611 { 2612 + ObjectPropertyAccessor *getter = NULL; 2613 + ObjectPropertyAccessor *setter = NULL; 2614 + 2615 + if ((flags & OBJ_PROP_FLAG_READ) == OBJ_PROP_FLAG_READ) { 2616 + getter = property_get_uint8_ptr; 2617 + } 2618 + 2619 + if ((flags & OBJ_PROP_FLAG_WRITE) == OBJ_PROP_FLAG_WRITE) { 2620 + setter = property_set_uint8_ptr; 2621 + } 2622 + 2533 2623 return object_class_property_add(klass, name, "uint8", 2534 - property_get_uint8_ptr, 2535 - NULL, NULL, (void *)v, errp); 2624 + getter, setter, NULL, (void *)v, errp); 2536 2625 } 2537 2626 2538 2627 void 
object_property_add_uint16_ptr(Object *obj, const char *name, 2539 - const uint16_t *v, Error **errp) 2628 + const uint16_t *v, 2629 + ObjectPropertyFlags flags, 2630 + Error **errp) 2540 2631 { 2541 - object_property_add(obj, name, "uint16", property_get_uint16_ptr, 2542 - NULL, NULL, (void *)v, errp); 2632 + ObjectPropertyAccessor *getter = NULL; 2633 + ObjectPropertyAccessor *setter = NULL; 2634 + 2635 + if ((flags & OBJ_PROP_FLAG_READ) == OBJ_PROP_FLAG_READ) { 2636 + getter = property_get_uint16_ptr; 2637 + } 2638 + 2639 + if ((flags & OBJ_PROP_FLAG_WRITE) == OBJ_PROP_FLAG_WRITE) { 2640 + setter = property_set_uint16_ptr; 2641 + } 2642 + 2643 + object_property_add(obj, name, "uint16", 2644 + getter, setter, NULL, (void *)v, errp); 2543 2645 } 2544 2646 2545 2647 ObjectProperty * 2546 2648 object_class_property_add_uint16_ptr(ObjectClass *klass, const char *name, 2547 - const uint16_t *v, Error **errp) 2649 + const uint16_t *v, 2650 + ObjectPropertyFlags flags, 2651 + Error **errp) 2548 2652 { 2653 + ObjectPropertyAccessor *getter = NULL; 2654 + ObjectPropertyAccessor *setter = NULL; 2655 + 2656 + if ((flags & OBJ_PROP_FLAG_READ) == OBJ_PROP_FLAG_READ) { 2657 + getter = property_get_uint16_ptr; 2658 + } 2659 + 2660 + if ((flags & OBJ_PROP_FLAG_WRITE) == OBJ_PROP_FLAG_WRITE) { 2661 + setter = property_set_uint16_ptr; 2662 + } 2663 + 2549 2664 return object_class_property_add(klass, name, "uint16", 2550 - property_get_uint16_ptr, 2551 - NULL, NULL, (void *)v, errp); 2665 + getter, setter, NULL, (void *)v, errp); 2552 2666 } 2553 2667 2554 2668 void object_property_add_uint32_ptr(Object *obj, const char *name, 2555 - const uint32_t *v, Error **errp) 2669 + const uint32_t *v, 2670 + ObjectPropertyFlags flags, 2671 + Error **errp) 2556 2672 { 2557 - object_property_add(obj, name, "uint32", property_get_uint32_ptr, 2558 - NULL, NULL, (void *)v, errp); 2673 + ObjectPropertyAccessor *getter = NULL; 2674 + ObjectPropertyAccessor *setter = NULL; 2675 + 2676 + if ((flags & 
OBJ_PROP_FLAG_READ) == OBJ_PROP_FLAG_READ) { 2677 + getter = property_get_uint32_ptr; 2678 + } 2679 + 2680 + if ((flags & OBJ_PROP_FLAG_WRITE) == OBJ_PROP_FLAG_WRITE) { 2681 + setter = property_set_uint32_ptr; 2682 + } 2683 + 2684 + object_property_add(obj, name, "uint32", 2685 + getter, setter, NULL, (void *)v, errp); 2559 2686 } 2560 2687 2561 2688 ObjectProperty * 2562 2689 object_class_property_add_uint32_ptr(ObjectClass *klass, const char *name, 2563 - const uint32_t *v, Error **errp) 2690 + const uint32_t *v, 2691 + ObjectPropertyFlags flags, 2692 + Error **errp) 2564 2693 { 2694 + ObjectPropertyAccessor *getter = NULL; 2695 + ObjectPropertyAccessor *setter = NULL; 2696 + 2697 + if ((flags & OBJ_PROP_FLAG_READ) == OBJ_PROP_FLAG_READ) { 2698 + getter = property_get_uint32_ptr; 2699 + } 2700 + 2701 + if ((flags & OBJ_PROP_FLAG_WRITE) == OBJ_PROP_FLAG_WRITE) { 2702 + setter = property_set_uint32_ptr; 2703 + } 2704 + 2565 2705 return object_class_property_add(klass, name, "uint32", 2566 - property_get_uint32_ptr, 2567 - NULL, NULL, (void *)v, errp); 2706 + getter, setter, NULL, (void *)v, errp); 2568 2707 } 2569 2708 2570 2709 void object_property_add_uint64_ptr(Object *obj, const char *name, 2571 - const uint64_t *v, Error **errp) 2710 + const uint64_t *v, 2711 + ObjectPropertyFlags flags, 2712 + Error **errp) 2572 2713 { 2573 - object_property_add(obj, name, "uint64", property_get_uint64_ptr, 2574 - NULL, NULL, (void *)v, errp); 2714 + ObjectPropertyAccessor *getter = NULL; 2715 + ObjectPropertyAccessor *setter = NULL; 2716 + 2717 + if ((flags & OBJ_PROP_FLAG_READ) == OBJ_PROP_FLAG_READ) { 2718 + getter = property_get_uint64_ptr; 2719 + } 2720 + 2721 + if ((flags & OBJ_PROP_FLAG_WRITE) == OBJ_PROP_FLAG_WRITE) { 2722 + setter = property_set_uint64_ptr; 2723 + } 2724 + 2725 + object_property_add(obj, name, "uint64", 2726 + getter, setter, NULL, (void *)v, errp); 2575 2727 } 2576 2728 2577 2729 ObjectProperty * 2578 2730 
object_class_property_add_uint64_ptr(ObjectClass *klass, const char *name, 2579 - const uint64_t *v, Error **errp) 2731 + const uint64_t *v, 2732 + ObjectPropertyFlags flags, 2733 + Error **errp) 2580 2734 { 2735 + ObjectPropertyAccessor *getter = NULL; 2736 + ObjectPropertyAccessor *setter = NULL; 2737 + 2738 + if ((flags & OBJ_PROP_FLAG_READ) == OBJ_PROP_FLAG_READ) { 2739 + getter = property_get_uint64_ptr; 2740 + } 2741 + 2742 + if ((flags & OBJ_PROP_FLAG_WRITE) == OBJ_PROP_FLAG_WRITE) { 2743 + setter = property_set_uint64_ptr; 2744 + } 2745 + 2581 2746 return object_class_property_add(klass, name, "uint64", 2582 - property_get_uint64_ptr, 2583 - NULL, NULL, (void *)v, errp); 2747 + getter, setter, NULL, (void *)v, errp); 2584 2748 } 2585 2749 2586 2750 typedef struct {
+159
scripts/coccinelle/memory-region-housekeeping.cocci
··· 1 + /* 2 + Usage: 3 + 4 + spatch \ 5 + --macro-file scripts/cocci-macro-file.h \ 6 + --sp-file scripts/coccinelle/memory-region-housekeeping.cocci \ 7 + --keep-comments \ 8 + --in-place \ 9 + --dir . 10 + 11 + */ 12 + 13 + 14 + // Replace memory_region_init_ram(readonly) by memory_region_init_rom() 15 + @@ 16 + expression E1, E2, E3, E4, E5; 17 + symbol true; 18 + @@ 19 + ( 20 + - memory_region_init_ram(E1, E2, E3, E4, E5); 21 + + memory_region_init_rom(E1, E2, E3, E4, E5); 22 + ... WHEN != E1 23 + - memory_region_set_readonly(E1, true); 24 + | 25 + - memory_region_init_ram_nomigrate(E1, E2, E3, E4, E5); 26 + + memory_region_init_rom_nomigrate(E1, E2, E3, E4, E5); 27 + ... WHEN != E1 28 + - memory_region_set_readonly(E1, true); 29 + ) 30 + 31 + 32 + @possible_memory_region_init_rom@ 33 + expression E1, E2, E3, E4, E5; 34 + position p; 35 + @@ 36 + ( 37 + memory_region_init_ram@p(E1, E2, E3, E4, E5); 38 + ... 39 + memory_region_set_readonly(E1, true); 40 + | 41 + memory_region_init_ram_nomigrate@p(E1, E2, E3, E4, E5); 42 + ... 43 + memory_region_set_readonly(E1, true); 44 + ) 45 + @script:python@ 46 + p << possible_memory_region_init_rom.p; 47 + @@ 48 + cocci.print_main("potential use of memory_region_init_rom*() in ", p) 49 + 50 + 51 + // Do not call memory_region_set_readonly() on ROM alias 52 + @@ 53 + expression ROM, E1, E2, E3, E4; 54 + expression ALIAS, E5, E6, E7, E8; 55 + @@ 56 + ( 57 + memory_region_init_rom(ROM, E1, E2, E3, E4); 58 + | 59 + memory_region_init_rom_nomigrate(ROM, E1, E2, E3, E4); 60 + ) 61 + ... 62 + memory_region_init_alias(ALIAS, E5, E6, ROM, E7, E8); 63 + - memory_region_set_readonly(ALIAS, true); 64 + 65 + 66 + // Replace by-hand memory_region_init_ram_nomigrate/vmstate_register_ram 67 + // code sequences with use of the new memory_region_init_ram function. 68 + // Similarly for the _rom and _rom_device functions. 
69 + // We don't try to replace sequences with a non-NULL owner, because 70 + // there are none in the tree that can be automatically converted 71 + // (and only a handful that can be manually converted). 72 + @@ 73 + expression MR; 74 + expression NAME; 75 + expression SIZE; 76 + expression ERRP; 77 + @@ 78 + -memory_region_init_ram_nomigrate(MR, NULL, NAME, SIZE, ERRP); 79 + +memory_region_init_ram(MR, NULL, NAME, SIZE, ERRP); 80 + ... 81 + -vmstate_register_ram_global(MR); 82 + @@ 83 + expression MR; 84 + expression NAME; 85 + expression SIZE; 86 + expression ERRP; 87 + @@ 88 + -memory_region_init_rom_nomigrate(MR, NULL, NAME, SIZE, ERRP); 89 + +memory_region_init_rom(MR, NULL, NAME, SIZE, ERRP); 90 + ... 91 + -vmstate_register_ram_global(MR); 92 + @@ 93 + expression MR; 94 + expression OPS; 95 + expression OPAQUE; 96 + expression NAME; 97 + expression SIZE; 98 + expression ERRP; 99 + @@ 100 + -memory_region_init_rom_device_nomigrate(MR, NULL, OPS, OPAQUE, NAME, SIZE, ERRP); 101 + +memory_region_init_rom_device(MR, NULL, OPS, OPAQUE, NAME, SIZE, ERRP); 102 + ... 103 + -vmstate_register_ram_global(MR); 104 + 105 + 106 + // Device is owner 107 + @@ 108 + typedef DeviceState; 109 + identifier device_fn, dev, obj; 110 + expression E1, E2, E3, E4, E5; 111 + @@ 112 + static void device_fn(DeviceState *dev, ...) 113 + { 114 + ... 115 + Object *obj = OBJECT(dev); 116 + <+... 
117 + ( 118 + - memory_region_init(E1, NULL, E2, E3); 119 + + memory_region_init(E1, obj, E2, E3); 120 + | 121 + - memory_region_init_io(E1, NULL, E2, E3, E4, E5); 122 + + memory_region_init_io(E1, obj, E2, E3, E4, E5); 123 + | 124 + - memory_region_init_alias(E1, NULL, E2, E3, E4, E5); 125 + + memory_region_init_alias(E1, obj, E2, E3, E4, E5); 126 + | 127 + - memory_region_init_rom(E1, NULL, E2, E3, E4); 128 + + memory_region_init_rom(E1, obj, E2, E3, E4); 129 + | 130 + - memory_region_init_ram_shared_nomigrate(E1, NULL, E2, E3, E4, E5); 131 + + memory_region_init_ram_shared_nomigrate(E1, obj, E2, E3, E4, E5); 132 + ) 133 + ...+> 134 + } 135 + @@ 136 + identifier device_fn, dev; 137 + expression E1, E2, E3, E4, E5; 138 + @@ 139 + static void device_fn(DeviceState *dev, ...) 140 + { 141 + <+... 142 + ( 143 + - memory_region_init(E1, NULL, E2, E3); 144 + + memory_region_init(E1, OBJECT(dev), E2, E3); 145 + | 146 + - memory_region_init_io(E1, NULL, E2, E3, E4, E5); 147 + + memory_region_init_io(E1, OBJECT(dev), E2, E3, E4, E5); 148 + | 149 + - memory_region_init_alias(E1, NULL, E2, E3, E4, E5); 150 + + memory_region_init_alias(E1, OBJECT(dev), E2, E3, E4, E5); 151 + | 152 + - memory_region_init_rom(E1, NULL, E2, E3, E4); 153 + + memory_region_init_rom(E1, OBJECT(dev), E2, E3, E4); 154 + | 155 + - memory_region_init_ram_shared_nomigrate(E1, NULL, E2, E3, E4, E5); 156 + + memory_region_init_ram_shared_nomigrate(E1, OBJECT(dev), E2, E3, E4, E5); 157 + ) 158 + ...+> 159 + }
-38
scripts/coccinelle/memory-region-init-ram.cocci
··· 1 - // Replace by-hand memory_region_init_ram_nomigrate/vmstate_register_ram 2 - // code sequences with use of the new memory_region_init_ram function. 3 - // Similarly for the _rom and _rom_device functions. 4 - // We don't try to replace sequences with a non-NULL owner, because 5 - // there are none in the tree that can be automatically converted 6 - // (and only a handful that can be manually converted). 7 - @@ 8 - expression MR; 9 - expression NAME; 10 - expression SIZE; 11 - expression ERRP; 12 - @@ 13 - -memory_region_init_ram_nomigrate(MR, NULL, NAME, SIZE, ERRP); 14 - +memory_region_init_ram(MR, NULL, NAME, SIZE, ERRP); 15 - ... 16 - -vmstate_register_ram_global(MR); 17 - @@ 18 - expression MR; 19 - expression NAME; 20 - expression SIZE; 21 - expression ERRP; 22 - @@ 23 - -memory_region_init_rom_nomigrate(MR, NULL, NAME, SIZE, ERRP); 24 - +memory_region_init_rom(MR, NULL, NAME, SIZE, ERRP); 25 - ... 26 - -vmstate_register_ram_global(MR); 27 - @@ 28 - expression MR; 29 - expression OPS; 30 - expression OPAQUE; 31 - expression NAME; 32 - expression SIZE; 33 - expression ERRP; 34 - @@ 35 - -memory_region_init_rom_device_nomigrate(MR, NULL, OPS, OPAQUE, NAME, SIZE, ERRP); 36 - +memory_region_init_rom_device(MR, NULL, OPS, OPAQUE, NAME, SIZE, ERRP); 37 - ... 38 - -vmstate_register_ram_global(MR);
+10 -7
scsi/qemu-pr-helper.c
··· 421 421 int rq_servact = cdb[1]; 422 422 int rq_scope = cdb[2] >> 4; 423 423 int rq_type = cdb[2] & 0xf; 424 - struct prout_param_descriptor paramp; 424 + g_autofree struct prout_param_descriptor *paramp = NULL; 425 425 char transportids[PR_HELPER_DATA_SIZE]; 426 426 int r; 427 + 428 + paramp = g_malloc0(sizeof(struct prout_param_descriptor) 429 + + sizeof(struct transportid *) * MPATH_MX_TIDS); 427 430 428 431 if (sz < PR_OUT_FIXED_PARAM_SIZE) { 429 432 /* Illegal request, Parameter list length error. This isn't fatal; ··· 454 457 * used by libmpathpersist (which, of course, will immediately 455 458 * do the opposite). 456 459 */ 457 - memset(&paramp, 0, sizeof(paramp)); 458 - memcpy(&paramp.key, &param[0], 8); 459 - memcpy(&paramp.sa_key, &param[8], 8); 460 - paramp.sa_flags = param[20]; 460 + memcpy(&paramp->key, &param[0], 8); 461 + memcpy(&paramp->sa_key, &param[8], 8); 462 + paramp->sa_flags = param[20]; 461 463 if (sz > PR_OUT_FIXED_PARAM_SIZE) { 462 464 size_t transportid_len; 463 465 int i, j; ··· 520 522 return CHECK_CONDITION; 521 523 } 522 524 523 - paramp.trnptid_list[paramp.num_transportid++] = id; 525 + assert(paramp->num_transportid < MPATH_MX_TIDS); 526 + paramp->trnptid_list[paramp->num_transportid++] = id; 524 527 } 525 528 } 526 529 527 530 r = mpath_persistent_reserve_out(fd, rq_servact, rq_scope, rq_type, 528 - &paramp, noisy, verbose); 531 + paramp, noisy, verbose); 529 532 return mpath_reconstruct_sense(fd, r, sense); 530 533 } 531 534 #endif
+16 -10
softmmu/vl.c
··· 3789 3789 */ 3790 3790 loc_set_none(); 3791 3791 3792 + /* 3793 + * Check for -cpu help and -device help before we call select_machine(), 3794 + * which will return an error if the architecture has no default machine 3795 + * type and the user did not specify one, so that the user doesn't need 3796 + * to say '-cpu help -machine something'. 3797 + */ 3798 + if (cpu_option && is_help_option(cpu_option)) { 3799 + list_cpus(cpu_option); 3800 + exit(0); 3801 + } 3802 + 3803 + if (qemu_opts_foreach(qemu_find_opts("device"), 3804 + device_help_func, NULL, NULL)) { 3805 + exit(0); 3806 + } 3807 + 3792 3808 user_register_global_props(); 3793 3809 3794 3810 replay_configure(icount_opts); ··· 3875 3891 3876 3892 if (machine_class->hw_version) { 3877 3893 qemu_set_hw_version(machine_class->hw_version); 3878 - } 3879 - 3880 - if (cpu_option && is_help_option(cpu_option)) { 3881 - list_cpus(cpu_option); 3882 - exit(0); 3883 3894 } 3884 3895 3885 3896 if (!trace_init_backends()) { ··· 4111 4122 qemu_opts_foreach(qemu_find_opts("fsdev"), 4112 4123 fsdev_init_func, NULL, &error_fatal); 4113 4124 #endif 4114 - 4115 - if (qemu_opts_foreach(qemu_find_opts("device"), 4116 - device_help_func, NULL, NULL)) { 4117 - exit(0); 4118 - } 4119 4125 4120 4126 /* 4121 4127 * Note: we need to create block backends before
+3 -19
target/arm/cpu.c
··· 1153 1153 cpu->has_pmu = value; 1154 1154 } 1155 1155 1156 - static void arm_get_init_svtor(Object *obj, Visitor *v, const char *name, 1157 - void *opaque, Error **errp) 1158 - { 1159 - ARMCPU *cpu = ARM_CPU(obj); 1160 - 1161 - visit_type_uint32(v, name, &cpu->init_svtor, errp); 1162 - } 1163 - 1164 - static void arm_set_init_svtor(Object *obj, Visitor *v, const char *name, 1165 - void *opaque, Error **errp) 1166 - { 1167 - ARMCPU *cpu = ARM_CPU(obj); 1168 - 1169 - visit_type_uint32(v, name, &cpu->init_svtor, errp); 1170 - } 1171 - 1172 1156 unsigned int gt_cntfrq_period_ns(ARMCPU *cpu) 1173 1157 { 1174 1158 /* ··· 1288 1272 * a simple DEFINE_PROP_UINT32 for this because we want to permit 1289 1273 * the property to be set after realize. 1290 1274 */ 1291 - object_property_add(obj, "init-svtor", "uint32", 1292 - arm_get_init_svtor, arm_set_init_svtor, 1293 - NULL, NULL, &error_abort); 1275 + object_property_add_uint32_ptr(obj, "init-svtor", 1276 + &cpu->init_svtor, 1277 + OBJ_PROP_FLAG_READWRITE, &error_abort); 1294 1278 } 1295 1279 1296 1280 qdev_property_add_static(DEVICE(obj), &arm_cpu_cfgend_property);
+2 -31
target/i386/hax-posix.c
··· 108 108 109 109 static char *hax_vm_devfs_string(int vm_id) 110 110 { 111 - char *name; 112 - 113 - if (vm_id > MAX_VM_ID) { 114 - fprintf(stderr, "Too big VM id\n"); 115 - return NULL; 116 - } 117 - 118 - #define HAX_VM_DEVFS "/dev/hax_vm/vmxx" 119 - name = g_strdup(HAX_VM_DEVFS); 120 - if (!name) { 121 - return NULL; 122 - } 123 - 124 - snprintf(name, sizeof HAX_VM_DEVFS, "/dev/hax_vm/vm%02d", vm_id); 125 - return name; 111 + return g_strdup_printf("/dev/hax_vm/vm%02d", vm_id); 126 112 } 127 113 128 114 static char *hax_vcpu_devfs_string(int vm_id, int vcpu_id) 129 115 { 130 - char *name; 131 - 132 - if (vm_id > MAX_VM_ID || vcpu_id > MAX_VCPU_ID) { 133 - fprintf(stderr, "Too big vm id %x or vcpu id %x\n", vm_id, vcpu_id); 134 - return NULL; 135 - } 136 - 137 - #define HAX_VCPU_DEVFS "/dev/hax_vmxx/vcpuxx" 138 - name = g_strdup(HAX_VCPU_DEVFS); 139 - if (!name) { 140 - return NULL; 141 - } 142 - 143 - snprintf(name, sizeof HAX_VCPU_DEVFS, "/dev/hax_vm%02d/vcpu%02d", 144 - vm_id, vcpu_id); 145 - return name; 116 + return g_strdup_printf("/dev/hax_vm%02d/vcpu%02d", vm_id, vcpu_id); 146 117 } 147 118 148 119 int hax_host_create_vm(struct hax_state *hax, int *vmid)
+2 -31
target/i386/hax-windows.c
··· 185 185 186 186 static char *hax_vm_devfs_string(int vm_id) 187 187 { 188 - char *name; 189 - 190 - if (vm_id > MAX_VM_ID) { 191 - fprintf(stderr, "Too big VM id\n"); 192 - return NULL; 193 - } 194 - 195 - #define HAX_VM_DEVFS "\\\\.\\hax_vmxx" 196 - name = g_strdup(HAX_VM_DEVFS); 197 - if (!name) { 198 - return NULL; 199 - } 200 - 201 - snprintf(name, sizeof HAX_VM_DEVFS, "\\\\.\\hax_vm%02d", vm_id); 202 - return name; 188 + return g_strdup_printf("\\\\.\\hax_vm%02d", vm_id); 203 189 } 204 190 205 191 static char *hax_vcpu_devfs_string(int vm_id, int vcpu_id) 206 192 { 207 - char *name; 208 - 209 - if (vm_id > MAX_VM_ID || vcpu_id > MAX_VCPU_ID) { 210 - fprintf(stderr, "Too big vm id %x or vcpu id %x\n", vm_id, vcpu_id); 211 - return NULL; 212 - } 213 - 214 - #define HAX_VCPU_DEVFS "\\\\.\\hax_vmxx_vcpuxx" 215 - name = g_strdup(HAX_VCPU_DEVFS); 216 - if (!name) { 217 - return NULL; 218 - } 219 - 220 - snprintf(name, sizeof HAX_VCPU_DEVFS, "\\\\.\\hax_vm%02d_vcpu%02d", 221 - vm_id, vcpu_id); 222 - return name; 193 + return g_strdup_printf("\\\\.\\hax_vm%02d_vcpu%02d", vm_id, vcpu_id); 223 194 } 224 195 225 196 int hax_host_create_vm(struct hax_state *hax, int *vmid)
+9 -97
target/i386/sev.c
··· 267 267 } 268 268 269 269 static void 270 - qsev_guest_set_handle(Object *obj, Visitor *v, const char *name, 271 - void *opaque, Error **errp) 272 - { 273 - QSevGuestInfo *sev = QSEV_GUEST_INFO(obj); 274 - uint32_t value; 275 - 276 - visit_type_uint32(v, name, &value, errp); 277 - sev->handle = value; 278 - } 279 - 280 - static void 281 - qsev_guest_set_policy(Object *obj, Visitor *v, const char *name, 282 - void *opaque, Error **errp) 283 - { 284 - QSevGuestInfo *sev = QSEV_GUEST_INFO(obj); 285 - uint32_t value; 286 - 287 - visit_type_uint32(v, name, &value, errp); 288 - sev->policy = value; 289 - } 290 - 291 - static void 292 - qsev_guest_set_cbitpos(Object *obj, Visitor *v, const char *name, 293 - void *opaque, Error **errp) 294 - { 295 - QSevGuestInfo *sev = QSEV_GUEST_INFO(obj); 296 - uint32_t value; 297 - 298 - visit_type_uint32(v, name, &value, errp); 299 - sev->cbitpos = value; 300 - } 301 - 302 - static void 303 - qsev_guest_set_reduced_phys_bits(Object *obj, Visitor *v, const char *name, 304 - void *opaque, Error **errp) 305 - { 306 - QSevGuestInfo *sev = QSEV_GUEST_INFO(obj); 307 - uint32_t value; 308 - 309 - visit_type_uint32(v, name, &value, errp); 310 - sev->reduced_phys_bits = value; 311 - } 312 - 313 - static void 314 - qsev_guest_get_policy(Object *obj, Visitor *v, const char *name, 315 - void *opaque, Error **errp) 316 - { 317 - uint32_t value; 318 - QSevGuestInfo *sev = QSEV_GUEST_INFO(obj); 319 - 320 - value = sev->policy; 321 - visit_type_uint32(v, name, &value, errp); 322 - } 323 - 324 - static void 325 - qsev_guest_get_handle(Object *obj, Visitor *v, const char *name, 326 - void *opaque, Error **errp) 327 - { 328 - uint32_t value; 329 - QSevGuestInfo *sev = QSEV_GUEST_INFO(obj); 330 - 331 - value = sev->handle; 332 - visit_type_uint32(v, name, &value, errp); 333 - } 334 - 335 - static void 336 - qsev_guest_get_cbitpos(Object *obj, Visitor *v, const char *name, 337 - void *opaque, Error **errp) 338 - { 339 - uint32_t value; 340 - 
QSevGuestInfo *sev = QSEV_GUEST_INFO(obj); 341 - 342 - value = sev->cbitpos; 343 - visit_type_uint32(v, name, &value, errp); 344 - } 345 - 346 - static void 347 - qsev_guest_get_reduced_phys_bits(Object *obj, Visitor *v, const char *name, 348 - void *opaque, Error **errp) 349 - { 350 - uint32_t value; 351 - QSevGuestInfo *sev = QSEV_GUEST_INFO(obj); 352 - 353 - value = sev->reduced_phys_bits; 354 - visit_type_uint32(v, name, &value, errp); 355 - } 356 - 357 - static void 358 270 qsev_guest_init(Object *obj) 359 271 { 360 272 QSevGuestInfo *sev = QSEV_GUEST_INFO(obj); 361 273 362 274 sev->sev_device = g_strdup(DEFAULT_SEV_DEVICE); 363 275 sev->policy = DEFAULT_GUEST_POLICY; 364 - object_property_add(obj, "policy", "uint32", qsev_guest_get_policy, 365 - qsev_guest_set_policy, NULL, NULL, NULL); 366 - object_property_add(obj, "handle", "uint32", qsev_guest_get_handle, 367 - qsev_guest_set_handle, NULL, NULL, NULL); 368 - object_property_add(obj, "cbitpos", "uint32", qsev_guest_get_cbitpos, 369 - qsev_guest_set_cbitpos, NULL, NULL, NULL); 370 - object_property_add(obj, "reduced-phys-bits", "uint32", 371 - qsev_guest_get_reduced_phys_bits, 372 - qsev_guest_set_reduced_phys_bits, NULL, NULL, NULL); 276 + object_property_add_uint32_ptr(obj, "policy", &sev->policy, 277 + OBJ_PROP_FLAG_READWRITE, NULL); 278 + object_property_add_uint32_ptr(obj, "handle", &sev->handle, 279 + OBJ_PROP_FLAG_READWRITE, NULL); 280 + object_property_add_uint32_ptr(obj, "cbitpos", &sev->cbitpos, 281 + OBJ_PROP_FLAG_READWRITE, NULL); 282 + object_property_add_uint32_ptr(obj, "reduced-phys-bits", 283 + &sev->reduced_phys_bits, 284 + OBJ_PROP_FLAG_READWRITE, NULL); 373 285 } 374 286 375 287 /* sev guest info */
+9
target/i386/whp-dispatch.h
··· 23 23 X(HRESULT, WHvGetVirtualProcessorRegisters, (WHV_PARTITION_HANDLE Partition, UINT32 VpIndex, const WHV_REGISTER_NAME* RegisterNames, UINT32 RegisterCount, WHV_REGISTER_VALUE* RegisterValues)) \ 24 24 X(HRESULT, WHvSetVirtualProcessorRegisters, (WHV_PARTITION_HANDLE Partition, UINT32 VpIndex, const WHV_REGISTER_NAME* RegisterNames, UINT32 RegisterCount, const WHV_REGISTER_VALUE* RegisterValues)) \ 25 25 26 + /* 27 + * These are supplemental functions that may not be present 28 + * on all versions and are not critical for basic functionality. 29 + */ 30 + #define LIST_WINHVPLATFORM_FUNCTIONS_SUPPLEMENTAL(X) \ 31 + X(HRESULT, WHvSuspendPartitionTime, (WHV_PARTITION_HANDLE Partition)) \ 26 32 27 33 #define LIST_WINHVEMULATION_FUNCTIONS(X) \ 28 34 X(HRESULT, WHvEmulatorCreateEmulator, (const WHV_EMULATOR_CALLBACKS* Callbacks, WHV_EMULATOR_HANDLE* Emulator)) \ ··· 40 46 /* Define function typedef */ 41 47 LIST_WINHVPLATFORM_FUNCTIONS(WHP_DEFINE_TYPE) 42 48 LIST_WINHVEMULATION_FUNCTIONS(WHP_DEFINE_TYPE) 49 + LIST_WINHVPLATFORM_FUNCTIONS_SUPPLEMENTAL(WHP_DEFINE_TYPE) 43 50 44 51 struct WHPDispatch { 45 52 LIST_WINHVPLATFORM_FUNCTIONS(WHP_DECLARE_MEMBER) 46 53 LIST_WINHVEMULATION_FUNCTIONS(WHP_DECLARE_MEMBER) 54 + LIST_WINHVPLATFORM_FUNCTIONS_SUPPLEMENTAL(WHP_DECLARE_MEMBER) 47 55 }; 48 56 49 57 extern struct WHPDispatch whp_dispatch; ··· 53 61 typedef enum WHPFunctionList { 54 62 WINHV_PLATFORM_FNS_DEFAULT, 55 63 WINHV_EMULATION_FNS_DEFAULT, 64 + WINHV_PLATFORM_FNS_SUPPLEMENTAL 56 65 } WHPFunctionList; 57 66 58 67 #endif /* WHP_DISPATCH_H */
+118 -42
target/i386/whpx-all.c
··· 114 114 WHvX64RegisterXmmControlStatus, 115 115 116 116 /* X64 MSRs */ 117 - WHvX64RegisterTsc, 118 117 WHvX64RegisterEfer, 119 118 #ifdef TARGET_X86_64 120 119 WHvX64RegisterKernelGsBase, ··· 215 214 return qs; 216 215 } 217 216 218 - static void whpx_set_registers(CPUState *cpu) 217 + static int whpx_set_tsc(CPUState *cpu) 218 + { 219 + struct CPUX86State *env = (CPUArchState *)(cpu->env_ptr); 220 + WHV_REGISTER_NAME tsc_reg = WHvX64RegisterTsc; 221 + WHV_REGISTER_VALUE tsc_val; 222 + HRESULT hr; 223 + struct whpx_state *whpx = &whpx_global; 224 + 225 + /* 226 + * Suspend the partition prior to setting the TSC to reduce the variance 227 + * in TSC across vCPUs. When the first vCPU runs post suspend, the 228 + * partition is automatically resumed. 229 + */ 230 + if (whp_dispatch.WHvSuspendPartitionTime) { 231 + 232 + /* 233 + * Unable to suspend partition while setting TSC is not a fatal 234 + * error. It just increases the likelihood of TSC variance between 235 + * vCPUs and some guest OS are able to handle that just fine. 236 + */ 237 + hr = whp_dispatch.WHvSuspendPartitionTime(whpx->partition); 238 + if (FAILED(hr)) { 239 + warn_report("WHPX: Failed to suspend partition, hr=%08lx", hr); 240 + } 241 + } 242 + 243 + tsc_val.Reg64 = env->tsc; 244 + hr = whp_dispatch.WHvSetVirtualProcessorRegisters( 245 + whpx->partition, cpu->cpu_index, &tsc_reg, 1, &tsc_val); 246 + if (FAILED(hr)) { 247 + error_report("WHPX: Failed to set TSC, hr=%08lx", hr); 248 + return -1; 249 + } 250 + 251 + return 0; 252 + } 253 + 254 + static void whpx_set_registers(CPUState *cpu, int level) 219 255 { 220 256 struct whpx_state *whpx = &whpx_global; 221 257 struct whpx_vcpu *vcpu = get_whpx_vcpu(cpu); ··· 230 266 231 267 assert(cpu_is_stopped(cpu) || qemu_cpu_is_self(cpu)); 232 268 269 + /* 270 + * Following MSRs have side effects on the guest or are too heavy for 271 + * runtime. Limit them to full state update. 
272 + */ 273 + if (level >= WHPX_SET_RESET_STATE) { 274 + whpx_set_tsc(cpu); 275 + } 276 + 233 277 memset(&vcxt, 0, sizeof(struct whpx_register_set)); 234 278 235 279 v86 = (env->eflags & VM_MASK); ··· 330 374 idx += 1; 331 375 332 376 /* MSRs */ 333 - assert(whpx_register_names[idx] == WHvX64RegisterTsc); 334 - vcxt.values[idx++].Reg64 = env->tsc; 335 377 assert(whpx_register_names[idx] == WHvX64RegisterEfer); 336 378 vcxt.values[idx++].Reg64 = env->efer; 337 379 #ifdef TARGET_X86_64 ··· 379 421 return; 380 422 } 381 423 424 + static int whpx_get_tsc(CPUState *cpu) 425 + { 426 + struct CPUX86State *env = (CPUArchState *)(cpu->env_ptr); 427 + WHV_REGISTER_NAME tsc_reg = WHvX64RegisterTsc; 428 + WHV_REGISTER_VALUE tsc_val; 429 + HRESULT hr; 430 + struct whpx_state *whpx = &whpx_global; 431 + 432 + hr = whp_dispatch.WHvGetVirtualProcessorRegisters( 433 + whpx->partition, cpu->cpu_index, &tsc_reg, 1, &tsc_val); 434 + if (FAILED(hr)) { 435 + error_report("WHPX: Failed to get TSC, hr=%08lx", hr); 436 + return -1; 437 + } 438 + 439 + env->tsc = tsc_val.Reg64; 440 + return 0; 441 + } 442 + 382 443 static void whpx_get_registers(CPUState *cpu) 383 444 { 384 445 struct whpx_state *whpx = &whpx_global; ··· 393 454 int i; 394 455 395 456 assert(cpu_is_stopped(cpu) || qemu_cpu_is_self(cpu)); 457 + 458 + if (!env->tsc_valid) { 459 + whpx_get_tsc(cpu); 460 + env->tsc_valid = !runstate_is_running(); 461 + } 396 462 397 463 hr = whp_dispatch.WHvGetVirtualProcessorRegisters( 398 464 whpx->partition, cpu->cpu_index, ··· 492 558 idx += 1; 493 559 494 560 /* MSRs */ 495 - assert(whpx_register_names[idx] == WHvX64RegisterTsc); 496 - env->tsc = vcxt.values[idx++].Reg64; 497 561 assert(whpx_register_names[idx] == WHvX64RegisterEfer); 498 562 env->efer = vcxt.values[idx++].Reg64; 499 563 #ifdef TARGET_X86_64 ··· 841 905 842 906 if ((cpu->interrupt_request & CPU_INTERRUPT_INIT) && 843 907 !(env->hflags & HF_SMM_MASK)) { 844 - 908 + whpx_cpu_synchronize_state(cpu); 845 909 
do_cpu_init(x86_cpu); 846 - cpu->vcpu_dirty = true; 847 910 vcpu->interruptable = true; 848 911 } 849 912 ··· 859 922 } 860 923 861 924 if (cpu->interrupt_request & CPU_INTERRUPT_SIPI) { 862 - if (!cpu->vcpu_dirty) { 863 - whpx_get_registers(cpu); 864 - } 925 + whpx_cpu_synchronize_state(cpu); 865 926 do_cpu_sipi(x86_cpu); 866 927 } 867 928 868 929 if (cpu->interrupt_request & CPU_INTERRUPT_TPR) { 869 930 cpu->interrupt_request &= ~CPU_INTERRUPT_TPR; 870 - if (!cpu->vcpu_dirty) { 871 - whpx_get_registers(cpu); 872 - } 931 + whpx_cpu_synchronize_state(cpu); 873 932 apic_handle_tpr_access_report(x86_cpu->apic_state, env->eip, 874 933 env->tpr_access_type); 875 934 } ··· 896 955 897 956 do { 898 957 if (cpu->vcpu_dirty) { 899 - whpx_set_registers(cpu); 958 + whpx_set_registers(cpu, WHPX_SET_RUNTIME_STATE); 900 959 cpu->vcpu_dirty = false; 901 960 } 902 961 ··· 980 1039 WHV_REGISTER_VALUE reg_values[5]; 981 1040 WHV_REGISTER_NAME reg_names[5]; 982 1041 UINT32 reg_count = 5; 983 - UINT64 rip, rax, rcx, rdx, rbx; 1042 + UINT64 cpuid_fn, rip = 0, rax = 0, rcx = 0, rdx = 0, rbx = 0; 1043 + X86CPU *x86_cpu = X86_CPU(cpu); 1044 + CPUX86State *env = &x86_cpu->env; 984 1045 985 1046 memset(reg_values, 0, sizeof(reg_values)); 986 1047 987 1048 rip = vcpu->exit_ctx.VpContext.Rip + 988 1049 vcpu->exit_ctx.VpContext.InstructionLength; 989 - switch (vcpu->exit_ctx.CpuidAccess.Rax) { 990 - case 1: 991 - rax = vcpu->exit_ctx.CpuidAccess.DefaultResultRax; 992 - /* Advertise that we are running on a hypervisor */ 993 - rcx = 994 - vcpu->exit_ctx.CpuidAccess.DefaultResultRcx | 995 - CPUID_EXT_HYPERVISOR; 1050 + cpuid_fn = vcpu->exit_ctx.CpuidAccess.Rax; 996 1051 997 - rdx = vcpu->exit_ctx.CpuidAccess.DefaultResultRdx; 998 - rbx = vcpu->exit_ctx.CpuidAccess.DefaultResultRbx; 999 - break; 1052 + /* 1053 + * Ideally, these should be supplied to the hypervisor during VCPU 1054 + * initialization and it should be able to satisfy this request. 
1055 + * But, currently, WHPX doesn't support setting CPUID values in the 1056 + * hypervisor once the partition has been setup, which is too late 1057 + * since VCPUs are realized later. For now, use the values from 1058 + * QEMU to satisfy these requests, until WHPX adds support for 1059 + * being able to set these values in the hypervisor at runtime. 1060 + */ 1061 + cpu_x86_cpuid(env, cpuid_fn, 0, (UINT32 *)&rax, (UINT32 *)&rbx, 1062 + (UINT32 *)&rcx, (UINT32 *)&rdx); 1063 + switch (cpuid_fn) { 1000 1064 case 0x80000001: 1001 - rax = vcpu->exit_ctx.CpuidAccess.DefaultResultRax; 1002 1065 /* Remove any support of OSVW */ 1003 - rcx = 1004 - vcpu->exit_ctx.CpuidAccess.DefaultResultRcx & 1005 - ~CPUID_EXT3_OSVW; 1006 - 1007 - rdx = vcpu->exit_ctx.CpuidAccess.DefaultResultRdx; 1008 - rbx = vcpu->exit_ctx.CpuidAccess.DefaultResultRbx; 1066 + rcx &= ~CPUID_EXT3_OSVW; 1009 1067 break; 1010 - default: 1011 - rax = vcpu->exit_ctx.CpuidAccess.DefaultResultRax; 1012 - rcx = vcpu->exit_ctx.CpuidAccess.DefaultResultRcx; 1013 - rdx = vcpu->exit_ctx.CpuidAccess.DefaultResultRdx; 1014 - rbx = vcpu->exit_ctx.CpuidAccess.DefaultResultRbx; 1015 1068 } 1016 1069 1017 1070 reg_names[0] = WHvX64RegisterRip; ··· 1067 1120 1068 1121 static void do_whpx_cpu_synchronize_state(CPUState *cpu, run_on_cpu_data arg) 1069 1122 { 1070 - whpx_get_registers(cpu); 1071 - cpu->vcpu_dirty = true; 1123 + if (!cpu->vcpu_dirty) { 1124 + whpx_get_registers(cpu); 1125 + cpu->vcpu_dirty = true; 1126 + } 1072 1127 } 1073 1128 1074 1129 static void do_whpx_cpu_synchronize_post_reset(CPUState *cpu, 1075 1130 run_on_cpu_data arg) 1076 1131 { 1077 - whpx_set_registers(cpu); 1132 + whpx_set_registers(cpu, WHPX_SET_RESET_STATE); 1078 1133 cpu->vcpu_dirty = false; 1079 1134 } 1080 1135 1081 1136 static void do_whpx_cpu_synchronize_post_init(CPUState *cpu, 1082 1137 run_on_cpu_data arg) 1083 1138 { 1084 - whpx_set_registers(cpu); 1139 + whpx_set_registers(cpu, WHPX_SET_FULL_STATE); 1085 1140 cpu->vcpu_dirty = 
false; 1086 1141 } 1087 1142 ··· 1123 1178 1124 1179 static Error *whpx_migration_blocker; 1125 1180 1181 + static void whpx_cpu_update_state(void *opaque, int running, RunState state) 1182 + { 1183 + CPUX86State *env = opaque; 1184 + 1185 + if (running) { 1186 + env->tsc_valid = false; 1187 + } 1188 + } 1189 + 1126 1190 int whpx_init_vcpu(CPUState *cpu) 1127 1191 { 1128 1192 HRESULT hr; ··· 1178 1242 1179 1243 cpu->vcpu_dirty = true; 1180 1244 cpu->hax_vcpu = (struct hax_vcpu_state *)vcpu; 1245 + qemu_add_vm_change_state_handler(whpx_cpu_update_state, cpu->env_ptr); 1181 1246 1182 1247 return 0; 1183 1248 } ··· 1367 1432 1368 1433 #define WINHV_PLATFORM_DLL "WinHvPlatform.dll" 1369 1434 #define WINHV_EMULATION_DLL "WinHvEmulation.dll" 1435 + #define WHP_LOAD_FIELD_OPTIONAL(return_type, function_name, signature) \ 1436 + whp_dispatch.function_name = \ 1437 + (function_name ## _t)GetProcAddress(hLib, #function_name); \ 1438 + 1370 1439 #define WHP_LOAD_FIELD(return_type, function_name, signature) \ 1371 1440 whp_dispatch.function_name = \ 1372 1441 (function_name ## _t)GetProcAddress(hLib, #function_name); \ ··· 1393 1462 case WINHV_EMULATION_FNS_DEFAULT: 1394 1463 WHP_LOAD_LIB(WINHV_EMULATION_DLL, hLib) 1395 1464 LIST_WINHVEMULATION_FUNCTIONS(WHP_LOAD_FIELD) 1465 + break; 1466 + 1467 + case WINHV_PLATFORM_FNS_SUPPLEMENTAL: 1468 + WHP_LOAD_LIB(WINHV_PLATFORM_DLL, hLib) 1469 + LIST_WINHVPLATFORM_FUNCTIONS_SUPPLEMENTAL(WHP_LOAD_FIELD_OPTIONAL) 1396 1470 break; 1397 1471 } 1398 1472 ··· 1554 1628 goto error; 1555 1629 } 1556 1630 1631 + assert(load_whp_dispatch_fns(&hWinHvPlatform, 1632 + WINHV_PLATFORM_FNS_SUPPLEMENTAL)); 1557 1633 whp_dispatch_initialized = true; 1558 1634 1559 1635 return true;
+1 -1
target/s390x/ioinst.c
··· 347 347 uint16_t len; 348 348 uint16_t code; 349 349 uint32_t param; 350 - char data[0]; 350 + char data[]; 351 351 } QEMU_PACKED ChscResp; 352 352 353 353 #define CHSC_MIN_RESP_LEN 0x0008
+1
tests/docker/dockerfiles/debian-amd64.docker
··· 17 17 libbz2-dev \ 18 18 liblzo2-dev \ 19 19 librdmacm-dev \ 20 + libsasl2-dev \ 20 21 libsnappy-dev \ 21 22 libvte-dev 22 23
+1 -1
tests/qtest/libqos/ahci.h
··· 351 351 typedef struct FIS { 352 352 uint8_t fis_type; 353 353 uint8_t flags; 354 - char data[0]; 354 + char data[]; 355 355 } __attribute__((__packed__)) FIS; 356 356 357 357 /**
+2 -2
ui/console.c
··· 1299 1299 object_property_allow_set_link, 1300 1300 OBJ_PROP_LINK_STRONG, 1301 1301 &error_abort); 1302 - object_property_add_uint32_ptr(obj, "head", 1303 - &s->head, &error_abort); 1302 + object_property_add_uint32_ptr(obj, "head", &s->head, 1303 + OBJ_PROP_FLAG_READ, &error_abort); 1304 1304 1305 1305 if (!active_console || ((active_console->console_type != GRAPHIC_CONSOLE) && 1306 1306 (console_type == GRAPHIC_CONSOLE))) {
+7 -3
ui/curses.c
··· 54 54 }; 55 55 56 56 static DisplayChangeListener *dcl; 57 - static console_ch_t screen[160 * 100]; 57 + static console_ch_t *screen; 58 58 static WINDOW *screenpad = NULL; 59 59 static int width, height, gwidth, gheight, invalidate; 60 60 static int px, py, sminx, sminy, smaxx, smaxy; 61 61 62 62 static const char *font_charset = "CP437"; 63 - static cchar_t vga_to_curses[256]; 63 + static cchar_t *vga_to_curses; 64 64 65 65 static void curses_update(DisplayChangeListener *dcl, 66 66 int x, int y, int w, int h) ··· 405 405 static void curses_atexit(void) 406 406 { 407 407 endwin(); 408 + g_free(vga_to_curses); 409 + g_free(screen); 408 410 } 409 411 410 412 /* ··· 529 531 * Control characters are normally non-printable, but VGA does have 530 532 * well-known glyphs for them. 531 533 */ 532 - static uint16_t control_characters[0x20] = { 534 + static const uint16_t control_characters[0x20] = { 533 535 0x0020, 534 536 0x263a, 535 537 0x263b, ··· 783 785 if (opts->u.curses.charset) { 784 786 font_charset = opts->u.curses.charset; 785 787 } 788 + screen = g_new0(console_ch_t, 160 * 100); 789 + vga_to_curses = g_new0(cchar_t, 256); 786 790 curses_setup(); 787 791 curses_keyboard_setup(); 788 792 atexit(curses_atexit);
+61 -10
util/bufferiszero.c
··· 63 63 } 64 64 } 65 65 66 - #if defined(CONFIG_AVX2_OPT) || defined(__SSE2__) 66 + #if defined(CONFIG_AVX512F_OPT) || defined(CONFIG_AVX2_OPT) || defined(__SSE2__) 67 67 /* Do not use push_options pragmas unnecessarily, because clang 68 68 * does not support them. 69 69 */ 70 - #ifdef CONFIG_AVX2_OPT 70 + #if defined(CONFIG_AVX512F_OPT) || defined(CONFIG_AVX2_OPT) 71 71 #pragma GCC push_options 72 72 #pragma GCC target("sse2") 73 73 #endif ··· 104 104 105 105 return _mm_movemask_epi8(_mm_cmpeq_epi8(t, zero)) == 0xFFFF; 106 106 } 107 - #ifdef CONFIG_AVX2_OPT 107 + #if defined(CONFIG_AVX512F_OPT) || defined(CONFIG_AVX2_OPT) 108 108 #pragma GCC pop_options 109 109 #endif 110 110 ··· 187 187 #pragma GCC pop_options 188 188 #endif /* CONFIG_AVX2_OPT */ 189 189 190 + #ifdef CONFIG_AVX512F_OPT 191 + #pragma GCC push_options 192 + #pragma GCC target("avx512f") 193 + #include <immintrin.h> 194 + 195 + static bool 196 + buffer_zero_avx512(const void *buf, size_t len) 197 + { 198 + /* Begin with an unaligned head of 64 bytes. */ 199 + __m512i t = _mm512_loadu_si512(buf); 200 + __m512i *p = (__m512i *)(((uintptr_t)buf + 5 * 64) & -64); 201 + __m512i *e = (__m512i *)(((uintptr_t)buf + len) & -64); 202 + 203 + /* Loop over 64-byte aligned blocks of 256. */ 204 + while (p <= e) { 205 + __builtin_prefetch(p); 206 + if (unlikely(_mm512_test_epi64_mask(t, t))) { 207 + return false; 208 + } 209 + t = p[-4] | p[-3] | p[-2] | p[-1]; 210 + p += 4; 211 + } 212 + 213 + t |= _mm512_loadu_si512(buf + len - 4 * 64); 214 + t |= _mm512_loadu_si512(buf + len - 3 * 64); 215 + t |= _mm512_loadu_si512(buf + len - 2 * 64); 216 + t |= _mm512_loadu_si512(buf + len - 1 * 64); 217 + 218 + return !_mm512_test_epi64_mask(t, t); 219 + 220 + } 221 + #pragma GCC pop_options 222 + #endif 223 + 224 + 190 225 /* Note that for test_buffer_is_zero_next_accel, the most preferred 191 226 * ISA must have the least significant bit. 
192 227 */ 193 - #define CACHE_AVX2 1 194 - #define CACHE_SSE4 2 195 - #define CACHE_SSE2 4 228 + #define CACHE_AVX512F 1 229 + #define CACHE_AVX2 2 230 + #define CACHE_SSE4 4 231 + #define CACHE_SSE2 8 196 232 197 233 /* Make sure that these variables are appropriately initialized when 198 234 * SSE2 is enabled on the compiler command-line, but the compiler is 199 235 * too old to support CONFIG_AVX2_OPT. 200 236 */ 201 - #ifdef CONFIG_AVX2_OPT 237 + #if defined(CONFIG_AVX512F_OPT) || defined(CONFIG_AVX2_OPT) 202 238 # define INIT_CACHE 0 203 239 # define INIT_ACCEL buffer_zero_int 204 240 #else ··· 211 247 212 248 static unsigned cpuid_cache = INIT_CACHE; 213 249 static bool (*buffer_accel)(const void *, size_t) = INIT_ACCEL; 250 + static int length_to_accel = 64; 214 251 215 252 static void init_accel(unsigned cache) 216 253 { ··· 226 263 fn = buffer_zero_avx2; 227 264 } 228 265 #endif 266 + #ifdef CONFIG_AVX512F_OPT 267 + if (cache & CACHE_AVX512F) { 268 + fn = buffer_zero_avx512; 269 + length_to_accel = 256; 270 + } 271 + #endif 229 272 buffer_accel = fn; 230 273 } 231 274 232 - #ifdef CONFIG_AVX2_OPT 275 + #if defined(CONFIG_AVX512F_OPT) || defined(CONFIG_AVX2_OPT) 233 276 #include "qemu/cpuid.h" 234 277 235 278 static void __attribute__((constructor)) init_cpuid_cache(void) ··· 252 295 int bv; 253 296 __asm("xgetbv" : "=a"(bv), "=d"(d) : "c"(0)); 254 297 __cpuid_count(7, 0, a, b, c, d); 255 - if ((bv & 6) == 6 && (b & bit_AVX2)) { 298 + if ((bv & 0x6) == 0x6 && (b & bit_AVX2)) { 256 299 cache |= CACHE_AVX2; 257 300 } 301 + /* 0xe6: 302 + * XCR0[7:5] = 111b (OPMASK state, upper 256-bit of ZMM0-ZMM15 303 + * and ZMM16-ZMM31 state are enabled by OS) 304 + * XCR0[2:1] = 11b (XMM state and YMM state are enabled by OS) 305 + */ 306 + if ((bv & 0xe6) == 0xe6 && (b & bit_AVX512F)) { 307 + cache |= CACHE_AVX512F; 308 + } 258 309 } 259 310 } 260 311 cpuid_cache = cache; ··· 277 328 278 329 static bool select_accel_fn(const void *buf, size_t len) 279 330 { 280 - if 
(likely(len >= 64)) { 331 + if (likely(len >= length_to_accel)) { 281 332 return buffer_accel(buf, len); 282 333 } 283 334 return buffer_zero_int(buf, len);
+14
util/module.c
··· 19 19 #endif 20 20 #include "qemu/queue.h" 21 21 #include "qemu/module.h" 22 + #ifdef CONFIG_MODULE_UPGRADES 23 + #include "qemu-version.h" 24 + #endif 22 25 23 26 typedef struct ModuleEntry 24 27 { ··· 170 173 #ifdef CONFIG_MODULES 171 174 char *fname = NULL; 172 175 char *exec_dir; 176 + #ifdef CONFIG_MODULE_UPGRADES 177 + char *version_dir; 178 + #endif 173 179 const char *search_dir; 174 180 char *dirs[4]; 175 181 char *module_name; ··· 201 207 dirs[n_dirs++] = g_strdup_printf("%s", CONFIG_QEMU_MODDIR); 202 208 dirs[n_dirs++] = g_strdup_printf("%s/..", exec_dir ? : ""); 203 209 dirs[n_dirs++] = g_strdup_printf("%s", exec_dir ? : ""); 210 + 211 + #ifdef CONFIG_MODULE_UPGRADES 212 + version_dir = g_strcanon(g_strdup(QEMU_PKGVERSION), 213 + G_CSET_A_2_Z G_CSET_a_2_z G_CSET_DIGITS "+-.~", 214 + '_'); 215 + dirs[n_dirs++] = g_strdup_printf("/var/run/qemu/%s", version_dir); 216 + #endif 217 + 204 218 assert(n_dirs <= ARRAY_SIZE(dirs)); 205 219 206 220 g_free(exec_dir);
+7
util/oslib-posix.c
··· 466 466 static bool touch_all_pages(char *area, size_t hpagesize, size_t numpages, 467 467 int smp_cpus) 468 468 { 469 + static gsize initialized = 0; 469 470 size_t numpages_per_thread, leftover; 470 471 char *addr = area; 471 472 int i = 0; 473 + 474 + if (g_once_init_enter(&initialized)) { 475 + qemu_mutex_init(&page_mutex); 476 + qemu_cond_init(&page_cond); 477 + g_once_init_leave(&initialized, 1); 478 + } 472 479 473 480 memset_thread_failed = false; 474 481 threads_created_flag = false;
+11 -12
util/qemu-timer.c
··· 25 25 #include "qemu/osdep.h" 26 26 #include "qemu/main-loop.h" 27 27 #include "qemu/timer.h" 28 + #include "qemu/lockable.h" 28 29 #include "sysemu/replay.h" 29 30 #include "sysemu/cpus.h" 30 31 ··· 186 187 return false; 187 188 } 188 189 189 - qemu_mutex_lock(&timer_list->active_timers_lock); 190 - if (!timer_list->active_timers) { 191 - qemu_mutex_unlock(&timer_list->active_timers_lock); 192 - return false; 190 + WITH_QEMU_LOCK_GUARD(&timer_list->active_timers_lock) { 191 + if (!timer_list->active_timers) { 192 + return false; 193 + } 194 + expire_time = timer_list->active_timers->expire_time; 193 195 } 194 - expire_time = timer_list->active_timers->expire_time; 195 - qemu_mutex_unlock(&timer_list->active_timers_lock); 196 196 197 197 return expire_time <= qemu_clock_get_ns(timer_list->clock->type); 198 198 } ··· 225 225 * value but ->notify_cb() is called when the deadline changes. Therefore 226 226 * the caller should notice the change and there is no race condition. 227 227 */ 228 - qemu_mutex_lock(&timer_list->active_timers_lock); 229 - if (!timer_list->active_timers) { 230 - qemu_mutex_unlock(&timer_list->active_timers_lock); 231 - return -1; 228 + WITH_QEMU_LOCK_GUARD(&timer_list->active_timers_lock) { 229 + if (!timer_list->active_timers) { 230 + return -1; 231 + } 232 + expire_time = timer_list->active_timers->expire_time; 232 233 } 233 - expire_time = timer_list->active_timers->expire_time; 234 - qemu_mutex_unlock(&timer_list->active_timers_lock); 235 234 236 235 delta = expire_time - qemu_clock_get_ns(timer_list->clock->type); 237 236