qemu with hax to log dma reads & writes jcs.org/2018/11/12/vfio

i386: hvf: Don't duplicate register reset

hvf_reset_vcpu() duplicates actions performed by x86_cpu_reset(). The
difference is that hvf_reset_vcpu() stores initial values directly to
VMCS while x86_cpu_reset() stores them in CPUX86State, and then
cpu_synchronize_all_post_init() or cpu_synchronize_all_post_reset()
flushes CPUX86State into VMCS. That makes hvf_reset_vcpu() a kind of
no-op.

Here's the trace of CPU state modifications during VM start:
hvf_reset_vcpu (resets VMCS)
cpu_synchronize_all_post_init (overwrites VMCS fields written by
hvf_reset_vcpu())
cpu_synchronize_all_states
hvf_reset_vcpu (resets VMCS)
cpu_synchronize_all_post_reset (overwrites VMCS fields written by
hvf_reset_vcpu())

General purpose registers, system registers, segment descriptors, flags
and IP are set by hvf_put_segments() in post-init and post-reset,
therefore it's safe to remove them from hvf_reset_vcpu().

PDPTE initialization can be dropped because Intel SDM (26.3.1.6 Checks
on Guest Page-Directory-Pointer-Table Entries) doesn't require PDPTE to
be clear unless PAE is used: "A VM entry to a guest that does not use
PAE paging does not check the validity of any PDPTEs."
And if PAE is used, PDPTE's are initialized from CR3 in macvm_set_cr0().

Cc: Cameron Esfahani <dirty@apple.com>
Signed-off-by: Roman Bolshakov <r.bolshakov@yadro.com>
Message-Id: <20200630102824.77604-8-r.bolshakov@yadro.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>

Authored by Roman Bolshakov and committed by Paolo Bonzini.
Commit: 5009ef22 (parent: 82695a1b)

-93
-1
include/sysemu/hvf.h
··· 30 30 void hvf_cpu_synchronize_post_init(CPUState *); 31 31 void hvf_cpu_synchronize_pre_loadvm(CPUState *); 32 32 void hvf_vcpu_destroy(CPUState *); 33 - void hvf_reset_vcpu(CPUState *); 34 33 35 34 #define TYPE_HVF_ACCEL ACCEL_CLASS_NAME("hvf") 36 35
-3
target/i386/cpu.c
··· 6080 6080 if (kvm_enabled()) { 6081 6081 kvm_arch_reset_vcpu(cpu); 6082 6082 } 6083 - else if (hvf_enabled()) { 6084 - hvf_reset_vcpu(s); 6085 - } 6086 6083 #endif 6087 6084 } 6088 6085
-89
target/i386/hvf/hvf.c
··· 452 452 .log_sync = hvf_log_sync, 453 453 }; 454 454 455 - void hvf_reset_vcpu(CPUState *cpu) { 456 - uint64_t pdpte[4] = {0, 0, 0, 0}; 457 - int i; 458 - 459 - /* TODO: this shouldn't be needed; there is already a call to 460 - * cpu_synchronize_all_post_reset in vl.c 461 - */ 462 - wvmcs(cpu->hvf_fd, VMCS_GUEST_IA32_EFER, 0); 463 - 464 - /* Initialize PDPTE */ 465 - for (i = 0; i < 4; i++) { 466 - wvmcs(cpu->hvf_fd, VMCS_GUEST_PDPTE0 + i * 2, pdpte[i]); 467 - } 468 - 469 - macvm_set_cr0(cpu->hvf_fd, 0x60000010); 470 - 471 - wvmcs(cpu->hvf_fd, VMCS_CR4_MASK, CR4_VMXE_MASK); 472 - wvmcs(cpu->hvf_fd, VMCS_CR4_SHADOW, 0x0); 473 - wvmcs(cpu->hvf_fd, VMCS_GUEST_CR4, CR4_VMXE_MASK); 474 - 475 - /* set VMCS guest state fields */ 476 - wvmcs(cpu->hvf_fd, VMCS_GUEST_CS_SELECTOR, 0xf000); 477 - wvmcs(cpu->hvf_fd, VMCS_GUEST_CS_LIMIT, 0xffff); 478 - wvmcs(cpu->hvf_fd, VMCS_GUEST_CS_ACCESS_RIGHTS, 0x9b); 479 - wvmcs(cpu->hvf_fd, VMCS_GUEST_CS_BASE, 0xffff0000); 480 - 481 - wvmcs(cpu->hvf_fd, VMCS_GUEST_DS_SELECTOR, 0); 482 - wvmcs(cpu->hvf_fd, VMCS_GUEST_DS_LIMIT, 0xffff); 483 - wvmcs(cpu->hvf_fd, VMCS_GUEST_DS_ACCESS_RIGHTS, 0x93); 484 - wvmcs(cpu->hvf_fd, VMCS_GUEST_DS_BASE, 0); 485 - 486 - wvmcs(cpu->hvf_fd, VMCS_GUEST_ES_SELECTOR, 0); 487 - wvmcs(cpu->hvf_fd, VMCS_GUEST_ES_LIMIT, 0xffff); 488 - wvmcs(cpu->hvf_fd, VMCS_GUEST_ES_ACCESS_RIGHTS, 0x93); 489 - wvmcs(cpu->hvf_fd, VMCS_GUEST_ES_BASE, 0); 490 - 491 - wvmcs(cpu->hvf_fd, VMCS_GUEST_FS_SELECTOR, 0); 492 - wvmcs(cpu->hvf_fd, VMCS_GUEST_FS_LIMIT, 0xffff); 493 - wvmcs(cpu->hvf_fd, VMCS_GUEST_FS_ACCESS_RIGHTS, 0x93); 494 - wvmcs(cpu->hvf_fd, VMCS_GUEST_FS_BASE, 0); 495 - 496 - wvmcs(cpu->hvf_fd, VMCS_GUEST_GS_SELECTOR, 0); 497 - wvmcs(cpu->hvf_fd, VMCS_GUEST_GS_LIMIT, 0xffff); 498 - wvmcs(cpu->hvf_fd, VMCS_GUEST_GS_ACCESS_RIGHTS, 0x93); 499 - wvmcs(cpu->hvf_fd, VMCS_GUEST_GS_BASE, 0); 500 - 501 - wvmcs(cpu->hvf_fd, VMCS_GUEST_SS_SELECTOR, 0); 502 - wvmcs(cpu->hvf_fd, VMCS_GUEST_SS_LIMIT, 0xffff); 503 - 
wvmcs(cpu->hvf_fd, VMCS_GUEST_SS_ACCESS_RIGHTS, 0x93); 504 - wvmcs(cpu->hvf_fd, VMCS_GUEST_SS_BASE, 0); 505 - 506 - wvmcs(cpu->hvf_fd, VMCS_GUEST_LDTR_SELECTOR, 0); 507 - wvmcs(cpu->hvf_fd, VMCS_GUEST_LDTR_LIMIT, 0); 508 - wvmcs(cpu->hvf_fd, VMCS_GUEST_LDTR_ACCESS_RIGHTS, 0x10000); 509 - wvmcs(cpu->hvf_fd, VMCS_GUEST_LDTR_BASE, 0); 510 - 511 - wvmcs(cpu->hvf_fd, VMCS_GUEST_TR_SELECTOR, 0); 512 - wvmcs(cpu->hvf_fd, VMCS_GUEST_TR_LIMIT, 0); 513 - wvmcs(cpu->hvf_fd, VMCS_GUEST_TR_ACCESS_RIGHTS, 0x83); 514 - wvmcs(cpu->hvf_fd, VMCS_GUEST_TR_BASE, 0); 515 - 516 - wvmcs(cpu->hvf_fd, VMCS_GUEST_GDTR_LIMIT, 0); 517 - wvmcs(cpu->hvf_fd, VMCS_GUEST_GDTR_BASE, 0); 518 - 519 - wvmcs(cpu->hvf_fd, VMCS_GUEST_IDTR_LIMIT, 0); 520 - wvmcs(cpu->hvf_fd, VMCS_GUEST_IDTR_BASE, 0); 521 - 522 - /*wvmcs(cpu->hvf_fd, VMCS_GUEST_CR2, 0x0);*/ 523 - wvmcs(cpu->hvf_fd, VMCS_GUEST_CR3, 0x0); 524 - 525 - wreg(cpu->hvf_fd, HV_X86_RIP, 0xfff0); 526 - wreg(cpu->hvf_fd, HV_X86_RDX, 0x623); 527 - wreg(cpu->hvf_fd, HV_X86_RFLAGS, 0x2); 528 - wreg(cpu->hvf_fd, HV_X86_RSP, 0x0); 529 - wreg(cpu->hvf_fd, HV_X86_RAX, 0x0); 530 - wreg(cpu->hvf_fd, HV_X86_RBX, 0x0); 531 - wreg(cpu->hvf_fd, HV_X86_RCX, 0x0); 532 - wreg(cpu->hvf_fd, HV_X86_RSI, 0x0); 533 - wreg(cpu->hvf_fd, HV_X86_RDI, 0x0); 534 - wreg(cpu->hvf_fd, HV_X86_RBP, 0x0); 535 - 536 - for (int i = 0; i < 8; i++) { 537 - wreg(cpu->hvf_fd, HV_X86_R8 + i, 0x0); 538 - } 539 - 540 - hv_vcpu_invalidate_tlb(cpu->hvf_fd); 541 - hv_vcpu_flush(cpu->hvf_fd); 542 - } 543 - 544 455 void hvf_vcpu_destroy(CPUState *cpu) 545 456 { 546 457 X86CPU *x86_cpu = X86_CPU(cpu);