diff --git a/repos/ports/run/virtualbox_auto_disk.run b/repos/ports/run/virtualbox_auto_disk.run
index 599c10ad6d..cae91360b1 100644
--- a/repos/ports/run/virtualbox_auto_disk.run
+++ b/repos/ports/run/virtualbox_auto_disk.run
@@ -70,7 +70,6 @@ source ${genode_dir}/repos/ports/run/virtualbox_auto.inc
 run_genode_until "ignore resize request to 720x400" 20
 run_genode_until "ignore resize request to 640x480" 25 $spawn_id
 run_genode_until "ignore resize request to 800x600" 35 $spawn_id
-run_genode_until "ignore resize request to 800x600" 90 $spawn_id
-run_genode_until "ignore resize request to 720x400" 10 $spawn_id
+run_genode_until {\[init -\> vbox\].*Guest Additions capability report:.*seamless: yes, hostWindowMapping: no, graphics: yes} 60 $spawn_id
 
 puts "\nTest succeeded"
diff --git a/repos/ports/src/virtualbox/hwaccm.cc b/repos/ports/src/virtualbox/hwaccm.cc
index 95dd9592d1..4c8e9f7b8a 100644
--- a/repos/ports/src/virtualbox/hwaccm.cc
+++ b/repos/ports/src/virtualbox/hwaccm.cc
@@ -51,6 +51,11 @@ VMMR3_INT_DECL(int) HWACCMR3InitCompleted(PVM pVM, VMINITCOMPLETED enmWhat)
 		return VINF_SUCCESS;
 
 	int rc = SUPR3CallVMMR0Ex(pVM->pVMR0, 0 /*idCpu*/, VMMR0_DO_HWACC_SETUP_VM, 0, NULL);
+
+	if (rc == VINF_SUCCESS) {
+		CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_SEP);
+	}
+
 	return rc;
 }
 
diff --git a/repos/ports/src/virtualbox/nova/sup.cc b/repos/ports/src/virtualbox/nova/sup.cc
index 3014724692..7ea7f3d3d6 100644
--- a/repos/ports/src/virtualbox/nova/sup.cc
+++ b/repos/ports/src/virtualbox/nova/sup.cc
@@ -113,10 +113,8 @@ int SUPR3CallVMMR0Ex(PVMR0 pVMR0, VMCPUID idCpu, unsigned
 		case VMMR0_DO_HWACC_ENABLE:
 			return VINF_SUCCESS;
 
-		/* XXX only do one of it - either recall or up - not both XXX */
 		case VMMR0_DO_GVMM_SCHED_POKE:
 			vcpu_handler->recall();
-			r0_halt_sem()->up();
 			return VINF_SUCCESS;
 
 		default:
diff --git a/repos/ports/src/virtualbox/nova/vcpu.h b/repos/ports/src/virtualbox/nova/vcpu.h
index f5e4c00ff1..fd6a84459a 100644
--- a/repos/ports/src/virtualbox/nova/vcpu.h
+++ b/repos/ports/src/virtualbox/nova/vcpu.h
@@ -76,7 +76,8 @@ class Vcpu_handler : public Vmm::Vcpu_dispatcher
 		Genode::Cap_connection _cap_connection;
 		Vmm::Vcpu_other_pd _vcpu;
 
-		Genode::addr_t _ec_sel = 0;
+		Genode::addr_t _ec_sel;
+		bool _irq_win;
 
 		void fpu_save(char * data) {
 			Assert(!(reinterpret_cast<Genode::addr_t>(data) & 0xF));
@@ -88,7 +89,21 @@ class Vcpu_handler : public Vmm::Vcpu_dispatcher
 			asm volatile ("fxrstor %0" : : "m" (*data));
 		}
 
-		enum { IRQ_INJ_VALID_MASK = 0x80000000UL };
+		enum {
+			NOVA_REQ_IRQWIN_EXIT  = 0x1000U,
+			IRQ_INJ_VALID_MASK    = 0x80000000UL,
+			IRQ_INJ_NONE          = 0U,
+
+			/*
+			 * Intel® 64 and IA-32 Architectures Software Developer’s Manual
+			 * Volume 3C, Chapter 24.4.2.
+			 * May 2012
+			 */
+			BLOCKING_BY_STI       = 1U << 0,
+			BLOCKING_BY_MOV_SS    = 1U << 1,
+			ACTIVITY_STATE_ACTIVE = 0U,
+			INTERRUPT_STATE_NONE  = 0U,
+		};
 
 	protected:
 
@@ -112,45 +127,52 @@ class Vcpu_handler : public Vmm::Vcpu_dispatcher
 			}
 		}
 
-		__attribute__((noreturn)) void _irq_window(unsigned cond)
-		{
-			Nova::Utcb * utcb = reinterpret_cast<Nova::Utcb *>(Thread_base::utcb());
-
-			Assert(!(utcb->intr_state & 3));
-			Assert(utcb->flags & X86_EFL_IF);
-			Assert(!(utcb->inj_info & IRQ_INJ_VALID_MASK));
-
-			if (irq_win(utcb)) {
-				/* reset mtd to not transfer anything back by accident */
-				utcb->mtd = 0;
-				/* inject IRQ */
-				inj_event(utcb, _current_vcpu, utcb->flags & X86_EFL_IF);
-				Nova::reply(_stack_reply);
-			}
-
-			/* go back to re-compiler */
-			longjmp(_env, 1);
-		}
 
 		__attribute__((noreturn)) void _default_handler()
 		{
 			Nova::Utcb * utcb = reinterpret_cast<Nova::Utcb *>(Thread_base::utcb());
 
+			Assert(utcb->actv_state == ACTIVITY_STATE_ACTIVE);
 			Assert(!(utcb->inj_info & IRQ_INJ_VALID_MASK));
+
 			/* go back to re-compiler */
 			longjmp(_env, 1);
 		}
 
 		__attribute__((noreturn)) void _recall_handler()
 		{
-			/* take care - Mtd::EFL | Mtd::STA are solely written to utcb */
 			Nova::Utcb * utcb = reinterpret_cast<Nova::Utcb *>(Thread_base::utcb());
 
-			Assert(!(utcb->intr_state & 3));
+			Assert(utcb->actv_state == ACTIVITY_STATE_ACTIVE);
+			Assert(utcb->intr_state == INTERRUPT_STATE_NONE);
+
+			if (utcb->inj_info & IRQ_INJ_VALID_MASK) {
+				Assert(utcb->flags & X86_EFL_IF);
+
+				if (!continue_hw_accelerated(utcb))
+					Vmm::printf("WARNING - recall ignored during IRQ delivery\n");
+
+				/* got recall during irq injection and X86_EFL_IF set for
+				 * delivery of IRQ - just continue */
+				Nova::reply(_stack_reply);
+			}
+
+			/* are we forced to go back to emulation mode ? */
+			if (!continue_hw_accelerated(utcb))
+				/* go back to emulation mode */
+				longjmp(_env, 1);
 
+			/* check whether we have to request irq injection window */
 			utcb->mtd = 0;
-			inj_event(utcb, _current_vcpu, utcb->flags & X86_EFL_IF);
+			if (check_to_request_irq_window(utcb, _current_vcpu)) {
+				_irq_win = true;
+				Nova::reply(_stack_reply);
+			}
 
+			/* nothing to do at all - continue hardware accelerated */
+			Assert(!_irq_win);
+			Assert(continue_hw_accelerated(utcb));
+
 			Nova::reply(_stack_reply);
 		}
 
@@ -162,8 +184,8 @@ class Vcpu_handler : public Vmm::Vcpu_dispatcher
 			using namespace Nova;
 			using namespace Genode;
 
-			Assert(utcb->actv_state == 0);
-			Assert(!(utcb->intr_state & 3));
+			Assert(utcb->actv_state == ACTIVITY_STATE_ACTIVE);
+			Assert(utcb->intr_state == INTERRUPT_STATE_NONE);
 
 			Assert(!(utcb->inj_info & IRQ_INJ_VALID_MASK));
 
@@ -248,100 +270,55 @@ class Vcpu_handler : public Vmm::Vcpu_dispatcher
 			using namespace Nova;
 
-			if (utcb->ip != pCtx->rip) {
 				utcb->mtd |= Mtd::EIP;
 				utcb->ip = pCtx->rip;
-			}
 
-			if (utcb->sp != pCtx->rsp) {
 				utcb->mtd |= Mtd::ESP;
 				utcb->sp = pCtx->rsp;
-			}
 
-			if (utcb->ax != pCtx->rax || utcb->bx != pCtx->rbx ||
-			    utcb->cx != pCtx->rcx || utcb->dx != pCtx->rdx)
-			{
 				utcb->mtd |= Mtd::ACDB;
 				utcb->ax = pCtx->rax;
 				utcb->bx = pCtx->rbx;
 				utcb->cx = pCtx->rcx;
 				utcb->dx = pCtx->rdx;
-			}
 
-			if (utcb->bp != pCtx->rbp || utcb->si != pCtx->rsi ||
-			    utcb->di != pCtx->rdi)
-			{
 				utcb->mtd |= Mtd::EBSD;
 				utcb->bp = pCtx->rbp;
 				utcb->si = pCtx->rsi;
 				utcb->di = pCtx->rdi;
-			}
 
-			if (utcb->flags != pCtx->rflags.u) {
 				utcb->mtd |= Mtd::EFL;
 				utcb->flags = pCtx->rflags.u;
-			}
 
-			if (utcb->sysenter_cs != pCtx->SysEnter.cs ||
-			    utcb->sysenter_sp != pCtx->SysEnter.esp ||
-			    utcb->sysenter_ip != pCtx->SysEnter.eip)
-			{
 				utcb->mtd |= Mtd::SYS;
 				utcb->sysenter_cs = pCtx->SysEnter.cs;
 				utcb->sysenter_sp = pCtx->SysEnter.esp;
 				utcb->sysenter_ip = pCtx->SysEnter.eip;
-			}
 
-			if (utcb->dr7 != pCtx->dr[7]) {
 				utcb->mtd |= Mtd::DR;
 				utcb->dr7 = pCtx->dr[7];
-			}
 
-			if (utcb->cr0 != pCtx->cr0) {
 				utcb->mtd |= Mtd::CR;
 				utcb->cr0 = pCtx->cr0;
-			}
 
-			if (utcb->cr2 != pCtx->cr2) {
 				utcb->mtd |= Mtd::CR;
 				utcb->cr2 = pCtx->cr2;
-			}
 
-			if (utcb->cr3 != pCtx->cr3) {
 				utcb->mtd |= Mtd::CR;
 				utcb->cr3 = pCtx->cr3;
-			}
 
-			if (utcb->cr4 != pCtx->cr4) {
 				utcb->mtd |= Mtd::CR;
 				utcb->cr4 = pCtx->cr4;
-			}
 
-			if (utcb->idtr.limit != pCtx->idtr.cbIdt ||
-			    utcb->idtr.base != pCtx->idtr.pIdt)
-			{
 				utcb->mtd |= Mtd::IDTR;
 				utcb->idtr.limit = pCtx->idtr.cbIdt;
 				utcb->idtr.base = pCtx->idtr.pIdt;
-			}
 
-			if (utcb->gdtr.limit != pCtx->gdtr.cbGdt ||
-			    utcb->gdtr.base != pCtx->gdtr.pGdt)
-			{
 				utcb->mtd |= Mtd::GDTR;
 				utcb->gdtr.limit = pCtx->gdtr.cbGdt;
 				utcb->gdtr.base = pCtx->gdtr.pGdt;
-			}
 
-			if (VMCPU_FF_ISSET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)) {
-				if (pCtx->rip != EMGetInhibitInterruptsPC(pVCpu)) {
-					PERR("intr_state nothing !=");
-					VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
-					utcb->intr_state = 0;
-					while (1) {}
-				}
-
-			}
+			Assert(!(VMCPU_FF_ISSET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)));
 
 			return true;
 		}
@@ -400,51 +377,69 @@ class Vcpu_handler : public Vmm::Vcpu_dispatcher
 			/* tell rem compiler that FPU register changed XXX optimizations ? */
 			CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM); /* redundant ? XXX */
 			pVCpu->cpum.s.fUseFlags |= (CPUM_USED_FPU | CPUM_USED_FPU_SINCE_REM); /* redundant ? XXX */
-
-			if (utcb->intr_state != 0)
+
+			if (utcb->intr_state != 0) {
+				Assert(utcb->intr_state == BLOCKING_BY_STI ||
+				       utcb->intr_state == BLOCKING_BY_MOV_SS);
 				EMSetInhibitInterruptsPC(pVCpu, pCtx->rip);
-			else
+			} else
 				VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
 
 			return true;
 		}
 
-		inline bool inj_event(Nova::Utcb * utcb, PVMCPU pVCpu, bool if_flag)
+		inline bool check_to_request_irq_window(Nova::Utcb * utcb, PVMCPU pVCpu)
 		{
+			if (!TRPMHasTrap(pVCpu) &&
+			    !VMCPU_FF_ISPENDING(pVCpu, (VMCPU_FF_INTERRUPT_APIC |
+			                                VMCPU_FF_INTERRUPT_PIC)))
+				return false;
+
+			unsigned vector = 0;
+			utcb->inj_info = NOVA_REQ_IRQWIN_EXIT | vector;
+			utcb->mtd |= Nova::Mtd::INJ;
+
+			return true;
+		}
+
+
+		__attribute__((noreturn)) void _irq_window()
+		{
+			Nova::Utcb * utcb = reinterpret_cast<Nova::Utcb *>(Thread_base::utcb());
+
+			PVMCPU pVCpu = _current_vcpu;
+
+			Assert(utcb->intr_state == INTERRUPT_STATE_NONE);
+			Assert(utcb->flags & X86_EFL_IF);
+			Assert(!VMCPU_FF_ISSET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));
+			Assert(!(utcb->inj_info & IRQ_INJ_VALID_MASK));
+
+			Assert(_irq_win);
+			_irq_win = false;
+
 			if (!TRPMHasTrap(pVCpu)) {
 
-				if (VMCPU_FF_TESTANDCLEAR(pVCpu, VMCPU_FF_INTERRUPT_NMI)) {
-					PDBG("%u hoho", __LINE__);
-					while (1) {}
-				}
+				bool res = VMCPU_FF_TESTANDCLEAR(pVCpu, VMCPU_FF_INTERRUPT_NMI);
+				Assert(!res);
 
-				if (VMCPU_FF_ISPENDING(pVCpu, (VMCPU_FF_INTERRUPT_APIC|VMCPU_FF_INTERRUPT_PIC))) {
+				if (VMCPU_FF_ISPENDING(pVCpu, (VMCPU_FF_INTERRUPT_APIC |
+				                               VMCPU_FF_INTERRUPT_PIC))) {
 
-					if (!if_flag) {
+					uint8_t irq;
+					int rc = PDMGetInterrupt(pVCpu, &irq);
+					Assert(RT_SUCCESS(rc));
 
-						unsigned vector = 0;
-						utcb->inj_info = 0x1000 | vector;
-						utcb->mtd |= Nova::Mtd::INJ;
-
-					} else
-					if (!VMCPU_FF_ISSET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)) {
-
-						uint8_t irq;
-						int rc = PDMGetInterrupt(pVCpu, &irq);
-						Assert(RT_SUCCESS(rc));
-
-						rc = TRPMAssertTrap(pVCpu, irq, TRPM_HARDWARE_INT);
-						Assert(RT_SUCCESS(rc));
-					} else
-						Vmm::printf("pending interrupt blocked due to INHIBIT flag\n");
+					rc = TRPMAssertTrap(pVCpu, irq, TRPM_HARDWARE_INT);
+					Assert(RT_SUCCESS(rc));
 				}
 			}
 
-			/* can an interrupt be dispatched ? */
-			if (!TRPMHasTrap(pVCpu) || !if_flag ||
-			    VMCPU_FF_ISSET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
-				return false;
+			/*
+			 * If we have no IRQ for injection, something with requesting the
+			 * IRQ window went wrong. Probably it was forgotten to be reset.
+			 */
+			Assert(TRPMHasTrap(pVCpu));
 
 #ifdef VBOX_STRICT
 			if (TRPMHasTrap(pVCpu)) {
@@ -480,16 +475,16 @@ class Vcpu_handler : public Vmm::Vcpu_dispatcher
 			utcb->inj_info = Event.au64[0];
 			utcb->inj_error = Event.n.u32ErrorCode;
 
-			utcb->mtd |= Nova::Mtd::INJ;
 
 			/*
-			Vmm::printf("type:info:vector %x:%x:%x\n",
-			            Event.n.u3Type, utcb->inj_info, u8Vector);
+			Vmm::printf("type:info:vector %x:%x:%x intr:actv - %x:%x mtd %x\n",
+			            Event.n.u3Type, utcb->inj_info, u8Vector, utcb->intr_state, utcb->actv_state, utcb->mtd);
 			*/
 
-			return true;
+			utcb->mtd = Nova::Mtd::INJ;
+			Nova::reply(_stack_reply);
 		}
 
-		inline bool irq_win(Nova::Utcb * utcb)
+		inline bool continue_hw_accelerated(Nova::Utcb * utcb)
 		{
 			Assert(!(VMCPU_FF_ISSET(_current_vcpu, VMCPU_FF_INHIBIT_INTERRUPTS)));
 
@@ -501,24 +496,13 @@ class Vcpu_handler : public Vmm::Vcpu_dispatcher
 			                          | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
 			                          | VMCPU_FF_REQUEST;
 
-			if (VM_FF_ISPENDING(_current_vm, check_vm)
-			    || VMCPU_FF_ISPENDING(_current_vcpu, check_vcpu))
-			{
-				Assert(VM_FF_ISPENDING(_current_vm, VM_FF_HWACCM_TO_R3_MASK) ||
-				       VMCPU_FF_ISPENDING(_current_vcpu,
-				                          VMCPU_FF_HWACCM_TO_R3_MASK));
+			if (!VM_FF_ISPENDING(_current_vm, check_vm) &&
+			    !VMCPU_FF_ISPENDING(_current_vcpu, check_vcpu))
+				return true;
 
-				Assert(!(RT_UNLIKELY(VM_FF_ISPENDING(_current_vm,
-				                                     VM_FF_PGM_NO_MEMORY))));
+			Assert(!(VM_FF_ISPENDING(_current_vm, VM_FF_PGM_NO_MEMORY)));
 
-				return false;
-			}
-
-			/* Is in Realmode ? */
-			if (!(utcb->cr0 & X86_CR0_PE))
-				return false;
-
-			return true;
+			return false;
 		}
 
 		virtual bool hw_load_state(Nova::Utcb *, VM *, PVMCPU) = 0;
@@ -545,7 +529,8 @@ class Vcpu_handler : public Vmm::Vcpu_dispatcher
 			Vmm::Vcpu_dispatcher(stack_size, _cap_connection,
 			                     attr ? *attr : 0, start_routine, arg),
 			_vcpu(cpu_session),
-			_ec_sel(Genode::cap_map()->insert())
+			_ec_sel(Genode::cap_map()->insert()),
+			_irq_win(false)
 		{ }
 
 		void start() {
@@ -657,16 +642,14 @@ class Vcpu_handler : public Vmm::Vcpu_dispatcher
 
 			/* take the utcb state prepared during the last exit */
 			utcb->mtd = next_utcb.mtd;
+			utcb->inj_info = IRQ_INJ_NONE;
 			utcb->intr_state = next_utcb.intr_state;
-			utcb->actv_state = 0; /* XXX */
+			utcb->actv_state = ACTIVITY_STATE_ACTIVE;
 			utcb->ctrl[0] = next_utcb.ctrl[0];
 			utcb->ctrl[1] = next_utcb.ctrl[1];
 
 			using namespace Nova;
 
-			/* check whether to inject interrupts */
-			inj_event(utcb, pVCpu, pCtx->rflags.u & X86_EFL_IF);
-
 			/* Transfer vCPU state from vBox to Nova format */
 			if (!vbox_to_utcb(utcb, pVM, pVCpu) ||
 			    !hw_load_state(utcb, pVM, pVCpu)) {
@@ -675,6 +658,9 @@ class Vcpu_handler : public Vmm::Vcpu_dispatcher
 				return VERR_INTERNAL_ERROR;
 			}
 
+			/* check whether to request interrupt window for injection */
+			_irq_win = check_to_request_irq_window(utcb, pVCpu);
+
 			/*
 			 * Flag vCPU to be "pokeable" by external events such as interrupts
 			 * from virtual devices. Only if this flag is set, the
@@ -695,7 +681,7 @@ class Vcpu_handler : public Vmm::Vcpu_dispatcher
 			/* switch to hardware accelerated mode */
 			switch_to_hw(pCtx);
 
-			Assert(utcb->actv_state == 0);
+			Assert(utcb->actv_state == ACTIVITY_STATE_ACTIVE);
 
 			_current_vm = 0;
 			_current_vcpu = 0;
@@ -716,7 +702,10 @@ class Vcpu_handler : public Vmm::Vcpu_dispatcher
 			}
 
 			/* reset message transfer descriptor for next invocation */
-			next_utcb.mtd = 0;
+			Assert (!(utcb->inj_info & IRQ_INJ_VALID_MASK));
+			/* Reset irq window next time if we are still requesting it */
+			next_utcb.mtd = _irq_win ? Mtd::INJ : 0;
+
 			next_utcb.intr_state = utcb->intr_state;
 			next_utcb.ctrl[0] = utcb->ctrl[0];
 			next_utcb.ctrl[1] = utcb->ctrl[1];
diff --git a/repos/ports/src/virtualbox/nova/vcpu_svm.h b/repos/ports/src/virtualbox/nova/vcpu_svm.h
index 8bd2ca91ff..ecb63f22e0 100644
--- a/repos/ports/src/virtualbox/nova/vcpu_svm.h
+++ b/repos/ports/src/virtualbox/nova/vcpu_svm.h
@@ -21,9 +21,7 @@ class Vcpu_handler_svm : public Vcpu_handler
 
 		__attribute__((noreturn)) void _svm_default() { _default_handler(); }
 
-		__attribute__((noreturn)) void _svm_vintr() {
-			_irq_window(SVM_EXIT_VINTR);
-		}
+		__attribute__((noreturn)) void _svm_vintr() { _irq_window(); }
 
 		__attribute__((noreturn)) void _svm_ioio()
 		{
diff --git a/repos/ports/src/virtualbox/nova/vcpu_vmx.h b/repos/ports/src/virtualbox/nova/vcpu_vmx.h
index 39969276d1..f54bc2bc76 100644
--- a/repos/ports/src/virtualbox/nova/vcpu_vmx.h
+++ b/repos/ports/src/virtualbox/nova/vcpu_vmx.h
@@ -48,10 +48,29 @@ class Vcpu_handler_vmx : public Vcpu_handler
 			Genode::Thread_base *myself = Genode::Thread_base::myself();
 			Utcb *utcb = reinterpret_cast<Utcb *>(myself->utcb());
 
-			/* avoid as many as possible VM exits */
+			/* configure VM exits to get */
 			next_utcb.mtd = Nova::Mtd::CTRL;
-			next_utcb.ctrl[0] = 0;
-			next_utcb.ctrl[1] = 0;
+			/* from src/VBox/VMM/VMMR0/HWVMXR0.cpp of virtualbox sources */
+			next_utcb.ctrl[0] = VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_HLT_EXIT |
+			                    VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT |
+			                    VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDPMC_EXIT |
+			                    VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_UNCOND_IO_EXIT |
+			                    VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MONITOR_EXIT |
+			                    VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MWAIT_EXIT |
+			                    VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR8_LOAD_EXIT |
+			                    VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR8_STORE_EXIT |
+			                    VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDPMC_EXIT |
+/*			                    VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_PAUSE_EXIT | */
+			                    /* we don't support tsc offsetting for now - so let the rdtsc exit */
+			                    VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDTSC_EXIT;
+
+			next_utcb.ctrl[1] = VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC |
+			                    VMX_VMCS_CTRL_PROC_EXEC2_WBINVD_EXIT |
+			                    VMX_VMCS_CTRL_PROC_EXEC2_REAL_MODE |
+			                    VMX_VMCS_CTRL_PROC_EXEC2_VPID |
+/*			                    VMX_VMCS_CTRL_PROC_EXEC2_X2APIC | */
+			                    VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP |
+			                    VMX_VMCS_CTRL_PROC_EXEC2_EPT;
 
 			void *exit_status = _start_routine(_arg);
 			pthread_exit(exit_status);
@@ -69,16 +88,29 @@ class Vcpu_handler_vmx : public Vcpu_handler
 			_default_handler();
 		}
 
-		__attribute__((noreturn)) void _vmx_irqwin()
-		{
-			_irq_window(VMX_EXIT_IRQ_WINDOW);
-		}
+		__attribute__((noreturn)) void _vmx_irqwin() { _irq_window(); }
 
 		__attribute__((noreturn)) void _vmx_recall()
 		{
 			Vcpu_handler::_recall_handler();
 		}
 
+		__attribute__((noreturn)) void _vmx_invalid()
+		{
+			Genode::Thread_base *myself = Genode::Thread_base::myself();
+			Nova::Utcb *utcb = reinterpret_cast<Nova::Utcb *>(myself->utcb());
+
+			unsigned const dubious = utcb->inj_info | utcb->inj_error |
+			                         utcb->intr_state | utcb->actv_state;
+			if (dubious)
+				Vmm::printf("%s - dubious - inj_info=0x%x inj_error=%x"
+				            " intr_state=0x%x actv_state=0x%x\n", __func__,
+				            utcb->inj_info, utcb->inj_error,
+				            utcb->intr_state, utcb->actv_state);
+
+			Vcpu_handler::_default_handler();
+		}
+
 	public:
 
 		Vcpu_handler_vmx(size_t stack_size, const pthread_attr_t *attr,
@@ -102,8 +134,11 @@ class Vcpu_handler_vmx : public Vcpu_handler
 			                 &This::_vmx_default> (exc_base, Mtd::ALL | Mtd::FPU);
 			register_handler (exc_base, Mtd::ALL | Mtd::FPU);
+
+			/* we don't support tsc offsetting for now - so let the rdtsc exit */
 			register_handler (exc_base, Mtd::ALL | Mtd::FPU);
+			register_handler (exc_base, Mtd::ALL | Mtd::FPU);
 			register_handler (exc_base, Mtd::ALL | Mtd::FPU);
 			register_handler (exc_base, Mtd::ALL | Mtd::FPU);
+//			register_handler (exc_base, Mtd::ALL | Mtd::FPU);
+			register_handler (exc_base, Mtd::ALL | Mtd::FPU);
-			register_handler (exc_base, Mtd::ALL | Mtd::FPU);
 			register_handler> (exc_base, Mtd::ALL | Mtd::FPU);
 			register_handler (exc_base, Mtd::ALL | Mtd::FPU);
 			register_handler (exc_base, Mtd::EFL | Mtd::STA);
+			                 &This::_vmx_recall> (exc_base, Mtd::ALL | Mtd::FPU);
 
 			start();
 		}
diff --git a/repos/ports/src/virtualbox/nova/vmx.h b/repos/ports/src/virtualbox/nova/vmx.h
index 2976f8fec8..45b66cf248 100644
--- a/repos/ports/src/virtualbox/nova/vmx.h
+++ b/repos/ports/src/virtualbox/nova/vmx.h
@@ -59,18 +59,6 @@ static inline bool vmx_save_state(Nova::Utcb * utcb, VM * pVM, PVMCPU pVCpu)
 
 enum { VMCS_SEG_UNUSABLE = 0x10000 };
 
-#define GENODE_WRITE_SELREG_REQUIRED(REG) \
-	(utcb->REG.sel != pCtx->REG.Sel) || \
-	(utcb->REG.limit != pCtx->REG.u32Limit) || \
-	(utcb->REG.base != pCtx->REG.u64Base) || \
-	((( pCtx->REG.Sel \
-	   || !CPUMIsGuestInPagedProtectedModeEx(pCtx) \
-	   || (!pCtx->cs.Attr.n.u1DefBig && !CPUMIsGuestIn64BitCodeEx(pCtx))) \
-	  && pCtx->REG.Attr.n.u1Present == 1) ? \
-	 utcb->REG.ar != sel_ar_conv_to_nova(pCtx->REG.Attr.u | X86_SEL_TYPE_ACCESSED) : \
-	 utcb->REG.ar != sel_ar_conv_to_nova(VMCS_SEG_UNUSABLE) \
-	)
-
 #define GENODE_WRITE_SELREG(REG) \
 	Assert(pCtx->REG.fFlags & CPUMSELREG_FLAGS_VALID); \
 	Assert(pCtx->REG.ValidSel == pCtx->REG.Sel); \
@@ -93,24 +81,18 @@ static inline bool vmx_load_state(Nova::Utcb * utcb, VM * pVM, PVMCPU pVCpu)
 {
 	PCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu);
 
-	if ((GENODE_WRITE_SELREG_REQUIRED(es)) ||
-	    (GENODE_WRITE_SELREG_REQUIRED(ds)))
 	{
 		utcb->mtd |= Nova::Mtd::ESDS;
 		GENODE_WRITE_SELREG(es);
 		GENODE_WRITE_SELREG(ds);
 	}
 
-	if ((GENODE_WRITE_SELREG_REQUIRED(fs)) ||
-	    (GENODE_WRITE_SELREG_REQUIRED(gs)))
 	{
 		utcb->mtd |= Nova::Mtd::FSGS;
 		GENODE_WRITE_SELREG(fs);
 		GENODE_WRITE_SELREG(gs);
 	}
 
-	if ((GENODE_WRITE_SELREG_REQUIRED(cs)) ||
-	    (GENODE_WRITE_SELREG_REQUIRED(ss)))
 	{
 		utcb->mtd |= Nova::Mtd::CSSS;
 		GENODE_WRITE_SELREG(cs);
@@ -119,10 +101,6 @@ static inline bool vmx_load_state(Nova::Utcb * utcb, VM * pVM, PVMCPU pVCpu)
 
 	/* ldtr */
 	if (pCtx->ldtr.Sel == 0) {
-		if (utcb->ldtr.sel != 0 ||
-		    utcb->ldtr.limit != 0 ||
-		    utcb->ldtr.base != 0 ||
-		    utcb->ldtr.ar != sel_ar_conv_to_nova(0x82))
 		{
 			utcb->mtd |= Nova::Mtd::LDTR;
@@ -132,10 +110,6 @@ static inline bool vmx_load_state(Nova::Utcb * utcb, VM * pVM, PVMCPU pVCpu)
 			utcb->ldtr.ar = sel_ar_conv_to_nova(0x82);
 		}
 	} else {
-		if (utcb->ldtr.sel != pCtx->ldtr.Sel ||
-		    utcb->ldtr.limit != pCtx->ldtr.u32Limit ||
-		    utcb->ldtr.base != pCtx->ldtr.u64Base ||
-		    utcb->ldtr.ar != sel_ar_conv_to_nova(pCtx->ldtr.Attr.u))
 		{
 			utcb->mtd |= Nova::Mtd::LDTR;
@@ -150,10 +124,6 @@ static inline bool vmx_load_state(Nova::Utcb * utcb, VM * pVM, PVMCPU pVCpu)
 	Assert(pCtx->tr.Attr.u & X86_SEL_TYPE_SYS_TSS_BUSY_MASK);
 	Assert(!CPUMIsGuestInRealModeEx(pCtx));
 
-	if (utcb->tr.sel != pCtx->tr.Sel ||
-	    utcb->tr.limit != pCtx->tr.u32Limit ||
-	    utcb->tr.base != pCtx->tr.u64Base ||
-	    utcb->tr.ar != sel_ar_conv_to_nova(pCtx->tr.Attr.u))
 	{
 		utcb->mtd |= Nova::Mtd::TR;
@@ -167,6 +137,5 @@ static inline bool vmx_load_state(Nova::Utcb * utcb, VM * pVM, PVMCPU pVCpu)
 }
 
 #undef GENODE_WRITE_SELREG
-#undef GENODE_WRITE_SELREG_REQUIRED
 
 #endif /* _GENODE_VIRTUALBOX_VMX__H_ */
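As a rough illustration of the control flow this patch switches to (first request an interrupt window, then inject only once the window exit arrives), the following minimal self-contained C++ sketch models the roles of check_to_request_irq_window(), _irq_window(), and the _irq_win flag. All names and types in the sketch are illustrative only; it is not VirtualBox, NOVA, or Genode code.

/*
 * Illustrative model of the request-irq-window flow, assuming simplified
 * hypothetical state; the real code instead programs the NOVA UTCB
 * (NOVA_REQ_IRQWIN_EXIT, Mtd::INJ) and injects via TRPM/PDM.
 */
#include <cstdio>
#include <optional>

struct Guest_state {
	bool interrupts_enabled;          /* stands in for X86_EFL_IF                */
	std::optional<unsigned> pending;  /* pending vector, e.g. from a PIC or APIC */
	bool irq_window_requested;        /* stands in for the _irq_win member       */
};

/* phase 1: on VM entry, only ask for an interrupt-window exit */
bool request_irq_window(Guest_state &g)
{
	if (!g.pending)
		return false;                 /* nothing pending, no window needed */

	g.irq_window_requested = true;    /* models NOVA_REQ_IRQWIN_EXIT + Mtd::INJ */
	return true;
}

/* phase 2: the window exit fires once the guest can accept an interrupt */
void irq_window_exit(Guest_state &g)
{
	g.irq_window_requested = false;

	if (g.pending && g.interrupts_enabled) {
		std::printf("inject vector %u\n", *g.pending); /* models TRPMAssertTrap + inj_info */
		g.pending.reset();
	}
}

int main()
{
	Guest_state g { true, 32, false };

	if (request_irq_window(g))        /* VM entry: only arm the window          */
		irq_window_exit(g);           /* later exit: perform the real injection */
}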