VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAllCImplSvmInstr.cpp@99984

Last change on this file since 99984 was 99984, checked in by vboxsync, 2 years ago

VMM/IEM: Fixed a few places in IEMAllCImpl.cpp and IEMAllCImplSvmInstr.cpp where decoder state was used directly instead of being passed as arguments. bugref:10369

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 68.3 KB
/* $Id: IEMAllCImplSvmInstr.cpp 99984 2023-05-26 01:20:46Z vboxsync $ */
/** @file
 * IEM - AMD-V (Secure Virtual Machine) instruction implementation.
 */

/*
 * Copyright (C) 2011-2023 Oracle and/or its affiliates.
 *
 * This file is part of VirtualBox base platform packages, as
 * available from https://www.virtualbox.org.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, in version 3 of the
 * License.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <https://www.gnu.org/licenses>.
 *
 * SPDX-License-Identifier: GPL-3.0-only
 */


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#define LOG_GROUP LOG_GROUP_IEM_SVM
#define VMCPU_INCL_CPUM_GST_CTX
#include <VBox/vmm/iem.h>
#include <VBox/vmm/apic.h>
#include <VBox/vmm/cpum.h>
#include <VBox/vmm/dbgf.h>
#include <VBox/vmm/em.h>
#include <VBox/vmm/hm.h>
#include <VBox/vmm/pgm.h>
#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
# include <VBox/vmm/hm_svm.h>
#endif
#include <VBox/vmm/gim.h>
#include <VBox/vmm/tm.h>
#include "IEMInternal.h"
#include <VBox/vmm/vmcc.h>
#include <VBox/log.h>
#include <VBox/disopcode-x86-amd64.h> /* for OP_VMMCALL */
#include <VBox/err.h>
#include <VBox/param.h>
#include <iprt/assert.h>
#include <iprt/string.h>
#include <iprt/x86.h>

#include "IEMInline.h"

#ifdef VBOX_WITH_NESTED_HWVIRT_SVM /* Almost the whole file. */


/*********************************************************************************************************************************
*   Defined Constants And Macros                                                                                                 *
*********************************************************************************************************************************/
/**
 * Check the common SVM instruction preconditions.
 */
# define IEM_SVM_INSTR_COMMON_CHECKS(a_pVCpu, a_Instr) \
    do { \
        if (!CPUMIsGuestSvmEnabled(IEM_GET_CTX(a_pVCpu))) \
        { \
            Log((RT_STR(a_Instr) ": EFER.SVME not enabled -> #UD\n")); \
            return iemRaiseUndefinedOpcode(a_pVCpu); \
        } \
        if (IEM_IS_REAL_OR_V86_MODE(a_pVCpu)) \
        { \
            Log((RT_STR(a_Instr) ": Real or v8086 mode -> #UD\n")); \
            return iemRaiseUndefinedOpcode(a_pVCpu); \
        } \
        if ((a_pVCpu)->iem.s.uCpl != 0) \
        { \
            Log((RT_STR(a_Instr) ": CPL != 0 -> #GP(0)\n")); \
            return iemRaiseGeneralProtectionFault0(a_pVCpu); \
        } \
    } while (0)

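/*
 * Minimal usage sketch of the macro above (kept out of the build with
 * '# if 0'; 'iemCImpl_svmNopExample' is a hypothetical instruction body,
 * mirroring the real uses in iemCImpl_vmrun and friends further down):
 */
# if 0
IEM_CIMPL_DEF_0(iemCImpl_svmNopExample)
{
    IEM_SVM_INSTR_COMMON_CHECKS(pVCpu, svmNopExample); /* raises #UD if EFER.SVME=0 or real/v86 mode, #GP(0) if CPL != 0 */
    return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
}
# endif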

/**
 * Converts an IEM exception event type to an SVM event type.
 *
 * @returns The SVM event type.
 * @retval  UINT8_MAX if the specified type of event isn't among the set
 *          of recognized IEM event types.
 *
 * @param   uVector         The vector of the event.
 * @param   fIemXcptFlags   The IEM exception / interrupt flags.
 */
IEM_STATIC uint8_t iemGetSvmEventType(uint32_t uVector, uint32_t fIemXcptFlags)
{
    if (fIemXcptFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
    {
        if (uVector != X86_XCPT_NMI)
            return SVM_EVENT_EXCEPTION;
        return SVM_EVENT_NMI;
    }

    /* See AMD spec. Table 15-1. "Guest Exception or Interrupt Types". */
    if (fIemXcptFlags & (IEM_XCPT_FLAGS_BP_INSTR | IEM_XCPT_FLAGS_ICEBP_INSTR | IEM_XCPT_FLAGS_OF_INSTR))
        return SVM_EVENT_EXCEPTION;

    if (fIemXcptFlags & IEM_XCPT_FLAGS_T_EXT_INT)
        return SVM_EVENT_EXTERNAL_IRQ;

    if (fIemXcptFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
        return SVM_EVENT_SOFTWARE_INT;

    AssertMsgFailed(("iemGetSvmEventType: Invalid IEM xcpt/int. type %#x, uVector=%#x\n", fIemXcptFlags, uVector));
    return UINT8_MAX;
}

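/*
 * Self-check sketch for the conversion above (illustrative only, '# if 0';
 * the interrupt vectors 0x20/0x80 are arbitrary example values):
 */
# if 0
static void iemSvmEventTypeSelfCheck(void)
{
    Assert(iemGetSvmEventType(X86_XCPT_PF,  IEM_XCPT_FLAGS_T_CPU_XCPT) == SVM_EVENT_EXCEPTION);
    Assert(iemGetSvmEventType(X86_XCPT_NMI, IEM_XCPT_FLAGS_T_CPU_XCPT) == SVM_EVENT_NMI);
    Assert(iemGetSvmEventType(0x20, IEM_XCPT_FLAGS_T_EXT_INT)  == SVM_EVENT_EXTERNAL_IRQ);
    Assert(iemGetSvmEventType(0x80, IEM_XCPT_FLAGS_T_SOFT_INT) == SVM_EVENT_SOFTWARE_INT);
}
# endif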

/**
 * Performs an SVM world-switch (VMRUN, \#VMEXIT) updating PGM and IEM internals.
 *
 * @returns Strict VBox status code from PGMChangeMode.
 * @param   pVCpu   The cross context virtual CPU structure.
 */
DECLINLINE(VBOXSTRICTRC) iemSvmWorldSwitch(PVMCPUCC pVCpu)
{
    /*
     * Inform PGM about paging mode changes.
     * We include X86_CR0_PE because PGM doesn't handle paged-real mode yet,
     * see comment in iemMemPageTranslateAndCheckAccess().
     */
    int rc = PGMChangeMode(pVCpu, pVCpu->cpum.GstCtx.cr0 | X86_CR0_PE, pVCpu->cpum.GstCtx.cr4, pVCpu->cpum.GstCtx.msrEFER,
                           true /* fForce */);
    AssertRCReturn(rc, rc);

    /* Invalidate IEM TLBs now that we've forced a PGM mode change. */
    IEMTlbInvalidateAll(pVCpu);

    /* Inform CPUM (recompiler), can later be removed. */
    CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);

    /* Re-initialize IEM cache/state after the drastic mode switch. */
    iemReInitExec(pVCpu);
    return rc;
}


/**
 * SVM \#VMEXIT handler.
 *
 * @returns Strict VBox status code.
 * @retval  VINF_SVM_VMEXIT when the \#VMEXIT is successful.
 * @retval  VERR_SVM_VMEXIT_FAILED when the \#VMEXIT failed restoring the guest's
 *          "host state" and a shutdown is required.
 *
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   uExitCode   The exit code.
 * @param   uExitInfo1  The exit info. 1 field.
 * @param   uExitInfo2  The exit info. 2 field.
 */
VBOXSTRICTRC iemSvmVmexit(PVMCPUCC pVCpu, uint64_t uExitCode, uint64_t uExitInfo1, uint64_t uExitInfo2) RT_NOEXCEPT
{
    VBOXSTRICTRC rcStrict;
    if (   CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu))
        || uExitCode == SVM_EXIT_INVALID)
    {
        Log2(("iemSvmVmexit: CS:RIP=%04x:%08RX64 uExitCode=%#RX64 uExitInfo1=%#RX64 uExitInfo2=%#RX64\n",
              pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, uExitCode, uExitInfo1, uExitInfo2));

        /*
         * Disable the global-interrupt flag to prevent interrupts during the 'atomic' world switch.
         */
        CPUMSetGuestGif(&pVCpu->cpum.GstCtx, false);

        /*
         * Map the nested-guest VMCB from its location in guest memory.
         * Write exactly what the CPU does on #VMEXIT thereby preserving most other bits in the
         * guest's VMCB in memory, see @bugref{7243#c113} and related comment on iemSvmVmrun().
         */
        PSVMVMCB       pVmcbMem;
        PGMPAGEMAPLOCK PgLockMem;
        PSVMVMCBCTRL   pVmcbCtrl = &pVCpu->cpum.GstCtx.hwvirt.svm.Vmcb.ctrl;
        rcStrict = iemMemPageMap(pVCpu, pVCpu->cpum.GstCtx.hwvirt.svm.GCPhysVmcb, IEM_ACCESS_DATA_RW, (void **)&pVmcbMem,
                                 &PgLockMem);
        if (rcStrict == VINF_SUCCESS)
        {
            /*
             * Notify HM in case the nested-guest was executed using hardware-assisted SVM (which
             * would have modified some VMCB state) that might need to be restored on #VMEXIT before
             * writing the VMCB back to guest memory.
             */
            HMNotifySvmNstGstVmexit(pVCpu, IEM_GET_CTX(pVCpu));

            Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
            Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
            Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
            Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));

            /*
             * Save the nested-guest state into the VMCB state-save area.
             */
            PSVMVMCBSTATESAVE pVmcbMemState = &pVmcbMem->guest;
            HMSVM_SEG_REG_COPY_TO_VMCB(IEM_GET_CTX(pVCpu), pVmcbMemState, ES, es);
            HMSVM_SEG_REG_COPY_TO_VMCB(IEM_GET_CTX(pVCpu), pVmcbMemState, CS, cs);
            HMSVM_SEG_REG_COPY_TO_VMCB(IEM_GET_CTX(pVCpu), pVmcbMemState, SS, ss);
            HMSVM_SEG_REG_COPY_TO_VMCB(IEM_GET_CTX(pVCpu), pVmcbMemState, DS, ds);
            pVmcbMemState->GDTR.u32Limit = pVCpu->cpum.GstCtx.gdtr.cbGdt;
            pVmcbMemState->GDTR.u64Base  = pVCpu->cpum.GstCtx.gdtr.pGdt;
            pVmcbMemState->IDTR.u32Limit = pVCpu->cpum.GstCtx.idtr.cbIdt;
            pVmcbMemState->IDTR.u64Base  = pVCpu->cpum.GstCtx.idtr.pIdt;
            pVmcbMemState->u64EFER       = pVCpu->cpum.GstCtx.msrEFER;
            pVmcbMemState->u64CR4        = pVCpu->cpum.GstCtx.cr4;
            pVmcbMemState->u64CR3        = pVCpu->cpum.GstCtx.cr3;
            pVmcbMemState->u64CR2        = pVCpu->cpum.GstCtx.cr2;
            pVmcbMemState->u64CR0        = pVCpu->cpum.GstCtx.cr0;
            /** @todo Nested paging. */
            pVmcbMemState->u64RFlags     = pVCpu->cpum.GstCtx.rflags.u;
            pVmcbMemState->u64RIP        = pVCpu->cpum.GstCtx.rip;
            pVmcbMemState->u64RSP        = pVCpu->cpum.GstCtx.rsp;
            pVmcbMemState->u64RAX        = pVCpu->cpum.GstCtx.rax;
            pVmcbMemState->u64DR7        = pVCpu->cpum.GstCtx.dr[7];
            pVmcbMemState->u64DR6        = pVCpu->cpum.GstCtx.dr[6];
            pVmcbMemState->u8CPL         = pVCpu->cpum.GstCtx.ss.Attr.n.u2Dpl;   /* See comment in CPUMGetGuestCPL(). */
            Assert(CPUMGetGuestCPL(pVCpu) == pVCpu->cpum.GstCtx.ss.Attr.n.u2Dpl);
            if (CPUMIsGuestSvmNestedPagingEnabled(pVCpu, IEM_GET_CTX(pVCpu)))
                pVmcbMemState->u64PAT = pVCpu->cpum.GstCtx.msrPAT;

            /*
             * Save additional state and intercept information.
             *
             *   - V_IRQ: Tracked using VMCPU_FF_INTERRUPT_NESTED_GUEST force-flag and updated below.
             *   - V_TPR: Updated by iemCImpl_load_CrX or by the physical CPU for hardware-assisted
             *     SVM execution.
             *   - Interrupt shadow: Tracked using VMCPU_FF_INHIBIT_INTERRUPTS and RIP.
             */
            PSVMVMCBCTRL pVmcbMemCtrl = &pVmcbMem->ctrl;
            if (!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST))   /* V_IRQ. */
                pVmcbMemCtrl->IntCtrl.n.u1VIrqPending = 0;
            else
            {
                Assert(pVmcbCtrl->IntCtrl.n.u1VIrqPending);
                VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST);
            }

            pVmcbMemCtrl->IntCtrl.n.u8VTPR = pVmcbCtrl->IntCtrl.n.u8VTPR;   /* V_TPR. */

            if (!CPUMIsInInterruptShadowWithUpdate(&pVCpu->cpum.GstCtx))    /* Interrupt shadow. */
                pVmcbMemCtrl->IntShadow.n.u1IntShadow = 0;
            else
            {
                pVmcbMemCtrl->IntShadow.n.u1IntShadow = 1;
                LogFlow(("iemSvmVmexit: Interrupt shadow till %#RX64\n", pVCpu->cpum.GstCtx.rip));
                CPUMClearInterruptShadow(&pVCpu->cpum.GstCtx);
            }

            /*
             * Save nRIP, instruction length and byte fields.
             */
            pVmcbMemCtrl->u64NextRIP     = pVmcbCtrl->u64NextRIP;
            pVmcbMemCtrl->cbInstrFetched = pVmcbCtrl->cbInstrFetched;
            memcpy(&pVmcbMemCtrl->abInstr[0], &pVmcbCtrl->abInstr[0], sizeof(pVmcbMemCtrl->abInstr));

            /*
             * Save exit information.
             */
            pVmcbMemCtrl->u64ExitCode  = uExitCode;
            pVmcbMemCtrl->u64ExitInfo1 = uExitInfo1;
            pVmcbMemCtrl->u64ExitInfo2 = uExitInfo2;

            /*
             * Update the exit interrupt-information field if this #VMEXIT happened as a result
             * of delivering an event through IEM.
             *
             * Don't update the exit interrupt-information field if the event wasn't being injected
             * through IEM, as it would have been updated by real hardware if the nested-guest was
             * executed using hardware-assisted SVM.
             */
            {
                uint8_t  uExitIntVector;
                uint32_t uExitIntErr;
                uint32_t fExitIntFlags;
                bool const fRaisingEvent = IEMGetCurrentXcpt(pVCpu, &uExitIntVector, &fExitIntFlags, &uExitIntErr,
                                                             NULL /* uExitIntCr2 */);
                if (fRaisingEvent)
                {
                    pVmcbCtrl->ExitIntInfo.n.u1Valid  = 1;
                    pVmcbCtrl->ExitIntInfo.n.u8Vector = uExitIntVector;
                    pVmcbCtrl->ExitIntInfo.n.u3Type   = iemGetSvmEventType(uExitIntVector, fExitIntFlags);
                    if (fExitIntFlags & IEM_XCPT_FLAGS_ERR)
                    {
                        pVmcbCtrl->ExitIntInfo.n.u1ErrorCodeValid = true;
                        pVmcbCtrl->ExitIntInfo.n.u32ErrorCode     = uExitIntErr;
                    }
                }
            }

            /*
             * Save the exit interrupt-information field.
             *
             * We write the whole field including overwriting reserved bits as it was observed on an
             * AMD Ryzen 5 Pro 1500 that the CPU does not preserve reserved bits in EXITINTINFO.
             */
            pVmcbMemCtrl->ExitIntInfo = pVmcbCtrl->ExitIntInfo;

            /*
             * Clear event injection.
             */
            pVmcbMemCtrl->EventInject.n.u1Valid = 0;

            iemMemPageUnmap(pVCpu, pVCpu->cpum.GstCtx.hwvirt.svm.GCPhysVmcb, IEM_ACCESS_DATA_RW, pVmcbMem, &PgLockMem);
        }

        /*
         * Prepare for guest's "host mode" by clearing internal processor state bits.
         *
         * We don't need to zero out the state-save area, just the controls should be
         * sufficient because it has the critical bit of indicating whether we're inside
         * the nested-guest or not.
         */
        memset(pVmcbCtrl, 0, sizeof(*pVmcbCtrl));
        Assert(!CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)));

        /*
         * Restore the subset of the inhibit flags that were preserved.
         */
        pVCpu->cpum.GstCtx.eflags.uBoth |= pVCpu->cpum.GstCtx.hwvirt.fSavedInhibit;

        if (rcStrict == VINF_SUCCESS)
        {
            /** @todo Nested paging. */
            /** @todo ASID. */

            /*
             * If we are switching to PAE mode host, validate the PDPEs first.
             * Any invalid PDPEs here causes a VCPU shutdown.
             */
            PCSVMHOSTSTATE pHostState = &pVCpu->cpum.GstCtx.hwvirt.svm.HostState;
            bool const fHostInPaeMode = CPUMIsPaePagingEnabled(pHostState->uCr0, pHostState->uCr4, pHostState->uEferMsr);
            if (fHostInPaeMode)
                rcStrict = PGMGstMapPaePdpesAtCr3(pVCpu, pHostState->uCr3);
            if (RT_SUCCESS(rcStrict))
            {
                /*
                 * Reload the host state.
                 */
                CPUMSvmVmExitRestoreHostState(pVCpu, IEM_GET_CTX(pVCpu));

                /*
                 * Update PGM, IEM and others of a world-switch.
                 */
                rcStrict = iemSvmWorldSwitch(pVCpu);
                if (rcStrict == VINF_SUCCESS)
                    rcStrict = VINF_SVM_VMEXIT;
                else if (RT_SUCCESS(rcStrict))
                {
                    LogFlow(("iemSvmVmexit: Setting passup status from iemSvmWorldSwitch %Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
                    iemSetPassUpStatus(pVCpu, rcStrict);
                    rcStrict = VINF_SVM_VMEXIT;
                }
                else
                    LogFlow(("iemSvmVmexit: iemSvmWorldSwitch unexpected failure. rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
            }
            else
            {
                Log(("iemSvmVmexit: PAE PDPEs invalid while restoring host state. rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
                rcStrict = VINF_EM_TRIPLE_FAULT;
            }
        }
        else
        {
            AssertMsgFailed(("iemSvmVmexit: Mapping VMCB at %#RGp failed. rc=%Rrc\n", pVCpu->cpum.GstCtx.hwvirt.svm.GCPhysVmcb, VBOXSTRICTRC_VAL(rcStrict)));
            rcStrict = VINF_EM_TRIPLE_FAULT;
        }
    }
    else
    {
        AssertMsgFailed(("iemSvmVmexit: Not in SVM guest mode! uExitCode=%#RX64 uExitInfo1=%#RX64 uExitInfo2=%#RX64\n", uExitCode, uExitInfo1, uExitInfo2));
        rcStrict = VERR_SVM_IPE_3;
    }

# if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && defined(IN_RING3)
    /* CLGI/STGI may not have been intercepted and thus not executed in IEM. */
    if (   HMIsEnabled(pVCpu->CTX_SUFF(pVM))
        && HMIsSvmVGifActive(pVCpu->CTX_SUFF(pVM)))
        return EMR3SetExecutionPolicy(pVCpu->CTX_SUFF(pVM)->pUVM, EMEXECPOLICY_IEM_ALL, false);
# endif
    return rcStrict;
}


/**
 * Interface for HM and EM to emulate \#VMEXIT.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 * @param   uExitCode   The exit code.
 * @param   uExitInfo1  The exit info. 1 field.
 * @param   uExitInfo2  The exit info. 2 field.
 * @thread  EMT(pVCpu)
 */
VMM_INT_DECL(VBOXSTRICTRC) IEMExecSvmVmexit(PVMCPUCC pVCpu, uint64_t uExitCode, uint64_t uExitInfo1, uint64_t uExitInfo2)
{
    IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_SVM_VMEXIT_MASK);
    VBOXSTRICTRC rcStrict = iemSvmVmexit(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
    if (pVCpu->iem.s.cActiveMappings)
        iemMemRollback(pVCpu);
    return iemExecStatusCodeFiddling(pVCpu, rcStrict);
}

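/*
 * Caller-side sketch (illustrative only, '# if 0'): how the hardware-assisted
 * path might hand an intercept seen in the nested-guest to IEM for #VMEXIT
 * emulation; SVM_EXIT_CPUID is just an example exit code:
 */
# if 0
VBOXSTRICTRC rcStrict = IEMExecSvmVmexit(pVCpu, SVM_EXIT_CPUID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
# endif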

/**
 * Performs the operations necessary as part of the VMRUN instruction
 * execution in the guest.
 *
 * @returns Strict VBox status code (i.e. informational status codes too).
 * @retval  VINF_SUCCESS successfully executed VMRUN and entered nested-guest
 *          code execution.
 * @retval  VINF_SVM_VMEXIT when executing VMRUN causes a \#VMEXIT
 *          (SVM_EXIT_INVALID most likely).
 *
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   cbInstr     The length of the VMRUN instruction.
 * @param   GCPhysVmcb  Guest physical address of the VMCB to run.
 */
static VBOXSTRICTRC iemSvmVmrun(PVMCPUCC pVCpu, uint8_t cbInstr, RTGCPHYS GCPhysVmcb) RT_NOEXCEPT
{
    LogFlow(("iemSvmVmrun\n"));

    /*
     * Cache the physical address of the VMCB for #VMEXIT exceptions.
     */
    pVCpu->cpum.GstCtx.hwvirt.svm.GCPhysVmcb = GCPhysVmcb;

    /*
     * Save the host state.
     */
    CPUMSvmVmRunSaveHostState(IEM_GET_CTX(pVCpu), cbInstr);

    /*
     * Read the guest VMCB.
     */
    PVMCC pVM = pVCpu->CTX_SUFF(pVM);
    int rc = PGMPhysSimpleReadGCPhys(pVM, &pVCpu->cpum.GstCtx.hwvirt.svm.Vmcb, GCPhysVmcb, sizeof(SVMVMCB));
    if (RT_SUCCESS(rc))
    {
        /*
         * AMD-V seems to preserve reserved fields and only writes back selected, recognized
         * fields on #VMEXIT. However, not all reserved bits are preserved (e.g, EXITINTINFO)
         * but in our implementation we try to preserve as much as we possibly can.
         *
         * We could read the entire page here and only write back the relevant fields on
         * #VMEXIT but since our internal VMCB is also being used by HM during hardware-assisted
         * SVM execution, it creates a potential for a nested-hypervisor to set bits that are
         * currently reserved but may be recognized as features bits in future CPUs causing
         * unexpected & undesired results. Hence, we zero out unrecognized fields here as we
         * typically enter hardware-assisted SVM soon anyway, see @bugref{7243#c113}.
         */
        PSVMVMCBCTRL      pVmcbCtrl   = &pVCpu->cpum.GstCtx.hwvirt.svm.Vmcb.ctrl;
        PSVMVMCBSTATESAVE pVmcbNstGst = &pVCpu->cpum.GstCtx.hwvirt.svm.Vmcb.guest;

        RT_ZERO(pVmcbCtrl->u8Reserved0);
        RT_ZERO(pVmcbCtrl->u8Reserved1);
        RT_ZERO(pVmcbCtrl->u8Reserved2);
        RT_ZERO(pVmcbNstGst->u8Reserved0);
        RT_ZERO(pVmcbNstGst->u8Reserved1);
        RT_ZERO(pVmcbNstGst->u8Reserved2);
        RT_ZERO(pVmcbNstGst->u8Reserved3);
        RT_ZERO(pVmcbNstGst->u8Reserved4);
        RT_ZERO(pVmcbNstGst->u8Reserved5);
        pVmcbCtrl->u32Reserved0                   = 0;
        pVmcbCtrl->TLBCtrl.n.u24Reserved          = 0;
        pVmcbCtrl->IntCtrl.n.u6Reserved           = 0;
        pVmcbCtrl->IntCtrl.n.u3Reserved           = 0;
        pVmcbCtrl->IntCtrl.n.u5Reserved           = 0;
        pVmcbCtrl->IntCtrl.n.u24Reserved          = 0;
        pVmcbCtrl->IntShadow.n.u30Reserved        = 0;
        pVmcbCtrl->ExitIntInfo.n.u19Reserved      = 0;
        pVmcbCtrl->NestedPagingCtrl.n.u29Reserved = 0;
        pVmcbCtrl->EventInject.n.u19Reserved      = 0;
        pVmcbCtrl->LbrVirt.n.u30Reserved          = 0;

        /*
         * Validate guest-state and controls.
         */
        /* VMRUN must always be intercepted. */
        if (!CPUMIsGuestSvmCtrlInterceptSet(pVCpu, IEM_GET_CTX(pVCpu), SVM_CTRL_INTERCEPT_VMRUN))
        {
            Log(("iemSvmVmrun: VMRUN instruction not intercepted -> #VMEXIT\n"));
            return iemSvmVmexit(pVCpu, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
        }

        /* Nested paging. */
        if (    pVmcbCtrl->NestedPagingCtrl.n.u1NestedPaging
            && !pVM->cpum.ro.GuestFeatures.fSvmNestedPaging)
        {
            Log(("iemSvmVmrun: Nested paging not supported -> Disabling\n"));
            pVmcbCtrl->NestedPagingCtrl.n.u1NestedPaging = 0;
        }

        /* AVIC. */
        if (    pVmcbCtrl->IntCtrl.n.u1AvicEnable
            && !pVM->cpum.ro.GuestFeatures.fSvmAvic)
        {
            Log(("iemSvmVmrun: AVIC not supported -> Disabling\n"));
            pVmcbCtrl->IntCtrl.n.u1AvicEnable = 0;
        }

        /* Last branch record (LBR) virtualization. */
        if (    pVmcbCtrl->LbrVirt.n.u1LbrVirt
            && !pVM->cpum.ro.GuestFeatures.fSvmLbrVirt)
        {
            Log(("iemSvmVmrun: LBR virtualization not supported -> Disabling\n"));
            pVmcbCtrl->LbrVirt.n.u1LbrVirt = 0;
        }

        /* Virtualized VMSAVE/VMLOAD. */
        if (    pVmcbCtrl->LbrVirt.n.u1VirtVmsaveVmload
            && !pVM->cpum.ro.GuestFeatures.fSvmVirtVmsaveVmload)
        {
            Log(("iemSvmVmrun: Virtualized VMSAVE/VMLOAD not supported -> Disabling\n"));
            pVmcbCtrl->LbrVirt.n.u1VirtVmsaveVmload = 0;
        }

        /* Virtual GIF. */
        if (    pVmcbCtrl->IntCtrl.n.u1VGifEnable
            && !pVM->cpum.ro.GuestFeatures.fSvmVGif)
        {
            Log(("iemSvmVmrun: Virtual GIF not supported -> Disabling\n"));
            pVmcbCtrl->IntCtrl.n.u1VGifEnable = 0;
        }

        /* Guest ASID. */
        if (!pVmcbCtrl->TLBCtrl.n.u32ASID)
        {
            Log(("iemSvmVmrun: Guest ASID is invalid -> #VMEXIT\n"));
            return iemSvmVmexit(pVCpu, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
        }

        /* Guest AVIC. */
        if (    pVmcbCtrl->IntCtrl.n.u1AvicEnable
            && !pVM->cpum.ro.GuestFeatures.fSvmAvic)
        {
            Log(("iemSvmVmrun: AVIC not supported -> Disabling\n"));
            pVmcbCtrl->IntCtrl.n.u1AvicEnable = 0;
        }

        /* Guest Secure Encrypted Virtualization. */
        if (   (   pVmcbCtrl->NestedPagingCtrl.n.u1Sev
                || pVmcbCtrl->NestedPagingCtrl.n.u1SevEs)
            && !pVM->cpum.ro.GuestFeatures.fSvmAvic)
        {
            Log(("iemSvmVmrun: SEV not supported -> Disabling\n"));
            pVmcbCtrl->NestedPagingCtrl.n.u1Sev   = 0;
            pVmcbCtrl->NestedPagingCtrl.n.u1SevEs = 0;
        }

        /* Flush by ASID. */
        if (   !pVM->cpum.ro.GuestFeatures.fSvmFlusbByAsid
            &&  pVmcbCtrl->TLBCtrl.n.u8TLBFlush != SVM_TLB_FLUSH_NOTHING
            &&  pVmcbCtrl->TLBCtrl.n.u8TLBFlush != SVM_TLB_FLUSH_ENTIRE)
        {
            Log(("iemSvmVmrun: Flush-by-ASID not supported -> #VMEXIT\n"));
            return iemSvmVmexit(pVCpu, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
        }

        /* IO permission bitmap. */
        RTGCPHYS const GCPhysIOBitmap = pVmcbCtrl->u64IOPMPhysAddr;
        if (   (GCPhysIOBitmap & X86_PAGE_4K_OFFSET_MASK)
            || !PGMPhysIsGCPhysNormal(pVM, GCPhysIOBitmap)
            || !PGMPhysIsGCPhysNormal(pVM, GCPhysIOBitmap + X86_PAGE_4K_SIZE)
            || !PGMPhysIsGCPhysNormal(pVM, GCPhysIOBitmap + (X86_PAGE_4K_SIZE << 1)))
        {
            Log(("iemSvmVmrun: IO bitmap physaddr invalid. GCPhysIOBitmap=%#RX64 -> #VMEXIT\n", GCPhysIOBitmap));
            return iemSvmVmexit(pVCpu, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
        }

        /* MSR permission bitmap. */
        RTGCPHYS const GCPhysMsrBitmap = pVmcbCtrl->u64MSRPMPhysAddr;
        if (   (GCPhysMsrBitmap & X86_PAGE_4K_OFFSET_MASK)
            || !PGMPhysIsGCPhysNormal(pVM, GCPhysMsrBitmap)
            || !PGMPhysIsGCPhysNormal(pVM, GCPhysMsrBitmap + X86_PAGE_4K_SIZE))
        {
            Log(("iemSvmVmrun: MSR bitmap physaddr invalid. GCPhysMsrBitmap=%#RX64 -> #VMEXIT\n", GCPhysMsrBitmap));
            return iemSvmVmexit(pVCpu, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
        }

        /* CR0. */
        if (   !(pVmcbNstGst->u64CR0 & X86_CR0_CD)
            &&  (pVmcbNstGst->u64CR0 & X86_CR0_NW))
        {
            Log(("iemSvmVmrun: CR0 no-write through with cache disabled. CR0=%#RX64 -> #VMEXIT\n", pVmcbNstGst->u64CR0));
            return iemSvmVmexit(pVCpu, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
        }
        if (pVmcbNstGst->u64CR0 >> 32)
        {
            Log(("iemSvmVmrun: CR0 reserved bits set. CR0=%#RX64 -> #VMEXIT\n", pVmcbNstGst->u64CR0));
            return iemSvmVmexit(pVCpu, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
        }
        /** @todo Implement all reserved bits/illegal combinations for CR3, CR4. */

        /* DR6 and DR7. */
        if (   pVmcbNstGst->u64DR6 >> 32
            || pVmcbNstGst->u64DR7 >> 32)
        {
            Log(("iemSvmVmrun: DR6 and/or DR7 reserved bits set. DR6=%#RX64 DR7=%#RX64 -> #VMEXIT\n", pVmcbNstGst->u64DR6,
                 pVmcbNstGst->u64DR7));
            return iemSvmVmexit(pVCpu, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
        }

        /*
         * PAT (Page Attribute Table) MSR.
         *
         * The CPU only validates and loads it when nested-paging is enabled.
         * See AMD spec. "15.25.4 Nested Paging and VMRUN/#VMEXIT".
         */
        if (    pVmcbCtrl->NestedPagingCtrl.n.u1NestedPaging
            && !CPUMIsPatMsrValid(pVmcbNstGst->u64PAT))
        {
            Log(("iemSvmVmrun: PAT invalid. u64PAT=%#RX64 -> #VMEXIT\n", pVmcbNstGst->u64PAT));
            return iemSvmVmexit(pVCpu, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
        }

        /*
         * Copy the IO permission bitmap into the cache.
         */
        AssertCompile(sizeof(pVCpu->cpum.GstCtx.hwvirt.svm.abIoBitmap) == SVM_IOPM_PAGES * X86_PAGE_4K_SIZE);
        rc = PGMPhysSimpleReadGCPhys(pVM, pVCpu->cpum.GstCtx.hwvirt.svm.abIoBitmap, GCPhysIOBitmap,
                                     sizeof(pVCpu->cpum.GstCtx.hwvirt.svm.abIoBitmap));
        if (RT_FAILURE(rc))
        {
            Log(("iemSvmVmrun: Failed reading the IO permission bitmap at %#RGp. rc=%Rrc\n", GCPhysIOBitmap, rc));
            return iemSvmVmexit(pVCpu, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
        }

        /*
         * Copy the MSR permission bitmap into the cache.
         */
        AssertCompile(sizeof(pVCpu->cpum.GstCtx.hwvirt.svm.abMsrBitmap) == SVM_MSRPM_PAGES * X86_PAGE_4K_SIZE);
        rc = PGMPhysSimpleReadGCPhys(pVM, pVCpu->cpum.GstCtx.hwvirt.svm.abMsrBitmap, GCPhysMsrBitmap,
                                     sizeof(pVCpu->cpum.GstCtx.hwvirt.svm.abMsrBitmap));
        if (RT_FAILURE(rc))
        {
            Log(("iemSvmVmrun: Failed reading the MSR permission bitmap at %#RGp. rc=%Rrc\n", GCPhysMsrBitmap, rc));
            return iemSvmVmexit(pVCpu, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
        }

        /*
         * Copy segments from nested-guest VMCB state to the guest-CPU state.
         *
         * We do this here as we need to use the CS attributes and it's easier this way
         * than using the VMCB format selectors. It doesn't really matter where we copy
         * the state, we restore the guest-CPU context state on the \#VMEXIT anyway.
         */
        HMSVM_SEG_REG_COPY_FROM_VMCB(IEM_GET_CTX(pVCpu), pVmcbNstGst, ES, es);
        HMSVM_SEG_REG_COPY_FROM_VMCB(IEM_GET_CTX(pVCpu), pVmcbNstGst, CS, cs);
        HMSVM_SEG_REG_COPY_FROM_VMCB(IEM_GET_CTX(pVCpu), pVmcbNstGst, SS, ss);
        HMSVM_SEG_REG_COPY_FROM_VMCB(IEM_GET_CTX(pVCpu), pVmcbNstGst, DS, ds);

        /** @todo Segment attribute overrides by VMRUN. */

        /*
         * CPL adjustments and overrides.
         *
         * SS.DPL is apparently the CPU's CPL, see comment in CPUMGetGuestCPL().
         * We shall thus adjust both CS.DPL and SS.DPL here.
         */
        pVCpu->cpum.GstCtx.cs.Attr.n.u2Dpl = pVCpu->cpum.GstCtx.ss.Attr.n.u2Dpl = pVmcbNstGst->u8CPL;
        if (CPUMIsGuestInV86ModeEx(IEM_GET_CTX(pVCpu)))
            pVCpu->cpum.GstCtx.cs.Attr.n.u2Dpl = pVCpu->cpum.GstCtx.ss.Attr.n.u2Dpl = 3;
        if (CPUMIsGuestInRealModeEx(IEM_GET_CTX(pVCpu)))
            pVCpu->cpum.GstCtx.cs.Attr.n.u2Dpl = pVCpu->cpum.GstCtx.ss.Attr.n.u2Dpl = 0;
        Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));

        /*
         * Continue validating guest-state and controls.
         *
         * We pass CR0 as 0 to CPUMIsGuestEferMsrWriteValid() below to skip the illegal
         * EFER.LME bit transition check. We pass the nested-guest's EFER as both the
         * old and new EFER value to not have any guest EFER bits influence the new
         * nested-guest EFER.
         */
        uint64_t uValidEfer;
        rc = CPUMIsGuestEferMsrWriteValid(pVM, 0 /* CR0 */, pVmcbNstGst->u64EFER, pVmcbNstGst->u64EFER, &uValidEfer);
        if (RT_FAILURE(rc))
        {
            Log(("iemSvmVmrun: EFER invalid uOldEfer=%#RX64 -> #VMEXIT\n", pVmcbNstGst->u64EFER));
            return iemSvmVmexit(pVCpu, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
        }

        /* Validate paging and CPU mode bits. */
        bool const fSvm                     = RT_BOOL(uValidEfer & MSR_K6_EFER_SVME);
        bool const fLongModeSupported       = RT_BOOL(pVM->cpum.ro.GuestFeatures.fLongMode);
        bool const fLongModeEnabled         = RT_BOOL(uValidEfer & MSR_K6_EFER_LME);
        bool const fPaging                  = RT_BOOL(pVmcbNstGst->u64CR0 & X86_CR0_PG);
        bool const fPae                     = RT_BOOL(pVmcbNstGst->u64CR4 & X86_CR4_PAE);
        bool const fProtMode                = RT_BOOL(pVmcbNstGst->u64CR0 & X86_CR0_PE);
        bool const fLongModeWithPaging      = fLongModeEnabled && fPaging;
        bool const fLongModeConformCS       = pVCpu->cpum.GstCtx.cs.Attr.n.u1Long && pVCpu->cpum.GstCtx.cs.Attr.n.u1DefBig;
        /* Adjust EFER.LMA (this is normally done by the CPU when system software writes CR0). */
        if (fLongModeWithPaging)
            uValidEfer |= MSR_K6_EFER_LMA;
        bool const fLongModeActiveOrEnabled = RT_BOOL(uValidEfer & (MSR_K6_EFER_LME | MSR_K6_EFER_LMA));
        if (   !fSvm
            || (!fLongModeSupported && fLongModeActiveOrEnabled)
            || (fLongModeWithPaging && !fPae)
            || (fLongModeWithPaging && !fProtMode)
            || (   fLongModeEnabled
                && fPaging
                && fPae
                && fLongModeConformCS))
        {
            Log(("iemSvmVmrun: EFER invalid. uValidEfer=%#RX64 -> #VMEXIT\n", uValidEfer));
            return iemSvmVmexit(pVCpu, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
        }

        /*
         * Preserve the required force-flags.
         *
         * We only preserve the force-flags that would affect the execution of the
         * nested-guest (or the guest).
         *
         *   - VMCPU_FF_BLOCK_NMIS needs to be preserved as it blocks NMI until the
         *     execution of a subsequent IRET instruction in the guest.
         *
         * The remaining FFs (e.g. timers) can stay in place so that we will be able to
         * generate interrupts that should cause #VMEXITs for the nested-guest.
         *
         * VMRUN has implicit GIF (Global Interrupt Flag) handling, we don't need to
         * preserve VMCPU_FF_INHIBIT_INTERRUPTS.
         */
        pVCpu->cpum.GstCtx.hwvirt.fSavedInhibit = pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_INHIBIT_NMI;
        pVCpu->cpum.GstCtx.eflags.uBoth        &= ~CPUMCTX_INHIBIT_NMI;

        /*
         * Pause filter.
         */
        if (pVM->cpum.ro.GuestFeatures.fSvmPauseFilter)
        {
            pVCpu->cpum.GstCtx.hwvirt.svm.cPauseFilter = pVmcbCtrl->u16PauseFilterCount;
            if (pVM->cpum.ro.GuestFeatures.fSvmPauseFilterThreshold)
                pVCpu->cpum.GstCtx.hwvirt.svm.cPauseFilterThreshold = pVmcbCtrl->u16PauseFilterThreshold;
        }

        /*
         * Interrupt shadow.
         */
        if (pVmcbCtrl->IntShadow.n.u1IntShadow)
        {
            LogFlow(("iemSvmVmrun: setting interrupt shadow. inhibit PC=%#RX64\n", pVmcbNstGst->u64RIP));
            /** @todo will this cause trouble if the nested-guest is 64-bit but the guest is 32-bit? */
            CPUMSetInInterruptShadowEx(&pVCpu->cpum.GstCtx, pVmcbNstGst->u64RIP);
        }

        /*
         * TLB flush control.
         * Currently disabled since it's redundant as we unconditionally flush the TLB
         * in iemSvmWorldSwitch() below.
         */
# if 0
        /** @todo @bugref{7243}: ASID based PGM TLB flushes. */
        if (   pVmcbCtrl->TLBCtrl.n.u8TLBFlush == SVM_TLB_FLUSH_ENTIRE
            || pVmcbCtrl->TLBCtrl.n.u8TLBFlush == SVM_TLB_FLUSH_SINGLE_CONTEXT
            || pVmcbCtrl->TLBCtrl.n.u8TLBFlush == SVM_TLB_FLUSH_SINGLE_CONTEXT_RETAIN_GLOBALS)
            PGMFlushTLB(pVCpu, pVmcbNstGst->u64CR3, true /* fGlobal */);
# endif

        /*
         * Validate and map PAE PDPEs if the guest will be using PAE paging.
         * Invalid PAE PDPEs here causes a #VMEXIT.
         */
        if (   !pVmcbCtrl->NestedPagingCtrl.n.u1NestedPaging
            &&  CPUMIsPaePagingEnabled(pVmcbNstGst->u64CR0, pVmcbNstGst->u64CR4, uValidEfer))
        {
            rc = PGMGstMapPaePdpesAtCr3(pVCpu, pVmcbNstGst->u64CR3);
            if (RT_SUCCESS(rc))
            { /* likely */ }
            else
            {
                Log(("iemSvmVmrun: PAE PDPEs invalid -> #VMEXIT\n"));
                return iemSvmVmexit(pVCpu, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
            }
        }

        /*
         * Copy the remaining guest state from the VMCB to the guest-CPU context.
         */
        pVCpu->cpum.GstCtx.gdtr.cbGdt = pVmcbNstGst->GDTR.u32Limit;
        pVCpu->cpum.GstCtx.gdtr.pGdt  = pVmcbNstGst->GDTR.u64Base;
        pVCpu->cpum.GstCtx.idtr.cbIdt = pVmcbNstGst->IDTR.u32Limit;
        pVCpu->cpum.GstCtx.idtr.pIdt  = pVmcbNstGst->IDTR.u64Base;
        CPUMSetGuestCR0(pVCpu, pVmcbNstGst->u64CR0);
        CPUMSetGuestCR4(pVCpu, pVmcbNstGst->u64CR4);
        pVCpu->cpum.GstCtx.cr3        = pVmcbNstGst->u64CR3;
        pVCpu->cpum.GstCtx.cr2        = pVmcbNstGst->u64CR2;
        pVCpu->cpum.GstCtx.dr[6]      = pVmcbNstGst->u64DR6;
        pVCpu->cpum.GstCtx.dr[7]      = pVmcbNstGst->u64DR7;
        pVCpu->cpum.GstCtx.rflags.u   = pVmcbNstGst->u64RFlags;
        pVCpu->cpum.GstCtx.rax        = pVmcbNstGst->u64RAX;
        pVCpu->cpum.GstCtx.rsp        = pVmcbNstGst->u64RSP;
        pVCpu->cpum.GstCtx.rip        = pVmcbNstGst->u64RIP;
        CPUMSetGuestEferMsrNoChecks(pVCpu, pVCpu->cpum.GstCtx.msrEFER, uValidEfer);
        if (pVmcbCtrl->NestedPagingCtrl.n.u1NestedPaging)
            pVCpu->cpum.GstCtx.msrPAT = pVmcbNstGst->u64PAT;

        /* Mask DR6, DR7 bits mandatory set/clear bits. */
        pVCpu->cpum.GstCtx.dr[6] &= ~(X86_DR6_RAZ_MASK | X86_DR6_MBZ_MASK);
        pVCpu->cpum.GstCtx.dr[6] |= X86_DR6_RA1_MASK;
        pVCpu->cpum.GstCtx.dr[7] &= ~(X86_DR7_RAZ_MASK | X86_DR7_MBZ_MASK);
        pVCpu->cpum.GstCtx.dr[7] |= X86_DR7_RA1_MASK;

        /*
         * Check for pending virtual interrupts.
         */
        if (pVmcbCtrl->IntCtrl.n.u1VIrqPending)
            VMCPU_FF_SET(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST);
        else
            Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST));

        /*
         * Update PGM, IEM and others of a world-switch.
         */
        VBOXSTRICTRC rcStrict = iemSvmWorldSwitch(pVCpu);
        if (rcStrict == VINF_SUCCESS)
        { /* likely */ }
        else if (RT_SUCCESS(rcStrict))
        {
            LogFlow(("iemSvmVmrun: iemSvmWorldSwitch returned %Rrc, setting passup status\n", VBOXSTRICTRC_VAL(rcStrict)));
            rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
        }
        else
        {
            LogFlow(("iemSvmVmrun: iemSvmWorldSwitch unexpected failure. rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
            return rcStrict;
        }

        /*
         * Set the global-interrupt flag to allow interrupts in the guest.
         */
        CPUMSetGuestGif(&pVCpu->cpum.GstCtx, true);

        /*
         * Event injection.
         */
        PCSVMEVENT pEventInject = &pVmcbCtrl->EventInject;
        pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents = !pEventInject->n.u1Valid;
        if (pEventInject->n.u1Valid)
        {
            uint8_t   const uVector    = pEventInject->n.u8Vector;
            TRPMEVENT const enmType    = HMSvmEventToTrpmEventType(pEventInject, uVector);
            uint16_t  const uErrorCode = pEventInject->n.u1ErrorCodeValid ? pEventInject->n.u32ErrorCode : 0;

            /* Validate vectors for hardware exceptions, see AMD spec. 15.20 "Event Injection". */
            if (RT_UNLIKELY(enmType == TRPM_32BIT_HACK))
            {
                Log(("iemSvmVmrun: Invalid event type =%#x -> #VMEXIT\n", (uint8_t)pEventInject->n.u3Type));
                return iemSvmVmexit(pVCpu, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
            }
            if (pEventInject->n.u3Type == SVM_EVENT_EXCEPTION)
            {
                if (   uVector == X86_XCPT_NMI
                    || uVector > X86_XCPT_LAST)
                {
                    Log(("iemSvmVmrun: Invalid vector for hardware exception. uVector=%#x -> #VMEXIT\n", uVector));
                    return iemSvmVmexit(pVCpu, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
                }
                if (   uVector == X86_XCPT_BR
                    && CPUMIsGuestInLongModeEx(IEM_GET_CTX(pVCpu)))
                {
                    Log(("iemSvmVmrun: Cannot inject #BR in long mode -> #VMEXIT\n"));
                    return iemSvmVmexit(pVCpu, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
                }
                /** @todo any others? */
            }

            /*
             * Invalidate the exit interrupt-information field here. This field is fully updated
             * on #VMEXIT as events other than the one below can also cause intercepts during
             * their injection (e.g. exceptions).
             */
            pVmcbCtrl->ExitIntInfo.n.u1Valid = 0;

            /*
             * Clear the event injection valid bit here. While the AMD spec. mentions that the CPU
             * clears this bit from the VMCB unconditionally on #VMEXIT, internally the CPU could be
             * clearing it at any time, most likely before/after injecting the event. Since VirtualBox
             * doesn't have any virtual-CPU internal representation of this bit, we clear/update the
             * VMCB here. This also has the added benefit that we avoid the risk of injecting the event
             * twice if we fallback to executing the nested-guest using hardware-assisted SVM after
             * injecting the event through IEM here.
             */
            pVmcbCtrl->EventInject.n.u1Valid = 0;

            /** @todo NRIP: Software interrupts can only be pushed properly if we support
             *        NRIP for the nested-guest to calculate the instruction length
             *        below. */
            LogFlow(("iemSvmVmrun: Injecting event: %04x:%08RX64 vec=%#x type=%d uErr=%u cr2=%#RX64 cr3=%#RX64 efer=%#RX64\n",
                     pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, uVector, enmType, uErrorCode, pVCpu->cpum.GstCtx.cr2,
                     pVCpu->cpum.GstCtx.cr3, pVCpu->cpum.GstCtx.msrEFER));

            /*
             * We shall not inject the event here right away. There may be paging mode related updates
             * as a result of the world-switch above that are yet to be honored. Instead flag the event
             * as pending for injection.
             */
            TRPMAssertTrap(pVCpu, uVector, enmType);
            if (pEventInject->n.u1ErrorCodeValid)
                TRPMSetErrorCode(pVCpu, uErrorCode);
            if (   enmType == TRPM_TRAP
                && uVector == X86_XCPT_PF)
                TRPMSetFaultAddress(pVCpu, pVCpu->cpum.GstCtx.cr2);
        }
        else
            LogFlow(("iemSvmVmrun: Entering nested-guest: %04x:%08RX64 cr0=%#RX64 cr3=%#RX64 cr4=%#RX64 efer=%#RX64 efl=%#x\n",
                     pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.cr0, pVCpu->cpum.GstCtx.cr3,
                     pVCpu->cpum.GstCtx.cr4, pVCpu->cpum.GstCtx.msrEFER, pVCpu->cpum.GstCtx.eflags.u));

        LogFlow(("iemSvmVmrun: returns %d\n", VBOXSTRICTRC_VAL(rcStrict)));

# if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && defined(IN_RING3)
        /* If CLGI/STGI isn't intercepted we force IEM-only nested-guest execution here. */
        if (   HMIsEnabled(pVM)
            && HMIsSvmVGifActive(pVM))
            return EMR3SetExecutionPolicy(pVCpu->CTX_SUFF(pVM)->pUVM, EMEXECPOLICY_IEM_ALL, true);
# endif

        return rcStrict;
    }

    /* Shouldn't really happen as the caller should've validated the physical address already. */
    Log(("iemSvmVmrun: Failed to read nested-guest VMCB at %#RGp (rc=%Rrc) -> #VMEXIT\n", GCPhysVmcb, rc));
    return rc;
}

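/*
 * Nested-hypervisor-side sketch (hypothetical guest code, '# if 0'): the
 * minimum VMCB control setup the validation above insists on -- VMRUN must be
 * intercepted, the ASID must be non-zero and the IOPM/MSRPM guest-physical
 * addresses must be 4K aligned:
 */
# if 0
static void nstHvInitVmcbCtrlSketch(PSVMVMCBCTRL pVmcbCtrl, RTGCPHYS GCPhysIopm, RTGCPHYS GCPhysMsrpm)
{
    Assert(!(GCPhysIopm  & X86_PAGE_4K_OFFSET_MASK));
    Assert(!(GCPhysMsrpm & X86_PAGE_4K_OFFSET_MASK));
    pVmcbCtrl->u64InterceptCtrl |= SVM_CTRL_INTERCEPT_VMRUN;    /* else #VMEXIT(SVM_EXIT_INVALID) */
    pVmcbCtrl->TLBCtrl.n.u32ASID = 1;                           /* ASID 0 -> #VMEXIT(SVM_EXIT_INVALID) */
    pVmcbCtrl->u64IOPMPhysAddr   = GCPhysIopm;
    pVmcbCtrl->u64MSRPMPhysAddr  = GCPhysMsrpm;
}
# endif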

/**
 * Checks if the event intercepts and performs the \#VMEXIT if the corresponding
 * intercept is active.
 *
 * @returns Strict VBox status code.
 * @retval  VINF_SVM_INTERCEPT_NOT_ACTIVE if the intercept is not active or
 *          we're not executing a nested-guest.
 * @retval  VINF_SVM_VMEXIT if the intercept is active and the \#VMEXIT occurred
 *          successfully.
 * @retval  VERR_SVM_VMEXIT_FAILED if the intercept is active and the \#VMEXIT
 *          failed and a shutdown needs to be initiated for the guest.
 *
 * @returns VBox strict status code.
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 * @param   cbInstr     The length of the instruction in bytes triggering the
 *                      event.
 * @param   u8Vector    The interrupt or exception vector.
 * @param   fFlags      The exception flags (see IEM_XCPT_FLAGS_XXX).
 * @param   uErr        The error-code associated with the exception.
 * @param   uCr2        The CR2 value in case of a \#PF exception.
 */
VBOXSTRICTRC iemHandleSvmEventIntercept(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t u8Vector, uint32_t fFlags,
                                        uint32_t uErr, uint64_t uCr2) RT_NOEXCEPT
{
    Assert(CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)));

    /*
     * Handle SVM exception and software interrupt intercepts, see AMD spec. 15.12 "Exception Intercepts".
     *
     *   - NMI intercepts have their own exit code and do not cause SVM_EXIT_XCPT_2 #VMEXITs.
     *   - External interrupts and software interrupts (INTn instruction) do not check the exception intercepts
     *     even when they use a vector in the range 0 to 31.
     *   - ICEBP should not trigger #DB intercept, but its own intercept.
     *   - For #PF exceptions, its intercept is checked before CR2 is written by the exception.
     */
    /* Check NMI intercept */
    if (   u8Vector == X86_XCPT_NMI
        && (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
        && IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_NMI))
    {
        Log2(("iemHandleSvmNstGstEventIntercept: NMI intercept -> #VMEXIT\n"));
        IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_NMI, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
    }

    /* Check ICEBP intercept. */
    if (   (fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)
        && IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_ICEBP))
    {
        Log2(("iemHandleSvmNstGstEventIntercept: ICEBP intercept -> #VMEXIT\n"));
        IEM_SVM_UPDATE_NRIP(pVCpu, cbInstr);
        IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_ICEBP, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
    }

    /* Check CPU exception intercepts. */
    if (   (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
        && IEM_SVM_IS_XCPT_INTERCEPT_SET(pVCpu, u8Vector))
    {
        Assert(u8Vector <= X86_XCPT_LAST);
        uint64_t const uExitInfo1 = fFlags & IEM_XCPT_FLAGS_ERR ? uErr : 0;
        uint64_t const uExitInfo2 = fFlags & IEM_XCPT_FLAGS_CR2 ? uCr2 : 0;
        if (   IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSvmDecodeAssists
            && u8Vector == X86_XCPT_PF
            && !(uErr & X86_TRAP_PF_ID))
        {
            PSVMVMCBCTRL pVmcbCtrl = &pVCpu->cpum.GstCtx.hwvirt.svm.Vmcb.ctrl;
# ifdef IEM_WITH_CODE_TLB
            uint8_t const *pbInstrBuf = pVCpu->iem.s.pbInstrBuf;
            uint8_t const  cbInstrBuf = pVCpu->iem.s.cbInstrBuf;
            pVmcbCtrl->cbInstrFetched = RT_MIN(cbInstrBuf, SVM_CTRL_GUEST_INSTR_BYTES_MAX);
            if (   pbInstrBuf
                && cbInstrBuf > 0)
                memcpy(&pVmcbCtrl->abInstr[0], pbInstrBuf, pVmcbCtrl->cbInstrFetched);
# else
            uint8_t const cbOpcode    = pVCpu->iem.s.cbOpcode;
            pVmcbCtrl->cbInstrFetched = RT_MIN(cbOpcode, SVM_CTRL_GUEST_INSTR_BYTES_MAX);
            if (cbOpcode > 0)
                memcpy(&pVmcbCtrl->abInstr[0], &pVCpu->iem.s.abOpcode[0], pVmcbCtrl->cbInstrFetched);
# endif
        }
        if (u8Vector == X86_XCPT_BR)
            IEM_SVM_UPDATE_NRIP(pVCpu, cbInstr);
        Log2(("iemHandleSvmNstGstEventIntercept: Xcpt intercept u32InterceptXcpt=%#RX32 u8Vector=%#x "
              "uExitInfo1=%#RX64 uExitInfo2=%#RX64 -> #VMEXIT\n", pVCpu->cpum.GstCtx.hwvirt.svm.Vmcb.ctrl.u32InterceptXcpt,
              u8Vector, uExitInfo1, uExitInfo2));
        IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_XCPT_0 + u8Vector, uExitInfo1, uExitInfo2);
    }

    /* Check software interrupt (INTn) intercepts. */
    if (   (fFlags & (  IEM_XCPT_FLAGS_T_SOFT_INT
                      | IEM_XCPT_FLAGS_BP_INSTR
                      | IEM_XCPT_FLAGS_ICEBP_INSTR
                      | IEM_XCPT_FLAGS_OF_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
        && IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_INTN))
    {
        uint64_t const uExitInfo1 = IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSvmDecodeAssists ? u8Vector : 0;
        Log2(("iemHandleSvmNstGstEventIntercept: Software INT intercept (u8Vector=%#x) -> #VMEXIT\n", u8Vector));
        IEM_SVM_UPDATE_NRIP(pVCpu, cbInstr);
        IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_SWINT, uExitInfo1, 0 /* uExitInfo2 */);
    }

    return VINF_SVM_INTERCEPT_NOT_ACTIVE;
}

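/*
 * Sketch of the per-vector exception-intercept test used above (assumption:
 * one bit per vector in the 32-bit intercept word, matching the
 * 'u32InterceptXcpt' field logged above; illustrative only, '# if 0'):
 */
# if 0
static bool svmIsXcptInterceptSetSketch(SVMVMCBCTRL const *pVmcbCtrl, uint8_t u8Vector)
{
    Assert(u8Vector <= X86_XCPT_LAST);
    return RT_BOOL(pVmcbCtrl->u32InterceptXcpt & RT_BIT_32(u8Vector));
}
# endif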

/**
 * Checks the SVM IO permission bitmap and performs the \#VMEXIT if the
 * corresponding intercept is active.
 *
 * @returns Strict VBox status code.
 * @retval  VINF_SVM_INTERCEPT_NOT_ACTIVE if the intercept is not active or
 *          we're not executing a nested-guest.
 * @retval  VINF_SVM_VMEXIT if the intercept is active and the \#VMEXIT occurred
 *          successfully.
 * @retval  VERR_SVM_VMEXIT_FAILED if the intercept is active and the \#VMEXIT
 *          failed and a shutdown needs to be initiated for the guest.
 *
 * @returns VBox strict status code.
 * @param   pVCpu           The cross context virtual CPU structure of the calling thread.
 * @param   u16Port         The IO port being accessed.
 * @param   enmIoType       The type of IO access.
 * @param   cbReg           The IO operand size in bytes.
 * @param   cAddrSizeBits   The address size bits (for 16, 32 or 64).
 * @param   iEffSeg         The effective segment number.
 * @param   fRep            Whether this is a repeating IO instruction (REP prefix).
 * @param   fStrIo          Whether this is a string IO instruction.
 * @param   cbInstr         The length of the IO instruction in bytes.
 */
VBOXSTRICTRC iemSvmHandleIOIntercept(PVMCPUCC pVCpu, uint16_t u16Port, SVMIOIOTYPE enmIoType, uint8_t cbReg,
                                     uint8_t cAddrSizeBits, uint8_t iEffSeg, bool fRep, bool fStrIo, uint8_t cbInstr) RT_NOEXCEPT
{
    Assert(IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_IOIO_PROT));
    Assert(cAddrSizeBits == 16 || cAddrSizeBits == 32 || cAddrSizeBits == 64);
    Assert(cbReg == 1 || cbReg == 2 || cbReg == 4 || cbReg == 8);

    Log3(("iemSvmHandleIOIntercept: u16Port=%#x (%u)\n", u16Port, u16Port));

    SVMIOIOEXITINFO IoExitInfo;
    bool const fIntercept = CPUMIsSvmIoInterceptSet(pVCpu->cpum.GstCtx.hwvirt.svm.abIoBitmap, u16Port, enmIoType, cbReg,
                                                    cAddrSizeBits, iEffSeg, fRep, fStrIo, &IoExitInfo);
    if (fIntercept)
    {
        Log3(("iemSvmHandleIOIntercept: u16Port=%#x (%u) -> #VMEXIT\n", u16Port, u16Port));
        IEM_SVM_UPDATE_NRIP(pVCpu, cbInstr);
        return iemSvmVmexit(pVCpu, SVM_EXIT_IOIO, IoExitInfo.u, pVCpu->cpum.GstCtx.rip + cbInstr);
    }

    /** @todo remove later (for debugging as VirtualBox always traps all IO
     *        intercepts). */
    AssertMsgFailed(("iemSvmHandleIOIntercept: We expect an IO intercept here!\n"));
    return VINF_SVM_INTERCEPT_NOT_ACTIVE;
}

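/*
 * Minimal IOPM lookup sketch (illustrative only, '# if 0'; assumption per the
 * AMD layout: one bit per IO port, and an access of 'cbReg' bytes tests one
 * bit per accessed port starting at 'u16Port'):
 */
# if 0
static bool svmIsIoPortInterceptedSketch(uint8_t const *pbIopm, uint16_t u16Port, uint8_t cbReg)
{
    for (uint32_t i = 0; i < cbReg; i++)
    {
        uint32_t const uBit = (uint32_t)u16Port + i;
        if (pbIopm[uBit >> 3] & RT_BIT_32(uBit & 7))
            return true;
    }
    return false;
}
# endif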

/**
 * Checks the SVM MSR permission bitmap and performs the \#VMEXIT if the
 * corresponding intercept is active.
 *
 * @returns Strict VBox status code.
 * @retval  VINF_SVM_INTERCEPT_NOT_ACTIVE if the MSR permission bitmap does not
 *          specify interception of the accessed MSR @a idMsr.
 * @retval  VINF_SVM_VMEXIT if the intercept is active and the \#VMEXIT occurred
 *          successfully.
 * @retval  VERR_SVM_VMEXIT_FAILED if the intercept is active and the \#VMEXIT
 *          failed and a shutdown needs to be initiated for the guest.
 *
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   idMsr       The MSR being accessed in the nested-guest.
 * @param   fWrite      Whether this is an MSR write access, @c false implies an
 *                      MSR read.
 * @param   cbInstr     The length of the MSR read/write instruction in bytes.
 */
VBOXSTRICTRC iemSvmHandleMsrIntercept(PVMCPUCC pVCpu, uint32_t idMsr, bool fWrite, uint8_t cbInstr) RT_NOEXCEPT
{
    /*
     * Check if any MSRs are being intercepted.
     */
    Assert(CPUMIsGuestSvmCtrlInterceptSet(pVCpu, IEM_GET_CTX(pVCpu), SVM_CTRL_INTERCEPT_MSR_PROT));
    Assert(CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)));

    uint64_t const uExitInfo1 = fWrite ? SVM_EXIT1_MSR_WRITE : SVM_EXIT1_MSR_READ;

    /*
     * Get the byte and bit offset of the permission bits corresponding to the MSR.
     */
    uint16_t offMsrpm;
    uint8_t  uMsrpmBit;
    int rc = CPUMGetSvmMsrpmOffsetAndBit(idMsr, &offMsrpm, &uMsrpmBit);
    if (RT_SUCCESS(rc))
    {
        Assert(uMsrpmBit == 0 || uMsrpmBit == 2 || uMsrpmBit == 4 || uMsrpmBit == 6);
        Assert(offMsrpm < SVM_MSRPM_PAGES << X86_PAGE_4K_SHIFT);
        if (fWrite)
            ++uMsrpmBit;

        /*
         * Check if the bit is set, if so, trigger a #VMEXIT.
         */
        if (pVCpu->cpum.GstCtx.hwvirt.svm.abMsrBitmap[offMsrpm] & RT_BIT(uMsrpmBit))
        {
            IEM_SVM_UPDATE_NRIP(pVCpu, cbInstr);
            return iemSvmVmexit(pVCpu, SVM_EXIT_MSR, uExitInfo1, 0 /* uExitInfo2 */);
        }
    }
    else
    {
        /*
         * This shouldn't happen, but if it does, cause a #VMEXIT and let the "host" (nested hypervisor) deal with it.
         */
        Log(("iemSvmHandleMsrIntercept: Invalid/out-of-range MSR %#RX32 fWrite=%RTbool -> #VMEXIT\n", idMsr, fWrite));
        return iemSvmVmexit(pVCpu, SVM_EXIT_MSR, uExitInfo1, 0 /* uExitInfo2 */);
    }
    return VINF_SVM_INTERCEPT_NOT_ACTIVE;
}

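/*
 * MSRPM lookup sketch for the low MSR range (illustrative only, '# if 0';
 * assumption per AMD spec. 15.11 "MSR Intercepts": two bits per MSR, the even
 * bit gating reads and the odd bit writes, i.e. four MSRs per byte; the
 * 0xc0000000 and 0xc0010000 ranges follow at offsets 0x800 and 0x1000):
 */
# if 0
static int svmGetMsrpmOffsetAndBitSketch(uint32_t idMsr, uint16_t *poffMsrpm, uint8_t *puMsrpmBit)
{
    if (idMsr <= 0x1fff)
    {
        *poffMsrpm  = (uint16_t)(idMsr >> 2);       /* 4 MSRs per byte */
        *puMsrpmBit = (uint8_t)((idMsr & 3) << 1);  /* read bit; +1 for the write bit */
        return VINF_SUCCESS;
    }
    return VERR_OUT_OF_RANGE;
}
# endif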
1151
1152
1153/**
1154 * Implements 'VMRUN'.
1155 */
1156IEM_CIMPL_DEF_0(iemCImpl_vmrun)
1157{
1158# if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && !defined(IN_RING3)
1159 RT_NOREF2(pVCpu, cbInstr);
1160 return VINF_EM_RAW_EMULATE_INSTR;
1161# else
1162 LogFlow(("iemCImpl_vmrun\n"));
1163 IEM_SVM_INSTR_COMMON_CHECKS(pVCpu, vmrun);
1164
1165 /** @todo Check effective address size using address size prefix. */
1166 RTGCPHYS const GCPhysVmcb = pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT ? pVCpu->cpum.GstCtx.rax : pVCpu->cpum.GstCtx.eax;
1167 if ( (GCPhysVmcb & X86_PAGE_4K_OFFSET_MASK)
1168 || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVmcb))
1169 {
1170 Log(("vmrun: VMCB physaddr (%#RGp) not valid -> #GP(0)\n", GCPhysVmcb));
1171 return iemRaiseGeneralProtectionFault0(pVCpu);
1172 }
1173
1174 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_VMRUN))
1175 {
1176 Log(("vmrun: Guest intercept -> #VMEXIT\n"));
1177 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_VMRUN, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
1178 }
1179
1180 VBOXSTRICTRC rcStrict = iemSvmVmrun(pVCpu, cbInstr, GCPhysVmcb);
1181 if (rcStrict == VERR_SVM_VMEXIT_FAILED)
1182 {
1183 Assert(!CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)));
1184 rcStrict = VINF_EM_TRIPLE_FAULT;
1185 }
1186 return rcStrict;
1187# endif
1188}
1189
1190
1191/**
1192 * Interface for HM and EM to emulate the VMRUN instruction.
1193 *
1194 * @returns Strict VBox status code.
1195 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1196 * @param cbInstr The instruction length in bytes.
1197 * @thread EMT(pVCpu)
1198 */
1199VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmrun(PVMCPUCC pVCpu, uint8_t cbInstr)
1200{
1201 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
1202 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_SVM_VMRUN_MASK);
1203
1204 iemInitExec(pVCpu, false /*fBypassHandlers*/);
1205 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmrun);
1206 Assert(!pVCpu->iem.s.cActiveMappings);
1207 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
1208}
1209
1210
1211/**
1212 * Implements 'VMLOAD'.
1213 */
1214IEM_CIMPL_DEF_0(iemCImpl_vmload)
1215{
1216# if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && !defined(IN_RING3)
1217 RT_NOREF2(pVCpu, cbInstr);
1218 return VINF_EM_RAW_EMULATE_INSTR;
1219# else
1220 LogFlow(("iemCImpl_vmload\n"));
1221 IEM_SVM_INSTR_COMMON_CHECKS(pVCpu, vmload);
1222
1223 /** @todo Check effective address size using address size prefix. */
1224 RTGCPHYS const GCPhysVmcb = pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT ? pVCpu->cpum.GstCtx.rax : pVCpu->cpum.GstCtx.eax;
1225 if ( (GCPhysVmcb & X86_PAGE_4K_OFFSET_MASK)
1226 || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVmcb))
1227 {
1228 Log(("vmload: VMCB physaddr (%#RGp) not valid -> #GP(0)\n", GCPhysVmcb));
1229 return iemRaiseGeneralProtectionFault0(pVCpu);
1230 }
1231
1232 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_VMLOAD))
1233 {
1234 Log(("vmload: Guest intercept -> #VMEXIT\n"));
1235 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_VMLOAD, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
1236 }
1237
1238 SVMVMCBSTATESAVE VmcbNstGst;
1239 VBOXSTRICTRC rcStrict = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &VmcbNstGst, GCPhysVmcb + RT_UOFFSETOF(SVMVMCB, guest),
1240 sizeof(SVMVMCBSTATESAVE));
1241 if (rcStrict == VINF_SUCCESS)
1242 {
1243 LogFlow(("vmload: Loading VMCB at %#RGp enmEffAddrMode=%d\n", GCPhysVmcb, pVCpu->iem.s.enmEffAddrMode));
1244 HMSVM_SEG_REG_COPY_FROM_VMCB(IEM_GET_CTX(pVCpu), &VmcbNstGst, FS, fs);
1245 HMSVM_SEG_REG_COPY_FROM_VMCB(IEM_GET_CTX(pVCpu), &VmcbNstGst, GS, gs);
1246 HMSVM_SEG_REG_COPY_FROM_VMCB(IEM_GET_CTX(pVCpu), &VmcbNstGst, TR, tr);
1247 HMSVM_SEG_REG_COPY_FROM_VMCB(IEM_GET_CTX(pVCpu), &VmcbNstGst, LDTR, ldtr);
1248
1249 pVCpu->cpum.GstCtx.msrKERNELGSBASE = VmcbNstGst.u64KernelGSBase;
1250 pVCpu->cpum.GstCtx.msrSTAR = VmcbNstGst.u64STAR;
1251 pVCpu->cpum.GstCtx.msrLSTAR = VmcbNstGst.u64LSTAR;
1252 pVCpu->cpum.GstCtx.msrCSTAR = VmcbNstGst.u64CSTAR;
1253 pVCpu->cpum.GstCtx.msrSFMASK = VmcbNstGst.u64SFMASK;
1254
1255 pVCpu->cpum.GstCtx.SysEnter.cs = VmcbNstGst.u64SysEnterCS;
1256 pVCpu->cpum.GstCtx.SysEnter.esp = VmcbNstGst.u64SysEnterESP;
1257 pVCpu->cpum.GstCtx.SysEnter.eip = VmcbNstGst.u64SysEnterEIP;
1258
1259 rcStrict = iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
1260 }
1261 return rcStrict;
1262# endif
1263}
1264
1265
1266/**
1267 * Interface for HM and EM to emulate the VMLOAD instruction.
1268 *
1269 * @returns Strict VBox status code.
1270 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1271 * @param cbInstr The instruction length in bytes.
1272 * @thread EMT(pVCpu)
1273 */
1274VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmload(PVMCPUCC pVCpu, uint8_t cbInstr)
1275{
1276 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
1277
1278 iemInitExec(pVCpu, false /*fBypassHandlers*/);
1279 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmload);
1280 Assert(!pVCpu->iem.s.cActiveMappings);
1281 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
1282}
1283
1284
1285/**
1286 * Implements 'VMSAVE'.
1287 */
1288IEM_CIMPL_DEF_0(iemCImpl_vmsave)
1289{
1290# if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && !defined(IN_RING3)
1291 RT_NOREF2(pVCpu, cbInstr);
1292 return VINF_EM_RAW_EMULATE_INSTR;
1293# else
1294 LogFlow(("iemCImpl_vmsave\n"));
1295 IEM_SVM_INSTR_COMMON_CHECKS(pVCpu, vmsave);
1296
1297 /** @todo Check effective address size using address size prefix. */
1298 RTGCPHYS const GCPhysVmcb = pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT ? pVCpu->cpum.GstCtx.rax : pVCpu->cpum.GstCtx.eax;
1299 if ( (GCPhysVmcb & X86_PAGE_4K_OFFSET_MASK)
1300 || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVmcb))
1301 {
1302 Log(("vmsave: VMCB physaddr (%#RGp) not valid -> #GP(0)\n", GCPhysVmcb));
1303 return iemRaiseGeneralProtectionFault0(pVCpu);
1304 }
1305
1306 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_VMSAVE))
1307 {
1308 Log(("vmsave: Guest intercept -> #VMEXIT\n"));
1309 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_VMSAVE, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
1310 }
1311
1312 SVMVMCBSTATESAVE VmcbNstGst;
1313 VBOXSTRICTRC rcStrict = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &VmcbNstGst, GCPhysVmcb + RT_UOFFSETOF(SVMVMCB, guest),
1314 sizeof(SVMVMCBSTATESAVE));
1315 if (rcStrict == VINF_SUCCESS)
1316 {
1317 LogFlow(("vmsave: Saving VMCB at %#RGp enmEffAddrMode=%d\n", GCPhysVmcb, pVCpu->iem.s.enmEffAddrMode));
1318 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_FS | CPUMCTX_EXTRN_GS | CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_LDTR
1319 | CPUMCTX_EXTRN_KERNEL_GS_BASE | CPUMCTX_EXTRN_SYSCALL_MSRS | CPUMCTX_EXTRN_SYSENTER_MSRS);
1320
1321 HMSVM_SEG_REG_COPY_TO_VMCB(IEM_GET_CTX(pVCpu), &VmcbNstGst, FS, fs);
1322 HMSVM_SEG_REG_COPY_TO_VMCB(IEM_GET_CTX(pVCpu), &VmcbNstGst, GS, gs);
1323 HMSVM_SEG_REG_COPY_TO_VMCB(IEM_GET_CTX(pVCpu), &VmcbNstGst, TR, tr);
1324 HMSVM_SEG_REG_COPY_TO_VMCB(IEM_GET_CTX(pVCpu), &VmcbNstGst, LDTR, ldtr);
1325
1326 VmcbNstGst.u64KernelGSBase = pVCpu->cpum.GstCtx.msrKERNELGSBASE;
1327 VmcbNstGst.u64STAR = pVCpu->cpum.GstCtx.msrSTAR;
1328 VmcbNstGst.u64LSTAR = pVCpu->cpum.GstCtx.msrLSTAR;
1329 VmcbNstGst.u64CSTAR = pVCpu->cpum.GstCtx.msrCSTAR;
1330 VmcbNstGst.u64SFMASK = pVCpu->cpum.GstCtx.msrSFMASK;
1331
1332 VmcbNstGst.u64SysEnterCS = pVCpu->cpum.GstCtx.SysEnter.cs;
1333 VmcbNstGst.u64SysEnterESP = pVCpu->cpum.GstCtx.SysEnter.esp;
1334 VmcbNstGst.u64SysEnterEIP = pVCpu->cpum.GstCtx.SysEnter.eip;
1335
1336 rcStrict = PGMPhysSimpleWriteGCPhys(pVCpu->CTX_SUFF(pVM), GCPhysVmcb + RT_UOFFSETOF(SVMVMCB, guest), &VmcbNstGst,
1337 sizeof(SVMVMCBSTATESAVE));
1338 if (rcStrict == VINF_SUCCESS)
1339 rcStrict = iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
1340 }
1341 return rcStrict;
1342# endif
1343}
1344
1345
1346/**
1347 * Interface for HM and EM to emulate the VMSAVE instruction.
1348 *
1349 * @returns Strict VBox status code.
1350 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1351 * @param cbInstr The instruction length in bytes.
1352 * @thread EMT(pVCpu)
1353 */
1354VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmsave(PVMCPUCC pVCpu, uint8_t cbInstr)
1355{
1356 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
1357
1358 iemInitExec(pVCpu, false /*fBypassHandlers*/);
1359 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmsave);
1360 Assert(!pVCpu->iem.s.cActiveMappings);
1361 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
1362}
1363
1364
1365/**
1366 * Implements 'CLGI'.
1367 */
1368IEM_CIMPL_DEF_0(iemCImpl_clgi)
1369{
1370# if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && !defined(IN_RING3)
1371 RT_NOREF2(pVCpu, cbInstr);
1372 return VINF_EM_RAW_EMULATE_INSTR;
1373# else
1374 LogFlow(("iemCImpl_clgi\n"));
1375 IEM_SVM_INSTR_COMMON_CHECKS(pVCpu, clgi);
1376 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_CLGI))
1377 {
1378 Log(("clgi: Guest intercept -> #VMEXIT\n"));
1379 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_CLGI, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
1380 }
1381
1382 CPUMSetGuestGif(&pVCpu->cpum.GstCtx, false);
1383
1384# if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && defined(IN_RING3)
1385 iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
1386 return EMR3SetExecutionPolicy(pVCpu->CTX_SUFF(pVM)->pUVM, EMEXECPOLICY_IEM_ALL, true);
1387# else
1388 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
1389# endif
1390# endif
1391}
1392
1393
1394/**
1395 * Interface for HM and EM to emulate the CLGI instruction.
1396 *
1397 * @returns Strict VBox status code.
1398 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1399 * @param cbInstr The instruction length in bytes.
1400 * @thread EMT(pVCpu)
1401 */
1402VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClgi(PVMCPUCC pVCpu, uint8_t cbInstr)
1403{
1404 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
1405
1406 iemInitExec(pVCpu, false /*fBypassHandlers*/);
1407 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clgi);
1408 Assert(!pVCpu->iem.s.cActiveMappings);
1409 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
1410}
1411
1412
1413/**
1414 * Implements 'STGI'.
1415 */
1416IEM_CIMPL_DEF_0(iemCImpl_stgi)
1417{
1418# if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && !defined(IN_RING3)
1419 RT_NOREF2(pVCpu, cbInstr);
1420 return VINF_EM_RAW_EMULATE_INSTR;
1421# else
1422 LogFlow(("iemCImpl_stgi\n"));
1423 IEM_SVM_INSTR_COMMON_CHECKS(pVCpu, stgi);
1424 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_STGI))
1425 {
1426 Log2(("stgi: Guest intercept -> #VMEXIT\n"));
1427 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_STGI, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
1428 }
1429
1430 CPUMSetGuestGif(&pVCpu->cpum.GstCtx, true);
1431
1432# if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && defined(IN_RING3)
1433 iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
1434 return EMR3SetExecutionPolicy(pVCpu->CTX_SUFF(pVM)->pUVM, EMEXECPOLICY_IEM_ALL, false);
1435# else
1436 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
1437# endif
1438# endif
1439}
1440
1441
1442/**
1443 * Interface for HM and EM to emulate the STGI instruction.
1444 *
1445 * @returns Strict VBox status code.
1446 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1447 * @param cbInstr The instruction length in bytes.
1448 * @thread EMT(pVCpu)
1449 */
1450VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedStgi(PVMCPUCC pVCpu, uint8_t cbInstr)
1451{
1452 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
1453
1454 iemInitExec(pVCpu, false /*fBypassHandlers*/);
1455 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_stgi);
1456 Assert(!pVCpu->iem.s.cActiveMappings);
1457 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
1458}
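
/*
 * Editor's note: for context, a hypervisor typically brackets the VMRUN world
 * switch with CLGI/STGI so host interrupts stay held off while additional
 * guest state is loaded and the nested guest runs.  An illustrative 64-bit
 * sketch in GCC inline assembly (not VirtualBox code); rAX carries the VMCB
 * physical address as VMLOAD/VMRUN/VMSAVE require:
 *
 * @code
 *  static void SvmRunGuestSketch(uint64_t HCPhysVmcb)
 *  {
 *      __asm__ __volatile__("clgi         \n\t"   // GIF=0: hold off interrupts
 *                           "vmload %%rax \n\t"   // load additional guest state
 *                           "vmrun  %%rax \n\t"   // run the nested guest
 *                           "vmsave %%rax \n\t"   // stash additional guest state
 *                           "stgi"                // GIF=1: interrupts flow again
 *                           : : "a" (HCPhysVmcb) : "memory", "cc");
 *  }
 * @endcode
 */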
1459
1460
1461/**
1462 * Implements 'INVLPGA'.
1463 */
1464IEM_CIMPL_DEF_0(iemCImpl_invlpga)
1465{
1466 /** @todo Check effective address size using address size prefix. */
1467 RTGCPTR const GCPtrPage = pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT ? pVCpu->cpum.GstCtx.rax : pVCpu->cpum.GstCtx.eax;
1468 /** @todo PGM needs virtual ASID support. */
1469# if 0
1470 uint32_t const uAsid = pVCpu->cpum.GstCtx.ecx;
1471# endif
1472
1473 IEM_SVM_INSTR_COMMON_CHECKS(pVCpu, invlpga);
1474 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_INVLPGA))
1475 {
1476 Log2(("invlpga: Guest intercept (%RGp) -> #VMEXIT\n", GCPtrPage));
1477 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_INVLPGA, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
1478 }
1479
1480 PGMInvalidatePage(pVCpu, GCPtrPage);
1481 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
1482}
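
/*
 * Editor's note: an illustrative guest-side wrapper for INVLPGA (assumed
 * helper, not VirtualBox code), matching the register usage decoded above:
 * linear address in rAX, ASID in ECX.
 *
 * @code
 *  static inline void SvmInvlpgaSketch(void *pvPage, uint32_t uAsid)
 *  {
 *      __asm__ __volatile__("invlpga %0, %1" : : "a" (pvPage), "c" (uAsid) : "memory");
 *  }
 * @endcode
 */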
1483
1484
1485/**
1486 * Interface for HM and EM to emulate the INVLPGA instruction.
1487 *
1488 * @returns Strict VBox status code.
1489 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1490 * @param cbInstr The instruction length in bytes.
1491 * @thread EMT(pVCpu)
1492 */
1493VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvlpga(PVMCPUCC pVCpu, uint8_t cbInstr)
1494{
1495 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
1496
1497 iemInitExec(pVCpu, false /*fBypassHandlers*/);
1498 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_invlpga);
1499 Assert(!pVCpu->iem.s.cActiveMappings);
1500 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
1501}
1502
1503
1504/**
1505 * Implements 'SKINIT'.
1506 */
1507IEM_CIMPL_DEF_0(iemCImpl_skinit)
1508{
 1509 IEM_SVM_INSTR_COMMON_CHECKS(pVCpu, skinit);
1510
1511 uint32_t uIgnore;
1512 uint32_t fFeaturesECX;
1513 CPUMGetGuestCpuId(pVCpu, 0x80000001, 0 /* iSubLeaf */, -1 /*f64BitMode*/, &uIgnore, &uIgnore, &fFeaturesECX, &uIgnore);
1514 if (!(fFeaturesECX & X86_CPUID_AMD_FEATURE_ECX_SKINIT))
1515 return iemRaiseUndefinedOpcode(pVCpu);
1516
1517 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_SKINIT))
1518 {
1519 Log2(("skinit: Guest intercept -> #VMEXIT\n"));
1520 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_SKINIT, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
1521 }
1522
1523 RT_NOREF(cbInstr);
1524 return VERR_IEM_INSTR_NOT_IMPLEMENTED;
1525}
1526
1527
1528/**
 1529 * Implements PAUSE, including SVM pause-filter handling.
1530 */
1531IEM_CIMPL_DEF_0(iemCImpl_svm_pause)
1532{
1533 bool fCheckIntercept = true;
1534 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSvmPauseFilter)
1535 {
1536 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_HWVIRT);
1537
1538 /* TSC based pause-filter thresholding. */
1539 if ( IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSvmPauseFilterThreshold
1540 && pVCpu->cpum.GstCtx.hwvirt.svm.cPauseFilterThreshold > 0)
1541 {
1542 uint64_t const uTick = TMCpuTickGet(pVCpu);
1543 if (uTick - pVCpu->cpum.GstCtx.hwvirt.svm.uPrevPauseTick > pVCpu->cpum.GstCtx.hwvirt.svm.cPauseFilterThreshold)
1544 pVCpu->cpum.GstCtx.hwvirt.svm.cPauseFilter = CPUMGetGuestSvmPauseFilterCount(pVCpu, IEM_GET_CTX(pVCpu));
1545 pVCpu->cpum.GstCtx.hwvirt.svm.uPrevPauseTick = uTick;
1546 }
1547
1548 /* Simple pause-filter counter. */
1549 if (pVCpu->cpum.GstCtx.hwvirt.svm.cPauseFilter > 0)
1550 {
1551 --pVCpu->cpum.GstCtx.hwvirt.svm.cPauseFilter;
1552 fCheckIntercept = false;
1553 }
1554 }
1555
1556 if (fCheckIntercept)
1557 IEM_SVM_CHECK_INSTR_INTERCEPT(pVCpu, SVM_CTRL_INTERCEPT_PAUSE, SVM_EXIT_PAUSE, 0, 0, cbInstr);
1558
1559 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
1560}
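
/*
 * Editor's note: a self-contained model of the pause-filter accounting above
 * (illustrative only; all names are assumptions).  With a reload count of 3
 * and a threshold of 4000 TSC ticks, three closely spaced PAUSEs are
 * absorbed, the fourth reaches the intercept check, and a PAUSE arriving
 * more than 4000 ticks after the previous one reloads the counter first.
 *
 * @code
 *  typedef struct PAUSEFILTERSKETCH
 *  {
 *      uint16_t cFilter;          // PAUSEs left to absorb
 *      uint16_t cFilterReload;    // VMCB pause-filter count
 *      uint64_t cThresholdTicks;  // VMCB pause-filter threshold
 *      uint64_t uPrevTick;        // TSC at the previous PAUSE
 *  } PAUSEFILTERSKETCH;
 *
 *  static bool PauseReachesInterceptCheck(PAUSEFILTERSKETCH *pThis, uint64_t uTick)
 *  {
 *      if (pThis->cThresholdTicks && uTick - pThis->uPrevTick > pThis->cThresholdTicks)
 *          pThis->cFilter = pThis->cFilterReload;  // long gap: start a new burst
 *      pThis->uPrevTick = uTick;
 *      if (pThis->cFilter)
 *      {
 *          pThis->cFilter--;                       // absorb this PAUSE
 *          return false;
 *      }
 *      return true;                                // counter exhausted: check intercept
 *  }
 * @endcode
 */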
1561
1562#endif /* VBOX_WITH_NESTED_HWVIRT_SVM */
1563
1564/**
 1565 * Common code for iemCImpl_vmmcall and iemCImpl_vmcall (the latter lives in IEMAllCImplVmxInstr.cpp.h).
1566 */
1567IEM_CIMPL_DEF_1(iemCImpl_Hypercall, uint16_t, uDisOpcode)
1568{
1569 if (EMAreHypercallInstructionsEnabled(pVCpu))
1570 {
1572 VBOXSTRICTRC rcStrict = GIMHypercallEx(pVCpu, IEM_GET_CTX(pVCpu), uDisOpcode, cbInstr);
1573 if (RT_SUCCESS(rcStrict))
1574 {
1575 /** @todo finish: Sort out assertion here when iemRegAddToRipAndFinishingClearingRF
1576 * starts returning non-VINF_SUCCESS statuses. */
1577 if (rcStrict == VINF_SUCCESS)
1578 rcStrict = iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
1579 if ( rcStrict == VINF_SUCCESS
1580 || rcStrict == VINF_GIM_HYPERCALL_CONTINUING)
1581 return VINF_SUCCESS;
1582 AssertMsgReturn(rcStrict == VINF_GIM_R3_HYPERCALL, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)), VERR_IEM_IPE_4);
1583 return rcStrict;
1584 }
1585 AssertMsgReturn( rcStrict == VERR_GIM_HYPERCALL_ACCESS_DENIED
1586 || rcStrict == VERR_GIM_HYPERCALLS_NOT_AVAILABLE
1587 || rcStrict == VERR_GIM_NOT_ENABLED
1588 || rcStrict == VERR_GIM_HYPERCALL_MEMORY_READ_FAILED
1589 || rcStrict == VERR_GIM_HYPERCALL_MEMORY_WRITE_FAILED,
1590 ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)), VERR_IEM_IPE_4);
1591
1592 /* Raise #UD on all failures. */
1593 }
1594 return iemRaiseUndefinedOpcode(pVCpu);
1595}
1596
1597
1598/**
1599 * Implements 'VMMCALL'.
1600 */
1601IEM_CIMPL_DEF_0(iemCImpl_vmmcall)
1602{
1603 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_VMMCALL))
1604 {
1605 Log(("vmmcall: Guest intercept -> #VMEXIT\n"));
1606 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_VMMCALL, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
1607 }
1608
 1609 /* This is a little more involved than the VT-x version because HM/SVM may
 1610 patch MOV CR8 instructions to speed up APIC.TPR access for 32-bit Windows guests. */
1611 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1612 if (VM_IS_HM_ENABLED(pVM))
1613 {
1614 int rc = HMHCMaybeMovTprSvmHypercall(pVM, pVCpu);
1615 if (RT_SUCCESS(rc))
1616 {
1617 Log(("vmmcall: MovTpr\n"));
1618 return VINF_SUCCESS;
1619 }
1620 }
1621
1622 /* Join forces with vmcall. */
1623 return IEM_CIMPL_CALL_1(iemCImpl_Hypercall, OP_VMMCALL);
1624}
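
/*
 * Editor's note: an illustrative guest-side VMMCALL wrapper (not VirtualBox
 * code).  The actual hypercall ABI is provider-specific and dispatched by
 * GIMHypercallEx above; the register assignments here are assumptions made
 * for illustration.
 *
 * @code
 *  static inline uint64_t GuestVmmcallSketch(uint64_t uCallCode, uint64_t uArg)
 *  {
 *      uint64_t uRet;
 *      __asm__ __volatile__("vmmcall" : "=a" (uRet) : "a" (uCallCode), "b" (uArg) : "memory");
 *      return uRet;
 *  }
 * @endcode
 */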
1625