VirtualBox

source: vbox/trunk/src/VBox/VMM/include/HMInternal.h@ 87626

Last change on this file since 87626 was 87606, checked in by vboxsync, 4 years ago

VMM/HMVMX: Translate fMdsClearOnSched and fL1dFlushOnSched to world switcher flags too and use them in VMXR0Enter and VMXR0ThreadCtxCallback. Added missing MDS flushing to the latter. Moved the flushing up to the start of the functions. bugref:9453 bugref:9087

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 72.5 KB
1/* $Id: HMInternal.h 87606 2021-02-04 13:35:36Z vboxsync $ */
2/** @file
3 * HM - Internal header file.
4 */
5
6/*
7 * Copyright (C) 2006-2020 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.215389.xyz. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18#ifndef VMM_INCLUDED_SRC_include_HMInternal_h
19#define VMM_INCLUDED_SRC_include_HMInternal_h
20#ifndef RT_WITHOUT_PRAGMA_ONCE
21# pragma once
22#endif
23
24#include <VBox/cdefs.h>
25#include <VBox/types.h>
26#include <VBox/vmm/stam.h>
27#include <VBox/dis.h>
28#include <VBox/vmm/hm.h>
29#include <VBox/vmm/hm_vmx.h>
30#include <VBox/vmm/hm_svm.h>
31#include <VBox/vmm/pgm.h>
32#include <VBox/vmm/cpum.h>
33#include <VBox/vmm/trpm.h>
34#include <iprt/memobj.h>
35#include <iprt/cpuset.h>
36#include <iprt/mp.h>
37#include <iprt/avl.h>
38#include <iprt/string.h>
39
40#if HC_ARCH_BITS == 32
41# error "32-bit hosts are no longer supported. Go back to 6.0 or earlier!"
42#endif
43
44/** @def HM_PROFILE_EXIT_DISPATCH
45 * Enables profiling of the VM exit handler dispatching. */
46#if 0 || defined(DOXYGEN_RUNNING)
47# define HM_PROFILE_EXIT_DISPATCH
48#endif
49
50RT_C_DECLS_BEGIN
51
52
53/** @defgroup grp_hm_int Internal
54 * @ingroup grp_hm
55 * @internal
56 * @{
57 */
58
59/** @name HM_CHANGED_XXX
60 * HM CPU-context changed flags.
61 *
62 * These flags are used to keep track of which registers and state has been
63 * modified since they were imported back into the guest-CPU context.
64 *
65 * @{
66 */
67#define HM_CHANGED_HOST_CONTEXT UINT64_C(0x0000000000000001)
68#define HM_CHANGED_GUEST_RIP UINT64_C(0x0000000000000004)
69#define HM_CHANGED_GUEST_RFLAGS UINT64_C(0x0000000000000008)
70
71#define HM_CHANGED_GUEST_RAX UINT64_C(0x0000000000000010)
72#define HM_CHANGED_GUEST_RCX UINT64_C(0x0000000000000020)
73#define HM_CHANGED_GUEST_RDX UINT64_C(0x0000000000000040)
74#define HM_CHANGED_GUEST_RBX UINT64_C(0x0000000000000080)
75#define HM_CHANGED_GUEST_RSP UINT64_C(0x0000000000000100)
76#define HM_CHANGED_GUEST_RBP UINT64_C(0x0000000000000200)
77#define HM_CHANGED_GUEST_RSI UINT64_C(0x0000000000000400)
78#define HM_CHANGED_GUEST_RDI UINT64_C(0x0000000000000800)
79#define HM_CHANGED_GUEST_R8_R15 UINT64_C(0x0000000000001000)
80#define HM_CHANGED_GUEST_GPRS_MASK UINT64_C(0x0000000000001ff0)
81
82#define HM_CHANGED_GUEST_ES UINT64_C(0x0000000000002000)
83#define HM_CHANGED_GUEST_CS UINT64_C(0x0000000000004000)
84#define HM_CHANGED_GUEST_SS UINT64_C(0x0000000000008000)
85#define HM_CHANGED_GUEST_DS UINT64_C(0x0000000000010000)
86#define HM_CHANGED_GUEST_FS UINT64_C(0x0000000000020000)
87#define HM_CHANGED_GUEST_GS UINT64_C(0x0000000000040000)
88#define HM_CHANGED_GUEST_SREG_MASK UINT64_C(0x000000000007e000)
89
90#define HM_CHANGED_GUEST_GDTR UINT64_C(0x0000000000080000)
91#define HM_CHANGED_GUEST_IDTR UINT64_C(0x0000000000100000)
92#define HM_CHANGED_GUEST_LDTR UINT64_C(0x0000000000200000)
93#define HM_CHANGED_GUEST_TR UINT64_C(0x0000000000400000)
94#define HM_CHANGED_GUEST_TABLE_MASK UINT64_C(0x0000000000780000)
95
96#define HM_CHANGED_GUEST_CR0 UINT64_C(0x0000000000800000)
97#define HM_CHANGED_GUEST_CR2 UINT64_C(0x0000000001000000)
98#define HM_CHANGED_GUEST_CR3 UINT64_C(0x0000000002000000)
99#define HM_CHANGED_GUEST_CR4 UINT64_C(0x0000000004000000)
100#define HM_CHANGED_GUEST_CR_MASK UINT64_C(0x0000000007800000)
101
102#define HM_CHANGED_GUEST_APIC_TPR UINT64_C(0x0000000008000000)
103#define HM_CHANGED_GUEST_EFER_MSR UINT64_C(0x0000000010000000)
104
105#define HM_CHANGED_GUEST_DR0_DR3 UINT64_C(0x0000000020000000)
106#define HM_CHANGED_GUEST_DR6 UINT64_C(0x0000000040000000)
107#define HM_CHANGED_GUEST_DR7 UINT64_C(0x0000000080000000)
108#define HM_CHANGED_GUEST_DR_MASK UINT64_C(0x00000000e0000000)
109
110#define HM_CHANGED_GUEST_X87 UINT64_C(0x0000000100000000)
111#define HM_CHANGED_GUEST_SSE_AVX UINT64_C(0x0000000200000000)
112#define HM_CHANGED_GUEST_OTHER_XSAVE UINT64_C(0x0000000400000000)
113#define HM_CHANGED_GUEST_XCRx UINT64_C(0x0000000800000000)
114
115#define HM_CHANGED_GUEST_KERNEL_GS_BASE UINT64_C(0x0000001000000000)
116#define HM_CHANGED_GUEST_SYSCALL_MSRS UINT64_C(0x0000002000000000)
117#define HM_CHANGED_GUEST_SYSENTER_CS_MSR UINT64_C(0x0000004000000000)
118#define HM_CHANGED_GUEST_SYSENTER_EIP_MSR UINT64_C(0x0000008000000000)
119#define HM_CHANGED_GUEST_SYSENTER_ESP_MSR UINT64_C(0x0000010000000000)
120#define HM_CHANGED_GUEST_SYSENTER_MSR_MASK UINT64_C(0x000001c000000000)
121#define HM_CHANGED_GUEST_TSC_AUX UINT64_C(0x0000020000000000)
122#define HM_CHANGED_GUEST_OTHER_MSRS UINT64_C(0x0000040000000000)
123#define HM_CHANGED_GUEST_ALL_MSRS ( HM_CHANGED_GUEST_EFER_MSR \
124 | HM_CHANGED_GUEST_KERNEL_GS_BASE \
125 | HM_CHANGED_GUEST_SYSCALL_MSRS \
126 | HM_CHANGED_GUEST_SYSENTER_MSR_MASK \
127 | HM_CHANGED_GUEST_TSC_AUX \
128 | HM_CHANGED_GUEST_OTHER_MSRS)
129
130#define HM_CHANGED_GUEST_HWVIRT UINT64_C(0x0000080000000000)
131#define HM_CHANGED_GUEST_MASK UINT64_C(0x00000ffffffffffc)
132
133#define HM_CHANGED_KEEPER_STATE_MASK UINT64_C(0xffff000000000000)
134
135#define HM_CHANGED_VMX_XCPT_INTERCEPTS UINT64_C(0x0001000000000000)
136#define HM_CHANGED_VMX_GUEST_AUTO_MSRS UINT64_C(0x0002000000000000)
137#define HM_CHANGED_VMX_GUEST_LAZY_MSRS UINT64_C(0x0004000000000000)
138#define HM_CHANGED_VMX_ENTRY_EXIT_CTLS UINT64_C(0x0008000000000000)
139#define HM_CHANGED_VMX_MASK UINT64_C(0x000f000000000000)
140#define HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE ( HM_CHANGED_GUEST_DR_MASK \
141 | HM_CHANGED_VMX_GUEST_LAZY_MSRS)
142
143#define HM_CHANGED_SVM_XCPT_INTERCEPTS UINT64_C(0x0001000000000000)
144#define HM_CHANGED_SVM_MASK UINT64_C(0x0001000000000000)
145#define HM_CHANGED_SVM_HOST_GUEST_SHARED_STATE HM_CHANGED_GUEST_DR_MASK
146
147#define HM_CHANGED_ALL_GUEST ( HM_CHANGED_GUEST_MASK \
148 | HM_CHANGED_KEEPER_STATE_MASK)
149
150/** Mask of what state might have changed when IEM raised an exception.
151 * This is based on IEM_CPUMCTX_EXTRN_XCPT_MASK. */
152#define HM_CHANGED_RAISED_XCPT_MASK ( HM_CHANGED_GUEST_GPRS_MASK \
153 | HM_CHANGED_GUEST_RIP \
154 | HM_CHANGED_GUEST_RFLAGS \
155 | HM_CHANGED_GUEST_SS \
156 | HM_CHANGED_GUEST_CS \
157 | HM_CHANGED_GUEST_CR0 \
158 | HM_CHANGED_GUEST_CR3 \
159 | HM_CHANGED_GUEST_CR4 \
160 | HM_CHANGED_GUEST_APIC_TPR \
161 | HM_CHANGED_GUEST_EFER_MSR \
162 | HM_CHANGED_GUEST_DR7 \
163 | HM_CHANGED_GUEST_CR2 \
164 | HM_CHANGED_GUEST_SREG_MASK \
165 | HM_CHANGED_GUEST_TABLE_MASK)
166
167#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
168/** Mask of what state might have changed when \#VMEXIT is emulated. */
169# define HM_CHANGED_SVM_VMEXIT_MASK ( HM_CHANGED_GUEST_RSP \
170 | HM_CHANGED_GUEST_RAX \
171 | HM_CHANGED_GUEST_RIP \
172 | HM_CHANGED_GUEST_RFLAGS \
173 | HM_CHANGED_GUEST_CS \
174 | HM_CHANGED_GUEST_SS \
175 | HM_CHANGED_GUEST_DS \
176 | HM_CHANGED_GUEST_ES \
177 | HM_CHANGED_GUEST_GDTR \
178 | HM_CHANGED_GUEST_IDTR \
179 | HM_CHANGED_GUEST_CR_MASK \
180 | HM_CHANGED_GUEST_EFER_MSR \
181 | HM_CHANGED_GUEST_DR6 \
182 | HM_CHANGED_GUEST_DR7 \
183 | HM_CHANGED_GUEST_OTHER_MSRS \
184 | HM_CHANGED_GUEST_HWVIRT \
185 | HM_CHANGED_SVM_MASK \
186 | HM_CHANGED_GUEST_APIC_TPR)
187
188/** Mask of what state might have changed when VMRUN is emulated. */
189# define HM_CHANGED_SVM_VMRUN_MASK HM_CHANGED_SVM_VMEXIT_MASK
190#endif
191#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
192/** Mask of what state might have changed when VM-exit is emulated.
193 *
194 * This is currently unused, but keeping it here in case we can get away with a bit more
195 * fine-grained state handling.
196 *
197 * @note Update IEM_CPUMCTX_EXTRN_VMX_VMEXIT_MASK when this changes. */
198# define HM_CHANGED_VMX_VMEXIT_MASK ( HM_CHANGED_GUEST_CR0 | HM_CHANGED_GUEST_CR3 | HM_CHANGED_GUEST_CR4 \
199 | HM_CHANGED_GUEST_DR7 | HM_CHANGED_GUEST_DR6 \
200 | HM_CHANGED_GUEST_EFER_MSR \
201 | HM_CHANGED_GUEST_SYSENTER_MSR_MASK \
202 | HM_CHANGED_GUEST_OTHER_MSRS /* for PAT MSR */ \
203 | HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS \
204 | HM_CHANGED_GUEST_SREG_MASK \
205 | HM_CHANGED_GUEST_TR \
206 | HM_CHANGED_GUEST_LDTR | HM_CHANGED_GUEST_GDTR | HM_CHANGED_GUEST_IDTR \
207 | HM_CHANGED_GUEST_HWVIRT )
208#endif
209/** @} */
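#if 0 /* Example only -- not part of the original header.  Shows the typical
         pattern (used by the VMX/SVM ring-0 code) of OR-ing HM_CHANGED_XXX
         bits into the fCtxChanged member of HMCPU (defined further down)
         after modifying guest state; the exact call site is an illustration,
         not canonical VMM code. */
DECLINLINE(void) hmExampleMarkRipRflagsDirty(PVMCPUCC pVCpu)
{
    /* RIP and RFLAGS were modified (e.g. by instruction emulation), so flag
       them for re-export into the VMCS/VMCB before the next guest run. */
    ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
}
#endif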
210
211/** Maximum number of exit reason statistics counters. */
212#define MAX_EXITREASON_STAT 0x100
213#define MASK_EXITREASON_STAT 0xff
214#define MASK_INJECT_IRQ_STAT 0xff
215
216/** Size for the EPT identity page table (1024 4 MB pages to cover the entire address space). */
217#define HM_EPT_IDENTITY_PG_TABLE_SIZE PAGE_SIZE
218/** Size of the TSS structure + 2 pages for the IO bitmap + end byte. */
219#define HM_VTX_TSS_SIZE (sizeof(VBOXTSS) + 2 * PAGE_SIZE + 1)
220/** Total guest mapped memory needed. */
221#define HM_VTX_TOTAL_DEVHEAP_MEM (HM_EPT_IDENTITY_PG_TABLE_SIZE + HM_VTX_TSS_SIZE)
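#if 0 /* Example only -- not part of the original header.  Spells out the
         arithmetic behind the constant above: one page for the EPT identity
         page table plus the TSS, its 2-page I/O bitmap and the end byte. */
AssertCompile(HM_VTX_TOTAL_DEVHEAP_MEM == PAGE_SIZE + sizeof(VBOXTSS) + 2 * PAGE_SIZE + 1);
#endif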
222
223
224/** @name Macros for enabling and disabling preemption.
225 * These are really just for hiding the RTTHREADPREEMPTSTATE and asserting that
226 * preemption has already been disabled when there is no context hook.
227 * @{ */
228#ifdef VBOX_STRICT
229# define HM_DISABLE_PREEMPT(a_pVCpu) \
230 RTTHREADPREEMPTSTATE PreemptStateInternal = RTTHREADPREEMPTSTATE_INITIALIZER; \
231 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD) || VMMR0ThreadCtxHookIsEnabled((a_pVCpu))); \
232 RTThreadPreemptDisable(&PreemptStateInternal)
233#else
234# define HM_DISABLE_PREEMPT(a_pVCpu) \
235 RTTHREADPREEMPTSTATE PreemptStateInternal = RTTHREADPREEMPTSTATE_INITIALIZER; \
236 RTThreadPreemptDisable(&PreemptStateInternal)
237#endif /* VBOX_STRICT */
238#define HM_RESTORE_PREEMPT() do { RTThreadPreemptRestore(&PreemptStateInternal); } while(0)
239/** @} */
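#if 0 /* Example only -- not part of the original header.  Typical usage
         pattern for the macros above; the function name and the work done
         inside the critical region are made up for illustration. */
static void hmExampleTouchPerHostCpuData(PVMCPUCC pVCpu)
{
    HM_DISABLE_PREEMPT(pVCpu);      /* declares PreemptStateInternal and disables preemption */
    /* ... access per-host-CPU data that must not move to another CPU ... */
    HM_RESTORE_PREEMPT();           /* re-enables preemption using the same local state */
}
#endif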
240
241
242/** @name HM saved state versions.
243 * @{
244 */
245#define HM_SAVED_STATE_VERSION HM_SAVED_STATE_VERSION_SVM_NESTED_HWVIRT
246#define HM_SAVED_STATE_VERSION_SVM_NESTED_HWVIRT 6
247#define HM_SAVED_STATE_VERSION_TPR_PATCHING 5
248#define HM_SAVED_STATE_VERSION_NO_TPR_PATCHING 4
249#define HM_SAVED_STATE_VERSION_2_0_X 3
250/** @} */
251
252
253/**
254 * HM physical (host) CPU information.
255 */
256typedef struct HMPHYSCPU
257{
258 /** The CPU ID. */
259 RTCPUID idCpu;
260 /** The VM_HSAVE_AREA (AMD-V) / VMXON region (Intel) memory backing. */
261 RTR0MEMOBJ hMemObj;
262 /** The physical address of the first page in hMemObj (it's a
263 * physically contiguous allocation if it spans multiple pages). */
264 RTHCPHYS HCPhysMemObj;
265 /** The address of the memory (for pfnEnable). */
266 void *pvMemObj;
267 /** Current ASID (AMD-V) / VPID (Intel). */
268 uint32_t uCurrentAsid;
269 /** TLB flush count. */
270 uint32_t cTlbFlushes;
271 /** Whether to flush each new ASID/VPID before use. */
272 bool fFlushAsidBeforeUse;
273 /** Configured for VT-x or AMD-V. */
274 bool fConfigured;
275 /** Set if the VBOX_HWVIRTEX_IGNORE_SVM_IN_USE hack is active. */
276 bool fIgnoreAMDVInUseError;
277 /** Whether CR4.VMXE was already enabled prior to us enabling it. */
278 bool fVmxeAlreadyEnabled;
279 /** In use by our code. (for power suspend) */
280 bool volatile fInUse;
281#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
282 /** Nested-guest union (put data common to SVM/VMX outside the union). */
283 union
284 {
285 /** Nested-guest SVM data. */
286 struct
287 {
288 /** The active nested-guest MSR permission bitmap memory backing. */
289 RTR0MEMOBJ hNstGstMsrpm;
290 /** The physical address of the first page in hNstGstMsrpm (physically
291 * contiguous allocation). */
292 RTHCPHYS HCPhysNstGstMsrpm;
293 /** The address of the active nested-guest MSRPM. */
294 void *pvNstGstMsrpm;
295 } svm;
296 /** @todo Nested-VMX. */
297 } n;
298#endif
299} HMPHYSCPU;
300/** Pointer to HMPHYSCPU struct. */
301typedef HMPHYSCPU *PHMPHYSCPU;
302/** Pointer to a const HMPHYSCPU struct. */
303typedef const HMPHYSCPU *PCHMPHYSCPU;
304
305/**
306 * TPR-instruction type.
307 */
308typedef enum
309{
310 HMTPRINSTR_INVALID,
311 HMTPRINSTR_READ,
312 HMTPRINSTR_READ_SHR4,
313 HMTPRINSTR_WRITE_REG,
314 HMTPRINSTR_WRITE_IMM,
315 HMTPRINSTR_JUMP_REPLACEMENT,
316 /** The usual 32-bit paranoia. */
317 HMTPRINSTR_32BIT_HACK = 0x7fffffff
318} HMTPRINSTR;
319
320/**
321 * TPR patch information.
322 */
323typedef struct
324{
325 /** The key is the address of patched instruction. (32 bits GC ptr) */
326 AVLOU32NODECORE Core;
327 /** Original opcode. */
328 uint8_t aOpcode[16];
329 /** Instruction size. */
330 uint32_t cbOp;
331 /** Replacement opcode. */
332 uint8_t aNewOpcode[16];
333 /** Replacement instruction size. */
334 uint32_t cbNewOp;
335 /** Instruction type. */
336 HMTPRINSTR enmType;
337 /** Source operand. */
338 uint32_t uSrcOperand;
339 /** Destination operand. */
340 uint32_t uDstOperand;
341 /** Number of times the instruction caused a fault. */
342 uint32_t cFaults;
343 /** Patch address of the jump replacement. */
344 RTGCPTR32 pJumpTarget;
345} HMTPRPATCH;
346/** Pointer to HMTPRPATCH. */
347typedef HMTPRPATCH *PHMTPRPATCH;
348/** Pointer to a const HMTPRPATCH. */
349typedef const HMTPRPATCH *PCHMTPRPATCH;
350
351
352/**
353 * Makes a HMEXITSTAT::uKey value from a program counter and an exit code.
354 *
355 * @returns 64-bit key
356 * @param a_uPC The RIP + CS.BASE value of the exit.
357 * @param a_uExit The exit code.
358 * @todo Add CPL?
359 */
360#define HMEXITSTAT_MAKE_KEY(a_uPC, a_uExit) (((a_uPC) & UINT64_C(0x0000ffffffffffff)) | (uint64_t)(a_uExit) << 48)
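#if 0 /* Example only -- not part of the original header.  A CPUID exit (VT-x
         basic exit reason 10) at guest PC 0xfffff80000123456 yields a key
         with the PC in bits 0..47 and the exit code in bits 48..63. */
uint64_t const uKey = HMEXITSTAT_MAKE_KEY(UINT64_C(0xfffff80000123456), 10);
#endif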
361
362typedef struct HMEXITINFO
363{
364 /** See HMEXITSTAT_MAKE_KEY(). */
365 uint64_t uKey;
366 /** Number of recent hits (depreciates with time). */
367 uint32_t volatile cHits;
368 /** The age + lock. */
369 uint16_t volatile uAge;
370 /** Action or action table index. */
371 uint16_t iAction;
372} HMEXITINFO;
373AssertCompileSize(HMEXITINFO, 16); /* Lots of these guys, so don't add any unnecessary stuff! */
374
375typedef struct HMEXITHISTORY
376{
377 /** The exit timestamp. */
378 uint64_t uTscExit;
379 /** The index of the corresponding HMEXITINFO entry.
380 * UINT32_MAX if none (too many collisions, race, whatever). */
381 uint32_t iExitInfo;
382 /** Figure out later, needed for padding now. */
383 uint32_t uSomeClueOrSomething;
384} HMEXITHISTORY;
385
386/**
387 * Switcher function, HC to the special 64-bit RC.
388 *
389 * @param pVM The cross context VM structure.
390 * @param offCpumVCpu Offset from pVM->cpum to pVM->aCpus[idCpu].cpum.
391 * @returns Return code indicating the action to take.
392 */
393typedef DECLCALLBACKTYPE(int, FNHMSWITCHERHC,(PVM pVM, uint32_t offCpumVCpu));
394/** Pointer to switcher function. */
395typedef FNHMSWITCHERHC *PFNHMSWITCHERHC;
396
397
398/**
399 * HM event.
400 *
401 * VT-x and AMD-V common event injection structure.
402 */
403typedef struct HMEVENT
404{
405 /** Whether the event is pending. */
406 uint32_t fPending;
407 /** The error-code associated with the event. */
408 uint32_t u32ErrCode;
409 /** The length of the instruction in bytes (only relevant for software
410 * interrupts or software exceptions). */
411 uint32_t cbInstr;
412 /** Alignment. */
413 uint32_t u32Padding;
414 /** The encoded event (VM-entry interruption-information for VT-x or EVENTINJ
415 * for SVM). */
416 uint64_t u64IntInfo;
417 /** Guest virtual address if this is a page-fault event. */
418 RTGCUINTPTR GCPtrFaultAddress;
419} HMEVENT;
420/** Pointer to a HMEVENT struct. */
421typedef HMEVENT *PHMEVENT;
422/** Pointer to a const HMEVENT struct. */
423typedef const HMEVENT *PCHMEVENT;
424AssertCompileSizeAlignment(HMEVENT, 8);
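#if 0 /* Example only -- not part of the original header.  A rough sketch of
         queueing a pending #PF event in this structure; the u64IntInfo
         encoding (VM-entry interruption info on VT-x, EVENTINJ on SVM) is
         hardware specific and intentionally left out here. */
DECLINLINE(void) hmExampleQueuePageFault(PHMEVENT pEvent, uint32_t uErrCode, RTGCUINTPTR GCPtrFault)
{
    RT_ZERO(*pEvent);
    pEvent->fPending          = true;
    pEvent->u32ErrCode        = uErrCode;
    pEvent->GCPtrFaultAddress = GCPtrFault;
    /* pEvent->u64IntInfo would be filled with the encoded vector/type here. */
}
#endif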
425
426/**
427 * HM VM Instance data.
428 * Changes to this must be checked against the padding of the hm union in VM!
429 */
430typedef struct HM
431{
432 /** Set when the debug facility has breakpoints/events enabled that requires
433 * us to use the debug execution loop in ring-0. */
434 bool fUseDebugLoop;
435 /** Set when TPR patching is allowed. */
436 bool fTprPatchingAllowed;
437 /** Set when TPR patching is active. */
438 bool fTprPatchingActive;
439 /** Alignment padding. */
440 bool afAlignment1[5];
441
442 struct
443 {
444 /** Set by the ring-0 side of HM to indicate VMX is supported by the CPU. */
445 bool fSupported;
446 /** Set when we've enabled VMX. */
447 bool fEnabled;
448 /** The shift mask employed by the VMX-Preemption timer (set by ring-0). */
449 uint8_t cPreemptTimerShift;
450 bool fAlignment1;
451
452 /** @name Configuration (gets copied if problematic)
453 * @{ */
454 /** Set if Last Branch Record (LBR) is enabled. */
455 bool fLbrCfg;
456 /** Set if VT-x VPID is allowed. */
457 bool fAllowVpid;
458 /** Set if unrestricted guest execution is in use (real and protected mode
459 * without paging). */
460 bool fUnrestrictedGuestCfg;
461 /** Set if the preemption timer should be used if available. Ring-0
462 * quietly clears this if the hardware doesn't support the preemption timer. */
463 bool fUsePreemptTimerCfg;
464 /** @} */
465
466 /** Pause-loop exiting (PLE) gap in ticks. */
467 uint32_t cPleGapTicks;
468 /** Pause-loop exiting (PLE) window in ticks. */
469 uint32_t cPleWindowTicks;
470
471 /** Virtual address of the TSS page used for real mode emulation. */
472 R3PTRTYPE(PVBOXTSS) pRealModeTSS;
473 /** Virtual address of the identity page table used for real mode and protected
474 * mode without paging emulation in EPT mode. */
475 R3PTRTYPE(PX86PD) pNonPagingModeEPTPageTable;
476 } vmx;
477
478 struct
479 {
480 /** Set by the ring-0 side of HM to indicate SVM is supported by the CPU. */
481 bool fSupported;
482 /** Set when we've enabled SVM. */
483 bool fEnabled;
484 /** Set when the hack to ignore VERR_SVM_IN_USE is active.
485 * @todo Safe? */
486 bool fIgnoreInUseError;
487 /** Whether to use virtualized VMSAVE/VMLOAD feature. */
488 bool fVirtVmsaveVmload;
489 /** Whether to use virtual GIF feature. */
490 bool fVGif;
491 /** Whether to use LBR virtualization feature. */
492 bool fLbrVirt;
493 bool afAlignment1[2];
494
495 /** Pause filter counter. */
496 uint16_t cPauseFilter;
497 /** Pause filter threshold in ticks. */
498 uint16_t cPauseFilterThresholdTicks;
499 uint32_t u32Alignment2;
500 } svm;
501
502 /** AVL tree with all patches (active or disabled) sorted by guest instruction address.
503 * @todo For @bugref{9217} this AVL tree must be eliminated and instead
504 * sort aPatches by address and do a safe binary search on it. */
505 AVLOU32TREE PatchTree;
506 uint32_t cPatches;
507 HMTPRPATCH aPatches[64];
508
509 /** Guest allocated memory for patching purposes. */
510 RTGCPTR pGuestPatchMem;
511 /** Current free pointer inside the patch block. */
512 RTGCPTR pFreeGuestPatchMem;
513 /** Size of the guest patch memory block. */
514 uint32_t cbGuestPatchMem;
515 uint32_t u32Alignment2;
516
517 /** For ring-3 use only. */
518 struct
519 {
520 /** Last recorded error code during HM ring-0 init. */
521 int32_t rcInit;
522 uint32_t u32Alignment3;
523
524 /** Maximum ASID allowed.
525 * This is mainly for the release log. */
526 uint32_t uMaxAsid;
527 /** World switcher flags (HM_WSF_XXX) for the release log. */
528 uint32_t fWorldSwitcher;
529
530 struct
531 {
532 /** Set if VPID is supported (ring-3 copy). */
533 bool fVpid;
534 /** Whether the CPU supports VMCS fields for swapping EFER (set by ring-0 VMX
535 * init, for logging). */
536 bool fSupportsVmcsEfer;
537 /** Whether to use VMCS shadowing. */
538 bool fUseVmcsShadowing;
539 bool fAlignment2;
540
541 /** Host CR4 value (set by ring-0 VMX init, for logging). */
542 uint64_t u64HostCr4;
543 /** Host SMM monitor control (set by ring-0 VMX init, for logging). */
544 uint64_t u64HostSmmMonitorCtl;
545 /** Host EFER value (set by ring-0 VMX init, for logging and guest NX). */
546 uint64_t u64HostMsrEfer;
547
548 /** The first valid host LBR branch-from-IP stack range. */
549 uint32_t idLbrFromIpMsrFirst;
550 /** The last valid host LBR branch-from-IP stack range. */
551 uint32_t idLbrFromIpMsrLast;
552
553 /** The first valid host LBR branch-to-IP stack range. */
554 uint32_t idLbrToIpMsrFirst;
555 /** The last valid host LBR branch-to-IP stack range. */
556 uint32_t idLbrToIpMsrLast;
557
558 /** Host-physical address for a failing VMXON instruction (for diagnostics, ring-3). */
559 RTHCPHYS HCPhysVmxEnableError;
560 /** VMX MSR values (only for ring-3 consumption). */
561 VMXMSRS Msrs;
562
563 /** Tagged-TLB flush type (only for ring-3 consumption). */
564 VMXTLBFLUSHTYPE enmTlbFlushType;
565 /** Flush type to use for INVEPT (only for ring-3 consumption). */
566 VMXTLBFLUSHEPT enmTlbFlushEpt;
567 /** Flush type to use for INVVPID (only for ring-3 consumption). */
568 VMXTLBFLUSHVPID enmTlbFlushVpid;
569 } vmx;
570
571 struct
572 {
573 /** SVM revision. */
574 uint32_t u32Rev;
575 /** SVM feature bits from cpuid 0x8000000a, ring-3 copy. */
576 uint32_t fFeatures;
577 /** HWCR MSR (for diagnostics). */
578 uint64_t u64MsrHwcr;
579 } svm;
580 } ForR3;
581
582 /** @name Configuration not used (much) after VM setup
583 * @{ */
584 /** The maximum number of resumes loops allowed in ring-0 (safety precaution).
585 * This number is set much higher when RTThreadPreemptIsPending is reliable. */
586 uint32_t cMaxResumeLoopsCfg;
587 /** Set if nested paging is enabled.
588 * Config value that is copied to HMR0PERVM::fNestedPaging on setup. */
589 bool fNestedPagingCfg;
590 /** Set if large pages are enabled (requires nested paging).
591 * Config only, passed on the PGM where it really belongs.
592 * @todo move to PGM */
593 bool fLargePages;
594 /** Set if we can support 64-bit guests or not.
595 * Config value that is copied to HMR0PERVM::fAllow64BitGuests on setup. */
596 bool fAllow64BitGuestsCfg;
597 /** Set when we initialize VT-x or AMD-V once for all CPUs. */
598 bool fGlobalInit;
599 /** Set if hardware APIC virtualization is enabled.
600 * @todo Not really used by HM, move to APIC where it's actually used. */
601 bool fVirtApicRegs;
602 /** Set if posted interrupt processing is enabled.
603 * @todo Not really used by HM, move to APIC where it's actually used. */
604 bool fPostedIntrs;
605 /** @} */
606
607 /** @name Processed into HMR0PERVCPU::fWorldSwitcher by ring-0 on VM init.
608 * @{ */
609 /** Set if indirect branch prediction barrier on VM exit. */
610 bool fIbpbOnVmExit;
611 /** Set if indirect branch prediction barrier on VM entry. */
612 bool fIbpbOnVmEntry;
613 /** Set if level 1 data cache should be flushed on VM entry. */
614 bool fL1dFlushOnVmEntry;
615 /** Set if level 1 data cache should be flushed on EMT scheduling. */
616 bool fL1dFlushOnSched;
617 /** Set if MDS related buffers should be cleared on VM entry. */
618 bool fMdsClearOnVmEntry;
619 /** Set if MDS related buffers should be cleared on EMT scheduling. */
620 bool fMdsClearOnSched;
621 /** Set if host manages speculation control settings.
622 * @todo doesn't do anything ... */
623 bool fSpecCtrlByHost;
624 /** @} */
625
626 /** Set when we've finalized the VMX / SVM initialization in ring-3
627 * (hmR3InitFinalizeR0Intel / hmR3InitFinalizeR0Amd). */
628 bool fInitialized;
629
630 bool afAlignment2[6];
631
632 STAMCOUNTER StatTprPatchSuccess;
633 STAMCOUNTER StatTprPatchFailure;
634 STAMCOUNTER StatTprReplaceSuccessCr8;
635 STAMCOUNTER StatTprReplaceSuccessVmc;
636 STAMCOUNTER StatTprReplaceFailure;
637} HM;
638/** Pointer to HM VM instance data. */
639typedef HM *PHM;
640AssertCompileMemberAlignment(HM, StatTprPatchSuccess, 8);
641AssertCompileMemberAlignment(HM, vmx, 8);
642AssertCompileMemberAlignment(HM, svm, 8);
643AssertCompileMemberAlignment(HM, StatTprPatchSuccess, 8);
644AssertCompile(RTASSERT_OFFSET_OF(HM, PatchTree) <= 64); /* First cache line has the essentials for both VT-x and SVM operation. */
645
646
647/**
648 * Per-VM ring-0 instance data for HM.
649 */
650typedef struct HMR0PERVM
651{
652 /** The maximum number of resumes loops allowed in ring-0 (safety precaution).
653 * This number is set much higher when RTThreadPreemptIsPending is reliable. */
654 uint32_t cMaxResumeLoops;
655
656 /** Set if nested paging is enabled. */
657 bool fNestedPaging;
658 /** Set if we can support 64-bit guests or not. */
659 bool fAllow64BitGuests;
660 bool afAlignment1[1];
661
662 /** AMD-V specific data. */
663 struct HMR0SVMVM
664 {
665 /** Set if erratum 170 affects the AMD cpu. */
666 bool fAlwaysFlushTLB;
667 } svm;
668
669 /** VT-x specific data. */
670 struct HMR0VMXVM
671 {
672 /** Set if unrestricted guest execution is in use (real and protected mode
673 * without paging). */
674 bool fUnrestrictedGuest;
675 /** Set if the preemption timer is in use. */
676 bool fUsePreemptTimer;
677 /** Whether to use VMCS shadowing. */
678 bool fUseVmcsShadowing;
679 /** Set if Last Branch Record (LBR) is enabled. */
680 bool fLbr;
681 bool afAlignment2[3];
682
683 /** Set if VPID is supported (copy in HM::vmx::fVpidForRing3). */
684 bool fVpid;
685 /** Tagged-TLB flush type. */
686 VMXTLBFLUSHTYPE enmTlbFlushType;
687 /** Flush type to use for INVEPT. */
688 VMXTLBFLUSHEPT enmTlbFlushEpt;
689 /** Flush type to use for INVVPID. */
690 VMXTLBFLUSHVPID enmTlbFlushVpid;
691
692 /** The host LBR TOS (top-of-stack) MSR id. */
693 uint32_t idLbrTosMsr;
694
695 /** The first valid host LBR branch-from-IP stack range. */
696 uint32_t idLbrFromIpMsrFirst;
697 /** The last valid host LBR branch-from-IP stack range. */
698 uint32_t idLbrFromIpMsrLast;
699
700 /** The first valid host LBR branch-to-IP stack range. */
701 uint32_t idLbrToIpMsrFirst;
702 /** The last valid host LBR branch-to-IP stack range. */
703 uint32_t idLbrToIpMsrLast;
704
705 /** Pointer to the VMREAD bitmap. */
706 R0PTRTYPE(void *) pvVmreadBitmap;
707 /** Pointer to the VMWRITE bitmap. */
708 R0PTRTYPE(void *) pvVmwriteBitmap;
709
710 /** Pointer to the shadow VMCS read-only fields array. */
711 R0PTRTYPE(uint32_t *) paShadowVmcsRoFields;
712 /** Pointer to the shadow VMCS read/write fields array. */
713 R0PTRTYPE(uint32_t *) paShadowVmcsFields;
714 /** Number of elements in the shadow VMCS read-only fields array. */
715 uint32_t cShadowVmcsRoFields;
716 /** Number of elements in the shadow VMCS read-write fields array. */
717 uint32_t cShadowVmcsFields;
718
719 /** Host-physical address of the APIC-access page. */
720 RTHCPHYS HCPhysApicAccess;
721 /** Host-physical address of the VMREAD bitmap. */
722 RTHCPHYS HCPhysVmreadBitmap;
723 /** Host-physical address of the VMWRITE bitmap. */
724 RTHCPHYS HCPhysVmwriteBitmap;
725
726#ifdef VBOX_WITH_CRASHDUMP_MAGIC
727 /** Host-physical address of the crash-dump scratch area. */
728 RTHCPHYS HCPhysScratch;
729 /** Pointer to the crash-dump scratch bitmap. */
730 R0PTRTYPE(uint8_t *) pbScratch;
731#endif
732
733 /** Ring-0 memory object for per-VM VMX structures. */
734 RTR0MEMOBJ hMemObj;
735 /** Virtual address of the APIC-access page (not used). */
736 R0PTRTYPE(uint8_t *) pbApicAccess;
737 } vmx;
738} HMR0PERVM;
739/** Pointer to HM's per-VM ring-0 instance data. */
740typedef HMR0PERVM *PHMR0PERVM;
741
742
743/** @addtogroup grp_hm_int_svm SVM Internal
744 * @{ */
745/** SVM VMRun function, see SVMR0VMRun(). */
746typedef DECLCALLBACKTYPE(int, FNHMSVMVMRUN,(PVMCC pVM, PVMCPUCC pVCpu, RTHCPHYS HCPhysVMCB));
747/** Pointer to a SVM VMRun function. */
748typedef R0PTRTYPE(FNHMSVMVMRUN *) PFNHMSVMVMRUN;
749
750/**
751 * SVM nested-guest VMCB cache.
752 *
753 * Contains VMCB fields from the nested-guest VMCB before they're modified by
754 * SVM R0 code for hardware-assisted SVM execution of a nested-guest.
755 *
756 * A VMCB field needs to be cached when it needs to be modified for execution using
757 * hardware-assisted SVM and any of the following are true:
758 * - If the original field needs to be inspected during execution of the
759 * nested-guest or \#VMEXIT processing.
760 * - If the field is written back to memory on \#VMEXIT by the physical CPU.
761 *
762 * A VMCB field needs to be restored only when the field is written back to
763 * memory on \#VMEXIT by the physical CPU and thus would be visible to the
764 * guest.
765 *
766 * @remarks Please update hmR3InfoSvmNstGstVmcbCache() when changes are made to
767 * this structure.
768 */
769typedef struct SVMNESTEDVMCBCACHE
770{
771 /** Cache of CRX read intercepts. */
772 uint16_t u16InterceptRdCRx;
773 /** Cache of CRX write intercepts. */
774 uint16_t u16InterceptWrCRx;
775 /** Cache of DRX read intercepts. */
776 uint16_t u16InterceptRdDRx;
777 /** Cache of DRX write intercepts. */
778 uint16_t u16InterceptWrDRx;
779
780 /** Cache of the pause-filter threshold. */
781 uint16_t u16PauseFilterThreshold;
782 /** Cache of the pause-filter count. */
783 uint16_t u16PauseFilterCount;
784
785 /** Cache of exception intercepts. */
786 uint32_t u32InterceptXcpt;
787 /** Cache of control intercepts. */
788 uint64_t u64InterceptCtrl;
789
790 /** Cache of the TSC offset. */
791 uint64_t u64TSCOffset;
792
793 /** Cache of V_INTR_MASKING bit. */
794 bool fVIntrMasking;
795 /** Cache of the nested-paging bit. */
796 bool fNestedPaging;
797 /** Cache of the LBR virtualization bit. */
798 bool fLbrVirt;
799 /** Whether the VMCB is cached by HM. */
800 bool fCacheValid;
801 /** Alignment. */
802 bool afPadding0[4];
803} SVMNESTEDVMCBCACHE;
804/** Pointer to the SVMNESTEDVMCBCACHE structure. */
805typedef SVMNESTEDVMCBCACHE *PSVMNESTEDVMCBCACHE;
806/** Pointer to a const SVMNESTEDVMCBCACHE structure. */
807typedef const SVMNESTEDVMCBCACHE *PCSVMNESTEDVMCBCACHE;
808AssertCompileSizeAlignment(SVMNESTEDVMCBCACHE, 8);
809
810/** @} */
811
812
813/** @addtogroup grp_hm_int_vmx VMX Internal
814 * @{ */
815/**
816 * VMX VMCS information, shared.
817 *
818 * This structure provides information maintained for and during the execution of a
819 * guest (or nested-guest) VMCS (VM control structure) using hardware-assisted VMX.
820 *
821 * Note! The members here are ordered and aligned based on estimated frequency of
822 * usage and grouped to fit within a cache line in hot code paths. Even subtle
823 * changes here have a noticeable effect in the bootsector benchmarks. Modify with
824 * care.
825 */
826typedef struct VMXVMCSINFOSHARED
827{
828 /** @name Real-mode emulation state.
829 * @{ */
830 /** Set if guest was executing in real mode (extra checks). */
831 bool fWasInRealMode;
832 /** Padding. */
833 bool afPadding0[7];
834 struct
835 {
836 X86DESCATTR AttrCS;
837 X86DESCATTR AttrDS;
838 X86DESCATTR AttrES;
839 X86DESCATTR AttrFS;
840 X86DESCATTR AttrGS;
841 X86DESCATTR AttrSS;
842 X86EFLAGS Eflags;
843 bool fRealOnV86Active;
844 bool afPadding1[3];
845 } RealMode;
846 /** @} */
847
848 /** @name LBR MSR data.
849 * @{ */
850 /** List of LastBranch-From-IP MSRs. */
851 uint64_t au64LbrFromIpMsr[32];
852 /** List of LastBranch-To-IP MSRs. */
853 uint64_t au64LbrToIpMsr[32];
854 /** The MSR containing the index to the most recent branch record. */
855 uint64_t u64LbrTosMsr;
856 /** @} */
857} VMXVMCSINFOSHARED;
858/** Pointer to a VMXVMCSINFOSHARED struct. */
859typedef VMXVMCSINFOSHARED *PVMXVMCSINFOSHARED;
860/** Pointer to a const VMXVMCSINFOSHARED struct. */
861typedef const VMXVMCSINFOSHARED *PCVMXVMCSINFOSHARED;
862AssertCompileSizeAlignment(VMXVMCSINFOSHARED, 8);
863
864
865/**
866 * VMX VMCS information, ring-0 only.
867 *
868 * This structure provides information maintained for and during the execution of a
869 * guest (or nested-guest) VMCS (VM control structure) using hardware-assisted VMX.
870 *
871 * Note! The members here are ordered and aligned based on estimated frequency of
872 * usage and grouped to fit within a cache line in hot code paths. Even subtle
873 * changes here have a noticeable effect in the bootsector benchmarks. Modify with
874 * care.
875 */
876typedef struct VMXVMCSINFO
877{
878 /** Pointer to the bits we share with ring-3. */
879 R0PTRTYPE(PVMXVMCSINFOSHARED) pShared;
880
881 /** @name Auxiliary information.
882 * @{ */
883 /** Host-physical address of the EPTP. */
884 RTHCPHYS HCPhysEPTP;
885 /** The VMCS launch state, see VMX_V_VMCS_LAUNCH_STATE_XXX. */
886 uint32_t fVmcsState;
887 /** The VMCS launch state of the shadow VMCS, see VMX_V_VMCS_LAUNCH_STATE_XXX. */
888 uint32_t fShadowVmcsState;
889 /** The host CPU for which its state has been exported to this VMCS. */
890 RTCPUID idHostCpuState;
891 /** The host CPU on which we last executed this VMCS. */
892 RTCPUID idHostCpuExec;
893 /** Number of guest MSRs in the VM-entry MSR-load area. */
894 uint32_t cEntryMsrLoad;
895 /** Number of guest MSRs in the VM-exit MSR-store area. */
896 uint32_t cExitMsrStore;
897 /** Number of host MSRs in the VM-exit MSR-load area. */
898 uint32_t cExitMsrLoad;
899 /** @} */
900
901 /** @name Cache of execution related VMCS fields.
902 * @{ */
903 /** Pin-based VM-execution controls. */
904 uint32_t u32PinCtls;
905 /** Processor-based VM-execution controls. */
906 uint32_t u32ProcCtls;
907 /** Secondary processor-based VM-execution controls. */
908 uint32_t u32ProcCtls2;
909 /** VM-entry controls. */
910 uint32_t u32EntryCtls;
911 /** VM-exit controls. */
912 uint32_t u32ExitCtls;
913 /** Exception bitmap. */
914 uint32_t u32XcptBitmap;
915 /** Page-fault exception error-code mask. */
916 uint32_t u32XcptPFMask;
917 /** Page-fault exception error-code match. */
918 uint32_t u32XcptPFMatch;
919 /** Padding. */
920 uint32_t u32Alignment0;
921 /** TSC offset. */
922 uint64_t u64TscOffset;
923 /** VMCS link pointer. */
924 uint64_t u64VmcsLinkPtr;
925 /** CR0 guest/host mask. */
926 uint64_t u64Cr0Mask;
927 /** CR4 guest/host mask. */
928 uint64_t u64Cr4Mask;
929 /** Current VMX_VMCS_HOST_RIP value (only used in HMR0A.asm). */
930 uint64_t uHostRip;
931 /** Current VMX_VMCS_HOST_RSP value (only used in HMR0A.asm). */
932 uint64_t uHostRsp;
933 /** @} */
934
935 /** @name Host-virtual address of VMCS and related data structures.
936 * @{ */
937 /** The VMCS. */
938 R0PTRTYPE(void *) pvVmcs;
939 /** The shadow VMCS. */
940 R0PTRTYPE(void *) pvShadowVmcs;
941 /** The virtual-APIC page. */
942 R0PTRTYPE(uint8_t *) pbVirtApic;
943 /** The MSR bitmap. */
944 R0PTRTYPE(void *) pvMsrBitmap;
945 /** The VM-entry MSR-load area. */
946 R0PTRTYPE(void *) pvGuestMsrLoad;
947 /** The VM-exit MSR-store area. */
948 R0PTRTYPE(void *) pvGuestMsrStore;
949 /** The VM-exit MSR-load area. */
950 R0PTRTYPE(void *) pvHostMsrLoad;
951 /** @} */
952
953 /** @name Host-physical address of VMCS and related data structures.
954 * @{ */
955 /** The VMCS. */
956 RTHCPHYS HCPhysVmcs;
957 /** The shadow VMCS. */
958 RTHCPHYS HCPhysShadowVmcs;
959 /** The virtual APIC page. */
960 RTHCPHYS HCPhysVirtApic;
961 /** The MSR bitmap. */
962 RTHCPHYS HCPhysMsrBitmap;
963 /** The VM-entry MSR-load area. */
964 RTHCPHYS HCPhysGuestMsrLoad;
965 /** The VM-exit MSR-store area. */
966 RTHCPHYS HCPhysGuestMsrStore;
967 /** The VM-exit MSR-load area. */
968 RTHCPHYS HCPhysHostMsrLoad;
969 /** @} */
970
971 /** @name R0-memory objects address for VMCS and related data structures.
972 * @{ */
973 /** R0-memory object for VMCS and related data structures. */
974 RTR0MEMOBJ hMemObj;
975 /** @} */
976} VMXVMCSINFO;
977/** Pointer to a VMXVMCSINFOR0 struct. */
978typedef VMXVMCSINFO *PVMXVMCSINFO;
979/** Pointer to a const VMXVMCSINFO struct. */
980typedef const VMXVMCSINFO *PCVMXVMCSINFO;
981AssertCompileSizeAlignment(VMXVMCSINFO, 8);
982AssertCompileMemberAlignment(VMXVMCSINFO, u32PinCtls, 4);
983AssertCompileMemberAlignment(VMXVMCSINFO, u64VmcsLinkPtr, 8);
984AssertCompileMemberAlignment(VMXVMCSINFO, pvVmcs, 8);
985AssertCompileMemberAlignment(VMXVMCSINFO, pvShadowVmcs, 8);
986AssertCompileMemberAlignment(VMXVMCSINFO, pbVirtApic, 8);
987AssertCompileMemberAlignment(VMXVMCSINFO, pvMsrBitmap, 8);
988AssertCompileMemberAlignment(VMXVMCSINFO, pvGuestMsrLoad, 8);
989AssertCompileMemberAlignment(VMXVMCSINFO, pvGuestMsrStore, 8);
990AssertCompileMemberAlignment(VMXVMCSINFO, pvHostMsrLoad, 8);
991AssertCompileMemberAlignment(VMXVMCSINFO, HCPhysVmcs, 8);
992AssertCompileMemberAlignment(VMXVMCSINFO, hMemObj, 8);
993
994
995/** @name Host-state restoration flags.
996 * @note If you change these values don't forget to update the assembly
997 * defines as well!
998 * @{
999 */
1000#define VMX_RESTORE_HOST_SEL_DS RT_BIT(0)
1001#define VMX_RESTORE_HOST_SEL_ES RT_BIT(1)
1002#define VMX_RESTORE_HOST_SEL_FS RT_BIT(2)
1003#define VMX_RESTORE_HOST_SEL_GS RT_BIT(3)
1004#define VMX_RESTORE_HOST_SEL_TR RT_BIT(4)
1005#define VMX_RESTORE_HOST_GDTR RT_BIT(5)
1006#define VMX_RESTORE_HOST_IDTR RT_BIT(6)
1007#define VMX_RESTORE_HOST_GDT_READ_ONLY RT_BIT(7)
1008#define VMX_RESTORE_HOST_GDT_NEED_WRITABLE RT_BIT(8)
1009#define VMX_RESTORE_HOST_CAN_USE_WRFSBASE_AND_WRGSBASE RT_BIT(9)
1010/**
1011 * This _must_ be the topmost bit, so that we can easily check that it and
1012 * something else is set w/o having to do two checks like this:
1013 * @code
1014 * if ( (pVCpu->hm.s.vmx.fRestoreHostFlags & VMX_RESTORE_HOST_REQUIRED)
1015 * && (pVCpu->hm.s.vmx.fRestoreHostFlags & ~VMX_RESTORE_HOST_REQUIRED))
1016 * @endcode
1017 * Instead we can then do:
1018 * @code
1019 * if (pVCpu->hm.s.vmx.fRestoreHostFlags > VMX_RESTORE_HOST_REQUIRED)
1020 * @endcode
1021 */
1022#define VMX_RESTORE_HOST_REQUIRED RT_BIT(10)
1023/** @} */
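#if 0 /* Example only -- not part of the original header.  Illustrates the
         "topmost bit" trick described above: a single comparison tells us
         whether restoration is both required and has something to restore.
         The fRestoreHostFlags/RestoreHost field paths are assumptions taken
         from the ring-0 structures defined later in this file. */
/* When exporting host FS for guest use, remember that it needs restoring: */
pVCpu->hmr0.s.vmx.fRestoreHostFlags |= VMX_RESTORE_HOST_SEL_FS | VMX_RESTORE_HOST_REQUIRED;
/* ... later, when about to be preempted or leaving VT-x: ... */
if (pVCpu->hmr0.s.vmx.fRestoreHostFlags > VMX_RESTORE_HOST_REQUIRED)
    VMXRestoreHostState(pVCpu->hmr0.s.vmx.fRestoreHostFlags, &pVCpu->hmr0.s.vmx.RestoreHost);
#endif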
1024
1025/**
1026 * Host-state restoration structure.
1027 *
1028 * This holds host-state fields that require manual restoration.
1029 * Assembly version found in HMInternal.mac (should be automatically verified).
1030 */
1031typedef struct VMXRESTOREHOST
1032{
1033 RTSEL uHostSelDS; /**< 0x00 */
1034 RTSEL uHostSelES; /**< 0x02 */
1035 RTSEL uHostSelFS; /**< 0x04 */
1036 X86XDTR64 HostGdtr; /**< 0x06 - should be aligned by its 64-bit member. */
1037 RTSEL uHostSelGS; /**< 0x10 */
1038 RTSEL uHostSelTR; /**< 0x12 */
1039 RTSEL uHostSelSS; /**< 0x14 - not restored, just for fetching */
1040 X86XDTR64 HostGdtrRw; /**< 0x16 - should be aligned by its 64-bit member. */
1041 RTSEL uHostSelCS; /**< 0x20 - not restored, just for fetching */
1042 uint8_t abPadding1[4]; /**< 0x22 */
1043 X86XDTR64 HostIdtr; /**< 0x26 - should be aligned by its 64-bit member. */
1044 uint64_t uHostFSBase; /**< 0x30 */
1045 uint64_t uHostGSBase; /**< 0x38 */
1046} VMXRESTOREHOST;
1047/** Pointer to VMXRESTOREHOST. */
1048typedef VMXRESTOREHOST *PVMXRESTOREHOST;
1049AssertCompileSize(X86XDTR64, 10);
1050AssertCompileMemberOffset(VMXRESTOREHOST, HostGdtr.uAddr, 0x08);
1051AssertCompileMemberOffset(VMXRESTOREHOST, HostGdtrRw.uAddr, 0x18);
1052AssertCompileMemberOffset(VMXRESTOREHOST, HostIdtr.uAddr, 0x28);
1053AssertCompileMemberOffset(VMXRESTOREHOST, uHostFSBase, 0x30);
1054AssertCompileSize(VMXRESTOREHOST, 64);
1055AssertCompileSizeAlignment(VMXRESTOREHOST, 8);
1056
1057/**
1058 * VMX StartVM function.
1059 *
1060 * @returns VBox status code (no informational stuff).
1061 * @param pVmcsInfo Pointer to the VMCS info (for cached host RIP and RSP).
1062 * @param pVCpu Pointer to the cross context per-CPU structure.
1063 * @param fResume Whether to use VMRESUME (true) or VMLAUNCH (false).
1064 */
1065typedef DECLCALLBACKTYPE(int, FNHMVMXSTARTVM,(PVMXVMCSINFO pVmcsInfo, PVMCPUCC pVCpu, bool fResume));
1066/** Pointer to a VMX StartVM function. */
1067typedef R0PTRTYPE(FNHMVMXSTARTVM *) PFNHMVMXSTARTVM;
1068/** @} */
1069
1070/**
1071 * HM VMCPU Instance data.
1072 *
1073 * Note! If you change members of this struct, make sure to check if the
1074 * assembly counterpart in HMInternal.mac needs to be updated as well.
1075 *
1076 * Note! The members here are ordered and aligned based on estimated frequency of
1077 * usage and grouped to fit within a cache line in hot code paths. Even subtle
1078 * changes here have a noticeable effect in the bootsector benchmarks. Modify with
1079 * care.
1080 */
1081typedef struct HMCPU
1082{
1083 /** Set when the TLB has been checked until we return from the world switch. */
1084 bool volatile fCheckedTLBFlush;
1085 /** Set when we're using VT-x or AMD-V at that moment.
1086 * @todo r=bird: Misleading description. For AMD-V this will be set the first
1087 * time HMCanExecuteGuest() is called and only cleared again by
1088 * HMR3ResetCpu(). For VT-x it will be set by HMCanExecuteGuest when we
1089 * can execute something in VT-x mode, and cleared if we cannot.
1090 *
1091 * The field is much more about recording the last HMCanExecuteGuest
1092 * return value than anything about any "moment". */
1093 bool fActive;
1094
1095 /** Whether we should use the debug loop because of single stepping or special
1096 * debug breakpoints / events are armed. */
1097 bool fUseDebugLoop;
1098
1099 /** Whether \#UD needs to be intercepted (required by certain GIM providers). */
1100 bool fGIMTrapXcptUD;
1101 /** Whether \#GP needs to be intercepted for mesa driver workaround. */
1102 bool fTrapXcptGpForLovelyMesaDrv;
1103 /** Whether we're executing a single instruction. */
1104 bool fSingleInstruction;
1105
1106 bool afAlignment0[2];
1107
1108 /** An additional error code used for some gurus. */
1109 uint32_t u32HMError;
1110 /** The last exit-to-ring-3 reason. */
1111 int32_t rcLastExitToR3;
1112 /** CPU-context changed flags (see HM_CHANGED_xxx). */
1113 uint64_t fCtxChanged;
1114
1115 /** VT-x data. */
1116 struct HMCPUVMX
1117 {
1118 /** @name Guest information.
1119 * @{ */
1120 /** Guest VMCS information shared with ring-3. */
1121 VMXVMCSINFOSHARED VmcsInfo;
1122 /** Nested-guest VMCS information shared with ring-3. */
1123 VMXVMCSINFOSHARED VmcsInfoNstGst;
1124 /** Whether the nested-guest VMCS was the last current VMCS (shadow copy for ring-3).
1125 * @see HMR0PERVCPU::vmx.fSwitchedToNstGstVmcs */
1126 bool fSwitchedToNstGstVmcsCopyForRing3;
1127 /** Whether the static guest VMCS controls have been merged with the
1128 * nested-guest VMCS controls. */
1129 bool fMergedNstGstCtls;
1130 /** Whether the nested-guest VMCS has been copied to the shadow VMCS. */
1131 bool fCopiedNstGstToShadowVmcs;
1132 /** Whether flushing the TLB is required due to switching to/from the
1133 * nested-guest. */
1134 bool fSwitchedNstGstFlushTlb;
1135 /** Alignment. */
1136 bool afAlignment0[4];
1137 /** Cached guest APIC-base MSR for identifying when to map the APIC-access page. */
1138 uint64_t u64GstMsrApicBase;
1139 /** @} */
1140
1141 /** @name Error reporting and diagnostics.
1142 * @{ */
1143 /** VT-x error-reporting (mainly for ring-3 propagation). */
1144 struct
1145 {
1146 RTCPUID idCurrentCpu;
1147 RTCPUID idEnteredCpu;
1148 RTHCPHYS HCPhysCurrentVmcs;
1149 uint32_t u32VmcsRev;
1150 uint32_t u32InstrError;
1151 uint32_t u32ExitReason;
1152 uint32_t u32GuestIntrState;
1153 } LastError;
1154 /** @} */
1155 } vmx;
1156
1157 /** SVM data. */
1158 struct HMCPUSVM
1159 {
1160 /** Whether to emulate long mode support for sysenter/sysexit like Intel CPUs
1161 * do. This means intercepting \#UD to emulate the instructions in
1162 * long-mode and to intercept reads and writes to the SYSENTER MSRs in order to
1163 * preserve the upper 32 bits written to them (AMD will ignore and discard). */
1164 bool fEmulateLongModeSysEnterExit;
1165 uint8_t au8Alignment0[7];
1166
1167 /** Cache of the nested-guest's VMCB fields that we modify in order to run the
1168 * nested-guest using AMD-V. This will be restored on \#VMEXIT. */
1169 SVMNESTEDVMCBCACHE NstGstVmcbCache;
1170 } svm;
1171
1172 /** Event injection state. */
1173 HMEVENT Event;
1174
1175 /** Current shadow paging mode for updating CR4.
1176 * @todo move later (@bugref{9217}). */
1177 PGMMODE enmShadowMode;
1178 uint32_t u32TemporaryPadding;
1179
1180 /** The PAE PDPEs used with Nested Paging (only valid when
1181 * VMCPU_FF_HM_UPDATE_PAE_PDPES is set). */
1182 X86PDPE aPdpes[4];
1183
1184 /* These come first because they are accessed from assembly and we don't
1185 want to detail all the stats in the assembly version of this structure. */
1186 STAMCOUNTER StatVmxWriteHostRip;
1187 STAMCOUNTER StatVmxWriteHostRsp;
1188 STAMCOUNTER StatVmxVmLaunch;
1189 STAMCOUNTER StatVmxVmResume;
1190
1191 STAMPROFILEADV StatEntry;
1192 STAMPROFILEADV StatPreExit;
1193 STAMPROFILEADV StatExitHandling;
1194 STAMPROFILEADV StatExitIO;
1195 STAMPROFILEADV StatExitMovCRx;
1196 STAMPROFILEADV StatExitXcptNmi;
1197 STAMPROFILEADV StatExitVmentry;
1198 STAMPROFILEADV StatImportGuestState;
1199 STAMPROFILEADV StatExportGuestState;
1200 STAMPROFILEADV StatLoadGuestFpuState;
1201 STAMPROFILEADV StatInGC;
1202 STAMPROFILEADV StatPoke;
1203 STAMPROFILEADV StatSpinPoke;
1204 STAMPROFILEADV StatSpinPokeFailed;
1205
1206 STAMCOUNTER StatInjectInterrupt;
1207 STAMCOUNTER StatInjectXcpt;
1208 STAMCOUNTER StatInjectReflect;
1209 STAMCOUNTER StatInjectConvertDF;
1210 STAMCOUNTER StatInjectInterpret;
1211 STAMCOUNTER StatInjectReflectNPF;
1212
1213 STAMCOUNTER StatExitAll;
1214 STAMCOUNTER StatNestedExitAll;
1215 STAMCOUNTER StatExitShadowNM;
1216 STAMCOUNTER StatExitGuestNM;
1217 STAMCOUNTER StatExitShadowPF; /**< Misleading, currently used for MMIO \#PFs as well. */
1218 STAMCOUNTER StatExitShadowPFEM;
1219 STAMCOUNTER StatExitGuestPF;
1220 STAMCOUNTER StatExitGuestUD;
1221 STAMCOUNTER StatExitGuestSS;
1222 STAMCOUNTER StatExitGuestNP;
1223 STAMCOUNTER StatExitGuestTS;
1224 STAMCOUNTER StatExitGuestOF;
1225 STAMCOUNTER StatExitGuestGP;
1226 STAMCOUNTER StatExitGuestDE;
1227 STAMCOUNTER StatExitGuestDF;
1228 STAMCOUNTER StatExitGuestBR;
1229 STAMCOUNTER StatExitGuestAC;
1230 STAMCOUNTER StatExitGuestDB;
1231 STAMCOUNTER StatExitGuestMF;
1232 STAMCOUNTER StatExitGuestBP;
1233 STAMCOUNTER StatExitGuestXF;
1234 STAMCOUNTER StatExitGuestXcpUnk;
1235 STAMCOUNTER StatExitDRxWrite;
1236 STAMCOUNTER StatExitDRxRead;
1237 STAMCOUNTER StatExitCR0Read;
1238 STAMCOUNTER StatExitCR2Read;
1239 STAMCOUNTER StatExitCR3Read;
1240 STAMCOUNTER StatExitCR4Read;
1241 STAMCOUNTER StatExitCR8Read;
1242 STAMCOUNTER StatExitCR0Write;
1243 STAMCOUNTER StatExitCR2Write;
1244 STAMCOUNTER StatExitCR3Write;
1245 STAMCOUNTER StatExitCR4Write;
1246 STAMCOUNTER StatExitCR8Write;
1247 STAMCOUNTER StatExitRdmsr;
1248 STAMCOUNTER StatExitWrmsr;
1249 STAMCOUNTER StatExitClts;
1250 STAMCOUNTER StatExitXdtrAccess;
1251 STAMCOUNTER StatExitLmsw;
1252 STAMCOUNTER StatExitIOWrite;
1253 STAMCOUNTER StatExitIORead;
1254 STAMCOUNTER StatExitIOStringWrite;
1255 STAMCOUNTER StatExitIOStringRead;
1256 STAMCOUNTER StatExitIntWindow;
1257 STAMCOUNTER StatExitExtInt;
1258 STAMCOUNTER StatExitHostNmiInGC;
1259 STAMCOUNTER StatExitHostNmiInGCIpi;
1260 STAMCOUNTER StatExitPreemptTimer;
1261 STAMCOUNTER StatExitTprBelowThreshold;
1262 STAMCOUNTER StatExitTaskSwitch;
1263 STAMCOUNTER StatExitApicAccess;
1264 STAMCOUNTER StatExitReasonNpf;
1265
1266 STAMCOUNTER StatNestedExitReasonNpf;
1267
1268 STAMCOUNTER StatFlushPage;
1269 STAMCOUNTER StatFlushPageManual;
1270 STAMCOUNTER StatFlushPhysPageManual;
1271 STAMCOUNTER StatFlushTlb;
1272 STAMCOUNTER StatFlushTlbNstGst;
1273 STAMCOUNTER StatFlushTlbManual;
1274 STAMCOUNTER StatFlushTlbWorldSwitch;
1275 STAMCOUNTER StatNoFlushTlbWorldSwitch;
1276 STAMCOUNTER StatFlushEntire;
1277 STAMCOUNTER StatFlushAsid;
1278 STAMCOUNTER StatFlushNestedPaging;
1279 STAMCOUNTER StatFlushTlbInvlpgVirt;
1280 STAMCOUNTER StatFlushTlbInvlpgPhys;
1281 STAMCOUNTER StatTlbShootdown;
1282 STAMCOUNTER StatTlbShootdownFlush;
1283
1284 STAMCOUNTER StatSwitchPendingHostIrq;
1285 STAMCOUNTER StatSwitchTprMaskedIrq;
1286 STAMCOUNTER StatSwitchGuestIrq;
1287 STAMCOUNTER StatSwitchHmToR3FF;
1288 STAMCOUNTER StatSwitchVmReq;
1289 STAMCOUNTER StatSwitchPgmPoolFlush;
1290 STAMCOUNTER StatSwitchDma;
1291 STAMCOUNTER StatSwitchExitToR3;
1292 STAMCOUNTER StatSwitchLongJmpToR3;
1293 STAMCOUNTER StatSwitchMaxResumeLoops;
1294 STAMCOUNTER StatSwitchHltToR3;
1295 STAMCOUNTER StatSwitchApicAccessToR3;
1296 STAMCOUNTER StatSwitchPreempt;
1297 STAMCOUNTER StatSwitchNstGstVmexit;
1298
1299 STAMCOUNTER StatTscParavirt;
1300 STAMCOUNTER StatTscOffset;
1301 STAMCOUNTER StatTscIntercept;
1302
1303 STAMCOUNTER StatDRxArmed;
1304 STAMCOUNTER StatDRxContextSwitch;
1305 STAMCOUNTER StatDRxIoCheck;
1306
1307 STAMCOUNTER StatExportMinimal;
1308 STAMCOUNTER StatExportFull;
1309 STAMCOUNTER StatLoadGuestFpu;
1310 STAMCOUNTER StatExportHostState;
1311
1312 STAMCOUNTER StatVmxCheckBadRmSelBase;
1313 STAMCOUNTER StatVmxCheckBadRmSelLimit;
1314 STAMCOUNTER StatVmxCheckBadRmSelAttr;
1315 STAMCOUNTER StatVmxCheckBadV86SelBase;
1316 STAMCOUNTER StatVmxCheckBadV86SelLimit;
1317 STAMCOUNTER StatVmxCheckBadV86SelAttr;
1318 STAMCOUNTER StatVmxCheckRmOk;
1319 STAMCOUNTER StatVmxCheckBadSel;
1320 STAMCOUNTER StatVmxCheckBadRpl;
1321 STAMCOUNTER StatVmxCheckPmOk;
1322
1323#ifdef VBOX_WITH_STATISTICS
1324 R3PTRTYPE(PSTAMCOUNTER) paStatExitReason;
1325 R0PTRTYPE(PSTAMCOUNTER) paStatExitReasonR0;
1326 R3PTRTYPE(PSTAMCOUNTER) paStatInjectedIrqs;
1327 R0PTRTYPE(PSTAMCOUNTER) paStatInjectedIrqsR0;
1328 R3PTRTYPE(PSTAMCOUNTER) paStatInjectedXcpts;
1329 R0PTRTYPE(PSTAMCOUNTER) paStatInjectedXcptsR0;
1330 R3PTRTYPE(PSTAMCOUNTER) paStatNestedExitReason;
1331 R0PTRTYPE(PSTAMCOUNTER) paStatNestedExitReasonR0;
1332#endif
1333#ifdef HM_PROFILE_EXIT_DISPATCH
1334 STAMPROFILEADV StatExitDispatch;
1335#endif
1336} HMCPU;
1337/** Pointer to HM VMCPU instance data. */
1338typedef HMCPU *PHMCPU;
1339AssertCompileMemberAlignment(HMCPU, fCheckedTLBFlush, 4);
1340AssertCompileMemberAlignment(HMCPU, fCtxChanged, 8);
1341AssertCompileMemberAlignment(HMCPU, vmx, 8);
1342AssertCompileMemberAlignment(HMCPU, vmx.VmcsInfo, 8);
1343AssertCompileMemberAlignment(HMCPU, vmx.VmcsInfoNstGst, 8);
1344AssertCompileMemberAlignment(HMCPU, svm, 8);
1345AssertCompileMemberAlignment(HMCPU, Event, 8);
1346
1347
1348/**
1349 * HM per-VCpu ring-0 only instance data.
1350 */
1351typedef struct HMR0PERVCPU
1352{
1353 /** World switch exit counter. */
1354 uint32_t volatile cWorldSwitchExits;
1355 /** TLB flush count. */
1356 uint32_t cTlbFlushes;
1357 /** The last CPU we were executing code on (NIL_RTCPUID for the first time). */
1358 RTCPUID idLastCpu;
1359 /** The CPU ID of the CPU currently owning the VMCS. Set in
1360 * HMR0Enter and cleared in HMR0Leave. */
1361 RTCPUID idEnteredCpu;
1362 /** Current ASID in use by the VM. */
1363 uint32_t uCurrentAsid;
1364
1365 /** Set if we need to flush the TLB during the world switch. */
1366 bool fForceTLBFlush;
1367 /** Whether we've completed the inner HM leave function. */
1368 bool fLeaveDone;
1369 /** Whether we're using the hyper DR7 or guest DR7. */
1370 bool fUsingHyperDR7;
1371 /** Whether we are currently executing in the debug loop.
1372 * Mainly for assertions. */
1373 bool fUsingDebugLoop;
1374 /** Set if we using the debug loop and wish to intercept RDTSC. */
1375 bool fDebugWantRdTscExit;
1376 /** Set if XCR0 needs to be saved/restored when entering/exiting guest code
1377 * execution. */
1378 bool fLoadSaveGuestXcr0;
1379 /** Set if we need to clear the trap flag because of single stepping. */
1380 bool fClearTrapFlag;
1381
1382 bool afPadding1[1];
1383 /** World switcher flags (HM_WSF_XXX - was CPUMCTX::fWorldSwitcher in 6.1). */
1384 uint32_t fWorldSwitcher;
1385
1386 /** VT-x data. */
1387 struct HMR0CPUVMX
1388 {
1389 /** Ring-0 pointer to the hardware-assisted VMX execution function. */
1390 PFNHMVMXSTARTVM pfnStartVm;
1391
1392 /** @name Guest information.
1393 * @{ */
1394 /** Guest VMCS information. */
1395 VMXVMCSINFO VmcsInfo;
1396 /** Nested-guest VMCS information. */
1397 VMXVMCSINFO VmcsInfoNstGst;
1398 /* Whether the nested-guest VMCS was the last current VMCS (authoritative copy).
1399 * @see HMCPU::vmx.fSwitchedToNstGstVmcsCopyForRing3 */
1400 bool fSwitchedToNstGstVmcs;
1401 bool afAlignment0[7];
1402 /** @} */
1403
1404 /** @name Host information.
1405 * @{ */
1406 /** Host LSTAR MSR to restore lazily while leaving VT-x. */
1407 uint64_t u64HostMsrLStar;
1408 /** Host STAR MSR to restore lazily while leaving VT-x. */
1409 uint64_t u64HostMsrStar;
1410 /** Host SF_MASK MSR to restore lazily while leaving VT-x. */
1411 uint64_t u64HostMsrSfMask;
1412 /** Host KernelGS-Base MSR to restore lazily while leaving VT-x. */
1413 uint64_t u64HostMsrKernelGsBase;
1414 /** The mask of lazy MSRs swap/restore state, see VMX_LAZY_MSRS_XXX. */
1415 uint32_t fLazyMsrs;
1416 /** Whether the host MSR values are up-to-date in the auto-load/store MSR area. */
1417 bool fUpdatedHostAutoMsrs;
1418 /** Alignment. */
1419 uint8_t au8Alignment0[3];
1420 /** Which host-state bits to restore before being preempted, see
1421 * VMX_RESTORE_HOST_XXX. */
1422 uint32_t fRestoreHostFlags;
1423 /** Alignment. */
1424 uint32_t u32Alignment0;
1425 /** The host-state restoration structure. */
1426 VMXRESTOREHOST RestoreHost;
1427 /** @} */
1428 } vmx;
1429
1430 /** SVM data. */
1431 struct HMR0CPUSVM
1432 {
1433 /** Ring-0 pointer to the hardware-assisted SVM VMRUN function. */
1434 PFNHMSVMVMRUN pfnVMRun;
1435
1436 /** Physical address of the host VMCB which holds additional host-state. */
1437 RTHCPHYS HCPhysVmcbHost;
1438 /** R0 memory object for the host VMCB which holds additional host-state. */
1439 RTR0MEMOBJ hMemObjVmcbHost;
1440
1441 /** Physical address of the guest VMCB. */
1442 RTHCPHYS HCPhysVmcb;
1443 /** R0 memory object for the guest VMCB. */
1444 RTR0MEMOBJ hMemObjVmcb;
1445 /** Pointer to the guest VMCB. */
1446 R0PTRTYPE(PSVMVMCB) pVmcb;
1447
1448 /** Physical address of the MSR bitmap (8 KB). */
1449 RTHCPHYS HCPhysMsrBitmap;
1450 /** R0 memory object for the MSR bitmap (8 KB). */
1451 RTR0MEMOBJ hMemObjMsrBitmap;
1452 /** Pointer to the MSR bitmap. */
1453 R0PTRTYPE(void *) pvMsrBitmap;
1454
1455 /** Whether VTPR with V_INTR_MASKING set is in effect, indicating
1456 * we should check if the VTPR changed on every VM-exit. */
1457 bool fSyncVTpr;
1458 bool afAlignment[7];
1459
1460 /** Host's TSC_AUX MSR (used when RDTSCP doesn't cause VM-exits). */
1461 uint64_t u64HostTscAux;
1462
1463 /** For saving stack space, the disassembler state is allocated here
1464 * instead of on the stack. */
1465 DISCPUSTATE DisState;
1466 } svm;
1467} HMR0PERVCPU;
1468/** Pointer to HM ring-0 VMCPU instance data. */
1469typedef HMR0PERVCPU *PHMR0PERVCPU;
1470AssertCompileMemberAlignment(HMR0PERVCPU, cWorldSwitchExits, 4);
1471AssertCompileMemberAlignment(HMR0PERVCPU, fForceTLBFlush, 4);
1472AssertCompileMemberAlignment(HMR0PERVCPU, vmx.RestoreHost, 8);
1473
1474
1475/** @name HM_WSF_XXX - @bugref{9453}, @bugref{9087}
1476 * @{ */
1477/** Touch IA32_PRED_CMD.IBPB on VM exit. */
1478#define HM_WSF_IBPB_EXIT RT_BIT_32(0)
1479/** Touch IA32_PRED_CMD.IBPB on VM entry. */
1480#define HM_WSF_IBPB_ENTRY RT_BIT_32(1)
1481/** Touch IA32_FLUSH_CMD.L1D on VM entry. */
1482#define HM_WSF_L1D_ENTRY RT_BIT_32(2)
1483/** Flush MDS buffers on VM entry. */
1484#define HM_WSF_MDS_ENTRY RT_BIT_32(3)
1485
1486/** Touch IA32_FLUSH_CMD.L1D on VM scheduling. */
1487#define HM_WSF_L1D_SCHED RT_BIT_32(16)
1488/** Flush MDS buffers on VM scheduling. */
1489#define HM_WSF_MDS_SCHED RT_BIT_32(17)
1490/** @} */
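#if 0 /* Example only -- not part of the original header.  A sketch of how the
         HM config booleans (fIbpbOnVmExit & friends, including the
         fL1dFlushOnSched/fMdsClearOnSched pair mentioned in the r87606 change
         note above) could be folded into HMR0PERVCPU::fWorldSwitcher; the
         real ring-0 init code may differ in detail. */
DECLINLINE(uint32_t) hmExampleWorldSwitcherFlags(PVMCC pVM)
{
    uint32_t fFlags = 0;
    if (pVM->hm.s.fIbpbOnVmExit)       fFlags |= HM_WSF_IBPB_EXIT;
    if (pVM->hm.s.fIbpbOnVmEntry)      fFlags |= HM_WSF_IBPB_ENTRY;
    if (pVM->hm.s.fL1dFlushOnVmEntry)  fFlags |= HM_WSF_L1D_ENTRY;
    if (pVM->hm.s.fMdsClearOnVmEntry)  fFlags |= HM_WSF_MDS_ENTRY;
    if (pVM->hm.s.fL1dFlushOnSched)    fFlags |= HM_WSF_L1D_SCHED;
    if (pVM->hm.s.fMdsClearOnSched)    fFlags |= HM_WSF_MDS_SCHED;
    return fFlags;
}
#endif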
1491
1492
1493#ifdef IN_RING0
1494extern bool g_fHmVmxSupported;
1495extern uint32_t g_fHmHostKernelFeatures;
1496extern uint32_t g_uHmMaxAsid;
1497extern bool g_fHmVmxUsePreemptTimer;
1498extern uint8_t g_cHmVmxPreemptTimerShift;
1499extern bool g_fHmVmxSupportsVmcsEfer;
1500extern uint64_t g_uHmVmxHostCr4;
1501extern uint64_t g_uHmVmxHostMsrEfer;
1502extern uint64_t g_uHmVmxHostSmmMonitorCtl;
1503extern bool g_fHmSvmSupported;
1504extern uint32_t g_uHmSvmRev;
1505extern uint32_t g_fHmSvmFeatures;
1506
1507extern SUPHWVIRTMSRS g_HmMsrs;
1508
1509
1510VMMR0_INT_DECL(PHMPHYSCPU) hmR0GetCurrentCpu(void);
1511VMMR0_INT_DECL(int) hmR0EnterCpu(PVMCPUCC pVCpu);
1512
1513# ifdef VBOX_STRICT
1514# define HM_DUMP_REG_FLAGS_GPRS RT_BIT(0)
1515# define HM_DUMP_REG_FLAGS_FPU RT_BIT(1)
1516# define HM_DUMP_REG_FLAGS_MSRS RT_BIT(2)
1517# define HM_DUMP_REG_FLAGS_ALL (HM_DUMP_REG_FLAGS_GPRS | HM_DUMP_REG_FLAGS_FPU | HM_DUMP_REG_FLAGS_MSRS)
1518
1519VMMR0_INT_DECL(void) hmR0DumpRegs(PVMCPUCC pVCpu, uint32_t fFlags);
1520VMMR0_INT_DECL(void) hmR0DumpDescriptor(PCX86DESCHC pDesc, RTSEL Sel, const char *pszMsg);
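#  if 0 /* Example only -- not part of the original header: dump GPRs, FPU
           state and MSRs in a strict build, e.g. from a guru-meditation path. */
hmR0DumpRegs(pVCpu, HM_DUMP_REG_FLAGS_ALL);
#  endif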
1521# endif
1522
1523DECLASM(void) hmR0MdsClear(void);
1524#endif /* IN_RING0 */
1525
1526
1527/** @addtogroup grp_hm_int_svm SVM Internal
1528 * @{ */
1529VMM_INT_DECL(int) hmEmulateSvmMovTpr(PVMCC pVM, PVMCPUCC pVCpu);
1530
1531/**
1532 * Prepares for and executes VMRUN (64-bit register context).
1533 *
1534 * @returns VBox status code (no informational stuff).
1535 * @param pVM The cross context VM structure. (Not used.)
1536 * @param pVCpu The cross context virtual CPU structure.
1537 * @param HCPhyspVMCB Physical address of the VMCB.
1538 *
1539 * @remarks With spectre mitigations and the usual need for speed (/ micro
1540 * optimizations), we have a bunch of variations of this code depending
1541 *          on a few preconditions. In release builds, the code is entirely
1542 * without conditionals. Debug builds have a couple of assertions that
1543 * shouldn't ever be triggered.
1544 *
1545 * @{
1546 */
1547DECLASM(int) hmR0SvmVmRun_SansXcr0_SansIbpbEntry_SansIbpbExit(PVMCC pVM, PVMCPUCC pVCpu, RTHCPHYS HCPhyspVMCB);
1548DECLASM(int) hmR0SvmVmRun_WithXcr0_SansIbpbEntry_SansIbpbExit(PVMCC pVM, PVMCPUCC pVCpu, RTHCPHYS HCPhyspVMCB);
1549DECLASM(int) hmR0SvmVmRun_SansXcr0_WithIbpbEntry_SansIbpbExit(PVMCC pVM, PVMCPUCC pVCpu, RTHCPHYS HCPhyspVMCB);
1550DECLASM(int) hmR0SvmVmRun_WithXcr0_WithIbpbEntry_SansIbpbExit(PVMCC pVM, PVMCPUCC pVCpu, RTHCPHYS HCPhyspVMCB);
1551DECLASM(int) hmR0SvmVmRun_SansXcr0_SansIbpbEntry_WithIbpbExit(PVMCC pVM, PVMCPUCC pVCpu, RTHCPHYS HCPhyspVMCB);
1552DECLASM(int) hmR0SvmVmRun_WithXcr0_SansIbpbEntry_WithIbpbExit(PVMCC pVM, PVMCPUCC pVCpu, RTHCPHYS HCPhyspVMCB);
1553DECLASM(int) hmR0SvmVmRun_SansXcr0_WithIbpbEntry_WithIbpbExit(PVMCC pVM, PVMCPUCC pVCpu, RTHCPHYS HCPhyspVMCB);
1554DECLASM(int) hmR0SvmVmRun_WithXcr0_WithIbpbEntry_WithIbpbExit(PVMCC pVM, PVMCPUCC pVCpu, RTHCPHYS HCPhyspVMCB);
1555/** @} */
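
/*
 * Illustrative sketch, not part of the original source: how a caller might map
 * the XCR0 handling and HM_WSF_IBPB_XXX world-switcher flags to one of the
 * eight hmR0SvmVmRun variants above, keeping the hot path free of runtime
 * conditionals.  The typedef and function below (FNHMEXAMPLESVMVMRUN,
 * hmExampleSelectSvmVmRun) are hypothetical and only assume the variants are
 * declared in the order shown; the real selection logic lives elsewhere in HM.
 */
#if 0 /* example only */
typedef DECLASMTYPE(int) FNHMEXAMPLESVMVMRUN(PVMCC pVM, PVMCPUCC pVCpu, RTHCPHYS HCPhyspVMCB);

static FNHMEXAMPLESVMVMRUN *hmExampleSelectSvmVmRun(bool fWithXcr0, uint32_t fWorldSwitcher)
{
    static FNHMEXAMPLESVMVMRUN * const s_apfnVMRun[8] =
    {   /* index bits: 0 = XCR0 save/load, 1 = IBPB on entry, 2 = IBPB on exit. */
        hmR0SvmVmRun_SansXcr0_SansIbpbEntry_SansIbpbExit,
        hmR0SvmVmRun_WithXcr0_SansIbpbEntry_SansIbpbExit,
        hmR0SvmVmRun_SansXcr0_WithIbpbEntry_SansIbpbExit,
        hmR0SvmVmRun_WithXcr0_WithIbpbEntry_SansIbpbExit,
        hmR0SvmVmRun_SansXcr0_SansIbpbEntry_WithIbpbExit,
        hmR0SvmVmRun_WithXcr0_SansIbpbEntry_WithIbpbExit,
        hmR0SvmVmRun_SansXcr0_WithIbpbEntry_WithIbpbExit,
        hmR0SvmVmRun_WithXcr0_WithIbpbEntry_WithIbpbExit,
    };
    uintptr_t const idx = (fWithXcr0                          ? 1 : 0)
                        | (fWorldSwitcher & HM_WSF_IBPB_ENTRY ? 2 : 0)
                        | (fWorldSwitcher & HM_WSF_IBPB_EXIT  ? 4 : 0);
    return s_apfnVMRun[idx];
}
#endif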
1556
1557/** @} */
1558
1559
1560/** @addtogroup grp_hm_int_vmx VMX Internal
1561 * @{ */
1562VMM_INT_DECL(PVMXVMCSINFOSHARED) hmGetVmxActiveVmcsInfoShared(PVMCPUCC pVCpu);
1563
1564/**
1565 * Used on platforms with poor inline assembly support to retrieve all the
1566 * info from the CPU and put it in the @a pRestoreHost structure.
1567 */
1568DECLASM(void) hmR0VmxExportHostSegmentRegsAsmHlp(PVMXRESTOREHOST pRestoreHost, bool fHaveFsGsBase);
1569
1570/**
1571 * Restores some host-state fields that need not be done on every VM-exit.
1572 *
1573 * @returns VBox status code.
1574 * @param   fRestoreHostFlags   Flags indicating which host registers need
1575 *                              to be restored.
1576 * @param pRestoreHost Pointer to the host-restore structure.
1577 */
1578DECLASM(int) VMXRestoreHostState(uint32_t fRestoreHostFlags, PVMXRESTOREHOST pRestoreHost);
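
/*
 * Illustrative sketch, not part of the original source: the assumed call shape
 * for the two helpers above.  VMXRESTOREHOST and the VMX_RESTORE_HOST_XXX
 * flags come from VBox/vmm/hm_vmx.h; in the real code the structure and flags
 * live in the per-VCPU ring-0 data rather than on the stack, and the wrapper
 * function name hmExampleRestoreHostShape is hypothetical.
 */
#if 0 /* example only */
static int hmExampleRestoreHostShape(void)
{
    VMXRESTOREHOST RestoreHost;
    RT_ZERO(RestoreHost);

    /* Capture the host segment/table registers (helper for hosts without usable inline asm). */
    hmR0VmxExportHostSegmentRegsAsmHlp(&RestoreHost, true /*fHaveFsGsBase*/);

    /* ... run the guest ... then restore only what actually needs restoring. */
    uint32_t const fRestoreFlags = VMX_RESTORE_HOST_SEL_FS | VMX_RESTORE_HOST_REQUIRED;
    return VMXRestoreHostState(fRestoreFlags, &RestoreHost);
}
#endif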
1579
1580/**
1581 * VMX StartVM functions.
1582 *
1583 * @returns VBox status code (no informational stuff).
1584 * @param pVmcsInfo Pointer to the VMCS info (for cached host RIP and RSP).
1585 * @param pVCpu Pointer to the cross context per-CPU structure of the
1586 * calling EMT.
1587 * @param fResume Whether to use VMRESUME (true) or VMLAUNCH (false).
1588 *
1589 * @remarks With spectre mitigations and the usual need for speed (/ micro
1590 * optimizations), we have a bunch of variations of this code depending
1591 *          on a few preconditions. In release builds, the code is entirely
1592 * without conditionals. Debug builds have a couple of assertions that
1593 * shouldn't ever be triggered.
1594 *
1595 * @{
1596 */
1597DECLASM(int) hmR0VmxStartVm_SansXcr0_SansIbpbEntry_SansL1dEntry_SansMdsEntry_SansIbpbExit(PVMXVMCSINFO pVmcsInfo, PVMCPUCC pVCpu, bool fResume);
1598DECLASM(int) hmR0VmxStartVm_WithXcr0_SansIbpbEntry_SansL1dEntry_SansMdsEntry_SansIbpbExit(PVMXVMCSINFO pVmcsInfo, PVMCPUCC pVCpu, bool fResume);
1599DECLASM(int) hmR0VmxStartVm_SansXcr0_WithIbpbEntry_SansL1dEntry_SansMdsEntry_SansIbpbExit(PVMXVMCSINFO pVmcsInfo, PVMCPUCC pVCpu, bool fResume);
1600DECLASM(int) hmR0VmxStartVm_WithXcr0_WithIbpbEntry_SansL1dEntry_SansMdsEntry_SansIbpbExit(PVMXVMCSINFO pVmcsInfo, PVMCPUCC pVCpu, bool fResume);
1601DECLASM(int) hmR0VmxStartVm_SansXcr0_SansIbpbEntry_WithL1dEntry_SansMdsEntry_SansIbpbExit(PVMXVMCSINFO pVmcsInfo, PVMCPUCC pVCpu, bool fResume);
1602DECLASM(int) hmR0VmxStartVm_WithXcr0_SansIbpbEntry_WithL1dEntry_SansMdsEntry_SansIbpbExit(PVMXVMCSINFO pVmcsInfo, PVMCPUCC pVCpu, bool fResume);
1603DECLASM(int) hmR0VmxStartVm_SansXcr0_WithIbpbEntry_WithL1dEntry_SansMdsEntry_SansIbpbExit(PVMXVMCSINFO pVmcsInfo, PVMCPUCC pVCpu, bool fResume);
1604DECLASM(int) hmR0VmxStartVm_WithXcr0_WithIbpbEntry_WithL1dEntry_SansMdsEntry_SansIbpbExit(PVMXVMCSINFO pVmcsInfo, PVMCPUCC pVCpu, bool fResume);
1605DECLASM(int) hmR0VmxStartVm_SansXcr0_SansIbpbEntry_SansL1dEntry_WithMdsEntry_SansIbpbExit(PVMXVMCSINFO pVmcsInfo, PVMCPUCC pVCpu, bool fResume);
1606DECLASM(int) hmR0VmxStartVm_WithXcr0_SansIbpbEntry_SansL1dEntry_WithMdsEntry_SansIbpbExit(PVMXVMCSINFO pVmcsInfo, PVMCPUCC pVCpu, bool fResume);
1607DECLASM(int) hmR0VmxStartVm_SansXcr0_WithIbpbEntry_SansL1dEntry_WithMdsEntry_SansIbpbExit(PVMXVMCSINFO pVmcsInfo, PVMCPUCC pVCpu, bool fResume);
1608DECLASM(int) hmR0VmxStartVm_WithXcr0_WithIbpbEntry_SansL1dEntry_WithMdsEntry_SansIbpbExit(PVMXVMCSINFO pVmcsInfo, PVMCPUCC pVCpu, bool fResume);
1609DECLASM(int) hmR0VmxStartVm_SansXcr0_SansIbpbEntry_WithL1dEntry_WithMdsEntry_SansIbpbExit(PVMXVMCSINFO pVmcsInfo, PVMCPUCC pVCpu, bool fResume);
1610DECLASM(int) hmR0VmxStartVm_WithXcr0_SansIbpbEntry_WithL1dEntry_WithMdsEntry_SansIbpbExit(PVMXVMCSINFO pVmcsInfo, PVMCPUCC pVCpu, bool fResume);
1611DECLASM(int) hmR0VmxStartVm_SansXcr0_WithIbpbEntry_WithL1dEntry_WithMdsEntry_SansIbpbExit(PVMXVMCSINFO pVmcsInfo, PVMCPUCC pVCpu, bool fResume);
1612DECLASM(int) hmR0VmxStartVm_WithXcr0_WithIbpbEntry_WithL1dEntry_WithMdsEntry_SansIbpbExit(PVMXVMCSINFO pVmcsInfo, PVMCPUCC pVCpu, bool fResume);
1613DECLASM(int) hmR0VmxStartVm_SansXcr0_SansIbpbEntry_SansL1dEntry_SansMdsEntry_WithIbpbExit(PVMXVMCSINFO pVmcsInfo, PVMCPUCC pVCpu, bool fResume);
1614DECLASM(int) hmR0VmxStartVm_WithXcr0_SansIbpbEntry_SansL1dEntry_SansMdsEntry_WithIbpbExit(PVMXVMCSINFO pVmcsInfo, PVMCPUCC pVCpu, bool fResume);
1615DECLASM(int) hmR0VmxStartVm_SansXcr0_WithIbpbEntry_SansL1dEntry_SansMdsEntry_WithIbpbExit(PVMXVMCSINFO pVmcsInfo, PVMCPUCC pVCpu, bool fResume);
1616DECLASM(int) hmR0VmxStartVm_WithXcr0_WithIbpbEntry_SansL1dEntry_SansMdsEntry_WithIbpbExit(PVMXVMCSINFO pVmcsInfo, PVMCPUCC pVCpu, bool fResume);
1617DECLASM(int) hmR0VmxStartVm_SansXcr0_SansIbpbEntry_WithL1dEntry_SansMdsEntry_WithIbpbExit(PVMXVMCSINFO pVmcsInfo, PVMCPUCC pVCpu, bool fResume);
1618DECLASM(int) hmR0VmxStartVm_WithXcr0_SansIbpbEntry_WithL1dEntry_SansMdsEntry_WithIbpbExit(PVMXVMCSINFO pVmcsInfo, PVMCPUCC pVCpu, bool fResume);
1619DECLASM(int) hmR0VmxStartVm_SansXcr0_WithIbpbEntry_WithL1dEntry_SansMdsEntry_WithIbpbExit(PVMXVMCSINFO pVmcsInfo, PVMCPUCC pVCpu, bool fResume);
1620DECLASM(int) hmR0VmxStartVm_WithXcr0_WithIbpbEntry_WithL1dEntry_SansMdsEntry_WithIbpbExit(PVMXVMCSINFO pVmcsInfo, PVMCPUCC pVCpu, bool fResume);
1621DECLASM(int) hmR0VmxStartVm_SansXcr0_SansIbpbEntry_SansL1dEntry_WithMdsEntry_WithIbpbExit(PVMXVMCSINFO pVmcsInfo, PVMCPUCC pVCpu, bool fResume);
1622DECLASM(int) hmR0VmxStartVm_WithXcr0_SansIbpbEntry_SansL1dEntry_WithMdsEntry_WithIbpbExit(PVMXVMCSINFO pVmcsInfo, PVMCPUCC pVCpu, bool fResume);
1623DECLASM(int) hmR0VmxStartVm_SansXcr0_WithIbpbEntry_SansL1dEntry_WithMdsEntry_WithIbpbExit(PVMXVMCSINFO pVmcsInfo, PVMCPUCC pVCpu, bool fResume);
1624DECLASM(int) hmR0VmxStartVm_WithXcr0_WithIbpbEntry_SansL1dEntry_WithMdsEntry_WithIbpbExit(PVMXVMCSINFO pVmcsInfo, PVMCPUCC pVCpu, bool fResume);
1625DECLASM(int) hmR0VmxStartVm_SansXcr0_SansIbpbEntry_WithL1dEntry_WithMdsEntry_WithIbpbExit(PVMXVMCSINFO pVmcsInfo, PVMCPUCC pVCpu, bool fResume);
1626DECLASM(int) hmR0VmxStartVm_WithXcr0_SansIbpbEntry_WithL1dEntry_WithMdsEntry_WithIbpbExit(PVMXVMCSINFO pVmcsInfo, PVMCPUCC pVCpu, bool fResume);
1627DECLASM(int) hmR0VmxStartVm_SansXcr0_WithIbpbEntry_WithL1dEntry_WithMdsEntry_WithIbpbExit(PVMXVMCSINFO pVmcsInfo, PVMCPUCC pVCpu, bool fResume);
1628DECLASM(int) hmR0VmxStartVm_WithXcr0_WithIbpbEntry_WithL1dEntry_WithMdsEntry_WithIbpbExit(PVMXVMCSINFO pVmcsInfo, PVMCPUCC pVCpu, bool fResume);
1629/** @} */
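
/*
 * Illustrative sketch, not part of the original source: the 32 hmR0VmxStartVm
 * variants above are declared in an order that suggests a simple selector
 * index.  The mapping is inferred from the names, not from the real selection
 * code, which lives elsewhere in HM; hmExampleVmxStartVmIndex is hypothetical.
 */
#if 0 /* example only */
static uintptr_t hmExampleVmxStartVmIndex(bool fWithXcr0, uint32_t fWorldSwitcher)
{
    /* bit 0 = XCR0 save/load, bit 1 = IBPB on entry, bit 2 = L1D flush on entry,
       bit 3 = MDS buffer clear on entry, bit 4 = IBPB on exit. */
    return (fWithXcr0                          ?  1 : 0)
         | (fWorldSwitcher & HM_WSF_IBPB_ENTRY ?  2 : 0)
         | (fWorldSwitcher & HM_WSF_L1D_ENTRY  ?  4 : 0)
         | (fWorldSwitcher & HM_WSF_MDS_ENTRY  ?  8 : 0)
         | (fWorldSwitcher & HM_WSF_IBPB_EXIT  ? 16 : 0);
}
#endif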
1630
1631/** @} */
1632
1633/** @} */
1634
1635RT_C_DECLS_END
1636
1637#endif /* !VMM_INCLUDED_SRC_include_HMInternal_h */
1638