
source: vbox/trunk/include/VBox/vmm/hm.h@48565

Last change on this file since 48565 was 48565, checked in by vboxsync, 12 years ago

VMM/HM: Added total VM-exits STAM.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 11.2 KB
/** @file
 * HM - Intel/AMD VM Hardware Assisted Virtualization Manager (VMM)
 */

/*
 * Copyright (C) 2006-2013 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 *
 * The contents of this file may alternatively be used under the terms
 * of the Common Development and Distribution License Version 1.0
 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
 * VirtualBox OSE distribution, in which case the provisions of the
 * CDDL are applicable instead of those of the GPL.
 *
 * You may elect to license modified versions of this file under the
 * terms and conditions of either the GPL or the CDDL or both.
 */

#ifndef ___VBox_vmm_hm_h
#define ___VBox_vmm_hm_h

#include <VBox/vmm/pgm.h>
#include <VBox/vmm/cpum.h>
#include <VBox/vmm/vmm.h>
#include <iprt/mp.h>


/** @defgroup grp_hm The VM Hardware Manager API
 * @{
 */

RT_C_DECLS_BEGIN

/** @def VMCPU_HMCF_CLEAR
 * Clears an HM-context flag for the given VCPU.
 *
 * @param pVCpu Pointer to the VMCPU.
 * @param fFlag The flag to clear.
 */
#define VMCPU_HMCF_CLEAR(pVCpu, fFlag) ((pVCpu)->hm.s.fContextUseFlags &= ~(fFlag))

/** @def VMCPU_HMCF_SET
 * Sets an HM-context flag for the given VCPU.
 *
 * @param pVCpu Pointer to the VMCPU.
 * @param fFlag The flag to set.
 */
#define VMCPU_HMCF_SET(pVCpu, fFlag) ((pVCpu)->hm.s.fContextUseFlags |= (fFlag))

/** @def VMCPU_HMCF_IS_SET
 * Checks whether all of the specified HM-context flags are set (other flags
 * may be set as well).
 *
 * @param pVCpu Pointer to the VMCPU.
 * @param fFlag The flag(s) to check.
 */
#define VMCPU_HMCF_IS_SET(pVCpu, fFlag) (((pVCpu)->hm.s.fContextUseFlags & (fFlag)) == (fFlag))

/** @def VMCPU_HMCF_IS_PENDING
 * Checks whether any of the specified HM-context flags are pending.
 *
 * @param pVCpu Pointer to the VMCPU.
 * @param fFlags The flags to check for.
 */
#define VMCPU_HMCF_IS_PENDING(pVCpu, fFlags) RT_BOOL((pVCpu)->hm.s.fContextUseFlags & (fFlags))

/** @def VMCPU_HMCF_RESET_TO
 * Resets the HM-context flags to the specified value.
 *
 * @param pVCpu Pointer to the VMCPU.
 * @param fFlags The reset value.
 */
#define VMCPU_HMCF_RESET_TO(pVCpu, fFlags) ((pVCpu)->hm.s.fContextUseFlags = (fFlags))

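/*
 * Illustrative usage sketch for the VMCPU_HMCF_* macros (hypothetical call
 * site; HM_CHANGED_GUEST_CR0 and hmR0SyncGuestCr0Example() are assumed names
 * used for illustration only, they are not defined in this header):
 *
 * @code
 *     VMCPU_HMCF_SET(pVCpu, HM_CHANGED_GUEST_CR0);            // mark guest CR0 as needing a re-sync
 *
 *     if (VMCPU_HMCF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_CR0))
 *     {
 *         hmR0SyncGuestCr0Example(pVCpu);                     // hypothetical helper doing the sync
 *         VMCPU_HMCF_CLEAR(pVCpu, HM_CHANGED_GUEST_CR0);      // the flag is no longer pending
 *     }
 * @endcode
 */
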
/**
 * Checks whether HM (VT-x/AMD-V) is being used by this VM.
 *
 * @retval @c true if used.
 * @retval @c false if software virtualization (raw-mode) is used.
 *
 * @param a_pVM The cross context VM structure.
 * @sa HMIsEnabledNotMacro, HMR3IsEnabled
 * @internal
 */
#if defined(VBOX_STRICT) && defined(IN_RING3)
# define HMIsEnabled(a_pVM) HMIsEnabledNotMacro(a_pVM)
#else
# define HMIsEnabled(a_pVM) ((a_pVM)->fHMEnabled)
#endif

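/*
 * Minimal sketch of the intended call pattern (hypothetical caller; the two
 * run functions are assumed names, not part of this API):
 *
 * @code
 *     if (HMIsEnabled(pVM))
 *         rc = runGuestWithHwAccelExample(pVM, pVCpu);   // VT-x/AMD-V execution path
 *     else
 *         rc = runGuestInRawModeExample(pVM, pVCpu);     // software virtualization (raw-mode) path
 * @endcode
 */
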
/**
 * Checks whether raw-mode context is required for any purpose.
 *
 * @retval @c true if required either by raw-mode itself or by HM for
 *         switching the CPU to 64-bit mode.
 * @retval @c false if not required.
 *
 * @param a_pVM The cross context VM structure.
 * @internal
 */
#if HC_ARCH_BITS == 64
# define HMIsRawModeCtxNeeded(a_pVM) (!HMIsEnabled(a_pVM))
#else
# define HMIsRawModeCtxNeeded(a_pVM) (!HMIsEnabled(a_pVM) || (a_pVM)->fHMNeedRawModeCtx)
#endif

/**
 * Checks if the current CPU state is valid for emulating I/O blocks in the recompiler.
 *
 * @returns boolean
 * @param a_pVCpu Pointer to the shared virtual CPU structure.
 * @internal
 */
#define HMCanEmulateIoBlock(a_pVCpu) (!CPUMIsGuestInPagedProtectedMode(a_pVCpu))

/**
 * Checks if the given CPU context is valid for emulating I/O blocks in the recompiler.
 *
 * @returns boolean
 * @param a_pCtx Pointer to the CPU context (within PVM).
 * @internal
 */
#define HMCanEmulateIoBlockEx(a_pCtx) (!CPUMIsGuestInPagedProtectedModeEx(a_pCtx))

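/*
 * One plausible call pattern (a sketch only, not taken from the actual
 * callers): guard the ring-3 recompiler fallback on this paging-mode check.
 *
 * @code
 *     if (HMCanEmulateIoBlockEx(pCtx))
 *         rc = HMR3EmulateIoBlock(pVM, pCtx);   // declared in the ring-3 section below
 * @endcode
 */
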
/**
 * Checks whether we're in the special hardware virtualization context.
 * @returns true / false.
 * @param a_pVCpu The caller's cross context virtual CPU structure.
 * @thread EMT
 */
#ifdef IN_RING0
# define HMIsInHwVirtCtx(a_pVCpu) (VMCPU_GET_STATE(a_pVCpu) == VMCPUSTATE_STARTED_HM)
#else
# define HMIsInHwVirtCtx(a_pVCpu) (false)
#endif

/**
 * Checks whether we're in the special hardware virtualization context and we
 * cannot perform a long jump without guru meditating and possibly messing up
 * the host and/or guest state.
 *
 * This is after we've turned interrupts off and such.
 *
 * @returns true / false.
 * @param a_pVCpu The caller's cross context virtual CPU structure.
 * @thread EMT
 */
#ifdef IN_RING0
# define HMIsInHwVirtNoLongJmpCtx(a_pVCpu) (VMCPU_GET_STATE(a_pVCpu) == VMCPUSTATE_STARTED_EXEC)
#else
# define HMIsInHwVirtNoLongJmpCtx(a_pVCpu) (false)
#endif

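/*
 * These checks are typically useful in sanity assertions (sketch only;
 * whether the surrounding code may long-jump is an assumption made for
 * illustration):
 *
 * @code
 *     // About to long-jump back to ring-3, so we must not be in the
 *     // no-long-jump hardware virtualization context.
 *     Assert(!HMIsInHwVirtNoLongJmpCtx(pVCpu));
 * @endcode
 */
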
/**
 * 64-bit raw-mode (intermediate memory context) operations.
 *
 * These are special hypervisor eip values used when running 64-bit guests on
 * 32-bit hosts. Each operation corresponds to a routine.
 *
 * @note Duplicated in the assembly code!
 */
typedef enum HM64ON32OP
{
    HM64ON32OP_INVALID = 0,
    HM64ON32OP_VMXRCStartVM64,
    HM64ON32OP_SVMRCVMRun64,
    HM64ON32OP_HMRCSaveGuestFPU64,
    HM64ON32OP_HMRCSaveGuestDebug64,
    HM64ON32OP_HMRCTestSwitcher64,
    HM64ON32OP_END,
    HM64ON32OP_32BIT_HACK = 0x7fffffff
} HM64ON32OP;

VMMDECL(bool) HMIsEnabledNotMacro(PVM pVM);
VMM_INT_DECL(int) HMInvalidatePage(PVMCPU pVCpu, RTGCPTR GCVirt);
VMM_INT_DECL(bool) HMHasPendingIrq(PVM pVM);
VMM_INT_DECL(PX86PDPE) HMGetPaePdpes(PVMCPU pVCpu);
VMM_INT_DECL(int) HMAmdIsSubjectToErratum170(uint32_t *pu32Family, uint32_t *pu32Model, uint32_t *pu32Stepping);
VMM_INT_DECL(bool) HMSetSingleInstruction(PVMCPU pVCpu, bool fEnable);

#ifndef IN_RC
VMM_INT_DECL(int) HMFlushTLB(PVMCPU pVCpu);
VMM_INT_DECL(int) HMFlushTLBOnAllVCpus(PVM pVM);
VMM_INT_DECL(int) HMInvalidatePageOnAllVCpus(PVM pVM, RTGCPTR GCVirt);
VMM_INT_DECL(int) HMInvalidatePhysPage(PVM pVM, RTGCPHYS GCPhys);
VMM_INT_DECL(bool) HMIsNestedPagingActive(PVM pVM);
VMM_INT_DECL(PGMMODE) HMGetShwPagingMode(PVM pVM);
#else /* Nops in RC: */
# define HMFlushTLB(pVCpu) do { } while (0)
# define HMIsNestedPagingActive(pVM) false
# define HMFlushTLBOnAllVCpus(pVM) do { } while (0)
#endif

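/*
 * Sketch of a cross-VCPU invalidation call (hypothetical caller; GCPtrPage is
 * an assumed local variable holding the guest address whose mapping changed):
 *
 * @code
 *     int rc = HMInvalidatePageOnAllVCpus(pVM, GCPtrPage);
 *     AssertRC(rc);
 * @endcode
 */
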
#ifdef IN_RING0
/** @defgroup grp_hm_r0 The VM Hardware Manager API
 * @ingroup grp_hm
 * @{
 */
VMMR0_INT_DECL(int) HMR0Init(void);
VMMR0_INT_DECL(int) HMR0Term(void);
VMMR0_INT_DECL(int) HMR0InitVM(PVM pVM);
VMMR0_INT_DECL(int) HMR0TermVM(PVM pVM);
VMMR0_INT_DECL(int) HMR0EnableAllCpus(PVM pVM);
VMMR0_INT_DECL(int) HMR0EnterSwitcher(PVM pVM, VMMSWITCHER enmSwitcher, bool *pfVTxDisabled);
VMMR0_INT_DECL(void) HMR0LeaveSwitcher(PVM pVM, bool fVTxDisabled);

VMMR0_INT_DECL(void) HMR0SavePendingIOPortWrite(PVMCPU pVCpu, RTGCPTR GCPtrRip, RTGCPTR GCPtrRipNext,
                                                unsigned uPort, unsigned uAndVal, unsigned cbSize);
VMMR0_INT_DECL(void) HMR0SavePendingIOPortRead(PVMCPU pVCpu, RTGCPTR GCPtrRip, RTGCPTR GCPtrRipNext,
                                               unsigned uPort, unsigned uAndVal, unsigned cbSize);

/** @} */
#endif /* IN_RING0 */


#ifdef IN_RING3
/** @defgroup grp_hm_r3 The VM Hardware Manager API
 * @ingroup grp_hm
 * @{
 */
VMMR3DECL(bool) HMR3IsEnabled(PUVM pUVM);
VMMR3DECL(bool) HMR3IsNestedPagingActive(PUVM pUVM);
VMMR3DECL(bool) HMR3IsVpidActive(PUVM pUVM);
VMMR3DECL(bool) HMR3IsUXActive(PUVM pUVM);
VMMR3DECL(bool) HMR3IsSvmEnabled(PUVM pUVM);
VMMR3DECL(bool) HMR3IsVmxEnabled(PUVM pUVM);

VMMR3_INT_DECL(bool) HMR3IsEventPending(PVMCPU pVCpu);
VMMR3_INT_DECL(int) HMR3Init(PVM pVM);
VMMR3_INT_DECL(int) HMR3InitCompleted(PVM pVM, VMINITCOMPLETED enmWhat);
VMMR3_INT_DECL(void) HMR3Relocate(PVM pVM);
VMMR3_INT_DECL(int) HMR3Term(PVM pVM);
VMMR3_INT_DECL(void) HMR3Reset(PVM pVM);
VMMR3_INT_DECL(void) HMR3ResetCpu(PVMCPU pVCpu);
VMMR3_INT_DECL(void) HMR3CheckError(PVM pVM, int iStatusCode);
VMMR3DECL(bool) HMR3CanExecuteGuest(PVM pVM, PCPUMCTX pCtx);
VMMR3_INT_DECL(void) HMR3NotifyScheduled(PVMCPU pVCpu);
VMMR3_INT_DECL(void) HMR3NotifyEmulated(PVMCPU pVCpu);
VMMR3_INT_DECL(bool) HMR3IsActive(PVMCPU pVCpu);
VMMR3_INT_DECL(void) HMR3PagingModeChanged(PVM pVM, PVMCPU pVCpu, PGMMODE enmShadowMode, PGMMODE enmGuestMode);
VMMR3_INT_DECL(int) HMR3EmulateIoBlock(PVM pVM, PCPUMCTX pCtx);
VMMR3_INT_DECL(VBOXSTRICTRC) HMR3RestartPendingIOInstr(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx);
VMMR3_INT_DECL(int) HMR3EnablePatching(PVM pVM, RTGCPTR pPatchMem, unsigned cbPatchMem);
VMMR3_INT_DECL(int) HMR3DisablePatching(PVM pVM, RTGCPTR pPatchMem, unsigned cbPatchMem);
VMMR3_INT_DECL(int) HMR3PatchTprInstr(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx);
VMMR3_INT_DECL(bool) HMR3IsRescheduleRequired(PVM pVM, PCPUMCTX pCtx);
VMMR3_INT_DECL(bool) HMR3IsVmxPreemptionTimerUsed(PVM pVM);

/** @} */
#endif /* IN_RING3 */

#ifdef IN_RING0
/** @addtogroup grp_hm_r0
 * @{
 */
/** Disables preemption if required. */
# define HM_DISABLE_PREEMPT_IF_NEEDED() \
    RTTHREADPREEMPTSTATE PreemptStateInternal = RTTHREADPREEMPTSTATE_INITIALIZER; \
    bool fPreemptDisabledInternal = false; \
    if (RTThreadPreemptIsEnabled(NIL_RTTHREAD)) \
    { \
        Assert(VMMR0ThreadCtxHooksAreRegistered(pVCpu)); \
        RTThreadPreemptDisable(&PreemptStateInternal); \
        fPreemptDisabledInternal = true; \
    }

/** Restores preemption if previously disabled by HM_DISABLE_PREEMPT_IF_NEEDED(). */
# define HM_RESTORE_PREEMPT_IF_NEEDED() \
    do \
    { \
        if (fPreemptDisabledInternal) \
            RTThreadPreemptRestore(&PreemptStateInternal); \
    } while (0)

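/*
 * Usage sketch (hypothetical ring-0 function): HM_DISABLE_PREEMPT_IF_NEEDED()
 * declares local state variables, so both macros must be used within the same
 * scope and the restore must be reached on every exit path.
 *
 * @code
 *     static void hmR0TouchPerCpuStateExample(PVMCPU pVCpu)   // assumed name, for illustration only
 *     {
 *         HM_DISABLE_PREEMPT_IF_NEEDED();
 *         // ... work that must stay on the current host CPU ...
 *         HM_RESTORE_PREEMPT_IF_NEEDED();
 *     }
 * @endcode
 */
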
VMMR0_INT_DECL(int) HMR0SetupVM(PVM pVM);
VMMR0_INT_DECL(int) HMR0RunGuestCode(PVM pVM, PVMCPU pVCpu);
VMMR0_INT_DECL(int) HMR0Enter(PVM pVM, PVMCPU pVCpu);
VMMR0_INT_DECL(int) HMR0Leave(PVM pVM, PVMCPU pVCpu);
VMMR0_INT_DECL(int) HMR0EnterCpu(PVMCPU pVCpu);
VMMR0_INT_DECL(int) HMR0LeaveCpu(PVMCPU pVCpu);
VMMR0_INT_DECL(void) HMR0ThreadCtxCallback(RTTHREADCTXEVENT enmEvent, void *pvUser);
VMMR0_INT_DECL(bool) HMR0SuspendPending(void);

# if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS)
VMMR0_INT_DECL(int) HMR0SaveFPUState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx);
VMMR0_INT_DECL(int) HMR0SaveDebugState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx);
VMMR0_INT_DECL(int) HMR0TestSwitcher3264(PVM pVM);
# endif

/** @} */
#endif /* IN_RING0 */


/** @} */
RT_C_DECLS_END


#endif
