VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/HMAll.cpp @ 73606

Last change on this file since 73606 was 73606, checked in by vboxsync, 7 years ago

VMM: Nested VMX: bugref:9180 Various bits:

  • IEM: Started VMXON, VMXOFF implementation, use IEM_OPCODE_GET_NEXT_RM.
  • IEM: Fixed INVPCID C impl, removed unused IEMExecDecodedInvpcid.
  • IEM: Updated iemCImpl_load_CrX to check for CR0/CR4 fixed bits in VMX.
  • IEM: Updated offModRm to reset/re-initialize where needed.
  • CPUM: Added VMX root, non-root mode and other bits and updated a few places where they're used.
  • HM: Started adding fine-grained VMX instruction failure diagnostics.
  • HM: Made VM instruction error an enum.
  • HM: Added HMVMXAll.cpp for all context VMX code.
  • Ensure building with VBOX_WITH_NESTED_HWVIRT_[SVM|VMX] does the right thing based on host CPU.
  • CPUM: Added dumping of nested-VMX CPUMCTX state.
  • HMVMXR0: Added memory operand decoding.
  • HMVMXR0: VMX instr. privilege checks (CR0/CR4 read shadows are not consulted, so we need to do them).
  • HM: Added some more bit-field representations.
  • Recompiler: Refuse to run when in nested-VMX guest code.
  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 15.7 KB
/* $Id: HMAll.cpp 73606 2018-08-10 07:38:56Z vboxsync $ */
/** @file
 * HM - All contexts.
 */

/*
 * Copyright (C) 2006-2017 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#define LOG_GROUP LOG_GROUP_HM
#define VMCPU_INCL_CPUM_GST_CTX
#include <VBox/vmm/hm.h>
#include <VBox/vmm/pgm.h>
#include "HMInternal.h"
#include <VBox/vmm/vm.h>
#include <VBox/vmm/hm_vmx.h>
#include <VBox/vmm/hm_svm.h>
#include <VBox/err.h>
#include <VBox/log.h>
#include <iprt/param.h>
#include <iprt/assert.h>
#include <iprt/asm.h>
#include <iprt/string.h>
#include <iprt/thread.h>
#include <iprt/x86.h>
#include <iprt/asm-amd64-x86.h>

/**
 * Checks whether HM (VT-x/AMD-V) is being used by this VM.
 *
 * @retval  true if used.
 * @retval  false if software virtualization (raw-mode) is used.
 * @param   pVM         The cross context VM structure.
 * @sa      HMIsEnabled, HMR3IsEnabled
 * @internal
 */
VMMDECL(bool) HMIsEnabledNotMacro(PVM pVM)
{
    Assert(pVM->bMainExecutionEngine != VM_EXEC_ENGINE_NOT_SET);
    return pVM->fHMEnabled;
}


/**
 * Queues a guest page for invalidation.
 *
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   GCVirt      Page to invalidate.
 */
static void hmQueueInvlPage(PVMCPU pVCpu, RTGCPTR GCVirt)
{
    /* Nothing to do if a TLB flush is already pending. */
    if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_TLB_FLUSH))
        return;
    VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
    NOREF(GCVirt);
}

/**
 * Invalidates a guest page.
 *
 * @returns VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   GCVirt      Page to invalidate.
 */
VMM_INT_DECL(int) HMInvalidatePage(PVMCPU pVCpu, RTGCPTR GCVirt)
{
    STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushPageManual);
#ifdef IN_RING0
    return HMR0InvalidatePage(pVCpu, GCVirt);
#else
    hmQueueInvlPage(pVCpu, GCVirt);
    return VINF_SUCCESS;
#endif
}


#ifdef IN_RING0

/**
 * Dummy RTMpOnSpecific handler since RTMpPokeCpu couldn't be used.
 */
static DECLCALLBACK(void) hmFlushHandler(RTCPUID idCpu, void *pvUser1, void *pvUser2)
{
    NOREF(idCpu); NOREF(pvUser1); NOREF(pvUser2);
    return;
}


/**
 * Wrapper for RTMpPokeCpu to deal with VERR_NOT_SUPPORTED.
 */
static void hmR0PokeCpu(PVMCPU pVCpu, RTCPUID idHostCpu)
{
    uint32_t cWorldSwitchExits = ASMAtomicUoReadU32(&pVCpu->hm.s.cWorldSwitchExits);

    STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatPoke, x);
    int rc = RTMpPokeCpu(idHostCpu);
    STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatPoke, x);

    /* Not implemented on some platforms (Darwin, Linux kernel < 2.6.19); fall
       back to a less efficient implementation (broadcast). */
    if (rc == VERR_NOT_SUPPORTED)
    {
        STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatSpinPoke, z);
        /* synchronous. */
        RTMpOnSpecific(idHostCpu, hmFlushHandler, 0, 0);
        STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatSpinPoke, z);
    }
    else
    {
        if (rc == VINF_SUCCESS)
            STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatSpinPoke, z);
        else
            STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatSpinPokeFailed, z);

/** @todo If more than one CPU is going to be poked, we could optimize this
 *        operation by poking them first and waiting afterwards. Would require
 *        recording who to poke and their current cWorldSwitchExits values,
 *        that's something not suitable for stack... So, pVCpu->hm.s.something
 *        then. */
        /* Spin until the VCPU has switched back (poking is async). */
        while (   ASMAtomicUoReadBool(&pVCpu->hm.s.fCheckedTLBFlush)
               && cWorldSwitchExits == ASMAtomicUoReadU32(&pVCpu->hm.s.cWorldSwitchExits))
            ASMNopPause();

        if (rc == VINF_SUCCESS)
            STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatSpinPoke, z);
        else
            STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatSpinPokeFailed, z);
    }
}

#endif /* IN_RING0 */
#ifndef IN_RC

/**
 * Flushes the guest TLB.
 *
 * @returns VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure.
 */
VMM_INT_DECL(int) HMFlushTLB(PVMCPU pVCpu)
{
    LogFlow(("HMFlushTLB\n"));

    VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
    STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbManual);
    return VINF_SUCCESS;
}

/**
 * Pokes an EMT so it can perform the appropriate TLB shootdowns.
 *
 * @param   pVCpu               The cross context virtual CPU structure of the
 *                              EMT to poke.
 * @param   fAccountFlushStat   Whether to account the call to
 *                              StatTlbShootdownFlush or StatTlbShootdown.
 */
static void hmPokeCpuForTlbFlush(PVMCPU pVCpu, bool fAccountFlushStat)
{
    if (ASMAtomicUoReadBool(&pVCpu->hm.s.fCheckedTLBFlush))
    {
        if (fAccountFlushStat)
            STAM_COUNTER_INC(&pVCpu->hm.s.StatTlbShootdownFlush);
        else
            STAM_COUNTER_INC(&pVCpu->hm.s.StatTlbShootdown);
#ifdef IN_RING0
        RTCPUID idHostCpu = pVCpu->hm.s.idEnteredCpu;
        if (idHostCpu != NIL_RTCPUID)
            hmR0PokeCpu(pVCpu, idHostCpu);
#else
        VMR3NotifyCpuFFU(pVCpu->pUVCpu, VMNOTIFYFF_FLAGS_POKE);
#endif
    }
    else
        STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushPageManual);
}

/**
 * Invalidates a guest page on all VCPUs.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   GCVirt      Page to invalidate.
 */
VMM_INT_DECL(int) HMInvalidatePageOnAllVCpus(PVM pVM, RTGCPTR GCVirt)
{
    /*
     * The VT-x/AMD-V code will be flushing the TLB each time a VCPU migrates to a different
     * host CPU, see hmR0VmxFlushTaggedTlbBoth() and hmR0SvmFlushTaggedTlb().
     *
     * This is the reason why we do not care about thread preemption here and just
     * execute HMInvalidatePage() assuming it might be the 'right' CPU.
     */
    VMCPUID idCurCpu = VMMGetCpuId(pVM);
    STAM_COUNTER_INC(&pVM->aCpus[idCurCpu].hm.s.StatFlushPage);

    for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
    {
        PVMCPU pVCpu = &pVM->aCpus[idCpu];

        /* Nothing to do if a TLB flush is already pending; the VCPU should
           have already been poked if it were active. */
        if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_TLB_FLUSH))
            continue;

        if (pVCpu->idCpu == idCurCpu)
            HMInvalidatePage(pVCpu, GCVirt);
        else
        {
            hmQueueInvlPage(pVCpu, GCVirt);
            hmPokeCpuForTlbFlush(pVCpu, false /* fAccountFlushStat */);
        }
    }

    return VINF_SUCCESS;
}


/**
 * Flushes the TLBs of all VCPUs.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 */
VMM_INT_DECL(int) HMFlushTLBOnAllVCpus(PVM pVM)
{
    if (pVM->cCpus == 1)
        return HMFlushTLB(&pVM->aCpus[0]);

    VMCPUID idThisCpu = VMMGetCpuId(pVM);

    STAM_COUNTER_INC(&pVM->aCpus[idThisCpu].hm.s.StatFlushTlb);

    for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
    {
        PVMCPU pVCpu = &pVM->aCpus[idCpu];

        /* Nothing to do if a TLB flush is already pending; the VCPU should
           have already been poked if it were active. */
        if (!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_TLB_FLUSH))
        {
            VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
            if (idThisCpu != idCpu)
                hmPokeCpuForTlbFlush(pVCpu, true /* fAccountFlushStat */);
        }
    }

    return VINF_SUCCESS;
}

/**
 * Invalidates a guest page by physical address.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   GCPhys      Page to invalidate.
 *
 * @remarks Assumes the current instruction references this physical page
 *          through a virtual address!
 */
VMM_INT_DECL(int) HMInvalidatePhysPage(PVM pVM, RTGCPHYS GCPhys)
{
    if (!HMIsNestedPagingActive(pVM))
        return VINF_SUCCESS;

    /*
     * AMD-V: Doesn't support invalidation with guest physical addresses.
     *
     * VT-x: Doesn't support invalidation with guest physical addresses.
     *       The INVVPID instruction takes only a linear address, while INVEPT
     *       flushes only by EPT context, not by individual addresses.
     *
     * We update the force flag and flush before the next VM-entry, see @bugref{6568}.
     */
    RT_NOREF(GCPhys);
    /** @todo Remove this or figure out a way to update the Phys STAT counter. */
    /* STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbInvlpgPhys); */
    return HMFlushTLBOnAllVCpus(pVM);
}


/**
 * Checks if nested paging is enabled.
 *
 * @returns true if nested paging is active, false otherwise.
 * @param   pVM         The cross context VM structure.
 *
 * @remarks Works before hmR3InitFinalizeR0.
 */
VMM_INT_DECL(bool) HMIsNestedPagingActive(PVM pVM)
{
    return HMIsEnabled(pVM) && pVM->hm.s.fNestedPaging;
}

/**
 * Checks if both nested paging and unhampered guest execution are enabled.
 *
 * The almost complete guest execution in hardware is only applicable to VT-x.
 *
 * @returns true if we have both enabled, otherwise false.
 * @param   pVM         The cross context VM structure.
 *
 * @remarks Works before hmR3InitFinalizeR0.
 */
VMM_INT_DECL(bool) HMAreNestedPagingAndFullGuestExecEnabled(PVM pVM)
{
    return HMIsEnabled(pVM)
        && pVM->hm.s.fNestedPaging
        && (   pVM->hm.s.vmx.fUnrestrictedGuest
            || pVM->hm.s.svm.fSupported);
}


/**
 * Checks if this VM is using HM and is long-mode capable.
 *
 * Use VMR3IsLongModeAllowed() instead of this, when possible.
 *
 * @returns true if long mode is allowed, false otherwise.
 * @param   pVM         The cross context VM structure.
 * @sa      VMR3IsLongModeAllowed, NEMHCIsLongModeAllowed
 */
VMM_INT_DECL(bool) HMIsLongModeAllowed(PVM pVM)
{
    return HMIsEnabled(pVM) && pVM->hm.s.fAllow64BitGuests;
}


/**
 * Checks if MSR bitmaps are active. It is assumed that when it's available
 * it will be used as well.
 *
 * @returns true if MSR bitmaps are available, false otherwise.
 * @param   pVM         The cross context VM structure.
 */
VMM_INT_DECL(bool) HMIsMsrBitmapActive(PVM pVM)
{
    if (HMIsEnabled(pVM))
    {
        if (pVM->hm.s.svm.fSupported)
            return true;

        if (   pVM->hm.s.vmx.fSupported
            && (pVM->hm.s.vmx.Msrs.ProcCtls.n.allowed1 & VMX_PROC_CTLS_USE_MSR_BITMAPS))
            return true;
    }
    return false;
}


/**
 * Checks if AMD-V is active.
 *
 * @returns true if AMD-V is active.
 * @param   pVM         The cross context VM structure.
 *
 * @remarks Works before hmR3InitFinalizeR0.
 */
VMM_INT_DECL(bool) HMIsSvmActive(PVM pVM)
{
    return pVM->hm.s.svm.fSupported && HMIsEnabled(pVM);
}


/**
 * Checks if VT-x is active.
 *
 * @returns true if VT-x is active.
 * @param   pVM         The cross context VM structure.
 *
 * @remarks Works before hmR3InitFinalizeR0.
 */
VMM_INT_DECL(bool) HMIsVmxActive(PVM pVM)
{
    return HMIsVmxSupported(pVM) && HMIsEnabled(pVM);
}


/**
 * Checks if VT-x is supported by the host CPU.
 *
 * @returns true if VT-x is supported, false otherwise.
 * @param   pVM         The cross context VM structure.
 *
 * @remarks Works before hmR3InitFinalizeR0.
 */
VMM_INT_DECL(bool) HMIsVmxSupported(PVM pVM)
{
    return pVM->hm.s.vmx.fSupported;
}

#endif /* !IN_RC */

/**
 * Checks if an interrupt event is currently pending.
 *
 * @returns Interrupt event pending state.
 * @param   pVM         The cross context VM structure.
 */
VMM_INT_DECL(bool) HMHasPendingIrq(PVM pVM)
{
    PVMCPU pVCpu = VMMGetCpu(pVM);
    return !!pVCpu->hm.s.Event.fPending;
}


/**
 * Returns the PAE PDPE entries.
 *
 * @returns Pointer to the PAE PDPE array.
 * @param   pVCpu       The cross context virtual CPU structure.
 */
VMM_INT_DECL(PX86PDPE) HMGetPaePdpes(PVMCPU pVCpu)
{
    return &pVCpu->hm.s.aPdpes[0];
}

/**
 * Sets or clears the single instruction flag.
 *
 * When set, HM will try its best to return to ring-3 after executing a single
 * instruction. This can be used for debugging. See also
 * EMR3HmSingleInstruction.
 *
 * @returns The old flag state.
 * @param   pVM         The cross context VM structure.
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 * @param   fEnable     The new flag state.
 */
VMM_INT_DECL(bool) HMSetSingleInstruction(PVM pVM, PVMCPU pVCpu, bool fEnable)
{
    VMCPU_ASSERT_EMT(pVCpu);
    bool fOld = pVCpu->hm.s.fSingleInstruction;
    pVCpu->hm.s.fSingleInstruction = fEnable;
    pVCpu->hm.s.fUseDebugLoop = fEnable || pVM->hm.s.fUseDebugLoop;
    return fOld;
}


/**
 * Notifies HM that the GIM provider wants to trap \#UD.
 *
 * @param   pVCpu       The cross context virtual CPU structure.
 */
VMM_INT_DECL(void) HMTrapXcptUDForGIMEnable(PVMCPU pVCpu)
{
    pVCpu->hm.s.fGIMTrapXcptUD = true;
    if (pVCpu->CTX_SUFF(pVM)->hm.s.vmx.fSupported)
        ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_VMX_GUEST_XCPT_INTERCEPTS);
    else
        ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_SVM_GUEST_XCPT_INTERCEPTS);
}


/**
 * Notifies HM that the GIM provider no longer wants to trap \#UD.
 *
 * @param   pVCpu       The cross context virtual CPU structure.
 */
VMM_INT_DECL(void) HMTrapXcptUDForGIMDisable(PVMCPU pVCpu)
{
    pVCpu->hm.s.fGIMTrapXcptUD = false;
    if (pVCpu->CTX_SUFF(pVM)->hm.s.vmx.fSupported)
        ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_VMX_GUEST_XCPT_INTERCEPTS);
    else
        ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_SVM_GUEST_XCPT_INTERCEPTS);
}

#ifndef IN_RC
/**
 * Notification callback which is called whenever there is a chance that a CR3
 * value might have changed.
 *
 * This is called by PGM.
 *
 * @param   pVM             The cross context VM structure.
 * @param   pVCpu           The cross context virtual CPU structure.
 * @param   enmShadowMode   New shadow paging mode.
 * @param   enmGuestMode    New guest paging mode.
 */
VMM_INT_DECL(void) HMHCPagingModeChanged(PVM pVM, PVMCPU pVCpu, PGMMODE enmShadowMode, PGMMODE enmGuestMode)
{
# ifdef IN_RING3
    /* Ignore page mode changes during state loading. */
    if (VMR3GetState(pVM) == VMSTATE_LOADING)
        return;
# endif

    pVCpu->hm.s.enmShadowMode = enmShadowMode;

    /*
     * If the guest left protected mode VMX execution, we'll have to be
     * extra careful if/when the guest switches back to protected mode.
     */
    if (enmGuestMode == PGMMODE_REAL)
        pVCpu->hm.s.vmx.fWasInRealMode = true;

# ifdef IN_RING0
    /*
     * We need to tickle SVM and VT-x state updates.
     *
     * Note! We could probably reduce this depending on what exactly changed.
     */
    if (VM_IS_HM_ENABLED(pVM))
    {
        CPUM_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_EFER); /* No recursion! */
        uint64_t fChanged = HM_CHANGED_GUEST_CR0 | HM_CHANGED_GUEST_CR3 | HM_CHANGED_GUEST_CR4 | HM_CHANGED_GUEST_EFER_MSR;
        if (pVM->hm.s.svm.fSupported)
            fChanged |= HM_CHANGED_SVM_GUEST_XCPT_INTERCEPTS;
        else
            fChanged |= HM_CHANGED_VMX_GUEST_XCPT_INTERCEPTS | HM_CHANGED_VMX_ENTRY_CTLS | HM_CHANGED_VMX_EXIT_CTLS;
        ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, fChanged);
    }
# endif

    Log4(("HMHCPagingModeChanged: Guest paging mode '%s', shadow paging mode '%s'\n", PGMGetModeName(enmGuestMode),
          PGMGetModeName(enmShadowMode)));
}
#endif /* !IN_RC */