VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/NEMR3Native-win-armv8.cpp@ 106520

Last change on this file since 106520 was 106520, checked in by vboxsync, 7 months ago

VMM/NEM-win: Unused variable g_apszWHvMemAccesstypes. jiraref:VBP-1171

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 124.5 KB
Line 
1/* $Id: NEMR3Native-win-armv8.cpp 106520 2024-10-20 02:30:08Z vboxsync $ */
2/** @file
3 * NEM - Native execution manager, native ring-3 Windows backend.
4 *
5 * Log group 2: Exit logging.
6 * Log group 3: Log context on exit.
7 * Log group 5: Ring-3 memory management
8 * Log group 6: Ring-0 memory management
9 * Log group 12: API intercepts.
10 */
11
12/*
13 * Copyright (C) 2018-2024 Oracle and/or its affiliates.
14 *
15 * This file is part of VirtualBox base platform packages, as
16 * available from https://www.215389.xyz.
17 *
18 * This program is free software; you can redistribute it and/or
19 * modify it under the terms of the GNU General Public License
20 * as published by the Free Software Foundation, in version 3 of the
21 * License.
22 *
23 * This program is distributed in the hope that it will be useful, but
24 * WITHOUT ANY WARRANTY; without even the implied warranty of
25 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
26 * General Public License for more details.
27 *
28 * You should have received a copy of the GNU General Public License
29 * along with this program; if not, see <https://www.gnu.org/licenses>.
30 *
31 * SPDX-License-Identifier: GPL-3.0-only
32 */
33
34
35/*********************************************************************************************************************************
36* Header Files *
37*********************************************************************************************************************************/
38#define LOG_GROUP LOG_GROUP_NEM
39#define VMCPU_INCL_CPUM_GST_CTX
40#include <iprt/nt/nt-and-windows.h>
41#include <iprt/nt/hyperv.h>
42#include <WinHvPlatform.h>
43
44#ifndef _WIN32_WINNT_WIN10
45# error "Missing _WIN32_WINNT_WIN10"
46#endif
47#ifndef _WIN32_WINNT_WIN10_RS1 /* Missing define, causing trouble for us. */
48# define _WIN32_WINNT_WIN10_RS1 (_WIN32_WINNT_WIN10 + 1)
49#endif
50#include <sysinfoapi.h>
51#include <debugapi.h>
52#include <errhandlingapi.h>
53#include <fileapi.h>
54#include <winerror.h> /* no api header for this. */
55
56#include <VBox/vmm/nem.h>
57#include <VBox/vmm/iem.h>
58#include <VBox/vmm/em.h>
59#include <VBox/vmm/apic.h>
60#include <VBox/vmm/pdm.h>
61#include <VBox/vmm/dbgftrace.h>
62#include "NEMInternal.h"
63#include <VBox/vmm/vmcc.h>
64
65#include <iprt/ldr.h>
66#include <iprt/path.h>
67#include <iprt/string.h>
68#include <iprt/system.h>
69#include <iprt/utf16.h>
70
71#ifndef NTDDI_WIN10_VB /* Present in W10 2004 SDK, quite possibly earlier. */
72HRESULT WINAPI WHvQueryGpaRangeDirtyBitmap(WHV_PARTITION_HANDLE, WHV_GUEST_PHYSICAL_ADDRESS, UINT64, UINT64 *, UINT32);
73# define WHvMapGpaRangeFlagTrackDirtyPages ((WHV_MAP_GPA_RANGE_FLAGS)0x00000008)
74#endif
75
76
77/*********************************************************************************************************************************
78* Defined Constants And Macros *
79*********************************************************************************************************************************/
80
81
82/*********************************************************************************************************************************
83* Global Variables *
84*********************************************************************************************************************************/
85/** @name APIs imported from WinHvPlatform.dll
86 * @{ */
87static decltype(WHvGetCapability) * g_pfnWHvGetCapability;
88static decltype(WHvCreatePartition) * g_pfnWHvCreatePartition;
89static decltype(WHvSetupPartition) * g_pfnWHvSetupPartition;
90static decltype(WHvDeletePartition) * g_pfnWHvDeletePartition;
91static decltype(WHvGetPartitionProperty) * g_pfnWHvGetPartitionProperty;
92static decltype(WHvSetPartitionProperty) * g_pfnWHvSetPartitionProperty;
93static decltype(WHvMapGpaRange) * g_pfnWHvMapGpaRange;
94static decltype(WHvUnmapGpaRange) * g_pfnWHvUnmapGpaRange;
95static decltype(WHvTranslateGva) * g_pfnWHvTranslateGva;
96static decltype(WHvQueryGpaRangeDirtyBitmap) * g_pfnWHvQueryGpaRangeDirtyBitmap;
97static decltype(WHvCreateVirtualProcessor) * g_pfnWHvCreateVirtualProcessor;
98static decltype(WHvDeleteVirtualProcessor) * g_pfnWHvDeleteVirtualProcessor;
99static decltype(WHvRunVirtualProcessor) * g_pfnWHvRunVirtualProcessor;
100static decltype(WHvCancelRunVirtualProcessor) * g_pfnWHvCancelRunVirtualProcessor;
101static decltype(WHvGetVirtualProcessorRegisters) * g_pfnWHvGetVirtualProcessorRegisters;
102static decltype(WHvSetVirtualProcessorRegisters) * g_pfnWHvSetVirtualProcessorRegisters;
103/** @} */
104
105/** The Windows build number. */
106static uint32_t g_uBuildNo = 17134;
107
108
109
110/**
111 * Import instructions.
112 */
113static const struct
114{
115 uint8_t idxDll; /**< 0 for WinHvPlatform.dll, 1 for vid.dll. */
116 bool fOptional; /**< Set if import is optional. */
117 PFNRT *ppfn; /**< The function pointer variable. */
118 const char *pszName; /**< The function name. */
119} g_aImports[] =
120{
121#define NEM_WIN_IMPORT(a_idxDll, a_fOptional, a_Name) { (a_idxDll), (a_fOptional), (PFNRT *)&RT_CONCAT(g_pfn,a_Name), #a_Name }
122 NEM_WIN_IMPORT(0, false, WHvGetCapability),
123 NEM_WIN_IMPORT(0, false, WHvCreatePartition),
124 NEM_WIN_IMPORT(0, false, WHvSetupPartition),
125 NEM_WIN_IMPORT(0, false, WHvDeletePartition),
126 NEM_WIN_IMPORT(0, false, WHvGetPartitionProperty),
127 NEM_WIN_IMPORT(0, false, WHvSetPartitionProperty),
128 NEM_WIN_IMPORT(0, false, WHvMapGpaRange),
129 NEM_WIN_IMPORT(0, false, WHvUnmapGpaRange),
130 NEM_WIN_IMPORT(0, false, WHvTranslateGva),
131 NEM_WIN_IMPORT(0, true, WHvQueryGpaRangeDirtyBitmap),
132 NEM_WIN_IMPORT(0, false, WHvCreateVirtualProcessor),
133 NEM_WIN_IMPORT(0, false, WHvDeleteVirtualProcessor),
134 NEM_WIN_IMPORT(0, false, WHvRunVirtualProcessor),
135 NEM_WIN_IMPORT(0, false, WHvCancelRunVirtualProcessor),
136 NEM_WIN_IMPORT(0, false, WHvGetVirtualProcessorRegisters),
137 NEM_WIN_IMPORT(0, false, WHvSetVirtualProcessorRegisters),
138#undef NEM_WIN_IMPORT
139};
140
141
142/*
143 * Let the preprocessor alias the APIs to import variables for better autocompletion.
144 */
145#ifndef IN_SLICKEDIT
146# define WHvGetCapability g_pfnWHvGetCapability
147# define WHvCreatePartition g_pfnWHvCreatePartition
148# define WHvSetupPartition g_pfnWHvSetupPartition
149# define WHvDeletePartition g_pfnWHvDeletePartition
150# define WHvGetPartitionProperty g_pfnWHvGetPartitionProperty
151# define WHvSetPartitionProperty g_pfnWHvSetPartitionProperty
152# define WHvMapGpaRange g_pfnWHvMapGpaRange
153# define WHvUnmapGpaRange g_pfnWHvUnmapGpaRange
154# define WHvTranslateGva g_pfnWHvTranslateGva
155# define WHvQueryGpaRangeDirtyBitmap g_pfnWHvQueryGpaRangeDirtyBitmap
156# define WHvCreateVirtualProcessor g_pfnWHvCreateVirtualProcessor
157# define WHvDeleteVirtualProcessor g_pfnWHvDeleteVirtualProcessor
158# define WHvRunVirtualProcessor g_pfnWHvRunVirtualProcessor
159# define WHvGetRunExitContextSize g_pfnWHvGetRunExitContextSize
160# define WHvCancelRunVirtualProcessor g_pfnWHvCancelRunVirtualProcessor
161# define WHvGetVirtualProcessorRegisters g_pfnWHvGetVirtualProcessorRegisters
162# define WHvSetVirtualProcessorRegisters g_pfnWHvSetVirtualProcessorRegisters
163
164# define VidMessageSlotHandleAndGetNext g_pfnVidMessageSlotHandleAndGetNext
165# define VidStartVirtualProcessor g_pfnVidStartVirtualProcessor
166# define VidStopVirtualProcessor g_pfnVidStopVirtualProcessor
167
168#endif
169
170#if 0 /* unused */
171/** WHV_MEMORY_ACCESS_TYPE names */
172static const char * const g_apszWHvMemAccesstypes[4] = { "read", "write", "exec", "!undefined!" };
173#endif
174/** NEM_WIN_PAGE_STATE_XXX names. */
175NEM_TMPL_STATIC const char * const g_apszPageStates[4] = { "not-set", "unmapped", "readable", "writable" };
176/** HV_INTERCEPT_ACCESS_TYPE names. */
177static const char * const g_apszHvInterceptAccessTypes[4] = { "read", "write", "exec", "!undefined!" };
178
179
180/*********************************************************************************************************************************
181* Internal Functions *
182*********************************************************************************************************************************/
183DECLINLINE(int) nemR3NativeGCPhys2R3PtrReadOnly(PVM pVM, RTGCPHYS GCPhys, const void **ppv);
184DECLINLINE(int) nemR3NativeGCPhys2R3PtrWriteable(PVM pVM, RTGCPHYS GCPhys, void **ppv);
185
186NEM_TMPL_STATIC int nemHCNativeSetPhysPage(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhysSrc, RTGCPHYS GCPhysDst,
187 uint32_t fPageProt, uint8_t *pu2State, bool fBackingChanged);
188
189/**
190 * Worker for nemR3NativeInit that probes and load the native API.
191 *
192 * @returns VBox status code.
193 * @param fForced Whether the HMForced flag is set and we should
194 * fail if we cannot initialize.
195 * @param pErrInfo Where to always return error info.
196 */
197static int nemR3WinInitProbeAndLoad(bool fForced, PRTERRINFO pErrInfo)
198{
199 /*
200 * Check that the DLL files we need are present, but without loading them.
201 * We'd like to avoid loading them unnecessarily.
202 */
203 WCHAR wszPath[MAX_PATH + 64];
204 UINT cwcPath = GetSystemDirectoryW(wszPath, MAX_PATH);
205 if (cwcPath >= MAX_PATH || cwcPath < 2)
206 return RTErrInfoSetF(pErrInfo, VERR_NEM_INIT_FAILED, "GetSystemDirectoryW failed (%#x / %u)", cwcPath, GetLastError());
207
208 if (wszPath[cwcPath - 1] != '\\' || wszPath[cwcPath - 1] != '/')
209 wszPath[cwcPath++] = '\\';
210 RTUtf16CopyAscii(&wszPath[cwcPath], RT_ELEMENTS(wszPath) - cwcPath, "WinHvPlatform.dll");
211 if (GetFileAttributesW(wszPath) == INVALID_FILE_ATTRIBUTES)
212 return RTErrInfoSetF(pErrInfo, VERR_NEM_NOT_AVAILABLE, "The native API dll was not found (%ls)", wszPath);
213
214 /*
215 * Check that we're in a VM and that the hypervisor identifies itself as Hyper-V.
216 */
217 /** @todo */
218
219 /** @todo would be great if we could recognize a root partition from the
220 * CPUID info, but I currently don't dare do that. */
221
222 /*
223 * Now try load the DLLs and resolve the APIs.
224 */
225 static const char * const s_apszDllNames[1] = { "WinHvPlatform.dll" };
226 RTLDRMOD ahMods[1] = { NIL_RTLDRMOD };
227 int rc = VINF_SUCCESS;
228 for (unsigned i = 0; i < RT_ELEMENTS(s_apszDllNames); i++)
229 {
230 int rc2 = RTLdrLoadSystem(s_apszDllNames[i], true /*fNoUnload*/, &ahMods[i]);
231 if (RT_FAILURE(rc2))
232 {
233 if (!RTErrInfoIsSet(pErrInfo))
234 RTErrInfoSetF(pErrInfo, rc2, "Failed to load API DLL: %s: %Rrc", s_apszDllNames[i], rc2);
235 else
236 RTErrInfoAddF(pErrInfo, rc2, "; %s: %Rrc", s_apszDllNames[i], rc2);
237 ahMods[i] = NIL_RTLDRMOD;
238 rc = VERR_NEM_INIT_FAILED;
239 }
240 }
241 if (RT_SUCCESS(rc))
242 {
243 for (unsigned i = 0; i < RT_ELEMENTS(g_aImports); i++)
244 {
245 int rc2 = RTLdrGetSymbol(ahMods[g_aImports[i].idxDll], g_aImports[i].pszName, (void **)g_aImports[i].ppfn);
246 if (RT_SUCCESS(rc2))
247 {
248 if (g_aImports[i].fOptional)
249 LogRel(("NEM: info: Found optional import %s!%s.\n",
250 s_apszDllNames[g_aImports[i].idxDll], g_aImports[i].pszName));
251 }
252 else
253 {
254 *g_aImports[i].ppfn = NULL;
255
256 LogRel(("NEM: %s: Failed to import %s!%s: %Rrc",
257 g_aImports[i].fOptional ? "info" : fForced ? "fatal" : "error",
258 s_apszDllNames[g_aImports[i].idxDll], g_aImports[i].pszName, rc2));
259 if (!g_aImports[i].fOptional)
260 {
261 if (RTErrInfoIsSet(pErrInfo))
262 RTErrInfoAddF(pErrInfo, rc2, ", %s!%s",
263 s_apszDllNames[g_aImports[i].idxDll], g_aImports[i].pszName);
264 else
265 rc = RTErrInfoSetF(pErrInfo, rc2, "Failed to import: %s!%s",
266 s_apszDllNames[g_aImports[i].idxDll], g_aImports[i].pszName);
267 Assert(RT_FAILURE(rc));
268 }
269 }
270 }
271 if (RT_SUCCESS(rc))
272 {
273 Assert(!RTErrInfoIsSet(pErrInfo));
274 }
275 }
276
277 for (unsigned i = 0; i < RT_ELEMENTS(ahMods); i++)
278 RTLdrClose(ahMods[i]);
279 return rc;
280}
281
282
283/**
284 * Wrapper for different WHvGetCapability signatures.
285 */
286DECLINLINE(HRESULT) WHvGetCapabilityWrapper(WHV_CAPABILITY_CODE enmCap, WHV_CAPABILITY *pOutput, uint32_t cbOutput)
287{
288 return g_pfnWHvGetCapability(enmCap, pOutput, cbOutput, NULL);
289}
290
291
292/**
293 * Worker for nemR3NativeInit that gets the hypervisor capabilities.
294 *
295 * @returns VBox status code.
296 * @param pVM The cross context VM structure.
297 * @param pErrInfo Where to always return error info.
298 */
299static int nemR3WinInitCheckCapabilities(PVM pVM, PRTERRINFO pErrInfo)
300{
301#define NEM_LOG_REL_CAP_EX(a_szField, a_szFmt, a_Value) LogRel(("NEM: %-38s= " a_szFmt "\n", a_szField, a_Value))
302#define NEM_LOG_REL_CAP_SUB_EX(a_szField, a_szFmt, a_Value) LogRel(("NEM: %36s: " a_szFmt "\n", a_szField, a_Value))
303#define NEM_LOG_REL_CAP_SUB(a_szField, a_Value) NEM_LOG_REL_CAP_SUB_EX(a_szField, "%d", a_Value)
304
305 /*
306 * Is the hypervisor present with the desired capability?
307 *
308 * In build 17083 this translates into:
309 * - CPUID[0x00000001].HVP is set
310 * - CPUID[0x40000000] == "Microsoft Hv"
311 * - CPUID[0x40000001].eax == "Hv#1"
312 * - CPUID[0x40000003].ebx[12] is set.
313 * - VidGetExoPartitionProperty(INVALID_HANDLE_VALUE, 0x60000, &Ignored) returns
314 * a non-zero value.
315 */
316 /**
317 * @todo Someone at Microsoft please explain weird API design:
318 * 1. Pointless CapabilityCode duplication int the output;
319 * 2. No output size.
320 */
321 WHV_CAPABILITY Caps;
322 RT_ZERO(Caps);
323 SetLastError(0);
324 HRESULT hrc = WHvGetCapabilityWrapper(WHvCapabilityCodeHypervisorPresent, &Caps, sizeof(Caps));
325 DWORD rcWin = GetLastError();
326 if (FAILED(hrc))
327 return RTErrInfoSetF(pErrInfo, VERR_NEM_INIT_FAILED,
328 "WHvGetCapability/WHvCapabilityCodeHypervisorPresent failed: %Rhrc (Last=%#x/%u)",
329 hrc, RTNtLastStatusValue(), RTNtLastErrorValue());
330 if (!Caps.HypervisorPresent)
331 {
332 if (!RTPathExists(RTPATH_NT_PASSTHRU_PREFIX "Device\\VidExo"))
333 return RTErrInfoSetF(pErrInfo, VERR_NEM_NOT_AVAILABLE,
334 "WHvCapabilityCodeHypervisorPresent is FALSE! Make sure you have enabled the 'Windows Hypervisor Platform' feature.");
335 return RTErrInfoSetF(pErrInfo, VERR_NEM_NOT_AVAILABLE, "WHvCapabilityCodeHypervisorPresent is FALSE! (%u)", rcWin);
336 }
337 LogRel(("NEM: WHvCapabilityCodeHypervisorPresent is TRUE, so this might work...\n"));
338
339
340 /*
341 * Check what extended VM exits are supported.
342 */
343 RT_ZERO(Caps);
344 hrc = WHvGetCapabilityWrapper(WHvCapabilityCodeExtendedVmExits, &Caps, sizeof(Caps));
345 if (FAILED(hrc))
346 return RTErrInfoSetF(pErrInfo, VERR_NEM_INIT_FAILED,
347 "WHvGetCapability/WHvCapabilityCodeExtendedVmExits failed: %Rhrc (Last=%#x/%u)",
348 hrc, RTNtLastStatusValue(), RTNtLastErrorValue());
349 NEM_LOG_REL_CAP_EX("WHvCapabilityCodeExtendedVmExits", "%'#018RX64", Caps.ExtendedVmExits.AsUINT64);
350 pVM->nem.s.fHypercallExit = RT_BOOL(Caps.ExtendedVmExits.HypercallExit);
351 pVM->nem.s.fGpaAccessFaultExit = RT_BOOL(Caps.ExtendedVmExits.GpaAccessFaultExit);
352 NEM_LOG_REL_CAP_SUB("fHypercallExit", pVM->nem.s.fHypercallExit);
353 NEM_LOG_REL_CAP_SUB("fGpaAccessFaultExit", pVM->nem.s.fGpaAccessFaultExit);
354 if (Caps.ExtendedVmExits.AsUINT64 & ~(uint64_t)7)
355 LogRel(("NEM: Warning! Unknown VM exit definitions: %#RX64\n", Caps.ExtendedVmExits.AsUINT64));
356 /** @todo RECHECK: WHV_EXTENDED_VM_EXITS typedef. */
357
358 /*
359 * Check features in case they end up defining any.
360 */
361 RT_ZERO(Caps);
362 hrc = WHvGetCapabilityWrapper(WHvCapabilityCodeFeatures, &Caps, sizeof(Caps));
363 if (FAILED(hrc))
364 return RTErrInfoSetF(pErrInfo, VERR_NEM_INIT_FAILED,
365 "WHvGetCapability/WHvCapabilityCodeFeatures failed: %Rhrc (Last=%#x/%u)",
366 hrc, RTNtLastStatusValue(), RTNtLastErrorValue());
367 if (Caps.Features.AsUINT64 & ~(uint64_t)0)
368 LogRel(("NEM: Warning! Unknown feature definitions: %#RX64\n", Caps.Features.AsUINT64));
369 /** @todo RECHECK: WHV_CAPABILITY_FEATURES typedef. */
370
371 /*
372 * Check that the CPU vendor is supported.
373 */
374 RT_ZERO(Caps);
375 hrc = WHvGetCapabilityWrapper(WHvCapabilityCodeProcessorVendor, &Caps, sizeof(Caps));
376 if (FAILED(hrc))
377 return RTErrInfoSetF(pErrInfo, VERR_NEM_INIT_FAILED,
378 "WHvGetCapability/WHvCapabilityCodeProcessorVendor failed: %Rhrc (Last=%#x/%u)",
379 hrc, RTNtLastStatusValue(), RTNtLastErrorValue());
380 switch (Caps.ProcessorVendor)
381 {
382 /** @todo RECHECK: WHV_PROCESSOR_VENDOR typedef. */
383 case WHvProcessorVendorArm:
384 NEM_LOG_REL_CAP_EX("WHvCapabilityCodeProcessorVendor", "%d - ARM", Caps.ProcessorVendor);
385 pVM->nem.s.enmCpuVendor = CPUMCPUVENDOR_UNKNOWN;
386 break;
387 default:
388 NEM_LOG_REL_CAP_EX("WHvCapabilityCodeProcessorVendor", "%d", Caps.ProcessorVendor);
389 return RTErrInfoSetF(pErrInfo, VERR_NEM_INIT_FAILED, "Unknown processor vendor: %d", Caps.ProcessorVendor);
390 }
391
392 /*
393 * CPU features, guessing these are virtual CPU features?
394 */
395 RT_ZERO(Caps);
396 hrc = WHvGetCapabilityWrapper(WHvCapabilityCodeProcessorFeatures, &Caps, sizeof(Caps));
397 if (FAILED(hrc))
398 return RTErrInfoSetF(pErrInfo, VERR_NEM_INIT_FAILED,
399 "WHvGetCapability/WHvCapabilityCodeProcessorFeatures failed: %Rhrc (Last=%#x/%u)",
400 hrc, RTNtLastStatusValue(), RTNtLastErrorValue());
401 NEM_LOG_REL_CAP_EX("WHvCapabilityCodeProcessorFeatures", "%'#018RX64", Caps.ProcessorFeatures.AsUINT64);
402#define NEM_LOG_REL_CPU_FEATURE(a_Field) NEM_LOG_REL_CAP_SUB(#a_Field, Caps.ProcessorFeatures.a_Field)
403 NEM_LOG_REL_CPU_FEATURE(Asid16);
404 NEM_LOG_REL_CPU_FEATURE(TGran16);
405 NEM_LOG_REL_CPU_FEATURE(TGran64);
406 NEM_LOG_REL_CPU_FEATURE(Haf);
407 NEM_LOG_REL_CPU_FEATURE(Hdbs);
408 NEM_LOG_REL_CPU_FEATURE(Pan);
409 NEM_LOG_REL_CPU_FEATURE(AtS1E1);
410 NEM_LOG_REL_CPU_FEATURE(Uao);
411 NEM_LOG_REL_CPU_FEATURE(El0Aarch32);
412 NEM_LOG_REL_CPU_FEATURE(Fp);
413 NEM_LOG_REL_CPU_FEATURE(FpHp);
414 NEM_LOG_REL_CPU_FEATURE(AdvSimd);
415 NEM_LOG_REL_CPU_FEATURE(AdvSimdHp);
416 NEM_LOG_REL_CPU_FEATURE(GicV3V4);
417 NEM_LOG_REL_CPU_FEATURE(GicV41);
418 NEM_LOG_REL_CPU_FEATURE(Ras);
419 NEM_LOG_REL_CPU_FEATURE(PmuV3);
420 NEM_LOG_REL_CPU_FEATURE(PmuV3ArmV81);
421 NEM_LOG_REL_CPU_FEATURE(PmuV3ArmV84);
422 NEM_LOG_REL_CPU_FEATURE(PmuV3ArmV85);
423 NEM_LOG_REL_CPU_FEATURE(Aes);
424 NEM_LOG_REL_CPU_FEATURE(PolyMul);
425 NEM_LOG_REL_CPU_FEATURE(Sha1);
426 NEM_LOG_REL_CPU_FEATURE(Sha256);
427 NEM_LOG_REL_CPU_FEATURE(Sha512);
428 NEM_LOG_REL_CPU_FEATURE(Crc32);
429 NEM_LOG_REL_CPU_FEATURE(Atomic);
430 NEM_LOG_REL_CPU_FEATURE(Rdm);
431 NEM_LOG_REL_CPU_FEATURE(Sha3);
432 NEM_LOG_REL_CPU_FEATURE(Sm3);
433 NEM_LOG_REL_CPU_FEATURE(Sm4);
434 NEM_LOG_REL_CPU_FEATURE(Dp);
435 NEM_LOG_REL_CPU_FEATURE(Fhm);
436 NEM_LOG_REL_CPU_FEATURE(DcCvap);
437 NEM_LOG_REL_CPU_FEATURE(DcCvadp);
438 NEM_LOG_REL_CPU_FEATURE(ApaBase);
439 NEM_LOG_REL_CPU_FEATURE(ApaEp);
440 NEM_LOG_REL_CPU_FEATURE(ApaEp2);
441 NEM_LOG_REL_CPU_FEATURE(ApaEp2Fp);
442 NEM_LOG_REL_CPU_FEATURE(ApaEp2Fpc);
443 NEM_LOG_REL_CPU_FEATURE(Jscvt);
444 NEM_LOG_REL_CPU_FEATURE(Fcma);
445 NEM_LOG_REL_CPU_FEATURE(RcpcV83);
446 NEM_LOG_REL_CPU_FEATURE(RcpcV84);
447 NEM_LOG_REL_CPU_FEATURE(Gpa);
448 NEM_LOG_REL_CPU_FEATURE(L1ipPipt);
449 NEM_LOG_REL_CPU_FEATURE(DzPermitted);
450
451#undef NEM_LOG_REL_CPU_FEATURE
452 if (Caps.ProcessorFeatures.AsUINT64 & (~(RT_BIT_64(47) - 1)))
453 LogRel(("NEM: Warning! Unknown CPU features: %#RX64\n", Caps.ProcessorFeatures.AsUINT64));
454 pVM->nem.s.uCpuFeatures.u64 = Caps.ProcessorFeatures.AsUINT64;
455 /** @todo RECHECK: WHV_PROCESSOR_FEATURES typedef. */
456
457 /*
458 * The cache line flush size.
459 */
460 RT_ZERO(Caps);
461 hrc = WHvGetCapabilityWrapper(WHvCapabilityCodeProcessorClFlushSize, &Caps, sizeof(Caps));
462 if (FAILED(hrc))
463 return RTErrInfoSetF(pErrInfo, VERR_NEM_INIT_FAILED,
464 "WHvGetCapability/WHvCapabilityCodeProcessorClFlushSize failed: %Rhrc (Last=%#x/%u)",
465 hrc, RTNtLastStatusValue(), RTNtLastErrorValue());
466 NEM_LOG_REL_CAP_EX("WHvCapabilityCodeProcessorClFlushSize", "2^%u", Caps.ProcessorClFlushSize);
467 if (Caps.ProcessorClFlushSize < 8 && Caps.ProcessorClFlushSize > 9)
468 return RTErrInfoSetF(pErrInfo, VERR_NEM_INIT_FAILED, "Unsupported cache line flush size: %u", Caps.ProcessorClFlushSize);
469 pVM->nem.s.cCacheLineFlushShift = Caps.ProcessorClFlushSize;
470
471 RT_ZERO(Caps);
472 hrc = WHvGetCapabilityWrapper(WHvCapabilityCodePhysicalAddressWidth, &Caps, sizeof(Caps));
473 if (FAILED(hrc))
474 return RTErrInfoSetF(pErrInfo, VERR_NEM_INIT_FAILED,
475 "WHvGetCapability/WHvCapabilityCodePhysicalAddressWidth failed: %Rhrc (Last=%#x/%u)",
476 hrc, RTNtLastStatusValue(), RTNtLastErrorValue());
477 NEM_LOG_REL_CAP_EX("WHvCapabilityCodePhysicalAddressWidth", "2^%u", Caps.PhysicalAddressWidth);
478 if (Caps.PhysicalAddressWidth < 32 && Caps.PhysicalAddressWidth > 52)
479 return RTErrInfoSetF(pErrInfo, VERR_NEM_INIT_FAILED, "Unsupported physical address width: %u", Caps.ProcessorClFlushSize);
480 pVM->nem.s.cPhysicalAddressWidth = Caps.PhysicalAddressWidth;
481
482
483 /*
484 * See if they've added more properties that we're not aware of.
485 */
486 /** @todo RECHECK: WHV_CAPABILITY_CODE typedef. */
487 if (!IsDebuggerPresent()) /* Too noisy when in debugger, so skip. */
488 {
489 static const struct
490 {
491 uint32_t iMin, iMax; } s_aUnknowns[] =
492 {
493 { 0x0004, 0x000f },
494 { 0x1003, 0x100f },
495 { 0x2000, 0x200f },
496 { 0x3000, 0x300f },
497 { 0x4000, 0x400f },
498 };
499 for (uint32_t j = 0; j < RT_ELEMENTS(s_aUnknowns); j++)
500 for (uint32_t i = s_aUnknowns[j].iMin; i <= s_aUnknowns[j].iMax; i++)
501 {
502 RT_ZERO(Caps);
503 hrc = WHvGetCapabilityWrapper((WHV_CAPABILITY_CODE)i, &Caps, sizeof(Caps));
504 if (SUCCEEDED(hrc))
505 LogRel(("NEM: Warning! Unknown capability %#x returning: %.*Rhxs\n", i, sizeof(Caps), &Caps));
506 }
507 }
508
509 /*
510 * For proper operation, we require CPUID exits.
511 */
512 /** @todo Any? */
513
514#undef NEM_LOG_REL_CAP_EX
515#undef NEM_LOG_REL_CAP_SUB_EX
516#undef NEM_LOG_REL_CAP_SUB
517 return VINF_SUCCESS;
518}
519
520
521/**
522 * Creates and sets up a Hyper-V (exo) partition.
523 *
524 * @returns VBox status code.
525 * @param pVM The cross context VM structure.
526 * @param pErrInfo Where to always return error info.
527 */
528static int nemR3WinInitCreatePartition(PVM pVM, PRTERRINFO pErrInfo)
529{
530 AssertReturn(!pVM->nem.s.hPartition, RTErrInfoSet(pErrInfo, VERR_WRONG_ORDER, "Wrong initalization order"));
531 AssertReturn(!pVM->nem.s.hPartitionDevice, RTErrInfoSet(pErrInfo, VERR_WRONG_ORDER, "Wrong initalization order"));
532
533 /*
534 * Create the partition.
535 */
536 WHV_PARTITION_HANDLE hPartition;
537 HRESULT hrc = WHvCreatePartition(&hPartition);
538 if (FAILED(hrc))
539 return RTErrInfoSetF(pErrInfo, VERR_NEM_VM_CREATE_FAILED, "WHvCreatePartition failed with %Rhrc (Last=%#x/%u)",
540 hrc, RTNtLastStatusValue(), RTNtLastErrorValue());
541
542 int rc;
543
544 /*
545 * Set partition properties, most importantly the CPU count.
546 */
547 /**
548 * @todo Someone at Microsoft please explain another weird API:
549 * - Why this API doesn't take the WHV_PARTITION_PROPERTY_CODE value as an
550 * argument rather than as part of the struct. That is so weird if you've
551 * used any other NT or windows API, including WHvGetCapability().
552 * - Why use PVOID when WHV_PARTITION_PROPERTY is what's expected. We
553 * technically only need 9 bytes for setting/getting
554 * WHVPartitionPropertyCodeProcessorClFlushSize, but the API insists on 16. */
555 WHV_PARTITION_PROPERTY Property;
556 RT_ZERO(Property);
557 Property.ProcessorCount = pVM->cCpus;
558 hrc = WHvSetPartitionProperty(hPartition, WHvPartitionPropertyCodeProcessorCount, &Property, sizeof(Property));
559 if (SUCCEEDED(hrc))
560 {
561 RT_ZERO(Property);
562 Property.ExtendedVmExits.HypercallExit = pVM->nem.s.fHypercallExit;
563 hrc = WHvSetPartitionProperty(hPartition, WHvPartitionPropertyCodeExtendedVmExits, &Property, sizeof(Property));
564 if (SUCCEEDED(hrc))
565 {
566 /*
567 * We'll continue setup in nemR3NativeInitAfterCPUM.
568 */
569 pVM->nem.s.fCreatedEmts = false;
570 pVM->nem.s.hPartition = hPartition;
571 LogRel(("NEM: Created partition %p.\n", hPartition));
572 return VINF_SUCCESS;
573 }
574
575 rc = RTErrInfoSetF(pErrInfo, VERR_NEM_VM_CREATE_FAILED,
576 "Failed setting WHvPartitionPropertyCodeExtendedVmExits to %'#RX64: %Rhrc",
577 Property.ExtendedVmExits.AsUINT64, hrc);
578 }
579 else
580 rc = RTErrInfoSetF(pErrInfo, VERR_NEM_VM_CREATE_FAILED,
581 "Failed setting WHvPartitionPropertyCodeProcessorCount to %u: %Rhrc (Last=%#x/%u)",
582 pVM->cCpus, hrc, RTNtLastStatusValue(), RTNtLastErrorValue());
583 WHvDeletePartition(hPartition);
584
585 Assert(!pVM->nem.s.hPartitionDevice);
586 Assert(!pVM->nem.s.hPartition);
587 return rc;
588}
589
590
/**
 * Worker for nemR3NativeInit that completes the partition setup.
 *
 * Sets the remaining partition properties (cache line flush size, processor
 * features), instantiates the partition via WHvSetupPartition, creates one
 * virtual processor per vCPU, and - for vCPU 0 - queries the ARMv8 ID
 * registers so CPUM can be populated via CPUMR3PopulateFeaturesByIdRegisters.
 *
 * @returns VBox status code.
 * @param   pVM     The cross context VM structure.
 */
static int nemR3NativeInitSetupVm(PVM pVM)
{
    WHV_PARTITION_HANDLE hPartition = pVM->nem.s.hPartition;
    AssertReturn(hPartition != NULL, VERR_WRONG_ORDER);
    AssertReturn(!pVM->nem.s.hPartitionDevice, VERR_WRONG_ORDER);
    AssertReturn(!pVM->nem.s.fCreatedEmts, VERR_WRONG_ORDER);

    /*
     * Continue setting up the partition now that we've got most of the CPUID feature stuff.
     */
    WHV_PARTITION_PROPERTY Property;
    HRESULT                hrc;

#if 0
    /* Not sure if we really need to set the vendor.
       Update: Apparently we don't. WHvPartitionPropertyCodeProcessorVendor was removed in 17110. */
    RT_ZERO(Property);
    Property.ProcessorVendor = pVM->nem.s.enmCpuVendor == CPUMCPUVENDOR_AMD ? WHvProcessorVendorAmd
                             : WHvProcessorVendorIntel;
    hrc = WHvSetPartitionProperty(hPartition, WHvPartitionPropertyCodeProcessorVendor, &Property, sizeof(Property));
    if (FAILED(hrc))
        return VMSetError(pVM, VERR_NEM_VM_CREATE_FAILED, RT_SRC_POS,
                          "Failed to set WHvPartitionPropertyCodeProcessorVendor to %u: %Rhrc (Last=%#x/%u)",
                          Property.ProcessorVendor, hrc, RTNtLastStatusValue(), RTNtLastErrorValue());
#endif

    /* Not sure if we really need to set the cache line flush size. */
    RT_ZERO(Property);
    Property.ProcessorClFlushSize = pVM->nem.s.cCacheLineFlushShift;
    hrc = WHvSetPartitionProperty(hPartition, WHvPartitionPropertyCodeProcessorClFlushSize, &Property, sizeof(Property));
    if (FAILED(hrc))
        return VMSetError(pVM, VERR_NEM_VM_CREATE_FAILED, RT_SRC_POS,
                          "Failed to set WHvPartitionPropertyCodeProcessorClFlushSize to %u: %Rhrc (Last=%#x/%u)",
                          pVM->nem.s.cCacheLineFlushShift, hrc, RTNtLastStatusValue(), RTNtLastErrorValue());

    /*
     * Sync CPU features with CPUM.
     */
    /** @todo sync CPU features with CPUM. */

    /* Set the partition property. */
    RT_ZERO(Property);
    Property.ProcessorFeatures.AsUINT64 = pVM->nem.s.uCpuFeatures.u64;
    hrc = WHvSetPartitionProperty(hPartition, WHvPartitionPropertyCodeProcessorFeatures, &Property, sizeof(Property));
    if (FAILED(hrc))
        return VMSetError(pVM, VERR_NEM_VM_CREATE_FAILED, RT_SRC_POS,
                          "Failed to set WHvPartitionPropertyCodeProcessorFeatures to %'#RX64: %Rhrc (Last=%#x/%u)",
                          pVM->nem.s.uCpuFeatures.u64, hrc, RTNtLastStatusValue(), RTNtLastErrorValue());

    /*
     * Set up the partition.
     *
     * Seems like this is where the partition is actually instantiated and we get
     * a handle to it.
     */
    hrc = WHvSetupPartition(hPartition);
    if (FAILED(hrc))
        return VMSetError(pVM, VERR_NEM_VM_CREATE_FAILED, RT_SRC_POS,
                          "Call to WHvSetupPartition failed: %Rhrc (Last=%#x/%u)",
                          hrc, RTNtLastStatusValue(), RTNtLastErrorValue());

    /*
     * Setup the EMTs.
     */
    for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
    {
        hrc = WHvCreateVirtualProcessor(hPartition, idCpu, 0 /*fFlags*/);
        if (FAILED(hrc))
        {
            /* Capture the last error values before cleanup calls can clobber them. */
            NTSTATUS const rcNtLast  = RTNtLastStatusValue();
            DWORD const    dwErrLast = RTNtLastErrorValue();
            /* Unwind: delete the virtual processors created so far. */
            while (idCpu-- > 0)
            {
                HRESULT hrc2 = WHvDeleteVirtualProcessor(hPartition, idCpu);
                AssertLogRelMsg(SUCCEEDED(hrc2), ("WHvDeleteVirtualProcessor(%p, %u) -> %Rhrc (Last=%#x/%u)\n",
                                                  hPartition, idCpu, hrc2, RTNtLastStatusValue(),
                                                  RTNtLastErrorValue()));
            }
            return VMSetError(pVM, VERR_NEM_VM_CREATE_FAILED, RT_SRC_POS,
                              "Call to WHvCreateVirtualProcessor failed: %Rhrc (Last=%#x/%u)", hrc, rcNtLast, dwErrLast);
        }

        if (idCpu == 0)
        {
            /* Need to query the ID registers and populate CPUM. */
            CPUMIDREGS IdRegs; RT_ZERO(IdRegs);

#if 1
            /* Query the feature/ID registers from the first vCPU; the aValues
               indices below correspond to the aenmNames slots assigned here. */
            WHV_REGISTER_NAME  aenmNames[12];
            WHV_REGISTER_VALUE aValues[12];
            RT_ZERO(aValues);

            aenmNames[0]  = WHvArm64RegisterIdAa64Dfr0El1;
            aenmNames[1]  = WHvArm64RegisterIdAa64Dfr1El1;
            aenmNames[2]  = WHvArm64RegisterIdAa64Isar0El1;
            aenmNames[3]  = WHvArm64RegisterIdAa64Isar1El1;
            aenmNames[4]  = WHvArm64RegisterIdAa64Isar2El1;
            aenmNames[5]  = WHvArm64RegisterIdAa64Mmfr0El1;
            aenmNames[6]  = WHvArm64RegisterIdAa64Mmfr1El1;
            aenmNames[7]  = WHvArm64RegisterIdAa64Mmfr2El1;
            aenmNames[8]  = WHvArm64RegisterIdAa64Pfr0El1;
            aenmNames[9]  = WHvArm64RegisterIdAa64Pfr1El1;
            aenmNames[10] = WHvArm64RegisterCtrEl0;
            aenmNames[11] = WHvArm64RegisterDczidEl0;

            hrc = WHvGetVirtualProcessorRegisters(hPartition, WHV_ANY_VP /*idCpu*/, aenmNames, RT_ELEMENTS(aenmNames), aValues);
            AssertLogRelMsgReturn(SUCCEEDED(hrc),
                                  ("WHvGetVirtualProcessorRegisters(%p, %u,,%u,) -> %Rhrc (Last=%#x/%u)\n",
                                   hPartition, WHV_ANY_VP, RT_ELEMENTS(aenmNames), hrc, RTNtLastStatusValue(), RTNtLastErrorValue())
                                  , VERR_NEM_GET_REGISTERS_FAILED);

            IdRegs.u64RegIdAa64Pfr0El1  = aValues[8].Reg64;
            IdRegs.u64RegIdAa64Pfr1El1  = aValues[9].Reg64;
            IdRegs.u64RegIdAa64Dfr0El1  = aValues[0].Reg64;
            IdRegs.u64RegIdAa64Dfr1El1  = aValues[1].Reg64;
            IdRegs.u64RegIdAa64Isar0El1 = aValues[2].Reg64;
            IdRegs.u64RegIdAa64Isar1El1 = aValues[3].Reg64;
            IdRegs.u64RegIdAa64Isar2El1 = aValues[4].Reg64;
            IdRegs.u64RegIdAa64Mmfr0El1 = aValues[5].Reg64;
            IdRegs.u64RegIdAa64Mmfr1El1 = aValues[6].Reg64;
            IdRegs.u64RegIdAa64Mmfr2El1 = aValues[7].Reg64;
            IdRegs.u64RegCtrEl0         = aValues[10].Reg64;
            IdRegs.u64RegDczidEl0       = aValues[11].Reg64;
#else
            /* Fallback: synthesize only the PARange field from the queried
               physical address width capability. */
            switch (pVM->nem.s.cPhysicalAddressWidth)
            {
                case 32: IdRegs.u64RegIdAa64Mmfr0El1 = RT_BF_SET(IdRegs.u64RegIdAa64Mmfr0El1, ARMV8_ID_AA64MMFR0_EL1_PARANGE, ARMV8_ID_AA64MMFR0_EL1_PARANGE_32BITS); break;
                case 36: IdRegs.u64RegIdAa64Mmfr0El1 = RT_BF_SET(IdRegs.u64RegIdAa64Mmfr0El1, ARMV8_ID_AA64MMFR0_EL1_PARANGE, ARMV8_ID_AA64MMFR0_EL1_PARANGE_36BITS); break;
                case 40: IdRegs.u64RegIdAa64Mmfr0El1 = RT_BF_SET(IdRegs.u64RegIdAa64Mmfr0El1, ARMV8_ID_AA64MMFR0_EL1_PARANGE, ARMV8_ID_AA64MMFR0_EL1_PARANGE_40BITS); break;
                case 42: IdRegs.u64RegIdAa64Mmfr0El1 = RT_BF_SET(IdRegs.u64RegIdAa64Mmfr0El1, ARMV8_ID_AA64MMFR0_EL1_PARANGE, ARMV8_ID_AA64MMFR0_EL1_PARANGE_42BITS); break;
                case 44: IdRegs.u64RegIdAa64Mmfr0El1 = RT_BF_SET(IdRegs.u64RegIdAa64Mmfr0El1, ARMV8_ID_AA64MMFR0_EL1_PARANGE, ARMV8_ID_AA64MMFR0_EL1_PARANGE_44BITS); break;
                case 48: IdRegs.u64RegIdAa64Mmfr0El1 = RT_BF_SET(IdRegs.u64RegIdAa64Mmfr0El1, ARMV8_ID_AA64MMFR0_EL1_PARANGE, ARMV8_ID_AA64MMFR0_EL1_PARANGE_48BITS); break;
                case 52: IdRegs.u64RegIdAa64Mmfr0El1 = RT_BF_SET(IdRegs.u64RegIdAa64Mmfr0El1, ARMV8_ID_AA64MMFR0_EL1_PARANGE, ARMV8_ID_AA64MMFR0_EL1_PARANGE_52BITS); break;
                default: AssertReleaseFailed(); break;
            }
#endif

            int rc = CPUMR3PopulateFeaturesByIdRegisters(pVM, &IdRegs);
            if (RT_FAILURE(rc))
                return rc;
        }
    }
    pVM->nem.s.fCreatedEmts = true;

    LogRel(("NEM: Successfully set up partition\n"));
    return VINF_SUCCESS;
}
738
739
740/**
741 * Try initialize the native API.
742 *
743 * This may only do part of the job, more can be done in
744 * nemR3NativeInitAfterCPUM() and nemR3NativeInitCompleted().
745 *
746 * @returns VBox status code.
747 * @param pVM The cross context VM structure.
748 * @param fFallback Whether we're in fallback mode or use-NEM mode. In
749 * the latter we'll fail if we cannot initialize.
750 * @param fForced Whether the HMForced flag is set and we should
751 * fail if we cannot initialize.
752 */
753int nemR3NativeInit(PVM pVM, bool fFallback, bool fForced)
754{
755 g_uBuildNo = RTSystemGetNtBuildNo();
756
757 /*
758 * Error state.
759 * The error message will be non-empty on failure and 'rc' will be set too.
760 */
761 RTERRINFOSTATIC ErrInfo;
762 PRTERRINFO pErrInfo = RTErrInfoInitStatic(&ErrInfo);
763 int rc = nemR3WinInitProbeAndLoad(fForced, pErrInfo);
764 if (RT_SUCCESS(rc))
765 {
766 /*
767 * Check the capabilties of the hypervisor, starting with whether it's present.
768 */
769 rc = nemR3WinInitCheckCapabilities(pVM, pErrInfo);
770 if (RT_SUCCESS(rc))
771 {
772 /*
773 * Create and initialize a partition.
774 */
775 rc = nemR3WinInitCreatePartition(pVM, pErrInfo);
776 if (RT_SUCCESS(rc))
777 {
778 rc = nemR3NativeInitSetupVm(pVM);
779 if (RT_SUCCESS(rc))
780 {
781 /*
782 * Set ourselves as the execution engine and make config adjustments.
783 */
784 VM_SET_MAIN_EXECUTION_ENGINE(pVM, VM_EXEC_ENGINE_NATIVE_API);
785 Log(("NEM: Marked active!\n"));
786 PGMR3EnableNemMode(pVM);
787
788 /*
789 * Register release statistics
790 */
791 STAMR3Register(pVM, (void *)&pVM->nem.s.cMappedPages, STAMTYPE_U32, STAMVISIBILITY_ALWAYS,
792 "/NEM/PagesCurrentlyMapped", STAMUNIT_PAGES, "Number guest pages currently mapped by the VM");
793 STAMR3Register(pVM, (void *)&pVM->nem.s.StatMapPage, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS,
794 "/NEM/PagesMapCalls", STAMUNIT_PAGES, "Calls to WHvMapGpaRange/HvCallMapGpaPages");
795 STAMR3Register(pVM, (void *)&pVM->nem.s.StatMapPageFailed, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS,
796 "/NEM/PagesMapFails", STAMUNIT_PAGES, "Calls to WHvMapGpaRange/HvCallMapGpaPages that failed");
797 STAMR3Register(pVM, (void *)&pVM->nem.s.StatUnmapPage, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS,
798 "/NEM/PagesUnmapCalls", STAMUNIT_PAGES, "Calls to WHvUnmapGpaRange/HvCallUnmapGpaPages");
799 STAMR3Register(pVM, (void *)&pVM->nem.s.StatUnmapPageFailed, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS,
800 "/NEM/PagesUnmapFails", STAMUNIT_PAGES, "Calls to WHvUnmapGpaRange/HvCallUnmapGpaPages that failed");
801 STAMR3Register(pVM, &pVM->nem.s.StatProfMapGpaRange, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS,
802 "/NEM/PagesMapGpaRange", STAMUNIT_TICKS_PER_CALL, "Profiling calls to WHvMapGpaRange for bigger stuff");
803 STAMR3Register(pVM, &pVM->nem.s.StatProfUnmapGpaRange, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS,
804 "/NEM/PagesUnmapGpaRange", STAMUNIT_TICKS_PER_CALL, "Profiling calls to WHvUnmapGpaRange for bigger stuff");
805 STAMR3Register(pVM, &pVM->nem.s.StatProfMapGpaRangePage, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS,
806 "/NEM/PagesMapGpaRangePage", STAMUNIT_TICKS_PER_CALL, "Profiling calls to WHvMapGpaRange for single pages");
807 STAMR3Register(pVM, &pVM->nem.s.StatProfUnmapGpaRangePage, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS,
808 "/NEM/PagesUnmapGpaRangePage", STAMUNIT_TICKS_PER_CALL, "Profiling calls to WHvUnmapGpaRange for single pages");
809
810 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
811 {
812 PNEMCPU pNemCpu = &pVM->apCpusR3[idCpu]->nem.s;
813 STAMR3RegisterF(pVM, &pNemCpu->StatExitPortIo, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of port I/O exits", "/NEM/CPU%u/ExitPortIo", idCpu);
814 STAMR3RegisterF(pVM, &pNemCpu->StatExitMemUnmapped, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of unmapped memory exits", "/NEM/CPU%u/ExitMemUnmapped", idCpu);
815 STAMR3RegisterF(pVM, &pNemCpu->StatExitMemIntercept, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of intercepted memory exits", "/NEM/CPU%u/ExitMemIntercept", idCpu);
816 STAMR3RegisterF(pVM, &pNemCpu->StatExitHalt, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of HLT exits", "/NEM/CPU%u/ExitHalt", idCpu);
817 STAMR3RegisterF(pVM, &pNemCpu->StatExitInterruptWindow, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of interrupt window exits", "/NEM/CPU%u/ExitInterruptWindow", idCpu);
818 STAMR3RegisterF(pVM, &pNemCpu->StatExitCpuId, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of CPUID exits", "/NEM/CPU%u/ExitCpuId", idCpu);
819 STAMR3RegisterF(pVM, &pNemCpu->StatExitMsr, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of MSR access exits", "/NEM/CPU%u/ExitMsr", idCpu);
820 STAMR3RegisterF(pVM, &pNemCpu->StatExitException, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of exception exits", "/NEM/CPU%u/ExitException", idCpu);
821 STAMR3RegisterF(pVM, &pNemCpu->StatExitExceptionBp, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of #BP exits", "/NEM/CPU%u/ExitExceptionBp", idCpu);
822 STAMR3RegisterF(pVM, &pNemCpu->StatExitExceptionDb, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of #DB exits", "/NEM/CPU%u/ExitExceptionDb", idCpu);
823 STAMR3RegisterF(pVM, &pNemCpu->StatExitExceptionGp, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of #GP exits", "/NEM/CPU%u/ExitExceptionGp", idCpu);
824 STAMR3RegisterF(pVM, &pNemCpu->StatExitExceptionGpMesa, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of #GP exits from mesa driver", "/NEM/CPU%u/ExitExceptionGpMesa", idCpu);
825 STAMR3RegisterF(pVM, &pNemCpu->StatExitExceptionUd, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of #UD exits", "/NEM/CPU%u/ExitExceptionUd", idCpu);
826 STAMR3RegisterF(pVM, &pNemCpu->StatExitExceptionUdHandled, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of handled #UD exits", "/NEM/CPU%u/ExitExceptionUdHandled", idCpu);
827 STAMR3RegisterF(pVM, &pNemCpu->StatExitUnrecoverable, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of unrecoverable exits", "/NEM/CPU%u/ExitUnrecoverable", idCpu);
828 STAMR3RegisterF(pVM, &pNemCpu->StatGetMsgTimeout, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of get message timeouts/alerts", "/NEM/CPU%u/GetMsgTimeout", idCpu);
829 STAMR3RegisterF(pVM, &pNemCpu->StatStopCpuSuccess, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of successful CPU stops", "/NEM/CPU%u/StopCpuSuccess", idCpu);
830 STAMR3RegisterF(pVM, &pNemCpu->StatStopCpuPending, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of pending CPU stops", "/NEM/CPU%u/StopCpuPending", idCpu);
831 STAMR3RegisterF(pVM, &pNemCpu->StatStopCpuPendingAlerts,STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of pending CPU stop alerts", "/NEM/CPU%u/StopCpuPendingAlerts", idCpu);
832 STAMR3RegisterF(pVM, &pNemCpu->StatStopCpuPendingOdd, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of odd pending CPU stops (see code)", "/NEM/CPU%u/StopCpuPendingOdd", idCpu);
833 STAMR3RegisterF(pVM, &pNemCpu->StatCancelChangedState, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of cancel changed state", "/NEM/CPU%u/CancelChangedState", idCpu);
834 STAMR3RegisterF(pVM, &pNemCpu->StatCancelAlertedThread, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of cancel alerted EMT", "/NEM/CPU%u/CancelAlertedEMT", idCpu);
835 STAMR3RegisterF(pVM, &pNemCpu->StatBreakOnFFPre, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of pre execution FF breaks", "/NEM/CPU%u/BreakOnFFPre", idCpu);
836 STAMR3RegisterF(pVM, &pNemCpu->StatBreakOnFFPost, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of post execution FF breaks", "/NEM/CPU%u/BreakOnFFPost", idCpu);
837 STAMR3RegisterF(pVM, &pNemCpu->StatBreakOnCancel, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of cancel execution breaks", "/NEM/CPU%u/BreakOnCancel", idCpu);
838 STAMR3RegisterF(pVM, &pNemCpu->StatBreakOnStatus, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of status code breaks", "/NEM/CPU%u/BreakOnStatus", idCpu);
839 STAMR3RegisterF(pVM, &pNemCpu->StatImportOnDemand, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of on-demand state imports", "/NEM/CPU%u/ImportOnDemand", idCpu);
840 STAMR3RegisterF(pVM, &pNemCpu->StatImportOnReturn, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of state imports on loop return", "/NEM/CPU%u/ImportOnReturn", idCpu);
841 STAMR3RegisterF(pVM, &pNemCpu->StatImportOnReturnSkipped, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of skipped state imports on loop return", "/NEM/CPU%u/ImportOnReturnSkipped", idCpu);
842 STAMR3RegisterF(pVM, &pNemCpu->StatQueryCpuTick, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of TSC queries", "/NEM/CPU%u/QueryCpuTick", idCpu);
843 }
844
845 if (!SUPR3IsDriverless())
846 {
847 PUVM pUVM = pVM->pUVM;
848 STAMR3RegisterRefresh(pUVM, &pVM->nem.s.R0Stats.cPagesAvailable, STAMTYPE_U64, STAMVISIBILITY_ALWAYS,
849 STAMUNIT_PAGES, STAM_REFRESH_GRP_NEM, "Free pages available to the hypervisor",
850 "/NEM/R0Stats/cPagesAvailable");
851 STAMR3RegisterRefresh(pUVM, &pVM->nem.s.R0Stats.cPagesInUse, STAMTYPE_U64, STAMVISIBILITY_ALWAYS,
852 STAMUNIT_PAGES, STAM_REFRESH_GRP_NEM, "Pages in use by hypervisor",
853 "/NEM/R0Stats/cPagesInUse");
854 }
855 }
856
857 }
858
859 }
860 }
861
862 /*
863 * We only fail if in forced mode, otherwise just log the complaint and return.
864 */
865 Assert(pVM->bMainExecutionEngine == VM_EXEC_ENGINE_NATIVE_API || RTErrInfoIsSet(pErrInfo));
866 if ( (fForced || !fFallback)
867 && pVM->bMainExecutionEngine != VM_EXEC_ENGINE_NATIVE_API)
868 return VMSetError(pVM, RT_SUCCESS_NP(rc) ? VERR_NEM_NOT_AVAILABLE : rc, RT_SRC_POS, "%s", pErrInfo->pszMsg);
869
870 if (RTErrInfoIsSet(pErrInfo))
871 LogRel(("NEM: Not available: %s\n", pErrInfo->pszMsg));
872 return VINF_SUCCESS;
873}
874
875
876/**
877 * This is called after CPUMR3Init is done.
878 *
879 * @returns VBox status code.
880 * @param pVM The VM handle..
881 */
882int nemR3NativeInitAfterCPUM(PVM pVM)
883{
884 /*
885 * Validate sanity.
886 */
887 AssertReturn(pVM->bMainExecutionEngine == VM_EXEC_ENGINE_NATIVE_API, VERR_WRONG_ORDER);
888
889 /** @todo */
890
891 /*
892 * Any hyper-v statistics we can get at now? HvCallMapStatsPage isn't accessible any more.
893 */
894 /** @todo stats */
895
896 /*
897 * Adjust features.
898 *
899 * Note! We've already disabled X2APIC and MONITOR/MWAIT via CFGM during
900 * the first init call.
901 */
902
903 return VINF_SUCCESS;
904}
905
906
907int nemR3NativeInitCompleted(PVM pVM, VMINITCOMPLETED enmWhat)
908{
909 //BOOL fRet = SetThreadPriority(GetCurrentThread(), 0);
910 //AssertLogRel(fRet);
911
912 NOREF(pVM); NOREF(enmWhat);
913 return VINF_SUCCESS;
914}
915
916
917int nemR3NativeTerm(PVM pVM)
918{
919 /*
920 * Delete the partition.
921 */
922 WHV_PARTITION_HANDLE hPartition = pVM->nem.s.hPartition;
923 pVM->nem.s.hPartition = NULL;
924 pVM->nem.s.hPartitionDevice = NULL;
925 if (hPartition != NULL)
926 {
927 VMCPUID idCpu = pVM->nem.s.fCreatedEmts ? pVM->cCpus : 0;
928 LogRel(("NEM: Destroying partition %p with its %u VCpus...\n", hPartition, idCpu));
929 while (idCpu-- > 0)
930 {
931 HRESULT hrc = WHvDeleteVirtualProcessor(hPartition, idCpu);
932 AssertLogRelMsg(SUCCEEDED(hrc), ("WHvDeleteVirtualProcessor(%p, %u) -> %Rhrc (Last=%#x/%u)\n",
933 hPartition, idCpu, hrc, RTNtLastStatusValue(),
934 RTNtLastErrorValue()));
935 }
936 WHvDeletePartition(hPartition);
937 }
938 pVM->nem.s.fCreatedEmts = false;
939 return VINF_SUCCESS;
940}
941
942
943/**
944 * VM reset notification.
945 *
946 * @param pVM The cross context VM structure.
947 */
948void nemR3NativeReset(PVM pVM)
949{
950 RT_NOREF(pVM);
951}
952
953
954/**
955 * Reset CPU due to INIT IPI or hot (un)plugging.
956 *
957 * @param pVCpu The cross context virtual CPU structure of the CPU being
958 * reset.
959 * @param fInitIpi Whether this is the INIT IPI or hot (un)plugging case.
960 */
961void nemR3NativeResetCpu(PVMCPU pVCpu, bool fInitIpi)
962{
963 RT_NOREF(pVCpu, fInitIpi);
964}
965
966
/**
 * Exports the guest register state from CPUMCTX to Hyper-V.
 *
 * Only the state CPUM currently owns (i.e. bits NOT set in fExtrn) is pushed,
 * batched into a single WHvSetVirtualProcessorRegisters call.  On success all
 * state is marked as externalized to NEM again.
 *
 * Note: currently only GPRs, PC, PSTATE and the vector registers are
 * exported here; system registers are not (compare with the import
 * counterpart) — presumably still work in progress.
 *
 * @returns VBox status code (VERR_INTERNAL_ERROR on API failure).
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context virtual CPU structure.
 */
NEM_TMPL_STATIC int nemHCWinCopyStateToHyperV(PVMCC pVM, PVMCPUCC pVCpu)
{
    WHV_REGISTER_NAME aenmNames[128];
    WHV_REGISTER_VALUE aValues[128];

    /* What does CPUM hold that needs flushing to the hypervisor? */
    uint64_t const fWhat = ~pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_ALL;
    if (!fWhat)
        return VINF_SUCCESS;
    uintptr_t iReg = 0;

/* Queue a 64-bit register taken from a CPUM register union (.x member). */
#define ADD_REG64(a_enmName, a_uValue) do { \
        aenmNames[iReg] = (a_enmName); \
        aValues[iReg].Reg128.High64 = 0; \
        aValues[iReg].Reg64 = (a_uValue).x; \
        iReg++; \
    } while (0)
/* Queue a 64-bit register taken from a raw uint64_t. */
#define ADD_REG64_RAW(a_enmName, a_uValue) do { \
        aenmNames[iReg] = (a_enmName); \
        aValues[iReg].Reg128.High64 = 0; \
        aValues[iReg].Reg64 = (a_uValue); \
        iReg++; \
    } while (0)
/* Queue a 128-bit vector register (low/high 64-bit halves). */
#define ADD_REG128(a_enmName, a_uValue) do { \
        aenmNames[iReg] = (a_enmName); \
        aValues[iReg].Reg128.Low64 = (a_uValue).au64[0]; \
        aValues[iReg].Reg128.High64 = (a_uValue).au64[1]; \
        iReg++; \
    } while (0)

    /* GPRs */
    if (fWhat & CPUMCTX_EXTRN_GPRS_MASK)
    {
        if (fWhat & CPUMCTX_EXTRN_X0)
            ADD_REG64(WHvArm64RegisterX0, pVCpu->cpum.GstCtx.aGRegs[0]);
        if (fWhat & CPUMCTX_EXTRN_X1)
            ADD_REG64(WHvArm64RegisterX1, pVCpu->cpum.GstCtx.aGRegs[1]);
        if (fWhat & CPUMCTX_EXTRN_X2)
            ADD_REG64(WHvArm64RegisterX2, pVCpu->cpum.GstCtx.aGRegs[2]);
        if (fWhat & CPUMCTX_EXTRN_X3)
            ADD_REG64(WHvArm64RegisterX3, pVCpu->cpum.GstCtx.aGRegs[3]);
        if (fWhat & CPUMCTX_EXTRN_X4_X28)
        {
            ADD_REG64(WHvArm64RegisterX4, pVCpu->cpum.GstCtx.aGRegs[4]);
            ADD_REG64(WHvArm64RegisterX5, pVCpu->cpum.GstCtx.aGRegs[5]);
            ADD_REG64(WHvArm64RegisterX6, pVCpu->cpum.GstCtx.aGRegs[6]);
            ADD_REG64(WHvArm64RegisterX7, pVCpu->cpum.GstCtx.aGRegs[7]);
            ADD_REG64(WHvArm64RegisterX8, pVCpu->cpum.GstCtx.aGRegs[8]);
            ADD_REG64(WHvArm64RegisterX9, pVCpu->cpum.GstCtx.aGRegs[9]);
            ADD_REG64(WHvArm64RegisterX10, pVCpu->cpum.GstCtx.aGRegs[10]);
            ADD_REG64(WHvArm64RegisterX11, pVCpu->cpum.GstCtx.aGRegs[11]);
            ADD_REG64(WHvArm64RegisterX12, pVCpu->cpum.GstCtx.aGRegs[12]);
            ADD_REG64(WHvArm64RegisterX13, pVCpu->cpum.GstCtx.aGRegs[13]);
            ADD_REG64(WHvArm64RegisterX14, pVCpu->cpum.GstCtx.aGRegs[14]);
            ADD_REG64(WHvArm64RegisterX15, pVCpu->cpum.GstCtx.aGRegs[15]);
            ADD_REG64(WHvArm64RegisterX16, pVCpu->cpum.GstCtx.aGRegs[16]);
            ADD_REG64(WHvArm64RegisterX17, pVCpu->cpum.GstCtx.aGRegs[17]);
            ADD_REG64(WHvArm64RegisterX18, pVCpu->cpum.GstCtx.aGRegs[18]);
            ADD_REG64(WHvArm64RegisterX19, pVCpu->cpum.GstCtx.aGRegs[19]);
            ADD_REG64(WHvArm64RegisterX20, pVCpu->cpum.GstCtx.aGRegs[20]);
            ADD_REG64(WHvArm64RegisterX21, pVCpu->cpum.GstCtx.aGRegs[21]);
            ADD_REG64(WHvArm64RegisterX22, pVCpu->cpum.GstCtx.aGRegs[22]);
            ADD_REG64(WHvArm64RegisterX23, pVCpu->cpum.GstCtx.aGRegs[23]);
            ADD_REG64(WHvArm64RegisterX24, pVCpu->cpum.GstCtx.aGRegs[24]);
            ADD_REG64(WHvArm64RegisterX25, pVCpu->cpum.GstCtx.aGRegs[25]);
            ADD_REG64(WHvArm64RegisterX26, pVCpu->cpum.GstCtx.aGRegs[26]);
            ADD_REG64(WHvArm64RegisterX27, pVCpu->cpum.GstCtx.aGRegs[27]);
            ADD_REG64(WHvArm64RegisterX28, pVCpu->cpum.GstCtx.aGRegs[28]);
        }
        /* Note: LR is x30 and FP is x29 in the aGRegs array. */
        if (fWhat & CPUMCTX_EXTRN_LR)
            ADD_REG64(WHvArm64RegisterLr, pVCpu->cpum.GstCtx.aGRegs[30]);
        if (fWhat & CPUMCTX_EXTRN_FP)
            ADD_REG64(WHvArm64RegisterFp, pVCpu->cpum.GstCtx.aGRegs[29]);
    }

    /* Program counter & processor state (PSTATE). */
    if (fWhat & CPUMCTX_EXTRN_PC)
        ADD_REG64_RAW(WHvArm64RegisterPc, pVCpu->cpum.GstCtx.Pc.u64);
    if (fWhat & CPUMCTX_EXTRN_PSTATE)
        ADD_REG64_RAW(WHvArm64RegisterPstate, pVCpu->cpum.GstCtx.fPState);

    /* Vector state. */
    if (fWhat & CPUMCTX_EXTRN_V0_V31)
    {
        ADD_REG128(WHvArm64RegisterQ0, pVCpu->cpum.GstCtx.aVRegs[0]);
        ADD_REG128(WHvArm64RegisterQ1, pVCpu->cpum.GstCtx.aVRegs[1]);
        ADD_REG128(WHvArm64RegisterQ2, pVCpu->cpum.GstCtx.aVRegs[2]);
        ADD_REG128(WHvArm64RegisterQ3, pVCpu->cpum.GstCtx.aVRegs[3]);
        ADD_REG128(WHvArm64RegisterQ4, pVCpu->cpum.GstCtx.aVRegs[4]);
        ADD_REG128(WHvArm64RegisterQ5, pVCpu->cpum.GstCtx.aVRegs[5]);
        ADD_REG128(WHvArm64RegisterQ6, pVCpu->cpum.GstCtx.aVRegs[6]);
        ADD_REG128(WHvArm64RegisterQ7, pVCpu->cpum.GstCtx.aVRegs[7]);
        ADD_REG128(WHvArm64RegisterQ8, pVCpu->cpum.GstCtx.aVRegs[8]);
        ADD_REG128(WHvArm64RegisterQ9, pVCpu->cpum.GstCtx.aVRegs[9]);
        ADD_REG128(WHvArm64RegisterQ10, pVCpu->cpum.GstCtx.aVRegs[10]);
        ADD_REG128(WHvArm64RegisterQ11, pVCpu->cpum.GstCtx.aVRegs[11]);
        ADD_REG128(WHvArm64RegisterQ12, pVCpu->cpum.GstCtx.aVRegs[12]);
        ADD_REG128(WHvArm64RegisterQ13, pVCpu->cpum.GstCtx.aVRegs[13]);
        ADD_REG128(WHvArm64RegisterQ14, pVCpu->cpum.GstCtx.aVRegs[14]);
        ADD_REG128(WHvArm64RegisterQ15, pVCpu->cpum.GstCtx.aVRegs[15]);
        ADD_REG128(WHvArm64RegisterQ16, pVCpu->cpum.GstCtx.aVRegs[16]);
        ADD_REG128(WHvArm64RegisterQ17, pVCpu->cpum.GstCtx.aVRegs[17]);
        ADD_REG128(WHvArm64RegisterQ18, pVCpu->cpum.GstCtx.aVRegs[18]);
        ADD_REG128(WHvArm64RegisterQ19, pVCpu->cpum.GstCtx.aVRegs[19]);
        ADD_REG128(WHvArm64RegisterQ20, pVCpu->cpum.GstCtx.aVRegs[20]);
        ADD_REG128(WHvArm64RegisterQ21, pVCpu->cpum.GstCtx.aVRegs[21]);
        ADD_REG128(WHvArm64RegisterQ22, pVCpu->cpum.GstCtx.aVRegs[22]);
        ADD_REG128(WHvArm64RegisterQ23, pVCpu->cpum.GstCtx.aVRegs[23]);
        ADD_REG128(WHvArm64RegisterQ24, pVCpu->cpum.GstCtx.aVRegs[24]);
        ADD_REG128(WHvArm64RegisterQ25, pVCpu->cpum.GstCtx.aVRegs[25]);
        ADD_REG128(WHvArm64RegisterQ26, pVCpu->cpum.GstCtx.aVRegs[26]);
        ADD_REG128(WHvArm64RegisterQ27, pVCpu->cpum.GstCtx.aVRegs[27]);
        ADD_REG128(WHvArm64RegisterQ28, pVCpu->cpum.GstCtx.aVRegs[28]);
        ADD_REG128(WHvArm64RegisterQ29, pVCpu->cpum.GstCtx.aVRegs[29]);
        ADD_REG128(WHvArm64RegisterQ30, pVCpu->cpum.GstCtx.aVRegs[30]);
        ADD_REG128(WHvArm64RegisterQ31, pVCpu->cpum.GstCtx.aVRegs[31]);
    }

#undef ADD_REG64
#undef ADD_REG64_RAW
#undef ADD_REG128

    /*
     * Set the registers.
     */
    Assert(iReg < RT_ELEMENTS(aValues));
    Assert(iReg < RT_ELEMENTS(aenmNames));
    HRESULT hrc = WHvSetVirtualProcessorRegisters(pVM->nem.s.hPartition, pVCpu->idCpu, aenmNames, iReg, aValues);
    if (SUCCEEDED(hrc))
    {
        /* Everything now lives in the hypervisor again, so flag it all as
           externalized (kept by NEM). */
        pVCpu->cpum.GstCtx.fExtrn |= CPUMCTX_EXTRN_ALL | CPUMCTX_EXTRN_KEEPER_NEM;
        return VINF_SUCCESS;
    }
    AssertLogRelMsgFailed(("WHvSetVirtualProcessorRegisters(%p, %u,,%u,) -> %Rhrc (Last=%#x/%u)\n",
                           pVM->nem.s.hPartition, pVCpu->idCpu, iReg,
                           hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
    return VERR_INTERNAL_ERROR;
}
1104
1105
/**
 * Imports guest register state from Hyper-V into CPUMCTX.
 *
 * Works in two passes: first the WHV_REGISTER_NAME list is built from the
 * requested-and-still-external bits in fWhat (order matters!), then a single
 * WHvGetVirtualProcessorRegisters call fetches the values, and finally the
 * values are copied into CPUMCTX in the exact same order (asserted by the
 * GET_* macros).  The imported bits are cleared from fExtrn at the end.
 *
 * @returns VBox status code (VERR_NEM_GET_REGISTERS_FAILED on API failure).
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context virtual CPU structure.
 * @param   fWhat   Which state to import, CPUMCTX_EXTRN_XXX.
 */
NEM_TMPL_STATIC int nemHCWinCopyStateFromHyperV(PVMCC pVM, PVMCPUCC pVCpu, uint64_t fWhat)
{
    WHV_REGISTER_NAME aenmNames[128];

    /* Only fetch what is actually still externalized. */
    fWhat &= pVCpu->cpum.GstCtx.fExtrn;
    if (!fWhat)
        return VINF_SUCCESS;

    uintptr_t iReg = 0;

    /* GPRs */
    if (fWhat & CPUMCTX_EXTRN_GPRS_MASK)
    {
        if (fWhat & CPUMCTX_EXTRN_X0)
            aenmNames[iReg++] = WHvArm64RegisterX0;
        if (fWhat & CPUMCTX_EXTRN_X1)
            aenmNames[iReg++] = WHvArm64RegisterX1;
        if (fWhat & CPUMCTX_EXTRN_X2)
            aenmNames[iReg++] = WHvArm64RegisterX2;
        if (fWhat & CPUMCTX_EXTRN_X3)
            aenmNames[iReg++] = WHvArm64RegisterX3;
        if (fWhat & CPUMCTX_EXTRN_X4_X28)
        {
            aenmNames[iReg++] = WHvArm64RegisterX4;
            aenmNames[iReg++] = WHvArm64RegisterX5;
            aenmNames[iReg++] = WHvArm64RegisterX6;
            aenmNames[iReg++] = WHvArm64RegisterX7;
            aenmNames[iReg++] = WHvArm64RegisterX8;
            aenmNames[iReg++] = WHvArm64RegisterX9;
            aenmNames[iReg++] = WHvArm64RegisterX10;
            aenmNames[iReg++] = WHvArm64RegisterX11;
            aenmNames[iReg++] = WHvArm64RegisterX12;
            aenmNames[iReg++] = WHvArm64RegisterX13;
            aenmNames[iReg++] = WHvArm64RegisterX14;
            aenmNames[iReg++] = WHvArm64RegisterX15;
            aenmNames[iReg++] = WHvArm64RegisterX16;
            aenmNames[iReg++] = WHvArm64RegisterX17;
            aenmNames[iReg++] = WHvArm64RegisterX18;
            aenmNames[iReg++] = WHvArm64RegisterX19;
            aenmNames[iReg++] = WHvArm64RegisterX20;
            aenmNames[iReg++] = WHvArm64RegisterX21;
            aenmNames[iReg++] = WHvArm64RegisterX22;
            aenmNames[iReg++] = WHvArm64RegisterX23;
            aenmNames[iReg++] = WHvArm64RegisterX24;
            aenmNames[iReg++] = WHvArm64RegisterX25;
            aenmNames[iReg++] = WHvArm64RegisterX26;
            aenmNames[iReg++] = WHvArm64RegisterX27;
            aenmNames[iReg++] = WHvArm64RegisterX28;
        }
        if (fWhat & CPUMCTX_EXTRN_LR)
            aenmNames[iReg++] = WHvArm64RegisterLr;
        if (fWhat & CPUMCTX_EXTRN_FP)
            aenmNames[iReg++] = WHvArm64RegisterFp;
    }

    /* PC & PSTATE, plus exception-return and stack-pointer state. */
    if (fWhat & CPUMCTX_EXTRN_PC)
        aenmNames[iReg++] = WHvArm64RegisterPc;
    if (fWhat & CPUMCTX_EXTRN_PSTATE)
        aenmNames[iReg++] = WHvArm64RegisterPstate;
    if (fWhat & CPUMCTX_EXTRN_SPSR)
        aenmNames[iReg++] = WHvArm64RegisterSpsrEl1;
    if (fWhat & CPUMCTX_EXTRN_ELR)
        aenmNames[iReg++] = WHvArm64RegisterElrEl1;
    if (fWhat & CPUMCTX_EXTRN_SP)
    {
        aenmNames[iReg++] = WHvArm64RegisterSpEl0;
        aenmNames[iReg++] = WHvArm64RegisterSpEl1;
    }
    if (fWhat & CPUMCTX_EXTRN_SCTLR_TCR_TTBR)
    {
        aenmNames[iReg++] = WHvArm64RegisterSctlrEl1;
        aenmNames[iReg++] = WHvArm64RegisterTcrEl1;
        aenmNames[iReg++] = WHvArm64RegisterTtbr0El1;
        aenmNames[iReg++] = WHvArm64RegisterTtbr1El1;
    }

    /* Vector state. */
    if (fWhat & CPUMCTX_EXTRN_V0_V31)
    {
        aenmNames[iReg++] = WHvArm64RegisterQ0;
        aenmNames[iReg++] = WHvArm64RegisterQ1;
        aenmNames[iReg++] = WHvArm64RegisterQ2;
        aenmNames[iReg++] = WHvArm64RegisterQ3;
        aenmNames[iReg++] = WHvArm64RegisterQ4;
        aenmNames[iReg++] = WHvArm64RegisterQ5;
        aenmNames[iReg++] = WHvArm64RegisterQ6;
        aenmNames[iReg++] = WHvArm64RegisterQ7;
        aenmNames[iReg++] = WHvArm64RegisterQ8;
        aenmNames[iReg++] = WHvArm64RegisterQ9;
        aenmNames[iReg++] = WHvArm64RegisterQ10;
        aenmNames[iReg++] = WHvArm64RegisterQ11;
        aenmNames[iReg++] = WHvArm64RegisterQ12;
        aenmNames[iReg++] = WHvArm64RegisterQ13;
        aenmNames[iReg++] = WHvArm64RegisterQ14;
        aenmNames[iReg++] = WHvArm64RegisterQ15;

        aenmNames[iReg++] = WHvArm64RegisterQ16;
        aenmNames[iReg++] = WHvArm64RegisterQ17;
        aenmNames[iReg++] = WHvArm64RegisterQ18;
        aenmNames[iReg++] = WHvArm64RegisterQ19;
        aenmNames[iReg++] = WHvArm64RegisterQ20;
        aenmNames[iReg++] = WHvArm64RegisterQ21;
        aenmNames[iReg++] = WHvArm64RegisterQ22;
        aenmNames[iReg++] = WHvArm64RegisterQ23;
        aenmNames[iReg++] = WHvArm64RegisterQ24;
        aenmNames[iReg++] = WHvArm64RegisterQ25;
        aenmNames[iReg++] = WHvArm64RegisterQ26;
        aenmNames[iReg++] = WHvArm64RegisterQ27;
        aenmNames[iReg++] = WHvArm64RegisterQ28;
        aenmNames[iReg++] = WHvArm64RegisterQ29;
        aenmNames[iReg++] = WHvArm64RegisterQ30;
        aenmNames[iReg++] = WHvArm64RegisterQ31;
    }
    if (fWhat & CPUMCTX_EXTRN_FPCR)
        aenmNames[iReg++] = WHvArm64RegisterFpcr;
    if (fWhat & CPUMCTX_EXTRN_FPSR)
        aenmNames[iReg++] = WHvArm64RegisterFpsr;

    /* System registers. */
    if (fWhat & CPUMCTX_EXTRN_SYSREG_MISC)
    {
        aenmNames[iReg++] = WHvArm64RegisterVbarEl1;
        aenmNames[iReg++] = WHvArm64RegisterEsrEl1;
        aenmNames[iReg++] = WHvArm64RegisterFarEl1;
        /** @todo */
    }

#if 0
    if (fWhat & CPUMCTX_EXTRN_SYSREG_DEBUG)
    {
        aenmNames[iReg++] = WHvArm64RegisterDbgbcr0El1;
        /** @todo */
    }
#endif

    if (fWhat & CPUMCTX_EXTRN_SYSREG_PAUTH_KEYS)
    {
        aenmNames[iReg++] = WHvArm64RegisterApdAKeyHiEl1;
        /** @todo */
    }

    size_t const cRegs = iReg;
    Assert(cRegs < RT_ELEMENTS(aenmNames));

    /*
     * Get the registers.
     */
    WHV_REGISTER_VALUE aValues[128];
    RT_ZERO(aValues);
    Assert(RT_ELEMENTS(aValues) >= cRegs);
    Assert(RT_ELEMENTS(aenmNames) >= cRegs);
    HRESULT hrc = WHvGetVirtualProcessorRegisters(pVM->nem.s.hPartition, pVCpu->idCpu, aenmNames, (uint32_t)cRegs, aValues);
    AssertLogRelMsgReturn(SUCCEEDED(hrc),
                          ("WHvGetVirtualProcessorRegisters(%p, %u,,%u,) -> %Rhrc (Last=%#x/%u)\n",
                           pVM->nem.s.hPartition, pVCpu->idCpu, cRegs, hrc, RTNtLastStatusValue(), RTNtLastErrorValue())
                          , VERR_NEM_GET_REGISTERS_FAILED);

    /* Second pass: copy the fetched values into CPUMCTX.  The asserts in the
       macros verify the order matches the name list built above exactly. */
    iReg = 0;
/* Copy a 64-bit value into a CPUM register union (.x member). */
#define GET_REG64(a_DstVar, a_enmName) do { \
        Assert(aenmNames[iReg] == (a_enmName)); \
        (a_DstVar).x = aValues[iReg].Reg64; \
        iReg++; \
    } while (0)
/* Copy a 64-bit value into a raw uint64_t variable. */
#define GET_REG64_RAW(a_DstVar, a_enmName) do { \
        Assert(aenmNames[iReg] == (a_enmName)); \
        (a_DstVar) = aValues[iReg].Reg64; \
        iReg++; \
    } while (0)
/* Copy a 64-bit value into a system register union (.u64 member). */
#define GET_SYSREG64(a_DstVar, a_enmName) do { \
        Assert(aenmNames[iReg] == (a_enmName)); \
        (a_DstVar).u64 = aValues[iReg].Reg64; \
        iReg++; \
    } while (0)
/* Copy a 128-bit (vector) value. */
#define GET_REG128(a_DstVar, a_enmName) do { \
        Assert(aenmNames[iReg] == a_enmName); \
        (a_DstVar).au64[0] = aValues[iReg].Reg128.Low64; \
        (a_DstVar).au64[1] = aValues[iReg].Reg128.High64; \
        iReg++; \
    } while (0)

    /* GPRs */
    if (fWhat & CPUMCTX_EXTRN_GPRS_MASK)
    {
        if (fWhat & CPUMCTX_EXTRN_X0)
            GET_REG64(pVCpu->cpum.GstCtx.aGRegs[0], WHvArm64RegisterX0);
        if (fWhat & CPUMCTX_EXTRN_X1)
            GET_REG64(pVCpu->cpum.GstCtx.aGRegs[1], WHvArm64RegisterX1);
        if (fWhat & CPUMCTX_EXTRN_X2)
            GET_REG64(pVCpu->cpum.GstCtx.aGRegs[2], WHvArm64RegisterX2);
        if (fWhat & CPUMCTX_EXTRN_X3)
            GET_REG64(pVCpu->cpum.GstCtx.aGRegs[3], WHvArm64RegisterX3);
        if (fWhat & CPUMCTX_EXTRN_X4_X28)
        {
            GET_REG64(pVCpu->cpum.GstCtx.aGRegs[4], WHvArm64RegisterX4);
            GET_REG64(pVCpu->cpum.GstCtx.aGRegs[5], WHvArm64RegisterX5);
            GET_REG64(pVCpu->cpum.GstCtx.aGRegs[6], WHvArm64RegisterX6);
            GET_REG64(pVCpu->cpum.GstCtx.aGRegs[7], WHvArm64RegisterX7);
            GET_REG64(pVCpu->cpum.GstCtx.aGRegs[8], WHvArm64RegisterX8);
            GET_REG64(pVCpu->cpum.GstCtx.aGRegs[9], WHvArm64RegisterX9);
            GET_REG64(pVCpu->cpum.GstCtx.aGRegs[10], WHvArm64RegisterX10);
            GET_REG64(pVCpu->cpum.GstCtx.aGRegs[11], WHvArm64RegisterX11);
            GET_REG64(pVCpu->cpum.GstCtx.aGRegs[12], WHvArm64RegisterX12);
            GET_REG64(pVCpu->cpum.GstCtx.aGRegs[13], WHvArm64RegisterX13);
            GET_REG64(pVCpu->cpum.GstCtx.aGRegs[14], WHvArm64RegisterX14);
            GET_REG64(pVCpu->cpum.GstCtx.aGRegs[15], WHvArm64RegisterX15);
            GET_REG64(pVCpu->cpum.GstCtx.aGRegs[16], WHvArm64RegisterX16);
            GET_REG64(pVCpu->cpum.GstCtx.aGRegs[17], WHvArm64RegisterX17);
            GET_REG64(pVCpu->cpum.GstCtx.aGRegs[18], WHvArm64RegisterX18);
            GET_REG64(pVCpu->cpum.GstCtx.aGRegs[19], WHvArm64RegisterX19);
            GET_REG64(pVCpu->cpum.GstCtx.aGRegs[20], WHvArm64RegisterX20);
            GET_REG64(pVCpu->cpum.GstCtx.aGRegs[21], WHvArm64RegisterX21);
            GET_REG64(pVCpu->cpum.GstCtx.aGRegs[22], WHvArm64RegisterX22);
            GET_REG64(pVCpu->cpum.GstCtx.aGRegs[23], WHvArm64RegisterX23);
            GET_REG64(pVCpu->cpum.GstCtx.aGRegs[24], WHvArm64RegisterX24);
            GET_REG64(pVCpu->cpum.GstCtx.aGRegs[25], WHvArm64RegisterX25);
            GET_REG64(pVCpu->cpum.GstCtx.aGRegs[26], WHvArm64RegisterX26);
            GET_REG64(pVCpu->cpum.GstCtx.aGRegs[27], WHvArm64RegisterX27);
            GET_REG64(pVCpu->cpum.GstCtx.aGRegs[28], WHvArm64RegisterX28);
        }
        /* Note: LR is x30 and FP is x29 in the aGRegs array. */
        if (fWhat & CPUMCTX_EXTRN_LR)
            GET_REG64(pVCpu->cpum.GstCtx.aGRegs[30], WHvArm64RegisterLr);
        if (fWhat & CPUMCTX_EXTRN_FP)
            GET_REG64(pVCpu->cpum.GstCtx.aGRegs[29], WHvArm64RegisterFp);
    }

    /* PC & PSTATE, plus exception-return and stack-pointer state. */
    if (fWhat & CPUMCTX_EXTRN_PC)
        GET_REG64_RAW(pVCpu->cpum.GstCtx.Pc.u64, WHvArm64RegisterPc);
    if (fWhat & CPUMCTX_EXTRN_PSTATE)
        GET_REG64_RAW(pVCpu->cpum.GstCtx.fPState, WHvArm64RegisterPstate);
    if (fWhat & CPUMCTX_EXTRN_SPSR)
        GET_SYSREG64(pVCpu->cpum.GstCtx.Spsr, WHvArm64RegisterSpsrEl1);
    if (fWhat & CPUMCTX_EXTRN_ELR)
        GET_SYSREG64(pVCpu->cpum.GstCtx.Elr, WHvArm64RegisterElrEl1);
    if (fWhat & CPUMCTX_EXTRN_SP)
    {
        GET_SYSREG64(pVCpu->cpum.GstCtx.aSpReg[0], WHvArm64RegisterSpEl0);
        GET_SYSREG64(pVCpu->cpum.GstCtx.aSpReg[1], WHvArm64RegisterSpEl1);
    }
    if (fWhat & CPUMCTX_EXTRN_SCTLR_TCR_TTBR)
    {
        GET_SYSREG64(pVCpu->cpum.GstCtx.Sctlr, WHvArm64RegisterSctlrEl1);
        GET_SYSREG64(pVCpu->cpum.GstCtx.Tcr, WHvArm64RegisterTcrEl1);
        GET_SYSREG64(pVCpu->cpum.GstCtx.Ttbr0, WHvArm64RegisterTtbr0El1);
        GET_SYSREG64(pVCpu->cpum.GstCtx.Ttbr1, WHvArm64RegisterTtbr1El1);
    }

    /* Vector state. */
    if (fWhat & CPUMCTX_EXTRN_V0_V31)
    {
        GET_REG128(pVCpu->cpum.GstCtx.aVRegs[0], WHvArm64RegisterQ0);
        GET_REG128(pVCpu->cpum.GstCtx.aVRegs[1], WHvArm64RegisterQ1);
        GET_REG128(pVCpu->cpum.GstCtx.aVRegs[2], WHvArm64RegisterQ2);
        GET_REG128(pVCpu->cpum.GstCtx.aVRegs[3], WHvArm64RegisterQ3);
        GET_REG128(pVCpu->cpum.GstCtx.aVRegs[4], WHvArm64RegisterQ4);
        GET_REG128(pVCpu->cpum.GstCtx.aVRegs[5], WHvArm64RegisterQ5);
        GET_REG128(pVCpu->cpum.GstCtx.aVRegs[6], WHvArm64RegisterQ6);
        GET_REG128(pVCpu->cpum.GstCtx.aVRegs[7], WHvArm64RegisterQ7);
        GET_REG128(pVCpu->cpum.GstCtx.aVRegs[8], WHvArm64RegisterQ8);
        GET_REG128(pVCpu->cpum.GstCtx.aVRegs[9], WHvArm64RegisterQ9);
        GET_REG128(pVCpu->cpum.GstCtx.aVRegs[10], WHvArm64RegisterQ10);
        GET_REG128(pVCpu->cpum.GstCtx.aVRegs[11], WHvArm64RegisterQ11);
        GET_REG128(pVCpu->cpum.GstCtx.aVRegs[12], WHvArm64RegisterQ12);
        GET_REG128(pVCpu->cpum.GstCtx.aVRegs[13], WHvArm64RegisterQ13);
        GET_REG128(pVCpu->cpum.GstCtx.aVRegs[14], WHvArm64RegisterQ14);
        GET_REG128(pVCpu->cpum.GstCtx.aVRegs[15], WHvArm64RegisterQ15);

        GET_REG128(pVCpu->cpum.GstCtx.aVRegs[16], WHvArm64RegisterQ16);
        GET_REG128(pVCpu->cpum.GstCtx.aVRegs[17], WHvArm64RegisterQ17);
        GET_REG128(pVCpu->cpum.GstCtx.aVRegs[18], WHvArm64RegisterQ18);
        GET_REG128(pVCpu->cpum.GstCtx.aVRegs[19], WHvArm64RegisterQ19);
        GET_REG128(pVCpu->cpum.GstCtx.aVRegs[20], WHvArm64RegisterQ20);
        GET_REG128(pVCpu->cpum.GstCtx.aVRegs[21], WHvArm64RegisterQ21);
        GET_REG128(pVCpu->cpum.GstCtx.aVRegs[22], WHvArm64RegisterQ22);
        GET_REG128(pVCpu->cpum.GstCtx.aVRegs[23], WHvArm64RegisterQ23);
        GET_REG128(pVCpu->cpum.GstCtx.aVRegs[24], WHvArm64RegisterQ24);
        GET_REG128(pVCpu->cpum.GstCtx.aVRegs[25], WHvArm64RegisterQ25);
        GET_REG128(pVCpu->cpum.GstCtx.aVRegs[26], WHvArm64RegisterQ26);
        GET_REG128(pVCpu->cpum.GstCtx.aVRegs[27], WHvArm64RegisterQ27);
        GET_REG128(pVCpu->cpum.GstCtx.aVRegs[28], WHvArm64RegisterQ28);
        GET_REG128(pVCpu->cpum.GstCtx.aVRegs[29], WHvArm64RegisterQ29);
        GET_REG128(pVCpu->cpum.GstCtx.aVRegs[30], WHvArm64RegisterQ30);
        GET_REG128(pVCpu->cpum.GstCtx.aVRegs[31], WHvArm64RegisterQ31);
    }
    if (fWhat & CPUMCTX_EXTRN_FPCR)
        GET_REG64_RAW(pVCpu->cpum.GstCtx.fpcr, WHvArm64RegisterFpcr);
    if (fWhat & CPUMCTX_EXTRN_FPSR)
        GET_REG64_RAW(pVCpu->cpum.GstCtx.fpsr, WHvArm64RegisterFpsr);

    /* System registers. */
    if (fWhat & CPUMCTX_EXTRN_SYSREG_MISC)
    {
        GET_SYSREG64(pVCpu->cpum.GstCtx.VBar, WHvArm64RegisterVbarEl1);
        GET_SYSREG64(pVCpu->cpum.GstCtx.Esr, WHvArm64RegisterEsrEl1);
        GET_SYSREG64(pVCpu->cpum.GstCtx.Far, WHvArm64RegisterFarEl1);
        /** @todo */
    }

#if 0
    if (fWhat & CPUMCTX_EXTRN_SYSREG_DEBUG)
    {
        GET_SYSREG64(pVCpu->cpum.GstCtx.aBp[0].Ctrl, WHvArm64RegisterDbgbcr0El1);
        /** @todo */
    }
#endif

    if (fWhat & CPUMCTX_EXTRN_SYSREG_PAUTH_KEYS)
    {
        GET_SYSREG64(pVCpu->cpum.GstCtx.Apda.High, WHvArm64RegisterApdAKeyHiEl1);
        /** @todo */
    }

    /* Almost done, just update extrn flags.  Once nothing in EXTRN_ALL is
       left, clear the keeper bits as well by zeroing the whole field. */
    pVCpu->cpum.GstCtx.fExtrn &= ~fWhat;
    if (!(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_ALL))
        pVCpu->cpum.GstCtx.fExtrn = 0;

    return VINF_SUCCESS;
}
1426
1427
1428/**
1429 * Interface for importing state on demand (used by IEM).
1430 *
1431 * @returns VBox status code.
1432 * @param pVCpu The cross context CPU structure.
1433 * @param fWhat What to import, CPUMCTX_EXTRN_XXX.
1434 */
1435VMM_INT_DECL(int) NEMImportStateOnDemand(PVMCPUCC pVCpu, uint64_t fWhat)
1436{
1437 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatImportOnDemand);
1438 return nemHCWinCopyStateFromHyperV(pVCpu->pVMR3, pVCpu, fWhat);
1439}
1440
1441
1442/**
1443 * Query the CPU tick counter and optionally the TSC_AUX MSR value.
1444 *
1445 * @returns VBox status code.
1446 * @param pVCpu The cross context CPU structure.
1447 * @param pcTicks Where to return the CPU tick count.
1448 * @param puAux Where to return the TSC_AUX register value.
1449 */
1450VMM_INT_DECL(int) NEMHCQueryCpuTick(PVMCPUCC pVCpu, uint64_t *pcTicks, uint32_t *puAux)
1451{
1452 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatQueryCpuTick);
1453
1454 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1455 VMCPU_ASSERT_EMT_RETURN(pVCpu, VERR_VM_THREAD_NOT_EMT);
1456 AssertReturn(VM_IS_NEM_ENABLED(pVM), VERR_NEM_IPE_9);
1457
1458#if 0 /** @todo */
1459 /* Call the offical API. */
1460 WHV_REGISTER_NAME aenmNames[2] = { WHvX64RegisterTsc, WHvX64RegisterTscAux };
1461 WHV_REGISTER_VALUE aValues[2] = { { {0, 0} }, { {0, 0} } };
1462 Assert(RT_ELEMENTS(aenmNames) == RT_ELEMENTS(aValues));
1463 HRESULT hrc = WHvGetVirtualProcessorRegisters(pVM->nem.s.hPartition, pVCpu->idCpu, aenmNames, 2, aValues);
1464 AssertLogRelMsgReturn(SUCCEEDED(hrc),
1465 ("WHvGetVirtualProcessorRegisters(%p, %u,{tsc,tsc_aux},2,) -> %Rhrc (Last=%#x/%u)\n",
1466 pVM->nem.s.hPartition, pVCpu->idCpu, hrc, RTNtLastStatusValue(), RTNtLastErrorValue())
1467 , VERR_NEM_GET_REGISTERS_FAILED);
1468 *pcTicks = aValues[0].Reg64;
1469 if (puAux)
1470 *puAux = pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_TSC_AUX ? aValues[1].Reg64 : CPUMGetGuestTscAux(pVCpu);
1471#endif
1472 return VINF_SUCCESS;
1473}
1474
1475
1476/**
1477 * Resumes CPU clock (TSC) on all virtual CPUs.
1478 *
1479 * This is called by TM when the VM is started, restored, resumed or similar.
1480 *
1481 * @returns VBox status code.
1482 * @param pVM The cross context VM structure.
1483 * @param pVCpu The cross context CPU structure of the calling EMT.
1484 * @param uPausedTscValue The TSC value at the time of pausing.
1485 */
1486VMM_INT_DECL(int) NEMHCResumeCpuTickOnAll(PVMCC pVM, PVMCPUCC pVCpu, uint64_t uPausedTscValue)
1487{
1488 VMCPU_ASSERT_EMT_RETURN(pVCpu, VERR_VM_THREAD_NOT_EMT);
1489 AssertReturn(VM_IS_NEM_ENABLED(pVM), VERR_NEM_IPE_9);
1490
1491 /*
1492 * Call the offical API to do the job.
1493 */
1494 if (pVM->cCpus > 1)
1495 RTThreadYield(); /* Try decrease the chance that we get rescheduled in the middle. */
1496
1497#if 0 /** @todo */
1498 /* Start with the first CPU. */
1499 WHV_REGISTER_NAME enmName = WHvX64RegisterTsc;
1500 WHV_REGISTER_VALUE Value = { {0, 0} };
1501 Value.Reg64 = uPausedTscValue;
1502 uint64_t const uFirstTsc = ASMReadTSC();
1503 HRESULT hrc = WHvSetVirtualProcessorRegisters(pVM->nem.s.hPartition, 0 /*iCpu*/, &enmName, 1, &Value);
1504 AssertLogRelMsgReturn(SUCCEEDED(hrc),
1505 ("WHvSetVirtualProcessorRegisters(%p, 0,{tsc},2,%#RX64) -> %Rhrc (Last=%#x/%u)\n",
1506 pVM->nem.s.hPartition, uPausedTscValue, hrc, RTNtLastStatusValue(), RTNtLastErrorValue())
1507 , VERR_NEM_SET_TSC);
1508
1509 /* Do the other CPUs, adjusting for elapsed TSC and keeping finger crossed
1510 that we don't introduce too much drift here. */
1511 for (VMCPUID iCpu = 1; iCpu < pVM->cCpus; iCpu++)
1512 {
1513 Assert(enmName == WHvX64RegisterTsc);
1514 const uint64_t offDelta = (ASMReadTSC() - uFirstTsc);
1515 Value.Reg64 = uPausedTscValue + offDelta;
1516 hrc = WHvSetVirtualProcessorRegisters(pVM->nem.s.hPartition, iCpu, &enmName, 1, &Value);
1517 AssertLogRelMsgReturn(SUCCEEDED(hrc),
1518 ("WHvSetVirtualProcessorRegisters(%p, 0,{tsc},2,%#RX64 + %#RX64) -> %Rhrc (Last=%#x/%u)\n",
1519 pVM->nem.s.hPartition, iCpu, uPausedTscValue, offDelta, hrc, RTNtLastStatusValue(), RTNtLastErrorValue())
1520 , VERR_NEM_SET_TSC);
1521 }
1522#endif
1523
1524 return VINF_SUCCESS;
1525}
1526
1527
1528#ifdef LOG_ENABLED
/**
 * Logs the current CPU state.
 *
 * Only does work when verbose logging (level 3) is enabled; uses DBGF to
 * format the general purpose registers and key EL1 system registers of the
 * given vCPU.
 *
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context per CPU structure.
 */
static void nemR3WinLogState(PVMCC pVM, PVMCPUCC pVCpu)
{
    if (LogIs3Enabled())
    {
        char szRegs[4096];
        DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
                        "x0=%016VR{x0} x1=%016VR{x1} x2=%016VR{x2} x3=%016VR{x3}\n"
                        "x4=%016VR{x4} x5=%016VR{x5} x6=%016VR{x6} x7=%016VR{x7}\n"
                        "x8=%016VR{x8} x9=%016VR{x9} x10=%016VR{x10} x11=%016VR{x11}\n"
                        "x12=%016VR{x12} x13=%016VR{x13} x14=%016VR{x14} x15=%016VR{x15}\n"
                        "x16=%016VR{x16} x17=%016VR{x17} x18=%016VR{x18} x19=%016VR{x19}\n"
                        "x20=%016VR{x20} x21=%016VR{x21} x22=%016VR{x22} x23=%016VR{x23}\n"
                        "x24=%016VR{x24} x25=%016VR{x25} x26=%016VR{x26} x27=%016VR{x27}\n"
                        "x28=%016VR{x28} x29=%016VR{x29} x30=%016VR{x30}\n"
                        "pc=%016VR{pc} pstate=%016VR{pstate}\n"
                        "sp_el0=%016VR{sp_el0} sp_el1=%016VR{sp_el1} elr_el1=%016VR{elr_el1}\n"
                        "sctlr_el1=%016VR{sctlr_el1} tcr_el1=%016VR{tcr_el1}\n"
                        "ttbr0_el1=%016VR{ttbr0_el1} ttbr1_el1=%016VR{ttbr1_el1}\n"
                        "vbar_el1=%016VR{vbar_el1}\n"
                        );
        /* Disassembly is disabled below, so szInstr stays empty for now. */
        char szInstr[256]; RT_ZERO(szInstr);
#if 0
        DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
                           DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
                           szInstr, sizeof(szInstr), NULL);
#endif
        Log3(("%s%s\n", szRegs, szInstr));
    }
}
1561#endif /* LOG_ENABLED */
1562
1563
1564/**
1565 * Copies register state from the (common) exit context.
1566 *
1567 * ASSUMES no state copied yet.
1568 *
1569 * @param pVCpu The cross context per CPU structure.
1570 * @param pMsgHdr The common message header.
1571 */
1572DECLINLINE(void) nemR3WinCopyStateFromArmHeader(PVMCPUCC pVCpu, WHV_INTERCEPT_MESSAGE_HEADER const *pMsgHdr)
1573{
1574 Assert( (pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_PC | CPUMCTX_EXTRN_PSTATE))
1575 == (CPUMCTX_EXTRN_PC | CPUMCTX_EXTRN_PSTATE));
1576
1577 pVCpu->cpum.GstCtx.Pc.u64 = pMsgHdr->Pc;
1578 pVCpu->cpum.GstCtx.fPState = pMsgHdr->Cpsr;
1579
1580 pVCpu->cpum.GstCtx.fExtrn &= ~(CPUMCTX_EXTRN_PC | CPUMCTX_EXTRN_PSTATE);
1581}
1582
1583
/**
 * State to pass between nemHCWinHandleMemoryAccess / nemR3WinWHvHandleMemoryAccess
 * and nemHCWinHandleMemoryAccessPageCheckerCallback.
 */
typedef struct NEMHCWINHMACPCCSTATE
{
    /** Input: Write access. */
    bool fWriteAccess;
    /** Output: Set if we did something (e.g. remapped or unmapped the page). */
    bool fDidSomething;
    /** Output: Set if we should resume (i.e. restart the faulting instruction
     *  instead of emulating the access). */
    bool fCanResume;
} NEMHCWINHMACPCCSTATE;
1597
/**
 * @callback_method_impl{FNPGMPHYSNEMCHECKPAGE,
 *      Worker for nemR3WinHandleMemoryAccess; pvUser points to a
 *      NEMHCWINHMACPCCSTATE structure. }
 *
 * Reconciles the cached NEM page state with the protection PGM reports for
 * the faulting page, then either maps the page, accepts the current mapping
 * (resume), or unmaps it so the access gets emulated.
 */
NEM_TMPL_STATIC DECLCALLBACK(int)
nemHCWinHandleMemoryAccessPageCheckerCallback(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys, PPGMPHYSNEMPAGEINFO pInfo, void *pvUser)
{
    NEMHCWINHMACPCCSTATE *pState = (NEMHCWINHMACPCCSTATE *)pvUser;
    pState->fDidSomething = false;
    pState->fCanResume = false;

    /* If A20 is disabled, we may need to make another query on the masked
       page to get the correct protection information. */
    /* NOTE(review): the A20 remark is an x86 leftover; GCPhysSrc is never
       masked in this ARMv8 variant — confirm and drop. */
    uint8_t u2State = pInfo->u2NemState;
    RTGCPHYS GCPhysSrc = GCPhys;

    /*
     * Consolidate current page state with actual page protection and access type.
     * We don't really consider downgrades here, as they shouldn't happen.
     */
    int rc;
    switch (u2State)
    {
        case NEM_WIN_PAGE_STATE_UNMAPPED:
        case NEM_WIN_PAGE_STATE_NOT_SET:
            /* Nothing mapped: only map the page if PGM grants some access. */
            if (pInfo->fNemProt == NEM_PAGE_PROT_NONE)
            {
                Log4(("nemHCWinHandleMemoryAccessPageCheckerCallback: %RGp - #1\n", GCPhys));
                return VINF_SUCCESS;
            }

            /* Don't bother remapping it if it's a write request to a non-writable page. */
            if ( pState->fWriteAccess
                && !(pInfo->fNemProt & NEM_PAGE_PROT_WRITE))
            {
                Log4(("nemHCWinHandleMemoryAccessPageCheckerCallback: %RGp - #1w\n", GCPhys));
                return VINF_SUCCESS;
            }

            /* Map the page. */
            /* NOTE(review): X86_PAGE_OFFSET_MASK in the ARMv8 backend — only
               correct while guest pages are 4KiB; consider the guest-page macro. */
            rc = nemHCNativeSetPhysPage(pVM,
                                        pVCpu,
                                        GCPhysSrc & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK,
                                        GCPhys & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK,
                                        pInfo->fNemProt,
                                        &u2State,
                                        true /*fBackingState*/);
            pInfo->u2NemState = u2State;
            Log4(("nemHCWinHandleMemoryAccessPageCheckerCallback: %RGp - synced => %s + %Rrc\n",
                  GCPhys, g_apszPageStates[u2State], rc));
            pState->fDidSomething = true;
            pState->fCanResume = true;
            return rc;

        case NEM_WIN_PAGE_STATE_READABLE:
            /* A read-only mapping suffices for read/execute access; for a write
               we fall out of the switch and unmap below. */
            if ( !(pInfo->fNemProt & NEM_PAGE_PROT_WRITE)
                && (pInfo->fNemProt & (NEM_PAGE_PROT_READ | NEM_PAGE_PROT_EXECUTE)))
            {
                Log4(("nemHCWinHandleMemoryAccessPageCheckerCallback: %RGp - #2\n", GCPhys));
                return VINF_SUCCESS;
            }

            break;

        case NEM_WIN_PAGE_STATE_WRITABLE:
            if (pInfo->fNemProt & NEM_PAGE_PROT_WRITE)
            {
                /* Already writable before: leave fCanResume clear so the caller
                   emulates the access; freshly writable: the fault was stale, resume. */
                if (pInfo->u2OldNemState == NEM_WIN_PAGE_STATE_WRITABLE)
                    Log4(("nemHCWinHandleMemoryAccessPageCheckerCallback: %RGp - #3a\n", GCPhys));
                else
                {
                    pState->fCanResume = true;
                    Log4(("nemHCWinHandleMemoryAccessPageCheckerCallback: %RGp - #3b (%s -> %s)\n",
                          GCPhys, g_apszPageStates[pInfo->u2OldNemState], g_apszPageStates[u2State]));
                }
                return VINF_SUCCESS;
            }
            break;

        default:
            AssertLogRelMsgFailedReturn(("u2State=%#x\n", u2State), VERR_NEM_IPE_4);
    }

    /*
     * Unmap and restart the instruction.
     * If this fails, which it does every so often, just unmap everything for now.
     */
    /** @todo figure out whether we mess up the state or if it's WHv. */
    STAM_REL_PROFILE_START(&pVM->nem.s.StatProfUnmapGpaRangePage, a);
    HRESULT hrc = WHvUnmapGpaRange(pVM->nem.s.hPartition, GCPhys, X86_PAGE_SIZE);
    STAM_REL_PROFILE_STOP(&pVM->nem.s.StatProfUnmapGpaRangePage, a);
    if (SUCCEEDED(hrc))
    {
        pState->fDidSomething = true;
        pState->fCanResume = true;
        pInfo->u2NemState = NEM_WIN_PAGE_STATE_UNMAPPED;
        STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapPage);
        uint32_t cMappedPages = ASMAtomicDecU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
        Log5(("NEM GPA unmapped/exit: %RGp (was %s, cMappedPages=%u)\n", GCPhys, g_apszPageStates[u2State], cMappedPages));
        return VINF_SUCCESS;
    }
    STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapPageFailed);
    LogRel(("nemHCWinHandleMemoryAccessPageCheckerCallback/unmap: GCPhysDst=%RGp %s hrc=%Rhrc (%#x)\n",
            GCPhys, g_apszPageStates[u2State], hrc, hrc));
    return VERR_NEM_UNMAP_PAGES_FAILED;
}
1705
1706
1707/**
1708 * Returns the byte size from the given access SAS value.
1709 *
1710 * @returns Number of bytes to transfer.
1711 * @param uSas The SAS value to convert.
1712 */
1713DECLINLINE(size_t) nemR3WinGetByteCountFromSas(uint8_t uSas)
1714{
1715 switch (uSas)
1716 {
1717 case ARMV8_EC_ISS_DATA_ABRT_SAS_BYTE: return sizeof(uint8_t);
1718 case ARMV8_EC_ISS_DATA_ABRT_SAS_HALFWORD: return sizeof(uint16_t);
1719 case ARMV8_EC_ISS_DATA_ABRT_SAS_WORD: return sizeof(uint32_t);
1720 case ARMV8_EC_ISS_DATA_ABRT_SAS_DWORD: return sizeof(uint64_t);
1721 default:
1722 AssertReleaseFailed();
1723 }
1724
1725 return 0;
1726}
1727
1728
1729/**
1730 * Sets the given general purpose register to the given value.
1731 *
1732 * @param pVCpu The cross context virtual CPU structure of the
1733 * calling EMT.
1734 * @param uReg The register index.
1735 * @param f64BitReg Flag whether to operate on a 64-bit or 32-bit register.
1736 * @param fSignExtend Flag whether to sign extend the value.
1737 * @param u64Val The value.
1738 */
1739DECLINLINE(void) nemR3WinSetGReg(PVMCPU pVCpu, uint8_t uReg, bool f64BitReg, bool fSignExtend, uint64_t u64Val)
1740{
1741 AssertReturnVoid(uReg < 31);
1742
1743 if (f64BitReg)
1744 pVCpu->cpum.GstCtx.aGRegs[uReg].x = fSignExtend ? (int64_t)u64Val : u64Val;
1745 else
1746 pVCpu->cpum.GstCtx.aGRegs[uReg].w = fSignExtend ? (int32_t)u64Val : u64Val; /** @todo Does this clear the upper half on real hardware? */
1747
1748 /* Mark the register as not extern anymore. */
1749 switch (uReg)
1750 {
1751 case 0:
1752 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_X0;
1753 break;
1754 case 1:
1755 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_X1;
1756 break;
1757 case 2:
1758 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_X2;
1759 break;
1760 case 3:
1761 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_X3;
1762 break;
1763 default:
1764 AssertRelease(!(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_X4_X28));
1765 /** @todo We need to import all missing registers in order to clear this flag (or just set it in HV from here). */
1766 }
1767}
1768
1769
1770/**
1771 * Gets the given general purpose register and returns the value.
1772 *
1773 * @returns Value from the given register.
1774 * @param pVCpu The cross context virtual CPU structure of the
1775 * calling EMT.
1776 * @param uReg The register index.
1777 */
1778DECLINLINE(uint64_t) nemR3WinGetGReg(PVMCPU pVCpu, uint8_t uReg)
1779{
1780 AssertReturn(uReg <= ARMV8_AARCH64_REG_ZR, 0);
1781
1782 if (uReg == ARMV8_AARCH64_REG_ZR)
1783 return 0;
1784
1785 /** @todo Import the register if extern. */
1786 AssertRelease(!(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_GPRS_MASK));
1787
1788 return pVCpu->cpum.GstCtx.aGRegs[uReg].x;
1789}
1790
1791
/**
 * Deals with memory access exits (WHvRunVpExitReasonMemoryAccess).
 *
 * First tries to fix up the NEM page mapping via PGM so the instruction can
 * simply be restarted; otherwise decodes the data abort syndrome and emulates
 * the MMIO/handler access directly.
 *
 * @returns Strict VBox status code.
 * @param   pVM             The cross context VM structure.
 * @param   pVCpu           The cross context per CPU structure.
 * @param   pExit           The VM exit information to handle.
 * @sa      nemHCWinHandleMessageMemory
 */
NEM_TMPL_STATIC VBOXSTRICTRC
nemR3WinHandleExitMemory(PVMCC pVM, PVMCPUCC pVCpu, WHV_RUN_VP_EXIT_CONTEXT const *pExit)
{
    uint64_t const uHostTsc = ASMReadTSC();
    Assert(pExit->MemoryAccess.Header.InterceptAccessType != 3);

    /*
     * Ask PGM for information about the given GCPhys. We need to check if we're
     * out of sync first.
     */
    WHV_INTERCEPT_MESSAGE_HEADER const *pHdr = &pExit->MemoryAccess.Header;
    NEMHCWINHMACPCCSTATE State = { pExit->MemoryAccess.Header.InterceptAccessType == WHvMemoryAccessWrite, false, false };
    PGMPHYSNEMPAGEINFO Info;
    int rc = PGMPhysNemPageInfoChecker(pVM, pVCpu, pExit->MemoryAccess.Gpa, State.fWriteAccess, &Info,
                                       nemHCWinHandleMemoryAccessPageCheckerCallback, &State);
    if (RT_SUCCESS(rc))
    {
        if (Info.fNemProt & ( pExit->MemoryAccess.Header.InterceptAccessType == WHvMemoryAccessWrite
                             ? NEM_PAGE_PROT_WRITE : NEM_PAGE_PROT_READ))
        {
            /* The callback fixed up the mapping - just restart the instruction. */
            if (State.fCanResume)
            {
                Log4(("MemExit/%u: %08RX64: %RGp (=>%RHp) %s fProt=%u%s%s%s; restarting (%s)\n",
                      pVCpu->idCpu, pHdr->Pc,
                      pExit->MemoryAccess.Gpa, Info.HCPhys, g_apszPageStates[Info.u2NemState], Info.fNemProt,
                      Info.fHasHandlers ? " handlers" : "", Info.fZeroPage ? " zero-pg" : "",
                      State.fDidSomething ? "" : " no-change", g_apszHvInterceptAccessTypes[pExit->MemoryAccess.Header.InterceptAccessType]));
                EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, NEMEXITTYPE_MEMORY_ACCESS),
                                 pHdr->Pc, uHostTsc);
                return VINF_SUCCESS;
            }
        }
        Log4(("MemExit/%u: %08RX64: %RGp (=>%RHp) %s fProt=%u%s%s%s; emulating (%s)\n",
              pVCpu->idCpu, pHdr->Pc,
              pExit->MemoryAccess.Gpa, Info.HCPhys, g_apszPageStates[Info.u2NemState], Info.fNemProt,
              Info.fHasHandlers ? " handlers" : "", Info.fZeroPage ? " zero-pg" : "",
              State.fDidSomething ? "" : " no-change", g_apszHvInterceptAccessTypes[pExit->MemoryAccess.Header.InterceptAccessType]));
    }
    else
        Log4(("MemExit/%u: %08RX64: %RGp rc=%Rrc%s; emulating (%s)\n",
              pVCpu->idCpu, pHdr->Pc,
              pExit->MemoryAccess.Gpa, rc, State.fDidSomething ? " modified-backing" : "",
              g_apszHvInterceptAccessTypes[pExit->MemoryAccess.Header.InterceptAccessType]));

    /*
     * Emulate the memory access, either access handler or special memory.
     */
    PCEMEXITREC pExitRec = EMHistoryAddExit(pVCpu,
                                            pExit->MemoryAccess.Header.InterceptAccessType == WHvMemoryAccessWrite
                                            ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_WRITE)
                                            : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_READ),
                                            pHdr->Pc, uHostTsc);
    //nemR3WinCopyStateFromArmHeader(pVCpu, &pExit->MemoryAccess.Header);
    rc = nemHCWinCopyStateFromHyperV(pVM, pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
    AssertRCReturn(rc, rc);

    /* Decode the data abort syndrome (ISS) fields of the exit. */
    uint8_t const cbInstr = pExit->MemoryAccess.InstructionByteCount;
    RTGCPTR const GCPtrVa = pExit->MemoryAccess.Gva;
    RTGCPHYS const GCPhys = pExit->MemoryAccess.Gpa;
    uint64_t const uIss = pExit->MemoryAccess.Syndrome;
    bool fIsv = RT_BOOL(uIss & ARMV8_EC_ISS_DATA_ABRT_ISV);
    bool fL2Fault = RT_BOOL(uIss & ARMV8_EC_ISS_DATA_ABRT_S1PTW);
    bool fWrite = RT_BOOL(uIss & ARMV8_EC_ISS_DATA_ABRT_WNR);
    bool f64BitReg = RT_BOOL(uIss & ARMV8_EC_ISS_DATA_ABRT_SF);
    bool fSignExtend = RT_BOOL(uIss & ARMV8_EC_ISS_DATA_ABRT_SSE);
    uint8_t uReg = ARMV8_EC_ISS_DATA_ABRT_SRT_GET(uIss);
    uint8_t uAcc = ARMV8_EC_ISS_DATA_ABRT_SAS_GET(uIss);
    size_t cbAcc = nemR3WinGetByteCountFromSas(uAcc);
    LogFlowFunc(("fIsv=%RTbool fL2Fault=%RTbool fWrite=%RTbool f64BitReg=%RTbool fSignExtend=%RTbool uReg=%u uAcc=%u GCPtrDataAbrt=%RGv GCPhys=%RGp cbInstr=%u\n",
                 fIsv, fL2Fault, fWrite, f64BitReg, fSignExtend, uReg, uAcc, GCPtrVa, GCPhys, cbInstr));

    RT_NOREF(fL2Fault);

    /* Without a valid syndrome (ISV clear) we would have to decode the instruction ourselves. */
    AssertReturn(fIsv, VERR_NOT_SUPPORTED); /** @todo Implement using IEM when this should occur. */

    EMHistoryAddExit(pVCpu,
                     fWrite
                     ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_WRITE)
                     : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_READ),
                     pVCpu->cpum.GstCtx.Pc.u64, ASMReadTSC());

    /* Perform the access through PGM, transferring via the source/target GPR. */
    VBOXSTRICTRC rcStrict = VINF_SUCCESS;
    uint64_t u64Val = 0;
    if (fWrite)
    {
        u64Val = nemR3WinGetGReg(pVCpu, uReg);
        rcStrict = PGMPhysWrite(pVM, GCPhys, &u64Val, cbAcc, PGMACCESSORIGIN_HM);
        Log4(("MmioExit/%u: %08RX64: WRITE %RGp LB %u, %.*Rhxs -> rcStrict=%Rrc\n",
              pVCpu->idCpu, pVCpu->cpum.GstCtx.Pc.u64, GCPhys, cbAcc, cbAcc,
              &u64Val, VBOXSTRICTRC_VAL(rcStrict) ));
    }
    else
    {
        rcStrict = PGMPhysRead(pVM, GCPhys, &u64Val, cbAcc, PGMACCESSORIGIN_HM);
        Log4(("MmioExit/%u: %08RX64: READ %RGp LB %u -> %.*Rhxs rcStrict=%Rrc\n",
              pVCpu->idCpu, pVCpu->cpum.GstCtx.Pc.u64, GCPhys, cbAcc, cbAcc,
              &u64Val, VBOXSTRICTRC_VAL(rcStrict) ));
        if (rcStrict == VINF_SUCCESS)
            nemR3WinSetGReg(pVCpu, uReg, f64BitReg, fSignExtend, u64Val);
    }

    /* Advance past the (always 4 byte, A64) faulting instruction on success. */
    if (rcStrict == VINF_SUCCESS)
        pVCpu->cpum.GstCtx.Pc.u64 += sizeof(uint32_t); /** @todo Why is InstructionByteCount always 0? */

    return rcStrict;
}
1907
1908
/**
 * Deals with unrecoverable exception exits
 * (WHvRunVpExitReasonUnrecoverableException).
 *
 * @returns Strict VBox status code.
 * @param   pVM             The cross context VM structure.
 * @param   pVCpu           The cross context per CPU structure.
 * @param   pExit           The VM exit information to handle.
 * @sa      nemHCWinHandleMessageUnrecoverableException
 */
NEM_TMPL_STATIC VBOXSTRICTRC nemR3WinHandleExitUnrecoverableException(PVMCC pVM, PVMCPUCC pVCpu, WHV_RUN_VP_EXIT_CONTEXT const *pExit)
{
#if 0
    /*
     * Just copy the state we've got and handle it in the loop for now.
     */
    nemR3WinCopyStateFromX64Header(pVCpu, &pExit->VpContext);
    Log(("TripleExit/%u: %04x:%08RX64/%s: RFL=%#RX64 -> VINF_EM_TRIPLE_FAULT\n", pVCpu->idCpu, pExit->VpContext.Cs.Selector,
         pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext), pExit->VpContext.Rflags));
    RT_NOREF_PV(pVM);
    return VINF_EM_TRIPLE_FAULT;
#else
    /*
     * Let IEM decide whether this is really it.
     */
    EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, NEMEXITTYPE_UNRECOVERABLE_EXCEPTION),
                     pExit->UnrecoverableException.Header.Pc, ASMReadTSC());
    nemR3WinCopyStateFromArmHeader(pVCpu, &pExit->UnrecoverableException.Header);
    /* Not implemented yet: this release-asserts so the condition gets noticed. */
    AssertReleaseFailed();
    RT_NOREF_PV(pVM);
    return VINF_SUCCESS;
#endif
}
1941
1942
/**
 * Handles VM exits.
 *
 * Imports the full guest state first (sledgehammer approach, see the run
 * loop), then dispatches on the WHv exit reason.
 *
 * @returns Strict VBox status code.
 * @param   pVM             The cross context VM structure.
 * @param   pVCpu           The cross context per CPU structure.
 * @param   pExit           The VM exit information to handle.
 * @sa      nemHCWinHandleMessage
 */
NEM_TMPL_STATIC VBOXSTRICTRC nemR3WinHandleExit(PVMCC pVM, PVMCPUCC pVCpu, WHV_RUN_VP_EXIT_CONTEXT const *pExit)
{
    /* Pull in everything; the individual handlers rely on the state being present. */
    int rc = nemHCWinCopyStateFromHyperV(pVM, pVCpu, CPUMCTX_EXTRN_ALL);
    AssertRCReturn(rc, rc);

#ifdef LOG_ENABLED
    if (LogIs3Enabled())
        nemR3WinLogState(pVM, pVCpu);
#endif

    switch (pExit->ExitReason)
    {
        /* Guest touched unmapped guest-physical memory - MMIO or stale mapping. */
        case WHvRunVpExitReasonUnmappedGpa:
            STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitMemUnmapped);
            return nemR3WinHandleExitMemory(pVM, pVCpu, pExit);

        /* Execution was canceled via WHvCancelRunVirtualProcessor (see nemR3NativeNotifyFF). */
        case WHvRunVpExitReasonCanceled:
            Log4(("CanceledExit/%u\n", pVCpu->idCpu));
            return VINF_SUCCESS;

        case WHvRunVpExitReasonUnrecoverableException:
            STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitUnrecoverable);
            return nemR3WinHandleExitUnrecoverableException(pVM, pVCpu, pExit);

        case WHvRunVpExitReasonUnsupportedFeature:
        case WHvRunVpExitReasonInvalidVpRegisterValue:
            LogRel(("Unimplemented exit:\n%.*Rhxd\n", (int)sizeof(*pExit), pExit));
            AssertLogRelMsgFailedReturn(("Unexpected exit on CPU #%u: %#x\n%.32Rhxd\n",
                                         pVCpu->idCpu, pExit->ExitReason, pExit), VERR_NEM_IPE_3);

        /* Undesired exits: */
        case WHvRunVpExitReasonNone:
        default:
            LogRel(("Unknown exit:\n%.*Rhxd\n", (int)sizeof(*pExit), pExit));
            AssertLogRelMsgFailedReturn(("Unknown exit on CPU #%u: %#x!\n", pVCpu->idCpu, pExit->ExitReason), VERR_NEM_IPE_3);
    }
}
1989
1990
1991VBOXSTRICTRC nemR3NativeRunGC(PVM pVM, PVMCPU pVCpu)
1992{
1993 LogFlow(("NEM/%u: %08RX64 pstate=%#08RX64 <=\n", pVCpu->idCpu, pVCpu->cpum.GstCtx.Pc, pVCpu->cpum.GstCtx.fPState));
1994#ifdef LOG_ENABLED
1995 if (LogIs3Enabled())
1996 nemR3WinLogState(pVM, pVCpu);
1997#endif
1998
1999 if (RT_UNLIKELY(!pVCpu->nem.s.fIdRegsSynced))
2000 {
2001 /*
2002 * Sync the guest ID registers which are per VM once (they are readonly and stay constant during VM lifetime).
2003 * Need to do it here and not during the init because loading a saved state might change the ID registers from what
2004 * done in the call to CPUMR3PopulateFeaturesByIdRegisters().
2005 */
2006 PCCPUMIDREGS pIdRegsGst = NULL;
2007 int rc = CPUMR3QueryGuestIdRegs(pVM, &pIdRegsGst);
2008 AssertRCReturn(rc, rc);
2009
2010 WHV_REGISTER_NAME aenmNames[12];
2011 WHV_REGISTER_VALUE aValues[12];
2012
2013 uint32_t iReg = 0;
2014#define ADD_REG64(a_enmName, a_uValue) do { \
2015 aenmNames[iReg] = (a_enmName); \
2016 aValues[iReg].Reg128.High64 = 0; \
2017 aValues[iReg].Reg64 = (a_uValue); \
2018 iReg++; \
2019 } while (0)
2020
2021
2022 ADD_REG64(WHvArm64RegisterIdAa64Mmfr0El1, pIdRegsGst->u64RegIdAa64Mmfr0El1);
2023#undef ADD_REG64
2024
2025 //HRESULT hrc = WHvSetVirtualProcessorRegisters(pVM->nem.s.hPartition, pVCpu->idCpu, aenmNames, iReg, aValues);
2026 //AssertReturn(SUCCEEDED(hrc), VERR_NEM_IPE_9);
2027
2028 pVCpu->nem.s.fIdRegsSynced = true;
2029 }
2030
2031 /*
2032 * Try switch to NEM runloop state.
2033 */
2034 if (VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM, VMCPUSTATE_STARTED))
2035 { /* likely */ }
2036 else
2037 {
2038 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM, VMCPUSTATE_STARTED_EXEC_NEM_CANCELED);
2039 LogFlow(("NEM/%u: returning immediately because canceled\n", pVCpu->idCpu));
2040 return VINF_SUCCESS;
2041 }
2042
2043 /*
2044 * The run loop.
2045 *
2046 * Current approach to state updating to use the sledgehammer and sync
2047 * everything every time. This will be optimized later.
2048 */
2049 const bool fSingleStepping = DBGFIsStepping(pVCpu);
2050// const uint32_t fCheckVmFFs = !fSingleStepping ? VM_FF_HP_R0_PRE_HM_MASK
2051// : VM_FF_HP_R0_PRE_HM_STEP_MASK;
2052// const uint32_t fCheckCpuFFs = !fSingleStepping ? VMCPU_FF_HP_R0_PRE_HM_MASK : VMCPU_FF_HP_R0_PRE_HM_STEP_MASK;
2053 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
2054 for (unsigned iLoop = 0;; iLoop++)
2055 {
2056 /*
2057 * Pending interrupts or such? Need to check and deal with this prior
2058 * to the state syncing.
2059 */
2060#if 0
2061 if (VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_FIQ | VMCPU_FF_UPDATE_IRQ))
2062 {
2063 /* Try inject interrupt. */
2064 rcStrict = nemHCWinHandleInterruptFF(pVM, pVCpu, &pVCpu->nem.s.fDesiredInterruptWindows);
2065 if (rcStrict == VINF_SUCCESS)
2066 { /* likely */ }
2067 else
2068 {
2069 LogFlow(("NEM/%u: breaking: nemHCWinHandleInterruptFF -> %Rrc\n", pVCpu->idCpu, VBOXSTRICTRC_VAL(rcStrict) ));
2070 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatBreakOnStatus);
2071 break;
2072 }
2073 }
2074#endif
2075
2076 /* Ensure that Hyper-V has the whole state. */
2077 int rc2 = nemHCWinCopyStateToHyperV(pVM, pVCpu);
2078 AssertRCReturn(rc2, rc2);
2079
2080 /*
2081 * Poll timers and run for a bit.
2082 *
2083 * With the VID approach (ring-0 or ring-3) we can specify a timeout here,
2084 * so we take the time of the next timer event and uses that as a deadline.
2085 * The rounding heuristics are "tuned" so that rhel5 (1K timer) will boot fine.
2086 */
2087 /** @todo See if we cannot optimize this TMTimerPollGIP by only redoing
2088 * the whole polling job when timers have changed... */
2089 uint64_t offDeltaIgnored;
2090 uint64_t const nsNextTimerEvt = TMTimerPollGIP(pVM, pVCpu, &offDeltaIgnored); NOREF(nsNextTimerEvt);
2091 if ( !VM_FF_IS_ANY_SET(pVM, VM_FF_EMT_RENDEZVOUS | VM_FF_TM_VIRTUAL_SYNC)
2092 && !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HM_TO_R3_MASK))
2093 {
2094 if (VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM_WAIT, VMCPUSTATE_STARTED_EXEC_NEM))
2095 {
2096#if 0 //def LOG_ENABLED
2097 if (LogIsFlowEnabled())
2098 {
2099 static const WHV_REGISTER_NAME s_aNames[6] = { WHvX64RegisterCs, WHvX64RegisterRip, WHvX64RegisterRflags,
2100 WHvX64RegisterSs, WHvX64RegisterRsp, WHvX64RegisterCr0 };
2101 WHV_REGISTER_VALUE aRegs[RT_ELEMENTS(s_aNames)] = { {{0, 0} } };
2102 WHvGetVirtualProcessorRegisters(pVM->nem.s.hPartition, pVCpu->idCpu, s_aNames, RT_ELEMENTS(s_aNames), aRegs);
2103 LogFlow(("NEM/%u: Entry @ %04x:%08RX64 IF=%d EFL=%#RX64 SS:RSP=%04x:%08RX64 cr0=%RX64\n",
2104 pVCpu->idCpu, aRegs[0].Segment.Selector, aRegs[1].Reg64, RT_BOOL(aRegs[2].Reg64 & X86_EFL_IF),
2105 aRegs[2].Reg64, aRegs[3].Segment.Selector, aRegs[4].Reg64, aRegs[5].Reg64));
2106 }
2107#endif
2108 WHV_RUN_VP_EXIT_CONTEXT ExitReason = {0};
2109 TMNotifyStartOfExecution(pVM, pVCpu);
2110
2111 HRESULT hrc = WHvRunVirtualProcessor(pVM->nem.s.hPartition, pVCpu->idCpu, &ExitReason, sizeof(ExitReason));
2112
2113 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM, VMCPUSTATE_STARTED_EXEC_NEM_WAIT);
2114 TMNotifyEndOfExecution(pVM, pVCpu, ASMReadTSC());
2115#ifdef LOG_ENABLED
2116 LogFlow(("NEM/%u: Exit @ @todo Reason=%#x\n", pVCpu->idCpu, ExitReason.ExitReason));
2117#endif
2118 if (SUCCEEDED(hrc))
2119 {
2120 /*
2121 * Deal with the message.
2122 */
2123 rcStrict = nemR3WinHandleExit(pVM, pVCpu, &ExitReason);
2124 if (rcStrict == VINF_SUCCESS)
2125 { /* hopefully likely */ }
2126 else
2127 {
2128 LogFlow(("NEM/%u: breaking: nemHCWinHandleMessage -> %Rrc\n", pVCpu->idCpu, VBOXSTRICTRC_VAL(rcStrict) ));
2129 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatBreakOnStatus);
2130 break;
2131 }
2132 }
2133 else
2134 AssertLogRelMsgFailedReturn(("WHvRunVirtualProcessor failed for CPU #%u: %#x (%u)\n",
2135 pVCpu->idCpu, hrc, GetLastError()),
2136 VERR_NEM_IPE_0);
2137
2138 /*
2139 * If no relevant FFs are pending, loop.
2140 */
2141 if ( !VM_FF_IS_ANY_SET( pVM, !fSingleStepping ? VM_FF_HP_R0_PRE_HM_MASK : VM_FF_HP_R0_PRE_HM_STEP_MASK)
2142 && !VMCPU_FF_IS_ANY_SET(pVCpu, !fSingleStepping ? VMCPU_FF_HP_R0_PRE_HM_MASK : VMCPU_FF_HP_R0_PRE_HM_STEP_MASK) )
2143 continue;
2144
2145 /** @todo Try handle pending flags, not just return to EM loops. Take care
2146 * not to set important RCs here unless we've handled a message. */
2147 LogFlow(("NEM/%u: breaking: pending FF (%#x / %#RX64)\n",
2148 pVCpu->idCpu, pVM->fGlobalForcedActions, (uint64_t)pVCpu->fLocalForcedActions));
2149 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatBreakOnFFPost);
2150 }
2151 else
2152 {
2153 LogFlow(("NEM/%u: breaking: canceled %d (pre exec)\n", pVCpu->idCpu, VMCPU_GET_STATE(pVCpu) ));
2154 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatBreakOnCancel);
2155 }
2156 }
2157 else
2158 {
2159 LogFlow(("NEM/%u: breaking: pending FF (pre exec)\n", pVCpu->idCpu));
2160 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatBreakOnFFPre);
2161 }
2162 break;
2163 } /* the run loop */
2164
2165
2166 /*
2167 * If the CPU is running, make sure to stop it before we try sync back the
2168 * state and return to EM. We don't sync back the whole state if we can help it.
2169 */
2170 if (!VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_EXEC_NEM))
2171 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_EXEC_NEM_CANCELED);
2172
2173 if (pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_ALL)
2174 {
2175 /* Try anticipate what we might need. */
2176 uint64_t fImport = IEM_CPUMCTX_EXTRN_MUST_MASK;
2177 if ( (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST)
2178 || RT_FAILURE(rcStrict))
2179 fImport = CPUMCTX_EXTRN_ALL;
2180 else if (VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_IRQ | VMCPU_FF_INTERRUPT_FIQ))
2181 fImport |= IEM_CPUMCTX_EXTRN_XCPT_MASK;
2182
2183 if (pVCpu->cpum.GstCtx.fExtrn & fImport)
2184 {
2185 int rc2 = nemHCWinCopyStateFromHyperV(pVM, pVCpu, fImport);
2186 if (RT_SUCCESS(rc2))
2187 pVCpu->cpum.GstCtx.fExtrn &= ~fImport;
2188 else if (RT_SUCCESS(rcStrict))
2189 rcStrict = rc2;
2190 if (!(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_ALL))
2191 pVCpu->cpum.GstCtx.fExtrn = 0;
2192 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatImportOnReturn);
2193 }
2194 else
2195 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatImportOnReturnSkipped);
2196 }
2197 else
2198 {
2199 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatImportOnReturnSkipped);
2200 pVCpu->cpum.GstCtx.fExtrn = 0;
2201 }
2202
2203#if 0
2204 LogFlow(("NEM/%u: %04x:%08RX64 efl=%#08RX64 => %Rrc\n", pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel,
2205 pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.rflags.u, VBOXSTRICTRC_VAL(rcStrict) ));
2206#endif
2207 return rcStrict;
2208}
2209
2210
2211VMMR3_INT_DECL(bool) NEMR3CanExecuteGuest(PVM pVM, PVMCPU pVCpu)
2212{
2213 Assert(VM_IS_NEM_ENABLED(pVM));
2214 RT_NOREF(pVM, pVCpu);
2215 return true;
2216}
2217
2218
2219bool nemR3NativeSetSingleInstruction(PVM pVM, PVMCPU pVCpu, bool fEnable)
2220{
2221 NOREF(pVM); NOREF(pVCpu); NOREF(fEnable);
2222 return false;
2223}
2224
2225
2226void nemR3NativeNotifyFF(PVM pVM, PVMCPU pVCpu, uint32_t fFlags)
2227{
2228 Log8(("nemR3NativeNotifyFF: canceling %u\n", pVCpu->idCpu));
2229 HRESULT hrc = WHvCancelRunVirtualProcessor(pVM->nem.s.hPartition, pVCpu->idCpu, 0);
2230 AssertMsg(SUCCEEDED(hrc), ("WHvCancelRunVirtualProcessor -> hrc=%Rhrc\n", hrc));
2231 RT_NOREF_PV(hrc);
2232 RT_NOREF_PV(fFlags);
2233}
2234
2235
2236DECLHIDDEN(bool) nemR3NativeNotifyDebugEventChanged(PVM pVM, bool fUseDebugLoop)
2237{
2238 RT_NOREF(pVM, fUseDebugLoop);
2239 return false;
2240}
2241
2242
2243DECLHIDDEN(bool) nemR3NativeNotifyDebugEventChangedPerCpu(PVM pVM, PVMCPU pVCpu, bool fUseDebugLoop)
2244{
2245 RT_NOREF(pVM, pVCpu, fUseDebugLoop);
2246 return false;
2247}
2248
2249
2250DECLINLINE(int) nemR3NativeGCPhys2R3PtrReadOnly(PVM pVM, RTGCPHYS GCPhys, const void **ppv)
2251{
2252 PGMPAGEMAPLOCK Lock;
2253 int rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys, ppv, &Lock);
2254 if (RT_SUCCESS(rc))
2255 PGMPhysReleasePageMappingLock(pVM, &Lock);
2256 return rc;
2257}
2258
2259
2260DECLINLINE(int) nemR3NativeGCPhys2R3PtrWriteable(PVM pVM, RTGCPHYS GCPhys, void **ppv)
2261{
2262 PGMPAGEMAPLOCK Lock;
2263 int rc = PGMPhysGCPhys2CCPtr(pVM, GCPhys, ppv, &Lock);
2264 if (RT_SUCCESS(rc))
2265 PGMPhysReleasePageMappingLock(pVM, &Lock);
2266 return rc;
2267}
2268
2269
2270VMMR3_INT_DECL(int) NEMR3NotifyPhysRamRegister(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, void *pvR3,
2271 uint8_t *pu2State, uint32_t *puNemRange)
2272{
2273 Log5(("NEMR3NotifyPhysRamRegister: %RGp LB %RGp, pvR3=%p pu2State=%p (%d) puNemRange=%p (%d)\n",
2274 GCPhys, cb, pvR3, pu2State, pu2State, puNemRange, *puNemRange));
2275
2276 *pu2State = UINT8_MAX;
2277 RT_NOREF(puNemRange);
2278
2279 if (pvR3)
2280 {
2281 STAM_REL_PROFILE_START(&pVM->nem.s.StatProfMapGpaRange, a);
2282 HRESULT hrc = WHvMapGpaRange(pVM->nem.s.hPartition, pvR3, GCPhys, cb,
2283 WHvMapGpaRangeFlagRead | WHvMapGpaRangeFlagWrite | WHvMapGpaRangeFlagExecute);
2284 STAM_REL_PROFILE_STOP(&pVM->nem.s.StatProfMapGpaRange, a);
2285 if (SUCCEEDED(hrc))
2286 *pu2State = NEM_WIN_PAGE_STATE_WRITABLE;
2287 else
2288 {
2289 LogRel(("NEMR3NotifyPhysRamRegister: GCPhys=%RGp LB %RGp pvR3=%p hrc=%Rhrc (%#x) Last=%#x/%u\n",
2290 GCPhys, cb, pvR3, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
2291 STAM_REL_COUNTER_INC(&pVM->nem.s.StatMapPageFailed);
2292 return VERR_NEM_MAP_PAGES_FAILED;
2293 }
2294 }
2295 return VINF_SUCCESS;
2296}
2297
2298
2299VMMR3_INT_DECL(bool) NEMR3IsMmio2DirtyPageTrackingSupported(PVM pVM)
2300{
2301 RT_NOREF(pVM);
2302 return g_pfnWHvQueryGpaRangeDirtyBitmap != NULL;
2303}
2304
2305
/**
 * Early notification of an MMIO/MMIO2 mapping: unmaps any RAM being replaced
 * and maps the MMIO2 backing (if present) into the Hyper-V partition.
 *
 * @returns VBox status code (VERR_NEM_UNMAP_PAGES_FAILED / VERR_NEM_MAP_PAGES_FAILED).
 * @param   pVM         The cross context VM structure.
 * @param   GCPhys      Start of the range.
 * @param   cb          Size of the range in bytes.
 * @param   fFlags      NEM_NOTIFY_PHYS_MMIO_EX_F_XXX.
 * @param   pvRam       RAM backing being replaced; only referenced for logging here.
 * @param   pvMmio2     MMIO2 backing to map, NULL for pure MMIO.
 * @param   pu2State    Our page state for the range (output).
 * @param   puNemRange  NEM range cookie; unused by this backend.
 */
VMMR3_INT_DECL(int) NEMR3NotifyPhysMmioExMapEarly(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, uint32_t fFlags,
                                                  void *pvRam, void *pvMmio2, uint8_t *pu2State, uint32_t *puNemRange)
{
    Log5(("NEMR3NotifyPhysMmioExMapEarly: %RGp LB %RGp fFlags=%#x pvRam=%p pvMmio2=%p pu2State=%p (%d) puNemRange=%p (%#x)\n",
          GCPhys, cb, fFlags, pvRam, pvMmio2, pu2State, *pu2State, puNemRange, puNemRange ? *puNemRange : UINT32_MAX));
    RT_NOREF(puNemRange);

    /*
     * Unmap the RAM we're replacing.
     */
    if (fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_REPLACE)
    {
        STAM_REL_PROFILE_START(&pVM->nem.s.StatProfUnmapGpaRange, a);
        HRESULT hrc = WHvUnmapGpaRange(pVM->nem.s.hPartition, GCPhys, cb);
        STAM_REL_PROFILE_STOP(&pVM->nem.s.StatProfUnmapGpaRange, a);
        if (SUCCEEDED(hrc))
        { /* likely */ }
        else if (pvMmio2)
            /* Non-fatal when MMIO2 follows: the map below overrides the range anyway. */
            LogRel(("NEMR3NotifyPhysMmioExMapEarly: GCPhys=%RGp LB %RGp fFlags=%#x: Unmap -> hrc=%Rhrc (%#x) Last=%#x/%u (ignored)\n",
                    GCPhys, cb, fFlags, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
        else
        {
            LogRel(("NEMR3NotifyPhysMmioExMapEarly: GCPhys=%RGp LB %RGp fFlags=%#x: Unmap -> hrc=%Rhrc (%#x) Last=%#x/%u\n",
                    GCPhys, cb, fFlags, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
            STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapPageFailed);
            return VERR_NEM_UNMAP_PAGES_FAILED;
        }
    }

    /*
     * Map MMIO2 if any.
     */
    if (pvMmio2)
    {
        Assert(fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_MMIO2);
        WHV_MAP_GPA_RANGE_FLAGS fWHvFlags = WHvMapGpaRangeFlagRead | WHvMapGpaRangeFlagWrite | WHvMapGpaRangeFlagExecute;
        /* Only request dirty tracking when the host API actually supports it. */
        if ((fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_TRACK_DIRTY_PAGES) && g_pfnWHvQueryGpaRangeDirtyBitmap)
            fWHvFlags |= WHvMapGpaRangeFlagTrackDirtyPages;
        STAM_REL_PROFILE_START(&pVM->nem.s.StatProfMapGpaRange, a);
        HRESULT hrc = WHvMapGpaRange(pVM->nem.s.hPartition, pvMmio2, GCPhys, cb, fWHvFlags);
        STAM_REL_PROFILE_STOP(&pVM->nem.s.StatProfMapGpaRange, a);
        if (SUCCEEDED(hrc))
            *pu2State = NEM_WIN_PAGE_STATE_WRITABLE;
        else
        {
            LogRel(("NEMR3NotifyPhysMmioExMapEarly: GCPhys=%RGp LB %RGp fFlags=%#x pvMmio2=%p fWHvFlags=%#x: Map -> hrc=%Rhrc (%#x) Last=%#x/%u\n",
                    GCPhys, cb, fFlags, pvMmio2, fWHvFlags, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
            STAM_REL_COUNTER_INC(&pVM->nem.s.StatMapPageFailed);
            return VERR_NEM_MAP_PAGES_FAILED;
        }
    }
    else
    {
        /* Pure MMIO: exits will be delivered for the unmapped range. */
        Assert(!(fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_MMIO2));
        *pu2State = NEM_WIN_PAGE_STATE_UNMAPPED;
    }
    RT_NOREF(pvRam);
    return VINF_SUCCESS;
}
2365
2366
2367VMMR3_INT_DECL(int) NEMR3NotifyPhysMmioExMapLate(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, uint32_t fFlags,
2368 void *pvRam, void *pvMmio2, uint32_t *puNemRange)
2369{
2370 RT_NOREF(pVM, GCPhys, cb, fFlags, pvRam, pvMmio2, puNemRange);
2371 return VINF_SUCCESS;
2372}
2373
2374
/**
 * Notification that an MMIO/MMIO2 range is being unmapped: removes the MMIO2
 * mapping from the Hyper-V partition and restores any RAM it had replaced.
 *
 * Note that a failed unmap is recorded in the return code but processing
 * continues so the RAM restore still happens.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   GCPhys      Start of the range.
 * @param   cb          Size of the range in bytes.
 * @param   fFlags      NEM_NOTIFY_PHYS_MMIO_EX_F_XXX.
 * @param   pvRam       RAM backing to restore when NEM_NOTIFY_PHYS_MMIO_EX_F_REPLACE.
 * @param   pvMmio2     MMIO2 backing; only referenced for logging here.
 * @param   pu2State    Our page state for the range (optional output).
 * @param   puNemRange  NEM range cookie; unused by this backend.
 */
VMMR3_INT_DECL(int) NEMR3NotifyPhysMmioExUnmap(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, uint32_t fFlags, void *pvRam,
                                               void *pvMmio2, uint8_t *pu2State, uint32_t *puNemRange)
{
    int rc = VINF_SUCCESS;
    Log5(("NEMR3NotifyPhysMmioExUnmap: %RGp LB %RGp fFlags=%#x pvRam=%p pvMmio2=%p pu2State=%p uNemRange=%#x (%#x)\n",
          GCPhys, cb, fFlags, pvRam, pvMmio2, pu2State, puNemRange, *puNemRange));

    /*
     * Unmap the MMIO2 pages.
     */
    /** @todo If we implement aliasing (MMIO2 page aliased into MMIO range),
     *        we may have more stuff to unmap even in case of pure MMIO... */
    if (fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_MMIO2)
    {
        STAM_REL_PROFILE_START(&pVM->nem.s.StatProfUnmapGpaRange, a);
        HRESULT hrc = WHvUnmapGpaRange(pVM->nem.s.hPartition, GCPhys, cb);
        STAM_REL_PROFILE_STOP(&pVM->nem.s.StatProfUnmapGpaRange, a);
        if (FAILED(hrc))
        {
            /* Remember the failure but keep going; the RAM restore below must still run. */
            LogRel2(("NEMR3NotifyPhysMmioExUnmap: GCPhys=%RGp LB %RGp fFlags=%#x: Unmap -> hrc=%Rhrc (%#x) Last=%#x/%u (ignored)\n",
                     GCPhys, cb, fFlags, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
            rc = VERR_NEM_UNMAP_PAGES_FAILED;
            STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapPageFailed);
        }
    }

    /*
     * Restore the RAM we replaced.
     */
    if (fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_REPLACE)
    {
        AssertPtr(pvRam);
        STAM_REL_PROFILE_START(&pVM->nem.s.StatProfMapGpaRange, a);
        HRESULT hrc = WHvMapGpaRange(pVM->nem.s.hPartition, pvRam, GCPhys, cb,
                                     WHvMapGpaRangeFlagRead | WHvMapGpaRangeFlagWrite | WHvMapGpaRangeFlagExecute);
        STAM_REL_PROFILE_STOP(&pVM->nem.s.StatProfMapGpaRange, a);
        if (SUCCEEDED(hrc))
        { /* likely */ }
        else
        {
            LogRel(("NEMR3NotifyPhysMmioExUnmap: GCPhys=%RGp LB %RGp pvMmio2=%p hrc=%Rhrc (%#x) Last=%#x/%u\n",
                    GCPhys, cb, pvMmio2, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
            rc = VERR_NEM_MAP_PAGES_FAILED;
            STAM_REL_COUNTER_INC(&pVM->nem.s.StatMapPageFailed);
        }
        if (pu2State)
            *pu2State = NEM_WIN_PAGE_STATE_WRITABLE;
    }
    /* Mark the pages as unmapped if relevant. */
    else if (pu2State)
        *pu2State = NEM_WIN_PAGE_STATE_UNMAPPED;

    RT_NOREF(pvMmio2, puNemRange);
    return rc;
}
2430
2431
/**
 * Queries and resets the dirty bitmap of a dirty-tracked MMIO2 range via the
 * WHv dirty-bitmap API.
 *
 * @returns VBox status code (VERR_NEM_QUERY_DIRTY_BITMAP_FAILED on WHv failure).
 * @param   pVM         The cross context VM structure.
 * @param   GCPhys      Start of the range.
 * @param   cb          Size of the range in bytes.
 * @param   uNemRange   NEM range cookie; unused by this backend.
 * @param   pvBitmap    Where to store the dirty bitmap (one bit per page).
 * @param   cbBitmap    Size of the bitmap buffer in bytes.
 */
VMMR3_INT_DECL(int) NEMR3PhysMmio2QueryAndResetDirtyBitmap(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, uint32_t uNemRange,
                                                           void *pvBitmap, size_t cbBitmap)
{
    Assert(VM_IS_NEM_ENABLED(pVM));
    /* Caller must have checked NEMR3IsMmio2DirtyPageTrackingSupported first. */
    AssertReturn(g_pfnWHvQueryGpaRangeDirtyBitmap, VERR_INTERNAL_ERROR_2);
    Assert(cbBitmap == (uint32_t)cbBitmap);
    RT_NOREF(uNemRange);

    /* This is being profiled by PGM, see /PGM/Mmio2QueryAndResetDirtyBitmap. */
    HRESULT hrc = WHvQueryGpaRangeDirtyBitmap(pVM->nem.s.hPartition, GCPhys, cb, (UINT64 *)pvBitmap, (uint32_t)cbBitmap);
    if (SUCCEEDED(hrc))
        return VINF_SUCCESS;

    AssertLogRelMsgFailed(("GCPhys=%RGp LB %RGp pvBitmap=%p LB %#zx hrc=%Rhrc (%#x) Last=%#x/%u\n",
                           GCPhys, cb, pvBitmap, cbBitmap, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
    return VERR_NEM_QUERY_DIRTY_BITMAP_FAILED;
}
2449
2450
/**
 * Early ROM registration notification; currently a no-op that just resets the
 * page state (the actual mapping happens in NEMR3NotifyPhysRomRegisterLate).
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   GCPhys      Start of the ROM range.
 * @param   cb          Size of the ROM range in bytes.
 * @param   pvPages     Ring-3 backing of the pages; unused here.
 * @param   fFlags      NEM_NOTIFY_PHYS_ROM_F_XXX; unused here.
 * @param   pu2State    Our page state for the range (output, set to UINT8_MAX).
 * @param   puNemRange  NEM range cookie (output, set to 0).
 */
VMMR3_INT_DECL(int) NEMR3NotifyPhysRomRegisterEarly(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, void *pvPages, uint32_t fFlags,
                                                    uint8_t *pu2State, uint32_t *puNemRange)
{
    Log5(("nemR3NativeNotifyPhysRomRegisterEarly: %RGp LB %RGp pvPages=%p fFlags=%#x\n", GCPhys, cb, pvPages, fFlags));
    *pu2State = UINT8_MAX;
    *puNemRange = 0;

#if 0 /* Let's not do this after all. We'll get protection change notifications for each page and if not we'll map them lazily. */
    RTGCPHYS const cPages = cb >> X86_PAGE_SHIFT;
    for (RTGCPHYS iPage = 0; iPage < cPages; iPage++, GCPhys += X86_PAGE_SIZE)
    {
        const void *pvPage;
        int rc = nemR3NativeGCPhys2R3PtrReadOnly(pVM, GCPhys, &pvPage);
        if (RT_SUCCESS(rc))
        {
            HRESULT hrc = WHvMapGpaRange(pVM->nem.s.hPartition, (void *)pvPage, GCPhys, X86_PAGE_SIZE,
                                         WHvMapGpaRangeFlagRead | WHvMapGpaRangeFlagExecute);
            if (SUCCEEDED(hrc))
            { /* likely */ }
            else
            {
                LogRel(("nemR3NativeNotifyPhysRomRegisterEarly: GCPhys=%RGp hrc=%Rhrc (%#x) Last=%#x/%u\n",
                        GCPhys, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
                return VERR_NEM_INIT_FAILED;
            }
        }
        else
        {
            LogRel(("nemR3NativeNotifyPhysRomRegisterEarly: GCPhys=%RGp rc=%Rrc\n", GCPhys, rc));
            return rc;
        }
    }
    RT_NOREF_PV(fFlags);
#else
    RT_NOREF(pVM, GCPhys, cb, pvPages, fFlags);
#endif
    return VINF_SUCCESS;
}
2489
2490
2491VMMR3_INT_DECL(int) NEMR3NotifyPhysRomRegisterLate(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, void *pvPages,
2492 uint32_t fFlags, uint8_t *pu2State, uint32_t *puNemRange)
2493{
2494 Log5(("nemR3NativeNotifyPhysRomRegisterLate: %RGp LB %RGp pvPages=%p fFlags=%#x pu2State=%p (%d) puNemRange=%p (%#x)\n",
2495 GCPhys, cb, pvPages, fFlags, pu2State, *pu2State, puNemRange, *puNemRange));
2496 *pu2State = UINT8_MAX;
2497
2498 /*
2499 * (Re-)map readonly.
2500 */
2501 AssertPtrReturn(pvPages, VERR_INVALID_POINTER);
2502 STAM_REL_PROFILE_START(&pVM->nem.s.StatProfMapGpaRange, a);
2503 HRESULT hrc = WHvMapGpaRange(pVM->nem.s.hPartition, pvPages, GCPhys, cb, WHvMapGpaRangeFlagRead | WHvMapGpaRangeFlagExecute);
2504 STAM_REL_PROFILE_STOP(&pVM->nem.s.StatProfMapGpaRange, a);
2505 if (SUCCEEDED(hrc))
2506 *pu2State = NEM_WIN_PAGE_STATE_READABLE;
2507 else
2508 {
2509 LogRel(("nemR3NativeNotifyPhysRomRegisterEarly: GCPhys=%RGp LB %RGp pvPages=%p fFlags=%#x hrc=%Rhrc (%#x) Last=%#x/%u\n",
2510 GCPhys, cb, pvPages, fFlags, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
2511 STAM_REL_COUNTER_INC(&pVM->nem.s.StatMapPageFailed);
2512 return VERR_NEM_MAP_PAGES_FAILED;
2513 }
2514 RT_NOREF(fFlags, puNemRange);
2515 return VINF_SUCCESS;
2516}
2517
2518VMMR3_INT_DECL(void) NEMR3NotifySetA20(PVMCPU pVCpu, bool fEnabled)
2519{
2520 Log(("nemR3NativeNotifySetA20: fEnabled=%RTbool\n", fEnabled));
2521 Assert(VM_IS_NEM_ENABLED(pVCpu->CTX_SUFF(pVM)));
2522 RT_NOREF(pVCpu, fEnabled);
2523}
2524
2525
2526void nemHCNativeNotifyHandlerPhysicalRegister(PVMCC pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhys, RTGCPHYS cb)
2527{
2528 Log5(("nemHCNativeNotifyHandlerPhysicalRegister: %RGp LB %RGp enmKind=%d\n", GCPhys, cb, enmKind));
2529 NOREF(pVM); NOREF(enmKind); NOREF(GCPhys); NOREF(cb);
2530}
2531
2532
2533VMM_INT_DECL(void) NEMHCNotifyHandlerPhysicalDeregister(PVMCC pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhys, RTGCPHYS cb,
2534 RTR3PTR pvMemR3, uint8_t *pu2State)
2535{
2536 Log5(("NEMHCNotifyHandlerPhysicalDeregister: %RGp LB %RGp enmKind=%d pvMemR3=%p pu2State=%p (%d)\n",
2537 GCPhys, cb, enmKind, pvMemR3, pu2State, *pu2State));
2538
2539 *pu2State = UINT8_MAX;
2540 if (pvMemR3)
2541 {
2542 STAM_REL_PROFILE_START(&pVM->nem.s.StatProfMapGpaRange, a);
2543 HRESULT hrc = WHvMapGpaRange(pVM->nem.s.hPartition, pvMemR3, GCPhys, cb,
2544 WHvMapGpaRangeFlagRead | WHvMapGpaRangeFlagExecute | WHvMapGpaRangeFlagWrite);
2545 STAM_REL_PROFILE_STOP(&pVM->nem.s.StatProfMapGpaRange, a);
2546 if (SUCCEEDED(hrc))
2547 *pu2State = NEM_WIN_PAGE_STATE_WRITABLE;
2548 else
2549 AssertLogRelMsgFailed(("NEMHCNotifyHandlerPhysicalDeregister: WHvMapGpaRange(,%p,%RGp,%RGp,) -> %Rhrc\n",
2550 pvMemR3, GCPhys, cb, hrc));
2551 }
2552 RT_NOREF(enmKind);
2553}
2554
2555
2556void nemHCNativeNotifyHandlerPhysicalModify(PVMCC pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhysOld,
2557 RTGCPHYS GCPhysNew, RTGCPHYS cb, bool fRestoreAsRAM)
2558{
2559 Log5(("nemHCNativeNotifyHandlerPhysicalModify: %RGp LB %RGp -> %RGp enmKind=%d fRestoreAsRAM=%d\n",
2560 GCPhysOld, cb, GCPhysNew, enmKind, fRestoreAsRAM));
2561 NOREF(pVM); NOREF(enmKind); NOREF(GCPhysOld); NOREF(GCPhysNew); NOREF(cb); NOREF(fRestoreAsRAM);
2562}
2563
2564
2565/**
2566 * Worker that maps pages into Hyper-V.
2567 *
2568 * This is used by the PGM physical page notifications as well as the memory
2569 * access VMEXIT handlers.
2570 *
2571 * @returns VBox status code.
2572 * @param pVM The cross context VM structure.
2573 * @param pVCpu The cross context virtual CPU structure of the
2574 * calling EMT.
2575 * @param GCPhysSrc The source page address.
2576 * @param GCPhysDst The hyper-V destination page. This may differ from
2577 * GCPhysSrc when A20 is disabled.
2578 * @param fPageProt NEM_PAGE_PROT_XXX.
2579 * @param pu2State Our page state (input/output).
2580 * @param fBackingChanged Set if the page backing is being changed.
2581 * @thread EMT(pVCpu)
2582 */
2583NEM_TMPL_STATIC int nemHCNativeSetPhysPage(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhysSrc, RTGCPHYS GCPhysDst,
2584 uint32_t fPageProt, uint8_t *pu2State, bool fBackingChanged)
2585{
2586 /*
2587 * Looks like we need to unmap a page before we can change the backing
2588 * or even modify the protection. This is going to be *REALLY* efficient.
2589 * PGM lends us two bits to keep track of the state here.
2590 */
2591 RT_NOREF(pVCpu);
2592 uint8_t const u2OldState = *pu2State;
2593 uint8_t const u2NewState = fPageProt & NEM_PAGE_PROT_WRITE ? NEM_WIN_PAGE_STATE_WRITABLE
2594 : fPageProt & NEM_PAGE_PROT_READ ? NEM_WIN_PAGE_STATE_READABLE : NEM_WIN_PAGE_STATE_UNMAPPED;
2595 if ( fBackingChanged
2596 || u2NewState != u2OldState)
2597 {
2598 if (u2OldState > NEM_WIN_PAGE_STATE_UNMAPPED)
2599 {
2600 STAM_REL_PROFILE_START(&pVM->nem.s.StatProfUnmapGpaRangePage, a);
2601 HRESULT hrc = WHvUnmapGpaRange(pVM->nem.s.hPartition, GCPhysDst, X86_PAGE_SIZE);
2602 STAM_REL_PROFILE_STOP(&pVM->nem.s.StatProfUnmapGpaRangePage, a);
2603 if (SUCCEEDED(hrc))
2604 {
2605 *pu2State = NEM_WIN_PAGE_STATE_UNMAPPED;
2606 STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapPage);
2607 uint32_t cMappedPages = ASMAtomicDecU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
2608 if (u2NewState == NEM_WIN_PAGE_STATE_UNMAPPED)
2609 {
2610 Log5(("NEM GPA unmapped/set: %RGp (was %s, cMappedPages=%u)\n",
2611 GCPhysDst, g_apszPageStates[u2OldState], cMappedPages));
2612 return VINF_SUCCESS;
2613 }
2614 }
2615 else
2616 {
2617 STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapPageFailed);
2618 LogRel(("nemHCNativeSetPhysPage/unmap: GCPhysDst=%RGp hrc=%Rhrc (%#x) Last=%#x/%u\n",
2619 GCPhysDst, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
2620 return VERR_NEM_INIT_FAILED;
2621 }
2622 }
2623 }
2624
2625 /*
2626 * Writeable mapping?
2627 */
2628 if (fPageProt & NEM_PAGE_PROT_WRITE)
2629 {
2630 void *pvPage;
2631 int rc = nemR3NativeGCPhys2R3PtrWriteable(pVM, GCPhysSrc, &pvPage);
2632 if (RT_SUCCESS(rc))
2633 {
2634 HRESULT hrc = WHvMapGpaRange(pVM->nem.s.hPartition, pvPage, GCPhysDst, X86_PAGE_SIZE,
2635 WHvMapGpaRangeFlagRead | WHvMapGpaRangeFlagExecute | WHvMapGpaRangeFlagWrite);
2636 if (SUCCEEDED(hrc))
2637 {
2638 *pu2State = NEM_WIN_PAGE_STATE_WRITABLE;
2639 STAM_REL_COUNTER_INC(&pVM->nem.s.StatMapPage);
2640 uint32_t cMappedPages = ASMAtomicIncU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
2641 Log5(("NEM GPA mapped/set: %RGp %s (was %s, cMappedPages=%u)\n",
2642 GCPhysDst, g_apszPageStates[u2NewState], g_apszPageStates[u2OldState], cMappedPages));
2643 return VINF_SUCCESS;
2644 }
2645 STAM_REL_COUNTER_INC(&pVM->nem.s.StatMapPageFailed);
2646 LogRel(("nemHCNativeSetPhysPage/writable: GCPhysDst=%RGp hrc=%Rhrc (%#x) Last=%#x/%u\n",
2647 GCPhysDst, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
2648 return VERR_NEM_INIT_FAILED;
2649 }
2650 LogRel(("nemHCNativeSetPhysPage/writable: GCPhysSrc=%RGp rc=%Rrc\n", GCPhysSrc, rc));
2651 return rc;
2652 }
2653
2654 if (fPageProt & NEM_PAGE_PROT_READ)
2655 {
2656 const void *pvPage;
2657 int rc = nemR3NativeGCPhys2R3PtrReadOnly(pVM, GCPhysSrc, &pvPage);
2658 if (RT_SUCCESS(rc))
2659 {
2660 STAM_REL_PROFILE_START(&pVM->nem.s.StatProfMapGpaRangePage, a);
2661 HRESULT hrc = WHvMapGpaRange(pVM->nem.s.hPartition, (void *)pvPage, GCPhysDst, X86_PAGE_SIZE,
2662 WHvMapGpaRangeFlagRead | WHvMapGpaRangeFlagExecute);
2663 STAM_REL_PROFILE_STOP(&pVM->nem.s.StatProfMapGpaRangePage, a);
2664 if (SUCCEEDED(hrc))
2665 {
2666 *pu2State = NEM_WIN_PAGE_STATE_READABLE;
2667 STAM_REL_COUNTER_INC(&pVM->nem.s.StatMapPage);
2668 uint32_t cMappedPages = ASMAtomicIncU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
2669 Log5(("NEM GPA mapped/set: %RGp %s (was %s, cMappedPages=%u)\n",
2670 GCPhysDst, g_apszPageStates[u2NewState], g_apszPageStates[u2OldState], cMappedPages));
2671 return VINF_SUCCESS;
2672 }
2673 STAM_REL_COUNTER_INC(&pVM->nem.s.StatMapPageFailed);
2674 LogRel(("nemHCNativeSetPhysPage/readonly: GCPhysDst=%RGp hrc=%Rhrc (%#x) Last=%#x/%u\n",
2675 GCPhysDst, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
2676 return VERR_NEM_INIT_FAILED;
2677 }
2678 LogRel(("nemHCNativeSetPhysPage/readonly: GCPhysSrc=%RGp rc=%Rrc\n", GCPhysSrc, rc));
2679 return rc;
2680 }
2681
2682 /* We already unmapped it above. */
2683 *pu2State = NEM_WIN_PAGE_STATE_UNMAPPED;
2684 return VINF_SUCCESS;
2685}
2686
2687
/**
 * Unmaps a single page from the Hyper-V partition, updating our page state.
 *
 * @returns VBox status code (VERR_NEM_IPE_6 if WHvUnmapGpaRange fails).
 * @param   pVM         The cross context VM structure.
 * @param   GCPhysDst   The guest physical address of the page (any offset
 *                      bits are masked off before unmapping).
 * @param   pu2State    Our page state (input/output); set to
 *                      NEM_WIN_PAGE_STATE_UNMAPPED on success.
 */
NEM_TMPL_STATIC int nemHCJustUnmapPageFromHyperV(PVMCC pVM, RTGCPHYS GCPhysDst, uint8_t *pu2State)
{
    /* Already unmapped (or state unknown/not-set)?  Then nothing to do. */
    if (*pu2State <= NEM_WIN_PAGE_STATE_UNMAPPED)
    {
        Log5(("nemHCJustUnmapPageFromHyperV: %RGp == unmapped\n", GCPhysDst));
        *pu2State = NEM_WIN_PAGE_STATE_UNMAPPED;
        return VINF_SUCCESS;
    }

    STAM_REL_PROFILE_START(&pVM->nem.s.StatProfUnmapGpaRangePage, a);
    HRESULT hrc = WHvUnmapGpaRange(pVM->nem.s.hPartition, GCPhysDst & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK, X86_PAGE_SIZE);
    STAM_REL_PROFILE_STOP(&pVM->nem.s.StatProfUnmapGpaRangePage, a);
    if (SUCCEEDED(hrc))
    {
        STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapPage);
        uint32_t cMappedPages = ASMAtomicDecU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
        *pu2State = NEM_WIN_PAGE_STATE_UNMAPPED;
        Log5(("nemHCJustUnmapPageFromHyperV: %RGp => unmapped (total %u)\n", GCPhysDst, cMappedPages));
        return VINF_SUCCESS;
    }
    STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapPageFailed);
    LogRel(("nemHCJustUnmapPageFromHyperV(%RGp): failed! hrc=%Rhrc (%#x) Last=%#x/%u\n",
            GCPhysDst, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
    return VERR_NEM_IPE_6;
}
2713
2714
2715int nemHCNativeNotifyPhysPageAllocated(PVMCC pVM, RTGCPHYS GCPhys, RTHCPHYS HCPhys, uint32_t fPageProt,
2716 PGMPAGETYPE enmType, uint8_t *pu2State)
2717{
2718 Log5(("nemHCNativeNotifyPhysPageAllocated: %RGp HCPhys=%RHp fPageProt=%#x enmType=%d *pu2State=%d\n",
2719 GCPhys, HCPhys, fPageProt, enmType, *pu2State));
2720 RT_NOREF_PV(HCPhys); RT_NOREF_PV(enmType);
2721
2722 int rc;
2723 RT_NOREF_PV(fPageProt);
2724 rc = nemHCJustUnmapPageFromHyperV(pVM, GCPhys, pu2State);
2725 return rc;
2726}
2727
2728
2729VMM_INT_DECL(void) NEMHCNotifyPhysPageProtChanged(PVMCC pVM, RTGCPHYS GCPhys, RTHCPHYS HCPhys, RTR3PTR pvR3, uint32_t fPageProt,
2730 PGMPAGETYPE enmType, uint8_t *pu2State)
2731{
2732 Log5(("NEMHCNotifyPhysPageProtChanged: %RGp HCPhys=%RHp fPageProt=%#x enmType=%d *pu2State=%d\n",
2733 GCPhys, HCPhys, fPageProt, enmType, *pu2State));
2734 Assert(VM_IS_NEM_ENABLED(pVM));
2735 RT_NOREF(HCPhys, enmType, pvR3);
2736
2737 RT_NOREF_PV(fPageProt);
2738 nemHCJustUnmapPageFromHyperV(pVM, GCPhys, pu2State);
2739}
2740
2741
2742VMM_INT_DECL(void) NEMHCNotifyPhysPageChanged(PVMCC pVM, RTGCPHYS GCPhys, RTHCPHYS HCPhysPrev, RTHCPHYS HCPhysNew,
2743 RTR3PTR pvNewR3, uint32_t fPageProt, PGMPAGETYPE enmType, uint8_t *pu2State)
2744{
2745 Log5(("nemHCNativeNotifyPhysPageChanged: %RGp HCPhys=%RHp->%RHp pvNewR3=%p fPageProt=%#x enmType=%d *pu2State=%d\n",
2746 GCPhys, HCPhysPrev, HCPhysNew, pvNewR3, fPageProt, enmType, *pu2State));
2747 Assert(VM_IS_NEM_ENABLED(pVM));
2748 RT_NOREF(HCPhysPrev, HCPhysNew, pvNewR3, enmType);
2749
2750 RT_NOREF_PV(fPageProt);
2751 nemHCJustUnmapPageFromHyperV(pVM, GCPhys, pu2State);
2752}
2753
2754
2755/**
2756 * Returns features supported by the NEM backend.
2757 *
2758 * @returns Flags of features supported by the native NEM backend.
2759 * @param pVM The cross context VM structure.
2760 */
2761VMM_INT_DECL(uint32_t) NEMHCGetFeatures(PVMCC pVM)
2762{
2763 RT_NOREF(pVM);
2764 /** @todo Is NEM_FEAT_F_FULL_GST_EXEC always true? */
2765 return NEM_FEAT_F_NESTED_PAGING | NEM_FEAT_F_FULL_GST_EXEC;
2766}
2767
2768
2769/** @page pg_nem_win_aarmv8 NEM/win - Native Execution Manager, Windows.
2770 *
2771 * Open questions:
2772 * - Why can't one read and write WHvArm64RegisterId*
2773 * - WHvArm64RegisterDbgbcr0El1 is not readable?
2774 * - Getting notified about system register reads/writes (GIC)?
2775 * - InstructionByteCount and InstructionBytes for unmapped GPA exit are zero...
2776 * - Handling of (vTimer) interrupts, how is WHvRequestInterrupt() supposed to be used?
2777 */
2778
Note: See TracBrowser for help on using the repository browser.

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette