VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/NEMR3Native-win-armv8.cpp@ 109282

Last change on this file since 109282 was 109282, checked in by vboxsync, 4 days ago

VMM/VMMR3/NEMR3Native-win-armv8.cpp: Fix regression introduced with r168190 causing a corrupted guest CPU state to be imported/exported, bugref:10392

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 128.2 KB
Line 
1/* $Id: NEMR3Native-win-armv8.cpp 109282 2025-05-15 12:30:08Z vboxsync $ */
2/** @file
3 * NEM - Native execution manager, native ring-3 Windows backend.
4 *
5 * Log group 2: Exit logging.
6 * Log group 3: Log context on exit.
7 * Log group 5: Ring-3 memory management
8 * Log group 6: Ring-0 memory management
9 * Log group 12: API intercepts.
10 */
11
12/*
13 * Copyright (C) 2018-2024 Oracle and/or its affiliates.
14 *
15 * This file is part of VirtualBox base platform packages, as
16 * available from https://www.215389.xyz.
17 *
18 * This program is free software; you can redistribute it and/or
19 * modify it under the terms of the GNU General Public License
20 * as published by the Free Software Foundation, in version 3 of the
21 * License.
22 *
23 * This program is distributed in the hope that it will be useful, but
24 * WITHOUT ANY WARRANTY; without even the implied warranty of
25 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
26 * General Public License for more details.
27 *
28 * You should have received a copy of the GNU General Public License
29 * along with this program; if not, see <https://www.gnu.org/licenses>.
30 *
31 * SPDX-License-Identifier: GPL-3.0-only
32 */
33
34
35/*********************************************************************************************************************************
36* Header Files *
37*********************************************************************************************************************************/
38#define LOG_GROUP LOG_GROUP_NEM
39#define VMCPU_INCL_CPUM_GST_CTX
40#include <iprt/nt/nt-and-windows.h>
41#include <iprt/nt/hyperv.h>
42#include <WinHvPlatform.h>
43
44#ifndef _WIN32_WINNT_WIN10
45# error "Missing _WIN32_WINNT_WIN10"
46#endif
47#ifndef _WIN32_WINNT_WIN10_RS1 /* Missing define, causing trouble for us. */
48# define _WIN32_WINNT_WIN10_RS1 (_WIN32_WINNT_WIN10 + 1)
49#endif
50#include <sysinfoapi.h>
51#include <debugapi.h>
52#include <errhandlingapi.h>
53#include <fileapi.h>
54#include <winerror.h> /* no api header for this. */
55
56#include <VBox/dis.h>
57#include <VBox/vmm/nem.h>
58#include <VBox/vmm/iem.h>
59#include <VBox/vmm/em.h>
60#include <VBox/vmm/pdmapic.h>
61#include <VBox/vmm/pdm.h>
62#include <VBox/vmm/dbgftrace.h>
63#include "NEMInternal.h"
64#include <VBox/vmm/vmcc.h>
65
66#include <iprt/formats/arm-psci.h>
67
68#include <iprt/ldr.h>
69#include <iprt/path.h>
70#include <iprt/string.h>
71#include <iprt/system.h>
72#include <iprt/utf16.h>
73
74#ifndef NTDDI_WIN10_VB /* Present in W10 2004 SDK, quite possibly earlier. */
75HRESULT WINAPI WHvQueryGpaRangeDirtyBitmap(WHV_PARTITION_HANDLE, WHV_GUEST_PHYSICAL_ADDRESS, UINT64, UINT64 *, UINT32);
76# define WHvMapGpaRangeFlagTrackDirtyPages ((WHV_MAP_GPA_RANGE_FLAGS)0x00000008)
77#endif
78
79/** Our saved state version for Hyper-V specific things. */
80#define NEM_HV_SAVED_STATE_VERSION 1
81
82
/*
 * The following definitions, which appeared in build 27744, allow configuring the
 * base address of the GICv3 controller (there is no official SDK for this yet).
 */
87/** @todo Better way of defining these which doesn't require casting later on when calling APIs. */
88#define WHV_PARTITION_PROPERTY_CODE_ARM64_IC_PARAMETERS UINT32_C(0x00001012)
89/** No GIC present. */
90#define WHV_ARM64_IC_EMULATION_MODE_NONE 0
91/** Hyper-V emulates a GICv3. */
92#define WHV_ARM64_IC_EMULATION_MODE_GICV3 1
93
/**
 * Configures the interrupt controller emulated by Hyper-V.
 *
 * Payload for the WHV_PARTITION_PROPERTY_CODE_ARM64_IC_PARAMETERS partition
 * property defined above (set in nemR3WinGicCreate).  The layout is fixed at
 * 64 bytes, enforced by the AssertCompileSize below.
 */
typedef struct MY_WHV_ARM64_IC_PARAMETERS
{
    uint32_t u32EmulationMode;   /**< WHV_ARM64_IC_EMULATION_MODE_XXX value. */
    uint32_t u32Rsvd;            /**< Reserved / explicit alignment padding. */
    union
    {
        /** Parameters used with WHV_ARM64_IC_EMULATION_MODE_GICV3. */
        struct
        {
            RTGCPHYS GCPhysGicdBase;           /**< Guest physical base of the GIC distributor (GICD). */
            RTGCPHYS GCPhysGitsTranslaterBase; /**< Guest physical base of the ITS translator. */
            uint32_t u32Rsvd;                  /**< Reserved. */
            uint32_t cLpiIntIdBits;            /**< Number of LPI INTID bits (set to 1 as LPIs are unused, see nemR3WinGicCreate). */
            uint32_t u32PpiCntvOverflw;        /**< Absolute PPI INTID of the virtual timer interrupt. */
            uint32_t u32PpiPmu;                /**< Absolute PPI INTID of the PMU interrupt. */
            uint32_t au32Rsvd[6];              /**< Reserved, keeps the structure at 64 bytes. */
        } GicV3;
    } u;
} MY_WHV_ARM64_IC_PARAMETERS;
AssertCompileSize(MY_WHV_ARM64_IC_PARAMETERS, 64);
116
117
/**
 * The hypercall exit context.
 *
 * Local definition used instead of the SDK one (the SDK we build against does
 * not match the runtime layout for ARM64, see MY_WHV_RUN_VP_EXIT_CONTEXT).
 * Total size: 24-byte intercept header + 8 bytes + 18 GPRs = 176 bytes,
 * enforced by the AssertCompileSize below.
 */
typedef struct MY_WHV_HYPERCALL_CONTEXT
{
    WHV_INTERCEPT_MESSAGE_HEADER Header;    /**< Common intercept message header. */
    uint16_t                     Immediate; /**< Hypercall immediate value; presumably the HVC #imm16 — confirm against Hyper-V TLFS. */
    uint16_t                     u16Rsvd;   /**< Reserved. */
    uint32_t                     u32Rsvd;   /**< Reserved. */
    uint64_t                     X[18];     /**< General registers; presumably X0..X17 at the time of the hypercall — confirm. */
} MY_WHV_HYPERCALL_CONTEXT;
/** Pointer to a hypercall exit context. */
typedef MY_WHV_HYPERCALL_CONTEXT *PMY_WHV_HYPERCALL_CONTEXT;
AssertCompileSize(MY_WHV_HYPERCALL_CONTEXT, 24 + 19 * sizeof(uint64_t));
131
132
/**
 * The ARM64 reset context.
 *
 * Delivered when the guest requests a reset or power-off (e.g. via PSCI);
 * see the WHV_ARM64_RESET_CONTEXT_TYPE_XXX values below for ResetType.
 */
typedef struct MY_WHV_ARM64_RESET_CONTEXT
{
    WHV_INTERCEPT_MESSAGE_HEADER Header;    /**< Common intercept message header. */
    uint32_t                     ResetType; /**< WHV_ARM64_RESET_CONTEXT_TYPE_XXX. */
    uint32_t                     u32Rsvd;   /**< Reserved. */
} MY_WHV_ARM64_RESET_CONTEXT;
/** Pointer to an ARM64 reset context. */
typedef MY_WHV_ARM64_RESET_CONTEXT *PMY_WHV_ARM64_RESET_CONTEXT;
AssertCompileSize(MY_WHV_ARM64_RESET_CONTEXT, 24 + 2 * sizeof(uint32_t));


/** @name Reset types for MY_WHV_ARM64_RESET_CONTEXT::ResetType.
 * @{ */
#define WHV_ARM64_RESET_CONTEXT_TYPE_POWER_OFF  0
#define WHV_ARM64_RESET_CONTEXT_TYPE_RESET      1
/** @} */
148
149
/**
 * The exit reason context for arm64, the size is different
 * from the default SDK we build against.
 *
 * Layout: 16 bytes of reason/reserved fields followed by a 256-byte payload
 * union (272 bytes total, enforced by the AssertCompileSize below).
 */
typedef struct MY_WHV_RUN_VP_EXIT_CONTEXT
{
    WHV_RUN_VP_EXIT_REASON ExitReason;  /**< Why WHvRunVirtualProcessor returned. */
    uint32_t               u32Rsvd;     /**< Reserved. */
    uint64_t               u64Rsvd;     /**< Reserved. */
    union
    {
        WHV_MEMORY_ACCESS_CONTEXT           MemoryAccess;           /**< Guest memory access exits. */
        WHV_RUN_VP_CANCELED_CONTEXT         CancelReason;           /**< WHvCancelRunVirtualProcessor exits. */
        MY_WHV_HYPERCALL_CONTEXT            Hypercall;              /**< Hypercall exits (local layout). */
        WHV_UNRECOVERABLE_EXCEPTION_CONTEXT UnrecoverableException; /**< Unrecoverable exception exits. */
        MY_WHV_ARM64_RESET_CONTEXT          Arm64Reset;             /**< Reset/power-off exits (local layout). */
        uint64_t                            au64Rsvd2[32];          /**< Pads the union to 256 bytes. */
    };
} MY_WHV_RUN_VP_EXIT_CONTEXT;
/** Pointer to an arm64 exit reason context. */
typedef MY_WHV_RUN_VP_EXIT_CONTEXT *PMY_WHV_RUN_VP_EXIT_CONTEXT;
AssertCompileSize(MY_WHV_RUN_VP_EXIT_CONTEXT, 272);

/** GIC redistributor base register name; not in the SDK headers we build against. */
#define My_WHvArm64RegisterGicrBaseGpa ((WHV_REGISTER_NAME)UINT32_C(0x00063000))
/** ACTLR_EL1 register name; not in the SDK headers we build against. */
#define My_WHvArm64RegisterActlrEl1    ((WHV_REGISTER_NAME)UINT32_C(0x00040003))
174
175
176/*********************************************************************************************************************************
177* Defined Constants And Macros *
178*********************************************************************************************************************************/
179
180
181/*********************************************************************************************************************************
182* Global Variables *
183*********************************************************************************************************************************/
/** @name APIs imported from WinHvPlatform.dll
 *
 * Resolved at runtime by nemR3WinInitProbeAndLoad via the g_aImports table;
 * all are NULL until that succeeds.
 * @{ */
static decltype(WHvGetCapability) *                 g_pfnWHvGetCapability;
static decltype(WHvCreatePartition) *               g_pfnWHvCreatePartition;
static decltype(WHvSetupPartition) *                g_pfnWHvSetupPartition;
static decltype(WHvDeletePartition) *               g_pfnWHvDeletePartition;
static decltype(WHvGetPartitionProperty) *          g_pfnWHvGetPartitionProperty;
static decltype(WHvSetPartitionProperty) *          g_pfnWHvSetPartitionProperty;
static decltype(WHvMapGpaRange) *                   g_pfnWHvMapGpaRange;
static decltype(WHvUnmapGpaRange) *                 g_pfnWHvUnmapGpaRange;
static decltype(WHvTranslateGva) *                  g_pfnWHvTranslateGva;
static decltype(WHvQueryGpaRangeDirtyBitmap) *      g_pfnWHvQueryGpaRangeDirtyBitmap;
static decltype(WHvCreateVirtualProcessor) *        g_pfnWHvCreateVirtualProcessor;
static decltype(WHvDeleteVirtualProcessor) *        g_pfnWHvDeleteVirtualProcessor;
static decltype(WHvRunVirtualProcessor) *           g_pfnWHvRunVirtualProcessor;
static decltype(WHvCancelRunVirtualProcessor) *     g_pfnWHvCancelRunVirtualProcessor;
static decltype(WHvGetVirtualProcessorRegisters) *  g_pfnWHvGetVirtualProcessorRegisters;
static decltype(WHvSetVirtualProcessorRegisters) *  g_pfnWHvSetVirtualProcessorRegisters;
static decltype(WHvSuspendPartitionTime) *          g_pfnWHvSuspendPartitionTime;
static decltype(WHvResumePartitionTime) *           g_pfnWHvResumePartitionTime;
/* NOTE(review): the following three are deliberately not 'static' —
   presumably referenced from other NEM/GIC translation units; confirm
   before tightening the linkage. */
decltype(WHvGetVirtualProcessorState) *             g_pfnWHvGetVirtualProcessorState;
decltype(WHvSetVirtualProcessorState) *             g_pfnWHvSetVirtualProcessorState;
decltype(WHvRequestInterrupt) *                     g_pfnWHvRequestInterrupt;
/** @} */

/** The Windows build number.
 * Statically initialized to 17134; presumably refreshed with the real host
 * build number during init — confirm where it is updated. */
static uint32_t g_uBuildNo = 17134;
211
212
213
/**
 * Import instructions.
 *
 * Drives nemR3WinInitProbeAndLoad: each entry names an API to resolve and the
 * g_pfnXxx variable that receives it.  Non-optional entries that fail to
 * resolve cause initialization to fail.
 *
 * NOTE(review): idxDll documents "1 for vid.dll", but the DLL name table in
 * nemR3WinInitProbeAndLoad only contains WinHvPlatform.dll — looks like a
 * leftover from the x86 backend; confirm.
 */
static const struct
{
    uint8_t     idxDll;     /**< 0 for WinHvPlatform.dll, 1 for vid.dll. */
    bool        fOptional;  /**< Set if import is optional. */
    PFNRT      *ppfn;       /**< The function pointer variable. */
    const char *pszName;    /**< The function name. */
} g_aImports[] =
{
#define NEM_WIN_IMPORT(a_idxDll, a_fOptional, a_Name) { (a_idxDll), (a_fOptional), (PFNRT *)&RT_CONCAT(g_pfn,a_Name), #a_Name }
    NEM_WIN_IMPORT(0, false, WHvGetCapability),
    NEM_WIN_IMPORT(0, false, WHvCreatePartition),
    NEM_WIN_IMPORT(0, false, WHvSetupPartition),
    NEM_WIN_IMPORT(0, false, WHvDeletePartition),
    NEM_WIN_IMPORT(0, false, WHvGetPartitionProperty),
    NEM_WIN_IMPORT(0, false, WHvSetPartitionProperty),
    NEM_WIN_IMPORT(0, false, WHvMapGpaRange),
    NEM_WIN_IMPORT(0, false, WHvUnmapGpaRange),
    NEM_WIN_IMPORT(0, false, WHvTranslateGva),
    /* Optional: only present in newer Windows builds (see the NTDDI_WIN10_VB
       guarded prototype near the top of the file). */
    NEM_WIN_IMPORT(0, true,  WHvQueryGpaRangeDirtyBitmap),
    NEM_WIN_IMPORT(0, false, WHvCreateVirtualProcessor),
    NEM_WIN_IMPORT(0, false, WHvDeleteVirtualProcessor),
    NEM_WIN_IMPORT(0, false, WHvRunVirtualProcessor),
    NEM_WIN_IMPORT(0, false, WHvCancelRunVirtualProcessor),
    NEM_WIN_IMPORT(0, false, WHvGetVirtualProcessorRegisters),
    NEM_WIN_IMPORT(0, false, WHvSetVirtualProcessorRegisters),
    NEM_WIN_IMPORT(0, false, WHvSuspendPartitionTime),
    NEM_WIN_IMPORT(0, false, WHvResumePartitionTime),
    NEM_WIN_IMPORT(0, false, WHvGetVirtualProcessorState),
    NEM_WIN_IMPORT(0, false, WHvSetVirtualProcessorState),
    NEM_WIN_IMPORT(0, false, WHvRequestInterrupt),
#undef NEM_WIN_IMPORT
};
249
250
/*
 * Let the preprocessor alias the APIs to import variables for better autocompletion.
 *
 * With these in place the rest of the file can call e.g. WHvMapGpaRange(...)
 * directly while actually going through the dynamically resolved pointer.
 */
#ifndef IN_SLICKEDIT
# define WHvGetCapability                           g_pfnWHvGetCapability
# define WHvCreatePartition                         g_pfnWHvCreatePartition
# define WHvSetupPartition                          g_pfnWHvSetupPartition
# define WHvDeletePartition                         g_pfnWHvDeletePartition
# define WHvGetPartitionProperty                    g_pfnWHvGetPartitionProperty
# define WHvSetPartitionProperty                    g_pfnWHvSetPartitionProperty
# define WHvMapGpaRange                             g_pfnWHvMapGpaRange
# define WHvUnmapGpaRange                           g_pfnWHvUnmapGpaRange
# define WHvTranslateGva                            g_pfnWHvTranslateGva
# define WHvQueryGpaRangeDirtyBitmap                g_pfnWHvQueryGpaRangeDirtyBitmap
# define WHvCreateVirtualProcessor                  g_pfnWHvCreateVirtualProcessor
# define WHvDeleteVirtualProcessor                  g_pfnWHvDeleteVirtualProcessor
# define WHvRunVirtualProcessor                     g_pfnWHvRunVirtualProcessor
/* NOTE(review): there is no g_pfnWHvGetRunExitContextSize variable or
   g_aImports entry in this file; any use of WHvGetRunExitContextSize would
   fail to compile.  Looks like a leftover alias — confirm and drop. */
# define WHvGetRunExitContextSize                   g_pfnWHvGetRunExitContextSize
# define WHvCancelRunVirtualProcessor               g_pfnWHvCancelRunVirtualProcessor
# define WHvGetVirtualProcessorRegisters            g_pfnWHvGetVirtualProcessorRegisters
# define WHvSetVirtualProcessorRegisters            g_pfnWHvSetVirtualProcessorRegisters
# define WHvSuspendPartitionTime                    g_pfnWHvSuspendPartitionTime
# define WHvResumePartitionTime                     g_pfnWHvResumePartitionTime
# define WHvGetVirtualProcessorState                g_pfnWHvGetVirtualProcessorState
# define WHvSetVirtualProcessorState                g_pfnWHvSetVirtualProcessorState
# define WHvRequestInterrupt                        g_pfnWHvRequestInterrupt
#endif
278
279
/** Shorthand for building WHvArm64RegisterXxx names in the tables below. */
#define WHV_REGNM(a_Suffix) WHvArm64Register ## a_Suffix
/** The general registers.
 *
 * Maps each Hyper-V register name to the CPUMCTX_EXTRN_XXX flag covering it
 * and its byte offset inside CPUMCTX, so state import/export can be table
 * driven.  Covers X0..X28, FP(X29), LR(X30), PC, FPCR and FPSR. */
static const struct
{
    WHV_REGISTER_NAME enmWHvReg;    /**< The Hyper-V register name/ID. */
    uint32_t          fCpumExtrn;   /**< CPUMCTX_EXTRN_XXX flag for the register. */
    uintptr_t         offCpumCtx;   /**< Byte offset of the value within CPUMCTX. */
} s_aCpumRegs[] =
{
/* X0..X3 have individual CPUMCTX_EXTRN flags; X4..X28 share one. */
#define CPUM_GREG_EMIT_X0_X3(a_Idx)  { WHV_REGNM(X ## a_Idx), CPUMCTX_EXTRN_X ## a_Idx, RT_UOFFSETOF(CPUMCTX, aGRegs[a_Idx].x) }
#define CPUM_GREG_EMIT_X4_X28(a_Idx) { WHV_REGNM(X ## a_Idx), CPUMCTX_EXTRN_X4_X28,     RT_UOFFSETOF(CPUMCTX, aGRegs[a_Idx].x) }
    CPUM_GREG_EMIT_X0_X3(0),
    CPUM_GREG_EMIT_X0_X3(1),
    CPUM_GREG_EMIT_X0_X3(2),
    CPUM_GREG_EMIT_X0_X3(3),
    CPUM_GREG_EMIT_X4_X28(4),
    CPUM_GREG_EMIT_X4_X28(5),
    CPUM_GREG_EMIT_X4_X28(6),
    CPUM_GREG_EMIT_X4_X28(7),
    CPUM_GREG_EMIT_X4_X28(8),
    CPUM_GREG_EMIT_X4_X28(9),
    CPUM_GREG_EMIT_X4_X28(10),
    CPUM_GREG_EMIT_X4_X28(11),
    CPUM_GREG_EMIT_X4_X28(12),
    CPUM_GREG_EMIT_X4_X28(13),
    CPUM_GREG_EMIT_X4_X28(14),
    CPUM_GREG_EMIT_X4_X28(15),
    CPUM_GREG_EMIT_X4_X28(16),
    CPUM_GREG_EMIT_X4_X28(17),
    CPUM_GREG_EMIT_X4_X28(18),
    CPUM_GREG_EMIT_X4_X28(19),
    CPUM_GREG_EMIT_X4_X28(20),
    CPUM_GREG_EMIT_X4_X28(21),
    CPUM_GREG_EMIT_X4_X28(22),
    CPUM_GREG_EMIT_X4_X28(23),
    CPUM_GREG_EMIT_X4_X28(24),
    CPUM_GREG_EMIT_X4_X28(25),
    CPUM_GREG_EMIT_X4_X28(26),
    CPUM_GREG_EMIT_X4_X28(27),
    CPUM_GREG_EMIT_X4_X28(28),
    { WHV_REGNM(Fp),   CPUMCTX_EXTRN_FP,   RT_UOFFSETOF(CPUMCTX, aGRegs[29].x) },
    { WHV_REGNM(Lr),   CPUMCTX_EXTRN_LR,   RT_UOFFSETOF(CPUMCTX, aGRegs[30].x) },
    { WHV_REGNM(Pc),   CPUMCTX_EXTRN_PC,   RT_UOFFSETOF(CPUMCTX, Pc.u64) },
    { WHV_REGNM(Fpcr), CPUMCTX_EXTRN_FPCR, RT_UOFFSETOF(CPUMCTX, fpcr) },
    { WHV_REGNM(Fpsr), CPUMCTX_EXTRN_FPSR, RT_UOFFSETOF(CPUMCTX, fpsr) }
#undef CPUM_GREG_EMIT_X0_X3
#undef CPUM_GREG_EMIT_X4_X28
};
/** SIMD/FP registers.
 *
 * Maps the 32 Hyper-V Q registers to the CPUMCTX vector register offsets.
 * Unlike s_aCpumRegs there is no per-entry CPUMCTX_EXTRN flag here. */
static const struct
{
    WHV_REGISTER_NAME enmWHvReg;    /**< The Hyper-V register name/ID. */
    uintptr_t         offCpumCtx;   /**< Byte offset of the value within CPUMCTX. */
} s_aCpumFpRegs[] =
{
#define CPUM_VREG_EMIT(a_Idx)  { WHV_REGNM(Q ## a_Idx), RT_UOFFSETOF(CPUMCTX, aVRegs[a_Idx].v) }
    CPUM_VREG_EMIT(0),
    CPUM_VREG_EMIT(1),
    CPUM_VREG_EMIT(2),
    CPUM_VREG_EMIT(3),
    CPUM_VREG_EMIT(4),
    CPUM_VREG_EMIT(5),
    CPUM_VREG_EMIT(6),
    CPUM_VREG_EMIT(7),
    CPUM_VREG_EMIT(8),
    CPUM_VREG_EMIT(9),
    CPUM_VREG_EMIT(10),
    CPUM_VREG_EMIT(11),
    CPUM_VREG_EMIT(12),
    CPUM_VREG_EMIT(13),
    CPUM_VREG_EMIT(14),
    CPUM_VREG_EMIT(15),
    CPUM_VREG_EMIT(16),
    CPUM_VREG_EMIT(17),
    CPUM_VREG_EMIT(18),
    CPUM_VREG_EMIT(19),
    CPUM_VREG_EMIT(20),
    CPUM_VREG_EMIT(21),
    CPUM_VREG_EMIT(22),
    CPUM_VREG_EMIT(23),
    CPUM_VREG_EMIT(24),
    CPUM_VREG_EMIT(25),
    CPUM_VREG_EMIT(26),
    CPUM_VREG_EMIT(27),
    CPUM_VREG_EMIT(28),
    CPUM_VREG_EMIT(29),
    CPUM_VREG_EMIT(30),
    CPUM_VREG_EMIT(31)
#undef CPUM_VREG_EMIT
};
/** PAuth key system registers.
 *
 * Maps the pointer-authentication key halves (APDA/APDB/APGA/APIA/APIB,
 * low and high 64 bits each) to their CPUMCTX offsets. */
static const struct
{
    WHV_REGISTER_NAME enmWHvReg;    /**< The Hyper-V register name/ID. */
    uintptr_t         offCpumCtx;   /**< Byte offset of the value within CPUMCTX. */
} s_aCpumPAuthKeyRegs[] =
{
    { WHV_REGNM(ApdAKeyLoEl1), RT_UOFFSETOF(CPUMCTX, Apda.Low.u64)  },
    { WHV_REGNM(ApdAKeyHiEl1), RT_UOFFSETOF(CPUMCTX, Apda.High.u64) },
    { WHV_REGNM(ApdBKeyLoEl1), RT_UOFFSETOF(CPUMCTX, Apdb.Low.u64)  },
    { WHV_REGNM(ApdBKeyHiEl1), RT_UOFFSETOF(CPUMCTX, Apdb.High.u64) },
    { WHV_REGNM(ApgAKeyLoEl1), RT_UOFFSETOF(CPUMCTX, Apga.Low.u64)  },
    { WHV_REGNM(ApgAKeyHiEl1), RT_UOFFSETOF(CPUMCTX, Apga.High.u64) },
    { WHV_REGNM(ApiAKeyLoEl1), RT_UOFFSETOF(CPUMCTX, Apia.Low.u64)  },
    { WHV_REGNM(ApiAKeyHiEl1), RT_UOFFSETOF(CPUMCTX, Apia.High.u64) },
    { WHV_REGNM(ApiBKeyLoEl1), RT_UOFFSETOF(CPUMCTX, Apib.Low.u64)  },
    { WHV_REGNM(ApiBKeyHiEl1), RT_UOFFSETOF(CPUMCTX, Apib.High.u64) }
};
/** System registers.
 *
 * EL0/EL1 system registers with their CPUMCTX_EXTRN_XXX flag and CPUMCTX
 * offset.  Registers Hyper-V does not expose are kept in the disabled
 * block at the end for reference. */
static const struct
{
    WHV_REGISTER_NAME enmWHvReg;    /**< The Hyper-V register name/ID. */
    uint32_t          fCpumExtrn;   /**< CPUMCTX_EXTRN_XXX flag for the register. */
    uintptr_t         offCpumCtx;   /**< Byte offset of the value within CPUMCTX. */
} s_aCpumSysRegs[] =
{
    { WHV_REGNM(SpEl0),           CPUMCTX_EXTRN_SP,          RT_UOFFSETOF(CPUMCTX, aSpReg[0].u64)   },
    { WHV_REGNM(SpEl1),           CPUMCTX_EXTRN_SP,          RT_UOFFSETOF(CPUMCTX, aSpReg[1].u64)   },
    { WHV_REGNM(SpsrEl1),         CPUMCTX_EXTRN_SPSR,        RT_UOFFSETOF(CPUMCTX, Spsr.u64)        },
    { WHV_REGNM(ElrEl1),          CPUMCTX_EXTRN_ELR,         RT_UOFFSETOF(CPUMCTX, Elr.u64)         },
    { WHV_REGNM(VbarEl1),         CPUMCTX_EXTRN_SYSREG_MISC, RT_UOFFSETOF(CPUMCTX, VBar.u64)        },
    { WHV_REGNM(CntkctlEl1),      CPUMCTX_EXTRN_SYSREG_MISC, RT_UOFFSETOF(CPUMCTX, CntKCtl.u64)     },
    { WHV_REGNM(ContextidrEl1),   CPUMCTX_EXTRN_SYSREG_MISC, RT_UOFFSETOF(CPUMCTX, ContextIdr.u64)  },
    { WHV_REGNM(CpacrEl1),        CPUMCTX_EXTRN_SYSREG_MISC, RT_UOFFSETOF(CPUMCTX, Cpacr.u64)       },
    { WHV_REGNM(CsselrEl1),       CPUMCTX_EXTRN_SYSREG_MISC, RT_UOFFSETOF(CPUMCTX, Csselr.u64)      },
    { WHV_REGNM(EsrEl1),          CPUMCTX_EXTRN_SYSREG_MISC, RT_UOFFSETOF(CPUMCTX, Esr.u64)         },
    { WHV_REGNM(FarEl1),          CPUMCTX_EXTRN_SYSREG_MISC, RT_UOFFSETOF(CPUMCTX, Far.u64)         },
    { WHV_REGNM(MairEl1),         CPUMCTX_EXTRN_SYSREG_MISC, RT_UOFFSETOF(CPUMCTX, Mair.u64)        },
    { WHV_REGNM(ParEl1),          CPUMCTX_EXTRN_SYSREG_MISC, RT_UOFFSETOF(CPUMCTX, Par.u64)         },
    { WHV_REGNM(TpidrroEl0),      CPUMCTX_EXTRN_SYSREG_MISC, RT_UOFFSETOF(CPUMCTX, TpIdrRoEl0.u64)  },
    { WHV_REGNM(TpidrEl0),        CPUMCTX_EXTRN_SYSREG_MISC, RT_UOFFSETOF(CPUMCTX, aTpIdr[0].u64)   },
    { WHV_REGNM(TpidrEl1),        CPUMCTX_EXTRN_SYSREG_MISC, RT_UOFFSETOF(CPUMCTX, aTpIdr[1].u64)   },
    /* ACTLR_EL1 uses the locally defined register name (not in the SDK headers). */
    { My_WHvArm64RegisterActlrEl1, CPUMCTX_EXTRN_SYSREG_MISC, RT_UOFFSETOF(CPUMCTX, Actlr.u64)      }
#if 0 /* Not available in Hyper-V */
    { WHV_REGNM(), CPUMCTX_EXTRN_SYSREG_MISC, RT_UOFFSETOF(CPUMCTX, Afsr0.u64) },
    { WHV_REGNM(), CPUMCTX_EXTRN_SYSREG_MISC, RT_UOFFSETOF(CPUMCTX, Afsr1.u64) },
    { WHV_REGNM(), CPUMCTX_EXTRN_SYSREG_MISC, RT_UOFFSETOF(CPUMCTX, Amair.u64) },
    { WHV_REGNM(), CPUMCTX_EXTRN_SYSREG_MISC, RT_UOFFSETOF(CPUMCTX, MDccInt.u64) }
#endif
};
/** Paging registers (CPUMCTX_EXTRN_SCTLR_TCR_TTBR).
 *
 * These four share one CPUMCTX_EXTRN flag, hence no per-entry flag field.
 * NOTE(review): offCpumCtx is uint32_t here but uintptr_t in the sibling
 * tables — harmless, but consider unifying. */
static const struct
{
    WHV_REGISTER_NAME enmWHvReg;    /**< The Hyper-V register name/ID. */
    uint32_t          offCpumCtx;   /**< Byte offset of the value within CPUMCTX. */
} s_aCpumSysRegsPg[] =
{
    { WHV_REGNM(SctlrEl1),  RT_UOFFSETOF(CPUMCTX, Sctlr.u64) },
    { WHV_REGNM(TcrEl1),    RT_UOFFSETOF(CPUMCTX, Tcr.u64)   },
    { WHV_REGNM(Ttbr0El1),  RT_UOFFSETOF(CPUMCTX, Ttbr0.u64) },
    { WHV_REGNM(Ttbr1El1),  RT_UOFFSETOF(CPUMCTX, Ttbr1.u64) },
};
432
433
434/*********************************************************************************************************************************
435* Internal Functions *
436*********************************************************************************************************************************/
437DECLINLINE(int) nemR3NativeGCPhys2R3PtrReadOnly(PVM pVM, RTGCPHYS GCPhys, const void **ppv);
438DECLINLINE(int) nemR3NativeGCPhys2R3PtrWriteable(PVM pVM, RTGCPHYS GCPhys, void **ppv);
439
440
441/**
442 * Worker for nemR3NativeInit that probes and load the native API.
443 *
444 * @returns VBox status code.
445 * @param fForced Whether the HMForced flag is set and we should
446 * fail if we cannot initialize.
447 * @param pErrInfo Where to always return error info.
448 */
449static int nemR3WinInitProbeAndLoad(bool fForced, PRTERRINFO pErrInfo)
450{
451 /*
452 * Check that the DLL files we need are present, but without loading them.
453 * We'd like to avoid loading them unnecessarily.
454 */
455 WCHAR wszPath[MAX_PATH + 64];
456 UINT cwcPath = GetSystemDirectoryW(wszPath, MAX_PATH);
457 if (cwcPath >= MAX_PATH || cwcPath < 2)
458 return RTErrInfoSetF(pErrInfo, VERR_NEM_INIT_FAILED, "GetSystemDirectoryW failed (%#x / %u)", cwcPath, GetLastError());
459
460 if (wszPath[cwcPath - 1] != '\\' || wszPath[cwcPath - 1] != '/')
461 wszPath[cwcPath++] = '\\';
462 RTUtf16CopyAscii(&wszPath[cwcPath], RT_ELEMENTS(wszPath) - cwcPath, "WinHvPlatform.dll");
463 if (GetFileAttributesW(wszPath) == INVALID_FILE_ATTRIBUTES)
464 return RTErrInfoSetF(pErrInfo, VERR_NEM_NOT_AVAILABLE, "The native API dll was not found (%ls)", wszPath);
465
466 /*
467 * Check that we're in a VM and that the hypervisor identifies itself as Hyper-V.
468 */
469 /** @todo */
470
471 /** @todo would be great if we could recognize a root partition from the
472 * CPUID info, but I currently don't dare do that. */
473
474 /*
475 * Now try load the DLLs and resolve the APIs.
476 */
477 static const char * const s_apszDllNames[1] = { "WinHvPlatform.dll" };
478 RTLDRMOD ahMods[1] = { NIL_RTLDRMOD };
479 int rc = VINF_SUCCESS;
480 for (unsigned i = 0; i < RT_ELEMENTS(s_apszDllNames); i++)
481 {
482 int rc2 = RTLdrLoadSystem(s_apszDllNames[i], true /*fNoUnload*/, &ahMods[i]);
483 if (RT_FAILURE(rc2))
484 {
485 if (!RTErrInfoIsSet(pErrInfo))
486 RTErrInfoSetF(pErrInfo, rc2, "Failed to load API DLL: %s: %Rrc", s_apszDllNames[i], rc2);
487 else
488 RTErrInfoAddF(pErrInfo, rc2, "; %s: %Rrc", s_apszDllNames[i], rc2);
489 ahMods[i] = NIL_RTLDRMOD;
490 rc = VERR_NEM_INIT_FAILED;
491 }
492 }
493 if (RT_SUCCESS(rc))
494 {
495 for (unsigned i = 0; i < RT_ELEMENTS(g_aImports); i++)
496 {
497 int rc2 = RTLdrGetSymbol(ahMods[g_aImports[i].idxDll], g_aImports[i].pszName, (void **)g_aImports[i].ppfn);
498 if (RT_SUCCESS(rc2))
499 {
500 if (g_aImports[i].fOptional)
501 LogRel(("NEM: info: Found optional import %s!%s.\n",
502 s_apszDllNames[g_aImports[i].idxDll], g_aImports[i].pszName));
503 }
504 else
505 {
506 *g_aImports[i].ppfn = NULL;
507
508 LogRel(("NEM: %s: Failed to import %s!%s: %Rrc",
509 g_aImports[i].fOptional ? "info" : fForced ? "fatal" : "error",
510 s_apszDllNames[g_aImports[i].idxDll], g_aImports[i].pszName, rc2));
511 if (!g_aImports[i].fOptional)
512 {
513 if (RTErrInfoIsSet(pErrInfo))
514 RTErrInfoAddF(pErrInfo, rc2, ", %s!%s",
515 s_apszDllNames[g_aImports[i].idxDll], g_aImports[i].pszName);
516 else
517 rc = RTErrInfoSetF(pErrInfo, rc2, "Failed to import: %s!%s",
518 s_apszDllNames[g_aImports[i].idxDll], g_aImports[i].pszName);
519 Assert(RT_FAILURE(rc));
520 }
521 }
522 }
523 if (RT_SUCCESS(rc))
524 {
525 Assert(!RTErrInfoIsSet(pErrInfo));
526 }
527 }
528
529 for (unsigned i = 0; i < RT_ELEMENTS(ahMods); i++)
530 RTLdrClose(ahMods[i]);
531 return rc;
532}
533
534
535/**
536 * Wrapper for different WHvGetCapability signatures.
537 */
538DECLINLINE(HRESULT) WHvGetCapabilityWrapper(WHV_CAPABILITY_CODE enmCap, WHV_CAPABILITY *pOutput, uint32_t cbOutput)
539{
540 return g_pfnWHvGetCapability(enmCap, pOutput, cbOutput, NULL);
541}
542
543
544/**
545 * Worker for nemR3NativeInit that gets the hypervisor capabilities.
546 *
547 * @returns VBox status code.
548 * @param pVM The cross context VM structure.
549 * @param pErrInfo Where to always return error info.
550 */
551static int nemR3WinInitCheckCapabilities(PVM pVM, PRTERRINFO pErrInfo)
552{
553#define NEM_LOG_REL_CAP_EX(a_szField, a_szFmt, a_Value) LogRel(("NEM: %-38s= " a_szFmt "\n", a_szField, a_Value))
554#define NEM_LOG_REL_CAP_SUB_EX(a_szField, a_szFmt, a_Value) LogRel(("NEM: %36s: " a_szFmt "\n", a_szField, a_Value))
555#define NEM_LOG_REL_CAP_SUB(a_szField, a_Value) NEM_LOG_REL_CAP_SUB_EX(a_szField, "%d", a_Value)
556
557 WHV_CAPABILITY Caps;
558 RT_ZERO(Caps);
559 SetLastError(0);
560 HRESULT hrc = WHvGetCapabilityWrapper(WHvCapabilityCodeHypervisorPresent, &Caps, sizeof(Caps));
561 DWORD rcWin = GetLastError();
562 if (FAILED(hrc))
563 return RTErrInfoSetF(pErrInfo, VERR_NEM_INIT_FAILED,
564 "WHvGetCapability/WHvCapabilityCodeHypervisorPresent failed: %Rhrc (Last=%#x/%u)",
565 hrc, RTNtLastStatusValue(), RTNtLastErrorValue());
566 if (!Caps.HypervisorPresent)
567 {
568 if (!RTPathExists(RTPATH_NT_PASSTHRU_PREFIX "Device\\VidExo"))
569 return RTErrInfoSetF(pErrInfo, VERR_NEM_NOT_AVAILABLE,
570 "WHvCapabilityCodeHypervisorPresent is FALSE! Make sure you have enabled the 'Windows Hypervisor Platform' feature.");
571 return RTErrInfoSetF(pErrInfo, VERR_NEM_NOT_AVAILABLE, "WHvCapabilityCodeHypervisorPresent is FALSE! (%u)", rcWin);
572 }
573 LogRel(("NEM: WHvCapabilityCodeHypervisorPresent is TRUE, so this might work...\n"));
574
575
576 /*
577 * Check what extended VM exits are supported.
578 */
579 RT_ZERO(Caps);
580 hrc = WHvGetCapabilityWrapper(WHvCapabilityCodeExtendedVmExits, &Caps, sizeof(Caps));
581 if (FAILED(hrc))
582 return RTErrInfoSetF(pErrInfo, VERR_NEM_INIT_FAILED,
583 "WHvGetCapability/WHvCapabilityCodeExtendedVmExits failed: %Rhrc (Last=%#x/%u)",
584 hrc, RTNtLastStatusValue(), RTNtLastErrorValue());
585 NEM_LOG_REL_CAP_EX("WHvCapabilityCodeExtendedVmExits", "%'#018RX64", Caps.ExtendedVmExits.AsUINT64);
586 pVM->nem.s.fHypercallExit = RT_BOOL(Caps.ExtendedVmExits.HypercallExit);
587 pVM->nem.s.fGpaAccessFaultExit = RT_BOOL(Caps.ExtendedVmExits.GpaAccessFaultExit);
588 NEM_LOG_REL_CAP_SUB("fHypercallExit", pVM->nem.s.fHypercallExit);
589 NEM_LOG_REL_CAP_SUB("fGpaAccessFaultExit", pVM->nem.s.fGpaAccessFaultExit);
590 if (Caps.ExtendedVmExits.AsUINT64 & ~(uint64_t)7)
591 LogRel(("NEM: Warning! Unknown VM exit definitions: %#RX64\n", Caps.ExtendedVmExits.AsUINT64));
592 /** @todo RECHECK: WHV_EXTENDED_VM_EXITS typedef. */
593
594 /*
595 * Check features in case they end up defining any.
596 */
597 RT_ZERO(Caps);
598 hrc = WHvGetCapabilityWrapper(WHvCapabilityCodeFeatures, &Caps, sizeof(Caps));
599 if (FAILED(hrc))
600 return RTErrInfoSetF(pErrInfo, VERR_NEM_INIT_FAILED,
601 "WHvGetCapability/WHvCapabilityCodeFeatures failed: %Rhrc (Last=%#x/%u)",
602 hrc, RTNtLastStatusValue(), RTNtLastErrorValue());
603 if (Caps.Features.AsUINT64 & ~(uint64_t)0)
604 LogRel(("NEM: Warning! Unknown feature definitions: %#RX64\n", Caps.Features.AsUINT64));
605 /** @todo RECHECK: WHV_CAPABILITY_FEATURES typedef. */
606
607 /*
608 * Check that the CPU vendor is supported.
609 */
610 RT_ZERO(Caps);
611 hrc = WHvGetCapabilityWrapper(WHvCapabilityCodeProcessorVendor, &Caps, sizeof(Caps));
612 if (FAILED(hrc))
613 return RTErrInfoSetF(pErrInfo, VERR_NEM_INIT_FAILED,
614 "WHvGetCapability/WHvCapabilityCodeProcessorVendor failed: %Rhrc (Last=%#x/%u)",
615 hrc, RTNtLastStatusValue(), RTNtLastErrorValue());
616 switch (Caps.ProcessorVendor)
617 {
618 /** @todo RECHECK: WHV_PROCESSOR_VENDOR typedef. */
619 case WHvProcessorVendorArm:
620 NEM_LOG_REL_CAP_EX("WHvCapabilityCodeProcessorVendor", "%d - ARM", Caps.ProcessorVendor);
621 pVM->nem.s.enmCpuVendor = CPUMCPUVENDOR_UNKNOWN;
622 break;
623 default:
624 NEM_LOG_REL_CAP_EX("WHvCapabilityCodeProcessorVendor", "%d", Caps.ProcessorVendor);
625 return RTErrInfoSetF(pErrInfo, VERR_NEM_INIT_FAILED, "Unknown processor vendor: %d", Caps.ProcessorVendor);
626 }
627
628 /*
629 * CPU features, guessing these are virtual CPU features?
630 */
631 RT_ZERO(Caps);
632 hrc = WHvGetCapabilityWrapper(WHvCapabilityCodeProcessorFeatures, &Caps, sizeof(Caps));
633 if (FAILED(hrc))
634 return RTErrInfoSetF(pErrInfo, VERR_NEM_INIT_FAILED,
635 "WHvGetCapability/WHvCapabilityCodeProcessorFeatures failed: %Rhrc (Last=%#x/%u)",
636 hrc, RTNtLastStatusValue(), RTNtLastErrorValue());
637 NEM_LOG_REL_CAP_EX("WHvCapabilityCodeProcessorFeatures", "%'#018RX64", Caps.ProcessorFeatures.AsUINT64);
638#define NEM_LOG_REL_CPU_FEATURE(a_Field) NEM_LOG_REL_CAP_SUB(#a_Field, Caps.ProcessorFeatures.a_Field)
639 NEM_LOG_REL_CPU_FEATURE(Asid16);
640 NEM_LOG_REL_CPU_FEATURE(TGran16);
641 NEM_LOG_REL_CPU_FEATURE(TGran64);
642 NEM_LOG_REL_CPU_FEATURE(Haf);
643 NEM_LOG_REL_CPU_FEATURE(Hdbs);
644 NEM_LOG_REL_CPU_FEATURE(Pan);
645 NEM_LOG_REL_CPU_FEATURE(AtS1E1);
646 NEM_LOG_REL_CPU_FEATURE(Uao);
647 NEM_LOG_REL_CPU_FEATURE(El0Aarch32);
648 NEM_LOG_REL_CPU_FEATURE(Fp);
649 NEM_LOG_REL_CPU_FEATURE(FpHp);
650 NEM_LOG_REL_CPU_FEATURE(AdvSimd);
651 NEM_LOG_REL_CPU_FEATURE(AdvSimdHp);
652 NEM_LOG_REL_CPU_FEATURE(GicV3V4);
653 NEM_LOG_REL_CPU_FEATURE(GicV41);
654 NEM_LOG_REL_CPU_FEATURE(Ras);
655 NEM_LOG_REL_CPU_FEATURE(PmuV3);
656 NEM_LOG_REL_CPU_FEATURE(PmuV3ArmV81);
657 NEM_LOG_REL_CPU_FEATURE(PmuV3ArmV84);
658 NEM_LOG_REL_CPU_FEATURE(PmuV3ArmV85);
659 NEM_LOG_REL_CPU_FEATURE(Aes);
660 NEM_LOG_REL_CPU_FEATURE(PolyMul);
661 NEM_LOG_REL_CPU_FEATURE(Sha1);
662 NEM_LOG_REL_CPU_FEATURE(Sha256);
663 NEM_LOG_REL_CPU_FEATURE(Sha512);
664 NEM_LOG_REL_CPU_FEATURE(Crc32);
665 NEM_LOG_REL_CPU_FEATURE(Atomic);
666 NEM_LOG_REL_CPU_FEATURE(Rdm);
667 NEM_LOG_REL_CPU_FEATURE(Sha3);
668 NEM_LOG_REL_CPU_FEATURE(Sm3);
669 NEM_LOG_REL_CPU_FEATURE(Sm4);
670 NEM_LOG_REL_CPU_FEATURE(Dp);
671 NEM_LOG_REL_CPU_FEATURE(Fhm);
672 NEM_LOG_REL_CPU_FEATURE(DcCvap);
673 NEM_LOG_REL_CPU_FEATURE(DcCvadp);
674 NEM_LOG_REL_CPU_FEATURE(ApaBase);
675 NEM_LOG_REL_CPU_FEATURE(ApaEp);
676 NEM_LOG_REL_CPU_FEATURE(ApaEp2);
677 NEM_LOG_REL_CPU_FEATURE(ApaEp2Fp);
678 NEM_LOG_REL_CPU_FEATURE(ApaEp2Fpc);
679 NEM_LOG_REL_CPU_FEATURE(Jscvt);
680 NEM_LOG_REL_CPU_FEATURE(Fcma);
681 NEM_LOG_REL_CPU_FEATURE(RcpcV83);
682 NEM_LOG_REL_CPU_FEATURE(RcpcV84);
683 NEM_LOG_REL_CPU_FEATURE(Gpa);
684 NEM_LOG_REL_CPU_FEATURE(L1ipPipt);
685 NEM_LOG_REL_CPU_FEATURE(DzPermitted);
686
687#undef NEM_LOG_REL_CPU_FEATURE
688 if (Caps.ProcessorFeatures.AsUINT64 & (~(RT_BIT_64(47) - 1)))
689 LogRel(("NEM: Warning! Unknown CPU features: %#RX64\n", Caps.ProcessorFeatures.AsUINT64));
690 pVM->nem.s.uCpuFeatures.u64 = Caps.ProcessorFeatures.AsUINT64;
691 /** @todo RECHECK: WHV_PROCESSOR_FEATURES typedef. */
692
693 /*
694 * The cache line flush size.
695 */
696 RT_ZERO(Caps);
697 hrc = WHvGetCapabilityWrapper(WHvCapabilityCodeProcessorClFlushSize, &Caps, sizeof(Caps));
698 if (FAILED(hrc))
699 return RTErrInfoSetF(pErrInfo, VERR_NEM_INIT_FAILED,
700 "WHvGetCapability/WHvCapabilityCodeProcessorClFlushSize failed: %Rhrc (Last=%#x/%u)",
701 hrc, RTNtLastStatusValue(), RTNtLastErrorValue());
702 NEM_LOG_REL_CAP_EX("WHvCapabilityCodeProcessorClFlushSize", "2^%u", Caps.ProcessorClFlushSize);
703 if (Caps.ProcessorClFlushSize < 8 && Caps.ProcessorClFlushSize > 9)
704 return RTErrInfoSetF(pErrInfo, VERR_NEM_INIT_FAILED, "Unsupported cache line flush size: %u", Caps.ProcessorClFlushSize);
705 pVM->nem.s.cCacheLineFlushShift = Caps.ProcessorClFlushSize;
706
707 RT_ZERO(Caps);
708 hrc = WHvGetCapabilityWrapper(WHvCapabilityCodePhysicalAddressWidth, &Caps, sizeof(Caps));
709 if (FAILED(hrc))
710 return RTErrInfoSetF(pErrInfo, VERR_NEM_INIT_FAILED,
711 "WHvGetCapability/WHvCapabilityCodePhysicalAddressWidth failed: %Rhrc (Last=%#x/%u)",
712 hrc, RTNtLastStatusValue(), RTNtLastErrorValue());
713 NEM_LOG_REL_CAP_EX("WHvCapabilityCodePhysicalAddressWidth", "2^%u", Caps.PhysicalAddressWidth);
714 if (Caps.PhysicalAddressWidth < 32 && Caps.PhysicalAddressWidth > 52)
715 return RTErrInfoSetF(pErrInfo, VERR_NEM_INIT_FAILED, "Unsupported physical address width: %u", Caps.ProcessorClFlushSize);
716 pVM->nem.s.cPhysicalAddressWidth = Caps.PhysicalAddressWidth;
717
718
719 /*
720 * See if they've added more properties that we're not aware of.
721 */
722 /** @todo RECHECK: WHV_CAPABILITY_CODE typedef. */
723 if (!IsDebuggerPresent()) /* Too noisy when in debugger, so skip. */
724 {
725 static const struct
726 {
727 uint32_t iMin, iMax; } s_aUnknowns[] =
728 {
729 { 0x0004, 0x000f },
730 { 0x1003, 0x100f },
731 { 0x2000, 0x200f },
732 { 0x3000, 0x300f },
733 { 0x4000, 0x400f },
734 };
735 for (uint32_t j = 0; j < RT_ELEMENTS(s_aUnknowns); j++)
736 for (uint32_t i = s_aUnknowns[j].iMin; i <= s_aUnknowns[j].iMax; i++)
737 {
738 RT_ZERO(Caps);
739 hrc = WHvGetCapabilityWrapper((WHV_CAPABILITY_CODE)i, &Caps, sizeof(Caps));
740 if (SUCCEEDED(hrc))
741 LogRel(("NEM: Warning! Unknown capability %#x returning: %.*Rhxs\n", i, sizeof(Caps), &Caps));
742 }
743 }
744
745 /*
746 * For proper operation, we require CPUID exits.
747 */
748 /** @todo Any? */
749
750#undef NEM_LOG_REL_CAP_EX
751#undef NEM_LOG_REL_CAP_SUB_EX
752#undef NEM_LOG_REL_CAP_SUB
753 return VINF_SUCCESS;
754}
755
756
757/**
758 * Initializes the GIC controller emulation provided by Hyper-V.
759 *
760 * @returns VBox status code.
761 * @param pVM The cross context VM structure.
762 *
763 * @note Needs to be done early when setting up the partition so this has to live here and not in GICNem-win.cpp
764 */
765static int nemR3WinGicCreate(PVM pVM)
766{
767 PCFGMNODE pGicCfg = CFGMR3GetChild(CFGMR3GetRoot(pVM), "Devices/gic-nem/0/Config");
768 AssertPtrReturn(pGicCfg, VERR_NEM_IPE_5);
769
770 /*
771 * Query the MMIO ranges.
772 */
773 RTGCPHYS GCPhysMmioBaseDist = 0;
774 int rc = CFGMR3QueryU64(pGicCfg, "DistributorMmioBase", &GCPhysMmioBaseDist);
775 if (RT_FAILURE(rc))
776 return VMSetError(pVM, rc, RT_SRC_POS,
777 "Configuration error: Failed to get the \"DistributorMmioBase\" value\n");
778
779 RTGCPHYS GCPhysMmioBaseReDist = 0;
780 rc = CFGMR3QueryU64(pGicCfg, "RedistributorMmioBase", &GCPhysMmioBaseReDist);
781 if (RT_FAILURE(rc))
782 return VMSetError(pVM, rc, RT_SRC_POS,
783 "Configuration error: Failed to get the \"RedistributorMmioBase\" value\n");
784
785 RTGCPHYS GCPhysMmioBaseIts = 0;
786 rc = CFGMR3QueryU64(pGicCfg, "ItsMmioBase", &GCPhysMmioBaseIts);
787 if (RT_FAILURE(rc) && rc != VERR_CFGM_VALUE_NOT_FOUND)
788 return VMSetError(pVM, rc, RT_SRC_POS,
789 "Configuration error: Failed to get the \"ItsMmioBase\" value\n");
790 rc = VINF_SUCCESS;
791
792 /*
793 * One can only set the GIC distributor base. The re-distributor regions for the individual
794 * vCPUs are configured when the vCPUs are created, so we need to save the base of the MMIO region.
795 */
796 pVM->nem.s.GCPhysMmioBaseReDist = GCPhysMmioBaseReDist;
797
798 WHV_PARTITION_HANDLE hPartition = pVM->nem.s.hPartition;
799
800 MY_WHV_ARM64_IC_PARAMETERS Property; RT_ZERO(Property);
801 Property.u32EmulationMode = WHV_ARM64_IC_EMULATION_MODE_GICV3;
802 Property.u.GicV3.GCPhysGicdBase = GCPhysMmioBaseDist;
803 Property.u.GicV3.GCPhysGitsTranslaterBase = GCPhysMmioBaseIts;
804 Property.u.GicV3.cLpiIntIdBits = 1; /** @todo LPIs are currently not supported with our device emulations. */
805 Property.u.GicV3.u32PpiCntvOverflw = pVM->nem.s.u32GicPpiVTimer + 16; /* Calculate the absolute timer INTID. */
806 Property.u.GicV3.u32PpiPmu = 23; /** @todo Configure dynamically (from SBSA, needs a PMU/NEM emulation just like with the GIC probably). */
807 HRESULT hrc = WHvSetPartitionProperty(hPartition, (WHV_PARTITION_PROPERTY_CODE)WHV_PARTITION_PROPERTY_CODE_ARM64_IC_PARAMETERS, &Property, sizeof(Property));
808 if (FAILED(hrc))
809 return VMSetError(pVM, VERR_NEM_VM_CREATE_FAILED, RT_SRC_POS,
810 "Failed to set WHvPartitionPropertyCodeArm64IcParameters: %Rhrc (Last=%#x/%u)",
811 hrc, RTNtLastStatusValue(), RTNtLastErrorValue());
812
813 return rc;
814}
815
816
817/**
818 * Creates and sets up a Hyper-V (exo) partition.
819 *
820 * @returns VBox status code.
821 * @param pVM The cross context VM structure.
822 * @param pErrInfo Where to always return error info.
823 */
824static int nemR3WinInitCreatePartition(PVM pVM, PRTERRINFO pErrInfo)
825{
826 AssertReturn(!pVM->nem.s.hPartition, RTErrInfoSet(pErrInfo, VERR_WRONG_ORDER, "Wrong initalization order"));
827 AssertReturn(!pVM->nem.s.hPartitionDevice, RTErrInfoSet(pErrInfo, VERR_WRONG_ORDER, "Wrong initalization order"));
828
829 /*
830 * Create the partition.
831 */
832 WHV_PARTITION_HANDLE hPartition;
833 HRESULT hrc = WHvCreatePartition(&hPartition);
834 if (FAILED(hrc))
835 return RTErrInfoSetF(pErrInfo, VERR_NEM_VM_CREATE_FAILED, "WHvCreatePartition failed with %Rhrc (Last=%#x/%u)",
836 hrc, RTNtLastStatusValue(), RTNtLastErrorValue());
837
838 int rc;
839
840 /*
841 * Set partition properties, most importantly the CPU count.
842 */
843 /**
844 * @todo Someone at Microsoft please explain another weird API:
845 * - Why this API doesn't take the WHV_PARTITION_PROPERTY_CODE value as an
846 * argument rather than as part of the struct. That is so weird if you've
847 * used any other NT or windows API, including WHvGetCapability().
848 * - Why use PVOID when WHV_PARTITION_PROPERTY is what's expected. We
849 * technically only need 9 bytes for setting/getting
850 * WHVPartitionPropertyCodeProcessorClFlushSize, but the API insists on 16. */
851 WHV_PARTITION_PROPERTY Property;
852 RT_ZERO(Property);
853 Property.ProcessorCount = pVM->cCpus;
854 hrc = WHvSetPartitionProperty(hPartition, WHvPartitionPropertyCodeProcessorCount, &Property, sizeof(Property));
855 if (SUCCEEDED(hrc))
856 {
857 RT_ZERO(Property);
858 Property.ExtendedVmExits.HypercallExit = pVM->nem.s.fHypercallExit;
859 hrc = WHvSetPartitionProperty(hPartition, WHvPartitionPropertyCodeExtendedVmExits, &Property, sizeof(Property));
860 if (SUCCEEDED(hrc))
861 {
862 /*
863 * We'll continue setup in nemR3NativeInitAfterCPUM.
864 */
865 pVM->nem.s.fCreatedEmts = false;
866 pVM->nem.s.hPartition = hPartition;
867 LogRel(("NEM: Created partition %p.\n", hPartition));
868 return VINF_SUCCESS;
869 }
870
871 rc = RTErrInfoSetF(pErrInfo, VERR_NEM_VM_CREATE_FAILED,
872 "Failed setting WHvPartitionPropertyCodeExtendedVmExits to %'#RX64: %Rhrc",
873 Property.ExtendedVmExits.AsUINT64, hrc);
874 }
875 else
876 rc = RTErrInfoSetF(pErrInfo, VERR_NEM_VM_CREATE_FAILED,
877 "Failed setting WHvPartitionPropertyCodeProcessorCount to %u: %Rhrc (Last=%#x/%u)",
878 pVM->cCpus, hrc, RTNtLastStatusValue(), RTNtLastErrorValue());
879 WHvDeletePartition(hPartition);
880
881 Assert(!pVM->nem.s.hPartitionDevice);
882 Assert(!pVM->nem.s.hPartition);
883 return rc;
884}
885
886
/**
 * Completes the partition setup: sets the remaining partition properties,
 * configures the GIC, instantiates the partition and creates the virtual
 * processors.
 *
 * When creating vCPU 0 the partition wide AArch64 ID registers are queried,
 * fed to CPUM and the (possibly overridden) values written back.  Every vCPU
 * additionally gets its GIC re-distributor MMIO base configured.
 *
 * @returns VBox status code.
 * @param   pVM     The cross context VM structure.
 */
static int nemR3NativeInitSetupVm(PVM pVM)
{
    WHV_PARTITION_HANDLE hPartition = pVM->nem.s.hPartition;
    AssertReturn(hPartition != NULL, VERR_WRONG_ORDER);
    AssertReturn(!pVM->nem.s.hPartitionDevice, VERR_WRONG_ORDER);
    AssertReturn(!pVM->nem.s.fCreatedEmts, VERR_WRONG_ORDER);

    /*
     * Continue setting up the partition now that we've got most of the CPUID feature stuff.
     */
    WHV_PARTITION_PROPERTY Property;
    HRESULT hrc;

    /* Not sure if we really need to set the cache line flush size. */
    RT_ZERO(Property);
    Property.ProcessorClFlushSize = pVM->nem.s.cCacheLineFlushShift;
    hrc = WHvSetPartitionProperty(hPartition, WHvPartitionPropertyCodeProcessorClFlushSize, &Property, sizeof(Property));
    if (FAILED(hrc))
        return VMSetError(pVM, VERR_NEM_VM_CREATE_FAILED, RT_SRC_POS,
                          "Failed to set WHvPartitionPropertyCodeProcessorClFlushSize to %u: %Rhrc (Last=%#x/%u)",
                          pVM->nem.s.cCacheLineFlushShift, hrc, RTNtLastStatusValue(), RTNtLastErrorValue());

    /*
     * Sync CPU features with CPUM.
     */
    /** @todo sync CPU features with CPUM. */

    /* Set the partition property. */
    RT_ZERO(Property);
    Property.ProcessorFeatures.AsUINT64 = pVM->nem.s.uCpuFeatures.u64;
    hrc = WHvSetPartitionProperty(hPartition, WHvPartitionPropertyCodeProcessorFeatures, &Property, sizeof(Property));
    if (FAILED(hrc))
        return VMSetError(pVM, VERR_NEM_VM_CREATE_FAILED, RT_SRC_POS,
                          "Failed to set WHvPartitionPropertyCodeProcessorFeatures to %'#RX64: %Rhrc (Last=%#x/%u)",
                          pVM->nem.s.uCpuFeatures.u64, hrc, RTNtLastStatusValue(), RTNtLastErrorValue());

    /* Configure the GIC. */
    int rc = nemR3WinGicCreate(pVM);
    if (RT_FAILURE(rc))
        return rc;

    /*
     * Set up the partition.
     *
     * Seems like this is where the partition is actually instantiated and we get
     * a handle to it.
     */
    hrc = WHvSetupPartition(hPartition);
    if (FAILED(hrc))
        return VMSetError(pVM, VERR_NEM_VM_CREATE_FAILED, RT_SRC_POS,
                          "Call to WHvSetupPartition failed: %Rhrc (Last=%#x/%u)",
                          hrc, RTNtLastStatusValue(), RTNtLastErrorValue());

    /*
     * Setup the EMTs.
     */
    for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
    {
        hrc = WHvCreateVirtualProcessor(hPartition, idCpu, 0 /*fFlags*/);
        if (FAILED(hrc))
        {
            /* Capture the error details before the delete calls below can clobber them. */
            NTSTATUS const rcNtLast  = RTNtLastStatusValue();
            DWORD const    dwErrLast = RTNtLastErrorValue();
            /* Roll back the vCPUs we already managed to create. */
            while (idCpu-- > 0)
            {
                HRESULT hrc2 = WHvDeleteVirtualProcessor(hPartition, idCpu);
                AssertLogRelMsg(SUCCEEDED(hrc2), ("WHvDeleteVirtualProcessor(%p, %u) -> %Rhrc (Last=%#x/%u)\n",
                                                  hPartition, idCpu, hrc2, RTNtLastStatusValue(),
                                                  RTNtLastErrorValue()));
            }
            return VMSetError(pVM, VERR_NEM_VM_CREATE_FAILED, RT_SRC_POS,
                              "Call to WHvCreateVirtualProcessor failed: %Rhrc (Last=%#x/%u)", hrc, rcNtLast, dwErrLast);
        }

        if (idCpu == 0)
        {
            /*
             * Need to query the ID registers and populate CPUM,
             * these are partition wide registers and need to be queried/set with WHV_ANY_VP.
             */
            CPUMARMV8IDREGS IdRegs; RT_ZERO(IdRegs);

            WHV_REGISTER_NAME  aenmNames[10];
            WHV_REGISTER_VALUE aValues[10];
            RT_ZERO(aValues);

            aenmNames[0] = WHvArm64RegisterIdAa64Dfr0El1;
            aenmNames[1] = WHvArm64RegisterIdAa64Dfr1El1;
            aenmNames[2] = WHvArm64RegisterIdAa64Isar0El1;
            aenmNames[3] = WHvArm64RegisterIdAa64Isar1El1;
            aenmNames[4] = WHvArm64RegisterIdAa64Isar2El1;
            aenmNames[5] = WHvArm64RegisterIdAa64Mmfr0El1;
            aenmNames[6] = WHvArm64RegisterIdAa64Mmfr1El1;
            aenmNames[7] = WHvArm64RegisterIdAa64Mmfr2El1;
            aenmNames[8] = WHvArm64RegisterIdAa64Pfr0El1;
            aenmNames[9] = WHvArm64RegisterIdAa64Pfr1El1;

            hrc = WHvGetVirtualProcessorRegisters(hPartition, WHV_ANY_VP /*idCpu*/, aenmNames, RT_ELEMENTS(aenmNames), aValues);
            AssertLogRelMsgReturn(SUCCEEDED(hrc),
                                  ("WHvGetVirtualProcessorRegisters(%p, %u,,%u,) -> %Rhrc (Last=%#x/%u)\n",
                                   hPartition, WHV_ANY_VP, RT_ELEMENTS(aenmNames), hrc, RTNtLastStatusValue(), RTNtLastErrorValue())
                                  , VERR_NEM_GET_REGISTERS_FAILED);

            /* Note! The aValues[] indices below deliberately follow the aenmNames[]
                     order above, NOT the order of the IdRegs fields.  Keep them in
                     sync when touching either list. */
            IdRegs.u64RegIdAa64Pfr0El1  = aValues[8].Reg64;
            IdRegs.u64RegIdAa64Pfr1El1  = aValues[9].Reg64;
            IdRegs.u64RegIdAa64Dfr0El1  = aValues[0].Reg64;
            IdRegs.u64RegIdAa64Dfr1El1  = aValues[1].Reg64;
            IdRegs.u64RegIdAa64Isar0El1 = aValues[2].Reg64;
            IdRegs.u64RegIdAa64Isar1El1 = aValues[3].Reg64;
            IdRegs.u64RegIdAa64Isar2El1 = aValues[4].Reg64;
            IdRegs.u64RegIdAa64Mmfr0El1 = aValues[5].Reg64;
            IdRegs.u64RegIdAa64Mmfr1El1 = aValues[6].Reg64;
            IdRegs.u64RegIdAa64Mmfr2El1 = aValues[7].Reg64;

            rc = CPUMR3PopulateFeaturesByIdRegisters(pVM, &IdRegs);
            if (RT_FAILURE(rc))
                return rc;

            /* Apply any overrides to the partition. */
            PCCPUMARMV8IDREGS pIdRegsGst = NULL;
            rc = CPUMR3QueryGuestIdRegs(pVM, &pIdRegsGst);
            AssertRCReturn(rc, rc);

            /* Same index mapping as above: aValues[] follows the aenmNames[] order. */
            aValues[0].Reg64 = pIdRegsGst->u64RegIdAa64Dfr0El1;
            aValues[1].Reg64 = pIdRegsGst->u64RegIdAa64Dfr1El1;
            aValues[2].Reg64 = pIdRegsGst->u64RegIdAa64Isar0El1;
            aValues[3].Reg64 = pIdRegsGst->u64RegIdAa64Isar1El1;
            aValues[4].Reg64 = pIdRegsGst->u64RegIdAa64Isar2El1;
            aValues[5].Reg64 = pIdRegsGst->u64RegIdAa64Mmfr0El1;
            aValues[6].Reg64 = pIdRegsGst->u64RegIdAa64Mmfr1El1;
            aValues[7].Reg64 = pIdRegsGst->u64RegIdAa64Mmfr2El1;
            aValues[8].Reg64 = pIdRegsGst->u64RegIdAa64Pfr0El1;
            aValues[9].Reg64 = pIdRegsGst->u64RegIdAa64Pfr1El1;

            /** @todo r=review: the assert message below names the Get API but this is
             *        the WHvSetVirtualProcessorRegisters call. */
            hrc = WHvSetVirtualProcessorRegisters(hPartition, WHV_ANY_VP /*idCpu*/, aenmNames, RT_ELEMENTS(aenmNames), aValues);
            AssertLogRelMsgReturn(SUCCEEDED(hrc),
                                  ("WHvGetVirtualProcessorRegisters(%p, %u,,%u,) -> %Rhrc (Last=%#x/%u)\n",
                                   hPartition, WHV_ANY_VP, RT_ELEMENTS(aenmNames), hrc, RTNtLastStatusValue(), RTNtLastErrorValue())
                                  , VERR_NEM_SET_REGISTERS_FAILED);

            /* Save the amount of break-/watchpoints supported for syncing the guest register state later. */
            pVM->nem.s.cBreakpoints = RT_BF_GET(pIdRegsGst->u64RegIdAa64Dfr0El1, ARMV8_ID_AA64DFR0_EL1_BRPS) + 1;
            pVM->nem.s.cWatchpoints = RT_BF_GET(pIdRegsGst->u64RegIdAa64Dfr0El1, ARMV8_ID_AA64DFR0_EL1_WRPS) + 1;
        }

        /* Configure the GIC re-distributor region for the GIC. */
        WHV_REGISTER_NAME  enmName = My_WHvArm64RegisterGicrBaseGpa;
        WHV_REGISTER_VALUE Value;
        Value.Reg64 = pVM->nem.s.GCPhysMmioBaseReDist + idCpu * _128K;

        hrc = WHvSetVirtualProcessorRegisters(hPartition, idCpu, &enmName, 1, &Value);
        AssertLogRelMsgReturn(SUCCEEDED(hrc),
                              ("WHvSetVirtualProcessorRegisters(%p, %u, WHvArm64RegisterGicrBaseGpa,) -> %Rhrc (Last=%#x/%u)\n",
                               hPartition, idCpu, hrc, RTNtLastStatusValue(), RTNtLastErrorValue())
                              , VERR_NEM_SET_REGISTERS_FAILED);
    }

    pVM->nem.s.fCreatedEmts = true;

    LogRel(("NEM: Successfully set up partition\n"));
    return VINF_SUCCESS;
}
1049
1050
1051/**
1052 * Try initialize the native API.
1053 *
1054 * This may only do part of the job, more can be done in
1055 * nemR3NativeInitAfterCPUM() and nemR3NativeInitCompleted().
1056 *
1057 * @returns VBox status code.
1058 * @param pVM The cross context VM structure.
1059 * @param fFallback Whether we're in fallback mode or use-NEM mode. In
1060 * the latter we'll fail if we cannot initialize.
1061 * @param fForced Whether the HMForced flag is set and we should
1062 * fail if we cannot initialize.
1063 */
int nemR3NativeInit(PVM pVM, bool fFallback, bool fForced)
{
    /* Cache the NT build number. */
    g_uBuildNo = RTSystemGetNtBuildNo();

    /*
     * Error state.
     * The error message will be non-empty on failure and 'rc' will be set too.
     */
    RTERRINFOSTATIC ErrInfo;
    PRTERRINFO pErrInfo = RTErrInfoInitStatic(&ErrInfo);
    int rc = nemR3WinInitProbeAndLoad(fForced, pErrInfo);
    if (RT_SUCCESS(rc))
    {
        /*
         * Check the capabilties of the hypervisor, starting with whether it's present.
         */
        rc = nemR3WinInitCheckCapabilities(pVM, pErrInfo);
        if (RT_SUCCESS(rc))
        {
            /*
             * Create and initialize a partition.
             */
            rc = nemR3WinInitCreatePartition(pVM, pErrInfo);
            if (RT_SUCCESS(rc))
            {
                rc = nemR3NativeInitSetupVm(pVM);
                if (RT_SUCCESS(rc))
                {
                    /*
                     * Set ourselves as the execution engine and make config adjustments.
                     */
                    VM_SET_MAIN_EXECUTION_ENGINE(pVM, VM_EXEC_ENGINE_NATIVE_API);
                    Log(("NEM: Marked active!\n"));
                    PGMR3EnableNemMode(pVM);

                    /*
                     * Register release statistics
                     */
                    STAMR3Register(pVM, (void *)&pVM->nem.s.cMappedPages, STAMTYPE_U32, STAMVISIBILITY_ALWAYS,
                                   "/NEM/PagesCurrentlyMapped", STAMUNIT_PAGES, "Number guest pages currently mapped by the VM");
                    STAMR3Register(pVM, (void *)&pVM->nem.s.StatMapPage, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS,
                                   "/NEM/PagesMapCalls", STAMUNIT_PAGES, "Calls to WHvMapGpaRange/HvCallMapGpaPages");
                    STAMR3Register(pVM, (void *)&pVM->nem.s.StatMapPageFailed, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS,
                                   "/NEM/PagesMapFails", STAMUNIT_PAGES, "Calls to WHvMapGpaRange/HvCallMapGpaPages that failed");
                    STAMR3Register(pVM, (void *)&pVM->nem.s.StatUnmapPage, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS,
                                   "/NEM/PagesUnmapCalls", STAMUNIT_PAGES, "Calls to WHvUnmapGpaRange/HvCallUnmapGpaPages");
                    STAMR3Register(pVM, (void *)&pVM->nem.s.StatUnmapPageFailed, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS,
                                   "/NEM/PagesUnmapFails", STAMUNIT_PAGES, "Calls to WHvUnmapGpaRange/HvCallUnmapGpaPages that failed");
                    STAMR3Register(pVM, &pVM->nem.s.StatProfMapGpaRange, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS,
                                   "/NEM/PagesMapGpaRange", STAMUNIT_TICKS_PER_CALL, "Profiling calls to WHvMapGpaRange for bigger stuff");
                    STAMR3Register(pVM, &pVM->nem.s.StatProfUnmapGpaRange, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS,
                                   "/NEM/PagesUnmapGpaRange", STAMUNIT_TICKS_PER_CALL, "Profiling calls to WHvUnmapGpaRange for bigger stuff");
                    STAMR3Register(pVM, &pVM->nem.s.StatProfMapGpaRangePage, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS,
                                   "/NEM/PagesMapGpaRangePage", STAMUNIT_TICKS_PER_CALL, "Profiling calls to WHvMapGpaRange for single pages");
                    STAMR3Register(pVM, &pVM->nem.s.StatProfUnmapGpaRangePage, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS,
                                   "/NEM/PagesUnmapGpaRangePage", STAMUNIT_TICKS_PER_CALL, "Profiling calls to WHvUnmapGpaRange for single pages");

                    /* Per-vCPU exit/state-transfer counters. */
                    for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
                    {
                        PNEMCPU pNemCpu = &pVM->apCpusR3[idCpu]->nem.s;
                        STAMR3RegisterF(pVM, &pNemCpu->StatExitPortIo, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of port I/O exits", "/NEM/CPU%u/ExitPortIo", idCpu);
                        STAMR3RegisterF(pVM, &pNemCpu->StatExitMemUnmapped, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of unmapped memory exits", "/NEM/CPU%u/ExitMemUnmapped", idCpu);
                        STAMR3RegisterF(pVM, &pNemCpu->StatExitMemIntercept, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of intercepted memory exits", "/NEM/CPU%u/ExitMemIntercept", idCpu);
                        STAMR3RegisterF(pVM, &pNemCpu->StatExitHalt, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of HLT exits", "/NEM/CPU%u/ExitHalt", idCpu);
                        STAMR3RegisterF(pVM, &pNemCpu->StatExitInterruptWindow, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of interrupt window exits", "/NEM/CPU%u/ExitInterruptWindow", idCpu);
                        STAMR3RegisterF(pVM, &pNemCpu->StatExitCpuId, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of CPUID exits", "/NEM/CPU%u/ExitCpuId", idCpu);
                        STAMR3RegisterF(pVM, &pNemCpu->StatExitMsr, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of MSR access exits", "/NEM/CPU%u/ExitMsr", idCpu);
                        STAMR3RegisterF(pVM, &pNemCpu->StatExitException, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of exception exits", "/NEM/CPU%u/ExitException", idCpu);
                        STAMR3RegisterF(pVM, &pNemCpu->StatExitExceptionBp, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of #BP exits", "/NEM/CPU%u/ExitExceptionBp", idCpu);
                        STAMR3RegisterF(pVM, &pNemCpu->StatExitExceptionDb, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of #DB exits", "/NEM/CPU%u/ExitExceptionDb", idCpu);
                        STAMR3RegisterF(pVM, &pNemCpu->StatExitExceptionGp, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of #GP exits", "/NEM/CPU%u/ExitExceptionGp", idCpu);
                        STAMR3RegisterF(pVM, &pNemCpu->StatExitExceptionGpMesa, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of #GP exits from mesa driver", "/NEM/CPU%u/ExitExceptionGpMesa", idCpu);
                        STAMR3RegisterF(pVM, &pNemCpu->StatExitExceptionUd, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of #UD exits", "/NEM/CPU%u/ExitExceptionUd", idCpu);
                        STAMR3RegisterF(pVM, &pNemCpu->StatExitExceptionUdHandled, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of handled #UD exits", "/NEM/CPU%u/ExitExceptionUdHandled", idCpu);
                        STAMR3RegisterF(pVM, &pNemCpu->StatExitUnrecoverable, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of unrecoverable exits", "/NEM/CPU%u/ExitUnrecoverable", idCpu);
                        STAMR3RegisterF(pVM, &pNemCpu->StatGetMsgTimeout, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of get message timeouts/alerts", "/NEM/CPU%u/GetMsgTimeout", idCpu);
                        STAMR3RegisterF(pVM, &pNemCpu->StatStopCpuSuccess, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of successful CPU stops", "/NEM/CPU%u/StopCpuSuccess", idCpu);
                        STAMR3RegisterF(pVM, &pNemCpu->StatStopCpuPending, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of pending CPU stops", "/NEM/CPU%u/StopCpuPending", idCpu);
                        STAMR3RegisterF(pVM, &pNemCpu->StatStopCpuPendingAlerts,STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of pending CPU stop alerts", "/NEM/CPU%u/StopCpuPendingAlerts", idCpu);
                        STAMR3RegisterF(pVM, &pNemCpu->StatStopCpuPendingOdd, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of odd pending CPU stops (see code)", "/NEM/CPU%u/StopCpuPendingOdd", idCpu);
                        STAMR3RegisterF(pVM, &pNemCpu->StatCancelChangedState, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of cancel changed state", "/NEM/CPU%u/CancelChangedState", idCpu);
                        STAMR3RegisterF(pVM, &pNemCpu->StatCancelAlertedThread, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of cancel alerted EMT", "/NEM/CPU%u/CancelAlertedEMT", idCpu);
                        STAMR3RegisterF(pVM, &pNemCpu->StatBreakOnFFPre, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of pre execution FF breaks", "/NEM/CPU%u/BreakOnFFPre", idCpu);
                        STAMR3RegisterF(pVM, &pNemCpu->StatBreakOnFFPost, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of post execution FF breaks", "/NEM/CPU%u/BreakOnFFPost", idCpu);
                        STAMR3RegisterF(pVM, &pNemCpu->StatBreakOnCancel, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of cancel execution breaks", "/NEM/CPU%u/BreakOnCancel", idCpu);
                        STAMR3RegisterF(pVM, &pNemCpu->StatBreakOnStatus, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of status code breaks", "/NEM/CPU%u/BreakOnStatus", idCpu);
                        STAMR3RegisterF(pVM, &pNemCpu->StatImportOnDemand, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of on-demand state imports", "/NEM/CPU%u/ImportOnDemand", idCpu);
                        STAMR3RegisterF(pVM, &pNemCpu->StatImportOnReturn, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of state imports on loop return", "/NEM/CPU%u/ImportOnReturn", idCpu);
                        STAMR3RegisterF(pVM, &pNemCpu->StatImportOnReturnSkipped, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of skipped state imports on loop return", "/NEM/CPU%u/ImportOnReturnSkipped", idCpu);
                        STAMR3RegisterF(pVM, &pNemCpu->StatQueryCpuTick, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of TSC queries", "/NEM/CPU%u/QueryCpuTick", idCpu);
                    }

#if defined(VBOX_WITH_R0_MODULES) && !defined(VBOX_WITH_MINIMAL_R0)
                    /* Hypervisor page pool stats are only reachable with a ring-0 driver. */
                    if (!SUPR3IsDriverless())
                    {
                        PUVM pUVM = pVM->pUVM;
                        STAMR3RegisterRefresh(pUVM, &pVM->nem.s.R0Stats.cPagesAvailable, STAMTYPE_U64, STAMVISIBILITY_ALWAYS,
                                              STAMUNIT_PAGES, STAM_REFRESH_GRP_NEM, "Free pages available to the hypervisor",
                                              "/NEM/R0Stats/cPagesAvailable");
                        STAMR3RegisterRefresh(pUVM, &pVM->nem.s.R0Stats.cPagesInUse, STAMTYPE_U64, STAMVISIBILITY_ALWAYS,
                                              STAMUNIT_PAGES, STAM_REFRESH_GRP_NEM, "Pages in use by hypervisor",
                                              "/NEM/R0Stats/cPagesInUse");
                    }
#endif /* VBOX_WITH_R0_MODULES && !VBOX_WITH_MINIMAL_R0 */
                }
            }
        }
    }

    /*
     * We only fail if in forced mode, otherwise just log the complaint and return.
     */
    Assert(pVM->bMainExecutionEngine == VM_EXEC_ENGINE_NATIVE_API || RTErrInfoIsSet(pErrInfo));
    if (   (fForced || !fFallback)
        && pVM->bMainExecutionEngine != VM_EXEC_ENGINE_NATIVE_API)
        return VMSetError(pVM, RT_SUCCESS_NP(rc) ? VERR_NEM_NOT_AVAILABLE : rc, RT_SRC_POS, "%s", pErrInfo->pszMsg);

    if (RTErrInfoIsSet(pErrInfo))
        LogRel(("NEM: Not available: %s\n", pErrInfo->pszMsg));
    return VINF_SUCCESS;
}
1185
1186
1187/**
1188 * This is called after CPUMR3Init is done.
1189 *
1190 * @returns VBox status code.
1191 * @param pVM The VM handle..
1192 */
1193int nemR3NativeInitAfterCPUM(PVM pVM)
1194{
1195 /*
1196 * Validate sanity.
1197 */
1198 AssertReturn(pVM->bMainExecutionEngine == VM_EXEC_ENGINE_NATIVE_API, VERR_WRONG_ORDER);
1199
1200 /** @todo */
1201
1202 /*
1203 * Any hyper-v statistics we can get at now? HvCallMapStatsPage isn't accessible any more.
1204 */
1205 /** @todo stats */
1206
1207 /*
1208 * Adjust features.
1209 *
1210 * Note! We've already disabled X2APIC and MONITOR/MWAIT via CFGM during
1211 * the first init call.
1212 */
1213
1214 return VINF_SUCCESS;
1215}
1216
1217
1218/**
1219 * Execute state save operation.
1220 *
1221 * @returns VBox status code.
1222 * @param pVM The cross context VM structure.
1223 * @param pSSM SSM operation handle.
1224 */
1225static DECLCALLBACK(int) nemR3Save(PVM pVM, PSSMHANDLE pSSM)
1226{
1227 /*
1228 * Save the Hyper-V activity state for all CPUs.
1229 */
1230 for (VMCPUID i = 0; i < pVM->cCpus; i++)
1231 {
1232 PVMCPUCC pVCpu = pVM->apCpusR3[i];
1233
1234 static const WHV_REGISTER_NAME s_Name = WHvRegisterInternalActivityState;
1235 WHV_REGISTER_VALUE Reg;
1236
1237 HRESULT hrc = WHvGetVirtualProcessorRegisters(pVM->nem.s.hPartition, pVCpu->idCpu, &s_Name, 1, &Reg);
1238 AssertLogRelMsgReturn(SUCCEEDED(hrc),
1239 ("WHvSetVirtualProcessorRegisters(%p, 0,{WHvRegisterInternalActivityState}, 1,) -> %Rhrc (Last=%#x/%u)\n",
1240 pVM->nem.s.hPartition, pVCpu->idCpu, hrc, RTNtLastStatusValue(), RTNtLastErrorValue())
1241 , VERR_NEM_IPE_9);
1242
1243 SSMR3PutU64(pSSM, Reg.Reg64);
1244 }
1245
1246 return SSMR3PutU32(pSSM, UINT32_MAX); /* terminator */
1247}
1248
1249
1250/**
1251 * Execute state load operation.
1252 *
1253 * @returns VBox status code.
1254 * @param pVM The cross context VM structure.
1255 * @param pSSM SSM operation handle.
1256 * @param uVersion Data layout version.
1257 * @param uPass The data pass.
1258 */
1259static DECLCALLBACK(int) nemR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
1260{
1261 Assert(uPass == SSM_PASS_FINAL); NOREF(uPass);
1262
1263 /*
1264 * Validate version.
1265 */
1266 if (uVersion != 1)
1267 {
1268 AssertMsgFailed(("nemR3Load: Invalid version uVersion=%u!\n", uVersion));
1269 return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
1270 }
1271
1272 /*
1273 * Restore the Hyper-V activity states for all vCPUs.
1274 */
1275 VMCPU_SET_STATE(pVM->apCpusR3[0], VMCPUSTATE_STARTED);
1276 for (VMCPUID i = 0; i < pVM->cCpus; i++)
1277 {
1278 PVMCPUCC pVCpu = pVM->apCpusR3[i];
1279
1280 static const WHV_REGISTER_NAME s_Name = WHvRegisterInternalActivityState;
1281 WHV_REGISTER_VALUE Reg;
1282 int rc = SSMR3GetU64(pSSM, &Reg.Reg64);
1283 if (RT_FAILURE(rc))
1284 return rc;
1285
1286 HRESULT hrc = WHvSetVirtualProcessorRegisters(pVM->nem.s.hPartition, pVCpu->idCpu, &s_Name, 1, &Reg);
1287 AssertLogRelMsgReturn(SUCCEEDED(hrc),
1288 ("WHvSetVirtualProcessorRegisters(%p, 0,{WHvRegisterInternalActivityState}, 1,) -> %Rhrc (Last=%#x/%u)\n",
1289 pVM->nem.s.hPartition, pVCpu->idCpu, hrc, RTNtLastStatusValue(), RTNtLastErrorValue())
1290 , VERR_NEM_IPE_9);
1291 }
1292
1293 /* terminator */
1294 uint32_t u32;
1295 int rc = SSMR3GetU32(pSSM, &u32);
1296 if (RT_FAILURE(rc))
1297 return rc;
1298 if (u32 != UINT32_MAX)
1299 {
1300 AssertMsgFailed(("u32=%#x\n", u32));
1301 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
1302 }
1303 return VINF_SUCCESS;
1304}
1305
1306
1307int nemR3NativeInitCompleted(PVM pVM, VMINITCOMPLETED enmWhat)
1308{
1309 //BOOL fRet = SetThreadPriority(GetCurrentThread(), 0);
1310 //AssertLogRel(fRet);
1311
1312 if (enmWhat == VMINITCOMPLETED_RING3)
1313 {
1314 /*
1315 * Register the saved state data unit.
1316 */
1317 int rc = SSMR3RegisterInternal(pVM, "nem-win", 1, NEM_HV_SAVED_STATE_VERSION,
1318 sizeof(uint64_t),
1319 NULL, NULL, NULL,
1320 NULL, nemR3Save, NULL,
1321 NULL, nemR3Load, NULL);
1322 if (RT_FAILURE(rc))
1323 return rc;
1324 }
1325
1326 NOREF(pVM); NOREF(enmWhat);
1327 return VINF_SUCCESS;
1328}
1329
1330
1331int nemR3NativeTerm(PVM pVM)
1332{
1333 /*
1334 * Delete the partition.
1335 */
1336 WHV_PARTITION_HANDLE hPartition = pVM->nem.s.hPartition;
1337 pVM->nem.s.hPartition = NULL;
1338 pVM->nem.s.hPartitionDevice = NULL;
1339 if (hPartition != NULL)
1340 {
1341 VMCPUID idCpu = pVM->nem.s.fCreatedEmts ? pVM->cCpus : 0;
1342 LogRel(("NEM: Destroying partition %p with its %u VCpus...\n", hPartition, idCpu));
1343 while (idCpu-- > 0)
1344 {
1345 HRESULT hrc = WHvDeleteVirtualProcessor(hPartition, idCpu);
1346 AssertLogRelMsg(SUCCEEDED(hrc), ("WHvDeleteVirtualProcessor(%p, %u) -> %Rhrc (Last=%#x/%u)\n",
1347 hPartition, idCpu, hrc, RTNtLastStatusValue(),
1348 RTNtLastErrorValue()));
1349 }
1350 WHvDeletePartition(hPartition);
1351 }
1352 pVM->nem.s.fCreatedEmts = false;
1353 return VINF_SUCCESS;
1354}
1355
1356
1357/**
1358 * VM reset notification.
1359 *
1360 * @param pVM The cross context VM structure.
1361 */
1362void nemR3NativeReset(PVM pVM)
1363{
1364 RT_NOREF(pVM);
1365}
1366
1367
1368/**
1369 * Reset CPU due to INIT IPI or hot (un)plugging.
1370 *
1371 * @param pVCpu The cross context virtual CPU structure of the CPU being
1372 * reset.
1373 * @param fInitIpi Whether this is the INIT IPI or hot (un)plugging case.
1374 */
1375void nemR3NativeResetCpu(PVMCPU pVCpu, bool fInitIpi)
1376{
1377 RT_NOREF(pVCpu, fInitIpi);
1378}
1379
1380
/**
 * Exports (copies) the guest CPU state bits CPUM currently owns - i.e. the
 * bits NOT marked external in fExtrn - to Hyper-V.
 *
 * On success the whole context is marked external again (plus the NEM keeper
 * bit), since the authoritative copy then lives in Hyper-V.
 *
 * @returns VBox status code (VERR_INTERNAL_ERROR if the WHv call fails).
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context virtual CPU structure.
 */
NEM_TMPL_STATIC int nemHCWinCopyStateToHyperV(PVMCC pVM, PVMCPUCC pVCpu)
{
    WHV_REGISTER_NAME  aenmNames[128];
    WHV_REGISTER_VALUE aValues[128];

    /* The bits NOT marked external are the ones CPUM owns and must push out. */
    uint64_t const fWhat = ~pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_ALL;
    if (!fWhat)
        return VINF_SUCCESS;
    uintptr_t iReg = 0;

    /* Queue a 64-bit register; the value is a CPUMCTXGREG ('.x' member). */
#define ADD_REG64(a_enmName, a_uValue) do { \
            aenmNames[iReg]      = (a_enmName); \
            aValues[iReg].Reg128.High64 = 0; \
            aValues[iReg].Reg64  = (a_uValue).x; \
            iReg++; \
        } while (0)
    /* Queue a 64-bit register from a raw uint64_t value. */
#define ADD_REG64_RAW(a_enmName, a_uValue) do { \
            aenmNames[iReg]      = (a_enmName); \
            aValues[iReg].Reg128.High64 = 0; \
            aValues[iReg].Reg64  = (a_uValue); \
            iReg++; \
        } while (0)
    /* Queue a 64-bit system register ('.u64' member). */
#define ADD_SYSREG64(a_enmName, a_uValue) do { \
            aenmNames[iReg]      = (a_enmName); \
            aValues[iReg].Reg128.High64 = 0; \
            aValues[iReg].Reg64  = (a_uValue).u64; \
            iReg++; \
        } while (0)
    /* Queue a 128-bit (vector) register. */
#define ADD_REG128(a_enmName, a_uValue) do { \
            aenmNames[iReg] = (a_enmName); \
            aValues[iReg].Reg128.Low64  = (a_uValue).au64[0]; \
            aValues[iReg].Reg128.High64 = (a_uValue).au64[1]; \
            iReg++; \
        } while (0)

    /* GPRs, PC, FPCR and FPSR - mapped via the s_aCpumRegs table (defined
       earlier in this file). */
    if (fWhat & (CPUMCTX_EXTRN_GPRS_MASK | CPUMCTX_EXTRN_PC | CPUMCTX_EXTRN_FPCR | CPUMCTX_EXTRN_FPSR))
    {
        for (uint32_t i = 0; i < RT_ELEMENTS(s_aCpumRegs); i++)
        {
            if (s_aCpumRegs[i].fCpumExtrn & fWhat)
            {
                const CPUMCTXGREG *pReg = (const CPUMCTXGREG *)((uint8_t *)&pVCpu->cpum.GstCtx + s_aCpumRegs[i].offCpumCtx);
                ADD_REG64(s_aCpumRegs[i].enmWHvReg, *pReg);
            }
        }
    }

    if (fWhat & CPUMCTX_EXTRN_V0_V31)
    {
        /* SIMD/FP registers. */
        for (uint32_t i = 0; i < RT_ELEMENTS(s_aCpumFpRegs); i++)
        {
            PCCPUMCTXVREG pVReg = (PCCPUMCTXVREG)((uint8_t *)&pVCpu->cpum.GstCtx + s_aCpumFpRegs[i].offCpumCtx);
            ADD_REG128(s_aCpumFpRegs[i].enmWHvReg, *pVReg);
        }
    }

    /* Debug registers: only as many break-/watchpoints as the partition
       reported during setup (see nemR3NativeInitSetupVm). */
    if (fWhat & CPUMCTX_EXTRN_SYSREG_DEBUG)
    {
        for (uint32_t i = 0; i < pVM->nem.s.cBreakpoints; i++)
        {
            ADD_SYSREG64((WHV_REGISTER_NAME)((uint32_t)WHvArm64RegisterDbgbcr0El1 + i), pVCpu->cpum.GstCtx.aBp[i].Ctrl);
            ADD_SYSREG64((WHV_REGISTER_NAME)((uint32_t)WHvArm64RegisterDbgbvr0El1 + i), pVCpu->cpum.GstCtx.aBp[i].Value);
        }

        for (uint32_t i = 0; i < pVM->nem.s.cWatchpoints; i++)
        {
            ADD_SYSREG64((WHV_REGISTER_NAME)((uint32_t)WHvArm64RegisterDbgwcr0El1 + i), pVCpu->cpum.GstCtx.aWp[i].Ctrl);
            ADD_SYSREG64((WHV_REGISTER_NAME)((uint32_t)WHvArm64RegisterDbgwvr0El1 + i), pVCpu->cpum.GstCtx.aWp[i].Value);
        }

        ADD_SYSREG64(WHvArm64RegisterMdscrEl1, pVCpu->cpum.GstCtx.Mdscr);
    }

    if (fWhat & CPUMCTX_EXTRN_SYSREG_PAUTH_KEYS)
    {
        /* PAuth registers. */
        for (uint32_t i = 0; i < RT_ELEMENTS(s_aCpumPAuthKeyRegs); i++)
        {
            const CPUMCTXSYSREG *pReg = (const CPUMCTXSYSREG *)((uint8_t *)&pVCpu->cpum.GstCtx + s_aCpumPAuthKeyRegs[i].offCpumCtx);
            ADD_SYSREG64(s_aCpumPAuthKeyRegs[i].enmWHvReg, *pReg);
        }
    }

    if (fWhat & (CPUMCTX_EXTRN_SPSR | CPUMCTX_EXTRN_ELR | CPUMCTX_EXTRN_SP | CPUMCTX_EXTRN_SYSREG_MISC))
    {
        /* System registers. */
        for (uint32_t i = 0; i < RT_ELEMENTS(s_aCpumSysRegs); i++)
        {
            if (s_aCpumSysRegs[i].fCpumExtrn & fWhat)
            {
                const CPUMCTXSYSREG *pReg = (const CPUMCTXSYSREG *)((uint8_t *)&pVCpu->cpum.GstCtx + s_aCpumSysRegs[i].offCpumCtx);
                ADD_SYSREG64(s_aCpumSysRegs[i].enmWHvReg, *pReg);
            }
        }
    }

    if (fWhat & CPUMCTX_EXTRN_SCTLR_TCR_TTBR)
    {
        /* Paging related system registers. */
        for (uint32_t i = 0; i < RT_ELEMENTS(s_aCpumSysRegsPg); i++)
        {
            const CPUMCTXSYSREG *pReg = (const CPUMCTXSYSREG *)((uint8_t *)&pVCpu->cpum.GstCtx + s_aCpumSysRegsPg[i].offCpumCtx);
            ADD_SYSREG64(s_aCpumSysRegsPg[i].enmWHvReg, *pReg);
        }
    }

    if (fWhat & CPUMCTX_EXTRN_PSTATE)
        ADD_REG64_RAW(WHvArm64RegisterPstate, pVCpu->cpum.GstCtx.fPState);

#undef ADD_REG64
#undef ADD_REG64_RAW
#undef ADD_REG128

    /*
     * Set the registers.
     */
    Assert(iReg < RT_ELEMENTS(aValues));
    Assert(iReg < RT_ELEMENTS(aenmNames));
    HRESULT hrc = WHvSetVirtualProcessorRegisters(pVM->nem.s.hPartition, pVCpu->idCpu, aenmNames, iReg, aValues);
    if (SUCCEEDED(hrc))
    {
        /* Everything exported is now owned by Hyper-V again. */
        pVCpu->cpum.GstCtx.fExtrn |= CPUMCTX_EXTRN_ALL | CPUMCTX_EXTRN_KEEPER_NEM;
        return VINF_SUCCESS;
    }
    AssertLogRelMsgFailed(("WHvSetVirtualProcessorRegisters(%p, %u,,%u,) -> %Rhrc (Last=%#x/%u)\n",
                           pVM->nem.s.hPartition, pVCpu->idCpu, iReg,
                           hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
    return VERR_INTERNAL_ERROR;
}
1511
1512
/**
 * Worker for importing guest register state from Hyper-V into CPUMCTX.
 *
 * Runs two passes: first it collects the WHv register names for every
 * requested CPUMCTX_EXTRN_XXX bit, then it fetches all values with a single
 * WHvGetVirtualProcessorRegisters call and copies them into the guest
 * context, clearing the imported bits in fExtrn.
 *
 * @returns VBox status code.
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context per CPU structure.
 * @param   fWhat   What to import, CPUMCTX_EXTRN_XXX.
 *
 * @note    The order in which names are added in the first pass must exactly
 *          match the order in which values are consumed by the GET_* macros
 *          in the second pass (each macro asserts the pairing).
 */
NEM_TMPL_STATIC int nemHCWinCopyStateFromHyperV(PVMCC pVM, PVMCPUCC pVCpu, uint64_t fWhat)
{
    WHV_REGISTER_NAME aenmNames[256];

    /* Only import state that is actually still owned by Hyper-V. */
    fWhat &= pVCpu->cpum.GstCtx.fExtrn;
    if (!fWhat)
        return VINF_SUCCESS;

    /*
     * First pass: collect the register names for everything requested.
     */
    uintptr_t iReg = 0;

    if (fWhat & (CPUMCTX_EXTRN_GPRS_MASK | CPUMCTX_EXTRN_PC | CPUMCTX_EXTRN_FPCR | CPUMCTX_EXTRN_FPSR))
    {
        /* General purpose registers, PC, FPCR and FPSR. */
        for (uint32_t i = 0; i < RT_ELEMENTS(s_aCpumRegs); i++)
        {
            if (s_aCpumRegs[i].fCpumExtrn & fWhat)
                aenmNames[iReg++] = s_aCpumRegs[i].enmWHvReg;
        }
    }

    if (fWhat & CPUMCTX_EXTRN_V0_V31)
    {
        /* SIMD/FP registers. */
        for (uint32_t i = 0; i < RT_ELEMENTS(s_aCpumFpRegs); i++)
        {
            aenmNames[iReg++] = s_aCpumFpRegs[i].enmWHvReg;
        }
    }

    if (fWhat & CPUMCTX_EXTRN_SYSREG_DEBUG)
    {
        /* Only as many break-/watchpoint register pairs as the host partition supports. */
        for (uint32_t i = 0; i < pVM->nem.s.cBreakpoints; i++)
        {
            aenmNames[iReg++] = (WHV_REGISTER_NAME)((uint32_t)WHvArm64RegisterDbgbcr0El1 + i);
            aenmNames[iReg++] = (WHV_REGISTER_NAME)((uint32_t)WHvArm64RegisterDbgbvr0El1 + i);
        }

        for (uint32_t i = 0; i < pVM->nem.s.cWatchpoints; i++)
        {
            aenmNames[iReg++] = (WHV_REGISTER_NAME)((uint32_t)WHvArm64RegisterDbgwcr0El1 + i);
            aenmNames[iReg++] = (WHV_REGISTER_NAME)((uint32_t)WHvArm64RegisterDbgwvr0El1 + i);
        }

        aenmNames[iReg++] = WHvArm64RegisterMdscrEl1;
    }

    if (fWhat & CPUMCTX_EXTRN_SYSREG_PAUTH_KEYS)
    {
        /* PAuth registers. */
        for (uint32_t i = 0; i < RT_ELEMENTS(s_aCpumPAuthKeyRegs); i++)
        {
            aenmNames[iReg++] = s_aCpumPAuthKeyRegs[i].enmWHvReg;
        }
    }

    if (fWhat & (CPUMCTX_EXTRN_SPSR | CPUMCTX_EXTRN_ELR | CPUMCTX_EXTRN_SP | CPUMCTX_EXTRN_SYSREG_MISC))
    {
        /* System registers. */
        for (uint32_t i = 0; i < RT_ELEMENTS(s_aCpumSysRegs); i++)
        {
            if (s_aCpumSysRegs[i].fCpumExtrn & fWhat)
                aenmNames[iReg++] = s_aCpumSysRegs[i].enmWHvReg;
        }
    }

    if (fWhat & CPUMCTX_EXTRN_SCTLR_TCR_TTBR)
    {
        /* Paging related system registers. */
        for (uint32_t i = 0; i < RT_ELEMENTS(s_aCpumSysRegsPg); i++)
            aenmNames[iReg++] = s_aCpumSysRegsPg[i].enmWHvReg;
    }

    if (fWhat & CPUMCTX_EXTRN_PSTATE)
        aenmNames[iReg++] = WHvArm64RegisterPstate;

    size_t const cRegs = iReg;
    Assert(cRegs < RT_ELEMENTS(aenmNames));

    /*
     * Get the registers.
     */
    WHV_REGISTER_VALUE aValues[256];
    RT_ZERO(aValues);
    Assert(RT_ELEMENTS(aValues) >= cRegs);
    Assert(RT_ELEMENTS(aenmNames) >= cRegs);
    HRESULT hrc = WHvGetVirtualProcessorRegisters(pVM->nem.s.hPartition, pVCpu->idCpu, aenmNames, (uint32_t)cRegs, aValues);
    AssertLogRelMsgReturn(SUCCEEDED(hrc),
                          ("WHvGetVirtualProcessorRegisters(%p, %u,,%u,) -> %Rhrc (Last=%#x/%u)\n",
                           pVM->nem.s.hPartition, pVCpu->idCpu, cRegs, hrc, RTNtLastStatusValue(), RTNtLastErrorValue())
                          , VERR_NEM_GET_REGISTERS_FAILED);

    /*
     * Second pass: copy the values into the guest context in exactly the
     * order the names were queued above.  The helper macros assert the
     * name/value pairing on every step.
     */
    iReg = 0;
#define GET_REG64(a_DstVar, a_enmName) do { \
            Assert(aenmNames[iReg] == (a_enmName)); \
            (a_DstVar)->x = aValues[iReg].Reg64; \
            iReg++; \
        } while (0)
#define GET_REG64_RAW(a_DstVar, a_enmName) do { \
            Assert(aenmNames[iReg] == (a_enmName)); \
            *(a_DstVar) = aValues[iReg].Reg64; \
            iReg++; \
        } while (0)
#define GET_SYSREG64(a_DstVar, a_enmName) do { \
            Assert(aenmNames[iReg] == (a_enmName)); \
            (a_DstVar)->u64 = aValues[iReg].Reg64; \
            iReg++; \
        } while (0)
#define GET_REG128(a_DstVar, a_enmName) do { \
            Assert(aenmNames[iReg] == a_enmName); \
            (a_DstVar)->au64[0] = aValues[iReg].Reg128.Low64; \
            (a_DstVar)->au64[1] = aValues[iReg].Reg128.High64; \
            iReg++; \
        } while (0)

    if (fWhat & (CPUMCTX_EXTRN_GPRS_MASK | CPUMCTX_EXTRN_PC | CPUMCTX_EXTRN_FPCR | CPUMCTX_EXTRN_FPSR))
    {
        for (uint32_t i = 0; i < RT_ELEMENTS(s_aCpumRegs); i++)
        {
            if (s_aCpumRegs[i].fCpumExtrn & fWhat)
            {
                CPUMCTXGREG *pReg = (CPUMCTXGREG *)((uint8_t *)&pVCpu->cpum.GstCtx + s_aCpumRegs[i].offCpumCtx);
                GET_REG64(pReg, s_aCpumRegs[i].enmWHvReg);
            }
        }
    }

    if (fWhat & CPUMCTX_EXTRN_V0_V31)
    {
        /* SIMD/FP registers. */
        for (uint32_t i = 0; i < RT_ELEMENTS(s_aCpumFpRegs); i++)
        {
            PCPUMCTXVREG pVReg = (PCPUMCTXVREG)((uint8_t *)&pVCpu->cpum.GstCtx + s_aCpumFpRegs[i].offCpumCtx);
            GET_REG128(pVReg, s_aCpumFpRegs[i].enmWHvReg);
        }
    }

    if (fWhat & CPUMCTX_EXTRN_SYSREG_DEBUG)
    {
        for (uint32_t i = 0; i < pVM->nem.s.cBreakpoints; i++)
        {
            GET_SYSREG64(&pVCpu->cpum.GstCtx.aBp[i].Ctrl, (WHV_REGISTER_NAME)((uint32_t)WHvArm64RegisterDbgbcr0El1 + i));
            GET_SYSREG64(&pVCpu->cpum.GstCtx.aBp[i].Value, (WHV_REGISTER_NAME)((uint32_t)WHvArm64RegisterDbgbvr0El1 + i));
        }

        for (uint32_t i = 0; i < pVM->nem.s.cWatchpoints; i++)
        {
            GET_SYSREG64(&pVCpu->cpum.GstCtx.aWp[i].Ctrl, (WHV_REGISTER_NAME)((uint32_t)WHvArm64RegisterDbgwcr0El1 + i));
            GET_SYSREG64(&pVCpu->cpum.GstCtx.aWp[i].Value, (WHV_REGISTER_NAME)((uint32_t)WHvArm64RegisterDbgwvr0El1 + i));
        }

        GET_SYSREG64(&pVCpu->cpum.GstCtx.Mdscr, WHvArm64RegisterMdscrEl1);
    }

    if (fWhat & CPUMCTX_EXTRN_SYSREG_PAUTH_KEYS)
    {
        /* PAuth registers. */
        for (uint32_t i = 0; i < RT_ELEMENTS(s_aCpumPAuthKeyRegs); i++)
        {
            CPUMCTXSYSREG *pReg = (CPUMCTXSYSREG *)((uint8_t *)&pVCpu->cpum.GstCtx + s_aCpumPAuthKeyRegs[i].offCpumCtx);
            GET_SYSREG64(pReg, s_aCpumPAuthKeyRegs[i].enmWHvReg);
        }
    }

    if (fWhat & (CPUMCTX_EXTRN_SPSR | CPUMCTX_EXTRN_ELR | CPUMCTX_EXTRN_SP | CPUMCTX_EXTRN_SYSREG_MISC))
    {
        /* System registers. */
        for (uint32_t i = 0; i < RT_ELEMENTS(s_aCpumSysRegs); i++)
        {
            if (s_aCpumSysRegs[i].fCpumExtrn & fWhat)
            {
                CPUMCTXSYSREG *pReg = (CPUMCTXSYSREG *)((uint8_t *)&pVCpu->cpum.GstCtx + s_aCpumSysRegs[i].offCpumCtx);
                GET_SYSREG64(pReg, s_aCpumSysRegs[i].enmWHvReg);
            }
        }
    }

    /* The paging related system registers need to be treated differently as they might invoke a PGM mode change. */
    uint64_t u64RegSctlrEl1;
    uint64_t u64RegTcrEl1;
    if (fWhat & CPUMCTX_EXTRN_SCTLR_TCR_TTBR)
    {
        GET_REG64_RAW(&u64RegSctlrEl1, WHvArm64RegisterSctlrEl1);
        GET_REG64_RAW(&u64RegTcrEl1, WHvArm64RegisterTcrEl1);
        GET_SYSREG64(&pVCpu->cpum.GstCtx.Ttbr0, WHvArm64RegisterTtbr0El1);
        GET_SYSREG64(&pVCpu->cpum.GstCtx.Ttbr1, WHvArm64RegisterTtbr1El1);
        /* Only notify PGM when SCTLR_EL1 or TCR_EL1 actually changed. */
        if (   u64RegSctlrEl1 != pVCpu->cpum.GstCtx.Sctlr.u64
            || u64RegTcrEl1 != pVCpu->cpum.GstCtx.Tcr.u64)
        {
            pVCpu->cpum.GstCtx.Sctlr.u64 = u64RegSctlrEl1;
            pVCpu->cpum.GstCtx.Tcr.u64 = u64RegTcrEl1;
            int rc = PGMChangeMode(pVCpu, 1 /*bEl*/, u64RegSctlrEl1, u64RegTcrEl1);
            AssertMsgReturn(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc), RT_FAILURE_NP(rc) ? rc : VERR_NEM_IPE_1);
        }
    }

    if (fWhat & CPUMCTX_EXTRN_PSTATE)
        GET_REG64_RAW(&pVCpu->cpum.GstCtx.fPState, WHvArm64RegisterPstate);

    /* NOTE(review): unlike the ADD_* macros in the export path, the GET_*
       macros are not #undef'ed here; they reference locals and are unusable
       outside this function anyway. */

    /* Almost done, just update extrn flags. */
    pVCpu->cpum.GstCtx.fExtrn &= ~fWhat;
    if (!(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_ALL))
        pVCpu->cpum.GstCtx.fExtrn = 0; /* Also drops keeper bits once all state bits are imported. */

    return VINF_SUCCESS;
}
1717
1718
1719/**
1720 * Interface for importing state on demand (used by IEM).
1721 *
1722 * @returns VBox status code.
1723 * @param pVCpu The cross context CPU structure.
1724 * @param fWhat What to import, CPUMCTX_EXTRN_XXX.
1725 */
1726VMM_INT_DECL(int) NEMImportStateOnDemand(PVMCPUCC pVCpu, uint64_t fWhat)
1727{
1728 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatImportOnDemand);
1729 return nemHCWinCopyStateFromHyperV(pVCpu->pVMR3, pVCpu, fWhat);
1730}
1731
1732
1733/**
1734 * Query the CPU tick counter and optionally the TSC_AUX MSR value.
1735 *
1736 * @returns VBox status code.
1737 * @param pVCpu The cross context CPU structure.
1738 * @param pcTicks Where to return the CPU tick count.
1739 * @param puAux Where to return the TSC_AUX register value.
1740 */
1741VMM_INT_DECL(int) NEMHCQueryCpuTick(PVMCPUCC pVCpu, uint64_t *pcTicks, uint32_t *puAux)
1742{
1743 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatQueryCpuTick);
1744
1745 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1746 VMCPU_ASSERT_EMT_RETURN(pVCpu, VERR_VM_THREAD_NOT_EMT);
1747 AssertReturn(VM_IS_NEM_ENABLED(pVM), VERR_NEM_IPE_9);
1748
1749 /* Ensure time for the partition is suspended - it will be resumed as soon as a vCPU starts executing. */
1750 HRESULT hrc = WHvSuspendPartitionTime(pVM->nem.s.hPartition);
1751 AssertLogRelMsgReturn(SUCCEEDED(hrc),
1752 ("WHvSuspendPartitionTime(%p) -> %Rhrc (Last=%#x/%u)\n",
1753 pVM->nem.s.hPartition, hrc, RTNtLastStatusValue(), RTNtLastErrorValue())
1754 , VERR_NEM_GET_REGISTERS_FAILED);
1755
1756 /* Call the offical API. */
1757 WHV_REGISTER_NAME enmName = WHvArm64RegisterCntvctEl0;
1758 WHV_REGISTER_VALUE Value = { { {0, 0} } };
1759 hrc = WHvGetVirtualProcessorRegisters(pVM->nem.s.hPartition, pVCpu->idCpu, &enmName, 1, &Value);
1760 AssertLogRelMsgReturn(SUCCEEDED(hrc),
1761 ("WHvGetVirtualProcessorRegisters(%p, %u,{CNTVCT_EL0},1,) -> %Rhrc (Last=%#x/%u)\n",
1762 pVM->nem.s.hPartition, pVCpu->idCpu, hrc, RTNtLastStatusValue(), RTNtLastErrorValue())
1763 , VERR_NEM_GET_REGISTERS_FAILED);
1764 *pcTicks = Value.Reg64;
1765 LogFlow(("NEMHCQueryCpuTick: %#RX64 (host: %#RX64)\n", *pcTicks, ASMReadTSC()));
1766 if (puAux)
1767 *puAux =0;
1768
1769 return VINF_SUCCESS;
1770}
1771
1772
1773/**
1774 * Resumes CPU clock (TSC) on all virtual CPUs.
1775 *
1776 * This is called by TM when the VM is started, restored, resumed or similar.
1777 *
1778 * @returns VBox status code.
1779 * @param pVM The cross context VM structure.
1780 * @param pVCpu The cross context CPU structure of the calling EMT.
1781 * @param uPausedTscValue The TSC value at the time of pausing.
1782 */
1783VMM_INT_DECL(int) NEMHCResumeCpuTickOnAll(PVMCC pVM, PVMCPUCC pVCpu, uint64_t uPausedTscValue)
1784{
1785 VMCPU_ASSERT_EMT_RETURN(pVCpu, VERR_VM_THREAD_NOT_EMT);
1786 AssertReturn(VM_IS_NEM_ENABLED(pVM), VERR_NEM_IPE_9);
1787
1788 /*
1789 * Call the offical API to do the job.
1790 */
1791 LogFlow(("NEMHCResumeCpuTickOnAll: %#RX64 (host: %#RX64)\n", uPausedTscValue, ASMReadTSC()));
1792
1793 /*
1794 * Now set the CNTVCT_EL0 register for each vCPU, Hyper-V will program the timer offset in
1795 * CNTVOFF_EL2 accordingly. ARM guarantees that CNTVCT_EL0 is synchronised across all CPUs,
1796 * as long as CNTVOFF_EL2 is the same everywhere. Lets just hope scheduling will not affect it
1797 * if the partition time is suspended.
1798 */
1799 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
1800 {
1801 WHV_REGISTER_NAME enmName = WHvArm64RegisterCntvctEl0;
1802 WHV_REGISTER_VALUE Value;
1803 Value.Reg64 = uPausedTscValue;
1804 HRESULT hrc = WHvSetVirtualProcessorRegisters(pVM->nem.s.hPartition, idCpu, &enmName, 1, &Value);
1805 AssertLogRelMsgReturn(SUCCEEDED(hrc),
1806 ("WHvSetVirtualProcessorRegisters(%p, 0,{CNTVCT_EL0},1,%#RX64) -> %Rhrc (Last=%#x/%u)\n",
1807 pVM->nem.s.hPartition, idCpu, uPausedTscValue, hrc, RTNtLastStatusValue(), RTNtLastErrorValue())
1808 , VERR_NEM_SET_TSC);
1809
1810 /* Make sure the CNTV_CTL_EL0 and CNTV_CVAL_EL0 registers are up to date after resuming (saved state load). */
1811 PVMCPUCC pVCpuDst = pVM->apCpusR3[idCpu];
1812 pVCpuDst->nem.s.fSyncCntvRegs = true;
1813 }
1814
1815 HRESULT hrc = WHvResumePartitionTime(pVM->nem.s.hPartition);
1816 AssertLogRelMsgReturn(SUCCEEDED(hrc),
1817 ("WHvResumePartitionTime(%p) -> %Rhrc (Last=%#x/%u)\n",
1818 pVM->nem.s.hPartition, hrc, RTNtLastStatusValue(), RTNtLastErrorValue())
1819 , VERR_NEM_SET_TSC);
1820
1821 return VINF_SUCCESS;
1822}
1823
1824
1825#ifdef LOG_ENABLED
1826/**
1827 * Logs the current CPU state.
1828 */
1829static void nemR3WinLogState(PVMCC pVM, PVMCPUCC pVCpu)
1830{
1831 if (LogIs3Enabled())
1832 {
1833 char szRegs[4096];
1834 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
1835 "x0=%016VR{x0} x1=%016VR{x1} x2=%016VR{x2} x3=%016VR{x3}\n"
1836 "x4=%016VR{x4} x5=%016VR{x5} x6=%016VR{x6} x7=%016VR{x7}\n"
1837 "x8=%016VR{x8} x9=%016VR{x9} x10=%016VR{x10} x11=%016VR{x11}\n"
1838 "x12=%016VR{x12} x13=%016VR{x13} x14=%016VR{x14} x15=%016VR{x15}\n"
1839 "x16=%016VR{x16} x17=%016VR{x17} x18=%016VR{x18} x19=%016VR{x19}\n"
1840 "x20=%016VR{x20} x21=%016VR{x21} x22=%016VR{x22} x23=%016VR{x23}\n"
1841 "x24=%016VR{x24} x25=%016VR{x25} x26=%016VR{x26} x27=%016VR{x27}\n"
1842 "x28=%016VR{x28} x29=%016VR{x29} x30=%016VR{x30}\n"
1843 "pc=%016VR{pc} pstate=%016VR{pstate}\n"
1844 "sp_el0=%016VR{sp_el0} sp_el1=%016VR{sp_el1} elr_el1=%016VR{elr_el1}\n"
1845 "sctlr_el1=%016VR{sctlr_el1} tcr_el1=%016VR{tcr_el1}\n"
1846 "ttbr0_el1=%016VR{ttbr0_el1} ttbr1_el1=%016VR{ttbr1_el1}\n"
1847 "vbar_el1=%016VR{vbar_el1}\n"
1848 );
1849 char szInstr[256]; RT_ZERO(szInstr);
1850 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
1851 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
1852 szInstr, sizeof(szInstr), NULL);
1853 Log3(("%s%s\n", szRegs, szInstr));
1854 }
1855}
1856#endif /* LOG_ENABLED */
1857
1858
1859/**
1860 * Copies register state from the (common) exit context.
1861 *
1862 * ASSUMES no state copied yet.
1863 *
1864 * @param pVCpu The cross context per CPU structure.
1865 * @param pMsgHdr The common message header.
1866 */
1867DECLINLINE(void) nemR3WinCopyStateFromArmHeader(PVMCPUCC pVCpu, WHV_INTERCEPT_MESSAGE_HEADER const *pMsgHdr)
1868{
1869#ifdef LOG_ENABLED /* When state logging is enabled the state is synced completely upon VM exit. */
1870 if (!LogIs3Enabled())
1871#endif
1872 Assert( (pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_PC | CPUMCTX_EXTRN_PSTATE))
1873 == (CPUMCTX_EXTRN_PC | CPUMCTX_EXTRN_PSTATE));
1874
1875 pVCpu->cpum.GstCtx.Pc.u64 = pMsgHdr->Pc;
1876 pVCpu->cpum.GstCtx.fPState = pMsgHdr->Cpsr;
1877
1878 pVCpu->cpum.GstCtx.fExtrn &= ~(CPUMCTX_EXTRN_PC | CPUMCTX_EXTRN_PSTATE);
1879}
1880
1881
1882/**
1883 * Returns the byte size from the given access SAS value.
1884 *
1885 * @returns Number of bytes to transfer.
1886 * @param uSas The SAS value to convert.
1887 */
1888DECLINLINE(size_t) nemR3WinGetByteCountFromSas(uint8_t uSas)
1889{
1890 switch (uSas)
1891 {
1892 case ARMV8_EC_ISS_DATA_ABRT_SAS_BYTE: return sizeof(uint8_t);
1893 case ARMV8_EC_ISS_DATA_ABRT_SAS_HALFWORD: return sizeof(uint16_t);
1894 case ARMV8_EC_ISS_DATA_ABRT_SAS_WORD: return sizeof(uint32_t);
1895 case ARMV8_EC_ISS_DATA_ABRT_SAS_DWORD: return sizeof(uint64_t);
1896 default:
1897 AssertReleaseFailed();
1898 }
1899
1900 return 0;
1901}
1902
1903
1904/**
1905 * Sets the given general purpose register to the given value.
1906 *
1907 * @param pVCpu The cross context virtual CPU structure of the
1908 * calling EMT.
1909 * @param uReg The register index.
1910 * @param f64BitReg Flag whether to operate on a 64-bit or 32-bit register.
1911 * @param fSignExtend Flag whether to sign extend the value.
1912 * @param u64Val The value.
1913 */
1914DECLINLINE(void) nemR3WinSetGReg(PVMCPU pVCpu, uint8_t uReg, bool f64BitReg, bool fSignExtend, uint64_t u64Val)
1915{
1916 AssertReturnVoid(uReg < 31);
1917
1918 if (f64BitReg)
1919 pVCpu->cpum.GstCtx.aGRegs[uReg].x = fSignExtend ? (int64_t)u64Val : u64Val;
1920 else
1921 pVCpu->cpum.GstCtx.aGRegs[uReg].x = (uint64_t)(fSignExtend ? (int32_t)u64Val : (uint32_t)u64Val);
1922
1923 /* Mark the register as not extern anymore. */
1924 switch (uReg)
1925 {
1926 case 0:
1927 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_X0;
1928 break;
1929 case 1:
1930 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_X1;
1931 break;
1932 case 2:
1933 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_X2;
1934 break;
1935 case 3:
1936 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_X3;
1937 break;
1938 default:
1939 AssertRelease(!(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_X4_X28));
1940 /** @todo We need to import all missing registers in order to clear this flag (or just set it in HV from here). */
1941 }
1942}
1943
1944
1945/**
1946 * Gets the given general purpose register and returns the value.
1947 *
1948 * @returns Value from the given register.
1949 * @param pVCpu The cross context virtual CPU structure of the
1950 * calling EMT.
1951 * @param uReg The register index.
1952 */
1953DECLINLINE(uint64_t) nemR3WinGetGReg(PVMCPU pVCpu, uint8_t uReg)
1954{
1955 AssertReturn(uReg <= ARMV8_A64_REG_XZR, 0);
1956
1957 if (uReg == ARMV8_A64_REG_XZR)
1958 return 0;
1959
1960 /** @todo Import the register if extern. */
1961 AssertRelease(!(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_GPRS_MASK));
1962
1963 return pVCpu->cpum.GstCtx.aGRegs[uReg].x;
1964}
1965
1966
1967/**
1968 * Deals with memory access exits (WHvRunVpExitReasonMemoryAccess).
1969 *
1970 * @returns Strict VBox status code.
1971 * @param pVM The cross context VM structure.
1972 * @param pVCpu The cross context per CPU structure.
1973 * @param pExit The VM exit information to handle.
1974 * @sa nemHCWinHandleMessageMemory
1975 */
1976NEM_TMPL_STATIC VBOXSTRICTRC
1977nemR3WinHandleExitMemory(PVMCC pVM, PVMCPUCC pVCpu, MY_WHV_RUN_VP_EXIT_CONTEXT const *pExit)
1978{
1979 uint64_t const uHostTsc = ASMReadTSC();
1980 Assert(pExit->MemoryAccess.Header.InterceptAccessType != 3);
1981
1982 /*
1983 * Emulate the memory access, either access handler or special memory.
1984 */
1985 WHV_INTERCEPT_MESSAGE_HEADER const *pHdr = &pExit->MemoryAccess.Header;
1986 PCEMEXITREC pExitRec = EMHistoryAddExit(pVCpu,
1987 pExit->MemoryAccess.Header.InterceptAccessType == WHvMemoryAccessWrite
1988 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_WRITE)
1989 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_READ),
1990 pHdr->Pc, uHostTsc);
1991 nemR3WinCopyStateFromArmHeader(pVCpu, &pExit->MemoryAccess.Header);
1992 RT_NOREF_PV(pExitRec);
1993 int rc = nemHCWinCopyStateFromHyperV(pVM, pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
1994 AssertRCReturn(rc, rc);
1995
1996#ifdef LOG_ENABLED
1997 uint8_t const cbInstr = pExit->MemoryAccess.InstructionByteCount;
1998 RTGCPTR const GCPtrVa = pExit->MemoryAccess.Gva;
1999#endif
2000 RTGCPHYS const GCPhys = pExit->MemoryAccess.Gpa;
2001 uint64_t const uIss = pExit->MemoryAccess.Syndrome;
2002 bool fIsv = RT_BOOL(uIss & ARMV8_EC_ISS_DATA_ABRT_ISV);
2003 bool fL2Fault = RT_BOOL(uIss & ARMV8_EC_ISS_DATA_ABRT_S1PTW);
2004 bool fWrite = RT_BOOL(uIss & ARMV8_EC_ISS_DATA_ABRT_WNR);
2005 bool f64BitReg = RT_BOOL(uIss & ARMV8_EC_ISS_DATA_ABRT_SF);
2006 bool fSignExtend = RT_BOOL(uIss & ARMV8_EC_ISS_DATA_ABRT_SSE);
2007 uint8_t uReg = ARMV8_EC_ISS_DATA_ABRT_SRT_GET(uIss);
2008 uint8_t uAcc = ARMV8_EC_ISS_DATA_ABRT_SAS_GET(uIss);
2009 size_t cbAcc = nemR3WinGetByteCountFromSas(uAcc);
2010 LogFlowFunc(("fIsv=%RTbool fL2Fault=%RTbool fWrite=%RTbool f64BitReg=%RTbool fSignExtend=%RTbool uReg=%u uAcc=%u GCPtrDataAbrt=%RGv GCPhys=%RGp cbInstr=%u\n",
2011 fIsv, fL2Fault, fWrite, f64BitReg, fSignExtend, uReg, uAcc, GCPtrVa, GCPhys, cbInstr));
2012
2013 RT_NOREF(fL2Fault);
2014
2015 VBOXSTRICTRC rcStrict;
2016 if (fIsv)
2017 {
2018 EMHistoryAddExit(pVCpu,
2019 fWrite
2020 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_WRITE)
2021 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_READ),
2022 pVCpu->cpum.GstCtx.Pc.u64, ASMReadTSC());
2023
2024 uint64_t u64Val = 0;
2025 if (fWrite)
2026 {
2027 u64Val = nemR3WinGetGReg(pVCpu, uReg);
2028 rcStrict = PGMPhysWrite(pVM, GCPhys, &u64Val, cbAcc, PGMACCESSORIGIN_HM);
2029 Log4(("MmioExit/%u: %08RX64: WRITE %RGp LB %u, %.*Rhxs -> rcStrict=%Rrc\n",
2030 pVCpu->idCpu, pVCpu->cpum.GstCtx.Pc.u64, GCPhys, cbAcc, cbAcc,
2031 &u64Val, VBOXSTRICTRC_VAL(rcStrict) ));
2032 }
2033 else
2034 {
2035 rcStrict = PGMPhysRead(pVM, GCPhys, &u64Val, cbAcc, PGMACCESSORIGIN_HM);
2036 Log4(("MmioExit/%u: %08RX64: READ %RGp LB %u -> %.*Rhxs rcStrict=%Rrc\n",
2037 pVCpu->idCpu, pVCpu->cpum.GstCtx.Pc.u64, GCPhys, cbAcc, cbAcc,
2038 &u64Val, VBOXSTRICTRC_VAL(rcStrict) ));
2039 if (rcStrict == VINF_SUCCESS)
2040 nemR3WinSetGReg(pVCpu, uReg, f64BitReg, fSignExtend, u64Val);
2041 }
2042 }
2043 else
2044 {
2045 /** @todo Our UEFI firmware accesses the flash region with the following instruction
2046 * when the NVRAM actually contains data:
2047 * ldrb w9, [x6, #-0x0001]!
2048 * This is too complicated for the hardware so the ISV bit is not set. Until there
2049 * is a proper IEM implementation we just handle this here for now to avoid annoying
2050 * users too much.
2051 */
2052 /* The following ASSUMES that the vCPU state is completely synced. */
2053
2054 /* Read instruction. */
2055 RTGCPTR GCPtrPage = pVCpu->cpum.GstCtx.Pc.u64 & ~(RTGCPTR)GUEST_PAGE_OFFSET_MASK;
2056 const void *pvPageR3 = NULL;
2057 PGMPAGEMAPLOCK PageMapLock;
2058
2059 rcStrict = PGMPhysGCPtr2CCPtrReadOnly(pVCpu, GCPtrPage, &pvPageR3, &PageMapLock);
2060 if (rcStrict == VINF_SUCCESS)
2061 {
2062 uint32_t u32Instr = *(uint32_t *)((uint8_t *)pvPageR3 + (pVCpu->cpum.GstCtx.Pc.u64 - GCPtrPage));
2063 PGMPhysReleasePageMappingLock(pVCpu->pVMR3, &PageMapLock);
2064
2065 DISSTATE Dis;
2066 rcStrict = DISInstrWithPrefetchedBytes((uintptr_t)pVCpu->cpum.GstCtx.Pc.u64, DISCPUMODE_ARMV8_A64, 0 /*fFilter - none */,
2067 &u32Instr, sizeof(u32Instr), NULL, NULL, &Dis, NULL);
2068 if (rcStrict == VINF_SUCCESS)
2069 {
2070 if ( Dis.pCurInstr->uOpcode == OP_ARMV8_A64_LDRB
2071 && Dis.aParams[0].armv8.enmType == kDisArmv8OpParmReg
2072 && Dis.aParams[0].armv8.Op.Reg.enmRegType == kDisOpParamArmV8RegType_Gpr_32Bit
2073 && Dis.aParams[1].armv8.enmType == kDisArmv8OpParmAddrInGpr
2074 && Dis.aParams[1].armv8.Op.Reg.enmRegType == kDisOpParamArmV8RegType_Gpr_64Bit
2075 && (Dis.aParams[1].fUse & DISUSE_PRE_INDEXED))
2076 {
2077 /* The fault address is already the final address. */
2078 uint8_t bVal = 0;
2079 rcStrict = PGMPhysRead(pVM, GCPhys, &bVal, 1, PGMACCESSORIGIN_HM);
2080 Log4(("MmioExit/%u: %08RX64: READ %#RGp LB %u -> %.*Rhxs rcStrict=%Rrc\n",
2081 pVCpu->idCpu, pVCpu->cpum.GstCtx.Pc.u64, GCPhys, sizeof(bVal), sizeof(bVal),
2082 &bVal, VBOXSTRICTRC_VAL(rcStrict) ));
2083 if (rcStrict == VINF_SUCCESS)
2084 {
2085 nemR3WinSetGReg(pVCpu, Dis.aParams[0].armv8.Op.Reg.idReg, false /*f64BitReg*/, false /*fSignExtend*/, bVal);
2086 /* Update the indexed register. */
2087 pVCpu->cpum.GstCtx.aGRegs[Dis.aParams[1].armv8.Op.Reg.idReg].x += Dis.aParams[1].armv8.u.offBase;
2088 }
2089 }
2090 /*
2091 * Seeing the following with the Windows 11/ARM TPM driver:
2092 * %fffff800e5342888 48 25 45 29 ldp w8, w9, [x10, #+0x0028]
2093 */
2094 else if ( Dis.pCurInstr->uOpcode == OP_ARMV8_A64_LDP
2095 && Dis.aParams[0].armv8.enmType == kDisArmv8OpParmReg
2096 && Dis.aParams[0].armv8.Op.Reg.enmRegType == kDisOpParamArmV8RegType_Gpr_32Bit
2097 && Dis.aParams[1].armv8.enmType == kDisArmv8OpParmReg
2098 && Dis.aParams[1].armv8.Op.Reg.enmRegType == kDisOpParamArmV8RegType_Gpr_32Bit
2099 && Dis.aParams[2].armv8.enmType == kDisArmv8OpParmAddrInGpr
2100 && Dis.aParams[2].armv8.Op.Reg.enmRegType == kDisOpParamArmV8RegType_Gpr_64Bit)
2101 {
2102 /** @todo This is tricky to handle if the first register read returns something else than VINF_SUCCESS... */
2103 /* The fault address is already the final address. */
2104 uint32_t u32Val1 = 0;
2105 uint32_t u32Val2 = 0;
2106 rcStrict = PGMPhysRead(pVM, GCPhys, &u32Val1, sizeof(u32Val1), PGMACCESSORIGIN_HM);
2107 if (rcStrict == VINF_SUCCESS)
2108 rcStrict = PGMPhysRead(pVM, GCPhys + sizeof(uint32_t), &u32Val2, sizeof(u32Val2), PGMACCESSORIGIN_HM);
2109 Log4(("MmioExit/%u: %08RX64: READ %#RGp LB %u -> %.*Rhxs %.*Rhxs rcStrict=%Rrc\n",
2110 pVCpu->idCpu, pVCpu->cpum.GstCtx.Pc.u64, GCPhys, 2 * sizeof(uint32_t), sizeof(u32Val1),
2111 &u32Val1, sizeof(u32Val2), &u32Val2, VBOXSTRICTRC_VAL(rcStrict) ));
2112 if (rcStrict == VINF_SUCCESS)
2113 {
2114 nemR3WinSetGReg(pVCpu, Dis.aParams[0].armv8.Op.Reg.idReg, false /*f64BitReg*/, false /*fSignExtend*/, u32Val1);
2115 nemR3WinSetGReg(pVCpu, Dis.aParams[1].armv8.Op.Reg.idReg, false /*f64BitReg*/, false /*fSignExtend*/, u32Val2);
2116 }
2117 }
2118 else
2119 AssertFailedReturn(VERR_NOT_SUPPORTED);
2120 }
2121 }
2122 }
2123
2124 if (rcStrict == VINF_SUCCESS)
2125 pVCpu->cpum.GstCtx.Pc.u64 += sizeof(uint32_t); /** @todo Why is InstructionByteCount always 0? */
2126
2127 return rcStrict;
2128}
2129
2130
2131/**
2132 * Deals with memory access exits (WHvRunVpExitReasonMemoryAccess).
2133 *
2134 * @returns Strict VBox status code.
2135 * @param pVM The cross context VM structure.
2136 * @param pVCpu The cross context per CPU structure.
2137 * @param pExit The VM exit information to handle.
2138 * @sa nemHCWinHandleMessageMemory
2139 */
2140NEM_TMPL_STATIC VBOXSTRICTRC
2141nemR3WinHandleExitHypercall(PVMCC pVM, PVMCPUCC pVCpu, MY_WHV_RUN_VP_EXIT_CONTEXT const *pExit)
2142{
2143 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
2144
2145 /** @todo Raise exception to EL1 if PSCI not configured. */
2146 /** @todo Need a generic mechanism here to pass this to, GIM maybe?. */
2147 uint32_t uFunId = pExit->Hypercall.Immediate;
2148 bool fHvc64 = RT_BOOL(uFunId & ARM_SMCCC_FUNC_ID_64BIT); RT_NOREF(fHvc64);
2149 uint32_t uEntity = ARM_SMCCC_FUNC_ID_ENTITY_GET(uFunId);
2150 uint32_t uFunNum = ARM_SMCCC_FUNC_ID_NUM_GET(uFunId);
2151 if (uEntity == ARM_SMCCC_FUNC_ID_ENTITY_STD_SEC_SERVICE)
2152 {
2153 switch (uFunNum)
2154 {
2155 case ARM_PSCI_FUNC_ID_PSCI_VERSION:
2156 nemR3WinSetGReg(pVCpu, ARMV8_A64_REG_X0, false /*f64BitReg*/, false /*fSignExtend*/, ARM_PSCI_FUNC_ID_PSCI_VERSION_SET(1, 2));
2157 break;
2158 case ARM_PSCI_FUNC_ID_SYSTEM_OFF:
2159 rcStrict = VMR3PowerOff(pVM->pUVM);
2160 break;
2161 case ARM_PSCI_FUNC_ID_SYSTEM_RESET:
2162 case ARM_PSCI_FUNC_ID_SYSTEM_RESET2:
2163 {
2164 bool fHaltOnReset;
2165 int rc = CFGMR3QueryBool(CFGMR3GetChild(CFGMR3GetRoot(pVM), "PDM"), "HaltOnReset", &fHaltOnReset);
2166 if (RT_SUCCESS(rc) && fHaltOnReset)
2167 {
2168 Log(("nemHCLnxHandleExitHypercall: Halt On Reset!\n"));
2169 rcStrict = VINF_EM_HALT;
2170 }
2171 else
2172 {
2173 /** @todo pVM->pdm.s.fResetFlags = fFlags; */
2174 VM_FF_SET(pVM, VM_FF_RESET);
2175 rcStrict = VINF_EM_RESET;
2176 }
2177 break;
2178 }
2179 case ARM_PSCI_FUNC_ID_CPU_ON:
2180 {
2181 uint64_t u64TgtCpu = pExit->Hypercall.X[1];
2182 RTGCPHYS GCPhysExecAddr = pExit->Hypercall.X[2];
2183 uint64_t u64CtxId = pExit->Hypercall.X[3];
2184 VMMR3CpuOn(pVM, u64TgtCpu & 0xff, GCPhysExecAddr, u64CtxId);
2185 nemR3WinSetGReg(pVCpu, ARMV8_A64_REG_X0, true /*f64BitReg*/, false /*fSignExtend*/, ARM_PSCI_STS_SUCCESS);
2186 break;
2187 }
2188 case ARM_PSCI_FUNC_ID_PSCI_FEATURES:
2189 {
2190 uint32_t u32FunNum = (uint32_t)pExit->Hypercall.X[1];
2191 switch (u32FunNum)
2192 {
2193 case ARM_PSCI_FUNC_ID_PSCI_VERSION:
2194 case ARM_PSCI_FUNC_ID_SYSTEM_OFF:
2195 case ARM_PSCI_FUNC_ID_SYSTEM_RESET:
2196 case ARM_PSCI_FUNC_ID_SYSTEM_RESET2:
2197 case ARM_PSCI_FUNC_ID_CPU_ON:
2198 nemR3WinSetGReg(pVCpu, ARMV8_A64_REG_X0,
2199 false /*f64BitReg*/, false /*fSignExtend*/,
2200 (uint64_t)ARM_PSCI_STS_SUCCESS);
2201 break;
2202 default:
2203 nemR3WinSetGReg(pVCpu, ARMV8_A64_REG_X0,
2204 false /*f64BitReg*/, false /*fSignExtend*/,
2205 (uint64_t)ARM_PSCI_STS_NOT_SUPPORTED);
2206 }
2207 break;
2208 }
2209 default:
2210 nemR3WinSetGReg(pVCpu, ARMV8_A64_REG_X0, false /*f64BitReg*/, false /*fSignExtend*/, (uint64_t)ARM_PSCI_STS_NOT_SUPPORTED);
2211 }
2212 }
2213 else
2214 nemR3WinSetGReg(pVCpu, ARMV8_A64_REG_X0, false /*f64BitReg*/, false /*fSignExtend*/, (uint64_t)ARM_PSCI_STS_NOT_SUPPORTED);
2215
2216 /** @todo What to do if immediate is != 0? */
2217
2218 if (rcStrict == VINF_SUCCESS)
2219 pVCpu->cpum.GstCtx.Pc.u64 += sizeof(uint32_t);
2220
2221 return rcStrict;
2222}
2223
2224
2225/**
2226 * Deals with MSR access exits (WHvRunVpExitReasonUnrecoverableException).
2227 *
2228 * @returns Strict VBox status code.
2229 * @param pVM The cross context VM structure.
2230 * @param pVCpu The cross context per CPU structure.
2231 * @param pExit The VM exit information to handle.
2232 * @sa nemHCWinHandleMessageUnrecoverableException
2233 */
2234NEM_TMPL_STATIC VBOXSTRICTRC nemR3WinHandleExitUnrecoverableException(PVMCC pVM, PVMCPUCC pVCpu, MY_WHV_RUN_VP_EXIT_CONTEXT const *pExit)
2235{
2236#if 0
2237 /*
2238 * Just copy the state we've got and handle it in the loop for now.
2239 */
2240 nemR3WinCopyStateFromX64Header(pVCpu, &pExit->VpContext);
2241 Log(("TripleExit/%u: %04x:%08RX64/%s: RFL=%#RX64 -> VINF_EM_TRIPLE_FAULT\n", pVCpu->idCpu, pExit->VpContext.Cs.Selector,
2242 pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext), pExit->VpContext.Rflags));
2243 RT_NOREF_PV(pVM);
2244 return VINF_EM_TRIPLE_FAULT;
2245#else
2246 /*
2247 * Let IEM decide whether this is really it.
2248 */
2249 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, NEMEXITTYPE_UNRECOVERABLE_EXCEPTION),
2250 pExit->UnrecoverableException.Header.Pc, ASMReadTSC());
2251 nemR3WinCopyStateFromArmHeader(pVCpu, &pExit->UnrecoverableException.Header);
2252 AssertReleaseFailed();
2253 RT_NOREF_PV(pVM);
2254 return VINF_SUCCESS;
2255#endif
2256}
2257
2258
2259/**
2260 * Handles VM exits.
2261 *
2262 * @returns Strict VBox status code.
2263 * @param pVM The cross context VM structure.
2264 * @param pVCpu The cross context per CPU structure.
2265 * @param pExit The VM exit information to handle.
2266 * @sa nemHCWinHandleMessage
2267 */
2268NEM_TMPL_STATIC VBOXSTRICTRC nemR3WinHandleExit(PVMCC pVM, PVMCPUCC pVCpu, MY_WHV_RUN_VP_EXIT_CONTEXT const *pExit)
2269{
2270#ifdef LOG_ENABLED
2271 if (LogIs3Enabled())
2272 {
2273 int rc = nemHCWinCopyStateFromHyperV(pVM, pVCpu, CPUMCTX_EXTRN_ALL);
2274 AssertRCReturn(rc, rc);
2275
2276 nemR3WinLogState(pVM, pVCpu);
2277 }
2278#endif
2279
2280 switch (pExit->ExitReason)
2281 {
2282 case WHvRunVpExitReasonUnmappedGpa:
2283 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitMemUnmapped);
2284 return nemR3WinHandleExitMemory(pVM, pVCpu, pExit);
2285
2286 case WHvRunVpExitReasonCanceled:
2287 Log4(("CanceledExit/%u\n", pVCpu->idCpu));
2288 return VINF_SUCCESS;
2289
2290 case WHvRunVpExitReasonHypercall:
2291 return nemR3WinHandleExitHypercall(pVM, pVCpu, pExit);
2292
2293 case 0x8001000c: /* WHvRunVpExitReasonArm64Reset */
2294 {
2295 if (pExit->Arm64Reset.ResetType == WHV_ARM64_RESET_CONTEXT_TYPE_POWER_OFF)
2296 return VMR3PowerOff(pVM->pUVM);
2297 else if (pExit->Arm64Reset.ResetType == WHV_ARM64_RESET_CONTEXT_TYPE_RESET)
2298 {
2299 VM_FF_SET(pVM, VM_FF_RESET);
2300 return VINF_EM_RESET;
2301 }
2302 else
2303 AssertLogRelFailedReturn(VERR_NEM_IPE_3);
2304 }
2305
2306 case WHvRunVpExitReasonUnrecoverableException:
2307 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitUnrecoverable);
2308 return nemR3WinHandleExitUnrecoverableException(pVM, pVCpu, pExit);
2309
2310 case WHvRunVpExitReasonUnsupportedFeature:
2311 case WHvRunVpExitReasonInvalidVpRegisterValue:
2312 LogRel(("Unimplemented exit:\n%.*Rhxd\n", (int)sizeof(*pExit), pExit));
2313 AssertLogRelMsgFailedReturn(("Unexpected exit on CPU #%u: %#x\n%.32Rhxd\n",
2314 pVCpu->idCpu, pExit->ExitReason, pExit), VERR_NEM_IPE_3);
2315
2316 /* Undesired exits: */
2317 case WHvRunVpExitReasonNone:
2318 default:
2319 LogRel(("Unknown exit:\n%.*Rhxd\n", (int)sizeof(*pExit), pExit));
2320 AssertLogRelMsgFailedReturn(("Unknown exit on CPU #%u: %#x!\n", pVCpu->idCpu, pExit->ExitReason), VERR_NEM_IPE_3);
2321 }
2322}
2323
2324
/**
 * Runs guest code on the calling EMT via Hyper-V (WHvRunVirtualProcessor)
 * until a force flag or an exit handler requires returning to the outer EM loop.
 *
 * @returns Strict VBox status code.
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
 */
VBOXSTRICTRC nemR3NativeRunGC(PVM pVM, PVMCPU pVCpu)
{
    LogFlow(("NEM/%u: %08RX64 pstate=%#08RX64 <=\n", pVCpu->idCpu, pVCpu->cpum.GstCtx.Pc, pVCpu->cpum.GstCtx.fPState));
#ifdef LOG_ENABLED
    if (LogIs3Enabled())
        nemR3WinLogState(pVM, pVCpu);
#endif

    /*
     * Try switch to NEM runloop state.
     */
    if (VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM, VMCPUSTATE_STARTED))
    { /* likely */ }
    else
    {
        VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM, VMCPUSTATE_STARTED_EXEC_NEM_CANCELED);
        LogFlow(("NEM/%u: returning immediately because canceled\n", pVCpu->idCpu));
        return VINF_SUCCESS;
    }

    /* Push CNTV_CTL_EL0/CNTV_CVAL_EL0 to Hyper-V if flagged dirty while we were
       outside the run loop (they are read back after every successful run below). */
    if (pVCpu->nem.s.fSyncCntvRegs)
    {
        static const WHV_REGISTER_NAME s_aNames[2] = { WHvArm64RegisterCntvCtlEl0, WHvArm64RegisterCntvCvalEl0 };
        WHV_REGISTER_VALUE aRegs[RT_ELEMENTS(s_aNames)];
        aRegs[0].Reg64 = pVCpu->cpum.GstCtx.CntvCtlEl0;
        aRegs[1].Reg64 = pVCpu->cpum.GstCtx.CntvCValEl0;

        HRESULT hrc = WHvSetVirtualProcessorRegisters(pVM->nem.s.hPartition, pVCpu->idCpu, s_aNames, RT_ELEMENTS(s_aNames), aRegs);
        AssertLogRelMsgReturn(SUCCEEDED(hrc),
                              ("WHvSetVirtualProcessorRegisters(%p, 0,{CNTV_CTL_EL0, CNTV_CVAL_EL0}, 2,) -> %Rhrc (Last=%#x/%u)\n",
                               pVM->nem.s.hPartition, pVCpu->idCpu, hrc, RTNtLastStatusValue(), RTNtLastErrorValue())
                              , VERR_NEM_IPE_9);
        pVCpu->nem.s.fSyncCntvRegs = false;
    }


    /*
     * The run loop.
     *
     * Current approach to state updating to use the sledgehammer and sync
     * everything every time.  This will be optimized later.
     */
    const bool fSingleStepping = DBGFIsStepping(pVCpu);
    VBOXSTRICTRC rcStrict = VINF_SUCCESS;
    for (unsigned iLoop = 0;; iLoop++)
    {
        /*
         * Poll timers and run for a bit.
         *
         * With the VID approach (ring-0 or ring-3) we can specify a timeout here,
         * so we take the time of the next timer event and uses that as a deadline.
         * The rounding heuristics are "tuned" so that rhel5 (1K timer) will boot fine.
         */
        /** @todo See if we cannot optimize this TMTimerPollGIP by only redoing
         *        the whole polling job when timers have changed... */
        uint64_t offDeltaIgnored;
        uint64_t const nsNextTimerEvt = TMTimerPollGIP(pVM, pVCpu, &offDeltaIgnored); NOREF(nsNextTimerEvt);
        if (   !VM_FF_IS_ANY_SET(pVM, VM_FF_EMT_RENDEZVOUS | VM_FF_TM_VIRTUAL_SYNC)
            && !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HM_TO_R3_MASK))
        {
            if (VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM_WAIT, VMCPUSTATE_STARTED_EXEC_NEM))
            {
                /* Ensure that Hyper-V has the whole state. */
                int rc2 = nemHCWinCopyStateToHyperV(pVM, pVCpu);
                AssertRCReturn(rc2, rc2);

#ifdef LOG_ENABLED
                if (LogIsFlowEnabled())
                {
                    static const WHV_REGISTER_NAME s_aNames[2] = { WHvArm64RegisterPc, WHvArm64RegisterPstate };
                    WHV_REGISTER_VALUE aRegs[RT_ELEMENTS(s_aNames)] = { { { {0, 0} } } };
                    WHvGetVirtualProcessorRegisters(pVM->nem.s.hPartition, pVCpu->idCpu, s_aNames, RT_ELEMENTS(s_aNames), aRegs);
                    LogFlow(("NEM/%u: Entry @ %08RX64 pstate=%#RX64\n", pVCpu->idCpu, aRegs[0].Reg64, aRegs[1].Reg64));
                }
#endif

                MY_WHV_RUN_VP_EXIT_CONTEXT ExitReason = {0};
                TMNotifyStartOfExecution(pVM, pVCpu);

                HRESULT hrc = WHvRunVirtualProcessor(pVM->nem.s.hPartition, pVCpu->idCpu, &ExitReason, sizeof(ExitReason));

                VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM, VMCPUSTATE_STARTED_EXEC_NEM_WAIT);
                TMNotifyEndOfExecution(pVM, pVCpu, ASMReadTSC());
#ifdef LOG_ENABLED
                if (LogIsFlowEnabled())
                {
                    static const WHV_REGISTER_NAME s_aNames[2] = { WHvArm64RegisterPc, WHvArm64RegisterPstate };
                    WHV_REGISTER_VALUE aRegs[RT_ELEMENTS(s_aNames)] = { { { {0, 0} } } };
                    WHvGetVirtualProcessorRegisters(pVM->nem.s.hPartition, pVCpu->idCpu, s_aNames, RT_ELEMENTS(s_aNames), aRegs);
                    LogFlow(("NEM/%u: Exit @ %08RX64 pstate=%#RX64 Reason=%#x\n",
                             pVCpu->idCpu, aRegs[0].Reg64, aRegs[1].Reg64, ExitReason.ExitReason));
                }
#endif
                if (SUCCEEDED(hrc))
                {
                    /* Always sync the CNTV_CTL_EL0/CNTV_CVAL_EL0 registers, just like we do on macOS. */
                    static const WHV_REGISTER_NAME s_aNames[2] = { WHvArm64RegisterCntvCtlEl0, WHvArm64RegisterCntvCvalEl0 };
                    WHV_REGISTER_VALUE aRegs[RT_ELEMENTS(s_aNames)] = { { { {0, 0} } } };
                    hrc = WHvGetVirtualProcessorRegisters(pVM->nem.s.hPartition, pVCpu->idCpu, s_aNames, RT_ELEMENTS(s_aNames), aRegs);
                    AssertLogRelMsgReturn(SUCCEEDED(hrc),
                                          ("WHvGetVirtualProcessorRegisters(%p, 0,{CNTV_CTL_EL0, CNTV_CVAL_EL0}, 2,) -> %Rhrc (Last=%#x/%u)\n",
                                           pVM->nem.s.hPartition, pVCpu->idCpu, hrc, RTNtLastStatusValue(), RTNtLastErrorValue())
                                          , VERR_NEM_IPE_9);

                    pVCpu->cpum.GstCtx.CntvCtlEl0  = aRegs[0].Reg64;
                    pVCpu->cpum.GstCtx.CntvCValEl0 = aRegs[1].Reg64;

                    /*
                     * Deal with the message.
                     */
                    rcStrict = nemR3WinHandleExit(pVM, pVCpu, &ExitReason);
                    if (rcStrict == VINF_SUCCESS)
                    { /* hopefully likely */ }
                    else
                    {
                        LogFlow(("NEM/%u: breaking: nemR3WinHandleExit -> %Rrc\n", pVCpu->idCpu, VBOXSTRICTRC_VAL(rcStrict) ));
                        STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatBreakOnStatus);
                        break;
                    }
                }
                else
                    AssertLogRelMsgFailedReturn(("WHvRunVirtualProcessor failed for CPU #%u: %#x (%u)\n",
                                                 pVCpu->idCpu, hrc, GetLastError()),
                                                VERR_NEM_IPE_0);

                /*
                 * If no relevant FFs are pending, loop.
                 */
                if (   !VM_FF_IS_ANY_SET(   pVM,   !fSingleStepping ? VM_FF_HP_R0_PRE_HM_MASK    : VM_FF_HP_R0_PRE_HM_STEP_MASK)
                    && !VMCPU_FF_IS_ANY_SET(pVCpu, !fSingleStepping ? VMCPU_FF_HP_R0_PRE_HM_MASK : VMCPU_FF_HP_R0_PRE_HM_STEP_MASK) )
                    continue;

                /** @todo Try handle pending flags, not just return to EM loops.  Take care
                 *        not to set important RCs here unless we've handled a message. */
                LogFlow(("NEM/%u: breaking: pending FF (%#x / %#RX64)\n",
                         pVCpu->idCpu, pVM->fGlobalForcedActions, (uint64_t)pVCpu->fLocalForcedActions));
                STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatBreakOnFFPost);
            }
            else
            {
                LogFlow(("NEM/%u: breaking: canceled %d (pre exec)\n", pVCpu->idCpu, VMCPU_GET_STATE(pVCpu) ));
                STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatBreakOnCancel);
            }
        }
        else
        {
            LogFlow(("NEM/%u: breaking: pending FF (pre exec)\n", pVCpu->idCpu));
            STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatBreakOnFFPre);
        }
        break;
    } /* the run loop */


    /*
     * If the CPU is running, make sure to stop it before we try sync back the
     * state and return to EM.  We don't sync back the whole state if we can help it.
     */
    if (!VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_EXEC_NEM))
        VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_EXEC_NEM_CANCELED);

    /* Import the guest-register state we anticipate EM will need; everything on
       EM-range statuses or failure, otherwise just the must-have subset. */
    if (pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_ALL)
    {
        /* Try anticipate what we might need. */
        uint64_t fImport = IEM_CPUMCTX_EXTRN_MUST_MASK;
        if (   (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST)
            || RT_FAILURE(rcStrict))
            fImport = CPUMCTX_EXTRN_ALL;
        else if (VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_IRQ | VMCPU_FF_INTERRUPT_FIQ))
            fImport |= IEM_CPUMCTX_EXTRN_XCPT_MASK;

        if (pVCpu->cpum.GstCtx.fExtrn & fImport)
        {
            int rc2 = nemHCWinCopyStateFromHyperV(pVM, pVCpu, fImport);
            if (RT_SUCCESS(rc2))
                pVCpu->cpum.GstCtx.fExtrn &= ~fImport;
            else if (RT_SUCCESS(rcStrict))
                rcStrict = rc2;
            if (!(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_ALL))
                pVCpu->cpum.GstCtx.fExtrn = 0;
            STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatImportOnReturn);
        }
        else
            STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatImportOnReturnSkipped);
    }
    else
    {
        STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatImportOnReturnSkipped);
        pVCpu->cpum.GstCtx.fExtrn = 0;
    }

#if 0
    UINT32 cbWritten;
    WHV_ARM64_LOCAL_INTERRUPT_CONTROLLER_STATE IntrState;
    HRESULT hrc = WHvGetVirtualProcessorState(pVM->nem.s.hPartition, pVCpu->idCpu, WHvVirtualProcessorStateTypeInterruptControllerState2,
                                              &IntrState, sizeof(IntrState), &cbWritten);
    AssertLogRelMsgReturn(SUCCEEDED(hrc),
                          ("WHvGetVirtualProcessorState(%p, %u,WHvVirtualProcessorStateTypeInterruptControllerState2,) -> %Rhrc (Last=%#x/%u)\n",
                           pVM->nem.s.hPartition, pVCpu->idCpu, hrc, RTNtLastStatusValue(), RTNtLastErrorValue())
                          , VERR_NEM_GET_REGISTERS_FAILED);
    LogFlowFunc(("IntrState: cbWritten=%u\n"));
    for (uint32_t i = 0; i < RT_ELEMENTS(IntrState.BankedInterruptState); i++)
    {
        WHV_ARM64_INTERRUPT_STATE *pState = &IntrState.BankedInterruptState[i];
        LogFlowFunc(("IntrState: Intr %u:\n"
                     "    Enabled=%RTbool\n"
                     "    EdgeTriggered=%RTbool\n"
                     "    Asserted=%RTbool\n"
                     "    SetPending=%RTbool\n"
                     "    Active=%RTbool\n"
                     "    Direct=%RTbool\n"
                     "    GicrIpriorityrConfigured=%u\n"
                     "    GicrIpriorityrActive=%u\n",
                     i, pState->Enabled, pState->EdgeTriggered, pState->Asserted, pState->SetPending, pState->Active, pState->Direct,
                     pState->GicrIpriorityrConfigured, pState->GicrIpriorityrActive));
    }
#endif

    LogFlow(("NEM/%u: %08RX64 pstate=%#08RX64 => %Rrc\n", pVCpu->idCpu, pVCpu->cpum.GstCtx.Pc.u64,
             pVCpu->cpum.GstCtx.fPState, VBOXSTRICTRC_VAL(rcStrict) ));
    return rcStrict;
}
2546
2547
2548VMMR3_INT_DECL(bool) NEMR3CanExecuteGuest(PVM pVM, PVMCPU pVCpu)
2549{
2550 Assert(VM_IS_NEM_ENABLED(pVM));
2551 RT_NOREF(pVM, pVCpu);
2552 return true;
2553}
2554
2555
2556VMMR3_INT_DECL(int) NEMR3Halt(PVM pVM, PVMCPU pVCpu)
2557{
2558 Assert(EMGetState(pVCpu) == EMSTATE_WAIT_SIPI);
2559
2560 /*
2561 * Force the vCPU to get out of the SIPI state and into the normal runloop
2562 * as Hyper-V doesn't cause VM exits for PSCI calls so we wouldn't notice when
2563 * when the guest brings APs online.
2564 * Instead we force the EMT to run the vCPU through Hyper-V which manages the state.
2565 */
2566 RT_NOREF(pVM);
2567 EMSetState(pVCpu, EMSTATE_HALTED);
2568 return VINF_EM_RESCHEDULE;
2569}
2570
2571
2572bool nemR3NativeSetSingleInstruction(PVM pVM, PVMCPU pVCpu, bool fEnable)
2573{
2574 NOREF(pVM); NOREF(pVCpu); NOREF(fEnable);
2575 return false;
2576}
2577
2578
2579void nemR3NativeNotifyFF(PVM pVM, PVMCPU pVCpu, uint32_t fFlags)
2580{
2581 Log8(("nemR3NativeNotifyFF: canceling %u\n", pVCpu->idCpu));
2582 if (pVM->nem.s.fCreatedEmts)
2583 {
2584 HRESULT hrc = WHvCancelRunVirtualProcessor(pVM->nem.s.hPartition, pVCpu->idCpu, 0);
2585 AssertMsg(SUCCEEDED(hrc), ("WHvCancelRunVirtualProcessor -> hrc=%Rhrc\n", hrc));
2586 RT_NOREF_PV(hrc);
2587 }
2588 RT_NOREF_PV(fFlags);
2589}
2590
2591
2592DECLHIDDEN(bool) nemR3NativeNotifyDebugEventChanged(PVM pVM, bool fUseDebugLoop)
2593{
2594 RT_NOREF(pVM, fUseDebugLoop);
2595 return false;
2596}
2597
2598
2599DECLHIDDEN(bool) nemR3NativeNotifyDebugEventChangedPerCpu(PVM pVM, PVMCPU pVCpu, bool fUseDebugLoop)
2600{
2601 RT_NOREF(pVM, pVCpu, fUseDebugLoop);
2602 return false;
2603}
2604
2605
2606DECLINLINE(int) nemR3NativeGCPhys2R3PtrReadOnly(PVM pVM, RTGCPHYS GCPhys, const void **ppv)
2607{
2608 PGMPAGEMAPLOCK Lock;
2609 int rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys, ppv, &Lock);
2610 if (RT_SUCCESS(rc))
2611 PGMPhysReleasePageMappingLock(pVM, &Lock);
2612 return rc;
2613}
2614
2615
2616DECLINLINE(int) nemR3NativeGCPhys2R3PtrWriteable(PVM pVM, RTGCPHYS GCPhys, void **ppv)
2617{
2618 PGMPAGEMAPLOCK Lock;
2619 int rc = PGMPhysGCPhys2CCPtr(pVM, GCPhys, ppv, &Lock);
2620 if (RT_SUCCESS(rc))
2621 PGMPhysReleasePageMappingLock(pVM, &Lock);
2622 return rc;
2623}
2624
2625
2626VMMR3_INT_DECL(int) NEMR3NotifyPhysRamRegister(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, void *pvR3,
2627 uint8_t *pu2State, uint32_t *puNemRange)
2628{
2629 Log5(("NEMR3NotifyPhysRamRegister: %RGp LB %RGp, pvR3=%p pu2State=%p (%d) puNemRange=%p (%d)\n",
2630 GCPhys, cb, pvR3, pu2State, pu2State, puNemRange, *puNemRange));
2631
2632 *pu2State = UINT8_MAX;
2633 RT_NOREF(puNemRange);
2634
2635 if (pvR3)
2636 {
2637 STAM_REL_PROFILE_START(&pVM->nem.s.StatProfMapGpaRange, a);
2638 HRESULT hrc = WHvMapGpaRange(pVM->nem.s.hPartition, pvR3, GCPhys, cb,
2639 WHvMapGpaRangeFlagRead | WHvMapGpaRangeFlagWrite | WHvMapGpaRangeFlagExecute);
2640 STAM_REL_PROFILE_STOP(&pVM->nem.s.StatProfMapGpaRange, a);
2641 if (SUCCEEDED(hrc))
2642 *pu2State = NEM_WIN_PAGE_STATE_WRITABLE;
2643 else
2644 {
2645 LogRel(("NEMR3NotifyPhysRamRegister: GCPhys=%RGp LB %RGp pvR3=%p hrc=%Rhrc (%#x) Last=%#x/%u\n",
2646 GCPhys, cb, pvR3, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
2647 STAM_REL_COUNTER_INC(&pVM->nem.s.StatMapPageFailed);
2648 return VERR_NEM_MAP_PAGES_FAILED;
2649 }
2650 }
2651 return VINF_SUCCESS;
2652}
2653
2654
2655VMMR3_INT_DECL(bool) NEMR3IsMmio2DirtyPageTrackingSupported(PVM pVM)
2656{
2657 RT_NOREF(pVM);
2658 return g_pfnWHvQueryGpaRangeDirtyBitmap != NULL;
2659}
2660
2661
/**
 * Early notification that an MMIO(2) range is being mapped.
 *
 * Unmaps any RAM being replaced, then maps the MMIO2 ring-3 backing (if any)
 * into the Hyper-V partition, optionally with dirty-page tracking.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   GCPhys      Start of the range.
 * @param   cb          Size of the range.
 * @param   fFlags      NEM_NOTIFY_PHYS_MMIO_EX_F_XXX.
 * @param   pvRam       The RAM backing being replaced (unused here).
 * @param   pvMmio2     Ring-3 pointer to the MMIO2 backing, NULL for pure MMIO.
 * @param   pu2State    Where to return the new NEM page state.
 * @param   puNemRange  NEM range cookie (unused here).
 */
VMMR3_INT_DECL(int) NEMR3NotifyPhysMmioExMapEarly(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, uint32_t fFlags,
                                                  void *pvRam, void *pvMmio2, uint8_t *pu2State, uint32_t *puNemRange)
{
    Log5(("NEMR3NotifyPhysMmioExMapEarly: %RGp LB %RGp fFlags=%#x pvRam=%p pvMmio2=%p pu2State=%p (%d) puNemRange=%p (%#x)\n",
          GCPhys, cb, fFlags, pvRam, pvMmio2, pu2State, *pu2State, puNemRange, puNemRange ? *puNemRange : UINT32_MAX));
    RT_NOREF(puNemRange);

    /*
     * Unmap the RAM we're replacing.
     */
    if (fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_REPLACE)
    {
        STAM_REL_PROFILE_START(&pVM->nem.s.StatProfUnmapGpaRange, a);
        HRESULT hrc = WHvUnmapGpaRange(pVM->nem.s.hPartition, GCPhys, cb);
        STAM_REL_PROFILE_STOP(&pVM->nem.s.StatProfUnmapGpaRange, a);
        if (SUCCEEDED(hrc))
        { /* likely */ }
        else if (pvMmio2)
            /* An MMIO2 mapping follows and will replace whatever is there, so
               an unmap failure is only logged, not fatal. */
            LogRel(("NEMR3NotifyPhysMmioExMapEarly: GCPhys=%RGp LB %RGp fFlags=%#x: Unmap -> hrc=%Rhrc (%#x) Last=%#x/%u (ignored)\n",
                    GCPhys, cb, fFlags, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
        else
        {
            LogRel(("NEMR3NotifyPhysMmioExMapEarly: GCPhys=%RGp LB %RGp fFlags=%#x: Unmap -> hrc=%Rhrc (%#x) Last=%#x/%u\n",
                    GCPhys, cb, fFlags, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
            STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapPageFailed);
            return VERR_NEM_UNMAP_PAGES_FAILED;
        }
    }

    /*
     * Map MMIO2 if any.
     */
    if (pvMmio2)
    {
        Assert(fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_MMIO2);
        WHV_MAP_GPA_RANGE_FLAGS fWHvFlags = WHvMapGpaRangeFlagRead | WHvMapGpaRangeFlagWrite | WHvMapGpaRangeFlagExecute;
        /* Only request dirty tracking when the host actually supports it. */
        if ((fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_TRACK_DIRTY_PAGES) && g_pfnWHvQueryGpaRangeDirtyBitmap)
            fWHvFlags |= WHvMapGpaRangeFlagTrackDirtyPages;
        STAM_REL_PROFILE_START(&pVM->nem.s.StatProfMapGpaRange, a);
        HRESULT hrc = WHvMapGpaRange(pVM->nem.s.hPartition, pvMmio2, GCPhys, cb, fWHvFlags);
        STAM_REL_PROFILE_STOP(&pVM->nem.s.StatProfMapGpaRange, a);
        if (SUCCEEDED(hrc))
            *pu2State = NEM_WIN_PAGE_STATE_WRITABLE;
        else
        {
            LogRel(("NEMR3NotifyPhysMmioExMapEarly: GCPhys=%RGp LB %RGp fFlags=%#x pvMmio2=%p fWHvFlags=%#x: Map -> hrc=%Rhrc (%#x) Last=%#x/%u\n",
                    GCPhys, cb, fFlags, pvMmio2, fWHvFlags, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
            STAM_REL_COUNTER_INC(&pVM->nem.s.StatMapPageFailed);
            return VERR_NEM_MAP_PAGES_FAILED;
        }
    }
    else
    {
        Assert(!(fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_MMIO2));
        *pu2State = NEM_WIN_PAGE_STATE_UNMAPPED;
    }
    RT_NOREF(pvRam);
    return VINF_SUCCESS;
}
2721
2722
2723VMMR3_INT_DECL(int) NEMR3NotifyPhysMmioExMapLate(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, uint32_t fFlags,
2724 void *pvRam, void *pvMmio2, uint32_t *puNemRange)
2725{
2726 RT_NOREF(pVM, GCPhys, cb, fFlags, pvRam, pvMmio2, puNemRange);
2727 return VINF_SUCCESS;
2728}
2729
2730
2731VMMR3_INT_DECL(int) NEMR3NotifyPhysMmioExUnmap(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, uint32_t fFlags, void *pvRam,
2732 void *pvMmio2, uint8_t *pu2State, uint32_t *puNemRange)
2733{
2734 int rc = VINF_SUCCESS;
2735 Log5(("NEMR3NotifyPhysMmioExUnmap: %RGp LB %RGp fFlags=%#x pvRam=%p pvMmio2=%p pu2State=%p uNemRange=%#x (%#x)\n",
2736 GCPhys, cb, fFlags, pvRam, pvMmio2, pu2State, puNemRange, *puNemRange));
2737
2738 /*
2739 * Unmap the MMIO2 pages.
2740 */
2741 /** @todo If we implement aliasing (MMIO2 page aliased into MMIO range),
2742 * we may have more stuff to unmap even in case of pure MMIO... */
2743 if (fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_MMIO2)
2744 {
2745 STAM_REL_PROFILE_START(&pVM->nem.s.StatProfUnmapGpaRange, a);
2746 HRESULT hrc = WHvUnmapGpaRange(pVM->nem.s.hPartition, GCPhys, cb);
2747 STAM_REL_PROFILE_STOP(&pVM->nem.s.StatProfUnmapGpaRange, a);
2748 if (FAILED(hrc))
2749 {
2750 LogRel2(("NEMR3NotifyPhysMmioExUnmap: GCPhys=%RGp LB %RGp fFlags=%#x: Unmap -> hrc=%Rhrc (%#x) Last=%#x/%u (ignored)\n",
2751 GCPhys, cb, fFlags, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
2752 rc = VERR_NEM_UNMAP_PAGES_FAILED;
2753 STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapPageFailed);
2754 }
2755 }
2756
2757 /*
2758 * Restore the RAM we replaced.
2759 */
2760 if (fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_REPLACE)
2761 {
2762 AssertPtr(pvRam);
2763 STAM_REL_PROFILE_START(&pVM->nem.s.StatProfMapGpaRange, a);
2764 HRESULT hrc = WHvMapGpaRange(pVM->nem.s.hPartition, pvRam, GCPhys, cb,
2765 WHvMapGpaRangeFlagRead | WHvMapGpaRangeFlagWrite | WHvMapGpaRangeFlagExecute);
2766 STAM_REL_PROFILE_STOP(&pVM->nem.s.StatProfMapGpaRange, a);
2767 if (SUCCEEDED(hrc))
2768 { /* likely */ }
2769 else
2770 {
2771 LogRel(("NEMR3NotifyPhysMmioExUnmap: GCPhys=%RGp LB %RGp pvMmio2=%p hrc=%Rhrc (%#x) Last=%#x/%u\n",
2772 GCPhys, cb, pvMmio2, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
2773 rc = VERR_NEM_MAP_PAGES_FAILED;
2774 STAM_REL_COUNTER_INC(&pVM->nem.s.StatMapPageFailed);
2775 }
2776 if (pu2State)
2777 *pu2State = NEM_WIN_PAGE_STATE_WRITABLE;
2778 }
2779 /* Mark the pages as unmapped if relevant. */
2780 else if (pu2State)
2781 *pu2State = NEM_WIN_PAGE_STATE_UNMAPPED;
2782
2783 RT_NOREF(pvMmio2, puNemRange);
2784 return rc;
2785}
2786
2787
2788VMMR3_INT_DECL(int) NEMR3PhysMmio2QueryAndResetDirtyBitmap(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, uint32_t uNemRange,
2789 void *pvBitmap, size_t cbBitmap)
2790{
2791 Assert(VM_IS_NEM_ENABLED(pVM));
2792 AssertReturn(g_pfnWHvQueryGpaRangeDirtyBitmap, VERR_INTERNAL_ERROR_2);
2793 Assert(cbBitmap == (uint32_t)cbBitmap);
2794 RT_NOREF(uNemRange);
2795
2796 /* This is being profiled by PGM, see /PGM/Mmio2QueryAndResetDirtyBitmap. */
2797 HRESULT hrc = WHvQueryGpaRangeDirtyBitmap(pVM->nem.s.hPartition, GCPhys, cb, (UINT64 *)pvBitmap, (uint32_t)cbBitmap);
2798 if (SUCCEEDED(hrc))
2799 return VINF_SUCCESS;
2800
2801 AssertLogRelMsgFailed(("GCPhys=%RGp LB %RGp pvBitmap=%p LB %#zx hrc=%Rhrc (%#x) Last=%#x/%u\n",
2802 GCPhys, cb, pvBitmap, cbBitmap, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
2803 return VERR_NEM_QUERY_DIRTY_BITMAP_FAILED;
2804}
2805
2806
2807VMMR3_INT_DECL(int) NEMR3NotifyPhysRomRegisterEarly(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, void *pvPages, uint32_t fFlags,
2808 uint8_t *pu2State, uint32_t *puNemRange)
2809{
2810 RT_NOREF(pVM, GCPhys, cb, pvPages, fFlags, puNemRange);
2811
2812 Log5(("NEMR3NotifyPhysRomRegisterEarly: %RGp LB %RGp pvPages=%p fFlags=%#x\n", GCPhys, cb, pvPages, fFlags));
2813 *pu2State = UINT8_MAX;
2814 *puNemRange = 0;
2815 return VINF_SUCCESS;
2816}
2817
2818
2819VMMR3_INT_DECL(int) NEMR3NotifyPhysRomRegisterLate(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, void *pvPages,
2820 uint32_t fFlags, uint8_t *pu2State, uint32_t *puNemRange)
2821{
2822 Log5(("nemR3NativeNotifyPhysRomRegisterLate: %RGp LB %RGp pvPages=%p fFlags=%#x pu2State=%p (%d) puNemRange=%p (%#x)\n",
2823 GCPhys, cb, pvPages, fFlags, pu2State, *pu2State, puNemRange, *puNemRange));
2824 *pu2State = UINT8_MAX;
2825
2826 /*
2827 * (Re-)map readonly.
2828 */
2829 AssertPtrReturn(pvPages, VERR_INVALID_POINTER);
2830 STAM_REL_PROFILE_START(&pVM->nem.s.StatProfMapGpaRange, a);
2831 HRESULT hrc = WHvMapGpaRange(pVM->nem.s.hPartition, pvPages, GCPhys, cb, WHvMapGpaRangeFlagRead | WHvMapGpaRangeFlagExecute);
2832 STAM_REL_PROFILE_STOP(&pVM->nem.s.StatProfMapGpaRange, a);
2833 if (SUCCEEDED(hrc))
2834 *pu2State = NEM_WIN_PAGE_STATE_READABLE;
2835 else
2836 {
2837 LogRel(("nemR3NativeNotifyPhysRomRegisterEarly: GCPhys=%RGp LB %RGp pvPages=%p fFlags=%#x hrc=%Rhrc (%#x) Last=%#x/%u\n",
2838 GCPhys, cb, pvPages, fFlags, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
2839 STAM_REL_COUNTER_INC(&pVM->nem.s.StatMapPageFailed);
2840 return VERR_NEM_MAP_PAGES_FAILED;
2841 }
2842 RT_NOREF(fFlags, puNemRange);
2843 return VINF_SUCCESS;
2844}
2845
2846VMMR3_INT_DECL(void) NEMR3NotifySetA20(PVMCPU pVCpu, bool fEnabled)
2847{
2848 Log(("nemR3NativeNotifySetA20: fEnabled=%RTbool\n", fEnabled));
2849 Assert(VM_IS_NEM_ENABLED(pVCpu->CTX_SUFF(pVM)));
2850 RT_NOREF(pVCpu, fEnabled);
2851}
2852
2853
2854void nemHCNativeNotifyHandlerPhysicalRegister(PVMCC pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhys, RTGCPHYS cb)
2855{
2856 Log5(("nemHCNativeNotifyHandlerPhysicalRegister: %RGp LB %RGp enmKind=%d\n", GCPhys, cb, enmKind));
2857 NOREF(pVM); NOREF(enmKind); NOREF(GCPhys); NOREF(cb);
2858}
2859
2860
2861VMM_INT_DECL(void) NEMHCNotifyHandlerPhysicalDeregister(PVMCC pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhys, RTGCPHYS cb,
2862 RTR3PTR pvMemR3, uint8_t *pu2State)
2863{
2864 Log5(("NEMHCNotifyHandlerPhysicalDeregister: %RGp LB %RGp enmKind=%d pvMemR3=%p pu2State=%p (%d)\n",
2865 GCPhys, cb, enmKind, pvMemR3, pu2State, *pu2State));
2866
2867 *pu2State = UINT8_MAX;
2868 if (pvMemR3)
2869 {
2870 STAM_REL_PROFILE_START(&pVM->nem.s.StatProfMapGpaRange, a);
2871 HRESULT hrc = WHvMapGpaRange(pVM->nem.s.hPartition, pvMemR3, GCPhys, cb,
2872 WHvMapGpaRangeFlagRead | WHvMapGpaRangeFlagExecute | WHvMapGpaRangeFlagWrite);
2873 STAM_REL_PROFILE_STOP(&pVM->nem.s.StatProfMapGpaRange, a);
2874 if (SUCCEEDED(hrc))
2875 *pu2State = NEM_WIN_PAGE_STATE_WRITABLE;
2876 else
2877 AssertLogRelMsgFailed(("NEMHCNotifyHandlerPhysicalDeregister: WHvMapGpaRange(,%p,%RGp,%RGp,) -> %Rhrc\n",
2878 pvMemR3, GCPhys, cb, hrc));
2879 }
2880 RT_NOREF(enmKind);
2881}
2882
2883
2884void nemHCNativeNotifyHandlerPhysicalModify(PVMCC pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhysOld,
2885 RTGCPHYS GCPhysNew, RTGCPHYS cb, bool fRestoreAsRAM)
2886{
2887 Log5(("nemHCNativeNotifyHandlerPhysicalModify: %RGp LB %RGp -> %RGp enmKind=%d fRestoreAsRAM=%d\n",
2888 GCPhysOld, cb, GCPhysNew, enmKind, fRestoreAsRAM));
2889 NOREF(pVM); NOREF(enmKind); NOREF(GCPhysOld); NOREF(GCPhysNew); NOREF(cb); NOREF(fRestoreAsRAM);
2890}
2891
2892
2893int nemHCNativeNotifyPhysPageAllocated(PVMCC pVM, RTGCPHYS GCPhys, RTHCPHYS HCPhys, uint32_t fPageProt,
2894 PGMPAGETYPE enmType, uint8_t *pu2State)
2895{
2896 Log5(("nemHCNativeNotifyPhysPageAllocated: %RGp HCPhys=%RHp fPageProt=%#x enmType=%d *pu2State=%d\n",
2897 GCPhys, HCPhys, fPageProt, enmType, *pu2State));
2898 RT_NOREF(pVM, GCPhys, HCPhys, fPageProt, enmType, pu2State);
2899
2900 AssertFailed();
2901 return VINF_SUCCESS;
2902}
2903
2904
2905VMM_INT_DECL(void) NEMHCNotifyPhysPageProtChanged(PVMCC pVM, RTGCPHYS GCPhys, RTHCPHYS HCPhys, RTR3PTR pvR3, uint32_t fPageProt,
2906 PGMPAGETYPE enmType, uint8_t *pu2State)
2907{
2908 Log5(("NEMHCNotifyPhysPageProtChanged: %RGp HCPhys=%RHp fPageProt=%#x enmType=%d *pu2State=%d\n",
2909 GCPhys, HCPhys, fPageProt, enmType, *pu2State));
2910 RT_NOREF(pVM, GCPhys, HCPhys, pvR3, fPageProt, enmType, pu2State);
2911}
2912
2913
2914VMM_INT_DECL(void) NEMHCNotifyPhysPageChanged(PVMCC pVM, RTGCPHYS GCPhys, RTHCPHYS HCPhysPrev, RTHCPHYS HCPhysNew,
2915 RTR3PTR pvNewR3, uint32_t fPageProt, PGMPAGETYPE enmType, uint8_t *pu2State)
2916{
2917 Log5(("NEMHCNotifyPhysPageChanged: %RGp HCPhys=%RHp->%RHp fPageProt=%#x enmType=%d *pu2State=%d\n",
2918 GCPhys, HCPhysPrev, HCPhysNew, fPageProt, enmType, *pu2State));
2919 RT_NOREF(pVM, GCPhys, HCPhysPrev, HCPhysNew, pvNewR3, fPageProt, enmType, pu2State);
2920
2921 AssertFailed();
2922}
2923
2924
2925/**
2926 * Returns features supported by the NEM backend.
2927 *
2928 * @returns Flags of features supported by the native NEM backend.
2929 * @param pVM The cross context VM structure.
2930 */
2931VMM_INT_DECL(uint32_t) NEMHCGetFeatures(PVMCC pVM)
2932{
2933 RT_NOREF(pVM);
2934 /** @todo Is NEM_FEAT_F_FULL_GST_EXEC always true? */
2935 return NEM_FEAT_F_NESTED_PAGING | NEM_FEAT_F_FULL_GST_EXEC;
2936}
2937
2938
2939/** @page pg_nem_win_aarmv8 NEM/win - Native Execution Manager, Windows.
2940 *
2941 * Open questions:
2942 * - InstructionByteCount and InstructionBytes for unmapped GPA exit are zero...
2943 */
2944
Note: See TracBrowser for help on using the repository browser.

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette