VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMSwitcher.cpp@ 14597

Last change on this file since 14597 was 14597, checked in by vboxsync, 16 years ago

Added R0 address to MMR3HyperMapHCPhys and made the MMHyperXToR0 functions use pvR0 for HCPhys and Locked mappings more strictly.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 35.4 KB
Line 
1/* $Id: VMMSwitcher.cpp 14597 2008-11-25 20:41:40Z vboxsync $ */
2/** @file
3 * VMM - The Virtual Machine Monitor, World Switcher(s).
4 */
5
6/*
7 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18 * Clara, CA 95054 USA or visit http://www.sun.com if you need
19 * additional information or have any questions.
20 */
21
22/*******************************************************************************
23* Header Files *
24*******************************************************************************/
25#define LOG_GROUP LOG_GROUP_VMM
26#include <VBox/vmm.h>
27#include <VBox/pgm.h>
28#include <VBox/selm.h>
29#include <VBox/mm.h>
30#include <VBox/sup.h>
31#include "VMMInternal.h"
32#include "VMMSwitcher/VMMSwitcher.h"
33#include <VBox/vm.h>
34#include <VBox/dis.h>
35
36#include <VBox/err.h>
37#include <VBox/param.h>
38#include <iprt/assert.h>
39#include <iprt/alloc.h>
40#include <iprt/asm.h>
41#include <iprt/string.h>
42#include <iprt/ctype.h>
43
44
45/*******************************************************************************
46* Global Variables *
47*******************************************************************************/
/** Array of switcher definitions.
 * The type and index shall match!
 *
 * NULL slots are switchers that are either not built for the host
 * architecture or deliberately disabled; slot 0 is VMMSWITCHER_INVALID.
 * VMMR3SelectSwitcher() returns VERR_NOT_IMPLEMENTED for NULL slots.
 */
static PVMMSWITCHERDEF s_apSwitchers[VMMSWITCHER_MAX] =
{
    NULL, /* invalid entry */
#ifndef RT_ARCH_AMD64
    &vmmR3Switcher32BitTo32Bit_Def,
    &vmmR3Switcher32BitToPAE_Def,
    NULL,   //&vmmR3Switcher32BitToAMD64_Def - disabled because it causes assertions.
    &vmmR3SwitcherPAETo32Bit_Def,
    &vmmR3SwitcherPAEToPAE_Def,
    NULL,   //&vmmR3SwitcherPAEToAMD64_Def,
    NULL,   //&vmmR3SwitcherPAETo32Bit_Def,
# ifdef VBOX_WITH_HYBIRD_32BIT_KERNEL
    &vmmR3SwitcherAMD64ToPAE_Def,
# else
    NULL,   //&vmmR3SwitcherAMD64ToPAE_Def,
# endif
    NULL    //&vmmR3SwitcherAMD64ToAMD64_Def,
#else  /* RT_ARCH_AMD64 */
    NULL,   //&vmmR3Switcher32BitTo32Bit_Def,
    NULL,   //&vmmR3Switcher32BitToPAE_Def,
    NULL,   //&vmmR3Switcher32BitToAMD64_Def,
    NULL,   //&vmmR3SwitcherPAETo32Bit_Def,
    NULL,   //&vmmR3SwitcherPAEToPAE_Def,
    NULL,   //&vmmR3SwitcherPAEToAMD64_Def,
    &vmmR3SwitcherAMD64To32Bit_Def,
    &vmmR3SwitcherAMD64ToPAE_Def,
    NULL    //&vmmR3SwitcherAMD64ToAMD64_Def,
#endif /* RT_ARCH_AMD64 */
};
80
81
82/**
83 * VMMR3Init worker that initiates the switcher code (aka core code).
84 *
85 * This is core per VM code which might need fixups and/or for ease of use are
86 * put on linear contiguous backing.
87 *
88 * @returns VBox status code.
89 * @param pVM Pointer to the shared VM structure.
90 */
91int vmmR3SwitcherInit(PVM pVM)
92{
93 /*
94 * Calc the size.
95 */
96 unsigned cbCoreCode = 0;
97 for (unsigned iSwitcher = 0; iSwitcher < RT_ELEMENTS(s_apSwitchers); iSwitcher++)
98 {
99 pVM->vmm.s.aoffSwitchers[iSwitcher] = cbCoreCode;
100 PVMMSWITCHERDEF pSwitcher = s_apSwitchers[iSwitcher];
101 if (pSwitcher)
102 {
103 AssertRelease((unsigned)pSwitcher->enmType == iSwitcher);
104 cbCoreCode += RT_ALIGN_32(pSwitcher->cbCode + 1, 32);
105 }
106 }
107
108 /*
109 * Allocate continguous pages for switchers and deal with
110 * conflicts in the intermediate mapping of the code.
111 */
112 pVM->vmm.s.cbCoreCode = RT_ALIGN_32(cbCoreCode, PAGE_SIZE);
113 pVM->vmm.s.pvCoreCodeR3 = SUPContAlloc2(pVM->vmm.s.cbCoreCode >> PAGE_SHIFT, &pVM->vmm.s.pvCoreCodeR0, &pVM->vmm.s.HCPhysCoreCode);
114 int rc = VERR_NO_MEMORY;
115 if (pVM->vmm.s.pvCoreCodeR3)
116 {
117 rc = PGMR3MapIntermediate(pVM, pVM->vmm.s.pvCoreCodeR0, pVM->vmm.s.HCPhysCoreCode, cbCoreCode);
118 if (rc == VERR_PGM_INTERMEDIATE_PAGING_CONFLICT)
119 {
120 /* try more allocations - Solaris, Linux. */
121 const unsigned cTries = 8234;
122 struct VMMInitBadTry
123 {
124 RTR0PTR pvR0;
125 void *pvR3;
126 RTHCPHYS HCPhys;
127 RTUINT cb;
128 } *paBadTries = (struct VMMInitBadTry *)RTMemTmpAlloc(sizeof(*paBadTries) * cTries);
129 AssertReturn(paBadTries, VERR_NO_TMP_MEMORY);
130 unsigned i = 0;
131 do
132 {
133 paBadTries[i].pvR3 = pVM->vmm.s.pvCoreCodeR3;
134 paBadTries[i].pvR0 = pVM->vmm.s.pvCoreCodeR0;
135 paBadTries[i].HCPhys = pVM->vmm.s.HCPhysCoreCode;
136 i++;
137 pVM->vmm.s.pvCoreCodeR0 = NIL_RTR0PTR;
138 pVM->vmm.s.HCPhysCoreCode = NIL_RTHCPHYS;
139 pVM->vmm.s.pvCoreCodeR3 = SUPContAlloc2(pVM->vmm.s.cbCoreCode >> PAGE_SHIFT, &pVM->vmm.s.pvCoreCodeR0, &pVM->vmm.s.HCPhysCoreCode);
140 if (!pVM->vmm.s.pvCoreCodeR3)
141 break;
142 rc = PGMR3MapIntermediate(pVM, pVM->vmm.s.pvCoreCodeR0, pVM->vmm.s.HCPhysCoreCode, cbCoreCode);
143 } while ( rc == VERR_PGM_INTERMEDIATE_PAGING_CONFLICT
144 && i < cTries - 1);
145
146 /* cleanup */
147 if (RT_FAILURE(rc))
148 {
149 paBadTries[i].pvR3 = pVM->vmm.s.pvCoreCodeR3;
150 paBadTries[i].pvR0 = pVM->vmm.s.pvCoreCodeR0;
151 paBadTries[i].HCPhys = pVM->vmm.s.HCPhysCoreCode;
152 paBadTries[i].cb = pVM->vmm.s.cbCoreCode;
153 i++;
154 LogRel(("Failed to allocated and map core code: rc=%Rrc\n", rc));
155 }
156 while (i-- > 0)
157 {
158 LogRel(("Core code alloc attempt #%d: pvR3=%p pvR0=%p HCPhys=%RHp\n",
159 i, paBadTries[i].pvR3, paBadTries[i].pvR0, paBadTries[i].HCPhys));
160 SUPContFree(paBadTries[i].pvR3, paBadTries[i].cb >> PAGE_SHIFT);
161 }
162 RTMemTmpFree(paBadTries);
163 }
164 }
165 if (RT_SUCCESS(rc))
166 {
167 /*
168 * copy the code.
169 */
170 for (unsigned iSwitcher = 0; iSwitcher < RT_ELEMENTS(s_apSwitchers); iSwitcher++)
171 {
172 PVMMSWITCHERDEF pSwitcher = s_apSwitchers[iSwitcher];
173 if (pSwitcher)
174 memcpy((uint8_t *)pVM->vmm.s.pvCoreCodeR3 + pVM->vmm.s.aoffSwitchers[iSwitcher],
175 pSwitcher->pvCode, pSwitcher->cbCode);
176 }
177
178 /*
179 * Map the code into the GC address space.
180 */
181 RTGCPTR GCPtr;
182 rc = MMR3HyperMapHCPhys(pVM, pVM->vmm.s.pvCoreCodeR3, pVM->vmm.s.pvCoreCodeR0, pVM->vmm.s.HCPhysCoreCode,
183 cbCoreCode, "Core Code", &GCPtr);
184 if (RT_SUCCESS(rc))
185 {
186 pVM->vmm.s.pvCoreCodeRC = GCPtr;
187 MMR3HyperReserve(pVM, PAGE_SIZE, "fence", NULL);
188 LogRel(("CoreCode: R3=%RHv R0=%RHv RC=%RRv Phys=%RHp cb=%#x\n",
189 pVM->vmm.s.pvCoreCodeR3, pVM->vmm.s.pvCoreCodeR0, pVM->vmm.s.pvCoreCodeRC, pVM->vmm.s.HCPhysCoreCode, pVM->vmm.s.cbCoreCode));
190
191 /*
192 * Finally, PGM probably have selected a switcher already but we need
193 * to get the routine addresses, so we'll reselect it.
194 * This may legally fail so, we're ignoring the rc.
195 */
196 VMMR3SelectSwitcher(pVM, pVM->vmm.s.enmSwitcher);
197 return rc;
198 }
199
200 /* shit */
201 AssertMsgFailed(("PGMR3Map(,%RRv, %RHp, %#x, 0) failed with rc=%Rrc\n", pVM->vmm.s.pvCoreCodeRC, pVM->vmm.s.HCPhysCoreCode, cbCoreCode, rc));
202 SUPContFree(pVM->vmm.s.pvCoreCodeR3, pVM->vmm.s.cbCoreCode >> PAGE_SHIFT);
203 }
204 else
205 VMSetError(pVM, rc, RT_SRC_POS,
206 N_("Failed to allocate %d bytes of contiguous memory for the world switcher code"),
207 cbCoreCode);
208
209 pVM->vmm.s.pvCoreCodeR3 = NULL;
210 pVM->vmm.s.pvCoreCodeR0 = NIL_RTR0PTR;
211 pVM->vmm.s.pvCoreCodeRC = 0;
212 return rc;
213}
214
215
216/**
217 * Relocate the switchers, called by VMMR#Relocate.
218 *
219 * @param pVM Pointer to the shared VM structure.
220 * @param offDelta The relocation delta.
221 */
222void vmmR3SwitcherRelocate(PVM pVM, RTGCINTPTR offDelta)
223{
224 /*
225 * Relocate all the switchers.
226 */
227 for (unsigned iSwitcher = 0; iSwitcher < RT_ELEMENTS(s_apSwitchers); iSwitcher++)
228 {
229 PVMMSWITCHERDEF pSwitcher = s_apSwitchers[iSwitcher];
230 if (pSwitcher && pSwitcher->pfnRelocate)
231 {
232 unsigned off = pVM->vmm.s.aoffSwitchers[iSwitcher];
233 pSwitcher->pfnRelocate(pVM,
234 pSwitcher,
235 pVM->vmm.s.pvCoreCodeR0 + off,
236 (uint8_t *)pVM->vmm.s.pvCoreCodeR3 + off,
237 pVM->vmm.s.pvCoreCodeRC + off,
238 pVM->vmm.s.HCPhysCoreCode + off);
239 }
240 }
241
242 /*
243 * Recalc the RC address for the current switcher.
244 */
245 PVMMSWITCHERDEF pSwitcher = s_apSwitchers[pVM->vmm.s.enmSwitcher];
246 RTRCPTR RCPtr = pVM->vmm.s.pvCoreCodeRC + pVM->vmm.s.aoffSwitchers[pVM->vmm.s.enmSwitcher];
247 pVM->vmm.s.pfnGuestToHostRC = RCPtr + pSwitcher->offGCGuestToHost;
248 pVM->vmm.s.pfnCallTrampolineRC = RCPtr + pSwitcher->offGCCallTrampoline;
249 pVM->pfnVMMGCGuestToHostAsm = RCPtr + pSwitcher->offGCGuestToHostAsm;
250 pVM->pfnVMMGCGuestToHostAsmHyperCtx = RCPtr + pSwitcher->offGCGuestToHostAsmHyperCtx;
251 pVM->pfnVMMGCGuestToHostAsmGuestCtx = RCPtr + pSwitcher->offGCGuestToHostAsmGuestCtx;
252
253// AssertFailed();
254}
255
256
257/**
258 * Generic switcher code relocator.
259 *
260 * @param pVM The VM handle.
261 * @param pSwitcher The switcher definition.
262 * @param pu8CodeR3 Pointer to the core code block for the switcher, ring-3 mapping.
263 * @param R0PtrCode Pointer to the core code block for the switcher, ring-0 mapping.
264 * @param GCPtrCode The guest context address corresponding to pu8Code.
265 * @param u32IDCode The identity mapped (ID) address corresponding to pu8Code.
266 * @param SelCS The hypervisor CS selector.
267 * @param SelDS The hypervisor DS selector.
268 * @param SelTSS The hypervisor TSS selector.
269 * @param GCPtrGDT The GC address of the hypervisor GDT.
270 * @param SelCS64 The 64-bit mode hypervisor CS selector.
271 */
272static void vmmR3SwitcherGenericRelocate(PVM pVM, PVMMSWITCHERDEF pSwitcher, RTR0PTR R0PtrCode, uint8_t *pu8CodeR3, RTGCPTR GCPtrCode, uint32_t u32IDCode,
273 RTSEL SelCS, RTSEL SelDS, RTSEL SelTSS, RTGCPTR GCPtrGDT, RTSEL SelCS64)
274{
275 union
276 {
277 const uint8_t *pu8;
278 const uint16_t *pu16;
279 const uint32_t *pu32;
280 const uint64_t *pu64;
281 const void *pv;
282 uintptr_t u;
283 } u;
284 u.pv = pSwitcher->pvFixups;
285
286 /*
287 * Process fixups.
288 */
289 uint8_t u8;
290 while ((u8 = *u.pu8++) != FIX_THE_END)
291 {
292 /*
293 * Get the source (where to write the fixup).
294 */
295 uint32_t offSrc = *u.pu32++;
296 Assert(offSrc < pSwitcher->cbCode);
297 union
298 {
299 uint8_t *pu8;
300 uint16_t *pu16;
301 uint32_t *pu32;
302 uint64_t *pu64;
303 uintptr_t u;
304 } uSrc;
305 uSrc.pu8 = pu8CodeR3 + offSrc;
306
307 /* The fixup target and method depends on the type. */
308 switch (u8)
309 {
310 /*
311 * 32-bit relative, source in HC and target in GC.
312 */
313 case FIX_HC_2_GC_NEAR_REL:
314 {
315 Assert(offSrc - pSwitcher->offHCCode0 < pSwitcher->cbHCCode0 || offSrc - pSwitcher->offHCCode1 < pSwitcher->cbHCCode1);
316 uint32_t offTrg = *u.pu32++;
317 Assert(offTrg - pSwitcher->offGCCode < pSwitcher->cbGCCode);
318 *uSrc.pu32 = (uint32_t)((GCPtrCode + offTrg) - (uSrc.u + 4));
319 break;
320 }
321
322 /*
323 * 32-bit relative, source in HC and target in ID.
324 */
325 case FIX_HC_2_ID_NEAR_REL:
326 {
327 Assert(offSrc - pSwitcher->offHCCode0 < pSwitcher->cbHCCode0 || offSrc - pSwitcher->offHCCode1 < pSwitcher->cbHCCode1);
328 uint32_t offTrg = *u.pu32++;
329 Assert(offTrg - pSwitcher->offIDCode0 < pSwitcher->cbIDCode0 || offTrg - pSwitcher->offIDCode1 < pSwitcher->cbIDCode1);
330 *uSrc.pu32 = (uint32_t)((u32IDCode + offTrg) - (R0PtrCode + offSrc + 4));
331 break;
332 }
333
334 /*
335 * 32-bit relative, source in GC and target in HC.
336 */
337 case FIX_GC_2_HC_NEAR_REL:
338 {
339 Assert(offSrc - pSwitcher->offGCCode < pSwitcher->cbGCCode);
340 uint32_t offTrg = *u.pu32++;
341 Assert(offTrg - pSwitcher->offHCCode0 < pSwitcher->cbHCCode0 || offTrg - pSwitcher->offHCCode1 < pSwitcher->cbHCCode1);
342 *uSrc.pu32 = (uint32_t)((R0PtrCode + offTrg) - (GCPtrCode + offSrc + 4));
343 break;
344 }
345
346 /*
347 * 32-bit relative, source in GC and target in ID.
348 */
349 case FIX_GC_2_ID_NEAR_REL:
350 {
351 Assert(offSrc - pSwitcher->offGCCode < pSwitcher->cbGCCode);
352 uint32_t offTrg = *u.pu32++;
353 Assert(offTrg - pSwitcher->offIDCode0 < pSwitcher->cbIDCode0 || offTrg - pSwitcher->offIDCode1 < pSwitcher->cbIDCode1);
354 *uSrc.pu32 = (uint32_t)((u32IDCode + offTrg) - (GCPtrCode + offSrc + 4));
355 break;
356 }
357
358 /*
359 * 32-bit relative, source in ID and target in HC.
360 */
361 case FIX_ID_2_HC_NEAR_REL:
362 {
363 Assert(offSrc - pSwitcher->offIDCode0 < pSwitcher->cbIDCode0 || offSrc - pSwitcher->offIDCode1 < pSwitcher->cbIDCode1);
364 uint32_t offTrg = *u.pu32++;
365 Assert(offTrg - pSwitcher->offHCCode0 < pSwitcher->cbHCCode0 || offTrg - pSwitcher->offHCCode1 < pSwitcher->cbHCCode1);
366 *uSrc.pu32 = (uint32_t)((R0PtrCode + offTrg) - (u32IDCode + offSrc + 4));
367 break;
368 }
369
370 /*
371 * 32-bit relative, source in ID and target in HC.
372 */
373 case FIX_ID_2_GC_NEAR_REL:
374 {
375 Assert(offSrc - pSwitcher->offIDCode0 < pSwitcher->cbIDCode0 || offSrc - pSwitcher->offIDCode1 < pSwitcher->cbIDCode1);
376 uint32_t offTrg = *u.pu32++;
377 Assert(offTrg - pSwitcher->offGCCode < pSwitcher->cbGCCode);
378 *uSrc.pu32 = (uint32_t)((GCPtrCode + offTrg) - (u32IDCode + offSrc + 4));
379 break;
380 }
381
382 /*
383 * 16:32 far jump, target in GC.
384 */
385 case FIX_GC_FAR32:
386 {
387 uint32_t offTrg = *u.pu32++;
388 Assert(offTrg - pSwitcher->offGCCode < pSwitcher->cbGCCode);
389 *uSrc.pu32++ = (uint32_t)(GCPtrCode + offTrg);
390 *uSrc.pu16++ = SelCS;
391 break;
392 }
393
394 /*
395 * Make 32-bit GC pointer given CPUM offset.
396 */
397 case FIX_GC_CPUM_OFF:
398 {
399 uint32_t offCPUM = *u.pu32++;
400 Assert(offCPUM < sizeof(pVM->cpum));
401 *uSrc.pu32 = (uint32_t)(VM_RC_ADDR(pVM, &pVM->cpum) + offCPUM);
402 break;
403 }
404
405 /*
406 * Make 32-bit GC pointer given VM offset.
407 */
408 case FIX_GC_VM_OFF:
409 {
410 uint32_t offVM = *u.pu32++;
411 Assert(offVM < sizeof(VM));
412 *uSrc.pu32 = (uint32_t)(VM_RC_ADDR(pVM, pVM) + offVM);
413 break;
414 }
415
416 /*
417 * Make 32-bit HC pointer given CPUM offset.
418 */
419 case FIX_HC_CPUM_OFF:
420 {
421 uint32_t offCPUM = *u.pu32++;
422 Assert(offCPUM < sizeof(pVM->cpum));
423 *uSrc.pu32 = (uint32_t)pVM->pVMR0 + RT_OFFSETOF(VM, cpum) + offCPUM;
424 break;
425 }
426
427 /*
428 * Make 32-bit R0 pointer given VM offset.
429 */
430 case FIX_HC_VM_OFF:
431 {
432 uint32_t offVM = *u.pu32++;
433 Assert(offVM < sizeof(VM));
434 *uSrc.pu32 = (uint32_t)pVM->pVMR0 + offVM;
435 break;
436 }
437
438 /*
439 * Store the 32-Bit CR3 (32-bit) for the intermediate memory context.
440 */
441 case FIX_INTER_32BIT_CR3:
442 {
443
444 *uSrc.pu32 = PGMGetInter32BitCR3(pVM);
445 break;
446 }
447
448 /*
449 * Store the PAE CR3 (32-bit) for the intermediate memory context.
450 */
451 case FIX_INTER_PAE_CR3:
452 {
453
454 *uSrc.pu32 = PGMGetInterPaeCR3(pVM);
455 break;
456 }
457
458 /*
459 * Store the AMD64 CR3 (32-bit) for the intermediate memory context.
460 */
461 case FIX_INTER_AMD64_CR3:
462 {
463
464 *uSrc.pu32 = PGMGetInterAmd64CR3(pVM);
465 break;
466 }
467
468 /*
469 * Store the 32-Bit CR3 (32-bit) for the hypervisor (shadow) memory context.
470 */
471 case FIX_HYPER_32BIT_CR3:
472 {
473
474 *uSrc.pu32 = PGMGetHyper32BitCR3(pVM);
475 break;
476 }
477
478 /*
479 * Store the PAE CR3 (32-bit) for the hypervisor (shadow) memory context.
480 */
481 case FIX_HYPER_PAE_CR3:
482 {
483
484 *uSrc.pu32 = PGMGetHyperPaeCR3(pVM);
485 break;
486 }
487
488 /*
489 * Store the AMD64 CR3 (32-bit) for the hypervisor (shadow) memory context.
490 */
491 case FIX_HYPER_AMD64_CR3:
492 {
493
494 *uSrc.pu32 = PGMGetHyperAmd64CR3(pVM);
495 break;
496 }
497
498 /*
499 * Store Hypervisor CS (16-bit).
500 */
501 case FIX_HYPER_CS:
502 {
503 *uSrc.pu16 = SelCS;
504 break;
505 }
506
507 /*
508 * Store Hypervisor DS (16-bit).
509 */
510 case FIX_HYPER_DS:
511 {
512 *uSrc.pu16 = SelDS;
513 break;
514 }
515
516 /*
517 * Store Hypervisor TSS (16-bit).
518 */
519 case FIX_HYPER_TSS:
520 {
521 *uSrc.pu16 = SelTSS;
522 break;
523 }
524
525 /*
526 * Store the 32-bit GC address of the 2nd dword of the TSS descriptor (in the GDT).
527 */
528 case FIX_GC_TSS_GDTE_DW2:
529 {
530 RTGCPTR GCPtr = GCPtrGDT + (SelTSS & ~7) + 4;
531 *uSrc.pu32 = (uint32_t)GCPtr;
532 break;
533 }
534
535
536 ///@todo case FIX_CR4_MASK:
537 ///@todo case FIX_CR4_OSFSXR:
538
539 /*
540 * Insert relative jump to specified target it FXSAVE/FXRSTOR isn't supported by the cpu.
541 */
542 case FIX_NO_FXSAVE_JMP:
543 {
544 uint32_t offTrg = *u.pu32++;
545 Assert(offTrg < pSwitcher->cbCode);
546 if (!CPUMSupportsFXSR(pVM))
547 {
548 *uSrc.pu8++ = 0xe9; /* jmp rel32 */
549 *uSrc.pu32++ = offTrg - (offSrc + 5);
550 }
551 else
552 {
553 *uSrc.pu8++ = *((uint8_t *)pSwitcher->pvCode + offSrc);
554 *uSrc.pu32++ = *(uint32_t *)((uint8_t *)pSwitcher->pvCode + offSrc + 1);
555 }
556 break;
557 }
558
559 /*
560 * Insert relative jump to specified target it SYSENTER isn't used by the host.
561 */
562 case FIX_NO_SYSENTER_JMP:
563 {
564 uint32_t offTrg = *u.pu32++;
565 Assert(offTrg < pSwitcher->cbCode);
566 if (!CPUMIsHostUsingSysEnter(pVM))
567 {
568 *uSrc.pu8++ = 0xe9; /* jmp rel32 */
569 *uSrc.pu32++ = offTrg - (offSrc + 5);
570 }
571 else
572 {
573 *uSrc.pu8++ = *((uint8_t *)pSwitcher->pvCode + offSrc);
574 *uSrc.pu32++ = *(uint32_t *)((uint8_t *)pSwitcher->pvCode + offSrc + 1);
575 }
576 break;
577 }
578
579 /*
580 * Insert relative jump to specified target it SYSENTER isn't used by the host.
581 */
582 case FIX_NO_SYSCALL_JMP:
583 {
584 uint32_t offTrg = *u.pu32++;
585 Assert(offTrg < pSwitcher->cbCode);
586 if (!CPUMIsHostUsingSysEnter(pVM))
587 {
588 *uSrc.pu8++ = 0xe9; /* jmp rel32 */
589 *uSrc.pu32++ = offTrg - (offSrc + 5);
590 }
591 else
592 {
593 *uSrc.pu8++ = *((uint8_t *)pSwitcher->pvCode + offSrc);
594 *uSrc.pu32++ = *(uint32_t *)((uint8_t *)pSwitcher->pvCode + offSrc + 1);
595 }
596 break;
597 }
598
599 /*
600 * 32-bit HC pointer fixup to (HC) target within the code (32-bit offset).
601 */
602 case FIX_HC_32BIT:
603 {
604 uint32_t offTrg = *u.pu32++;
605 Assert(offSrc < pSwitcher->cbCode);
606 Assert(offTrg - pSwitcher->offHCCode0 < pSwitcher->cbHCCode0 || offTrg - pSwitcher->offHCCode1 < pSwitcher->cbHCCode1);
607 *uSrc.pu32 = R0PtrCode + offTrg;
608 break;
609 }
610
611#if defined(RT_ARCH_AMD64) || defined(VBOX_WITH_HYBIRD_32BIT_KERNEL)
612 /*
613 * 64-bit HC pointer fixup to (HC) target within the code (32-bit offset).
614 */
615 case FIX_HC_64BIT:
616 {
617 uint32_t offTrg = *u.pu32++;
618 Assert(offSrc < pSwitcher->cbCode);
619 Assert(offTrg - pSwitcher->offHCCode0 < pSwitcher->cbHCCode0 || offTrg - pSwitcher->offHCCode1 < pSwitcher->cbHCCode1);
620 *uSrc.pu64 = R0PtrCode + offTrg;
621 break;
622 }
623
624 /*
625 * 64-bit HC Code Selector (no argument).
626 */
627 case FIX_HC_64BIT_CS:
628 {
629 Assert(offSrc < pSwitcher->cbCode);
630#if defined(RT_OS_DARWIN) && defined(VBOX_WITH_HYBIRD_32BIT_KERNEL)
631 *uSrc.pu16 = 0x80; /* KERNEL64_CS from i386/seg.h */
632#else
633 AssertFatalMsgFailed(("FIX_HC_64BIT_CS not implemented for this host\n"));
634#endif
635 break;
636 }
637
638 /*
639 * 64-bit HC pointer to the CPUM instance data (no argument).
640 */
641 case FIX_HC_64BIT_CPUM:
642 {
643 Assert(offSrc < pSwitcher->cbCode);
644 *uSrc.pu64 = pVM->pVMR0 + RT_OFFSETOF(VM, cpum);
645 break;
646 }
647#endif
648
649 /*
650 * 32-bit ID pointer to (ID) target within the code (32-bit offset).
651 */
652 case FIX_ID_32BIT:
653 {
654 uint32_t offTrg = *u.pu32++;
655 Assert(offSrc < pSwitcher->cbCode);
656 Assert(offTrg - pSwitcher->offIDCode0 < pSwitcher->cbIDCode0 || offTrg - pSwitcher->offIDCode1 < pSwitcher->cbIDCode1);
657 *uSrc.pu32 = u32IDCode + offTrg;
658 break;
659 }
660
661 /*
662 * 64-bit ID pointer to (ID) target within the code (32-bit offset).
663 */
664 case FIX_ID_64BIT:
665 {
666 uint32_t offTrg = *u.pu32++;
667 Assert(offSrc < pSwitcher->cbCode);
668 Assert(offTrg - pSwitcher->offIDCode0 < pSwitcher->cbIDCode0 || offTrg - pSwitcher->offIDCode1 < pSwitcher->cbIDCode1);
669 *uSrc.pu64 = u32IDCode + offTrg;
670 break;
671 }
672
673 /*
674 * Far 16:32 ID pointer to 64-bit mode (ID) target within the code (32-bit offset).
675 */
676 case FIX_ID_FAR32_TO_64BIT_MODE:
677 {
678 uint32_t offTrg = *u.pu32++;
679 Assert(offSrc < pSwitcher->cbCode);
680 Assert(offTrg - pSwitcher->offIDCode0 < pSwitcher->cbIDCode0 || offTrg - pSwitcher->offIDCode1 < pSwitcher->cbIDCode1);
681 *uSrc.pu32++ = u32IDCode + offTrg;
682 *uSrc.pu16 = SelCS64;
683 AssertRelease(SelCS64);
684 break;
685 }
686
687#ifdef VBOX_WITH_NMI
688 /*
689 * 32-bit address to the APIC base.
690 */
691 case FIX_GC_APIC_BASE_32BIT:
692 {
693 *uSrc.pu32 = pVM->vmm.s.GCPtrApicBase;
694 break;
695 }
696#endif
697
698 default:
699 AssertReleaseMsgFailed(("Unknown fixup %d in switcher %s\n", u8, pSwitcher->pszDesc));
700 break;
701 }
702 }
703
704#ifdef LOG_ENABLED
705 /*
706 * If Log2 is enabled disassemble the switcher code.
707 *
708 * The switcher code have 1-2 HC parts, 1 GC part and 0-2 ID parts.
709 */
710 if (LogIs2Enabled())
711 {
712 RTLogPrintf("*** Disassembly of switcher %d '%s' %#x bytes ***\n"
713 " R0PtrCode = %p\n"
714 " pu8CodeR3 = %p\n"
715 " GCPtrCode = %RGv\n"
716 " u32IDCode = %08x\n"
717 " pVMRC = %RRv\n"
718 " pCPUMRC = %RRv\n"
719 " pVMR3 = %p\n"
720 " pCPUMR3 = %p\n"
721 " GCPtrGDT = %RGv\n"
722 " InterCR3s = %08RHp, %08RHp, %08RHp (32-Bit, PAE, AMD64)\n"
723 " HyperCR3s = %08RHp, %08RHp, %08RHp (32-Bit, PAE, AMD64)\n"
724 " SelCS = %04x\n"
725 " SelDS = %04x\n"
726 " SelCS64 = %04x\n"
727 " SelTSS = %04x\n",
728 pSwitcher->enmType, pSwitcher->pszDesc, pSwitcher->cbCode,
729 R0PtrCode,
730 pu8CodeR3,
731 GCPtrCode,
732 u32IDCode,
733 VM_RC_ADDR(pVM, pVM),
734 VM_RC_ADDR(pVM, &pVM->cpum),
735 pVM,
736 &pVM->cpum,
737 GCPtrGDT,
738 PGMGetInter32BitCR3(pVM), PGMGetInterPaeCR3(pVM), PGMGetInterAmd64CR3(pVM),
739 PGMGetHyper32BitCR3(pVM), PGMGetHyperPaeCR3(pVM), PGMGetHyperAmd64CR3(pVM),
740 SelCS, SelDS, SelCS64, SelTSS);
741
742 uint32_t offCode = 0;
743 while (offCode < pSwitcher->cbCode)
744 {
745 /*
746 * Figure out where this is.
747 */
748 const char *pszDesc = NULL;
749 RTUINTPTR uBase;
750 uint32_t cbCode;
751 if (offCode - pSwitcher->offHCCode0 < pSwitcher->cbHCCode0)
752 {
753 pszDesc = "HCCode0";
754 uBase = R0PtrCode;
755 offCode = pSwitcher->offHCCode0;
756 cbCode = pSwitcher->cbHCCode0;
757 }
758 else if (offCode - pSwitcher->offHCCode1 < pSwitcher->cbHCCode1)
759 {
760 pszDesc = "HCCode1";
761 uBase = R0PtrCode;
762 offCode = pSwitcher->offHCCode1;
763 cbCode = pSwitcher->cbHCCode1;
764 }
765 else if (offCode - pSwitcher->offGCCode < pSwitcher->cbGCCode)
766 {
767 pszDesc = "GCCode";
768 uBase = GCPtrCode;
769 offCode = pSwitcher->offGCCode;
770 cbCode = pSwitcher->cbGCCode;
771 }
772 else if (offCode - pSwitcher->offIDCode0 < pSwitcher->cbIDCode0)
773 {
774 pszDesc = "IDCode0";
775 uBase = u32IDCode;
776 offCode = pSwitcher->offIDCode0;
777 cbCode = pSwitcher->cbIDCode0;
778 }
779 else if (offCode - pSwitcher->offIDCode1 < pSwitcher->cbIDCode1)
780 {
781 pszDesc = "IDCode1";
782 uBase = u32IDCode;
783 offCode = pSwitcher->offIDCode1;
784 cbCode = pSwitcher->cbIDCode1;
785 }
786 else
787 {
788 RTLogPrintf(" %04x: %02x '%c' (nowhere)\n",
789 offCode, pu8CodeR3[offCode], isprint(pu8CodeR3[offCode]) ? pu8CodeR3[offCode] : ' ');
790 offCode++;
791 continue;
792 }
793
794 /*
795 * Disassemble it.
796 */
797 RTLogPrintf(" %s: offCode=%#x cbCode=%#x\n", pszDesc, offCode, cbCode);
798 DISCPUSTATE Cpu;
799
800 memset(&Cpu, 0, sizeof(Cpu));
801 Cpu.mode = CPUMODE_32BIT;
802 while (cbCode > 0)
803 {
804 /* try label it */
805 if (pSwitcher->offR0HostToGuest == offCode)
806 RTLogPrintf(" *R0HostToGuest:\n");
807 if (pSwitcher->offGCGuestToHost == offCode)
808 RTLogPrintf(" *GCGuestToHost:\n");
809 if (pSwitcher->offGCCallTrampoline == offCode)
810 RTLogPrintf(" *GCCallTrampoline:\n");
811 if (pSwitcher->offGCGuestToHostAsm == offCode)
812 RTLogPrintf(" *GCGuestToHostAsm:\n");
813 if (pSwitcher->offGCGuestToHostAsmHyperCtx == offCode)
814 RTLogPrintf(" *GCGuestToHostAsmHyperCtx:\n");
815 if (pSwitcher->offGCGuestToHostAsmGuestCtx == offCode)
816 RTLogPrintf(" *GCGuestToHostAsmGuestCtx:\n");
817
818 /* disas */
819 uint32_t cbInstr = 0;
820 char szDisas[256];
821 if (RT_SUCCESS(DISInstr(&Cpu, (RTUINTPTR)pu8CodeR3 + offCode, uBase - (RTUINTPTR)pu8CodeR3, &cbInstr, szDisas)))
822 RTLogPrintf(" %04x: %s", offCode, szDisas); //for whatever reason szDisas includes '\n'.
823 else
824 {
825 RTLogPrintf(" %04x: %02x '%c'\n",
826 offCode, pu8CodeR3[offCode], isprint(pu8CodeR3[offCode]) ? pu8CodeR3[offCode] : ' ');
827 cbInstr = 1;
828 }
829 offCode += cbInstr;
830 cbCode -= RT_MIN(cbInstr, cbCode);
831 }
832 }
833 }
834#endif
835}
836
837
838/**
839 * Relocator for the 32-Bit to 32-Bit world switcher.
840 */
841DECLCALLBACK(void) vmmR3Switcher32BitTo32Bit_Relocate(PVM pVM, PVMMSWITCHERDEF pSwitcher, RTR0PTR R0PtrCode, uint8_t *pu8CodeR3, RTGCPTR GCPtrCode, uint32_t u32IDCode)
842{
843 vmmR3SwitcherGenericRelocate(pVM, pSwitcher, R0PtrCode, pu8CodeR3, GCPtrCode, u32IDCode,
844 SELMGetHyperCS(pVM), SELMGetHyperDS(pVM), SELMGetHyperTSS(pVM), SELMGetHyperGDT(pVM), 0);
845}
846
847
848/**
849 * Relocator for the 32-Bit to PAE world switcher.
850 */
851DECLCALLBACK(void) vmmR3Switcher32BitToPAE_Relocate(PVM pVM, PVMMSWITCHERDEF pSwitcher, RTR0PTR R0PtrCode, uint8_t *pu8CodeR3, RTGCPTR GCPtrCode, uint32_t u32IDCode)
852{
853 vmmR3SwitcherGenericRelocate(pVM, pSwitcher, R0PtrCode, pu8CodeR3, GCPtrCode, u32IDCode,
854 SELMGetHyperCS(pVM), SELMGetHyperDS(pVM), SELMGetHyperTSS(pVM), SELMGetHyperGDT(pVM), 0);
855}
856
857
858/**
859 * Relocator for the 32-Bit to AMD64 world switcher.
860 */
861DECLCALLBACK(void) vmmR3Switcher32BitToAMD64_Relocate(PVM pVM, PVMMSWITCHERDEF pSwitcher, RTR0PTR R0PtrCode, uint8_t *pu8CodeR3, RTGCPTR GCPtrCode, uint32_t u32IDCode)
862{
863 vmmR3SwitcherGenericRelocate(pVM, pSwitcher, R0PtrCode, pu8CodeR3, GCPtrCode, u32IDCode,
864 SELMGetHyperCS(pVM), SELMGetHyperDS(pVM), SELMGetHyperTSS(pVM), SELMGetHyperGDT(pVM), 0);
865}
866
867
868/**
869 * Relocator for the PAE to 32-Bit world switcher.
870 */
871DECLCALLBACK(void) vmmR3SwitcherPAETo32Bit_Relocate(PVM pVM, PVMMSWITCHERDEF pSwitcher, RTR0PTR R0PtrCode, uint8_t *pu8CodeR3, RTGCPTR GCPtrCode, uint32_t u32IDCode)
872{
873 vmmR3SwitcherGenericRelocate(pVM, pSwitcher, R0PtrCode, pu8CodeR3, GCPtrCode, u32IDCode,
874 SELMGetHyperCS(pVM), SELMGetHyperDS(pVM), SELMGetHyperTSS(pVM), SELMGetHyperGDT(pVM), 0);
875}
876
877
878/**
879 * Relocator for the PAE to PAE world switcher.
880 */
881DECLCALLBACK(void) vmmR3SwitcherPAEToPAE_Relocate(PVM pVM, PVMMSWITCHERDEF pSwitcher, RTR0PTR R0PtrCode, uint8_t *pu8CodeR3, RTGCPTR GCPtrCode, uint32_t u32IDCode)
882{
883 vmmR3SwitcherGenericRelocate(pVM, pSwitcher, R0PtrCode, pu8CodeR3, GCPtrCode, u32IDCode,
884 SELMGetHyperCS(pVM), SELMGetHyperDS(pVM), SELMGetHyperTSS(pVM), SELMGetHyperGDT(pVM), 0);
885}
886
887
888/**
889 * Relocator for the AMD64 to 32-bit world switcher.
890 */
891DECLCALLBACK(void) vmmR3SwitcherAMD64To32Bit_Relocate(PVM pVM, PVMMSWITCHERDEF pSwitcher, RTR0PTR R0PtrCode, uint8_t *pu8CodeR3, RTGCPTR GCPtrCode, uint32_t u32IDCode)
892{
893 vmmR3SwitcherGenericRelocate(pVM, pSwitcher, R0PtrCode, pu8CodeR3, GCPtrCode, u32IDCode,
894 SELMGetHyperCS(pVM), SELMGetHyperDS(pVM), SELMGetHyperTSS(pVM), SELMGetHyperGDT(pVM), SELMGetHyperCS64(pVM));
895}
896
897
898/**
899 * Relocator for the AMD64 to PAE world switcher.
900 */
901DECLCALLBACK(void) vmmR3SwitcherAMD64ToPAE_Relocate(PVM pVM, PVMMSWITCHERDEF pSwitcher, RTR0PTR R0PtrCode, uint8_t *pu8CodeR3, RTGCPTR GCPtrCode, uint32_t u32IDCode)
902{
903 vmmR3SwitcherGenericRelocate(pVM, pSwitcher, R0PtrCode, pu8CodeR3, GCPtrCode, u32IDCode,
904 SELMGetHyperCS(pVM), SELMGetHyperDS(pVM), SELMGetHyperTSS(pVM), SELMGetHyperGDT(pVM), SELMGetHyperCS64(pVM));
905}
906
907
908/**
909 * Selects the switcher to be used for switching to GC.
910 *
911 * @returns VBox status code.
912 * @param pVM VM handle.
913 * @param enmSwitcher The new switcher.
914 * @remark This function may be called before the VMM is initialized.
915 */
916VMMR3DECL(int) VMMR3SelectSwitcher(PVM pVM, VMMSWITCHER enmSwitcher)
917{
918 /*
919 * Validate input.
920 */
921 if ( enmSwitcher < VMMSWITCHER_INVALID
922 || enmSwitcher >= VMMSWITCHER_MAX)
923 {
924 AssertMsgFailed(("Invalid input enmSwitcher=%d\n", enmSwitcher));
925 return VERR_INVALID_PARAMETER;
926 }
927
928 /* Do nothing if the switcher is disabled. */
929 if (pVM->vmm.s.fSwitcherDisabled)
930 return VINF_SUCCESS;
931
932 /*
933 * Select the new switcher.
934 */
935 PVMMSWITCHERDEF pSwitcher = s_apSwitchers[enmSwitcher];
936 if (pSwitcher)
937 {
938 Log(("VMMR3SelectSwitcher: enmSwitcher %d -> %d %s\n", pVM->vmm.s.enmSwitcher, enmSwitcher, pSwitcher->pszDesc));
939 pVM->vmm.s.enmSwitcher = enmSwitcher;
940
941 RTR0PTR pbCodeR0 = (RTR0PTR)pVM->vmm.s.pvCoreCodeR0 + pVM->vmm.s.aoffSwitchers[enmSwitcher]; /** @todo fix the pvCoreCodeR0 type */
942 pVM->vmm.s.pfnHostToGuestR0 = pbCodeR0 + pSwitcher->offR0HostToGuest;
943
944 RTGCPTR GCPtr = pVM->vmm.s.pvCoreCodeRC + pVM->vmm.s.aoffSwitchers[enmSwitcher];
945 pVM->vmm.s.pfnGuestToHostRC = GCPtr + pSwitcher->offGCGuestToHost;
946 pVM->vmm.s.pfnCallTrampolineRC = GCPtr + pSwitcher->offGCCallTrampoline;
947 pVM->pfnVMMGCGuestToHostAsm = GCPtr + pSwitcher->offGCGuestToHostAsm;
948 pVM->pfnVMMGCGuestToHostAsmHyperCtx = GCPtr + pSwitcher->offGCGuestToHostAsmHyperCtx;
949 pVM->pfnVMMGCGuestToHostAsmGuestCtx = GCPtr + pSwitcher->offGCGuestToHostAsmGuestCtx;
950 return VINF_SUCCESS;
951 }
952
953 return VERR_NOT_IMPLEMENTED;
954}
955
956
/**
 * Disable the switcher logic permanently.
 *
 * Once set, VMMR3SelectSwitcher becomes a no-op returning VINF_SUCCESS;
 * there is no way to re-enable the switcher for this VM.
 *
 * @returns VBox status code (always VINF_SUCCESS).
 * @param   pVM     VM handle.
 */
VMMR3DECL(int) VMMR3DisableSwitcher(PVM pVM)
{
/** @todo r=bird: I would suggest that we create a dummy switcher which just does something like:
 * @code
 *   mov eax, VERR_INTERNAL_ERROR
 *   ret
 * @endcode
 * And then check for fSwitcherDisabled in VMMR3SelectSwitcher() in order to prevent it from being removed.
 */
    pVM->vmm.s.fSwitcherDisabled = true;
    return VINF_SUCCESS;
}
975
Note: See TracBrowser for help on using the repository browser.

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette