VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/SELMAll.cpp@ 45701

Last change on this file since 45701 was 45701, checked in by vboxsync, 12 years ago

VMM: SELM and VMM early HM init changes.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 35.6 KB
Line 
1/* $Id: SELMAll.cpp 45701 2013-04-24 14:21:09Z vboxsync $ */
2/** @file
3 * SELM All contexts.
4 */
5
6/*
7 * Copyright (C) 2006-2012 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*******************************************************************************
20* Header Files *
21*******************************************************************************/
22#define LOG_GROUP LOG_GROUP_SELM
23#include <VBox/vmm/selm.h>
24#include <VBox/vmm/stam.h>
25#include <VBox/vmm/em.h>
26#include <VBox/vmm/mm.h>
27#include <VBox/vmm/hm.h>
28#include <VBox/vmm/pgm.h>
29#include <VBox/vmm/hm.h>
30#include "SELMInternal.h"
31#include <VBox/vmm/vm.h>
32#include <VBox/err.h>
33#include <VBox/param.h>
34#include <iprt/assert.h>
35#include <VBox/vmm/vmm.h>
36#include <iprt/x86.h>
37
38#include "SELMInline.h"
39
40
41/*******************************************************************************
42* Global Variables *
43*******************************************************************************/
#if defined(LOG_ENABLED) && defined(VBOX_WITH_RAW_MODE_NOT_R0)
/** Segment register names, indexed by the X86_SREG_XXX ordering (ES, CS, SS,
 *  DS, FS, GS).  Only referenced from Log statements, hence the LOG_ENABLED
 *  guard. */
static char const g_aszSRegNms[X86_SREG_COUNT][4] = { "ES", "CS", "SS", "DS", "FS", "GS" };
#endif
48
49
50
#ifdef VBOX_WITH_RAW_MODE_NOT_R0
/**
 * Converts a GC selector based address to a flat address.
 *
 * No limit checks are done. Use the SELMToFlat*() or SELMValidate*() functions
 * for that.
 *
 * @returns Flat address (base + offset, truncated to 32 bits).
 * @param   pVM     Pointer to the VM.
 * @param   Sel     Selector part.
 * @param   Addr    Address part.
 * @remarks Don't use when in long mode.
 * @remarks Reads the shadow GDT/LDT, so only valid in raw-mode (asserted).
 */
VMMDECL(RTGCPTR) SELMToFlatBySel(PVM pVM, RTSEL Sel, RTGCPTR Addr)
{
    Assert(pVM->cCpus == 1 && !CPUMIsGuestInLongMode(VMMGetCpu(pVM)));    /* DON'T USE! */
    Assert(!HMIsEnabled(pVM));

    /** @todo check the limit. */
    X86DESC    Desc;
    if (!(Sel & X86_SEL_LDT))
        /* GDT entry: index straight into the shadow GDT. */
        Desc = pVM->selm.s.CTX_SUFF(paGdt)[Sel >> X86_SEL_SHIFT];
    else
    {
        /** @todo handle LDT pages not present! */
        PX86DESC    paLDT = (PX86DESC)((char *)pVM->selm.s.CTX_SUFF(pvLdt) + pVM->selm.s.offLdtHyper);
        Desc = paLDT[Sel >> X86_SEL_SHIFT];
    }

    /* Flat = base + offset, masked to 32 bits (long mode is excluded above). */
    return (RTGCPTR)(((RTGCUINTPTR)Addr + X86DESC_BASE(&Desc)) & 0xffffffff);
}
#endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
83
84
/**
 * Converts a GC selector based address to a flat address.
 *
 * No limit checks are done. Use the SELMToFlat*() or SELMValidate*() functions
 * for that.
 *
 * @returns Flat address.
 * @param   pVM         Pointer to the VM.
 * @param   SelReg      Selector register.
 * @param   pCtxCore    CPU context.
 * @param   Addr        Address part.
 */
VMMDECL(RTGCPTR) SELMToFlat(PVM pVM, DISSELREG SelReg, PCPUMCTXCORE pCtxCore, RTGCPTR Addr)
{
    PCPUMSELREG    pSReg;
    PVMCPU         pVCpu = VMMGetCpu(pVM);

    int rc = DISFetchRegSegEx(pCtxCore, SelReg, &pSReg); AssertRC(rc);

    /*
     * Deal with real & v86 mode first.
     */
    if (    pCtxCore->eflags.Bits.u1VM
        ||  CPUMIsGuestInRealMode(pVCpu))
    {
        /* 16-bit offset wrap, then Sel << 4 as base (or the hidden base when
           the hidden parts are usable). */
        RTGCUINTPTR uFlat = (RTGCUINTPTR)Addr & 0xffff;
        if (CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg))
            uFlat += pSReg->u64Base;
        else
            uFlat += (RTGCUINTPTR)pSReg->Sel << 4;
        return (RTGCPTR)uFlat;
    }

#ifdef VBOX_WITH_RAW_MODE_NOT_R0
    /* In raw-mode the hidden parts may be stale - reload lazily before use. */
    /** @todo when we're in 16 bits mode, we should cut off the address as well?? */
    if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg))
        CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, pSReg);
    if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtxCore->cs))
        CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, &pCtxCore->cs);
#else
    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtxCore->cs));
#endif

    /* 64 bits mode: CS, DS, ES and SS are treated as if each segment base is 0
       (Intel® 64 and IA-32 Architectures Software Developer's Manual: 3.4.2.1). */
    if (    pCtxCore->cs.Attr.n.u1Long
        &&  CPUMIsGuestInLongMode(pVCpu))
    {
        switch (SelReg)
        {
            /* Only FS and GS keep their base in 64-bit mode. */
            case DISSELREG_FS:
            case DISSELREG_GS:
                return (RTGCPTR)(pSReg->u64Base + Addr);

            default:
                return Addr;    /* base 0 */
        }
    }

    /* AMD64 manual: compatibility mode ignores the high 32 bits when calculating an effective address. */
    Assert(pSReg->u64Base <= 0xffffffff);
    return ((pSReg->u64Base + (RTGCUINTPTR)Addr) & 0xffffffff);
}
149
150
151/**
152 * Converts a GC selector based address to a flat address.
153 *
154 * Some basic checking is done, but not all kinds yet.
155 *
156 * @returns VBox status
157 * @param pVCpu Pointer to the VMCPU.
158 * @param SelReg Selector register.
159 * @param pCtxCore CPU context.
160 * @param Addr Address part.
161 * @param fFlags SELMTOFLAT_FLAGS_*
162 * GDT entires are valid.
163 * @param ppvGC Where to store the GC flat address.
164 */
165VMMDECL(int) SELMToFlatEx(PVMCPU pVCpu, DISSELREG SelReg, PCPUMCTXCORE pCtxCore, RTGCPTR Addr, uint32_t fFlags, PRTGCPTR ppvGC)
166{
167 /*
168 * Fetch the selector first.
169 */
170 PCPUMSELREG pSReg;
171 int rc = DISFetchRegSegEx(pCtxCore, SelReg, &pSReg);
172 AssertRCReturn(rc, rc); AssertPtr(pSReg);
173
174 /*
175 * Deal with real & v86 mode first.
176 */
177 if ( pCtxCore->eflags.Bits.u1VM
178 || CPUMIsGuestInRealMode(pVCpu))
179 {
180 RTGCUINTPTR uFlat = (RTGCUINTPTR)Addr & 0xffff;
181 if (ppvGC)
182 {
183 if (CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg))
184 *ppvGC = pSReg->u64Base + uFlat;
185 else
186 *ppvGC = ((RTGCUINTPTR)pSReg->Sel << 4) + uFlat;
187 }
188 return VINF_SUCCESS;
189 }
190
191
192#ifdef VBOX_WITH_RAW_MODE_NOT_R0
193 if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg))
194 CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, pSReg);
195 if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtxCore->cs))
196 CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, &pCtxCore->cs);
197#else
198 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
199 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtxCore->cs));
200#endif
201
202 /* 64 bits mode: CS, DS, ES and SS are treated as if each segment base is 0
203 (Intel® 64 and IA-32 Architectures Software Developer's Manual: 3.4.2.1). */
204 RTGCPTR pvFlat;
205 bool fCheckLimit = true;
206 if ( pCtxCore->cs.Attr.n.u1Long
207 && CPUMIsGuestInLongMode(pVCpu))
208 {
209 fCheckLimit = false;
210 switch (SelReg)
211 {
212 case DISSELREG_FS:
213 case DISSELREG_GS:
214 pvFlat = pSReg->u64Base + Addr;
215 break;
216
217 default:
218 pvFlat = Addr;
219 break;
220 }
221 }
222 else
223 {
224 /* AMD64 manual: compatibility mode ignores the high 32 bits when calculating an effective address. */
225 Assert(pSReg->u64Base <= UINT32_C(0xffffffff));
226 pvFlat = pSReg->u64Base + Addr;
227 pvFlat &= UINT32_C(0xffffffff);
228 }
229
230 /*
231 * Check type if present.
232 */
233 if (pSReg->Attr.n.u1Present)
234 {
235 switch (pSReg->Attr.n.u4Type)
236 {
237 /* Read only selector type. */
238 case X86_SEL_TYPE_RO:
239 case X86_SEL_TYPE_RO_ACC:
240 case X86_SEL_TYPE_RW:
241 case X86_SEL_TYPE_RW_ACC:
242 case X86_SEL_TYPE_EO:
243 case X86_SEL_TYPE_EO_ACC:
244 case X86_SEL_TYPE_ER:
245 case X86_SEL_TYPE_ER_ACC:
246 if (!(fFlags & SELMTOFLAT_FLAGS_NO_PL))
247 {
248 /** @todo fix this mess */
249 }
250 /* check limit. */
251 if (fCheckLimit && Addr > pSReg->u32Limit)
252 return VERR_OUT_OF_SELECTOR_BOUNDS;
253 /* ok */
254 if (ppvGC)
255 *ppvGC = pvFlat;
256 return VINF_SUCCESS;
257
258 case X86_SEL_TYPE_EO_CONF:
259 case X86_SEL_TYPE_EO_CONF_ACC:
260 case X86_SEL_TYPE_ER_CONF:
261 case X86_SEL_TYPE_ER_CONF_ACC:
262 if (!(fFlags & SELMTOFLAT_FLAGS_NO_PL))
263 {
264 /** @todo fix this mess */
265 }
266 /* check limit. */
267 if (fCheckLimit && Addr > pSReg->u32Limit)
268 return VERR_OUT_OF_SELECTOR_BOUNDS;
269 /* ok */
270 if (ppvGC)
271 *ppvGC = pvFlat;
272 return VINF_SUCCESS;
273
274 case X86_SEL_TYPE_RO_DOWN:
275 case X86_SEL_TYPE_RO_DOWN_ACC:
276 case X86_SEL_TYPE_RW_DOWN:
277 case X86_SEL_TYPE_RW_DOWN_ACC:
278 if (!(fFlags & SELMTOFLAT_FLAGS_NO_PL))
279 {
280 /** @todo fix this mess */
281 }
282 /* check limit. */
283 if (fCheckLimit)
284 {
285 if (!pSReg->Attr.n.u1Granularity && Addr > UINT32_C(0xffff))
286 return VERR_OUT_OF_SELECTOR_BOUNDS;
287 if (Addr <= pSReg->u32Limit)
288 return VERR_OUT_OF_SELECTOR_BOUNDS;
289 }
290 /* ok */
291 if (ppvGC)
292 *ppvGC = pvFlat;
293 return VINF_SUCCESS;
294
295 default:
296 return VERR_INVALID_SELECTOR;
297
298 }
299 }
300 return VERR_SELECTOR_NOT_PRESENT;
301}
302
303
#ifdef VBOX_WITH_RAW_MODE_NOT_R0
/**
 * Converts a GC selector based address to a flat address.
 *
 * Some basic checking is done, but not all kinds yet.
 *
 * @returns VBox status
 * @param   pVCpu   Pointer to the VMCPU.
 * @param   eflags  Current eflags.
 * @param   Sel     Selector part.
 * @param   Addr    Address part.
 * @param   fFlags  SELMTOFLAT_FLAGS_*
 * @param   ppvGC   Where to store the GC flat address. Optional.
 * @param   pcb     Where to store the number of bytes from *ppvGC which can be
 *                  accessed according to the selector. NULL is allowed.
 * @remarks Don't use when in long mode.
 */
VMMDECL(int) SELMToFlatBySelEx(PVMCPU pVCpu, X86EFLAGS eflags, RTSEL Sel, RTGCPTR Addr,
                               uint32_t fFlags, PRTGCPTR ppvGC, uint32_t *pcb)
{
    Assert(!CPUMIsGuestInLongMode(pVCpu));    /* DON'T USE! (Accessing shadow GDT/LDT.) */
    Assert(!HMIsEnabled(pVCpu->CTX_SUFF(pVM)));

    /*
     * Deal with real & v86 mode first.
     */
    if (    eflags.Bits.u1VM
        ||  CPUMIsGuestInRealMode(pVCpu))
    {
        RTGCUINTPTR uFlat = (RTGCUINTPTR)Addr & 0xffff;
        if (ppvGC)
            *ppvGC = ((RTGCUINTPTR)Sel << 4) + uFlat;
        if (pcb)
            *pcb = 0x10000 - uFlat;     /* rest of the 64KB segment */
        return VINF_SUCCESS;
    }

    /*
     * Look the descriptor up in the shadow GDT/LDT, rejecting selectors
     * beyond the table limits.
     */
    /** @todo when we're in 16 bits mode, we should cut off the address as well?? */
    X86DESC Desc;
    PVM     pVM = pVCpu->CTX_SUFF(pVM);
    if (!(Sel & X86_SEL_LDT))
    {
        if (    !(fFlags & SELMTOFLAT_FLAGS_HYPER)
            &&  (Sel | X86_SEL_RPL_LDT) > pVM->selm.s.GuestGdtr.cbGdt)
            return VERR_INVALID_SELECTOR;
        Desc = pVM->selm.s.CTX_SUFF(paGdt)[Sel >> X86_SEL_SHIFT];
    }
    else
    {
        if ((Sel | X86_SEL_RPL_LDT) > pVM->selm.s.cbLdtLimit)
            return VERR_INVALID_SELECTOR;

        /** @todo handle LDT page(s) not present! */
        PX86DESC paLDT = (PX86DESC)((char *)pVM->selm.s.CTX_SUFF(pvLdt) + pVM->selm.s.offLdtHyper);
        Desc = paLDT[Sel >> X86_SEL_SHIFT];
    }

    /* calc limit. */
    uint32_t u32Limit = X86DESC_LIMIT_G(&Desc);

    /* calc address assuming straight stuff. */
    RTGCPTR pvFlat = Addr + X86DESC_BASE(&Desc);

    /* Cut the address to 32 bits. */
    Assert(!CPUMIsGuestInLongMode(pVCpu));
    pvFlat &= 0xffffffff;

    uint8_t u1Present  = Desc.Gen.u1Present;
    uint8_t u1DefBig   = Desc.Gen.u1DefBig;
    uint8_t u1DescType = Desc.Gen.u1DescType;
    uint8_t u4Type     = Desc.Gen.u4Type;

    /*
     * Check if present.
     */
    if (u1Present)
    {
        /*
         * Type check.
         */
#define BOTH(a, b) ((a << 16) | b)
        switch (BOTH(u1DescType, u4Type))
        {
            /* All expand-up data and code selector types share the same limit
               check and epilogue, so the read-only/read-write and the
               execute/conforming groups are folded into one case list. */
            case BOTH(1,X86_SEL_TYPE_RO):
            case BOTH(1,X86_SEL_TYPE_RO_ACC):
            case BOTH(1,X86_SEL_TYPE_RW):
            case BOTH(1,X86_SEL_TYPE_RW_ACC):
            case BOTH(1,X86_SEL_TYPE_EO):
            case BOTH(1,X86_SEL_TYPE_EO_ACC):
            case BOTH(1,X86_SEL_TYPE_ER):
            case BOTH(1,X86_SEL_TYPE_ER_ACC):
            case BOTH(1,X86_SEL_TYPE_EO_CONF):
            case BOTH(1,X86_SEL_TYPE_EO_CONF_ACC):
            case BOTH(1,X86_SEL_TYPE_ER_CONF):
            case BOTH(1,X86_SEL_TYPE_ER_CONF_ACC):
                if (!(fFlags & SELMTOFLAT_FLAGS_NO_PL))
                {
                    /** @todo fix this mess */
                }
                /* check limit. */
                if ((RTGCUINTPTR)Addr > u32Limit)
                    return VERR_OUT_OF_SELECTOR_BOUNDS;
                /* ok */
                if (ppvGC)
                    *ppvGC = pvFlat;
                if (pcb)
                    *pcb = u32Limit - (uint32_t)Addr + 1;
                return VINF_SUCCESS;

            case BOTH(1,X86_SEL_TYPE_RO_DOWN):
            case BOTH(1,X86_SEL_TYPE_RO_DOWN_ACC):
            case BOTH(1,X86_SEL_TYPE_RW_DOWN):
            case BOTH(1,X86_SEL_TYPE_RW_DOWN_ACC):
                if (!(fFlags & SELMTOFLAT_FLAGS_NO_PL))
                {
                    /** @todo fix this mess */
                }
                /* Expand-down limit check: valid offsets run from limit+1 up
                   to 0xffff or 0xffffffff depending on the D/B flag (Intel SDM
                   vol 3A, 5.3).  Note: the old code tested the granularity bit
                   here and below, but the SDM says the B flag selects the
                   upper bound. */
                if (!u1DefBig && (RTGCUINTPTR)Addr > (RTGCUINTPTR)0xffff)
                    return VERR_OUT_OF_SELECTOR_BOUNDS;
                if ((RTGCUINTPTR)Addr <= u32Limit)
                    return VERR_OUT_OF_SELECTOR_BOUNDS;

                /* ok */
                if (ppvGC)
                    *ppvGC = pvFlat;
                if (pcb)
                    *pcb = (RTGCUINTPTR)(u1DefBig ? 0xffffffff : 0xffff) - (RTGCUINTPTR)Addr + 1;
                return VINF_SUCCESS;

            case BOTH(0,X86_SEL_TYPE_SYS_286_TSS_AVAIL):
            case BOTH(0,X86_SEL_TYPE_SYS_LDT):
            case BOTH(0,X86_SEL_TYPE_SYS_286_TSS_BUSY):
            case BOTH(0,X86_SEL_TYPE_SYS_286_CALL_GATE):
            case BOTH(0,X86_SEL_TYPE_SYS_TASK_GATE):
            case BOTH(0,X86_SEL_TYPE_SYS_286_INT_GATE):
            case BOTH(0,X86_SEL_TYPE_SYS_286_TRAP_GATE):
            case BOTH(0,X86_SEL_TYPE_SYS_386_TSS_AVAIL):
            case BOTH(0,X86_SEL_TYPE_SYS_386_TSS_BUSY):
            case BOTH(0,X86_SEL_TYPE_SYS_386_CALL_GATE):
            case BOTH(0,X86_SEL_TYPE_SYS_386_INT_GATE):
            case BOTH(0,X86_SEL_TYPE_SYS_386_TRAP_GATE):
                if (!(fFlags & SELMTOFLAT_FLAGS_NO_PL))
                {
                    /** @todo fix this mess */
                }
                /* check limit. */
                if ((RTGCUINTPTR)Addr > u32Limit)
                    return VERR_OUT_OF_SELECTOR_BOUNDS;
                /* ok */
                if (ppvGC)
                    *ppvGC = pvFlat;
                if (pcb)
                    *pcb = 0xffffffff - (RTGCUINTPTR)pvFlat + 1; /* Depends on the type.. fixme if we care. */
                return VINF_SUCCESS;

            default:
                return VERR_INVALID_SELECTOR;
        }
#undef BOTH
    }
    return VERR_SELECTOR_NOT_PRESENT;
}
#endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
486
487
488#ifdef VBOX_WITH_RAW_MODE_NOT_R0
489
/**
 * Worker for SELMLoadHiddenSelectorReg: loads the hidden parts of a segment
 * register from the guest descriptor table.
 *
 * On failure (unreadable or unusable descriptor) the register is left
 * untouched; only logging/statistics are updated.
 *
 * @param   pVCpu       Pointer to the current virtual CPU.
 * @param   pCtx        The guest CPU context (not referenced here).
 * @param   pSReg       The selector register to load.
 * @param   GCPtrDesc   Guest linear address of the descriptor entry.
 * @param   Sel         The selector value (logging/validation).
 * @param   iSReg       Segment register index (X86_SREG_XXX ordering).
 */
static void selLoadHiddenSelectorRegFromGuestTable(PVMCPU pVCpu, PCCPUMCTX pCtx, PCPUMSELREG pSReg,
                                                   RTGCPTR GCPtrDesc, RTSEL const Sel, uint32_t const iSReg)
{
    Assert(!HMIsEnabled(pVCpu->CTX_SUFF(pVM)));

    /*
     * Try read the entry.
     */
    X86DESC GstDesc;
    int rc = PGMPhysReadGCPtr(pVCpu, &GstDesc, GCPtrDesc, sizeof(GstDesc));
    if (RT_FAILURE(rc))
    {
        /* Keep the old (possibly stale) hidden values; see the @todo on
           SELMLoadHiddenSelectorReg. */
        Log(("SELMLoadHiddenSelectorReg: Error reading descriptor %s=%#x: %Rrc\n", g_aszSRegNms[iSReg], Sel, rc));
        STAM_REL_COUNTER_INC(&pVCpu->CTX_SUFF(pVM)->selm.s.StatLoadHidSelReadErrors);
        return;
    }

    /*
     * Validate it and load it.
     */
    if (!selmIsGstDescGoodForSReg(pVCpu, pSReg, &GstDesc, iSReg, CPUMGetGuestCPL(pVCpu)))
    {
        Log(("SELMLoadHiddenSelectorReg: Guest table entry is no good (%s=%#x): %.8Rhxs\n", g_aszSRegNms[iSReg], Sel, &GstDesc));
        STAM_REL_COUNTER_INC(&pVCpu->CTX_SUFF(pVM)->selm.s.StatLoadHidSelGstNoGood);
        return;
    }

    selmLoadHiddenSRegFromGuestDesc(pVCpu, pSReg, &GstDesc);
    Log(("SELMLoadHiddenSelectorReg: loaded %s=%#x:{b=%llx, l=%x, a=%x, vs=%x} (gst)\n",
         g_aszSRegNms[iSReg], Sel, pSReg->u64Base, pSReg->u32Limit, pSReg->Attr.u, pSReg->ValidSel));
    STAM_COUNTER_INC(&pVCpu->CTX_SUFF(pVM)->selm.s.StatLoadHidSelGst);
}
522
523
/**
 * CPUM helper that loads the hidden selector register from the descriptor table
 * when executing with raw-mode.
 *
 * Prefers the shadow descriptor table; falls back to reading the guest table
 * when the shadow copy is out of sync or the entry is unusable.
 *
 * @remarks This is only used when in legacy protected mode!
 *
 * @param   pVCpu   Pointer to the current virtual CPU.
 * @param   pCtx    The guest CPU context.
 * @param   pSReg   The selector register.
 *
 * @todo    Deal 100% correctly with stale selectors.  What's more evil is
 *          invalid page table entries, which isn't impossible to imagine for
 *          LDT entries for instance, though unlikely.  Currently, we turn a
 *          blind eye to these issues and return the old hidden registers,
 *          though we don't set the valid flag, so that we'll try loading them
 *          over and over again till we succeed loading something.
 */
VMM_INT_DECL(void) SELMLoadHiddenSelectorReg(PVMCPU pVCpu, PCCPUMCTX pCtx, PCPUMSELREG pSReg)
{
    /* Legacy protected mode only: PE set, long mode inactive. */
    Assert(pCtx->cr0 & X86_CR0_PE);
    Assert(!(pCtx->msrEFER & MSR_K6_EFER_LMA));

    PVM pVM = pVCpu->CTX_SUFF(pVM);
    Assert(pVM->cCpus == 1);
    Assert(!HMIsEnabled(pVM));


    /*
     * Get the shadow descriptor table entry and validate it.
     * Should something go amiss, try the guest table.
     */
    RTSEL const    Sel   = pSReg->Sel;
    uint32_t const iSReg = pSReg - CPUMCTX_FIRST_SREG(pCtx); Assert(iSReg < X86_SREG_COUNT);
    PCX86DESC      pShwDesc;
    if (!(Sel & X86_SEL_LDT))
    {
        /** @todo this shall not happen, we shall check for these things when executing
         *        LGDT */
        AssertReturnVoid((Sel | X86_SEL_RPL | X86_SEL_LDT) <= pCtx->gdtr.cbGdt);

        pShwDesc = &pVM->selm.s.CTX_SUFF(paGdt)[Sel >> X86_SEL_SHIFT];
        if (    VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT)
            ||  !selmIsShwDescGoodForSReg(pSReg, pShwDesc, iSReg, CPUMGetGuestCPL(pVCpu)))
        {
            /* Shadow GDT out of sync or entry not usable - go to the guest GDT. */
            selLoadHiddenSelectorRegFromGuestTable(pVCpu, pCtx, pSReg, pCtx->gdtr.pGdt + (Sel & X86_SEL_MASK), Sel, iSReg);
            return;
        }
    }
    else
    {
        /** @todo this shall not happen, we shall check for these things when executing
         *        LLDT */
        AssertReturnVoid((Sel | X86_SEL_RPL | X86_SEL_LDT) <= pCtx->ldtr.u32Limit);

        pShwDesc = (PCX86DESC)((uintptr_t)pVM->selm.s.CTX_SUFF(pvLdt) + pVM->selm.s.offLdtHyper + (Sel & X86_SEL_MASK));
        if (    VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_SELM_SYNC_LDT)
            ||  !selmIsShwDescGoodForSReg(pSReg, pShwDesc, iSReg, CPUMGetGuestCPL(pVCpu)))
        {
            /* Shadow LDT out of sync or entry not usable - go to the guest LDT. */
            selLoadHiddenSelectorRegFromGuestTable(pVCpu, pCtx, pSReg, pCtx->ldtr.u64Base + (Sel & X86_SEL_MASK), Sel, iSReg);
            return;
        }
    }

    /*
     * All fine, load it.
     */
    selmLoadHiddenSRegFromShadowDesc(pSReg, pShwDesc);
    STAM_COUNTER_INC(&pVCpu->CTX_SUFF(pVM)->selm.s.StatLoadHidSelShw);
    Log(("SELMLoadHiddenSelectorReg: loaded %s=%#x:{b=%llx, l=%x, a=%x, vs=%x} (shw)\n",
         g_aszSRegNms[iSReg], Sel, pSReg->u64Base, pSReg->u32Limit, pSReg->Attr.u, pSReg->ValidSel));
}
595
596#endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
597
598/**
599 * Validates and converts a GC selector based code address to a flat
600 * address when in real or v8086 mode.
601 *
602 * @returns VINF_SUCCESS.
603 * @param pVCpu Pointer to the VMCPU.
604 * @param SelCS Selector part.
605 * @param pHidCS The hidden CS register part. Optional.
606 * @param Addr Address part.
607 * @param ppvFlat Where to store the flat address.
608 */
609DECLINLINE(int) selmValidateAndConvertCSAddrRealMode(PVMCPU pVCpu, RTSEL SelCS, PCCPUMSELREGHID pSReg, RTGCPTR Addr,
610 PRTGCPTR ppvFlat)
611{
612 RTGCUINTPTR uFlat = Addr & 0xffff;
613 if (!pSReg || !CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg))
614 uFlat += (RTGCUINTPTR)SelCS << 4;
615 else
616 uFlat += pSReg->u64Base;
617 *ppvFlat = uFlat;
618 return VINF_SUCCESS;
619}
620
621
#ifdef VBOX_WITH_RAW_MODE_NOT_R0
/**
 * Validates and converts a GC selector based code address to a flat address
 * when in protected/long mode using the raw-mode algorithm.
 *
 * @returns VBox status code.
 * @param   pVM      Pointer to the VM.
 * @param   pVCpu    Pointer to the VMCPU.
 * @param   SelCPL   Current privilege level. Get this from SS - CS might be
 *                   conforming! A full selector can be passed, we'll only
 *                   use the RPL part.
 * @param   SelCS    Selector part.
 * @param   Addr     Address part.
 * @param   ppvFlat  Where to store the flat address.
 * @param   pcBits   Where to store the segment bitness (16/32/64). Optional.
 */
DECLINLINE(int) selmValidateAndConvertCSAddrRawMode(PVM pVM, PVMCPU pVCpu, RTSEL SelCPL, RTSEL SelCS, RTGCPTR Addr,
                                                    PRTGCPTR ppvFlat, uint32_t *pcBits)
{
    NOREF(pVCpu);
    Assert(!HMIsEnabled(pVM));

    /* Fetch the descriptor from the shadow GDT/LDT. */
    /** @todo validate limit! */
    X86DESC Desc;
    if (!(SelCS & X86_SEL_LDT))
        Desc = pVM->selm.s.CTX_SUFF(paGdt)[SelCS >> X86_SEL_SHIFT];
    else
    {
        /** @todo handle LDT page(s) not present! */
        PX86DESC paLDT = (PX86DESC)((char *)pVM->selm.s.CTX_SUFF(pvLdt) + pVM->selm.s.offLdtHyper);
        Desc = paLDT[SelCS >> X86_SEL_SHIFT];
    }

    /*
     * Check if present.
     */
    if (Desc.Gen.u1Present)
    {
        /*
         * Type check: must be a code segment descriptor.
         */
        if (    Desc.Gen.u1DescType == 1
            &&  (Desc.Gen.u4Type & X86_SEL_TYPE_CODE))
        {
            /*
             * Check level.  The effective privilege is the max of CPL and the
             * selector RPL; conforming segments invert the DPL comparison.
             */
            unsigned uLevel = RT_MAX(SelCPL & X86_SEL_RPL, SelCS & X86_SEL_RPL);
            if (    !(Desc.Gen.u4Type & X86_SEL_TYPE_CONF)
                ?   uLevel <= Desc.Gen.u2Dpl
                :   uLevel >= Desc.Gen.u2Dpl /* hope I got this right now... */
               )
            {
                /*
                 * Limit check.
                 */
                uint32_t u32Limit = X86DESC_LIMIT_G(&Desc);
                if ((RTGCUINTPTR)Addr <= u32Limit)
                {
                    *ppvFlat = (RTGCPTR)((RTGCUINTPTR)Addr + X86DESC_BASE(&Desc));
                    /* Cut the address to 32 bits. */
                    *ppvFlat &= 0xffffffff;

                    if (pcBits)
                        *pcBits = Desc.Gen.u1DefBig ? 32 : 16; /** @todo GUEST64 */
                    return VINF_SUCCESS;
                }
                return VERR_OUT_OF_SELECTOR_BOUNDS;
            }
            return VERR_INVALID_RPL;
        }
        return VERR_NOT_CODE_SELECTOR;
    }
    return VERR_SELECTOR_NOT_PRESENT;
}
#endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
698
699
700/**
701 * Validates and converts a GC selector based code address to a flat address
702 * when in protected/long mode using the standard hidden selector registers
703 *
704 * @returns VBox status code.
705 * @param pVCpu Pointer to the VMCPU.
706 * @param SelCPL Current privilege level. Get this from SS - CS might be
707 * conforming! A full selector can be passed, we'll only
708 * use the RPL part.
709 * @param SelCS Selector part.
710 * @param pSRegCS The full CS selector register.
711 * @param Addr The address (think IP/EIP/RIP).
712 * @param ppvFlat Where to store the flat address upon successful return.
713 */
714DECLINLINE(int) selmValidateAndConvertCSAddrHidden(PVMCPU pVCpu, RTSEL SelCPL, RTSEL SelCS, PCCPUMSELREGHID pSRegCS,
715 RTGCPTR Addr, PRTGCPTR ppvFlat)
716{
717 /*
718 * Check if present.
719 */
720 if (pSRegCS->Attr.n.u1Present)
721 {
722 /*
723 * Type check.
724 */
725 if ( pSRegCS->Attr.n.u1DescType == 1
726 && (pSRegCS->Attr.n.u4Type & X86_SEL_TYPE_CODE))
727 {
728 /*
729 * Check level.
730 */
731 unsigned uLevel = RT_MAX(SelCPL & X86_SEL_RPL, SelCS & X86_SEL_RPL);
732 if ( !(pSRegCS->Attr.n.u4Type & X86_SEL_TYPE_CONF)
733 ? uLevel <= pSRegCS->Attr.n.u2Dpl
734 : uLevel >= pSRegCS->Attr.n.u2Dpl /* hope I got this right now... */
735 )
736 {
737 /* 64 bits mode: CS, DS, ES and SS are treated as if each segment base is 0
738 (Intel® 64 and IA-32 Architectures Software Developer's Manual: 3.4.2.1). */
739 if ( pSRegCS->Attr.n.u1Long
740 && CPUMIsGuestInLongMode(pVCpu))
741 {
742 *ppvFlat = Addr;
743 return VINF_SUCCESS;
744 }
745
746 /*
747 * Limit check. Note that the limit in the hidden register is the
748 * final value. The granularity bit was included in its calculation.
749 */
750 uint32_t u32Limit = pSRegCS->u32Limit;
751 if ((RTGCUINTPTR)Addr <= u32Limit)
752 {
753 *ppvFlat = Addr + pSRegCS->u64Base;
754 return VINF_SUCCESS;
755 }
756
757 return VERR_OUT_OF_SELECTOR_BOUNDS;
758 }
759 Log(("selmValidateAndConvertCSAddrHidden: Invalid RPL Attr.n.u4Type=%x cpl=%x dpl=%x\n",
760 pSRegCS->Attr.n.u4Type, uLevel, pSRegCS->Attr.n.u2Dpl));
761 return VERR_INVALID_RPL;
762 }
763 return VERR_NOT_CODE_SELECTOR;
764 }
765 return VERR_SELECTOR_NOT_PRESENT;
766}
767
768
/**
 * Validates and converts a GC selector based code address to a flat address.
 *
 * Dispatches to the real/v86-mode, raw-mode (no hidden register supplied) or
 * hidden-register worker as appropriate.
 *
 * @returns VBox status code.
 * @param   pVCpu    Pointer to the VMCPU.
 * @param   Efl      Current EFLAGS.
 * @param   SelCPL   Current privilege level. Get this from SS - CS might be
 *                   conforming! A full selector can be passed, we'll only
 *                   use the RPL part.
 * @param   SelCS    Selector part.
 * @param   pSRegCS  The full CS selector register.
 * @param   Addr     The address (think IP/EIP/RIP).
 * @param   ppvFlat  Where to store the flat address upon successful return.
 */
VMMDECL(int) SELMValidateAndConvertCSAddr(PVMCPU pVCpu, X86EFLAGS Efl, RTSEL SelCPL, RTSEL SelCS, PCPUMSELREG pSRegCS,
                                          RTGCPTR Addr, PRTGCPTR ppvFlat)
{
    if (    Efl.Bits.u1VM
        ||  CPUMIsGuestInRealMode(pVCpu))
        return selmValidateAndConvertCSAddrRealMode(pVCpu, SelCS, pSRegCS, Addr, ppvFlat);

#ifdef VBOX_WITH_RAW_MODE_NOT_R0
    /* Use the hidden registers when possible, updating them if outdate. */
    if (!pSRegCS)
        return selmValidateAndConvertCSAddrRawMode(pVCpu->CTX_SUFF(pVM), pVCpu, SelCPL, SelCS, Addr, ppvFlat, NULL);

    if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSRegCS))
        CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, pSRegCS);

    /* Undo ring compression: strip the RPL=1 bit from both selectors when
       running raw-mode (not HM), so the DPL checks below see the guest's
       intended privilege level. */
    if ((SelCPL & X86_SEL_RPL) == 1 && !HMIsEnabled(pVCpu->CTX_SUFF(pVM)))
        SelCPL &= ~X86_SEL_RPL;
    Assert(pSRegCS->Sel == SelCS);
    if ((SelCS & X86_SEL_RPL) == 1 && !HMIsEnabled(pVCpu->CTX_SUFF(pVM)))
        SelCS &= ~X86_SEL_RPL;
#else
    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSRegCS));
    Assert(pSRegCS->Sel == SelCS);
#endif

    return selmValidateAndConvertCSAddrHidden(pVCpu, SelCPL, SelCS, pSRegCS, Addr, ppvFlat);
}
811
812
813/**
814 * Returns Hypervisor's Trap 08 (\#DF) selector.
815 *
816 * @returns Hypervisor's Trap 08 (\#DF) selector.
817 * @param pVM Pointer to the VM.
818 */
819VMMDECL(RTSEL) SELMGetTrap8Selector(PVM pVM)
820{
821 return pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS_TRAP08];
822}
823
824
825/**
826 * Sets EIP of Hypervisor's Trap 08 (\#DF) TSS.
827 *
828 * @param pVM Pointer to the VM.
829 * @param u32EIP EIP of Trap 08 handler.
830 */
831VMMDECL(void) SELMSetTrap8EIP(PVM pVM, uint32_t u32EIP)
832{
833 pVM->selm.s.TssTrap08.eip = u32EIP;
834}
835
836
837/**
838 * Sets ss:esp for ring1 in main Hypervisor's TSS.
839 *
840 * @param pVM Pointer to the VM.
841 * @param ss Ring1 SS register value. Pass 0 if invalid.
842 * @param esp Ring1 ESP register value.
843 */
844void selmSetRing1Stack(PVM pVM, uint32_t ss, RTGCPTR32 esp)
845{
846 Assert(!HMIsEnabled(pVM));
847 Assert((ss & 1) || esp == 0);
848 pVM->selm.s.Tss.ss1 = ss;
849 pVM->selm.s.Tss.esp1 = (uint32_t)esp;
850}
851
852
#ifdef VBOX_WITH_RAW_RING1
/**
 * Sets ss:esp for ring2 in main Hypervisor's TSS.
 *
 * @param   pVM     Pointer to the VM.
 * @param   ss      Ring2 SS register value. Pass 0 if invalid.
 * @param   esp     Ring2 ESP register value.
 */
void selmSetRing2Stack(PVM pVM, uint32_t ss, RTGCPTR32 esp)
{
    Assert(!HMIsEnabled(pVM));
    /* Either a ring-2 selector (RPL == 2), or an invalidating 0:0 pair. */
    Assert((ss & 3) == 2 || esp == 0);

    PVBOXTSS pTss = &pVM->selm.s.Tss;
    pTss->ss2  = ss;
    pTss->esp2 = (uint32_t)esp;
}
#endif
869
870#ifdef VBOX_WITH_RAW_MODE_NOT_R0
871
/**
 * Gets ss:esp for ring1 in main Hypervisor's TSS.
 *
 * Returns SS=0 if the ring-1 stack isn't valid.
 *
 * @returns VBox status code.
 * @param   pVM     Pointer to the VM.
 * @param   pSS     Ring1 SS register value.
 * @param   pEsp    Ring1 ESP register value.
 */
VMMDECL(int) SELMGetRing1Stack(PVM pVM, uint32_t *pSS, PRTGCPTR32 pEsp)
{
    Assert(!HMIsEnabled(pVM));
    Assert(pVM->cCpus == 1);
    PVMCPU pVCpu = &pVM->aCpus[0];

#ifdef SELM_TRACK_GUEST_TSS_CHANGES
    if (pVM->selm.s.fSyncTSSRing0Stack)
    {
#endif
        /* Re-read ss0:esp0 from the guest TSS and refresh our copy. */
        RTGCPTR GCPtrTss = pVM->selm.s.GCPtrGuestTss;
        int     rc;
        VBOXTSS tss;

        Assert(pVM->selm.s.GCPtrGuestTss && pVM->selm.s.cbMonitoredGuestTss);

# ifdef IN_RC
        bool    fTriedAlready = false;

l_tryagain:
        /* In RC we can read the guest TSS directly through its linear address. */
        PVBOXTSS pTss = (PVBOXTSS)(uintptr_t)GCPtrTss;
        rc  = MMGCRamRead(pVM, &tss.ss0,  &pTss->ss0,  sizeof(tss.ss0));
        rc |= MMGCRamRead(pVM, &tss.esp0, &pTss->esp0, sizeof(tss.esp0));
# ifdef DEBUG
        rc |= MMGCRamRead(pVM, &tss.offIoBitmap, &pTss->offIoBitmap, sizeof(tss.offIoBitmap));
# endif

        if (RT_FAILURE(rc))
        {
            if (!fTriedAlready)
            {
                /* Shadow page might be out of sync. Sync and try again */
                /** @todo might cross page boundary */
                fTriedAlready = true;
                rc = PGMPrefetchPage(pVCpu, (RTGCPTR)GCPtrTss);
                if (rc != VINF_SUCCESS)
                    return rc;
                goto l_tryagain;
            }
            AssertMsgFailed(("Unable to read TSS structure at %08X\n", GCPtrTss));
            return rc;
        }

# else /* !IN_RC */
        /* Reading too much. Could be cheaper than two separate calls though. */
        rc = PGMPhysSimpleReadGCPtr(pVCpu, &tss, GCPtrTss, sizeof(VBOXTSS));
        if (RT_FAILURE(rc))
        {
            AssertReleaseMsgFailed(("Unable to read TSS structure at %08X\n", GCPtrTss));
            return rc;
        }
# endif /* !IN_RC */

# ifdef LOG_ENABLED
        uint32_t ssr0  = pVM->selm.s.Tss.ss1;
        uint32_t espr0 = pVM->selm.s.Tss.esp1;
        ssr0 &= ~1;     /* strip the ring-compression bit for the comparison */

        if (ssr0 != tss.ss0 || espr0 != tss.esp0)
            Log(("SELMGetRing1Stack: Updating TSS ring 0 stack to %04X:%08X\n", tss.ss0, tss.esp0));

        Log(("offIoBitmap=%#x\n", tss.offIoBitmap));
# endif
        /* Update our TSS structure for the guest's ring 1 stack; the guest's
           ring-0 SS is stored with the RPL bit forced to 1 (ring compression). */
        selmSetRing1Stack(pVM, tss.ss0 | 1, (RTGCPTR32)tss.esp0);
        pVM->selm.s.fSyncTSSRing0Stack = false;
#ifdef SELM_TRACK_GUEST_TSS_CHANGES
    }
#endif

    *pSS  = pVM->selm.s.Tss.ss1;
    *pEsp = (RTGCPTR32)pVM->selm.s.Tss.esp1;

    return VINF_SUCCESS;
}
957
958
959/**
960 * Gets the hypervisor code selector (CS).
961 * @returns CS selector.
962 * @param pVM Pointer to the VM.
963 */
964VMMDECL(RTSEL) SELMGetHyperCS(PVM pVM)
965{
966 return pVM->selm.s.aHyperSel[SELM_HYPER_SEL_CS];
967}
968
969
970/**
971 * Gets the 64-mode hypervisor code selector (CS64).
972 * @returns CS selector.
973 * @param pVM Pointer to the VM.
974 */
975VMMDECL(RTSEL) SELMGetHyperCS64(PVM pVM)
976{
977 return pVM->selm.s.aHyperSel[SELM_HYPER_SEL_CS64];
978}
979
980
981/**
982 * Gets the hypervisor data selector (DS).
983 * @returns DS selector.
984 * @param pVM Pointer to the VM.
985 */
986VMMDECL(RTSEL) SELMGetHyperDS(PVM pVM)
987{
988 return pVM->selm.s.aHyperSel[SELM_HYPER_SEL_DS];
989}
990
991
992/**
993 * Gets the hypervisor TSS selector.
994 * @returns TSS selector.
995 * @param pVM Pointer to the VM.
996 */
997VMMDECL(RTSEL) SELMGetHyperTSS(PVM pVM)
998{
999 return pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS];
1000}
1001
1002
1003/**
1004 * Gets the hypervisor TSS Trap 8 selector.
1005 * @returns TSS Trap 8 selector.
1006 * @param pVM Pointer to the VM.
1007 */
1008VMMDECL(RTSEL) SELMGetHyperTSSTrap08(PVM pVM)
1009{
1010 return pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS_TRAP08];
1011}
1012
1013/**
1014 * Gets the address for the hypervisor GDT.
1015 *
1016 * @returns The GDT address.
1017 * @param pVM Pointer to the VM.
1018 * @remark This is intended only for very special use, like in the world
1019 * switchers. Don't exploit this API!
1020 */
1021VMMDECL(RTRCPTR) SELMGetHyperGDT(PVM pVM)
1022{
1023 /*
1024 * Always convert this from the HC pointer since we can be
1025 * called before the first relocation and have to work correctly
1026 * without having dependencies on the relocation order.
1027 */
1028 return (RTRCPTR)MMHyperR3ToRC(pVM, pVM->selm.s.paGdtR3);
1029}
1030
1031#endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
1032
1033/**
1034 * Gets info about the current TSS.
1035 *
1036 * @returns VBox status code.
1037 * @retval VINF_SUCCESS if we've got a TSS loaded.
1038 * @retval VERR_SELM_NO_TSS if we haven't got a TSS (rather unlikely).
1039 *
1040 * @param pVM Pointer to the VM.
1041 * @param pVCpu Pointer to the VMCPU.
1042 * @param pGCPtrTss Where to store the TSS address.
1043 * @param pcbTss Where to store the TSS size limit.
1044 * @param pfCanHaveIOBitmap Where to store the can-have-I/O-bitmap indicator. (optional)
1045 */
1046VMMDECL(int) SELMGetTSSInfo(PVM pVM, PVMCPU pVCpu, PRTGCUINTPTR pGCPtrTss, PRTGCUINTPTR pcbTss, bool *pfCanHaveIOBitmap)
1047{
1048 NOREF(pVM);
1049
1050 /*
1051 * The TR hidden register is always valid.
1052 */
1053 CPUMSELREGHID trHid;
1054 RTSEL tr = CPUMGetGuestTR(pVCpu, &trHid);
1055 if (!(tr & X86_SEL_MASK_OFF_RPL))
1056 return VERR_SELM_NO_TSS;
1057
1058 *pGCPtrTss = trHid.u64Base;
1059 *pcbTss = trHid.u32Limit + (trHid.u32Limit != UINT32_MAX); /* be careful. */
1060 if (pfCanHaveIOBitmap)
1061 *pfCanHaveIOBitmap = trHid.Attr.n.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL
1062 || trHid.Attr.n.u4Type == X86_SEL_TYPE_SYS_386_TSS_BUSY;
1063 return VINF_SUCCESS;
1064}
1065
1066
1067
1068/**
1069 * Notification callback which is called whenever there is a chance that a CR3
1070 * value might have changed.
1071 * This is called by PGM.
1072 *
1073 * @param pVM Pointer to the VM.
1074 * @param pVCpu Pointer to the VMCPU.
1075 */
1076VMMDECL(void) SELMShadowCR3Changed(PVM pVM, PVMCPU pVCpu)
1077{
1078 /** @todo SMP support!! (64-bit guest scenario, primarily) */
1079 pVM->selm.s.Tss.cr3 = PGMGetHyperCR3(pVCpu);
1080 pVM->selm.s.TssTrap08.cr3 = PGMGetInterRCCR3(pVM, pVCpu);
1081}
1082
Note: See TracBrowser for help on using the repository browser.

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette