VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/SELMAll.cpp@ 45485

Last change on this file since 45485 was 45485, checked in by vboxsync, 12 years ago
  • *: Where possible, drop the #ifdef VBOX_WITH_RAW_RING1 when EMIsRawRing1Enabled is used.
  • SELM: Don't shadow TSS.esp1/ss1 unless ring-1 compression is enabled (also fixed a log statement there).
  • SELM: selmGuestToShadowDesc should not push ring-1 selectors into ring-2 unless EMIsRawRing1Enabled() holds true.
  • REM: Don't set CPU_INTERRUPT_EXTERNAL_EXIT in helper_ltr() for now.
  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 35.6 KB
Line 
1/* $Id: SELMAll.cpp 45485 2013-04-11 14:46:04Z vboxsync $ */
2/** @file
3 * SELM All contexts.
4 */
5
6/*
7 * Copyright (C) 2006-2012 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.215389.xyz. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*******************************************************************************
20* Header Files *
21*******************************************************************************/
22#define LOG_GROUP LOG_GROUP_SELM
23#include <VBox/vmm/selm.h>
24#include <VBox/vmm/stam.h>
25#include <VBox/vmm/em.h>
26#include <VBox/vmm/mm.h>
27#include <VBox/vmm/pgm.h>
28#include <VBox/vmm/hm.h>
29#include "SELMInternal.h"
30#include <VBox/vmm/vm.h>
31#include <VBox/err.h>
32#include <VBox/param.h>
33#include <iprt/assert.h>
34#include <VBox/vmm/vmm.h>
35#include <iprt/x86.h>
36
37#include "SELMInline.h"
38
39
40/*******************************************************************************
41* Global Variables *
42*******************************************************************************/
43#if defined(LOG_ENABLED) && defined(VBOX_WITH_RAW_MODE_NOT_R0)
44/** Segment register names. */
45static char const g_aszSRegNms[X86_SREG_COUNT][4] = { "ES", "CS", "SS", "DS", "FS", "GS" };
46#endif
47
48
49
50#ifdef VBOX_WITH_RAW_MODE_NOT_R0
51/**
52 * Converts a GC selector based address to a flat address.
53 *
54 * No limit checks are done. Use the SELMToFlat*() or SELMValidate*() functions
55 * for that.
56 *
57 * @returns Flat address.
58 * @param pVM Pointer to the VM.
59 * @param Sel Selector part.
60 * @param Addr Address part.
61 * @remarks Don't use when in long mode.
62 */
63VMMDECL(RTGCPTR) SELMToFlatBySel(PVM pVM, RTSEL Sel, RTGCPTR Addr)
64{
65 Assert(pVM->cCpus == 1 && !CPUMIsGuestInLongMode(VMMGetCpu(pVM))); /* DON'T USE! */
66
67 /** @todo check the limit. */
68 X86DESC Desc;
69 if (!(Sel & X86_SEL_LDT))
70 Desc = pVM->selm.s.CTX_SUFF(paGdt)[Sel >> X86_SEL_SHIFT];
71 else
72 {
73 /** @todo handle LDT pages not present! */
74 PX86DESC paLDT = (PX86DESC)((char *)pVM->selm.s.CTX_SUFF(pvLdt) + pVM->selm.s.offLdtHyper);
75 Desc = paLDT[Sel >> X86_SEL_SHIFT];
76 }
77
78 return (RTGCPTR)(((RTGCUINTPTR)Addr + X86DESC_BASE(&Desc)) & 0xffffffff);
79}
80#endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
81
82
83/**
84 * Converts a GC selector based address to a flat address.
85 *
86 * No limit checks are done. Use the SELMToFlat*() or SELMValidate*() functions
87 * for that.
88 *
89 * @returns Flat address.
90 * @param pVM Pointer to the VM.
91 * @param SelReg Selector register
92 * @param pCtxCore CPU context
93 * @param Addr Address part.
94 */
VMMDECL(RTGCPTR) SELMToFlat(PVM pVM, DISSELREG SelReg, PCPUMCTXCORE pCtxCore, RTGCPTR Addr)
{
    PCPUMSELREG pSReg;
    PVMCPU      pVCpu = VMMGetCpu(pVM);

    /* Resolve the selector register enum to the actual CPUMSELREG in the context.
       Failure is only asserted on; pSReg would be left unset in that case. */
    int rc = DISFetchRegSegEx(pCtxCore, SelReg, &pSReg); AssertRC(rc);

    /*
     * Deal with real & v86 mode first: 16-bit offset wrap plus segment base
     * (hidden base when valid, otherwise the classic selector * 16).
     */
    if (   pCtxCore->eflags.Bits.u1VM
        || CPUMIsGuestInRealMode(pVCpu))
    {
        RTGCUINTPTR uFlat = (RTGCUINTPTR)Addr & 0xffff;
        if (CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg))
            uFlat += pSReg->u64Base;
        else
            uFlat += (RTGCUINTPTR)pSReg->Sel << 4;
        return (RTGCPTR)uFlat;
    }

#ifdef VBOX_WITH_RAW_MODE_NOT_R0
    /* Raw-mode: the hidden parts may be stale, reload them lazily from the
       descriptor tables before using the base/attributes below. */
    /** @todo when we're in 16 bits mode, we should cut off the address as well?? */
    if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg))
        CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, pSReg);
    if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtxCore->cs))
        CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, &pCtxCore->cs);
#else
    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtxCore->cs));
#endif

    /* 64 bits mode: CS, DS, ES and SS are treated as if each segment base is 0
       (Intel® 64 and IA-32 Architectures Software Developer's Manual: 3.4.2.1).
       Only FS and GS keep a meaningful base. */
    if (   pCtxCore->cs.Attr.n.u1Long
        && CPUMIsGuestInLongMode(pVCpu))
    {
        switch (SelReg)
        {
            case DISSELREG_FS:
            case DISSELREG_GS:
                return (RTGCPTR)(pSReg->u64Base + Addr);

            default:
                return Addr;    /* base 0 */
        }
    }

    /* AMD64 manual: compatibility mode ignores the high 32 bits when calculating an effective address. */
    Assert(pSReg->u64Base <= 0xffffffff);
    return ((pSReg->u64Base + (RTGCUINTPTR)Addr) & 0xffffffff);
}
147
148
149/**
150 * Converts a GC selector based address to a flat address.
151 *
152 * Some basic checking is done, but not all kinds yet.
153 *
154 * @returns VBox status
155 * @param pVCpu Pointer to the VMCPU.
156 * @param SelReg Selector register.
157 * @param pCtxCore CPU context.
158 * @param Addr Address part.
 * @param   fFlags      SELMTOFLAT_FLAGS_*. Assumes the GDT entries are valid.
161 * @param ppvGC Where to store the GC flat address.
162 */
163VMMDECL(int) SELMToFlatEx(PVMCPU pVCpu, DISSELREG SelReg, PCPUMCTXCORE pCtxCore, RTGCPTR Addr, uint32_t fFlags, PRTGCPTR ppvGC)
164{
165 /*
166 * Fetch the selector first.
167 */
168 PCPUMSELREG pSReg;
169 int rc = DISFetchRegSegEx(pCtxCore, SelReg, &pSReg);
170 AssertRCReturn(rc, rc); AssertPtr(pSReg);
171
172 /*
173 * Deal with real & v86 mode first.
174 */
175 if ( pCtxCore->eflags.Bits.u1VM
176 || CPUMIsGuestInRealMode(pVCpu))
177 {
178 RTGCUINTPTR uFlat = (RTGCUINTPTR)Addr & 0xffff;
179 if (ppvGC)
180 {
181 if (CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg))
182 *ppvGC = pSReg->u64Base + uFlat;
183 else
184 *ppvGC = ((RTGCUINTPTR)pSReg->Sel << 4) + uFlat;
185 }
186 return VINF_SUCCESS;
187 }
188
189
190#ifdef VBOX_WITH_RAW_MODE_NOT_R0
191 if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg))
192 CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, pSReg);
193 if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtxCore->cs))
194 CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, &pCtxCore->cs);
195#else
196 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
197 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtxCore->cs));
198#endif
199
200 /* 64 bits mode: CS, DS, ES and SS are treated as if each segment base is 0
201 (Intel® 64 and IA-32 Architectures Software Developer's Manual: 3.4.2.1). */
202 RTGCPTR pvFlat;
203 bool fCheckLimit = true;
204 if ( pCtxCore->cs.Attr.n.u1Long
205 && CPUMIsGuestInLongMode(pVCpu))
206 {
207 fCheckLimit = false;
208 switch (SelReg)
209 {
210 case DISSELREG_FS:
211 case DISSELREG_GS:
212 pvFlat = pSReg->u64Base + Addr;
213 break;
214
215 default:
216 pvFlat = Addr;
217 break;
218 }
219 }
220 else
221 {
222 /* AMD64 manual: compatibility mode ignores the high 32 bits when calculating an effective address. */
223 Assert(pSReg->u64Base <= UINT32_C(0xffffffff));
224 pvFlat = pSReg->u64Base + Addr;
225 pvFlat &= UINT32_C(0xffffffff);
226 }
227
228 /*
229 * Check type if present.
230 */
231 if (pSReg->Attr.n.u1Present)
232 {
233 switch (pSReg->Attr.n.u4Type)
234 {
235 /* Read only selector type. */
236 case X86_SEL_TYPE_RO:
237 case X86_SEL_TYPE_RO_ACC:
238 case X86_SEL_TYPE_RW:
239 case X86_SEL_TYPE_RW_ACC:
240 case X86_SEL_TYPE_EO:
241 case X86_SEL_TYPE_EO_ACC:
242 case X86_SEL_TYPE_ER:
243 case X86_SEL_TYPE_ER_ACC:
244 if (!(fFlags & SELMTOFLAT_FLAGS_NO_PL))
245 {
246 /** @todo fix this mess */
247 }
248 /* check limit. */
249 if (fCheckLimit && Addr > pSReg->u32Limit)
250 return VERR_OUT_OF_SELECTOR_BOUNDS;
251 /* ok */
252 if (ppvGC)
253 *ppvGC = pvFlat;
254 return VINF_SUCCESS;
255
256 case X86_SEL_TYPE_EO_CONF:
257 case X86_SEL_TYPE_EO_CONF_ACC:
258 case X86_SEL_TYPE_ER_CONF:
259 case X86_SEL_TYPE_ER_CONF_ACC:
260 if (!(fFlags & SELMTOFLAT_FLAGS_NO_PL))
261 {
262 /** @todo fix this mess */
263 }
264 /* check limit. */
265 if (fCheckLimit && Addr > pSReg->u32Limit)
266 return VERR_OUT_OF_SELECTOR_BOUNDS;
267 /* ok */
268 if (ppvGC)
269 *ppvGC = pvFlat;
270 return VINF_SUCCESS;
271
272 case X86_SEL_TYPE_RO_DOWN:
273 case X86_SEL_TYPE_RO_DOWN_ACC:
274 case X86_SEL_TYPE_RW_DOWN:
275 case X86_SEL_TYPE_RW_DOWN_ACC:
276 if (!(fFlags & SELMTOFLAT_FLAGS_NO_PL))
277 {
278 /** @todo fix this mess */
279 }
280 /* check limit. */
281 if (fCheckLimit)
282 {
283 if (!pSReg->Attr.n.u1Granularity && Addr > UINT32_C(0xffff))
284 return VERR_OUT_OF_SELECTOR_BOUNDS;
285 if (Addr <= pSReg->u32Limit)
286 return VERR_OUT_OF_SELECTOR_BOUNDS;
287 }
288 /* ok */
289 if (ppvGC)
290 *ppvGC = pvFlat;
291 return VINF_SUCCESS;
292
293 default:
294 return VERR_INVALID_SELECTOR;
295
296 }
297 }
298 return VERR_SELECTOR_NOT_PRESENT;
299}
300
301
302#ifdef VBOX_WITH_RAW_MODE_NOT_R0
303/**
304 * Converts a GC selector based address to a flat address.
305 *
306 * Some basic checking is done, but not all kinds yet.
307 *
308 * @returns VBox status
309 * @param pVCpu Pointer to the VMCPU.
310 * @param eflags Current eflags
311 * @param Sel Selector part.
312 * @param Addr Address part.
 * @param   fFlags      SELMTOFLAT_FLAGS_*. Assumes the GDT entries are valid.
315 * @param ppvGC Where to store the GC flat address.
316 * @param pcb Where to store the bytes from *ppvGC which can be accessed according to
317 * the selector. NULL is allowed.
318 * @remarks Don't use when in long mode.
319 */
VMMDECL(int) SELMToFlatBySelEx(PVMCPU pVCpu, X86EFLAGS eflags, RTSEL Sel, RTGCPTR Addr,
                               uint32_t fFlags, PRTGCPTR ppvGC, uint32_t *pcb)
{
    Assert(!CPUMIsGuestInLongMode(pVCpu));    /* DON'T USE! (Accessing shadow GDT/LDT.) */

    /*
     * Deal with real & v86 mode first: segment * 16 plus the 16-bit wrapped
     * offset; the accessible byte count is whatever remains of the 64KB segment.
     */
    if (   eflags.Bits.u1VM
        || CPUMIsGuestInRealMode(pVCpu))
    {
        RTGCUINTPTR uFlat = (RTGCUINTPTR)Addr & 0xffff;
        if (ppvGC)
            *ppvGC = ((RTGCUINTPTR)Sel << 4) + uFlat;
        if (pcb)
            *pcb = 0x10000 - uFlat;
        return VINF_SUCCESS;
    }

    /*
     * Fetch the descriptor from the shadow GDT or LDT, bounds-checking the
     * selector against the guest table limits first (hyper selectors skip the
     * GDT limit check via SELMTOFLAT_FLAGS_HYPER).
     */
    /** @todo when we're in 16 bits mode, we should cut off the address as well?? */
    X86DESC Desc;
    PVM     pVM = pVCpu->CTX_SUFF(pVM);
    if (!(Sel & X86_SEL_LDT))
    {
        if (   !(fFlags & SELMTOFLAT_FLAGS_HYPER)
            && (Sel | X86_SEL_RPL_LDT) > pVM->selm.s.GuestGdtr.cbGdt)
            return VERR_INVALID_SELECTOR;
        Desc = pVM->selm.s.CTX_SUFF(paGdt)[Sel >> X86_SEL_SHIFT];
    }
    else
    {
        if ((Sel | X86_SEL_RPL_LDT) > pVM->selm.s.cbLdtLimit)
            return VERR_INVALID_SELECTOR;

        /** @todo handle LDT page(s) not present! */
        PX86DESC paLDT = (PX86DESC)((char *)pVM->selm.s.CTX_SUFF(pvLdt) + pVM->selm.s.offLdtHyper);
        Desc = paLDT[Sel >> X86_SEL_SHIFT];
    }

    /* calc limit. (granularity-scaled byte limit) */
    uint32_t u32Limit = X86DESC_LIMIT_G(&Desc);

    /* calc address assuming straight stuff. */
    RTGCPTR pvFlat = Addr + X86DESC_BASE(&Desc);

    /* Cut the address to 32 bits. */
    Assert(!CPUMIsGuestInLongMode(pVCpu));
    pvFlat &= 0xffffffff;

    uint8_t u1Present     = Desc.Gen.u1Present;
    uint8_t u1Granularity = Desc.Gen.u1Granularity;
    uint8_t u1DescType    = Desc.Gen.u1DescType;
    uint8_t u4Type        = Desc.Gen.u4Type;

    /*
     * Check if present.
     */
    if (u1Present)
    {
        /*
         * Type check. BOTH() combines the S bit (code/data vs system) with the
         * 4-bit type so a single switch can dispatch on both.
         */
#define BOTH(a, b) ((a << 16) | b)
        switch (BOTH(u1DescType, u4Type))
        {

            /** Read only selector type. */
            case BOTH(1,X86_SEL_TYPE_RO):
            case BOTH(1,X86_SEL_TYPE_RO_ACC):
            case BOTH(1,X86_SEL_TYPE_RW):
            case BOTH(1,X86_SEL_TYPE_RW_ACC):
            case BOTH(1,X86_SEL_TYPE_EO):
            case BOTH(1,X86_SEL_TYPE_EO_ACC):
            case BOTH(1,X86_SEL_TYPE_ER):
            case BOTH(1,X86_SEL_TYPE_ER_ACC):
                if (!(fFlags & SELMTOFLAT_FLAGS_NO_PL))
                {
                    /** @todo fix this mess */
                }
                /* check limit. */
                if ((RTGCUINTPTR)Addr > u32Limit)
                    return VERR_OUT_OF_SELECTOR_BOUNDS;
                /* ok */
                if (ppvGC)
                    *ppvGC = pvFlat;
                if (pcb)
                    *pcb = u32Limit - (uint32_t)Addr + 1;
                return VINF_SUCCESS;

            /* Conforming code segments: same straight limit check. */
            case BOTH(1,X86_SEL_TYPE_EO_CONF):
            case BOTH(1,X86_SEL_TYPE_EO_CONF_ACC):
            case BOTH(1,X86_SEL_TYPE_ER_CONF):
            case BOTH(1,X86_SEL_TYPE_ER_CONF_ACC):
                if (!(fFlags & SELMTOFLAT_FLAGS_NO_PL))
                {
                    /** @todo fix this mess */
                }
                /* check limit. */
                if ((RTGCUINTPTR)Addr > u32Limit)
                    return VERR_OUT_OF_SELECTOR_BOUNDS;
                /* ok */
                if (ppvGC)
                    *ppvGC = pvFlat;
                if (pcb)
                    *pcb = u32Limit - (uint32_t)Addr + 1;
                return VINF_SUCCESS;

            /* Expand-down data segments: valid offsets lie strictly above the
               limit, up to 0xffff or 0xffffffff depending on granularity.
               NOTE(review): Intel SDM says the upper bound of an expand-down
               segment depends on the D/B bit, not G — verify before changing. */
            case BOTH(1,X86_SEL_TYPE_RO_DOWN):
            case BOTH(1,X86_SEL_TYPE_RO_DOWN_ACC):
            case BOTH(1,X86_SEL_TYPE_RW_DOWN):
            case BOTH(1,X86_SEL_TYPE_RW_DOWN_ACC):
                if (!(fFlags & SELMTOFLAT_FLAGS_NO_PL))
                {
                    /** @todo fix this mess */
                }
                /* check limit. */
                if (!u1Granularity && (RTGCUINTPTR)Addr > (RTGCUINTPTR)0xffff)
                    return VERR_OUT_OF_SELECTOR_BOUNDS;
                if ((RTGCUINTPTR)Addr <= u32Limit)
                    return VERR_OUT_OF_SELECTOR_BOUNDS;

                /* ok */
                if (ppvGC)
                    *ppvGC = pvFlat;
                if (pcb)
                    *pcb = (RTGCUINTPTR)(u1Granularity ? 0xffffffff : 0xffff) - (RTGCUINTPTR)Addr + 1;
                return VINF_SUCCESS;

            /* System descriptors (TSSes, LDTs, gates): straight limit check. */
            case BOTH(0,X86_SEL_TYPE_SYS_286_TSS_AVAIL):
            case BOTH(0,X86_SEL_TYPE_SYS_LDT):
            case BOTH(0,X86_SEL_TYPE_SYS_286_TSS_BUSY):
            case BOTH(0,X86_SEL_TYPE_SYS_286_CALL_GATE):
            case BOTH(0,X86_SEL_TYPE_SYS_TASK_GATE):
            case BOTH(0,X86_SEL_TYPE_SYS_286_INT_GATE):
            case BOTH(0,X86_SEL_TYPE_SYS_286_TRAP_GATE):
            case BOTH(0,X86_SEL_TYPE_SYS_386_TSS_AVAIL):
            case BOTH(0,X86_SEL_TYPE_SYS_386_TSS_BUSY):
            case BOTH(0,X86_SEL_TYPE_SYS_386_CALL_GATE):
            case BOTH(0,X86_SEL_TYPE_SYS_386_INT_GATE):
            case BOTH(0,X86_SEL_TYPE_SYS_386_TRAP_GATE):
                if (!(fFlags & SELMTOFLAT_FLAGS_NO_PL))
                {
                    /** @todo fix this mess */
                }
                /* check limit. */
                if ((RTGCUINTPTR)Addr > u32Limit)
                    return VERR_OUT_OF_SELECTOR_BOUNDS;
                /* ok */
                if (ppvGC)
                    *ppvGC = pvFlat;
                if (pcb)
                    *pcb = 0xffffffff - (RTGCUINTPTR)pvFlat + 1; /* Depends on the type.. fixme if we care. */
                return VINF_SUCCESS;

            default:
                return VERR_INVALID_SELECTOR;

        }
#undef BOTH
    }
    return VERR_SELECTOR_NOT_PRESENT;
}
482#endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
483
484
485#ifdef VBOX_WITH_RAW_MODE_NOT_R0
486
/**
 * Loads the hidden parts of a segment register directly from the guest's
 * descriptor table (fallback when the shadow table entry is unusable).
 *
 * On read failure or a descriptor that fails validation, the hidden parts are
 * left untouched (only statistics are bumped), so the caller will retry on the
 * next use.
 *
 * @param   pVCpu       Pointer to the current virtual CPU.
 * @param   pCtx        The guest CPU context (currently unused here).
 * @param   pSReg       The segment register to load.
 * @param   GCPtrDesc   Guest address of the descriptor table entry.
 * @param   Sel         The selector value (for logging/validation).
 * @param   iSReg       The segment register index (X86_SREG_XXX).
 */
static void selLoadHiddenSelectorRegFromGuestTable(PVMCPU pVCpu, PCCPUMCTX pCtx, PCPUMSELREG pSReg,
                                                   RTGCPTR GCPtrDesc, RTSEL const Sel, uint32_t const iSReg)
{
    /*
     * Try read the entry.
     */
    X86DESC GstDesc;
    int rc = PGMPhysReadGCPtr(pVCpu, &GstDesc, GCPtrDesc, sizeof(GstDesc));
    if (RT_FAILURE(rc))
    {
        Log(("SELMLoadHiddenSelectorReg: Error reading descriptor %s=%#x: %Rrc\n", g_aszSRegNms[iSReg], Sel, rc));
        STAM_REL_COUNTER_INC(&pVCpu->CTX_SUFF(pVM)->selm.s.StatLoadHidSelReadErrors);
        return;
    }

    /*
     * Validate it and load it.
     */
    if (!selmIsGstDescGoodForSReg(pVCpu, pSReg, &GstDesc, iSReg, CPUMGetGuestCPL(pVCpu)))
    {
        Log(("SELMLoadHiddenSelectorReg: Guest table entry is no good (%s=%#x): %.8Rhxs\n", g_aszSRegNms[iSReg], Sel, &GstDesc));
        STAM_REL_COUNTER_INC(&pVCpu->CTX_SUFF(pVM)->selm.s.StatLoadHidSelGstNoGood);
        return;
    }

    selmLoadHiddenSRegFromGuestDesc(pVCpu, pSReg, &GstDesc);
    Log(("SELMLoadHiddenSelectorReg: loaded %s=%#x:{b=%llx, l=%x, a=%x, vs=%x} (gst)\n",
         g_aszSRegNms[iSReg], Sel, pSReg->u64Base, pSReg->u32Limit, pSReg->Attr.u, pSReg->ValidSel));
    STAM_COUNTER_INC(&pVCpu->CTX_SUFF(pVM)->selm.s.StatLoadHidSelGst);
}
517
518
519/**
520 * CPUM helper that loads the hidden selector register from the descriptor table
521 * when executing with raw-mode.
522 *
523 * @remarks This is only used when in legacy protected mode!
524 *
525 * @param pVCpu Pointer to the current virtual CPU.
526 * @param pCtx The guest CPU context.
527 * @param pSReg The selector register.
528 *
529 * @todo Deal 100% correctly with stale selectors. What's more evil is
530 * invalid page table entries, which isn't impossible to imagine for
531 * LDT entries for instance, though unlikely. Currently, we turn a
532 * blind eye to these issues and return the old hidden registers,
533 * though we don't set the valid flag, so that we'll try loading them
534 * over and over again till we succeed loading something.
535 */
VMM_INT_DECL(void) SELMLoadHiddenSelectorReg(PVMCPU pVCpu, PCCPUMCTX pCtx, PCPUMSELREG pSReg)
{
    /* Legacy protected mode only: PE set, LMA clear. */
    Assert(pCtx->cr0 & X86_CR0_PE);
    Assert(!(pCtx->msrEFER & MSR_K6_EFER_LMA));

    PVM pVM = pVCpu->CTX_SUFF(pVM);
    Assert(pVM->cCpus == 1);


    /*
     * Get the shadow descriptor table entry and validate it.
     * Should something go amiss, try the guest table.
     */
    RTSEL const    Sel   = pSReg->Sel;
    uint32_t const iSReg = pSReg - CPUMCTX_FIRST_SREG(pCtx); Assert(iSReg < X86_SREG_COUNT);
    PCX86DESC      pShwDesc;
    if (!(Sel & X86_SEL_LDT))
    {
        /** @todo this shall not happen, we shall check for these things when executing
         *        LGDT */
        AssertReturnVoid((Sel | X86_SEL_RPL | X86_SEL_LDT) <= pCtx->gdtr.cbGdt);

        /* Shadow GDT entry; fall back on the guest GDT if the shadow table is
           pending a sync or the entry fails validation. */
        pShwDesc = &pVM->selm.s.CTX_SUFF(paGdt)[Sel >> X86_SEL_SHIFT];
        if (   VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT)
            || !selmIsShwDescGoodForSReg(pSReg, pShwDesc, iSReg, CPUMGetGuestCPL(pVCpu)))
        {
            selLoadHiddenSelectorRegFromGuestTable(pVCpu, pCtx, pSReg, pCtx->gdtr.pGdt + (Sel & X86_SEL_MASK), Sel, iSReg);
            return;
        }
    }
    else
    {
        /** @todo this shall not happen, we shall check for these things when executing
         *        LLDT */
        AssertReturnVoid((Sel | X86_SEL_RPL | X86_SEL_LDT) <= pCtx->ldtr.u32Limit);

        /* Shadow LDT entry; same fallback logic as the GDT branch above. */
        pShwDesc = (PCX86DESC)((uintptr_t)pVM->selm.s.CTX_SUFF(pvLdt) + pVM->selm.s.offLdtHyper + (Sel & X86_SEL_MASK));
        if (   VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_SELM_SYNC_LDT)
            || !selmIsShwDescGoodForSReg(pSReg, pShwDesc, iSReg, CPUMGetGuestCPL(pVCpu)))
        {
            selLoadHiddenSelectorRegFromGuestTable(pVCpu, pCtx, pSReg, pCtx->ldtr.u64Base + (Sel & X86_SEL_MASK), Sel, iSReg);
            return;
        }
    }

    /*
     * All fine, load it.
     */
    selmLoadHiddenSRegFromShadowDesc(pSReg, pShwDesc);
    STAM_COUNTER_INC(&pVCpu->CTX_SUFF(pVM)->selm.s.StatLoadHidSelShw);
    Log(("SELMLoadHiddenSelectorReg: loaded %s=%#x:{b=%llx, l=%x, a=%x, vs=%x} (shw)\n",
         g_aszSRegNms[iSReg], Sel, pSReg->u64Base, pSReg->u32Limit, pSReg->Attr.u, pSReg->ValidSel));
}
589
590#endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
591
592/**
593 * Validates and converts a GC selector based code address to a flat
594 * address when in real or v8086 mode.
595 *
596 * @returns VINF_SUCCESS.
597 * @param pVCpu Pointer to the VMCPU.
598 * @param SelCS Selector part.
 * @param   pSReg       The hidden CS register part. Optional.
600 * @param Addr Address part.
601 * @param ppvFlat Where to store the flat address.
602 */
603DECLINLINE(int) selmValidateAndConvertCSAddrRealMode(PVMCPU pVCpu, RTSEL SelCS, PCCPUMSELREGHID pSReg, RTGCPTR Addr,
604 PRTGCPTR ppvFlat)
605{
606 RTGCUINTPTR uFlat = Addr & 0xffff;
607 if (!pSReg || !CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg))
608 uFlat += (RTGCUINTPTR)SelCS << 4;
609 else
610 uFlat += pSReg->u64Base;
611 *ppvFlat = uFlat;
612 return VINF_SUCCESS;
613}
614
615
616#ifdef VBOX_WITH_RAW_MODE_NOT_R0
617/**
618 * Validates and converts a GC selector based code address to a flat address
619 * when in protected/long mode using the raw-mode algorithm.
620 *
621 * @returns VBox status code.
622 * @param pVM Pointer to the VM.
623 * @param pVCpu Pointer to the VMCPU.
624 * @param SelCPL Current privilege level. Get this from SS - CS might be
625 * conforming! A full selector can be passed, we'll only
626 * use the RPL part.
627 * @param SelCS Selector part.
628 * @param Addr Address part.
629 * @param ppvFlat Where to store the flat address.
630 * @param pcBits Where to store the segment bitness (16/32/64). Optional.
631 */
DECLINLINE(int) selmValidateAndConvertCSAddrRawMode(PVM pVM, PVMCPU pVCpu, RTSEL SelCPL, RTSEL SelCS, RTGCPTR Addr,
                                                    PRTGCPTR ppvFlat, uint32_t *pcBits)
{
    NOREF(pVCpu);
    /* Fetch the descriptor from the shadow GDT or LDT (raw-mode tables). */
    /** @todo validate limit! */
    X86DESC Desc;
    if (!(SelCS & X86_SEL_LDT))
        Desc = pVM->selm.s.CTX_SUFF(paGdt)[SelCS >> X86_SEL_SHIFT];
    else
    {
        /** @todo handle LDT page(s) not present! */
        PX86DESC paLDT = (PX86DESC)((char *)pVM->selm.s.CTX_SUFF(pvLdt) + pVM->selm.s.offLdtHyper);
        Desc = paLDT[SelCS >> X86_SEL_SHIFT];
    }

    /*
     * Check if present.
     */
    if (Desc.Gen.u1Present)
    {
        /*
         * Type check: must be a code segment (S=1, type has the code bit).
         */
        if (   Desc.Gen.u1DescType == 1
            && (Desc.Gen.u4Type & X86_SEL_TYPE_CODE))
        {
            /*
             * Check level: effective level is max(CPL, RPL); non-conforming
             * requires level <= DPL, conforming requires level >= DPL.
             */
            unsigned uLevel = RT_MAX(SelCPL & X86_SEL_RPL, SelCS & X86_SEL_RPL);
            if (   !(Desc.Gen.u4Type & X86_SEL_TYPE_CONF)
                ?  uLevel <= Desc.Gen.u2Dpl
                :  uLevel >= Desc.Gen.u2Dpl /* hope I got this right now... */
               )
            {
                /*
                 * Limit check.
                 */
                uint32_t u32Limit = X86DESC_LIMIT_G(&Desc);
                if ((RTGCUINTPTR)Addr <= u32Limit)
                {
                    *ppvFlat = (RTGCPTR)((RTGCUINTPTR)Addr + X86DESC_BASE(&Desc));
                    /* Cut the address to 32 bits. */
                    *ppvFlat &= 0xffffffff;

                    if (pcBits)
                        *pcBits = Desc.Gen.u1DefBig ? 32 : 16; /** @todo GUEST64 */
                    return VINF_SUCCESS;
                }
                return VERR_OUT_OF_SELECTOR_BOUNDS;
            }
            return VERR_INVALID_RPL;
        }
        return VERR_NOT_CODE_SELECTOR;
    }
    return VERR_SELECTOR_NOT_PRESENT;
}
689#endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
690
691
692/**
693 * Validates and converts a GC selector based code address to a flat address
694 * when in protected/long mode using the standard hidden selector registers
695 *
696 * @returns VBox status code.
697 * @param pVCpu Pointer to the VMCPU.
698 * @param SelCPL Current privilege level. Get this from SS - CS might be
699 * conforming! A full selector can be passed, we'll only
700 * use the RPL part.
701 * @param SelCS Selector part.
702 * @param pSRegCS The full CS selector register.
703 * @param Addr The address (think IP/EIP/RIP).
704 * @param ppvFlat Where to store the flat address upon successful return.
705 */
706DECLINLINE(int) selmValidateAndConvertCSAddrHidden(PVMCPU pVCpu, RTSEL SelCPL, RTSEL SelCS, PCCPUMSELREGHID pSRegCS,
707 RTGCPTR Addr, PRTGCPTR ppvFlat)
708{
709 /*
710 * Check if present.
711 */
712 if (pSRegCS->Attr.n.u1Present)
713 {
714 /*
715 * Type check.
716 */
717 if ( pSRegCS->Attr.n.u1DescType == 1
718 && (pSRegCS->Attr.n.u4Type & X86_SEL_TYPE_CODE))
719 {
720 /*
721 * Check level.
722 */
723 unsigned uLevel = RT_MAX(SelCPL & X86_SEL_RPL, SelCS & X86_SEL_RPL);
724 if ( !(pSRegCS->Attr.n.u4Type & X86_SEL_TYPE_CONF)
725 ? uLevel <= pSRegCS->Attr.n.u2Dpl
726 : uLevel >= pSRegCS->Attr.n.u2Dpl /* hope I got this right now... */
727 )
728 {
729 /* 64 bits mode: CS, DS, ES and SS are treated as if each segment base is 0
730 (Intel® 64 and IA-32 Architectures Software Developer's Manual: 3.4.2.1). */
731 if ( pSRegCS->Attr.n.u1Long
732 && CPUMIsGuestInLongMode(pVCpu))
733 {
734 *ppvFlat = Addr;
735 return VINF_SUCCESS;
736 }
737
738 /*
739 * Limit check. Note that the limit in the hidden register is the
740 * final value. The granularity bit was included in its calculation.
741 */
742 uint32_t u32Limit = pSRegCS->u32Limit;
743 if ((RTGCUINTPTR)Addr <= u32Limit)
744 {
745 *ppvFlat = Addr + pSRegCS->u64Base;
746 return VINF_SUCCESS;
747 }
748
749 return VERR_OUT_OF_SELECTOR_BOUNDS;
750 }
751 Log(("selmValidateAndConvertCSAddrHidden: Invalid RPL Attr.n.u4Type=%x cpl=%x dpl=%x\n",
752 pSRegCS->Attr.n.u4Type, uLevel, pSRegCS->Attr.n.u2Dpl));
753 return VERR_INVALID_RPL;
754 }
755 return VERR_NOT_CODE_SELECTOR;
756 }
757 return VERR_SELECTOR_NOT_PRESENT;
758}
759
760
761/**
762 * Validates and converts a GC selector based code address to a flat address.
763 *
764 * @returns VBox status code.
765 * @param pVCpu Pointer to the VMCPU.
766 * @param Efl Current EFLAGS.
767 * @param SelCPL Current privilege level. Get this from SS - CS might be
768 * conforming! A full selector can be passed, we'll only
769 * use the RPL part.
770 * @param SelCS Selector part.
771 * @param pSRegCS The full CS selector register.
772 * @param Addr The address (think IP/EIP/RIP).
773 * @param ppvFlat Where to store the flat address upon successful return.
774 */
VMMDECL(int) SELMValidateAndConvertCSAddr(PVMCPU pVCpu, X86EFLAGS Efl, RTSEL SelCPL, RTSEL SelCS, PCPUMSELREG pSRegCS,
                                          RTGCPTR Addr, PRTGCPTR ppvFlat)
{
    /* Real and v8086 mode always take the simple base+offset path, no checks. */
    if (   Efl.Bits.u1VM
        || CPUMIsGuestInRealMode(pVCpu))
        return selmValidateAndConvertCSAddrRealMode(pVCpu, SelCS, pSRegCS, Addr, ppvFlat);

#ifdef VBOX_WITH_RAW_MODE_NOT_R0
    /* Use the hidden registers when possible, updating them if outdated. Fall
       back on walking the shadow tables when no CS register was supplied. */
    if (!pSRegCS)
        return selmValidateAndConvertCSAddrRawMode(pVCpu->CTX_SUFF(pVM), pVCpu, SelCPL, SelCS, Addr, ppvFlat, NULL);

    if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSRegCS))
        CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, pSRegCS);

    /* Undo ring compression: raw-mode runs ring-0 guest code in ring 1, so
       strip the RPL=1 bits again before the privilege checks (not when HM). */
    if ((SelCPL & X86_SEL_RPL) == 1 && !HMIsEnabled(pVCpu->CTX_SUFF(pVM)))
        SelCPL &= ~X86_SEL_RPL;
    Assert(pSRegCS->Sel == SelCS);
    if ((SelCS & X86_SEL_RPL) == 1 && !HMIsEnabled(pVCpu->CTX_SUFF(pVM)))
        SelCS &= ~X86_SEL_RPL;
#else
    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSRegCS));
    Assert(pSRegCS->Sel == SelCS);
#endif

    return selmValidateAndConvertCSAddrHidden(pVCpu, SelCPL, SelCS, pSRegCS, Addr, ppvFlat);
}
803
804
805/**
806 * Returns Hypervisor's Trap 08 (\#DF) selector.
807 *
808 * @returns Hypervisor's Trap 08 (\#DF) selector.
809 * @param pVM Pointer to the VM.
810 */
VMMDECL(RTSEL) SELMGetTrap8Selector(PVM pVM)
{
    /* Plain accessor: the #DF task's TSS selector from the hyper selector table. */
    return pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS_TRAP08];
}
815
816
817/**
818 * Sets EIP of Hypervisor's Trap 08 (\#DF) TSS.
819 *
820 * @param pVM Pointer to the VM.
821 * @param u32EIP EIP of Trap 08 handler.
822 */
VMMDECL(void) SELMSetTrap8EIP(PVM pVM, uint32_t u32EIP)
{
    /* Points the double-fault (#DF) task's TSS at the trap handler entry point. */
    pVM->selm.s.TssTrap08.eip = u32EIP;
}
827
828
829/**
830 * Sets ss:esp for ring1 in main Hypervisor's TSS.
831 *
832 * @param pVM Pointer to the VM.
833 * @param ss Ring1 SS register value. Pass 0 if invalid.
834 * @param esp Ring1 ESP register value.
835 */
void selmSetRing1Stack(PVM pVM, uint32_t ss, RTGCPTR32 esp)
{
    /* Either SS carries the ring-1 RPL bit, or the stack is being invalidated
       (SS=0 implies ESP must be 0 too). */
    Assert((ss & 1) || esp == 0);
    pVM->selm.s.Tss.ss1 = ss;
    pVM->selm.s.Tss.esp1 = (uint32_t)esp;
}
842
843
844#ifdef VBOX_WITH_RAW_RING1
845/**
 * Sets ss:esp for ring2 in main Hypervisor's TSS.
847 *
848 * @param pVM Pointer to the VM.
849 * @param ss Ring2 SS register value. Pass 0 if invalid.
850 * @param esp Ring2 ESP register value.
851 */
void selmSetRing2Stack(PVM pVM, uint32_t ss, RTGCPTR32 esp)
{
    /* Either SS has RPL=2, or the stack is being invalidated (esp must be 0). */
    Assert((ss & 3) == 2 || esp == 0);
    pVM->selm.s.Tss.ss2 = ss;
    pVM->selm.s.Tss.esp2 = (uint32_t)esp;
}
858#endif
859
860
861#ifdef VBOX_WITH_RAW_MODE_NOT_R0
862/**
863 * Gets ss:esp for ring1 in main Hypervisor's TSS.
864 *
865 * Returns SS=0 if the ring-1 stack isn't valid.
866 *
867 * @returns VBox status code.
868 * @param pVM Pointer to the VM.
869 * @param pSS Ring1 SS register value.
870 * @param pEsp Ring1 ESP register value.
871 */
VMMDECL(int) SELMGetRing1Stack(PVM pVM, uint32_t *pSS, PRTGCPTR32 pEsp)
{
    Assert(pVM->cCpus == 1);
    PVMCPU pVCpu = &pVM->aCpus[0];

#ifdef SELM_TRACK_GUEST_TSS_CHANGES
    /* Refresh from the guest TSS only when a resync is pending. */
    if (pVM->selm.s.fSyncTSSRing0Stack)
    {
#endif
        RTGCPTR GCPtrTss = pVM->selm.s.GCPtrGuestTss;
        int     rc;
        VBOXTSS tss;

        Assert(pVM->selm.s.GCPtrGuestTss && pVM->selm.s.cbMonitoredGuestTss);

# ifdef IN_RC
        bool    fTriedAlready = false;

l_tryagain:
        /* In raw-mode context we can read the guest TSS fields directly, but the
           shadow page may be out of sync — hence the single prefetch-and-retry. */
        PVBOXTSS pTss = (PVBOXTSS)(uintptr_t)GCPtrTss;
        rc  = MMGCRamRead(pVM, &tss.ss0, &pTss->ss0, sizeof(tss.ss0));
        rc |= MMGCRamRead(pVM, &tss.esp0, &pTss->esp0, sizeof(tss.esp0));
#  ifdef DEBUG
        rc |= MMGCRamRead(pVM, &tss.offIoBitmap, &pTss->offIoBitmap, sizeof(tss.offIoBitmap));
#  endif

        if (RT_FAILURE(rc))
        {
            if (!fTriedAlready)
            {
                /* Shadow page might be out of sync. Sync and try again */
                /** @todo might cross page boundary */
                fTriedAlready = true;
                rc = PGMPrefetchPage(pVCpu, (RTGCPTR)GCPtrTss);
                if (rc != VINF_SUCCESS)
                    return rc;
                goto l_tryagain;
            }
            AssertMsgFailed(("Unable to read TSS structure at %08X\n", GCPtrTss));
            return rc;
        }

# else /* !IN_RC */
        /* Reading too much. Could be cheaper than two separate calls though. */
        rc = PGMPhysSimpleReadGCPtr(pVCpu, &tss, GCPtrTss, sizeof(VBOXTSS));
        if (RT_FAILURE(rc))
        {
            AssertReleaseMsgFailed(("Unable to read TSS structure at %08X\n", GCPtrTss));
            return rc;
        }
# endif /* !IN_RC */

# ifdef LOG_ENABLED
        /* Log only when the guest's ring-0 stack actually changed; mask off the
           ring-1 compression bit before comparing. */
        uint32_t ssr0 = pVM->selm.s.Tss.ss1;
        uint32_t espr0 = pVM->selm.s.Tss.esp1;
        ssr0 &= ~1;

        if (ssr0 != tss.ss0 || espr0 != tss.esp0)
            Log(("SELMGetRing1Stack: Updating TSS ring 0 stack to %04X:%08X\n", tss.ss0, tss.esp0));

        Log(("offIoBitmap=%#x\n", tss.offIoBitmap));
# endif
        /* Update our TSS structure for the guest's ring 1 stack */
        selmSetRing1Stack(pVM, tss.ss0 | 1, (RTGCPTR32)tss.esp0);
        pVM->selm.s.fSyncTSSRing0Stack = false;
#ifdef SELM_TRACK_GUEST_TSS_CHANGES
    }
#endif

    *pSS = pVM->selm.s.Tss.ss1;
    *pEsp = (RTGCPTR32)pVM->selm.s.Tss.esp1;

    return VINF_SUCCESS;
}
946#endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
947
948
949/**
950 * Returns Guest TSS pointer
951 *
952 * @returns Pointer to the guest TSS, RTRCPTR_MAX if not being monitored.
953 * @param pVM Pointer to the VM.
954 */
VMMDECL(RTGCPTR) SELMGetGuestTSS(PVM pVM)
{
    /* Plain accessor for the monitored guest TSS address. */
    return (RTGCPTR)pVM->selm.s.GCPtrGuestTss;
}
959
960#ifdef VBOX_WITH_RAW_MODE_NOT_R0
961
962/**
963 * Gets the hypervisor code selector (CS).
964 * @returns CS selector.
965 * @param pVM Pointer to the VM.
966 */
VMMDECL(RTSEL) SELMGetHyperCS(PVM pVM)
{
    /* Plain accessor into the hyper selector table. */
    return pVM->selm.s.aHyperSel[SELM_HYPER_SEL_CS];
}
971
972
973/**
974 * Gets the 64-mode hypervisor code selector (CS64).
975 * @returns CS selector.
976 * @param pVM Pointer to the VM.
977 */
VMMDECL(RTSEL) SELMGetHyperCS64(PVM pVM)
{
    /* Plain accessor into the hyper selector table. */
    return pVM->selm.s.aHyperSel[SELM_HYPER_SEL_CS64];
}
982
983
984/**
985 * Gets the hypervisor data selector (DS).
986 * @returns DS selector.
987 * @param pVM Pointer to the VM.
988 */
VMMDECL(RTSEL) SELMGetHyperDS(PVM pVM)
{
    /* Plain accessor into the hyper selector table. */
    return pVM->selm.s.aHyperSel[SELM_HYPER_SEL_DS];
}
993
994
995/**
996 * Gets the hypervisor TSS selector.
997 * @returns TSS selector.
998 * @param pVM Pointer to the VM.
999 */
VMMDECL(RTSEL) SELMGetHyperTSS(PVM pVM)
{
    /* Plain accessor into the hyper selector table. */
    return pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS];
}
1004
1005
1006/**
1007 * Gets the hypervisor TSS Trap 8 selector.
1008 * @returns TSS Trap 8 selector.
1009 * @param pVM Pointer to the VM.
1010 */
VMMDECL(RTSEL) SELMGetHyperTSSTrap08(PVM pVM)
{
    /* Plain accessor into the hyper selector table. */
    return pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS_TRAP08];
}
1015
1016/**
1017 * Gets the address for the hypervisor GDT.
1018 *
1019 * @returns The GDT address.
1020 * @param pVM Pointer to the VM.
1021 * @remark This is intended only for very special use, like in the world
1022 * switchers. Don't exploit this API!
1023 */
VMMDECL(RTRCPTR) SELMGetHyperGDT(PVM pVM)
{
    /*
     * Always convert this from the HC pointer since we can be
     * called before the first relocation and have to work correctly
     * without having dependencies on the relocation order.
     * (I.e. don't cache an RC pointer here.)
     */
    return (RTRCPTR)MMHyperR3ToRC(pVM, pVM->selm.s.paGdtR3);
}
1033
1034#endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
1035
1036/**
1037 * Gets info about the current TSS.
1038 *
1039 * @returns VBox status code.
1040 * @retval VINF_SUCCESS if we've got a TSS loaded.
1041 * @retval VERR_SELM_NO_TSS if we haven't got a TSS (rather unlikely).
1042 *
1043 * @param pVM Pointer to the VM.
1044 * @param pVCpu Pointer to the VMCPU.
1045 * @param pGCPtrTss Where to store the TSS address.
1046 * @param pcbTss Where to store the TSS size limit.
1047 * @param pfCanHaveIOBitmap Where to store the can-have-I/O-bitmap indicator. (optional)
1048 */
1049VMMDECL(int) SELMGetTSSInfo(PVM pVM, PVMCPU pVCpu, PRTGCUINTPTR pGCPtrTss, PRTGCUINTPTR pcbTss, bool *pfCanHaveIOBitmap)
1050{
1051 NOREF(pVM);
1052
1053 /*
1054 * The TR hidden register is always valid.
1055 */
1056 CPUMSELREGHID trHid;
1057 RTSEL tr = CPUMGetGuestTR(pVCpu, &trHid);
1058 if (!(tr & X86_SEL_MASK_OFF_RPL))
1059 return VERR_SELM_NO_TSS;
1060
1061 *pGCPtrTss = trHid.u64Base;
1062 *pcbTss = trHid.u32Limit + (trHid.u32Limit != UINT32_MAX); /* be careful. */
1063 if (pfCanHaveIOBitmap)
1064 *pfCanHaveIOBitmap = trHid.Attr.n.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL
1065 || trHid.Attr.n.u4Type == X86_SEL_TYPE_SYS_386_TSS_BUSY;
1066 return VINF_SUCCESS;
1067}
1068
1069
1070
1071/**
1072 * Notification callback which is called whenever there is a chance that a CR3
1073 * value might have changed.
1074 * This is called by PGM.
1075 *
1076 * @param pVM Pointer to the VM.
1077 * @param pVCpu Pointer to the VMCPU.
1078 */
VMMDECL(void) SELMShadowCR3Changed(PVM pVM, PVMCPU pVCpu)
{
    /* Mirror the current hypervisor/intermediate CR3 values into the hyper
       TSS and the #DF task TSS so task switches use the right page tables. */
    /** @todo SMP support!! */
    pVM->selm.s.Tss.cr3 = PGMGetHyperCR3(pVCpu);
    pVM->selm.s.TssTrap08.cr3 = PGMGetInterRCCR3(pVM, pVCpu);
}
1085
Note: See TracBrowser for help on using the repository browser.

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette