VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/SELMAll.cpp@ 23300

Last change on this file since 23300 was 23300, checked in by vboxsync, 16 years ago

Cut addresses according to the cpu mode.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 39.4 KB
1/* $Id: SELMAll.cpp 23300 2009-09-24 16:47:59Z vboxsync $ */
2/** @file
3 * SELM All contexts.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18 * Clara, CA 95054 USA or visit http://www.sun.com if you need
19 * additional information or have any questions.
20 */
21
22
23/*******************************************************************************
24* Header Files *
25*******************************************************************************/
26#define LOG_GROUP LOG_GROUP_SELM
27#include <VBox/selm.h>
28#include <VBox/stam.h>
29#include <VBox/mm.h>
30#include <VBox/pgm.h>
31#include "SELMInternal.h"
32#include <VBox/vm.h>
33#include <VBox/x86.h>
34#include <VBox/err.h>
35#include <VBox/param.h>
36#include <iprt/assert.h>
37#include <VBox/log.h>
38#include <VBox/vmm.h>
39
40
41
42#ifndef IN_RING0
43
44/**
45 * Converts a GC selector based address to a flat address.
46 *
47 * No limit checks are done. Use the SELMToFlat*() or SELMValidate*() functions
48 * for that.
49 *
50 * @returns Flat address.
51 * @param pVM VM Handle.
52 * @param Sel Selector part.
53 * @param Addr Address part.
54 * @remarks Don't use when in long mode.
55 */
56VMMDECL(RTGCPTR) SELMToFlatBySel(PVM pVM, RTSEL Sel, RTGCPTR Addr)
57{
58 Assert(pVM->cCpus == 1 && !CPUMIsGuestInLongMode(VMMGetCpu(pVM))); /* DON'T USE! */
59
60 /** @todo check the limit. */
61 X86DESC Desc;
62 if (!(Sel & X86_SEL_LDT))
63 Desc = pVM->selm.s.CTX_SUFF(paGdt)[Sel >> X86_SEL_SHIFT];
64 else
65 {
66 /** @todo handle LDT pages not present! */
67 PX86DESC paLDT = (PX86DESC)((char *)pVM->selm.s.CTX_SUFF(pvLdt) + pVM->selm.s.offLdtHyper);
68 Desc = paLDT[Sel >> X86_SEL_SHIFT];
69 }
70
71 return (RTGCPTR)(((RTGCUINTPTR)Addr + X86DESC_BASE(Desc)) & 0xffffffff);
72}
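/*
 * Illustrative example (not taken from the code above): with a shadow GDT
 * descriptor at index 2 whose base is 0x00100000, a call like
 *     SELMToFlatBySel(pVM, 0x0010, 0x1234)   (selector 0x0010: index 2, TI=0, RPL=0)
 * returns 0x00101234, i.e. the descriptor base plus the offset, truncated to 32 bits.
 */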
73#endif /* !IN_RING0 */
74
75
76/**
77 * Converts a GC selector based address to a flat address.
78 *
79 * No limit checks are done. Use the SELMToFlat*() or SELMValidate*() functions
80 * for that.
81 *
82 * @returns Flat address.
83 * @param pVM VM Handle.
84 * @param SelReg Selector register
85 * @param pCtxCore CPU context
86 * @param Addr Address part.
87 */
88VMMDECL(RTGCPTR) SELMToFlat(PVM pVM, DIS_SELREG SelReg, PCPUMCTXCORE pCtxCore, RTGCPTR Addr)
89{
90 PCPUMSELREGHID pHiddenSel;
91 RTSEL Sel;
92 int rc;
93 PVMCPU pVCpu = VMMGetCpu(pVM);
94
95 rc = DISFetchRegSegEx(pCtxCore, SelReg, &Sel, &pHiddenSel); AssertRC(rc);
96
97 /*
98 * Deal with real & v86 mode first.
99 */
100 if ( CPUMIsGuestInRealMode(pVCpu)
101 || pCtxCore->eflags.Bits.u1VM)
102 {
103 RTGCUINTPTR uFlat = (RTGCUINTPTR)Addr & 0xffff;
104 if (CPUMAreHiddenSelRegsValid(pVM))
105 uFlat += pHiddenSel->u64Base;
106 else
107 uFlat += ((RTGCUINTPTR)Sel << 4);
108 return (RTGCPTR)uFlat;
109 }
110
111#ifdef IN_RING0
112 Assert(CPUMAreHiddenSelRegsValid(pVM));
113#else
114 /** @todo when we're in 16 bits mode, we should cut off the address as well.. */
115 if (!CPUMAreHiddenSelRegsValid(pVM))
116 return SELMToFlatBySel(pVM, Sel, Addr);
117#endif
118
119 /* 64 bits mode: CS, DS, ES and SS are treated as if each segment base is 0 (Intel® 64 and IA-32 Architectures Software Developer's Manual: 3.4.2.1). */
120 if ( CPUMIsGuestInLongMode(pVCpu)
121 && pCtxCore->csHid.Attr.n.u1Long)
122 {
123 switch (SelReg)
124 {
125 case DIS_SELREG_FS:
126 case DIS_SELREG_GS:
127 return (RTGCPTR)(pHiddenSel->u64Base + Addr);
128
129 default:
130 return Addr; /* base 0 */
131 }
132 }
133
134 /* AMD64 manual: compatibility mode ignores the high 32 bits when calculating an effective address. */
135 Assert(pHiddenSel->u64Base <= 0xffffffff);
136 return ((pHiddenSel->u64Base + (RTGCUINTPTR)Addr) & 0xffffffff);
137}
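/*
 * Illustrative examples for SELMToFlat (assumed values, not from the code):
 *  - Real/V86 mode with invalid hidden registers: Sel=0x1234, Addr=0x0010
 *    yields (0x1234 << 4) + 0x0010 = 0x12350.
 *  - 64-bit long mode with SelReg=DIS_SELREG_DS: the segment base is ignored
 *    and Addr is returned unchanged; only FS and GS add their base.
 */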
138
139
140/**
141 * Converts a GC selector based address to a flat address.
142 *
143 * Some basic checking is done, but not all kinds yet.
144 *
145 * @returns VBox status
146 * @param pVM VM Handle.
147 * @param SelReg Selector register
148 * @param pCtxCore CPU context
149 * @param Addr Address part.
150 * @param fFlags SELMTOFLAT_FLAGS_*
151 * GDT entries are valid.
152 * @param ppvGC Where to store the GC flat address.
153 */
154VMMDECL(int) SELMToFlatEx(PVM pVM, DIS_SELREG SelReg, PCCPUMCTXCORE pCtxCore, RTGCPTR Addr, unsigned fFlags, PRTGCPTR ppvGC)
155{
156 /*
157 * Fetch the selector first.
158 */
159 PCPUMSELREGHID pHiddenSel;
160 RTSEL Sel;
161 PVMCPU pVCpu = VMMGetCpu(pVM);
162
163 int rc = DISFetchRegSegEx(pCtxCore, SelReg, &Sel, &pHiddenSel);
164 AssertRC(rc);
165
166 /*
167 * Deal with real & v86 mode first.
168 */
169 if ( CPUMIsGuestInRealMode(pVCpu)
170 || pCtxCore->eflags.Bits.u1VM)
171 {
172 RTGCUINTPTR uFlat = (RTGCUINTPTR)Addr & 0xffff;
173 if (ppvGC)
174 {
175 if ( pHiddenSel
176 && CPUMAreHiddenSelRegsValid(pVM))
177 *ppvGC = (RTGCPTR)(pHiddenSel->u64Base + uFlat);
178 else
179 *ppvGC = (RTGCPTR)(((RTGCUINTPTR)Sel << 4) + uFlat);
180 }
181 return VINF_SUCCESS;
182 }
183
184
185 uint32_t u32Limit;
186 RTGCPTR pvFlat;
187 uint32_t u1Present, u1DescType, u1Granularity, u4Type;
188
189 /** @todo when we're in 16 bits mode, we should cut off the address as well.. */
190#ifndef IN_RC
191 if ( pHiddenSel
192 && CPUMAreHiddenSelRegsValid(pVM))
193 {
194 bool fCheckLimit = true;
195
196 u1Present = pHiddenSel->Attr.n.u1Present;
197 u1Granularity = pHiddenSel->Attr.n.u1Granularity;
198 u1DescType = pHiddenSel->Attr.n.u1DescType;
199 u4Type = pHiddenSel->Attr.n.u4Type;
200 u32Limit = pHiddenSel->u32Limit;
201
202 /* 64 bits mode: CS, DS, ES and SS are treated as if each segment base is 0 (Intel® 64 and IA-32 Architectures Software Developer's Manual: 3.4.2.1). */
203 if ( CPUMIsGuestInLongMode(pVCpu)
204 && pCtxCore->csHid.Attr.n.u1Long)
205 {
206 fCheckLimit = false;
207 switch (SelReg)
208 {
209 case DIS_SELREG_FS:
210 case DIS_SELREG_GS:
211 pvFlat = (pHiddenSel->u64Base + Addr);
212 break;
213
214 default:
215 pvFlat = Addr;
216 break;
217 }
218 }
219 else
220 {
221 /* AMD64 manual: compatibility mode ignores the high 32 bits when calculating an effective address. */
222 Assert(pHiddenSel->u64Base <= 0xffffffff);
223 pvFlat = (RTGCPTR)((pHiddenSel->u64Base + (RTGCUINTPTR)Addr) & 0xffffffff);
224 }
225
226 /*
227 * Check if present.
228 */
229 if (u1Present)
230 {
231 /*
232 * Type check.
233 */
234 switch (u4Type)
235 {
236
237 /** Read only selector type. */
238 case X86_SEL_TYPE_RO:
239 case X86_SEL_TYPE_RO_ACC:
240 case X86_SEL_TYPE_RW:
241 case X86_SEL_TYPE_RW_ACC:
242 case X86_SEL_TYPE_EO:
243 case X86_SEL_TYPE_EO_ACC:
244 case X86_SEL_TYPE_ER:
245 case X86_SEL_TYPE_ER_ACC:
246 if (!(fFlags & SELMTOFLAT_FLAGS_NO_PL))
247 {
248 /** @todo fix this mess */
249 }
250 /* check limit. */
251 if (fCheckLimit && (RTGCUINTPTR)Addr > u32Limit)
252 return VERR_OUT_OF_SELECTOR_BOUNDS;
253 /* ok */
254 if (ppvGC)
255 *ppvGC = pvFlat;
256 return VINF_SUCCESS;
257
258 case X86_SEL_TYPE_EO_CONF:
259 case X86_SEL_TYPE_EO_CONF_ACC:
260 case X86_SEL_TYPE_ER_CONF:
261 case X86_SEL_TYPE_ER_CONF_ACC:
262 if (!(fFlags & SELMTOFLAT_FLAGS_NO_PL))
263 {
264 /** @todo fix this mess */
265 }
266 /* check limit. */
267 if (fCheckLimit && (RTGCUINTPTR)Addr > u32Limit)
268 return VERR_OUT_OF_SELECTOR_BOUNDS;
269 /* ok */
270 if (ppvGC)
271 *ppvGC = pvFlat;
272 return VINF_SUCCESS;
273
274 case X86_SEL_TYPE_RO_DOWN:
275 case X86_SEL_TYPE_RO_DOWN_ACC:
276 case X86_SEL_TYPE_RW_DOWN:
277 case X86_SEL_TYPE_RW_DOWN_ACC:
278 if (!(fFlags & SELMTOFLAT_FLAGS_NO_PL))
279 {
280 /** @todo fix this mess */
281 }
282 /* check limit. */
283 if (fCheckLimit)
284 {
285 if (!u1Granularity && (RTGCUINTPTR)Addr > (RTGCUINTPTR)0xffff)
286 return VERR_OUT_OF_SELECTOR_BOUNDS;
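 /* Expand-down segments invert the check: offsets at or below u32Limit lie
    outside the segment, and only offsets above the limit (up to the segment's
    maximum) are valid, which is what the test below enforces. */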
287 if ((RTGCUINTPTR)Addr <= u32Limit)
288 return VERR_OUT_OF_SELECTOR_BOUNDS;
289 }
290 /* ok */
291 if (ppvGC)
292 *ppvGC = pvFlat;
293 return VINF_SUCCESS;
294
295 default:
296 return VERR_INVALID_SELECTOR;
297
298 }
299 }
300 }
301# ifndef IN_RING0
302 else
303# endif
304#endif /* !IN_RC */
305#ifndef IN_RING0
306 {
307 X86DESC Desc;
308
309 if (!(Sel & X86_SEL_LDT))
310 {
311 if ( !(fFlags & SELMTOFLAT_FLAGS_HYPER)
312 && (unsigned)(Sel & X86_SEL_MASK) >= pVM->selm.s.GuestGdtr.cbGdt)
313 return VERR_INVALID_SELECTOR;
314 Desc = pVM->selm.s.CTX_SUFF(paGdt)[Sel >> X86_SEL_SHIFT];
315 }
316 else
317 {
318 if ((unsigned)(Sel & X86_SEL_MASK) >= pVM->selm.s.cbLdtLimit)
319 return VERR_INVALID_SELECTOR;
320
321 /** @todo handle LDT page(s) not present! */
322 PX86DESC paLDT = (PX86DESC)((char *)pVM->selm.s.CTX_SUFF(pvLdt) + pVM->selm.s.offLdtHyper);
323 Desc = paLDT[Sel >> X86_SEL_SHIFT];
324 }
325
326 /* calc limit. */
327 u32Limit = X86DESC_LIMIT(Desc);
328 if (Desc.Gen.u1Granularity)
329 u32Limit = (u32Limit << PAGE_SHIFT) | PAGE_OFFSET_MASK;
330
331 /* calc address assuming straight stuff. */
332 pvFlat = (RTGCPTR)((RTGCUINTPTR)Addr + X86DESC_BASE(Desc));
333
334 /* Cut the address to 32 bits. */
335 Assert(!CPUMIsGuestInLongMode(pVCpu));
336 pvFlat &= 0xffffffff;
337
338 u1Present = Desc.Gen.u1Present;
339 u1Granularity = Desc.Gen.u1Granularity;
340 u1DescType = Desc.Gen.u1DescType;
341 u4Type = Desc.Gen.u4Type;
342
343 /*
344 * Check if present.
345 */
346 if (u1Present)
347 {
348 /*
349 * Type check.
350 */
351# define BOTH(a, b) ((a << 16) | b)
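 /* BOTH() packs the descriptor-type bit (1 = code/data, 0 = system) into the
    upper half and the 4-bit type into the lower half, so a single switch can
    distinguish e.g. a writable data segment, BOTH(1, X86_SEL_TYPE_RW), from a
    system descriptor such as BOTH(0, X86_SEL_TYPE_SYS_LDT). */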
352 switch (BOTH(u1DescType, u4Type))
353 {
354
355 /** Read only selector type. */
356 case BOTH(1,X86_SEL_TYPE_RO):
357 case BOTH(1,X86_SEL_TYPE_RO_ACC):
358 case BOTH(1,X86_SEL_TYPE_RW):
359 case BOTH(1,X86_SEL_TYPE_RW_ACC):
360 case BOTH(1,X86_SEL_TYPE_EO):
361 case BOTH(1,X86_SEL_TYPE_EO_ACC):
362 case BOTH(1,X86_SEL_TYPE_ER):
363 case BOTH(1,X86_SEL_TYPE_ER_ACC):
364 if (!(fFlags & SELMTOFLAT_FLAGS_NO_PL))
365 {
366 /** @todo fix this mess */
367 }
368 /* check limit. */
369 if ((RTGCUINTPTR)Addr > u32Limit)
370 return VERR_OUT_OF_SELECTOR_BOUNDS;
371 /* ok */
372 if (ppvGC)
373 *ppvGC = pvFlat;
374 return VINF_SUCCESS;
375
376 case BOTH(1,X86_SEL_TYPE_EO_CONF):
377 case BOTH(1,X86_SEL_TYPE_EO_CONF_ACC):
378 case BOTH(1,X86_SEL_TYPE_ER_CONF):
379 case BOTH(1,X86_SEL_TYPE_ER_CONF_ACC):
380 if (!(fFlags & SELMTOFLAT_FLAGS_NO_PL))
381 {
382 /** @todo fix this mess */
383 }
384 /* check limit. */
385 if ((RTGCUINTPTR)Addr > u32Limit)
386 return VERR_OUT_OF_SELECTOR_BOUNDS;
387 /* ok */
388 if (ppvGC)
389 *ppvGC = pvFlat;
390 return VINF_SUCCESS;
391
392 case BOTH(1,X86_SEL_TYPE_RO_DOWN):
393 case BOTH(1,X86_SEL_TYPE_RO_DOWN_ACC):
394 case BOTH(1,X86_SEL_TYPE_RW_DOWN):
395 case BOTH(1,X86_SEL_TYPE_RW_DOWN_ACC):
396 if (!(fFlags & SELMTOFLAT_FLAGS_NO_PL))
397 {
398 /** @todo fix this mess */
399 }
400 /* check limit. */
401 if (!u1Granularity && (RTGCUINTPTR)Addr > (RTGCUINTPTR)0xffff)
402 return VERR_OUT_OF_SELECTOR_BOUNDS;
403 if ((RTGCUINTPTR)Addr <= u32Limit)
404 return VERR_OUT_OF_SELECTOR_BOUNDS;
405
406 /* ok */
407 if (ppvGC)
408 *ppvGC = pvFlat;
409 return VINF_SUCCESS;
410
411 case BOTH(0,X86_SEL_TYPE_SYS_286_TSS_AVAIL):
412 case BOTH(0,X86_SEL_TYPE_SYS_LDT):
413 case BOTH(0,X86_SEL_TYPE_SYS_286_TSS_BUSY):
414 case BOTH(0,X86_SEL_TYPE_SYS_286_CALL_GATE):
415 case BOTH(0,X86_SEL_TYPE_SYS_TASK_GATE):
416 case BOTH(0,X86_SEL_TYPE_SYS_286_INT_GATE):
417 case BOTH(0,X86_SEL_TYPE_SYS_286_TRAP_GATE):
418 case BOTH(0,X86_SEL_TYPE_SYS_386_TSS_AVAIL):
419 case BOTH(0,X86_SEL_TYPE_SYS_386_TSS_BUSY):
420 case BOTH(0,X86_SEL_TYPE_SYS_386_CALL_GATE):
421 case BOTH(0,X86_SEL_TYPE_SYS_386_INT_GATE):
422 case BOTH(0,X86_SEL_TYPE_SYS_386_TRAP_GATE):
423 if (!(fFlags & SELMTOFLAT_FLAGS_NO_PL))
424 {
425 /** @todo fix this mess */
426 }
427 /* check limit. */
428 if ((RTGCUINTPTR)Addr > u32Limit)
429 return VERR_OUT_OF_SELECTOR_BOUNDS;
430 /* ok */
431 if (ppvGC)
432 *ppvGC = pvFlat;
433 return VINF_SUCCESS;
434
435 default:
436 return VERR_INVALID_SELECTOR;
437
438 }
439# undef BOTH
440 }
441 }
442#endif /* !IN_RING0 */
443 return VERR_SELECTOR_NOT_PRESENT;
444}
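/*
 * Usage sketch (hypothetical caller, not part of this file): converting an
 * ES:EDI operand to a flat guest address with limit checking; the field names
 * follow CPUMCTXCORE and fFlags is left at 0:
 *
 *     RTGCPTR GCPtrDst;
 *     int rc = SELMToFlatEx(pVM, DIS_SELREG_ES, pCtxCore,
 *                           (RTGCPTR)pCtxCore->edi, 0, &GCPtrDst);
 *     if (RT_FAILURE(rc))
 *         return rc;
 */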
445
446
447#ifndef IN_RING0
448/**
449 * Converts a GC selector based address to a flat address.
450 *
451 * Some basic checking is done, but not all kinds yet.
452 *
453 * @returns VBox status
454 * @param pVM VM Handle.
455 * @param eflags Current eflags
456 * @param Sel Selector part.
457 * @param Addr Address part.
458 * @param pHiddenSel Hidden selector register (can be NULL)
459 * @param fFlags SELMTOFLAT_FLAGS_*
460 * GDT entries are valid.
461 * @param ppvGC Where to store the GC flat address.
462 * @param pcb Where to store the bytes from *ppvGC which can be accessed according to
463 * the selector. NULL is allowed.
464 * @remarks Don't use when in long mode.
465 */
466VMMDECL(int) SELMToFlatBySelEx(PVM pVM, X86EFLAGS eflags, RTSEL Sel, RTGCPTR Addr, CPUMSELREGHID *pHiddenSel, unsigned fFlags, PRTGCPTR ppvGC, uint32_t *pcb)
467{
468 PVMCPU pVCpu = VMMGetCpu(pVM);
469
470 Assert(!CPUMIsGuestInLongMode(pVCpu)); /* DON'T USE! */
471
472 /*
473 * Deal with real & v86 mode first.
474 */
475 if ( CPUMIsGuestInRealMode(pVCpu)
476 || eflags.Bits.u1VM)
477 {
478 RTGCUINTPTR uFlat = (RTGCUINTPTR)Addr & 0xffff;
479 if (ppvGC)
480 {
481 if ( pHiddenSel
482 && CPUMAreHiddenSelRegsValid(pVM))
483 *ppvGC = (RTGCPTR)(pHiddenSel->u64Base + uFlat);
484 else
485 *ppvGC = (RTGCPTR)(((RTGCUINTPTR)Sel << 4) + uFlat);
486 }
487 if (pcb)
488 *pcb = 0x10000 - uFlat;
489 return VINF_SUCCESS;
490 }
491
492
493 uint32_t u32Limit;
494 RTGCPTR pvFlat;
495 uint32_t u1Present, u1DescType, u1Granularity, u4Type;
496
497 /** @todo when we're in 16 bits mode, we should cut off the address as well.. */
498 if ( pHiddenSel
499 && CPUMAreHiddenSelRegsValid(pVM))
500 {
501 u1Present = pHiddenSel->Attr.n.u1Present;
502 u1Granularity = pHiddenSel->Attr.n.u1Granularity;
503 u1DescType = pHiddenSel->Attr.n.u1DescType;
504 u4Type = pHiddenSel->Attr.n.u4Type;
505
506 u32Limit = pHiddenSel->u32Limit;
507 pvFlat = (RTGCPTR)(pHiddenSel->u64Base + (RTGCUINTPTR)Addr);
508
509 if ( !CPUMIsGuestInLongMode(pVCpu)
510 || !pHiddenSel->Attr.n.u1Long)
511 {
512 /* AMD64 manual: compatibility mode ignores the high 32 bits when calculating an effective address. */
513 pvFlat &= 0xffffffff;
514 }
515 }
516 else
517 {
518 X86DESC Desc;
519
520 if (!(Sel & X86_SEL_LDT))
521 {
522 if ( !(fFlags & SELMTOFLAT_FLAGS_HYPER)
523 && (unsigned)(Sel & X86_SEL_MASK) >= pVM->selm.s.GuestGdtr.cbGdt)
524 return VERR_INVALID_SELECTOR;
525 Desc = pVM->selm.s.CTX_SUFF(paGdt)[Sel >> X86_SEL_SHIFT];
526 }
527 else
528 {
529 if ((unsigned)(Sel & X86_SEL_MASK) >= pVM->selm.s.cbLdtLimit)
530 return VERR_INVALID_SELECTOR;
531
532 /** @todo handle LDT page(s) not present! */
533 PX86DESC paLDT = (PX86DESC)((char *)pVM->selm.s.CTX_SUFF(pvLdt) + pVM->selm.s.offLdtHyper);
534 Desc = paLDT[Sel >> X86_SEL_SHIFT];
535 }
536
537 /* calc limit. */
538 u32Limit = X86DESC_LIMIT(Desc);
539 if (Desc.Gen.u1Granularity)
540 u32Limit = (u32Limit << PAGE_SHIFT) | PAGE_OFFSET_MASK;
541
542 /* calc address assuming straight stuff. */
543 pvFlat = (RTGCPTR)((RTGCUINTPTR)Addr + X86DESC_BASE(Desc));
544
545 /* Cut the address to 32 bits. */
546 Assert(!CPUMIsGuestInLongMode(pVCpu));
547 pvFlat &= 0xffffffff;
548
549 u1Present = Desc.Gen.u1Present;
550 u1Granularity = Desc.Gen.u1Granularity;
551 u1DescType = Desc.Gen.u1DescType;
552 u4Type = Desc.Gen.u4Type;
553 }
554
555 /*
556 * Check if present.
557 */
558 if (u1Present)
559 {
560 /*
561 * Type check.
562 */
563#define BOTH(a, b) ((a << 16) | b)
564 switch (BOTH(u1DescType, u4Type))
565 {
566
567 /** Read only selector type. */
568 case BOTH(1,X86_SEL_TYPE_RO):
569 case BOTH(1,X86_SEL_TYPE_RO_ACC):
570 case BOTH(1,X86_SEL_TYPE_RW):
571 case BOTH(1,X86_SEL_TYPE_RW_ACC):
572 case BOTH(1,X86_SEL_TYPE_EO):
573 case BOTH(1,X86_SEL_TYPE_EO_ACC):
574 case BOTH(1,X86_SEL_TYPE_ER):
575 case BOTH(1,X86_SEL_TYPE_ER_ACC):
576 if (!(fFlags & SELMTOFLAT_FLAGS_NO_PL))
577 {
578 /** @todo fix this mess */
579 }
580 /* check limit. */
581 if ((RTGCUINTPTR)Addr > u32Limit)
582 return VERR_OUT_OF_SELECTOR_BOUNDS;
583 /* ok */
584 if (ppvGC)
585 *ppvGC = pvFlat;
586 if (pcb)
587 *pcb = u32Limit - (uint32_t)Addr + 1;
588 return VINF_SUCCESS;
589
590 case BOTH(1,X86_SEL_TYPE_EO_CONF):
591 case BOTH(1,X86_SEL_TYPE_EO_CONF_ACC):
592 case BOTH(1,X86_SEL_TYPE_ER_CONF):
593 case BOTH(1,X86_SEL_TYPE_ER_CONF_ACC):
594 if (!(fFlags & SELMTOFLAT_FLAGS_NO_PL))
595 {
596 /** @todo fix this mess */
597 }
598 /* check limit. */
599 if ((RTGCUINTPTR)Addr > u32Limit)
600 return VERR_OUT_OF_SELECTOR_BOUNDS;
601 /* ok */
602 if (ppvGC)
603 *ppvGC = pvFlat;
604 if (pcb)
605 *pcb = u32Limit - (uint32_t)Addr + 1;
606 return VINF_SUCCESS;
607
608 case BOTH(1,X86_SEL_TYPE_RO_DOWN):
609 case BOTH(1,X86_SEL_TYPE_RO_DOWN_ACC):
610 case BOTH(1,X86_SEL_TYPE_RW_DOWN):
611 case BOTH(1,X86_SEL_TYPE_RW_DOWN_ACC):
612 if (!(fFlags & SELMTOFLAT_FLAGS_NO_PL))
613 {
614 /** @todo fix this mess */
615 }
616 /* check limit. */
617 if (!u1Granularity && (RTGCUINTPTR)Addr > (RTGCUINTPTR)0xffff)
618 return VERR_OUT_OF_SELECTOR_BOUNDS;
619 if ((RTGCUINTPTR)Addr <= u32Limit)
620 return VERR_OUT_OF_SELECTOR_BOUNDS;
621
622 /* ok */
623 if (ppvGC)
624 *ppvGC = pvFlat;
625 if (pcb)
626 *pcb = (RTGCUINTPTR)(u1Granularity ? 0xffffffff : 0xffff) - (RTGCUINTPTR)Addr + 1;
627 return VINF_SUCCESS;
628
629 case BOTH(0,X86_SEL_TYPE_SYS_286_TSS_AVAIL):
630 case BOTH(0,X86_SEL_TYPE_SYS_LDT):
631 case BOTH(0,X86_SEL_TYPE_SYS_286_TSS_BUSY):
632 case BOTH(0,X86_SEL_TYPE_SYS_286_CALL_GATE):
633 case BOTH(0,X86_SEL_TYPE_SYS_TASK_GATE):
634 case BOTH(0,X86_SEL_TYPE_SYS_286_INT_GATE):
635 case BOTH(0,X86_SEL_TYPE_SYS_286_TRAP_GATE):
636 case BOTH(0,X86_SEL_TYPE_SYS_386_TSS_AVAIL):
637 case BOTH(0,X86_SEL_TYPE_SYS_386_TSS_BUSY):
638 case BOTH(0,X86_SEL_TYPE_SYS_386_CALL_GATE):
639 case BOTH(0,X86_SEL_TYPE_SYS_386_INT_GATE):
640 case BOTH(0,X86_SEL_TYPE_SYS_386_TRAP_GATE):
641 if (!(fFlags & SELMTOFLAT_FLAGS_NO_PL))
642 {
643 /** @todo fix this mess */
644 }
645 /* check limit. */
646 if ((RTGCUINTPTR)Addr > u32Limit)
647 return VERR_OUT_OF_SELECTOR_BOUNDS;
648 /* ok */
649 if (ppvGC)
650 *ppvGC = pvFlat;
651 if (pcb)
652 *pcb = 0xffffffff - (RTGCUINTPTR)pvFlat + 1; /* Depends on the type.. fixme if we care. */
653 return VINF_SUCCESS;
654
655 default:
656 return VERR_INVALID_SELECTOR;
657
658 }
659#undef BOTH
660 }
661 return VERR_SELECTOR_NOT_PRESENT;
662}
663#endif /* !IN_RING0 */
664
665
666/**
667 * Validates and converts a GC selector based code address to a flat
668 * address when in real or v8086 mode.
669 *
670 * @returns VINF_SUCCESS.
671 * @param pVM VM Handle.
672 * @param SelCS Selector part.
673 * @param pHidCS The hidden CS register part. Optional.
674 * @param Addr Address part.
675 * @param ppvFlat Where to store the flat address.
676 */
677DECLINLINE(int) selmValidateAndConvertCSAddrRealMode(PVM pVM, RTSEL SelCS, PCPUMSELREGHID pHidCS, RTGCPTR Addr, PRTGCPTR ppvFlat)
678{
679 RTGCUINTPTR uFlat = (RTGCUINTPTR)Addr & 0xffff;
680 if (!pHidCS || !CPUMAreHiddenSelRegsValid(pVM))
681 uFlat += ((RTGCUINTPTR)SelCS << 4);
682 else
683 uFlat += pHidCS->u64Base;
684 *ppvFlat = (RTGCPTR)uFlat;
685 return VINF_SUCCESS;
686}
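/*
 * Worked example (illustrative): with invalid hidden registers, SelCS=0xF000
 * and Addr=0xFFF0 give (0xF000 << 4) + 0xFFF0 = 0xFFFF0, the classic
 * real-mode reset entry point.
 */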
687
688
689#ifndef IN_RING0
690/**
691 * Validates and converts a GC selector based code address to a flat
692 * address when in protected/long mode using the standard algorithm.
693 *
694 * @returns VBox status code.
695 * @param pVM VM Handle.
696 * @param SelCPL Current privilege level. Get this from SS - CS might be conforming!
697 * A full selector can be passed, we'll only use the RPL part.
698 * @param SelCS Selector part.
699 * @param Addr Address part.
700 * @param ppvFlat Where to store the flat address.
701 * @param pcBits Where to store the segment bitness (16/32/64). Optional.
702 */
703DECLINLINE(int) selmValidateAndConvertCSAddrStd(PVM pVM, RTSEL SelCPL, RTSEL SelCS, RTGCPTR Addr, PRTGCPTR ppvFlat, uint32_t *pcBits)
704{
705 Assert(!CPUMAreHiddenSelRegsValid(pVM));
706
707 /** @todo validate limit! */
708 X86DESC Desc;
709 if (!(SelCS & X86_SEL_LDT))
710 Desc = pVM->selm.s.CTX_SUFF(paGdt)[SelCS >> X86_SEL_SHIFT];
711 else
712 {
713 /** @todo handle LDT page(s) not present! */
714 PX86DESC paLDT = (PX86DESC)((char *)pVM->selm.s.CTX_SUFF(pvLdt) + pVM->selm.s.offLdtHyper);
715 Desc = paLDT[SelCS >> X86_SEL_SHIFT];
716 }
717
718 /*
719 * Check if present.
720 */
721 if (Desc.Gen.u1Present)
722 {
723 /*
724 * Type check.
725 */
726 if ( Desc.Gen.u1DescType == 1
727 && (Desc.Gen.u4Type & X86_SEL_TYPE_CODE))
728 {
729 /*
730 * Check level.
731 */
732 unsigned uLevel = RT_MAX(SelCPL & X86_SEL_RPL, SelCS & X86_SEL_RPL);
733 if ( !(Desc.Gen.u4Type & X86_SEL_TYPE_CONF)
734 ? uLevel <= Desc.Gen.u2Dpl
735 : uLevel >= Desc.Gen.u2Dpl /* hope I got this right now... */
736 )
737 {
738 /*
739 * Limit check.
740 */
741 uint32_t u32Limit = X86DESC_LIMIT(Desc);
742 if (Desc.Gen.u1Granularity)
743 u32Limit = (u32Limit << PAGE_SHIFT) | PAGE_OFFSET_MASK;
744 if ((RTGCUINTPTR)Addr <= u32Limit)
745 {
746 *ppvFlat = (RTGCPTR)((RTGCUINTPTR)Addr + X86DESC_BASE(Desc));
747 /* Cut the address to 32 bits. */
748 Assert(!CPUMIsGuestInLongMode(VMMGetCpu(pVM)));
749 *ppvFlat &= 0xffffffff;
750
751 if (pcBits)
752 *pcBits = Desc.Gen.u1DefBig ? 32 : 16; /** @todo GUEST64 */
753 return VINF_SUCCESS;
754 }
755 return VERR_OUT_OF_SELECTOR_BOUNDS;
756 }
757 return VERR_INVALID_RPL;
758 }
759 return VERR_NOT_CODE_SELECTOR;
760 }
761 return VERR_SELECTOR_NOT_PRESENT;
762}
763#endif /* !IN_RING0 */
764
765
766/**
767 * Validates and converts a GC selector based code address to a flat
768 * address when in protected/long mode using the standard algorithm.
769 *
770 * @returns VBox status code.
771 * @param pVCpu VMCPU Handle.
772 * @param SelCPL Current privilege level. Get this from SS - CS might be conforming!
773 * A full selector can be passed, we'll only use the RPL part.
775 * @param SelCS Selector part.
 * @param pHidCS The hidden CS register.
775 * @param Addr Address part.
776 * @param ppvFlat Where to store the flat address.
777 */
778DECLINLINE(int) selmValidateAndConvertCSAddrHidden(PVMCPU pVCpu, RTSEL SelCPL, RTSEL SelCS, PCPUMSELREGHID pHidCS, RTGCPTR Addr, PRTGCPTR ppvFlat)
779{
780 /*
781 * Check if present.
782 */
783 if (pHidCS->Attr.n.u1Present)
784 {
785 /*
786 * Type check.
787 */
788 if ( pHidCS->Attr.n.u1DescType == 1
789 && (pHidCS->Attr.n.u4Type & X86_SEL_TYPE_CODE))
790 {
791 /*
792 * Check level.
793 */
794 unsigned uLevel = RT_MAX(SelCPL & X86_SEL_RPL, SelCS & X86_SEL_RPL);
795 if ( !(pHidCS->Attr.n.u4Type & X86_SEL_TYPE_CONF)
796 ? uLevel <= pHidCS->Attr.n.u2Dpl
797 : uLevel >= pHidCS->Attr.n.u2Dpl /* hope I got this right now... */
798 )
799 {
800 /* 64 bits mode: CS, DS, ES and SS are treated as if each segment base is 0 (Intel® 64 and IA-32 Architectures Software Developer's Manual: 3.4.2.1). */
801 if ( CPUMIsGuestInLongMode(pVCpu)
802 && pHidCS->Attr.n.u1Long)
803 {
804 *ppvFlat = Addr;
805 return VINF_SUCCESS;
806 }
807
808 /*
809 * Limit check. Note that the limit in the hidden register is the
810 * final value. The granularity bit was included in its calculation.
811 */
812 uint32_t u32Limit = pHidCS->u32Limit;
813 if ((RTGCUINTPTR)Addr <= u32Limit)
814 {
815 *ppvFlat = (RTGCPTR)( (RTGCUINTPTR)Addr + pHidCS->u64Base );
816 return VINF_SUCCESS;
817 }
818 return VERR_OUT_OF_SELECTOR_BOUNDS;
819 }
820 Log(("Invalid RPL Attr.n.u4Type=%x cpl=%x dpl=%x\n", pHidCS->Attr.n.u4Type, uLevel, pHidCS->Attr.n.u2Dpl));
821 return VERR_INVALID_RPL;
822 }
823 return VERR_NOT_CODE_SELECTOR;
824 }
825 return VERR_SELECTOR_NOT_PRESENT;
826}
827
828
829#ifdef IN_RC
830/**
831 * Validates and converts a GC selector based code address to a flat address.
832 *
833 * This is like SELMValidateAndConvertCSAddr + SELMIsSelector32Bit but with
834 * invalid hidden CS data. It's customized for dealing efficiently with CS
835 * at GC trap time.
836 *
837 * @returns VBox status code.
838 * @param pVM VM Handle.
839 * @param eflags Current eflags
840 * @param SelCPL Current privilege level. Get this from SS - CS might be conforming!
841 * A full selector can be passed, we'll only use the RPL part.
842 * @param SelCS Selector part.
843 * @param Addr Address part.
844 * @param ppvFlat Where to store the flat address.
845 * @param pcBits Where to store the 64-bit/32-bit/16-bit indicator.
846 */
847VMMDECL(int) SELMValidateAndConvertCSAddrGCTrap(PVM pVM, X86EFLAGS eflags, RTSEL SelCPL, RTSEL SelCS, RTGCPTR Addr, PRTGCPTR ppvFlat, uint32_t *pcBits)
848{
849 Assert(pVM->cCpus == 1);
850 PVMCPU pVCpu = &pVM->aCpus[0];
851
852 if ( CPUMIsGuestInRealMode(pVCpu)
853 || eflags.Bits.u1VM)
854 {
855 *pcBits = 16;
856 return selmValidateAndConvertCSAddrRealMode(pVM, SelCS, NULL, Addr, ppvFlat);
857 }
858 return selmValidateAndConvertCSAddrStd(pVM, SelCPL, SelCS, Addr, ppvFlat, pcBits);
859}
860#endif /* IN_RC */
861
862
863/**
864 * Validates and converts a GC selector based code address to a flat address.
865 *
866 * @returns VBox status code.
867 * @param pVM VM Handle.
868 * @param eflags Current eflags
869 * @param SelCPL Current privilege level. Get this from SS - CS might be conforming!
870 * A full selector can be passed, we'll only use the RPL part.
871 * @param SelCS Selector part.
872 * @param pHiddenCSSel The hidden CS selector register.
873 * @param Addr Address part.
874 * @param ppvFlat Where to store the flat address.
875 */
876VMMDECL(int) SELMValidateAndConvertCSAddr(PVM pVM, X86EFLAGS eflags, RTSEL SelCPL, RTSEL SelCS, CPUMSELREGHID *pHiddenCSSel, RTGCPTR Addr, PRTGCPTR ppvFlat)
877{
878 PVMCPU pVCpu = VMMGetCpu(pVM);
879
880 if ( CPUMIsGuestInRealMode(pVCpu)
881 || eflags.Bits.u1VM)
882 return selmValidateAndConvertCSAddrRealMode(pVM, SelCS, pHiddenCSSel, Addr, ppvFlat);
883
884#ifdef IN_RING0
885 Assert(CPUMAreHiddenSelRegsValid(pVM));
886#else
887 /** @todo when we're in 16 bits mode, we should cut off the address as well? (like in selmValidateAndConvertCSAddrRealMode) */
888 if (!CPUMAreHiddenSelRegsValid(pVM))
889 return selmValidateAndConvertCSAddrStd(pVM, SelCPL, SelCS, Addr, ppvFlat, NULL);
890#endif
891 return selmValidateAndConvertCSAddrHidden(pVCpu, SelCPL, SelCS, pHiddenCSSel, Addr, ppvFlat);
892}
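/*
 * Usage sketch (hypothetical caller, not part of this file): validating the
 * guest's current CS:EIP before disassembling at it; SS is passed as SelCPL
 * since CS may be conforming:
 *
 *     RTGCPTR GCPtrCode;
 *     int rc = SELMValidateAndConvertCSAddr(pVM, pCtxCore->eflags, pCtxCore->ss,
 *                                           pCtxCore->cs, &pCtxCore->csHid,
 *                                           (RTGCPTR)pCtxCore->eip, &GCPtrCode);
 */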
893
894
895#ifndef IN_RING0
896/**
897 * Return the cpu mode corresponding to the (CS) selector
898 *
899 * @returns DISCPUMODE according to the selector type (16, 32 or 64 bits)
900 * @param pVM VM Handle.
901 * @param Sel The selector.
902 */
903static DISCPUMODE selmGetCpuModeFromSelector(PVM pVM, RTSEL Sel)
904{
905 Assert(!CPUMAreHiddenSelRegsValid(pVM));
906
907 /** @todo validate limit! */
908 X86DESC Desc;
909 if (!(Sel & X86_SEL_LDT))
910 Desc = pVM->selm.s.CTX_SUFF(paGdt)[Sel >> X86_SEL_SHIFT];
911 else
912 {
913 /** @todo handle LDT page(s) not present! */
914 PX86DESC paLDT = (PX86DESC)((char *)pVM->selm.s.CTX_SUFF(pvLdt) + pVM->selm.s.offLdtHyper);
915 Desc = paLDT[Sel >> X86_SEL_SHIFT];
916 }
917 return (Desc.Gen.u1DefBig) ? CPUMODE_32BIT : CPUMODE_16BIT;
918}
919#endif /* !IN_RING0 */
920
921
922/**
923 * Return the cpu mode corresponding to the (CS) selector
924 *
925 * @returns DISCPUMODE according to the selector type (16, 32 or 64 bits)
926 * @param pVM VM Handle.
927 * @param eflags Current eflags register
928 * @param Sel The selector.
929 * @param pHiddenSel The hidden selector register.
930 */
931VMMDECL(DISCPUMODE) SELMGetCpuModeFromSelector(PVM pVM, X86EFLAGS eflags, RTSEL Sel, CPUMSELREGHID *pHiddenSel)
932{
933 PVMCPU pVCpu = VMMGetCpu(pVM);
934#ifdef IN_RING0
935 Assert(CPUMAreHiddenSelRegsValid(pVM));
936#else /* !IN_RING0 */
937 if (!CPUMAreHiddenSelRegsValid(pVM))
938 {
939 /*
940 * Deal with real & v86 mode first.
941 */
942 if ( CPUMIsGuestInRealMode(pVCpu)
943 || eflags.Bits.u1VM)
944 return CPUMODE_16BIT;
945
946 return selmGetCpuModeFromSelector(pVM, Sel);
947 }
948#endif /* !IN_RING0 */
949 if ( CPUMIsGuestInLongMode(pVCpu)
950 && pHiddenSel->Attr.n.u1Long)
951 return CPUMODE_64BIT;
952
953 /* Else compatibility or 32 bits mode. */
954 return (pHiddenSel->Attr.n.u1DefBig) ? CPUMODE_32BIT : CPUMODE_16BIT;
955}
956
957
958/**
959 * Returns Hypervisor's Trap 08 (\#DF) selector.
960 *
961 * @returns Hypervisor's Trap 08 (\#DF) selector.
962 * @param pVM VM Handle.
963 */
964VMMDECL(RTSEL) SELMGetTrap8Selector(PVM pVM)
965{
966 return pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS_TRAP08];
967}
968
969
970/**
971 * Sets EIP of Hypervisor's Trap 08 (\#DF) TSS.
972 *
973 * @param pVM VM Handle.
974 * @param u32EIP EIP of Trap 08 handler.
975 */
976VMMDECL(void) SELMSetTrap8EIP(PVM pVM, uint32_t u32EIP)
977{
978 pVM->selm.s.TssTrap08.eip = u32EIP;
979}
980
981
982/**
983 * Sets ss:esp for ring1 in main Hypervisor's TSS.
984 *
985 * @param pVM VM Handle.
986 * @param ss Ring1 SS register value. Pass 0 if invalid.
987 * @param esp Ring1 ESP register value.
988 */
989void selmSetRing1Stack(PVM pVM, uint32_t ss, RTGCPTR32 esp)
990{
991 Assert((ss & 1) || esp == 0);
992 pVM->selm.s.Tss.ss1 = ss;
993 pVM->selm.s.Tss.esp1 = (uint32_t)esp;
994}
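/*
 * Background note: under raw-mode ring compression the guest's ring-0 code
 * runs in ring 1, so the guest's ring-0 stack (tss.ss0:esp0) is installed
 * here as the ring-1 stack of the hypervisor TSS; SELMGetRing1Stack() below
 * does exactly that, OR-ing RPL=1 into the selector before calling this
 * function.
 */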
995
996
997#ifndef IN_RING0
998/**
999 * Gets ss:esp for ring1 in main Hypervisor's TSS.
1000 *
1001 * Returns SS=0 if the ring-1 stack isn't valid.
1002 *
1003 * @returns VBox status code.
1004 * @param pVM VM Handle.
1005 * @param pSS Ring1 SS register value.
1006 * @param pEsp Ring1 ESP register value.
1007 */
1008VMMDECL(int) SELMGetRing1Stack(PVM pVM, uint32_t *pSS, PRTGCPTR32 pEsp)
1009{
1010 Assert(pVM->cCpus == 1);
1011 PVMCPU pVCpu = &pVM->aCpus[0];
1012
1013 if (pVM->selm.s.fSyncTSSRing0Stack)
1014 {
1015 RTGCPTR GCPtrTss = pVM->selm.s.GCPtrGuestTss;
1016 int rc;
1017 VBOXTSS tss;
1018
1019 Assert(pVM->selm.s.GCPtrGuestTss && pVM->selm.s.cbMonitoredGuestTss);
1020
1021# ifdef IN_RC
1022 bool fTriedAlready = false;
1023
1024l_tryagain:
1025 rc = MMGCRamRead(pVM, &tss.ss0, (RCPTRTYPE(void *))(GCPtrTss + RT_OFFSETOF(VBOXTSS, ss0)), sizeof(tss.ss0));
1026 rc |= MMGCRamRead(pVM, &tss.esp0, (RCPTRTYPE(void *))(GCPtrTss + RT_OFFSETOF(VBOXTSS, esp0)), sizeof(tss.esp0));
1027# ifdef DEBUG
1028 rc |= MMGCRamRead(pVM, &tss.offIoBitmap, (RCPTRTYPE(void *))(GCPtrTss + RT_OFFSETOF(VBOXTSS, offIoBitmap)), sizeof(tss.offIoBitmap));
1029# endif
1030
1031 if (RT_FAILURE(rc))
1032 {
1033 if (!fTriedAlready)
1034 {
1035 /* Shadow page might be out of sync. Sync and try again */
1036 /** @todo might cross page boundary */
1037 fTriedAlready = true;
1038 rc = PGMPrefetchPage(pVCpu, (RTGCPTR)GCPtrTss);
1039 if (rc != VINF_SUCCESS)
1040 return rc;
1041 goto l_tryagain;
1042 }
1043 AssertMsgFailed(("Unable to read TSS structure at %08X\n", GCPtrTss));
1044 return rc;
1045 }
1046
1047# else /* !IN_RC */
1048 /* Reading too much. Could be cheaper than two separate calls though. */
1049 rc = PGMPhysSimpleReadGCPtr(pVCpu, &tss, GCPtrTss, sizeof(VBOXTSS));
1050 if (RT_FAILURE(rc))
1051 {
1052 AssertReleaseMsgFailed(("Unable to read TSS structure at %08X\n", GCPtrTss));
1053 return rc;
1054 }
1055# endif /* !IN_RC */
1056
1057# ifdef LOG_ENABLED
1058 uint32_t ssr0 = pVM->selm.s.Tss.ss1;
1059 uint32_t espr0 = pVM->selm.s.Tss.esp1;
1060 ssr0 &= ~1;
1061
1062 if (ssr0 != tss.ss0 || espr0 != tss.esp0)
1063 Log(("SELMGetRing1Stack: Updating TSS ring 0 stack to %04X:%08X\n", tss.ss0, tss.esp0));
1064
1065 Log(("offIoBitmap=%#x\n", tss.offIoBitmap));
1066# endif
1067 /* Update our TSS structure for the guest's ring 1 stack */
1068 selmSetRing1Stack(pVM, tss.ss0 | 1, (RTGCPTR32)tss.esp0);
1069 pVM->selm.s.fSyncTSSRing0Stack = false;
1070 }
1071
1072 *pSS = pVM->selm.s.Tss.ss1;
1073 *pEsp = (RTGCPTR32)pVM->selm.s.Tss.esp1;
1074
1075 return VINF_SUCCESS;
1076}
1077#endif /* !IN_RING0 */
1078
1079
1080/**
1081 * Returns Guest TSS pointer
1082 *
1083 * @returns Pointer to the guest TSS, RTRCPTR_MAX if not being monitored.
1084 * @param pVM VM Handle.
1085 */
1086VMMDECL(RTGCPTR) SELMGetGuestTSS(PVM pVM)
1087{
1088 return (RTGCPTR)pVM->selm.s.GCPtrGuestTss;
1089}
1090
1091
1092#ifndef IN_RING0
1093
1094/**
1095 * Gets the hypervisor code selector (CS).
1096 * @returns CS selector.
1097 * @param pVM The VM handle.
1098 */
1099VMMDECL(RTSEL) SELMGetHyperCS(PVM pVM)
1100{
1101 return pVM->selm.s.aHyperSel[SELM_HYPER_SEL_CS];
1102}
1103
1104
1105/**
1106 * Gets the 64-mode hypervisor code selector (CS64).
1107 * @returns CS selector.
1108 * @param pVM The VM handle.
1109 */
1110VMMDECL(RTSEL) SELMGetHyperCS64(PVM pVM)
1111{
1112 return pVM->selm.s.aHyperSel[SELM_HYPER_SEL_CS64];
1113}
1114
1115
1116/**
1117 * Gets the hypervisor data selector (DS).
1118 * @returns DS selector.
1119 * @param pVM The VM handle.
1120 */
1121VMMDECL(RTSEL) SELMGetHyperDS(PVM pVM)
1122{
1123 return pVM->selm.s.aHyperSel[SELM_HYPER_SEL_DS];
1124}
1125
1126
1127/**
1128 * Gets the hypervisor TSS selector.
1129 * @returns TSS selector.
1130 * @param pVM The VM handle.
1131 */
1132VMMDECL(RTSEL) SELMGetHyperTSS(PVM pVM)
1133{
1134 return pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS];
1135}
1136
1137
1138/**
1139 * Gets the hypervisor TSS Trap 8 selector.
1140 * @returns TSS Trap 8 selector.
1141 * @param pVM The VM handle.
1142 */
1143VMMDECL(RTSEL) SELMGetHyperTSSTrap08(PVM pVM)
1144{
1145 return pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS_TRAP08];
1146}
1147
1148/**
1149 * Gets the address for the hypervisor GDT.
1150 *
1151 * @returns The GDT address.
1152 * @param pVM The VM handle.
1153 * @remark This is intended only for very special use, like in the world
1154 * switchers. Don't exploit this API!
1155 */
1156VMMDECL(RTRCPTR) SELMGetHyperGDT(PVM pVM)
1157{
1158 /*
1159 * Always convert this from the HC pointer since we can be
1160 * called before the first relocation and have to work correctly
1161 * without having dependencies on the relocation order.
1162 */
1163 return (RTRCPTR)MMHyperR3ToRC(pVM, pVM->selm.s.paGdtR3);
1164}
1165
1166#endif /* !IN_RING0 */
1167
1168/**
1169 * Gets info about the current TSS.
1170 *
1171 * @returns VBox status code.
1172 * @retval VINF_SUCCESS if we've got a TSS loaded.
1173 * @retval VERR_SELM_NO_TSS if we haven't got a TSS (rather unlikely).
1174 *
1175 * @param pVM The VM handle.
1176 * @param pVCpu VMCPU Handle.
1177 * @param pGCPtrTss Where to store the TSS address.
1178 * @param pcbTss Where to store the TSS size limit.
1179 * @param pfCanHaveIOBitmap Where to store the can-have-I/O-bitmap indicator. (optional)
1180 */
1181VMMDECL(int) SELMGetTSSInfo(PVM pVM, PVMCPU pVCpu, PRTGCUINTPTR pGCPtrTss, PRTGCUINTPTR pcbTss, bool *pfCanHaveIOBitmap)
1182{
1183 /*
1184 * The TR hidden register is always valid.
1185 */
1186 CPUMSELREGHID trHid;
1187 RTSEL tr = CPUMGetGuestTR(pVCpu, &trHid);
1188 if (!(tr & X86_SEL_MASK))
1189 return VERR_SELM_NO_TSS;
1190
1191 *pGCPtrTss = trHid.u64Base;
1192 *pcbTss = trHid.u32Limit + (trHid.u32Limit != UINT32_MAX); /* be careful. */
1193 if (pfCanHaveIOBitmap)
1194 *pfCanHaveIOBitmap = trHid.Attr.n.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL
1195 || trHid.Attr.n.u4Type == X86_SEL_TYPE_SYS_386_TSS_BUSY;
1196 return VINF_SUCCESS;
1197}
1198
1199
1200
1201/**
1202 * Notification callback which is called whenever there is a chance that a CR3
1203 * value might have changed.
1204 * This is called by PGM.
1205 *
1206 * @param pVM The VM handle
1207 * @param pVCpu The VMCPU handle
1208 */
1209VMMDECL(void) SELMShadowCR3Changed(PVM pVM, PVMCPU pVCpu)
1210{
1211 /** @todo SMP support!! */
1212 pVM->selm.s.Tss.cr3 = PGMGetHyperCR3(pVCpu);
1213 pVM->selm.s.TssTrap08.cr3 = PGMGetInterRCCR3(pVM, pVCpu);
1214}