VirtualBox

source: vbox/trunk/src/VBox/Runtime/r0drv/nt/memobj-r0drv-nt.cpp@ 28800

Last change on this file since 28800 was 28800, checked in by vboxsync, 15 years ago

Automated rebranding to Oracle copyright/license strings via filemuncher

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 28.3 KB
/* $Id: memobj-r0drv-nt.cpp 28800 2010-04-27 08:22:32Z vboxsync $ */
/** @file
 * IPRT - Ring-0 Memory Objects, NT.
 */

/*
 * Copyright (C) 2006-2007 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 *
 * The contents of this file may alternatively be used under the terms
 * of the Common Development and Distribution License Version 1.0
 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
 * VirtualBox OSE distribution, in which case the provisions of the
 * CDDL are applicable instead of those of the GPL.
 *
 * You may elect to license modified versions of this file under the
 * terms and conditions of either the GPL or the CDDL or both.
 */


/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#include "the-nt-kernel.h"

#include <iprt/memobj.h>
#include <iprt/alloc.h>
#include <iprt/assert.h>
#include <iprt/log.h>
#include <iprt/param.h>
#include <iprt/string.h>
#include <iprt/process.h>
#include "internal/memobj.h"


/*******************************************************************************
*   Defined Constants And Macros                                               *
*******************************************************************************/
/** Maximum number of bytes we try to lock down in one go.
 * This is supposed to have a limit right below 256MB, but the actual limit
 * appears to be much lower. The values here have been determined experimentally.
 */
#ifdef RT_ARCH_X86
# define MAX_LOCK_MEM_SIZE   (32*1024*1024) /* 32MB */
#endif
#ifdef RT_ARCH_AMD64
# define MAX_LOCK_MEM_SIZE   (24*1024*1024) /* 24MB */
#endif

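/*
 * Illustrative sketch (not part of the original revision): the chunking
 * arithmetic implied by MAX_LOCK_MEM_SIZE and relied on by rtR0MemObjNtLock()
 * below, one MDL per chunk. For example, a 100MB lock on AMD64 needs 5 MDLs
 * (4 x 24MB + 1 x 4MB). The helper name is made up for the example.
 */
static size_t rtR0MemObjNtExampleCalcMdlCount(size_t cb)
{
    size_t cMdls = cb / MAX_LOCK_MEM_SIZE;      /* full chunks */
    if (cb % MAX_LOCK_MEM_SIZE)                 /* partial tail chunk */
        cMdls++;
    return cMdls;
}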

/*******************************************************************************
*   Structures and Typedefs                                                    *
*******************************************************************************/
/**
 * The NT version of the memory object structure.
 */
typedef struct RTR0MEMOBJNT
{
    /** The core structure. */
    RTR0MEMOBJINTERNAL Core;
#ifndef IPRT_TARGET_NT4
    /** Set if MmAllocatePagesForMdl() was used for the allocation. */
    bool fAllocatedPagesForMdl;
#endif
    /** Pointer returned by MmSecureVirtualMemory(). */
    PVOID pvSecureMem;
    /** The number of PMDLs (memory descriptor lists) in the array. */
    uint32_t cMdls;
    /** Array of MDL pointers. (variable size) */
    PMDL apMdls[1];
} RTR0MEMOBJNT, *PRTR0MEMOBJNT;


int rtR0MemObjNativeFree(RTR0MEMOBJ pMem)
{
    PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)pMem;

    /*
     * Deal with it on a per-type basis (just as a variation).
     */
    switch (pMemNt->Core.enmType)
    {
        case RTR0MEMOBJTYPE_LOW:
#ifndef IPRT_TARGET_NT4
            if (pMemNt->fAllocatedPagesForMdl)
            {
                Assert(pMemNt->Core.pv && pMemNt->cMdls == 1 && pMemNt->apMdls[0]);
                MmUnmapLockedPages(pMemNt->Core.pv, pMemNt->apMdls[0]);
                pMemNt->Core.pv = NULL;
                if (pMemNt->pvSecureMem)
                {
                    MmUnsecureVirtualMemory(pMemNt->pvSecureMem);
                    pMemNt->pvSecureMem = NULL;
                }

                MmFreePagesFromMdl(pMemNt->apMdls[0]);
                ExFreePool(pMemNt->apMdls[0]);
                pMemNt->apMdls[0] = NULL;
                pMemNt->cMdls = 0;
                break;
            }
#endif
            AssertFailed();
            break;

        case RTR0MEMOBJTYPE_PAGE:
            Assert(pMemNt->Core.pv);
            ExFreePool(pMemNt->Core.pv);
            pMemNt->Core.pv = NULL;

            Assert(pMemNt->cMdls == 1 && pMemNt->apMdls[0]);
            IoFreeMdl(pMemNt->apMdls[0]);
            pMemNt->apMdls[0] = NULL;
            pMemNt->cMdls = 0;
            break;

        case RTR0MEMOBJTYPE_CONT:
            Assert(pMemNt->Core.pv);
            MmFreeContiguousMemory(pMemNt->Core.pv);
            pMemNt->Core.pv = NULL;

            Assert(pMemNt->cMdls == 1 && pMemNt->apMdls[0]);
            IoFreeMdl(pMemNt->apMdls[0]);
            pMemNt->apMdls[0] = NULL;
            pMemNt->cMdls = 0;
            break;

        case RTR0MEMOBJTYPE_PHYS:
        case RTR0MEMOBJTYPE_PHYS_NC:
#ifndef IPRT_TARGET_NT4
            if (pMemNt->fAllocatedPagesForMdl)
            {
                MmFreePagesFromMdl(pMemNt->apMdls[0]);
                ExFreePool(pMemNt->apMdls[0]);
                pMemNt->apMdls[0] = NULL;
                pMemNt->cMdls = 0;
                break;
            }
#endif
            AssertFailed();
            break;

        case RTR0MEMOBJTYPE_LOCK:
            if (pMemNt->pvSecureMem)
            {
                MmUnsecureVirtualMemory(pMemNt->pvSecureMem);
                pMemNt->pvSecureMem = NULL;
            }
            for (uint32_t i = 0; i < pMemNt->cMdls; i++)
            {
                MmUnlockPages(pMemNt->apMdls[i]);
                IoFreeMdl(pMemNt->apMdls[i]);
                pMemNt->apMdls[i] = NULL;
            }
            break;

        case RTR0MEMOBJTYPE_RES_VIRT:
/*            if (pMemNt->Core.u.ResVirt.R0Process == NIL_RTR0PROCESS)
            {
            }
            else
            {
            }*/
            AssertMsgFailed(("RTR0MEMOBJTYPE_RES_VIRT\n"));
            return VERR_INTERNAL_ERROR;
            break;

        case RTR0MEMOBJTYPE_MAPPING:
        {
            Assert(pMemNt->cMdls == 0 && pMemNt->Core.pv);
            PRTR0MEMOBJNT pMemNtParent = (PRTR0MEMOBJNT)pMemNt->Core.uRel.Child.pParent;
            Assert(pMemNtParent);
            if (pMemNtParent->cMdls)
            {
                Assert(pMemNtParent->cMdls == 1 && pMemNtParent->apMdls[0]);
                Assert(   pMemNt->Core.u.Mapping.R0Process == NIL_RTR0PROCESS
                       || pMemNt->Core.u.Mapping.R0Process == RTR0ProcHandleSelf());
                MmUnmapLockedPages(pMemNt->Core.pv, pMemNtParent->apMdls[0]);
            }
            else
            {
                Assert(   pMemNtParent->Core.enmType == RTR0MEMOBJTYPE_PHYS
                       && !pMemNtParent->Core.u.Phys.fAllocated);
                Assert(pMemNt->Core.u.Mapping.R0Process == NIL_RTR0PROCESS);
                MmUnmapIoSpace(pMemNt->Core.pv, pMemNt->Core.cb);
            }
            pMemNt->Core.pv = NULL;
            break;
        }

        default:
            AssertMsgFailed(("enmType=%d\n", pMemNt->Core.enmType));
            return VERR_INTERNAL_ERROR;
    }

    return VINF_SUCCESS;
}


int rtR0MemObjNativeAllocPage(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
{
    AssertMsgReturn(cb <= _1G, ("%#x\n", cb), VERR_OUT_OF_RANGE); /* for safe size_t -> ULONG */

    /*
     * Try to allocate the memory and create an MDL for it so
     * we can query the physical addresses and do mappings later
     * without running into out-of-memory conditions and similar problems.
     */
    int rc = VERR_NO_PAGE_MEMORY;
    void *pv = ExAllocatePoolWithTag(NonPagedPool, cb, IPRT_NT_POOL_TAG);
    if (pv)
    {
        PMDL pMdl = IoAllocateMdl(pv, (ULONG)cb, FALSE, FALSE, NULL);
        if (pMdl)
        {
            MmBuildMdlForNonPagedPool(pMdl);
#ifdef RT_ARCH_AMD64
            MmProtectMdlSystemAddress(pMdl, PAGE_EXECUTE_READWRITE);
#endif

            /*
             * Create the IPRT memory object.
             */
            PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_PAGE, pv, cb);
            if (pMemNt)
            {
                pMemNt->cMdls = 1;
                pMemNt->apMdls[0] = pMdl;
                *ppMem = &pMemNt->Core;
                return VINF_SUCCESS;
            }

            rc = VERR_NO_MEMORY;
            IoFreeMdl(pMdl);
        }
        ExFreePool(pv);
    }
    return rc;
}
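/*
 * Illustrative sketch (not part of the original revision): how ring-0 code
 * typically reaches the allocator above through the public API declared in
 * iprt/memobj.h, and how freeing lands back in rtR0MemObjNativeFree(). The
 * function name and the four-page size are made up for the example; error
 * handling is reduced to the bare minimum.
 */
static int rtR0MemObjNtExampleUsePageAlloc(void)
{
    RTR0MEMOBJ hMemObj;
    int rc = RTR0MemObjAllocPage(&hMemObj, 4 * PAGE_SIZE, false /* fExecutable */);
    if (RT_SUCCESS(rc))
    {
        /* The ring-0 mapping address and size are queried from the object. */
        void *pv = RTR0MemObjAddress(hMemObj);
        memset(pv, 0, RTR0MemObjSize(hMemObj));

        /* Freeing the object ends up in rtR0MemObjNativeFree() above. */
        rc = RTR0MemObjFree(hMemObj, false /* fFreeMappings */);
    }
    return rc;
}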


int rtR0MemObjNativeAllocLow(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
{
    AssertMsgReturn(cb <= _1G, ("%#x\n", cb), VERR_OUT_OF_RANGE); /* for safe size_t -> ULONG */

    /*
     * Try to see if we get lucky first...
     * (We could probably just assume we're lucky on NT4.)
     */
    int rc = rtR0MemObjNativeAllocPage(ppMem, cb, fExecutable);
    if (RT_SUCCESS(rc))
    {
        size_t iPage = cb >> PAGE_SHIFT;
        while (iPage-- > 0)
            if (rtR0MemObjNativeGetPagePhysAddr(*ppMem, iPage) >= _4G)
            {
                rc = VERR_NO_MEMORY;
                break;
            }
        if (RT_SUCCESS(rc))
            return rc;

        /* The following ASSUMES that rtR0MemObjNativeAllocPage returns a completed object. */
        RTR0MemObjFree(*ppMem, false);
        *ppMem = NULL;
    }

#ifndef IPRT_TARGET_NT4
    /*
     * Use MmAllocatePagesForMdl to specify the range of physical addresses we wish to use.
     */
    PHYSICAL_ADDRESS Zero;
    Zero.QuadPart = 0;
    PHYSICAL_ADDRESS HighAddr;
    HighAddr.QuadPart = _4G - 1;
    PMDL pMdl = MmAllocatePagesForMdl(Zero, HighAddr, Zero, cb);
    if (pMdl)
    {
        if (MmGetMdlByteCount(pMdl) >= cb)
        {
            __try
            {
                void *pv = MmMapLockedPagesSpecifyCache(pMdl, KernelMode, MmCached, NULL /* no base address */,
                                                        FALSE /* no bug check on failure */, NormalPagePriority);
                if (pv)
                {
                    PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_LOW, pv, cb);
                    if (pMemNt)
                    {
                        pMemNt->fAllocatedPagesForMdl = true;
                        pMemNt->cMdls = 1;
                        pMemNt->apMdls[0] = pMdl;
                        *ppMem = &pMemNt->Core;
                        return VINF_SUCCESS;
                    }
                    MmUnmapLockedPages(pv, pMdl);
                }
            }
            __except(EXCEPTION_EXECUTE_HANDLER)
            {
                NTSTATUS rcNt = GetExceptionCode();
                Log(("rtR0MemObjNativeAllocLow: Exception Code %#x\n", rcNt));
                /* nothing */
            }
        }
        MmFreePagesFromMdl(pMdl);
        ExFreePool(pMdl);
    }
#endif /* !IPRT_TARGET_NT4 */

    /*
     * Fall back on contiguous memory...
     */
    return rtR0MemObjNativeAllocCont(ppMem, cb, fExecutable);
}


/**
 * Internal worker for rtR0MemObjNativeAllocCont(), rtR0MemObjNativeAllocPhys()
 * and rtR0MemObjNativeAllocPhysNC() that takes a max physical address in addition
 * to what rtR0MemObjNativeAllocCont() does.
 *
 * @returns IPRT status code.
 * @param   ppMem           Where to store the pointer to the ring-0 memory object.
 * @param   cb              The size.
 * @param   fExecutable     Whether the mapping should be executable or not.
 * @param   PhysHighest     The highest physical address for the pages in allocation.
 * @param   uAlignment      The alignment of the physical memory to allocate.
 *                          Supported values are PAGE_SIZE, _2M, _4M and _1G.
 */
static int rtR0MemObjNativeAllocContEx(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable, RTHCPHYS PhysHighest,
                                       size_t uAlignment)
{
    AssertMsgReturn(cb <= _1G, ("%#x\n", cb), VERR_OUT_OF_RANGE); /* for safe size_t -> ULONG */
#ifdef TARGET_NT4
    if (uAlignment != PAGE_SIZE)
        return VERR_NOT_SUPPORTED;
#endif

    /*
     * Allocate the memory and create an MDL for it.
     */
    PHYSICAL_ADDRESS PhysAddrHighest;
    PhysAddrHighest.QuadPart = PhysHighest;
#ifndef TARGET_NT4
    PHYSICAL_ADDRESS PhysAddrLowest, PhysAddrBoundary;
    PhysAddrLowest.QuadPart = 0;
    PhysAddrBoundary.QuadPart = (uAlignment == PAGE_SIZE) ? 0 : uAlignment;
    void *pv = MmAllocateContiguousMemorySpecifyCache(cb, PhysAddrLowest, PhysAddrHighest, PhysAddrBoundary, MmCached);
#else
    void *pv = MmAllocateContiguousMemory(cb, PhysAddrHighest);
#endif
    if (!pv)
        return VERR_NO_MEMORY;

    PMDL pMdl = IoAllocateMdl(pv, (ULONG)cb, FALSE, FALSE, NULL);
    if (pMdl)
    {
        MmBuildMdlForNonPagedPool(pMdl);
#ifdef RT_ARCH_AMD64
        MmProtectMdlSystemAddress(pMdl, PAGE_EXECUTE_READWRITE);
#endif

        PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_CONT, pv, cb);
        if (pMemNt)
        {
            pMemNt->Core.u.Cont.Phys = (RTHCPHYS)*MmGetMdlPfnArray(pMdl) << PAGE_SHIFT;
            pMemNt->cMdls = 1;
            pMemNt->apMdls[0] = pMdl;
            *ppMem = &pMemNt->Core;
            return VINF_SUCCESS;
        }

        IoFreeMdl(pMdl);
    }
    MmFreeContiguousMemory(pv);
    return VERR_NO_MEMORY;
}


int rtR0MemObjNativeAllocCont(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
{
    return rtR0MemObjNativeAllocContEx(ppMem, cb, fExecutable, _4G-1, PAGE_SIZE /* alignment */);
}


int rtR0MemObjNativeAllocPhys(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest, size_t uAlignment)
{
#ifndef IPRT_TARGET_NT4
    /*
     * Try and see if we're lucky and get a contiguous chunk from MmAllocatePagesForMdl.
     *
     * This is preferable to using MmAllocateContiguousMemory because there are
     * a few situations where the memory shouldn't be mapped, like for instance
     * VT-x control memory. Since these are rather small allocations (one or
     * two pages) MmAllocatePagesForMdl will probably be able to satisfy the
     * request.
     *
     * If the allocation is big, the chances are *probably* not very good. The
     * current limit is kind of random...
     */
    if (   cb < _128K
        && uAlignment == PAGE_SIZE)
    {
        PHYSICAL_ADDRESS Zero;
        Zero.QuadPart = 0;
        PHYSICAL_ADDRESS HighAddr;
        HighAddr.QuadPart = PhysHighest == NIL_RTHCPHYS ? MAXLONGLONG : PhysHighest;
        PMDL pMdl = MmAllocatePagesForMdl(Zero, HighAddr, Zero, cb);
        if (pMdl)
        {
            if (MmGetMdlByteCount(pMdl) >= cb)
            {
                PPFN_NUMBER paPfns = MmGetMdlPfnArray(pMdl);
                PFN_NUMBER Pfn = paPfns[0] + 1;
                const size_t cPages = cb >> PAGE_SHIFT;
                size_t iPage;
                for (iPage = 1; iPage < cPages; iPage++, Pfn++)
                    if (paPfns[iPage] != Pfn)
                        break;
                if (iPage >= cPages)
                {
                    PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_PHYS, NULL, cb);
                    if (pMemNt)
                    {
                        pMemNt->Core.u.Phys.fAllocated = true;
                        pMemNt->Core.u.Phys.PhysBase = (RTHCPHYS)paPfns[0] << PAGE_SHIFT;
                        pMemNt->fAllocatedPagesForMdl = true;
                        pMemNt->cMdls = 1;
                        pMemNt->apMdls[0] = pMdl;
                        *ppMem = &pMemNt->Core;
                        return VINF_SUCCESS;
                    }
                }
            }
            MmFreePagesFromMdl(pMdl);
            ExFreePool(pMdl);
        }
    }
#endif /* !IPRT_TARGET_NT4 */

    return rtR0MemObjNativeAllocContEx(ppMem, cb, false, PhysHighest, uAlignment);
}
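/*
 * Illustrative sketch (not part of the original revision): the contiguity test
 * used above, pulled out into a standalone helper. An MDL describes its pages
 * as an array of page frame numbers (PFNs); the block is physically contiguous
 * exactly when those PFNs are consecutive. Assumes a page-aligned MDL such as
 * one produced by MmAllocatePagesForMdl; the helper name is made up.
 */
static bool rtR0MemObjNtExampleIsMdlContiguous(PMDL pMdl)
{
    PPFN_NUMBER  paPfns = MmGetMdlPfnArray(pMdl);
    size_t const cPages = MmGetMdlByteCount(pMdl) >> PAGE_SHIFT;
    for (size_t iPage = 1; iPage < cPages; iPage++)
        if (paPfns[iPage] != paPfns[iPage - 1] + 1)
            return false;   /* gap in the physical range */
    return true;
}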


int rtR0MemObjNativeAllocPhysNC(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest)
{
#ifndef IPRT_TARGET_NT4
    PHYSICAL_ADDRESS Zero;
    Zero.QuadPart = 0;
    PHYSICAL_ADDRESS HighAddr;
    HighAddr.QuadPart = PhysHighest == NIL_RTHCPHYS ? MAXLONGLONG : PhysHighest;
    PMDL pMdl = MmAllocatePagesForMdl(Zero, HighAddr, Zero, cb);
    if (pMdl)
    {
        if (MmGetMdlByteCount(pMdl) >= cb)
        {
            PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_PHYS_NC, NULL, cb);
            if (pMemNt)
            {
                pMemNt->fAllocatedPagesForMdl = true;
                pMemNt->cMdls = 1;
                pMemNt->apMdls[0] = pMdl;
                *ppMem = &pMemNt->Core;
                return VINF_SUCCESS;
            }
        }
        MmFreePagesFromMdl(pMdl);
        ExFreePool(pMdl);
    }
    return VERR_NO_MEMORY;
#else  /* IPRT_TARGET_NT4 */
    return VERR_NOT_SUPPORTED;
#endif /* IPRT_TARGET_NT4 */
}


int rtR0MemObjNativeEnterPhys(PPRTR0MEMOBJINTERNAL ppMem, RTHCPHYS Phys, size_t cb, unsigned CachePolicy)
{
    AssertReturn(CachePolicy == RTMEM_CACHE_POLICY_DONT_CARE, VERR_NOT_IMPLEMENTED);

    /*
     * Validate the address range and create a descriptor for it.
     */
    PFN_NUMBER Pfn = (PFN_NUMBER)(Phys >> PAGE_SHIFT);
    if (((RTHCPHYS)Pfn << PAGE_SHIFT) != Phys)
        return VERR_ADDRESS_TOO_BIG;

    /*
     * Create the IPRT memory object.
     */
    PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_PHYS, NULL, cb);
    if (pMemNt)
    {
        pMemNt->Core.u.Phys.PhysBase = Phys;
        pMemNt->Core.u.Phys.fAllocated = false;
        *ppMem = &pMemNt->Core;
        return VINF_SUCCESS;
    }
    return VERR_NO_MEMORY;
}


/**
 * Internal worker for locking down pages.
 *
 * @return IPRT status code.
 *
 * @param   ppMem           Where to store the memory object pointer.
 * @param   pv              First page.
 * @param   cb              Number of bytes.
 * @param   fAccess         The desired access, a combination of RTMEM_PROT_READ
 *                          and RTMEM_PROT_WRITE.
 * @param   R0Process       The process \a pv and \a cb refer to.
 */
static int rtR0MemObjNtLock(PPRTR0MEMOBJINTERNAL ppMem, void *pv, size_t cb, uint32_t fAccess, RTR0PROCESS R0Process)
{
    /*
     * Calc the number of MDLs we need and allocate the memory object structure.
     */
    size_t cMdls = cb / MAX_LOCK_MEM_SIZE;
    if (cb % MAX_LOCK_MEM_SIZE)
        cMdls++;
    if (cMdls >= UINT32_MAX)
        return VERR_OUT_OF_RANGE;
    PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(RT_OFFSETOF(RTR0MEMOBJNT, apMdls[cMdls]),
                                                        RTR0MEMOBJTYPE_LOCK, pv, cb);
    if (!pMemNt)
        return VERR_NO_MEMORY;

    /*
     * Loop locking down the sub parts of the memory.
     */
    int rc = VINF_SUCCESS;
    size_t cbTotal = 0;
    uint8_t *pb = (uint8_t *)pv;
    uint32_t iMdl;
    for (iMdl = 0; iMdl < cMdls; iMdl++)
    {
        /*
         * Calc the Mdl size and allocate it.
         */
        size_t cbCur = cb - cbTotal;
        if (cbCur > MAX_LOCK_MEM_SIZE)
            cbCur = MAX_LOCK_MEM_SIZE;
        AssertMsg(cbCur, ("cbCur: 0!\n"));
        PMDL pMdl = IoAllocateMdl(pb, (ULONG)cbCur, FALSE, FALSE, NULL);
        if (!pMdl)
        {
            rc = VERR_NO_MEMORY;
            break;
        }

        /*
         * Lock the pages.
         */
        __try
        {
            MmProbeAndLockPages(pMdl,
                                R0Process == NIL_RTR0PROCESS ? KernelMode : UserMode,
                                fAccess == RTMEM_PROT_READ
                                ? IoReadAccess
                                : fAccess == RTMEM_PROT_WRITE
                                ? IoWriteAccess
                                : IoModifyAccess);

            pMemNt->apMdls[iMdl] = pMdl;
            pMemNt->cMdls++;
        }
        __except(EXCEPTION_EXECUTE_HANDLER)
        {
            IoFreeMdl(pMdl);
            rc = VERR_LOCK_FAILED;
            break;
        }

        if (R0Process != NIL_RTR0PROCESS)
        {
            /* Make sure the user process can't change the allocation. */
            pMemNt->pvSecureMem = MmSecureVirtualMemory(pv, cb,
                                                        fAccess & RTMEM_PROT_WRITE
                                                        ? PAGE_READWRITE
                                                        : PAGE_READONLY);
            if (!pMemNt->pvSecureMem)
            {
                rc = VERR_NO_MEMORY;
                break;
            }
        }

        /* next */
        cbTotal += cbCur;
        pb += cbCur;
    }
    if (RT_SUCCESS(rc))
    {
        Assert(pMemNt->cMdls == cMdls);
        pMemNt->Core.u.Lock.R0Process = R0Process;
        *ppMem = &pMemNt->Core;
        return rc;
    }

    /*
     * We failed, perform cleanups.
     */
    while (iMdl-- > 0)
    {
        MmUnlockPages(pMemNt->apMdls[iMdl]);
        IoFreeMdl(pMemNt->apMdls[iMdl]);
        pMemNt->apMdls[iMdl] = NULL;
    }
    if (pMemNt->pvSecureMem)
    {
        MmUnsecureVirtualMemory(pMemNt->pvSecureMem);
        pMemNt->pvSecureMem = NULL;
    }

    rtR0MemObjDelete(&pMemNt->Core);
    return rc;
}
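/*
 * Illustrative sketch (not part of the original revision): locking a user
 * buffer of the calling process through the public API from iprt/memobj.h,
 * which lands in rtR0MemObjNtLock() above. R3Ptr/cb stand in for a buffer
 * handed in by ring-3; the function name is made up for the example.
 */
static int rtR0MemObjNtExampleLockUserBuffer(RTR3PTR R3Ptr, size_t cb, PRTR0MEMOBJ phMemObj)
{
    int rc = RTR0MemObjLockUser(phMemObj, R3Ptr, cb, RTMEM_PROT_READ | RTMEM_PROT_WRITE,
                                RTR0ProcHandleSelf());
    /* On success the pages stay resident until RTR0MemObjFree() unlocks them. */
    return rc;
}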


int rtR0MemObjNativeLockUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3Ptr, size_t cb, uint32_t fAccess, RTR0PROCESS R0Process)
{
    AssertMsgReturn(R0Process == RTR0ProcHandleSelf(), ("%p != %p\n", R0Process, RTR0ProcHandleSelf()), VERR_NOT_SUPPORTED);
    /* (Can use MmProbeAndLockProcessPages if we need to mess with other processes later.) */
    return rtR0MemObjNtLock(ppMem, (void *)R3Ptr, cb, fAccess, R0Process);
}


int rtR0MemObjNativeLockKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pv, size_t cb, uint32_t fAccess)
{
    return rtR0MemObjNtLock(ppMem, pv, cb, fAccess, NIL_RTR0PROCESS);
}


int rtR0MemObjNativeReserveKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pvFixed, size_t cb, size_t uAlignment)
{
    /*
     * MmCreateSection(SEC_RESERVE) + MmMapViewInSystemSpace perhaps?
     */
    return VERR_NOT_IMPLEMENTED;
}


int rtR0MemObjNativeReserveUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3PtrFixed, size_t cb, size_t uAlignment, RTR0PROCESS R0Process)
{
    /*
     * ZwCreateSection(SEC_RESERVE) + ZwMapViewOfSection perhaps?
     */
    return VERR_NOT_IMPLEMENTED;
}


/**
 * Internal worker for rtR0MemObjNativeMapKernel and rtR0MemObjNativeMapUser.
 *
 * @returns IPRT status code.
 * @param   ppMem       Where to store the memory object for the mapping.
 * @param   pMemToMap   The memory object to map.
 * @param   pvFixed     Where to map it. (void *)-1 if anywhere is fine.
 * @param   uAlignment  The alignment requirement for the mapping.
 * @param   fProt       The desired page protection for the mapping.
 * @param   R0Process   If NIL_RTR0PROCESS map into system (kernel) memory.
 *                      If not nil, it's the current process.
 */
static int rtR0MemObjNtMap(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, void *pvFixed, size_t uAlignment,
                           unsigned fProt, RTR0PROCESS R0Process)
{
    int rc = VERR_MAP_FAILED;

    /*
     * Check that the specified alignment is supported.
     */
    if (uAlignment > PAGE_SIZE)
        return VERR_NOT_SUPPORTED;

    /*
     * There are two basic cases here, either we've got an MDL and can
     * map it using MmMapLockedPages, or we've got a contiguous physical
     * range (MMIO most likely) and can use MmMapIoSpace.
     */
    PRTR0MEMOBJNT pMemNtToMap = (PRTR0MEMOBJNT)pMemToMap;
    if (pMemNtToMap->cMdls)
    {
        /* don't attempt to map locked regions with more than one MDL. */
        if (pMemNtToMap->cMdls != 1)
            return VERR_NOT_SUPPORTED;

#ifdef IPRT_TARGET_NT4
        /* NT SP0 can't map to a specific address. */
        if (pvFixed != (void *)-1)
            return VERR_NOT_SUPPORTED;
#endif

        /* we can't map anything to the first page, sorry. */
        if (pvFixed == 0)
            return VERR_NOT_SUPPORTED;

        /* only one system mapping for now - no time to figure out MDL restrictions right now. */
        if (    pMemNtToMap->Core.uRel.Parent.cMappings
            &&  R0Process == NIL_RTR0PROCESS)
            return VERR_NOT_SUPPORTED;

        __try
        {
            /** @todo uAlignment */
            /** @todo How to set the protection on the pages? */
#ifdef IPRT_TARGET_NT4
            void *pv = MmMapLockedPages(pMemNtToMap->apMdls[0],
                                        R0Process == NIL_RTR0PROCESS ? KernelMode : UserMode);
#else
            void *pv = MmMapLockedPagesSpecifyCache(pMemNtToMap->apMdls[0],
                                                    R0Process == NIL_RTR0PROCESS ? KernelMode : UserMode,
                                                    MmCached,
                                                    pvFixed != (void *)-1 ? pvFixed : NULL,
                                                    FALSE /* no bug check on failure */,
                                                    NormalPagePriority);
#endif
            if (pv)
            {
                NOREF(fProt);

                PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_MAPPING, pv,
                                                                    pMemNtToMap->Core.cb);
                if (pMemNt)
                {
                    pMemNt->Core.u.Mapping.R0Process = R0Process;
                    *ppMem = &pMemNt->Core;
                    return VINF_SUCCESS;
                }

                rc = VERR_NO_MEMORY;
                MmUnmapLockedPages(pv, pMemNtToMap->apMdls[0]);
            }
        }
        __except(EXCEPTION_EXECUTE_HANDLER)
        {
            NTSTATUS rcNt = GetExceptionCode();
            Log(("rtR0MemObjNtMap: Exception Code %#x\n", rcNt));

            /* nothing */
            rc = VERR_MAP_FAILED;
        }

    }
    else
    {
        AssertReturn(   pMemNtToMap->Core.enmType == RTR0MEMOBJTYPE_PHYS
                     && !pMemNtToMap->Core.u.Phys.fAllocated, VERR_INTERNAL_ERROR);

        /* cannot map phys mem to user space (yet). */
        if (R0Process != NIL_RTR0PROCESS)
            return VERR_NOT_SUPPORTED;

        /** @todo uAlignment */
        /** @todo How to set the protection on the pages? */
        PHYSICAL_ADDRESS Phys;
        Phys.QuadPart = pMemNtToMap->Core.u.Phys.PhysBase;
        void *pv = MmMapIoSpace(Phys, pMemNtToMap->Core.cb, MmCached); /** @todo add cache type to fProt. */
        if (pv)
        {
            PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_MAPPING, pv,
                                                                pMemNtToMap->Core.cb);
            if (pMemNt)
            {
                pMemNt->Core.u.Mapping.R0Process = R0Process;
                *ppMem = &pMemNt->Core;
                return VINF_SUCCESS;
            }

            rc = VERR_NO_MEMORY;
            MmUnmapIoSpace(pv, pMemNtToMap->Core.cb);
        }
    }

    NOREF(uAlignment); NOREF(fProt);
    return rc;
}


int rtR0MemObjNativeMapKernel(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, void *pvFixed, size_t uAlignment,
                              unsigned fProt, size_t offSub, size_t cbSub)
{
    AssertMsgReturn(!offSub && !cbSub, ("%#x %#x\n", offSub, cbSub), VERR_NOT_SUPPORTED);
    return rtR0MemObjNtMap(ppMem, pMemToMap, pvFixed, uAlignment, fProt, NIL_RTR0PROCESS);
}


int rtR0MemObjNativeMapUser(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, RTR3PTR R3PtrFixed, size_t uAlignment, unsigned fProt, RTR0PROCESS R0Process)
{
    AssertReturn(R0Process == RTR0ProcHandleSelf(), VERR_NOT_SUPPORTED);
    return rtR0MemObjNtMap(ppMem, pMemToMap, (void *)R3PtrFixed, uAlignment, fProt, R0Process);
}
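/*
 * Illustrative sketch (not part of the original revision): mapping an existing
 * ring-0 allocation into the calling process through the public API, which
 * lands in rtR0MemObjNtMap() above with R0Process set. The handle is assumed
 * to come from one of the allocation workers in this file; the function name
 * is made up for the example.
 */
static int rtR0MemObjNtExampleMapIntoCaller(RTR0MEMOBJ hMemObj, PRTR0MEMOBJ phMapObj, PRTR3PTR pR3Ptr)
{
    int rc = RTR0MemObjMapUser(phMapObj, hMemObj, (RTR3PTR)-1 /* no fixed address */,
                               0 /* uAlignment: 0 = default (page) alignment */,
                               RTMEM_PROT_READ | RTMEM_PROT_WRITE, RTR0ProcHandleSelf());
    if (RT_SUCCESS(rc))
        *pR3Ptr = RTR0MemObjAddressR3(*phMapObj);   /* ring-3 address of the new mapping */
    return rc;
}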


int rtR0MemObjNativeProtect(PRTR0MEMOBJINTERNAL pMem, size_t offSub, size_t cbSub, uint32_t fProt)
{
    NOREF(pMem);
    NOREF(offSub);
    NOREF(cbSub);
    NOREF(fProt);
    return VERR_NOT_SUPPORTED;
}


RTHCPHYS rtR0MemObjNativeGetPagePhysAddr(PRTR0MEMOBJINTERNAL pMem, size_t iPage)
{
    PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)pMem;

    if (pMemNt->cMdls)
    {
        if (pMemNt->cMdls == 1)
        {
            PPFN_NUMBER paPfns = MmGetMdlPfnArray(pMemNt->apMdls[0]);
            return (RTHCPHYS)paPfns[iPage] << PAGE_SHIFT;
        }

        size_t iMdl = iPage / (MAX_LOCK_MEM_SIZE >> PAGE_SHIFT);
        size_t iMdlPfn = iPage % (MAX_LOCK_MEM_SIZE >> PAGE_SHIFT);
        PPFN_NUMBER paPfns = MmGetMdlPfnArray(pMemNt->apMdls[iMdl]);
        return (RTHCPHYS)paPfns[iMdlPfn] << PAGE_SHIFT;
    }

    switch (pMemNt->Core.enmType)
    {
        case RTR0MEMOBJTYPE_MAPPING:
            return rtR0MemObjNativeGetPagePhysAddr(pMemNt->Core.uRel.Child.pParent, iPage);

        case RTR0MEMOBJTYPE_PHYS:
            return pMemNt->Core.u.Phys.PhysBase + (iPage << PAGE_SHIFT);

        case RTR0MEMOBJTYPE_PAGE:
        case RTR0MEMOBJTYPE_PHYS_NC:
        case RTR0MEMOBJTYPE_LOW:
        case RTR0MEMOBJTYPE_CONT:
        case RTR0MEMOBJTYPE_LOCK:
        default:
            AssertMsgFailed(("%d\n", pMemNt->Core.enmType));
        case RTR0MEMOBJTYPE_RES_VIRT:
            return NIL_RTHCPHYS;
    }
}

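/*
 * Illustrative sketch (not part of the original revision): walking the
 * per-page physical addresses of an object via the public API, which for NT
 * resolves to rtR0MemObjNativeGetPagePhysAddr() above. Each address is only
 * logged; the function name is made up for the example.
 */
static void rtR0MemObjNtExampleLogPages(RTR0MEMOBJ hMemObj)
{
    size_t const cPages = RTR0MemObjSize(hMemObj) >> PAGE_SHIFT;
    for (size_t iPage = 0; iPage < cPages; iPage++)
    {
        RTHCPHYS HCPhys = RTR0MemObjGetPagePhysAddr(hMemObj, iPage);
        Log(("page %u: %RHp\n", (unsigned)iPage, HCPhys));
    }
}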