--- trunk/reactos/ntoskrnl/mm/mdl.c 2005-01-07 15:36:40 UTC (rev 12870)
+++ trunk/reactos/ntoskrnl/mm/mdl.c 2005-01-07 16:10:48 UTC (rev 12871)
@@ -40,6 +40,7 @@
/* FUNCTIONS *****************************************************************/
+
/*
* @unimplemented
*/
@@ -54,6 +55,7 @@
return STATUS_NOT_IMPLEMENTED;
}
+
VOID INIT_FUNCTION
MmInitializeMdlImplementation(VOID)
{
@@ -92,6 +94,7 @@
KeInitializeSpinLock(&MiMdlMappingRegionLock);
}
+
PVOID
MmGetMdlPageAddress(PMDL Mdl, PVOID Offset)
{
@@ -170,154 +173,7 @@
}
-
/*
- * @implemented
- */
-PVOID STDCALL
-MmMapLockedPages(PMDL Mdl, KPROCESSOR_MODE AccessMode)
-/*
- * FUNCTION: Maps the physical pages described by a given MDL
- * ARGUMENTS:
- * Mdl = Points to an MDL updated by MmProbeAndLockPages, MmBuildMdlForNonPagedPool
- * or IoBuildPartialMdl.
- * AccessMode = Specifies the portion of the address space to map the
- * pages.
- * RETURNS: The base virtual address that maps the locked pages for the
- * range described by the MDL
- *
- * If mapping into user space, pages are mapped into current address space.
- */
-{
- PVOID Base;
- PULONG MdlPages;
- KIRQL oldIrql;
- ULONG PageCount;
- ULONG StartingOffset;
- PEPROCESS CurrentProcess;
- NTSTATUS Status;
-
- DPRINT("MmMapLockedPages(Mdl %x, AccessMode %x)\n", Mdl, AccessMode);
-
- /* Calculate the number of pages required. */
- PageCount = PAGE_ROUND_UP(Mdl->ByteCount + Mdl->ByteOffset) / PAGE_SIZE;
-
- if (AccessMode == UserMode)
- {
- MEMORY_AREA *Result;
- LARGE_INTEGER BoundaryAddressMultiple;
- NTSTATUS Status;
-
- /* pretty sure you can't map partial mdl's to user space */
- ASSERT(!(Mdl->MdlFlags & MDL_PARTIAL));
-
- BoundaryAddressMultiple.QuadPart = 0;
- Base = NULL;
-
- CurrentProcess = PsGetCurrentProcess();
-
- MmLockAddressSpace(&CurrentProcess->AddressSpace);
- Status = MmCreateMemoryArea(CurrentProcess,
- &CurrentProcess->AddressSpace,
- MEMORY_AREA_MDL_MAPPING,
- &Base,
- PageCount * PAGE_SIZE,
- 0, /* PAGE_READWRITE? */
- &Result,
- FALSE,
- FALSE,
- BoundaryAddressMultiple);
- MmUnlockAddressSpace(&CurrentProcess->AddressSpace);
- if (!NT_SUCCESS(Status))
- {
- if (Mdl->MdlFlags & MDL_MAPPING_CAN_FAIL)
- {
- return NULL;
- }
-
- KEBUGCHECK(0);
- /* FIXME: handle this? */
- }
-
- Mdl->Process = CurrentProcess;
- }
- else /* if (AccessMode == KernelMode) */
- {
- /* can't map mdl twice */
- ASSERT(!(Mdl->MdlFlags & (MDL_MAPPED_TO_SYSTEM_VA|MDL_PARTIAL_HAS_BEEN_MAPPED)));
- /* can't map mdl buildt from non paged pool into kernel space */
- ASSERT(!(Mdl->MdlFlags & (MDL_SOURCE_IS_NONPAGED_POOL)));
-
- CurrentProcess = NULL;
-
- /* Allocate that number of pages from the mdl mapping region. */
- KeAcquireSpinLock(&MiMdlMappingRegionLock, &oldIrql);
-
- StartingOffset = RtlFindClearBitsAndSet(&MiMdlMappingRegionAllocMap, PageCount, MiMdlMappingRegionHint);
-
- if (StartingOffset == 0xffffffff)
- {
- KeReleaseSpinLock(&MiMdlMappingRegionLock, oldIrql);
-
- DPRINT1("Out of MDL mapping space\n");
-
- if (Mdl->MdlFlags & MDL_MAPPING_CAN_FAIL)
- {
- return NULL;
- }
-
- KEBUGCHECK(0);
- }
-
- Base = (PVOID)((ULONG_PTR)MiMdlMappingRegionBase + StartingOffset * PAGE_SIZE);
-
- if (MiMdlMappingRegionHint == StartingOffset)
- {
- MiMdlMappingRegionHint += PageCount;
- }
-
- KeReleaseSpinLock(&MiMdlMappingRegionLock, oldIrql);
- }
-
-
-
- /* Set the virtual mappings for the MDL pages. */
- MdlPages = (PULONG)(Mdl + 1);
-
- Status = MmCreateVirtualMapping(CurrentProcess,
- Base,
- PAGE_READWRITE,
- MdlPages,
- PageCount);
- if (!NT_SUCCESS(Status))
- {
- DbgPrint("Unable to create virtual mapping\n");
- if (Mdl->MdlFlags & MDL_MAPPING_CAN_FAIL)
- {
- return NULL;
- }
- KEBUGCHECK(0);
- }
-
- /* Mark the MDL has having being mapped. */
- if (AccessMode == KernelMode)
- {
- if (Mdl->MdlFlags & MDL_PARTIAL)
- {
- Mdl->MdlFlags |= MDL_PARTIAL_HAS_BEEN_MAPPED;
- }
- else
- {
- Mdl->MdlFlags |= MDL_MAPPED_TO_SYSTEM_VA;
- }
- Mdl->MappedSystemVa = (char*)Base + Mdl->ByteOffset;
- }
-
- return((char*)Base + Mdl->ByteOffset);
-}
-
-
-/*
* @unimplemented
*/
PVOID
@@ -333,6 +189,7 @@
return 0;
}
+
/*
* @implemented
*/
@@ -423,6 +280,7 @@
}
+
/*
* @unimplemented
*/
@@ -447,6 +305,7 @@
Mdl->MdlFlags |= MDL_IO_PAGE_READ;
}
+
/*
* @unimplemented
*/
@@ -461,6 +320,7 @@
return STATUS_NOT_IMPLEMENTED;
}
+
/*
* @unimplemented
*/
@@ -477,7 +337,7 @@
/*
- * @unimplemented
+ * @implemented
*/
VOID STDCALL MmProbeAndLockPages (PMDL Mdl,
KPROCESSOR_MODE AccessMode,
@@ -603,6 +463,7 @@
Mdl->MdlFlags |= MDL_PAGES_LOCKED;
}
+
/*
* @unimplemented
*/
@@ -618,6 +479,7 @@
UNIMPLEMENTED;
}
+
/*
* @unimplemented
*/
@@ -633,6 +495,7 @@
UNIMPLEMENTED;
}
+
/*
* @implemented
*/
@@ -732,6 +595,7 @@
return(Mdl);
}
+
/*
* @unimplemented
*/
@@ -745,8 +609,9 @@
UNIMPLEMENTED;
}
+
/*
- * @unimplemented
+ * @implemented
*/
PMDL STDCALL
MmAllocatePagesForMdl ( IN PHYSICAL_ADDRESS LowAddress,
@@ -776,15 +641,61 @@
*/
+ PMDL Mdl;
+ PPFN_TYPE Pages;
+ ULONG NumberOfPagesWanted, NumberOfPagesAllocated;
+ ULONG Ret;
+
+ DPRINT("MmAllocatePagesForMdl - LowAddress = 0x%I64x, HighAddress = 0x%I64x, "
+ "SkipBytes = 0x%I64x, Totalbytes = 0x%x\n",
+ LowAddress.QuadPart, HighAddress.QuadPart,
+ SkipBytes.QuadPart, Totalbytes);
+
/* SkipBytes must be a multiple of the page size */
ASSERT((SkipBytes.QuadPart % PAGE_SIZE) == 0);
-
- UNIMPLEMENTED;
- return(NULL);
+
+ /* Allocate memory for the MDL */
+ Mdl = MmCreateMdl(NULL, 0, Totalbytes);
+ if (Mdl == NULL)
+ {
+ return NULL;
+ }
+
+ /* Allocate pages into the MDL */
+ NumberOfPagesAllocated = 0;
+ NumberOfPagesWanted = PAGE_ROUND_UP(Mdl->ByteCount + Mdl->ByteOffset) / PAGE_SIZE;
+ Pages = (PPFN_TYPE)(Mdl + 1);
+ while (NumberOfPagesWanted > 0)
+ {
+ Ret = MmAllocPagesSpecifyRange(
+ MC_NPPOOL,
+ LowAddress,
+ HighAddress,
+ NumberOfPagesWanted,
+ Pages + NumberOfPagesAllocated);
+ if (Ret == -1)
+ break;
+
+ NumberOfPagesAllocated += Ret;
+ NumberOfPagesWanted -= Ret;
+
+ if (SkipBytes.QuadPart == 0)
+ break;
+ LowAddress.QuadPart += SkipBytes.QuadPart;
+ HighAddress.QuadPart += SkipBytes.QuadPart;
+ }
+
+ if (NumberOfPagesAllocated == 0)
+ {
+ ExFreePool(Mdl);
+ Mdl = NULL;
+ }
+ return Mdl;
}
+
/*
- * @unimplemented
+ * @implemented
*/
VOID STDCALL
MmFreePagesFromMdl ( IN PMDL Mdl )
@@ -801,12 +712,21 @@
Konstantin Gusev
*/
-
- UNIMPLEMENTED;
+ PPFN_TYPE Pages;
+ LONG NumberOfPages;
+
+ NumberOfPages = PAGE_ROUND_UP(Mdl->ByteCount + Mdl->ByteOffset) / PAGE_SIZE;
+ Pages = (PPFN_TYPE)(Mdl + 1);
+
+ while (--NumberOfPages >= 0)
+ {
+ MmDereferencePage(Pages[NumberOfPages]);
+ }
}
+
/*
- * @unimplemented
+ * @implemented
*/
PVOID STDCALL
MmMapLockedPagesSpecifyCache ( IN PMDL Mdl,
@@ -814,12 +734,184 @@
IN MEMORY_CACHING_TYPE CacheType,
IN PVOID BaseAddress,
IN ULONG BugCheckOnFailure,
- IN MM_PAGE_PRIORITY Priority )
+ IN MM_PAGE_PRIORITY Priority)
{
- UNIMPLEMENTED;
- return MmMapLockedPages (Mdl, AccessMode);
+ PVOID Base;
+ PULONG MdlPages;
+ KIRQL oldIrql;
+ ULONG PageCount;
+ ULONG StartingOffset;
+ PEPROCESS CurrentProcess;
+ NTSTATUS Status;
+ ULONG Protect;
+
+ DPRINT("MmMapLockedPagesSpecifyCache(Mdl 0x%x, AccessMode 0x%x, CacheType 0x%x, "
+ "BaseAddress 0x%x, BugCheckOnFailure 0x%x, Priority 0x%x)\n",
+ Mdl, AccessMode, CacheType, BaseAddress, BugCheckOnFailure, Priority);
+
+ /* FIXME: Implement Priority */
+ (void) Priority;
+
+ /* Calculate the number of pages required. */
+ PageCount = PAGE_ROUND_UP(Mdl->ByteCount + Mdl->ByteOffset) / PAGE_SIZE;
+
+ if (AccessMode == UserMode)
+ {
+ MEMORY_AREA *Result;
+ LARGE_INTEGER BoundaryAddressMultiple;
+ NTSTATUS Status;
+
+ /* pretty sure you can't map partial mdl's to user space */
+ ASSERT(!(Mdl->MdlFlags & MDL_PARTIAL));
+
+ BoundaryAddressMultiple.QuadPart = 0;
+ Base = BaseAddress;
+
+ CurrentProcess = PsGetCurrentProcess();
+
+ MmLockAddressSpace(&CurrentProcess->AddressSpace);
+ Status = MmCreateMemoryArea(CurrentProcess,
+ &CurrentProcess->AddressSpace,
+ MEMORY_AREA_MDL_MAPPING,
+ &Base,
+ PageCount * PAGE_SIZE,
+ 0, /* PAGE_READWRITE? */
+ &Result,
+ (Base != NULL),
+ FALSE,
+ BoundaryAddressMultiple);
+ MmUnlockAddressSpace(&CurrentProcess->AddressSpace);
+ if (!NT_SUCCESS(Status))
+ {
+ if (Mdl->MdlFlags & MDL_MAPPING_CAN_FAIL)
+ {
+ return NULL;
+ }
+
+ /* FIXME: Raise an exception instead of bugchecking */
+ KEBUGCHECK(0);
+ }
+
+ Mdl->Process = CurrentProcess;
+ }
+ else /* if (AccessMode == KernelMode) */
+ {
+ /* can't map mdl twice */
+ ASSERT(!(Mdl->MdlFlags & (MDL_MAPPED_TO_SYSTEM_VA|MDL_PARTIAL_HAS_BEEN_MAPPED)));
+ - /* can't map mdl built from non paged pool into kernel space */
+ ASSERT(!(Mdl->MdlFlags & (MDL_SOURCE_IS_NONPAGED_POOL)));
+
+ CurrentProcess = NULL;
+
+ /* Allocate that number of pages from the mdl mapping region. */
+ KeAcquireSpinLock(&MiMdlMappingRegionLock, &oldIrql);
+
+ StartingOffset = RtlFindClearBitsAndSet(&MiMdlMappingRegionAllocMap, PageCount, MiMdlMappingRegionHint);
+
+ if (StartingOffset == 0xffffffff)
+ {
+ KeReleaseSpinLock(&MiMdlMappingRegionLock, oldIrql);
+
+ DPRINT1("Out of MDL mapping space\n");
+
+ if ((Mdl->MdlFlags & MDL_MAPPING_CAN_FAIL) || !BugCheckOnFailure)
+ {
+ return NULL;
+ }
+
+ KEBUGCHECK(0);
+ }
+
+ Base = (PVOID)((ULONG_PTR)MiMdlMappingRegionBase + StartingOffset * PAGE_SIZE);
+
+ if (MiMdlMappingRegionHint == StartingOffset)
+ {
+ MiMdlMappingRegionHint += PageCount;
+ }
+
+ KeReleaseSpinLock(&MiMdlMappingRegionLock, oldIrql);
+ }
+
+ /* Set the virtual mappings for the MDL pages. */
+ MdlPages = (PULONG)(Mdl + 1);
+
+ Protect = PAGE_READWRITE;
+ if (CacheType == MmNonCached)
+ Protect |= PAGE_NOCACHE;
+ else if (CacheType == MmWriteCombined)
+ DPRINT("CacheType MmWriteCombined not supported!\n");
+ Status = MmCreateVirtualMapping(CurrentProcess,
+ Base,
+ Protect,
+ MdlPages,
+ PageCount);
+ if (!NT_SUCCESS(Status))
+ {
+ DbgPrint("Unable to create virtual mapping\n");
+ if (Mdl->MdlFlags & MDL_MAPPING_CAN_FAIL)
+ {
+ return NULL;
+ }
+ if (AccessMode == UserMode)
+ {
+ /* FIXME: Raise an exception */
+ return NULL;
+ }
+ else /* AccessMode == KernelMode */
+ {
+ if (!BugCheckOnFailure)
+ return NULL;
+
+ /* FIXME: Use some bugcheck code instead of 0 */
+ KEBUGCHECK(0);
+ }
+ }
+
+ - /* Mark the MDL as having been mapped. */
+ if (AccessMode == KernelMode)
+ {
+ if (Mdl->MdlFlags & MDL_PARTIAL)
+ {
+ Mdl->MdlFlags |= MDL_PARTIAL_HAS_BEEN_MAPPED;
+ }
+ else
+ {
+ Mdl->MdlFlags |= MDL_MAPPED_TO_SYSTEM_VA;
+ }
+ Mdl->MappedSystemVa = (char*)Base + Mdl->ByteOffset;
+ }
+
+ return((char*)Base + Mdl->ByteOffset);
}
+
+/*
+ * @implemented
+ */
+PVOID STDCALL
+MmMapLockedPages(PMDL Mdl, KPROCESSOR_MODE AccessMode)
+/*
+ * FUNCTION: Maps the physical pages described by a given MDL
+ * ARGUMENTS:
+ * Mdl = Points to an MDL updated by MmProbeAndLockPages, MmBuildMdlForNonPagedPool,
+ * MmAllocatePagesForMdl or IoBuildPartialMdl.
+ * AccessMode = Specifies the portion of the address space to map the
+ * pages.
+ * RETURNS: The base virtual address that maps the locked pages for the
+ * range described by the MDL
+ *
+ * If mapping into user space, pages are mapped into current address space.
+ */
+{
+ return MmMapLockedPagesSpecifyCache(Mdl,
+ AccessMode,
+ MmCached,
+ NULL,
+ TRUE,
+ NormalPagePriority);
+}
+
+
/* EOF */