https://git.reactos.org/?p=reactos.git;a=commitdiff;h=e0759a5e350e9f44e7e7b6fb28249adc78c2d0f6
commit e0759a5e350e9f44e7e7b6fb28249adc78c2d0f6
Author:     Doug Lyons <douglyons@douglyons.com>
AuthorDate: Mon Nov 18 02:44:51 2024 -0600
Commit:     GitHub <noreply@github.com>
CommitDate: Mon Nov 18 02:44:51 2024 -0600
    [NTOS:MM] Finish MmAllocateMappingAddress and MmFreeMappingAddress and fix test failures. (#7491)
    * [NTOS:MM] Fix MmAllocateMappingAddress and MmFreeMappingAddress and their regression test failures.
    Follow-up to #7260.
    This fixes the kmtest:MmReservedMapping failures and a hang.
    Based on mm-implement-mappingaddress.patch by Thomas Faber and some changes by Oleg Dubinskiy.
    kmtest:MmReservedMapping revisions and updates to the Vista+ method by Timo Kreuzer.
    Signed-off-by: Oleg Dubinskiy <oleg.dubinskij30@gmail.com>
    Signed-off-by: Timo Kreuzer <timo.kreuzer@reactos.org>
    CORE-10147, CORE-14635, CORE-17409, CORE-19318
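
For context, the routines touched here form one documented WDK workflow: a driver
reserves a system-VA range up front with MmAllocateMappingAddress, later maps a
locked MDL into it with MmMapLockedPagesWithReservedMapping (useful when mapping
must not fail for lack of system PTEs), and tears down with MmUnmapReservedMapping
and MmFreeMappingAddress. A minimal usage sketch; the tag, function name, and error
handling are illustrative and not taken from this commit:

    #include <ntddk.h>

    #define DEMO_TAG 'RsvM' /* illustrative; must be nonzero and reused on unmap/free */

    /* ReservedVa comes from MmAllocateMappingAddress(ReservedBytes, DEMO_TAG);
     * LockedMdl describes pages already locked, e.g. by MmProbeAndLockPages. */
    static NTSTATUS
    DemoUseReservedMapping(_In_ PVOID ReservedVa, _In_ PMDL LockedMdl)
    {
        PVOID Va;

        /* Returns NULL if the reserved range is too small for the MDL */
        Va = MmMapLockedPagesWithReservedMapping(ReservedVa, DEMO_TAG,
                                                 LockedMdl, MmCached);
        if (Va == NULL)
            return STATUS_INSUFFICIENT_RESOURCES;

        /* Use the mapping */
        RtlZeroMemory(Va, MmGetMdlByteCount(LockedMdl));

        /* A mismatched tag here bugchecks with SYSTEM_PTE_MISUSE
         * (see the new subcodes in miarm.h below) */
        MmUnmapReservedMapping(Va, DEMO_TAG, LockedMdl);
        return STATUS_SUCCESS;
    }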
---
 .../rostests/kmtests/ntos_mm/MmReservedMapping.c   |  27 ++-
 ntoskrnl/ex/init.c                                 |   3 +
 ntoskrnl/mm/ARM3/mdlsup.c                          | 221 +++++++++++++++++++--
 ntoskrnl/mm/ARM3/miarm.h                           |  15 +-
 ntoskrnl/mm/ARM3/pool.c                            |   4 +
 ntoskrnl/mm/freelist.c                             |   3 +-
 6 files changed, 251 insertions(+), 22 deletions(-)
diff --git a/modules/rostests/kmtests/ntos_mm/MmReservedMapping.c b/modules/rostests/kmtests/ntos_mm/MmReservedMapping.c
index a8204b5a3c0..d0cd4c605ab 100644
--- a/modules/rostests/kmtests/ntos_mm/MmReservedMapping.c
+++ b/modules/rostests/kmtests/ntos_mm/MmReservedMapping.c
@@ -9,6 +9,8 @@
 #include <kmt_test.h>
 static BOOLEAN g_IsPae;
+static ULONG g_OsVersion;
+static BOOLEAN g_IsReactOS;
 #ifdef _M_IX86
@@ -76,7 +78,7 @@ ValidateMapping(
     BOOLEAN Valid = TRUE;
 #if defined(_M_IX86) || defined(_M_AMD64)
     PUCHAR CurrentAddress;
-    ULONGLONG PteValue;
+    ULONGLONG PteValue, ExpectedValue;
     ULONG i;
     for (i = 0; i < ValidPtes; i++)
@@ -110,10 +112,26 @@ ValidateMapping(
                CurrentAddress, PteValue, PoolTag & ~1);
     CurrentAddress = (PUCHAR)BaseAddress - 2 * PAGE_SIZE;
     PteValue = GET_PTE_VALUE(CurrentAddress);
+
+    if (g_IsReactOS || g_OsVersion >= 0x0600)
+    {
+        /* On ReactOS and on Vista+ the size is stored in
+         * the NextEntry field of a MMPTE_LIST structure */
+#ifdef _M_IX86
+        ExpectedValue = (TotalPtes + 2) << 12;
+#elif defined(_M_AMD64)
+        ExpectedValue = ((ULONG64)TotalPtes + 2) << 32;
+#endif
+    }
+    else
+    {
+        /* On Windows 2003 the size is shifted by 1 bit only */
+        ExpectedValue = (TotalPtes + 2) * 2;
+    }
     Valid = Valid &&
-            ok(PteValue == (TotalPtes + 2) * 2,
+            ok(PteValue == ExpectedValue,
                "PTE for %p contains 0x%I64x, expected %x\n",
-               CurrentAddress, PteValue, (TotalPtes + 2) * 2);
+               CurrentAddress, PteValue, ExpectedValue);
 #endif
     return Valid;
@@ -281,6 +299,9 @@ START_TEST(MmReservedMapping)
     PVOID Mapping;
     g_IsPae = ExIsProcessorFeaturePresent(PF_PAE_ENABLED);
+    g_OsVersion = SharedUserData->NtMajorVersion << 8 | SharedUserData->NtMinorVersion;
+    g_IsReactOS = *(PULONG)(KI_USER_SHARED_DATA + PAGE_SIZE - sizeof(ULONG)) == 0x8eac705;
+    ok(g_IsReactOS == 1, "Not reactos\n");
     pMmAllocatePagesForMdlEx = KmtGetSystemRoutineAddress(L"MmAllocatePagesForMdlEx");
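
To make the new expectation concrete: with TotalPtes = 14, ReactOS and Vista+ store
(14 + 2) << 12 = 0x10000 in the size helper PTE on x86 (the shift is 32 on x64),
whereas Windows 2003 stores (14 + 2) * 2 = 32. A small decoding helper, ours and
purely illustrative, inverting what the test checks:

    /* Recover the reservation size from the first helper PTE; the stored
     * value is (TotalPtes + 2) << Shift, the +2 covering the two helper
     * PTEs themselves. */
    static ULONG
    DecodeReservedSizePte(ULONGLONG SizePteValue, BOOLEAN VistaPlusOrReactOS)
    {
    #ifdef _M_IX86
        ULONG Shift = VistaPlusOrReactOS ? 12 : 1;
    #else /* _M_AMD64 */
        ULONG Shift = VistaPlusOrReactOS ? 32 : 1;
    #endif
        return (ULONG)(SizePteValue >> Shift) - 2; /* back to TotalPtes */
    }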
diff --git a/ntoskrnl/ex/init.c b/ntoskrnl/ex/init.c
index 00e5f453dd8..433a635d7d2 100644
--- a/ntoskrnl/ex/init.c
+++ b/ntoskrnl/ex/init.c
@@ -1330,6 +1330,9 @@ ExpInitializeExecutive(IN ULONG Cpu,
     /* Set the machine type */
     SharedUserData->ImageNumberLow = IMAGE_FILE_MACHINE_NATIVE;
     SharedUserData->ImageNumberHigh = IMAGE_FILE_MACHINE_NATIVE;
+
+    /* ReactOS magic */
+    *(PULONG)(KI_USER_SHARED_DATA + PAGE_SIZE - sizeof(ULONG)) = 0x8eac705;
 }
 VOID
diff --git a/ntoskrnl/mm/ARM3/mdlsup.c b/ntoskrnl/mm/ARM3/mdlsup.c
index 76b06f0c72e..1bd22c1e78a 100644
--- a/ntoskrnl/mm/ARM3/mdlsup.c
+++ b/ntoskrnl/mm/ARM3/mdlsup.c
@@ -529,7 +529,7 @@ MmAllocatePagesForMdlEx(IN PHYSICAL_ADDRESS LowAddress,
     else
     {
         //
-        // Conver to internal caching attribute
+        // Convert to internal caching attribute
         //
         CacheAttribute = MiPlatformCacheAttributes[FALSE][CacheType];
     }
@@ -1622,29 +1622,224 @@ MmAdvanceMdl(IN PMDL Mdl,
 }
 /*
- * @unimplemented
+ * @implemented
  */
 PVOID
 NTAPI
-MmMapLockedPagesWithReservedMapping(IN PVOID MappingAddress,
-                                    IN ULONG PoolTag,
-                                    IN PMDL MemoryDescriptorList,
-                                    IN MEMORY_CACHING_TYPE CacheType)
+MmMapLockedPagesWithReservedMapping(
+    _In_ PVOID MappingAddress,
+    _In_ ULONG PoolTag,
+    _In_ PMDL Mdl,
+    _In_ MEMORY_CACHING_TYPE CacheType)
 {
-    UNIMPLEMENTED;
-    return 0;
+    PPFN_NUMBER MdlPages, LastPage;
+    PFN_COUNT PageCount;
+    BOOLEAN IsIoMapping;
+    MI_PFN_CACHE_ATTRIBUTE CacheAttribute;
+    PMMPTE PointerPte;
+    MMPTE TempPte;
+
+    ASSERT(Mdl->ByteCount != 0);
+
+    // Get the list of pages and count
+    MdlPages = MmGetMdlPfnArray(Mdl);
+    PageCount = ADDRESS_AND_SIZE_TO_SPAN_PAGES(MmGetMdlVirtualAddress(Mdl),
+                                               Mdl->ByteCount);
+    LastPage = MdlPages + PageCount;
+
+    // Sanity checks
+    ASSERT((Mdl->MdlFlags & (MDL_MAPPED_TO_SYSTEM_VA |
+                             MDL_SOURCE_IS_NONPAGED_POOL |
+                             MDL_PARTIAL_HAS_BEEN_MAPPED)) == 0);
+    ASSERT((Mdl->MdlFlags & (MDL_PAGES_LOCKED | MDL_PARTIAL)) != 0);
+
+    // Get the correct cache type
+    IsIoMapping = (Mdl->MdlFlags & MDL_IO_SPACE) != 0;
+    CacheAttribute = MiPlatformCacheAttributes[IsIoMapping][CacheType];
+
+    // Get the first PTE we reserved
+    ASSERT(MappingAddress);
+    PointerPte = MiAddressToPte(MappingAddress) - 2;
+    ASSERT(!PointerPte[0].u.Hard.Valid &&
+           !PointerPte[1].u.Hard.Valid);
+
+    // Verify that the pool tag matches
+    TempPte.u.Long = PoolTag;
+    TempPte.u.Hard.Valid = 0;
+    if (PointerPte[1].u.Long != TempPte.u.Long)
+    {
+        KeBugCheckEx(SYSTEM_PTE_MISUSE,
+                     PTE_MAPPING_ADDRESS_NOT_OWNED, /* Trying to map an address it does not own */
+                     (ULONG_PTR)MappingAddress,
+                     PoolTag,
+                     PointerPte[1].u.Long);
+    }
+
+    // We must have a size, and our helper PTEs must be invalid
+    if (PointerPte[0].u.List.NextEntry < 3)
+    {
+        KeBugCheckEx(SYSTEM_PTE_MISUSE,
+                     PTE_MAPPING_ADDRESS_INVALID, /* Trying to map an invalid address */
+                     (ULONG_PTR)MappingAddress,
+                     PoolTag,
+                     (ULONG_PTR)_ReturnAddress());
+    }
+
+    // If the mapping isn't big enough, fail
+    if (PointerPte[0].u.List.NextEntry - 2 < PageCount)
+    {
+        DPRINT1("Reserved mapping too small. Need %Iu pages, have %Iu\n",
+                        PageCount,
+                        PointerPte[0].u.List.NextEntry - 2);
+        return NULL;
+    }
+    // Skip our two helper PTEs
+    PointerPte += 2;
+
+    // Get the template
+    TempPte = ValidKernelPte;
+    switch (CacheAttribute)
+    {
+        case MiNonCached:
+            // Disable caching
+            MI_PAGE_DISABLE_CACHE(&TempPte);
+            MI_PAGE_WRITE_THROUGH(&TempPte);
+            break;
+
+        case MiWriteCombined:
+            // Enable write combining
+            MI_PAGE_DISABLE_CACHE(&TempPte);
+            MI_PAGE_WRITE_COMBINED(&TempPte);
+            break;
+
+        default:
+            // Nothing to do
+            break;
+    }
+
+    // Loop all PTEs
+    for (; (MdlPages < LastPage) && (*MdlPages != LIST_HEAD); ++MdlPages)
+    {
+        // Write the PTE
+        TempPte.u.Hard.PageFrameNumber = *MdlPages;
+        MI_WRITE_VALID_PTE(PointerPte++, TempPte);
+    }
+
+    // Mark it as mapped
+    ASSERT((Mdl->MdlFlags & MDL_MAPPED_TO_SYSTEM_VA) == 0);
+    Mdl->MappedSystemVa = MappingAddress;
+    Mdl->MdlFlags |= MDL_MAPPED_TO_SYSTEM_VA;
+
+    // Check if it was partial
+    if (Mdl->MdlFlags & MDL_PARTIAL)
+    {
+        // Write the appropriate flag here too
+        Mdl->MdlFlags |= MDL_PARTIAL_HAS_BEEN_MAPPED;
+    }
+
+    // Return the mapped address
+    return (PVOID)((ULONG_PTR)MappingAddress + Mdl->ByteOffset);
 }
 /*
- * @unimplemented
+ * @implemented
  */
 VOID
 NTAPI
-MmUnmapReservedMapping(IN PVOID BaseAddress,
-                       IN ULONG PoolTag,
-                       IN PMDL MemoryDescriptorList)
+MmUnmapReservedMapping(
+    _In_ PVOID BaseAddress,
+    _In_ ULONG PoolTag,
+    _In_ PMDL Mdl)
 {
-    UNIMPLEMENTED;
+    PVOID Base;
+    PFN_COUNT PageCount, ExtraPageCount;
+    PPFN_NUMBER MdlPages;
+    PMMPTE PointerPte;
+    MMPTE TempPte;
+
+    // Sanity check
+    ASSERT(Mdl->ByteCount != 0);
+    ASSERT(BaseAddress > MM_HIGHEST_USER_ADDRESS);
+
+    // Get base and count information
+    Base = (PVOID)((ULONG_PTR)Mdl->StartVa + Mdl->ByteOffset);
+    PageCount = ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base, Mdl->ByteCount);
+
+    // Sanity checks
+    ASSERT((Mdl->MdlFlags & MDL_PARENT_MAPPED_SYSTEM_VA) == 0);
+    ASSERT(PageCount != 0);
+    ASSERT(Mdl->MdlFlags & MDL_MAPPED_TO_SYSTEM_VA);
+
+    // Get the first PTE we reserved
+    PointerPte = MiAddressToPte(BaseAddress) - 2;
+    ASSERT(!PointerPte[0].u.Hard.Valid &&
+           !PointerPte[1].u.Hard.Valid);
+
+    // Verify that the pool tag matches
+    TempPte.u.Long = PoolTag;
+    TempPte.u.Hard.Valid = 0;
+    if (PointerPte[1].u.Long != TempPte.u.Long)
+    {
+        KeBugCheckEx(SYSTEM_PTE_MISUSE,
+                     PTE_UNMAPPING_ADDRESS_NOT_OWNED, /* Trying to unmap an address it does not own */
+                     (ULONG_PTR)BaseAddress,
+                     PoolTag,
+                     PointerPte[1].u.Long);
+    }
+
+    // We must have a size
+    if (PointerPte[0].u.List.NextEntry < 3)
+    {
+        KeBugCheckEx(SYSTEM_PTE_MISUSE,
+                     PTE_MAPPING_ADDRESS_EMPTY, /* Mapping apparently empty */
+                     (ULONG_PTR)BaseAddress,
+                     PoolTag,
+                     (ULONG_PTR)_ReturnAddress());
+    }
+
+    // Skip our two helper PTEs
+    PointerPte += 2;
+
+    // This should be a resident system PTE
+    ASSERT(PointerPte >= MmSystemPtesStart[SystemPteSpace]);
+    ASSERT(PointerPte <= MmSystemPtesEnd[SystemPteSpace]);
+    ASSERT(PointerPte->u.Hard.Valid == 1);
+
+    // TODO: check the MDL range makes sense with regard to the mapping range
+    // TODO: check if any of them are already zero
+    // TODO: check if any outside the MDL range are nonzero
+    // TODO: find out what to do with extra PTEs
+
+    // Check if the caller wants us to free advanced pages
+    if (Mdl->MdlFlags & MDL_FREE_EXTRA_PTES)
+    {
+        // Get the MDL page array
+        MdlPages = MmGetMdlPfnArray(Mdl);
+
+        /* Number of extra pages stored after the PFN array */
+        ExtraPageCount = MdlPages[PageCount];
+
+        // Do the math
+        PageCount += ExtraPageCount;
+        PointerPte -= ExtraPageCount;
+        ASSERT(PointerPte >= MmSystemPtesStart[SystemPteSpace]);
+        ASSERT(PointerPte <= MmSystemPtesEnd[SystemPteSpace]);
+
+        // Get the new base address
+        BaseAddress = (PVOID)((ULONG_PTR)BaseAddress -
+                              (ExtraPageCount << PAGE_SHIFT));
+    }
+
+    // Zero the PTEs
+    RtlZeroMemory(PointerPte, PageCount * sizeof(MMPTE));
+
+    // Flush the TLB
+    KeFlushEntireTb(TRUE, TRUE);
+
+    // Remove flags
+    Mdl->MdlFlags &= ~(MDL_MAPPED_TO_SYSTEM_VA |
+                       MDL_PARTIAL_HAS_BEEN_MAPPED |
+                       MDL_FREE_EXTRA_PTES);
 }
 /*
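
Both routines above depend on the same convention, established by
MmAllocateMappingAddress in pool.c: the two PTEs immediately preceding the
returned address are invalid helper PTEs that record the reservation. A sketch
of the layout, reconstructed from the hunks above:

    /*
     * PointerPte[0]  invalid; u.List.NextEntry = PageCount + 2 (total PTE
     *                count including both helpers; hence the "< 3" checks)
     * PointerPte[1]  invalid; u.Long = PoolTag with u.Hard.Valid forced to 0
     * PointerPte[2]  first mappable PTE; its page is the address handed
     *                back to the caller
     */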
diff --git a/ntoskrnl/mm/ARM3/miarm.h b/ntoskrnl/mm/ARM3/miarm.h
index 7515b4c506b..8e5fc41b89c 100644
--- a/ntoskrnl/mm/ARM3/miarm.h
+++ b/ntoskrnl/mm/ARM3/miarm.h
@@ -156,11 +156,17 @@ C_ASSERT(SYSTEM_PD_SIZE == PAGE_SIZE);
 //
 // Some internal SYSTEM_PTE_MISUSE bugcheck subcodes
+// These names were created by Oleg Dubinskiy and Doug Lyons for ReactOS. For reference, see
+// https://learn.microsoft.com/en-us/windows-hardware/drivers/debugger/bug-che…
 //
-#define PTE_MAPPING_NONE        0x100
-#define PTE_MAPPING_NOT_OWNED   0x101
-#define PTE_MAPPING_EMPTY       0x102
-#define PTE_MAPPING_RESERVED    0x103
+#define PTE_MAPPING_NONE                0x100
+#define PTE_MAPPING_NOT_OWNED           0x101
+#define PTE_MAPPING_EMPTY               0x102
+#define PTE_MAPPING_RESERVED            0x103
+#define PTE_MAPPING_ADDRESS_NOT_OWNED   0x104
+#define PTE_MAPPING_ADDRESS_INVALID     0x105
+#define PTE_UNMAPPING_ADDRESS_NOT_OWNED 0x108
+#define PTE_MAPPING_ADDRESS_EMPTY       0x109
 //
 // Mask for image section page protection
@@ -1002,7 +1008,6 @@ MI_WRITE_INVALID_PTE(IN PMMPTE PointerPte,
 {
     /* Write the invalid PTE */
     ASSERT(InvalidPte.u.Hard.Valid == 0);
-    ASSERT(InvalidPte.u.Long != 0);
     *PointerPte = InvalidPte;
 }
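
When one of the new subcodes fires, it arrives as parameter 1 of stop code 0xDA
(SYSTEM_PTE_MISUSE); the remaining parameters follow the pattern of the
KeBugCheckEx calls in mdlsup.c above, restated here for reference:

    KeBugCheckEx(SYSTEM_PTE_MISUSE,  /* stop code 0xDA */
                 Subcode,            /* e.g. PTE_MAPPING_ADDRESS_NOT_OWNED (0x104) */
                 (ULONG_PTR)Address, /* the mapping/unmapping address */
                 PoolTag,            /* the tag the caller passed in */
                 Extra);             /* stored tag PTE, or _ReturnAddress() */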
diff --git a/ntoskrnl/mm/ARM3/pool.c b/ntoskrnl/mm/ARM3/pool.c
index badc8a3e4d7..aca50bb2149 100644
--- a/ntoskrnl/mm/ARM3/pool.c
+++ b/ntoskrnl/mm/ARM3/pool.c
@@ -1580,6 +1580,10 @@ MmAllocateMappingAddress(
     PMMPTE PointerPte;
     MMPTE TempPte;
+    /* Fast exit if PoolTag is NULL */
+    if (!PoolTag)
+        return NULL;
+
     /* How many PTEs does the caller want? */
     SizeInPages = BYTES_TO_PAGES(NumberOfBytes);
     if (SizeInPages == 0)
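
The zero-tag guard is defensive: the tag is stored verbatim (Valid bit clear) in
the second helper PTE, so a zero tag would presumably be indistinguishable from an
empty PTE in the ownership checks above. After this change a zero tag simply fails
the allocation (trivial sketch, ours):

    /* A zero tag is now rejected up front */
    PVOID Va = MmAllocateMappingAddress(PAGE_SIZE, 0);
    ASSERT(Va == NULL);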
diff --git a/ntoskrnl/mm/freelist.c b/ntoskrnl/mm/freelist.c
index 95d73eb3cd4..6f2f99bfc3c 100644
--- a/ntoskrnl/mm/freelist.c
+++ b/ntoskrnl/mm/freelist.c
@@ -192,8 +192,9 @@ MiAllocatePagesForMdl(IN PHYSICAL_ADDRESS LowAddress,
     KIRQL OldIrql;
     PMMPFN Pfn1;
     INT LookForZeroedPages;
+
     ASSERT(KeGetCurrentIrql() <= APC_LEVEL);
-    DPRINT1("ARM3-DEBUG: Being called with %I64x %I64x %I64x %lx %d %lu\n", LowAddress, HighAddress, SkipBytes, TotalBytes, CacheAttribute, MdlFlags);
+    DPRINT("ARM3-DEBUG: Being called with %I64x %I64x %I64x %lx %d %lu\n", LowAddress, HighAddress, SkipBytes, TotalBytes, CacheAttribute, MdlFlags);
     //
     // Convert the low address into a PFN