Author: sir_richard
Date: Sat Jun 5 20:26:15 2010
New Revision: 47598
URL: http://svn.reactos.org/svn/reactos?rev=47598&view=rev
Log:
[NTOS]: Fix Exp*PoolList macros. Also make them non-inlined, so we can see who called them in a stack trace.
[NTOS]: Enable them.
This boots on my system -- if it doesn't boot on yours, someone is corrupting your nonpaged pool. Reverting this patch is NOT the solution to your woes.
Modified:
trunk/reactos/ntoskrnl/mm/ARM3/expool.c
Modified: trunk/reactos/ntoskrnl/mm/ARM3/expool.c
URL: http://svn.reactos.org/svn/reactos/trunk/reactos/ntoskrnl/mm/ARM3/expool.c?…
==============================================================================
--- trunk/reactos/ntoskrnl/mm/ARM3/expool.c [iso-8859-1] (original)
+++ trunk/reactos/ntoskrnl/mm/ARM3/expool.c [iso-8859-1] Sat Jun 5 20:26:15 2010
@@ -39,30 +39,33 @@
* Pool list access debug macros, similar to Arthur's pfnlist.c work.
* Microsoft actually implements similar checks in the Windows Server 2003 SP1
* pool code, but only for checked builds.
+ *
* As of Vista, however, an MSDN Blog entry by a Security Team Manager indicates
* that these checks are done even on retail builds, due to the increasing
* number of kernel-mode attacks which depend on dangling list pointers and other
* kinds of list-based attacks.
+ *
* For now, I will leave these checks on all the time, but later they are likely
* to be DBG-only, at least until there are enough kernel-mode security attacks
* against ReactOS to warrant the performance hit.
*
- */
-FORCEINLINE
+ * For now, these are not made inline, so we can get good stack traces.
+ */
+NTAPI
PLIST_ENTRY
ExpDecodePoolLink(IN PLIST_ENTRY Link)
{
return (PLIST_ENTRY)((ULONG_PTR)Link & ~1);
}
-FORCEINLINE
+NTAPI
PLIST_ENTRY
ExpEncodePoolLink(IN PLIST_ENTRY Link)
{
return (PLIST_ENTRY)((ULONG_PTR)Link | 1);
}
-FORCEINLINE
+NTAPI
VOID
ExpCheckPoolLinks(IN PLIST_ENTRY ListHead)
{
@@ -77,52 +80,56 @@
}
}
-FORCEINLINE
+NTAPI
VOID
ExpInitializePoolListHead(IN PLIST_ENTRY ListHead)
{
ListHead->Flink = ListHead->Blink = ExpEncodePoolLink(ListHead);
}
-FORCEINLINE
+NTAPI
BOOLEAN
ExpIsPoolListEmpty(IN PLIST_ENTRY ListHead)
{
return (ExpDecodePoolLink(ListHead->Flink) == ListHead);
}
-FORCEINLINE
+NTAPI
VOID
ExpRemovePoolEntryList(IN PLIST_ENTRY Entry)
{
PLIST_ENTRY Blink, Flink;
Flink = ExpDecodePoolLink(Entry->Flink);
Blink = ExpDecodePoolLink(Entry->Blink);
+ Flink->Blink = ExpEncodePoolLink(Blink);
Blink->Flink = ExpEncodePoolLink(Flink);
- Flink->Blink = ExpEncodePoolLink(Blink);
-}
-
-FORCEINLINE
+}
+
+NTAPI
PLIST_ENTRY
ExpRemovePoolHeadList(IN PLIST_ENTRY ListHead)
{
- PLIST_ENTRY Head;
- Head = ExpDecodePoolLink(ListHead->Flink);
- ExpRemovePoolEntryList(Head);
- return Head;
-}
-
-FORCEINLINE
+ PLIST_ENTRY Entry, Flink;
+ Entry = ExpDecodePoolLink(ListHead->Flink);
+ Flink = ExpDecodePoolLink(Entry->Flink);
+ ListHead->Flink = ExpEncodePoolLink(Flink);
+ Flink->Blink = ExpEncodePoolLink(ListHead);
+ return Entry;
+}
+
+NTAPI
PLIST_ENTRY
ExpRemovePoolTailList(IN PLIST_ENTRY ListHead)
{
- PLIST_ENTRY Tail;
- Tail = ExpDecodePoolLink(ListHead->Blink);
- ExpRemovePoolEntryList(Tail);
- return Tail;
-}
-
-FORCEINLINE
+ PLIST_ENTRY Entry, Blink;
+ Entry = ExpDecodePoolLink(ListHead->Blink);
+ Blink = ExpDecodePoolLink(Entry->Blink);
+ ListHead->Blink = ExpEncodePoolLink(Blink);
+ Blink->Flink = ExpEncodePoolLink(ListHead);
+ return Entry;
+}
+
+NTAPI
VOID
ExpInsertPoolTailList(IN PLIST_ENTRY ListHead,
IN PLIST_ENTRY Entry)
@@ -137,14 +144,14 @@
ExpCheckPoolLinks(ListHead);
}
-FORCEINLINE
+NTAPI
VOID
ExpInsertPoolHeadList(IN PLIST_ENTRY ListHead,
IN PLIST_ENTRY Entry)
{
PLIST_ENTRY Flink;
ExpCheckPoolLinks(ListHead);
- Flink = ExpDecodePoolLink(ListHead->Blink);
+ Flink = ExpDecodePoolLink(ListHead->Flink);
Entry->Flink = ExpEncodePoolLink(Flink);
Entry->Blink = ExpEncodePoolLink(ListHead);
Flink->Blink = ExpEncodePoolLink(Entry);
@@ -194,7 +201,7 @@
LastEntry = NextEntry + POOL_LISTS_PER_PAGE;
while (NextEntry < LastEntry)
{
- InitializeListHead(NextEntry);
+ ExpInitializePoolListHead(NextEntry);
NextEntry++;
}
}
@@ -379,7 +386,7 @@
//
// Are there any free entries available on this list?
//
- if (!IsListEmpty(ListHead))
+ if (!ExpIsPoolListEmpty(ListHead))
{
//
// Acquire the pool lock now
@@ -389,7 +396,7 @@
//
// And make sure the list still has entries
//
- if (IsListEmpty(ListHead))
+ if (ExpIsPoolListEmpty(ListHead))
{
//
// Someone raced us (and won) before we had a chance to acquire
@@ -408,7 +415,9 @@
// there is a guarantee that any block on this list will either be
// of the correct size, or perhaps larger.
//
- Entry = POOL_ENTRY(RemoveHeadList(ListHead));
+ ExpCheckPoolLinks(ListHead);
+ Entry = POOL_ENTRY(ExpRemovePoolHeadList(ListHead));
+ ExpCheckPoolLinks(ListHead);
ASSERT(Entry->BlockSize >= i);
ASSERT(Entry->PoolType == 0);
@@ -508,13 +517,15 @@
// "full" entry, which contains enough bytes for a linked list
// and thus can be used for allocations (up to 8 bytes...)
//
+ ExpCheckPoolLinks(&PoolDesc->ListHeads[BlockSize - 1]);
if (BlockSize != 1)
{
//
// Insert the free entry into the free list for this size
//
- InsertTailList(&PoolDesc->ListHeads[BlockSize - 1],
- POOL_FREE_BLOCK(FragmentEntry));
+ ExpInsertPoolTailList(&PoolDesc->ListHeads[BlockSize - 1],
+ POOL_FREE_BLOCK(FragmentEntry));
+ ExpCheckPoolLinks(POOL_FREE_BLOCK(FragmentEntry));
}
}
@@ -571,8 +582,10 @@
//
// And insert the free entry into the free list for this block size
//
- InsertTailList(&PoolDesc->ListHeads[BlockSize - 1],
- POOL_FREE_BLOCK(FragmentEntry));
+ ExpCheckPoolLinks(&PoolDesc->ListHeads[BlockSize - 1]);
+ ExpInsertPoolTailList(&PoolDesc->ListHeads[BlockSize - 1],
+ POOL_FREE_BLOCK(FragmentEntry));
+ ExpCheckPoolLinks(POOL_FREE_BLOCK(FragmentEntry));
//
// Release the pool lock
@@ -690,7 +703,10 @@
// The block is at least big enough to have a linked list, so go
// ahead and remove it
//
- RemoveEntryList(POOL_FREE_BLOCK(NextEntry));
+ ExpCheckPoolLinks(POOL_FREE_BLOCK(NextEntry));
+ ExpRemovePoolEntryList(POOL_FREE_BLOCK(NextEntry));
+ ExpCheckPoolLinks(ExpDecodePoolLink((POOL_FREE_BLOCK(NextEntry))->Flink));
+ ExpCheckPoolLinks(ExpDecodePoolLink((POOL_FREE_BLOCK(NextEntry))->Blink));
}
//
@@ -727,7 +743,10 @@
// The block is at least big enough to have a linked list, so go
// ahead and remove it
//
- RemoveEntryList(POOL_FREE_BLOCK(NextEntry));
+ ExpCheckPoolLinks(POOL_FREE_BLOCK(NextEntry));
+ ExpRemovePoolEntryList(POOL_FREE_BLOCK(NextEntry));
+ ExpCheckPoolLinks(ExpDecodePoolLink((POOL_FREE_BLOCK(NextEntry))->Flink));
+ ExpCheckPoolLinks(ExpDecodePoolLink((POOL_FREE_BLOCK(NextEntry))->Blink));
}
//
@@ -787,7 +806,8 @@
//
// Insert this new free block, and release the pool lock
//
- InsertHeadList(&PoolDesc->ListHeads[BlockSize - 1], POOL_FREE_BLOCK(Entry));
+ ExpInsertPoolHeadList(&PoolDesc->ListHeads[BlockSize - 1], POOL_FREE_BLOCK(Entry));
+ ExpCheckPoolLinks(POOL_FREE_BLOCK(Entry));
ExUnlockPool(PoolDesc, OldIrql);
}
Author: sir_richard
Date: Sat Jun 5 20:02:45 2010
New Revision: 47596
URL: http://svn.reactos.org/svn/reactos?rev=47596&view=rev
Log:
[NTOS]: Add some paranoid-invariant list access checks to the pool code. They serve a dual purpose: catch pool corruption by broken drivers/kernel code, as well as catch malicious modification of the pool links as part of a kernel-mode exploit.
[NTOS]: Not yet used, thanks to Arthur for the idea.
See comment for more information.
Modified:
trunk/reactos/ntoskrnl/mm/ARM3/expool.c
Modified: trunk/reactos/ntoskrnl/mm/ARM3/expool.c
URL: http://svn.reactos.org/svn/reactos/trunk/reactos/ntoskrnl/mm/ARM3/expool.c?…
==============================================================================
--- trunk/reactos/ntoskrnl/mm/ARM3/expool.c [iso-8859-1] (original)
+++ trunk/reactos/ntoskrnl/mm/ARM3/expool.c [iso-8859-1] Sat Jun 5 20:02:45 2010
@@ -34,7 +34,124 @@
#define POOL_BLOCK(x, i) (PPOOL_HEADER)((ULONG_PTR)(x) + ((i) * POOL_BLOCK_SIZE))
#define POOL_NEXT_BLOCK(x) POOL_BLOCK((x), (x)->BlockSize)
#define POOL_PREV_BLOCK(x) POOL_BLOCK((x), -(x)->PreviousSize)
-
+
+/*
+ * Pool list access debug macros, similar to Arthur's pfnlist.c work.
+ * Microsoft actually implements similar checks in the Windows Server 2003 SP1
+ * pool code, but only for checked builds.
+ * As of Vista, however, an MSDN Blog entry by a Security Team Manager indicates
+ * that these checks are done even on retail builds, due to the increasing
+ * number of kernel-mode attacks which depend on dangling list pointers and other
+ * kinds of list-based attacks.
+ * For now, I will leave these checks on all the time, but later they are likely
+ * to be DBG-only, at least until there are enough kernel-mode security attacks
+ * against ReactOS to warrant the performance hit.
+ *
+ */
+FORCEINLINE
+PLIST_ENTRY
+ExpDecodePoolLink(IN PLIST_ENTRY Link)
+{
+ return (PLIST_ENTRY)((ULONG_PTR)Link & ~1);
+}
+
+FORCEINLINE
+PLIST_ENTRY
+ExpEncodePoolLink(IN PLIST_ENTRY Link)
+{
+ return (PLIST_ENTRY)((ULONG_PTR)Link | 1);
+}
+
+FORCEINLINE
+VOID
+ExpCheckPoolLinks(IN PLIST_ENTRY ListHead)
+{
+ if ((ExpDecodePoolLink(ExpDecodePoolLink(ListHead->Flink)->Blink) != ListHead) ||
+ (ExpDecodePoolLink(ExpDecodePoolLink(ListHead->Blink)->Flink) != ListHead))
+ {
+ KeBugCheckEx(BAD_POOL_HEADER,
+ 3,
+ (ULONG_PTR)ListHead,
+ (ULONG_PTR)ExpDecodePoolLink(ExpDecodePoolLink(ListHead->Flink)->Blink),
+ (ULONG_PTR)ExpDecodePoolLink(ExpDecodePoolLink(ListHead->Blink)->Flink));
+ }
+}
+
+FORCEINLINE
+VOID
+ExpInitializePoolListHead(IN PLIST_ENTRY ListHead)
+{
+ ListHead->Flink = ListHead->Blink = ExpEncodePoolLink(ListHead);
+}
+
+FORCEINLINE
+BOOLEAN
+ExpIsPoolListEmpty(IN PLIST_ENTRY ListHead)
+{
+ return (ExpDecodePoolLink(ListHead->Flink) == ListHead);
+}
+
+FORCEINLINE
+VOID
+ExpRemovePoolEntryList(IN PLIST_ENTRY Entry)
+{
+ PLIST_ENTRY Blink, Flink;
+ Flink = ExpDecodePoolLink(Entry->Flink);
+ Blink = ExpDecodePoolLink(Entry->Blink);
+ Blink->Flink = ExpEncodePoolLink(Flink);
+ Flink->Blink = ExpEncodePoolLink(Blink);
+}
+
+FORCEINLINE
+PLIST_ENTRY
+ExpRemovePoolHeadList(IN PLIST_ENTRY ListHead)
+{
+ PLIST_ENTRY Head;
+ Head = ExpDecodePoolLink(ListHead->Flink);
+ ExpRemovePoolEntryList(Head);
+ return Head;
+}
+
+FORCEINLINE
+PLIST_ENTRY
+ExpRemovePoolTailList(IN PLIST_ENTRY ListHead)
+{
+ PLIST_ENTRY Tail;
+ Tail = ExpDecodePoolLink(ListHead->Blink);
+ ExpRemovePoolEntryList(Tail);
+ return Tail;
+}
+
+FORCEINLINE
+VOID
+ExpInsertPoolTailList(IN PLIST_ENTRY ListHead,
+ IN PLIST_ENTRY Entry)
+{
+ PLIST_ENTRY Blink;
+ ExpCheckPoolLinks(ListHead);
+ Blink = ExpDecodePoolLink(ListHead->Blink);
+ Entry->Flink = ExpEncodePoolLink(ListHead);
+ Entry->Blink = ExpEncodePoolLink(Blink);
+ Blink->Flink = ExpEncodePoolLink(Entry);
+ ListHead->Blink = ExpEncodePoolLink(Entry);
+ ExpCheckPoolLinks(ListHead);
+}
+
+FORCEINLINE
+VOID
+ExpInsertPoolHeadList(IN PLIST_ENTRY ListHead,
+ IN PLIST_ENTRY Entry)
+{
+ PLIST_ENTRY Flink;
+ ExpCheckPoolLinks(ListHead);
+ Flink = ExpDecodePoolLink(ListHead->Blink);
+ Entry->Flink = ExpEncodePoolLink(Flink);
+ Entry->Blink = ExpEncodePoolLink(ListHead);
+ Flink->Blink = ExpEncodePoolLink(Entry);
+ ListHead->Flink = ExpEncodePoolLink(Entry);
+ ExpCheckPoolLinks(ListHead);
+}
+
/* PRIVATE FUNCTIONS **********************************************************/
VOID
Author: sir_richard
Date: Sat Jun 5 19:53:17 2010
New Revision: 47594
URL: http://svn.reactos.org/svn/reactos?rev=47594&view=rev
Log:
[NTOS]: Use logical math operations on the various block<->entry<->free_list_head operations in the pool code, instead of works-by-chance-and-assumption pointer math operations. This will now allow pool implementations where the pool header is not the size of a pool block (and the size of a LIST_ENTRY, by definition, although, even that, could change, if we choose to implement a cache-aligned overhead).
Modified:
trunk/reactos/ntoskrnl/mm/ARM3/expool.c
Modified: trunk/reactos/ntoskrnl/mm/ARM3/expool.c
URL: http://svn.reactos.org/svn/reactos/trunk/reactos/ntoskrnl/mm/ARM3/expool.c?…
==============================================================================
--- trunk/reactos/ntoskrnl/mm/ARM3/expool.c [iso-8859-1] (original)
+++ trunk/reactos/ntoskrnl/mm/ARM3/expool.c [iso-8859-1] Sat Jun 5 19:53:17 2010
@@ -28,6 +28,13 @@
PVOID PoolTrackTable;
PKGUARDED_MUTEX ExpPagedPoolMutex;
+/* Pool block/header/list access macros */
+#define POOL_ENTRY(x) (PPOOL_HEADER)((ULONG_PTR)x - sizeof(POOL_HEADER))
+#define POOL_FREE_BLOCK(x) (PLIST_ENTRY)((ULONG_PTR)x + sizeof(POOL_HEADER))
+#define POOL_BLOCK(x, i) (PPOOL_HEADER)((ULONG_PTR)x + ((i) * POOL_BLOCK_SIZE))
+#define POOL_NEXT_BLOCK(x) POOL_BLOCK(x, x->BlockSize)
+#define POOL_PREV_BLOCK(x) POOL_BLOCK(x, -x->PreviousSize)
+
/* PRIVATE FUNCTIONS **********************************************************/
VOID
@@ -68,7 +75,11 @@
//
NextEntry = PoolDescriptor->ListHeads;
LastEntry = NextEntry + POOL_LISTS_PER_PAGE;
- while (NextEntry < LastEntry) InitializeListHead(NextEntry++);
+ while (NextEntry < LastEntry)
+ {
+ InitializeListHead(NextEntry);
+ NextEntry++;
+ }
}
VOID
@@ -239,8 +250,7 @@
// request would've been treated as a POOL_MAX_ALLOC earlier and resulted in
// the direct allocation of pages.
//
- i = (NumberOfBytes + sizeof(POOL_HEADER) + sizeof(LIST_ENTRY) - 1) /
- sizeof(POOL_HEADER);
+ i = (NumberOfBytes + sizeof(POOL_HEADER) + (POOL_BLOCK_SIZE - 1)) / POOL_BLOCK_SIZE;
//
// Loop in the free lists looking for a block if this size. Start with the
@@ -281,7 +291,7 @@
// there is a guarantee that any block on this list will either be
// of the correct size, or perhaps larger.
//
- Entry = (PPOOL_HEADER)RemoveHeadList(ListHead) - 1;
+ Entry = POOL_ENTRY(RemoveHeadList(ListHead));
ASSERT(Entry->BlockSize >= i);
ASSERT(Entry->PoolType == 0);
@@ -302,7 +312,7 @@
// turn it into a fragment that contains the leftover data
// that we don't need to satisfy the caller's request
//
- FragmentEntry = Entry + i;
+ FragmentEntry = POOL_BLOCK(Entry, i);
FragmentEntry->BlockSize = Entry->BlockSize - i;
//
@@ -314,7 +324,7 @@
// Now get the block that follows the new fragment and check
// if it's still on the same page as us (and not at the end)
//
- NextEntry = FragmentEntry + FragmentEntry->BlockSize;
+ NextEntry = POOL_NEXT_BLOCK(FragmentEntry);
if (PAGE_ALIGN(NextEntry) != NextEntry)
{
//
@@ -346,14 +356,14 @@
// This is the entry that will actually end up holding the
// allocation!
//
- Entry += Entry->BlockSize;
+ Entry = POOL_NEXT_BLOCK(Entry);
Entry->PreviousSize = FragmentEntry->BlockSize;
//
// And now let's go to the entry after that one and check if
// it's still on the same page, and not at the end
//
- NextEntry = Entry + i;
+ NextEntry = POOL_BLOCK(Entry, i);
if (PAGE_ALIGN(NextEntry) != NextEntry)
{
//
@@ -387,7 +397,7 @@
// Insert the free entry into the free list for this size
//
InsertTailList(&PoolDesc->ListHeads[BlockSize - 1],
- (PLIST_ENTRY)FragmentEntry + 1);
+ POOL_FREE_BLOCK(FragmentEntry));
}
}
@@ -402,7 +412,9 @@
// Return the pool allocation
//
Entry->PoolTag = Tag;
- return ++Entry;
+ (POOL_FREE_BLOCK(Entry))->Flink = NULL;
+ (POOL_FREE_BLOCK(Entry))->Blink = NULL;
+ return POOL_FREE_BLOCK(Entry);
}
} while (++ListHead != &PoolDesc->ListHeads[POOL_LISTS_PER_PAGE]);
@@ -410,6 +422,7 @@
// There were no free entries left, so we have to allocate a new fresh page
//
Entry = MiAllocatePoolPages(PoolType, PAGE_SIZE);
+ ASSERT(Entry != NULL);
Entry->Ulong1 = 0;
Entry->BlockSize = i;
Entry->PoolType = PoolType + 1;
@@ -420,8 +433,8 @@
// to create now. The free bytes are the whole page minus what was allocated
// and then converted into units of block headers.
//
- BlockSize = (PAGE_SIZE / sizeof(POOL_HEADER)) - i;
- FragmentEntry = Entry + i;
+ BlockSize = (PAGE_SIZE / POOL_BLOCK_SIZE) - i;
+ FragmentEntry = POOL_BLOCK(Entry, i);
FragmentEntry->Ulong1 = 0;
FragmentEntry->BlockSize = BlockSize;
FragmentEntry->PreviousSize = i;
@@ -442,8 +455,8 @@
// And insert the free entry into the free list for this block size
//
InsertTailList(&PoolDesc->ListHeads[BlockSize - 1],
- (PLIST_ENTRY)FragmentEntry + 1);
-
+ POOL_FREE_BLOCK(FragmentEntry));
+
//
// Release the pool lock
//
@@ -454,7 +467,7 @@
// And return the pool allocation
//
Entry->PoolTag = Tag;
- return ++Entry;
+ return POOL_FREE_BLOCK(Entry);
}
/*
@@ -485,7 +498,7 @@
POOL_TYPE PoolType;
PPOOL_DESCRIPTOR PoolDesc;
BOOLEAN Combined = FALSE;
-
+#if 1
//
// Check for paged pool
//
@@ -498,6 +511,7 @@
ExFreePagedPool(P);
return;
}
+#endif
//
// Quickly deal with big page allocations
@@ -526,7 +540,7 @@
//
// Get the pointer to the next entry
//
- NextEntry = Entry + BlockSize;
+ NextEntry = POOL_BLOCK(Entry, BlockSize);
//
// Acquire the pool lock
@@ -559,7 +573,7 @@
// The block is at least big enough to have a linked list, so go
// ahead and remove it
//
- RemoveEntryList((PLIST_ENTRY)NextEntry + 1);
+ RemoveEntryList(POOL_FREE_BLOCK(NextEntry));
}
//
@@ -577,7 +591,7 @@
//
// Great, grab that entry and check if it's free
//
- NextEntry = Entry - Entry->PreviousSize;
+ NextEntry = POOL_PREV_BLOCK(Entry);
if (NextEntry->PoolType == 0)
{
//
@@ -596,7 +610,7 @@
// The block is at least big enough to have a linked list, so go
// ahead and remove it
//
- RemoveEntryList((PLIST_ENTRY)NextEntry + 1);
+ RemoveEntryList(POOL_FREE_BLOCK(NextEntry));
}
//
@@ -618,7 +632,7 @@
// page, they could've all been combined).
//
if ((PAGE_ALIGN(Entry) == Entry) &&
- (PAGE_ALIGN(Entry + Entry->BlockSize) == Entry + Entry->BlockSize))
+ (PAGE_ALIGN(POOL_NEXT_BLOCK(Entry)) == POOL_NEXT_BLOCK(Entry)))
{
//
// In this case, release the pool lock, and free the page
@@ -644,7 +658,7 @@
// Get the first combined block (either our original to begin with, or
// the one after the original, depending if we combined with the previous)
//
- NextEntry = Entry + BlockSize;
+ NextEntry = POOL_NEXT_BLOCK(Entry);
//
// As long as the next block isn't on a page boundary, have it point
@@ -656,7 +670,7 @@
//
// Insert this new free block, and release the pool lock
//
- InsertHeadList(&PoolDesc->ListHeads[BlockSize - 1], (PLIST_ENTRY)Entry + 1);
+ InsertHeadList(&PoolDesc->ListHeads[BlockSize - 1], POOL_FREE_BLOCK(Entry));
ExUnlockPool(PoolDesc, OldIrql);
}
Author: sir_richard
Date: Sat Jun 5 18:53:54 2010
New Revision: 47592
URL: http://svn.reactos.org/svn/reactos?rev=47592&view=rev
Log:
[NTOS]: Define the POOL_HEADER for x64.
[NTOS]: Define POOL_BLOCK_SIZE to set the minimum pool block size. In NT, this is equal to a LIST_ENTRY structure, because the Pool Allocator must be able to store a LIST_ENTRY into a freed pool block. This also determines the alignment of pool allocations. So 8 on x86, 16 on x64.
[NTOS]: Don't depend on LIST_ENTRY, but use POOL_BLOCK_SIZE instead (on IA64, if we ever want to support this, the pool block size is different from a LIST_ENTRY/POOL_HEADER).
[NTOS]: The following ASSERTs must hold: the POOL_HEADER must be as big as the smallest pool block (POOL_BLOCK_SIZE), which must be at least as big as a LIST_ENTRY structure. 8 == 8 == 8 on x86, 16 == 16 == 16 on x64.
Modified:
trunk/reactos/ntoskrnl/mm/ARM3/miarm.h
Modified: trunk/reactos/ntoskrnl/mm/ARM3/miarm.h
URL: http://svn.reactos.org/svn/reactos/trunk/reactos/ntoskrnl/mm/ARM3/miarm.h?r…
==============================================================================
--- trunk/reactos/ntoskrnl/mm/ARM3/miarm.h [iso-8859-1] (original)
+++ trunk/reactos/ntoskrnl/mm/ARM3/miarm.h [iso-8859-1] Sat Jun 5 18:53:54 2010
@@ -225,13 +225,18 @@
// Special IRQL value (found in assertions)
//
#define MM_NOIRQL (KIRQL)0xFFFFFFFF
-
+
//
// FIXFIX: These should go in ex.h after the pool merge
//
-#define POOL_LISTS_PER_PAGE (PAGE_SIZE / sizeof(LIST_ENTRY))
+#ifdef _M_AMD64
+#define POOL_BLOCK_SIZE 16
+#else
+#define POOL_BLOCK_SIZE 8
+#endif
+#define POOL_LISTS_PER_PAGE (PAGE_SIZE / POOL_BLOCK_SIZE)
#define BASE_POOL_TYPE_MASK 1
-#define POOL_MAX_ALLOC (PAGE_SIZE - (sizeof(POOL_HEADER) + sizeof(LIST_ENTRY)))
+#define POOL_MAX_ALLOC (PAGE_SIZE - (sizeof(POOL_HEADER) + POOL_BLOCK_SIZE))
typedef struct _POOL_DESCRIPTOR
{
@@ -256,16 +261,30 @@
{
struct
{
+#ifdef _M_AMD64
+ ULONG PreviousSize:8;
+ ULONG PoolIndex:8;
+ ULONG BlockSize:8;
+ ULONG PoolType:8;
+#else
USHORT PreviousSize:9;
USHORT PoolIndex:7;
USHORT BlockSize:9;
USHORT PoolType:7;
+#endif
};
ULONG Ulong1;
};
+#ifdef _M_AMD64
+ ULONG PoolTag;
+#endif
union
{
+#ifdef _M_AMD64
+ PEPROCESS ProcessBilled;
+#else
ULONG PoolTag;
+#endif
struct
{
USHORT AllocatorBackTraceIndex;
@@ -274,11 +293,8 @@
};
} POOL_HEADER, *PPOOL_HEADER;
-//
-// Everything depends on this
-//
-C_ASSERT(sizeof(POOL_HEADER) == 8);
-C_ASSERT(sizeof(POOL_HEADER) == sizeof(LIST_ENTRY));
+C_ASSERT(sizeof(POOL_HEADER) == POOL_BLOCK_SIZE);
+C_ASSERT(POOL_BLOCK_SIZE == sizeof(LIST_ENTRY));
extern ULONG ExpNumberOfPagedPools;
extern POOL_DESCRIPTOR NonPagedPoolDescriptor;
Author: sir_richard
Date: Sat Jun 5 16:59:50 2010
New Revision: 47589
URL: http://svn.reactos.org/svn/reactos?rev=47589&view=rev
Log:
[NTOS]: Don't assume that ANY fault in the system address range not associated with a memory area might be ARM3. Instead, since this hack only exists for early boot paged pool support, only treat this as an ARM3 fault when it happens in the paged pool area or higher. Leads to more direct Mm crashes when invalid page access happens, instead of infinite "PAGE FAULT ON PAGE TABLES".
Modified:
trunk/reactos/ntoskrnl/mm/mmfault.c
Modified: trunk/reactos/ntoskrnl/mm/mmfault.c
URL: http://svn.reactos.org/svn/reactos/trunk/reactos/ntoskrnl/mm/mmfault.c?rev=…
==============================================================================
--- trunk/reactos/ntoskrnl/mm/mmfault.c [iso-8859-1] (original)
+++ trunk/reactos/ntoskrnl/mm/mmfault.c [iso-8859-1] Sat Jun 5 16:59:50 2010
@@ -284,13 +284,13 @@
* can go away.
*/
MemoryArea = MmLocateMemoryAreaByAddress(MmGetKernelAddressSpace(), Address);
- if ((!(MemoryArea) && ((ULONG_PTR)Address >= (ULONG_PTR)MmSystemRangeStart)) ||
+ if ((!(MemoryArea) && ((ULONG_PTR)Address >= (ULONG_PTR)MmPagedPoolStart)) ||
((MemoryArea) && (MemoryArea->Type == MEMORY_AREA_OWNED_BY_ARM3)))
{
//
// Hand it off to more competent hands...
//
- DPRINT1("ARM3 fault\n");
+ DPRINT1("ARM3 fault %p\n", MemoryArea);
return MmArmAccessFault(StoreInstruction, Address, Mode, TrapInformation);
}