https://git.reactos.org/?p=reactos.git;a=commitdiff;h=f06b58925d9819c08bbf1…
commit f06b58925d9819c08bbf1482ca9d0435182bf656
Author: Jérôme Gardou <jerome.gardou(a)reactos.org>
AuthorDate: Mon Feb 22 17:30:24 2021 +0100
Commit: Jérôme Gardou <zefklop(a)users.noreply.github.com>
CommitDate: Thu Mar 18 12:24:21 2021 +0100
[NTOS:MM] Implement shrinking big pool allocation table
Shrink the table when only 1/8 of its allocated capacity is in use (so it ends up
about 25% used once the shrink completes)
Expand the table when 3/4 of its allocated capacity is in use (so it ends up
~37.5% used once the doubling completes)
---
ntoskrnl/mm/ARM3/expool.c | 85 +++++++++++++++++++++++++++++++++++------------
1 file changed, 64 insertions(+), 21 deletions(-)
diff --git a/ntoskrnl/mm/ARM3/expool.c b/ntoskrnl/mm/ARM3/expool.c
index 1ea7ee6cc46..fb55f9159ba 100644
--- a/ntoskrnl/mm/ARM3/expool.c
+++ b/ntoskrnl/mm/ARM3/expool.c
@@ -1446,14 +1446,14 @@ ExGetPoolTagInfo(IN PSYSTEM_POOLTAG_INFORMATION
SystemInformation,
}
_IRQL_requires_(DISPATCH_LEVEL)
+static
BOOLEAN
-NTAPI
-ExpExpandBigPageTable(
- _In_ _IRQL_restores_ KIRQL OldIrql)
+ExpReallocateBigPageTable(
+ _In_ _IRQL_restores_ KIRQL OldIrql,
+ _In_ BOOLEAN Shrink)
{
- ULONG OldSize = PoolBigPageTableSize;
- ULONG NewSize = 2 * OldSize;
- ULONG NewSizeInBytes;
+ SIZE_T OldSize = PoolBigPageTableSize;
+ SIZE_T NewSize, NewSizeInBytes;
PPOOL_TRACKER_BIG_PAGES NewTable;
PPOOL_TRACKER_BIG_PAGES OldTable;
ULONG i;
@@ -1465,11 +1465,41 @@ ExpExpandBigPageTable(
ASSERT(KeGetCurrentIrql() == DISPATCH_LEVEL);
/* Make sure we don't overflow */
- if (!NT_SUCCESS(RtlULongMult(2,
- OldSize * sizeof(POOL_TRACKER_BIG_PAGES),
- &NewSizeInBytes)))
+ if (Shrink)
+ {
+ NewSize = OldSize / 2;
+
+ /* Make sure we don't shrink too much. */
+ ASSERT(NewSize >= ExpPoolBigEntriesInUse);
+
+ NewSize = ALIGN_UP_BY(NewSize, PAGE_SIZE / sizeof(POOL_TRACKER_BIG_PAGES));
+ ASSERT(NewSize <= OldSize);
+
+ /* If there is only one page left, then keep it around. Not a failure either. */
+ if (NewSize == OldSize)
+ {
+ ASSERT(NewSize == (PAGE_SIZE / sizeof(POOL_TRACKER_BIG_PAGES)));
+ KeReleaseSpinLock(&ExpLargePoolTableLock, OldIrql);
+ return TRUE;
+ }
+ }
+ else
{
- DPRINT1("Overflow expanding big page table. Size=%lu\n", OldSize);
+ if (!NT_SUCCESS(RtlSIZETMult(2, OldSize, &NewSize)))
+ {
+ DPRINT1("Overflow expanding big page table. Size=%lu\n", OldSize);
+ KeReleaseSpinLock(&ExpLargePoolTableLock, OldIrql);
+ return FALSE;
+ }
+
+ /* Make sure we don't stupidly waste pages */
+ NewSize = ALIGN_DOWN_BY(NewSize, PAGE_SIZE / sizeof(POOL_TRACKER_BIG_PAGES));
+ ASSERT(NewSize > OldSize);
+ }
+
+ if (!NT_SUCCESS(RtlSIZETMult(sizeof(POOL_TRACKER_BIG_PAGES), NewSize,
&NewSizeInBytes)))
+ {
+ DPRINT1("Overflow while calculating big page table size. Size=%lu\n",
OldSize);
KeReleaseSpinLock(&ExpLargePoolTableLock, OldIrql);
return FALSE;
}
@@ -1482,7 +1512,7 @@ ExpExpandBigPageTable(
return FALSE;
}
- DPRINT("Expanding big pool tracker table to %lu entries\n", NewSize);
+ DPRINT("%s big pool tracker table to %lu entries\n", Shrink ?
"Shrinking" : "Expanding", NewSize);
/* Initialize the new table */
RtlZeroMemory(NewTable, NewSizeInBytes);
@@ -1503,15 +1533,16 @@ ExpExpandBigPageTable(
}
/* Recalculate the hash due to the new table size */
- Hash = ExpComputePartialHashForAddress(OldTable[i].Va) & HashMask;
+ Hash = ExpComputePartialHashForAddress(OldTable[i].Va) % HashMask;
/* Find the location in the new table */
while (!((ULONG_PTR)NewTable[Hash].Va & POOL_BIG_TABLE_ENTRY_FREE))
{
- Hash = (Hash + 1) & HashMask;
+ if (++Hash == NewSize)
+ Hash = 0;
}
- /* We just enlarged the table, so we must have space */
+ /* We must have space */
ASSERT((ULONG_PTR)NewTable[Hash].Va & POOL_BIG_TABLE_ENTRY_FREE);
/* Finally, copy the item */
@@ -1587,20 +1618,20 @@ Retry:
//
// Add one more entry to the count, and see if we're getting within
- // 25% of the table size, at which point we'll do an expansion now
+ // 75% of the table size, at which point we'll do an expansion now
// to avoid blocking too hard later on.
//
// Note that we only do this if it's also been the 16th time that we
// keep losing the race or that we are not finding a free entry anymore,
// which implies a massive number of concurrent big pool allocations.
//
- InterlockedIncrementUL(&ExpPoolBigEntriesInUse);
- if ((i >= 16) && (ExpPoolBigEntriesInUse > (TableSize / 4)))
+ ExpPoolBigEntriesInUse++;
+ if ((i >= 16) && (ExpPoolBigEntriesInUse > (TableSize * 3 /
4)))
{
DPRINT("Attempting expansion since we now have %lu entries\n",
ExpPoolBigEntriesInUse);
ASSERT(TableSize == PoolBigPageTableSize);
- ExpExpandBigPageTable(OldIrql);
+ ExpReallocateBigPageTable(OldIrql, FALSE);
return TRUE;
}
@@ -1626,7 +1657,7 @@ Retry:
// to attempt expanding the table
//
ASSERT(TableSize == PoolBigPageTableSize);
- if (ExpExpandBigPageTable(OldIrql))
+ if (ExpReallocateBigPageTable(OldIrql, FALSE))
{
goto Retry;
}
@@ -1704,8 +1735,20 @@ ExpFindAndRemoveTagBigPages(IN PVOID Va,
// the lock and return the tag that was located
//
InterlockedIncrement((PLONG)&Entry->Va);
- InterlockedDecrementUL(&ExpPoolBigEntriesInUse);
- KeReleaseSpinLock(&ExpLargePoolTableLock, OldIrql);
+
+ ExpPoolBigEntriesInUse--;
+
+ /* If reaching 12.5% of the size (or whatever integer rounding gets us to),
+ * halve the allocation size, which will get us to 25% of space used. */
+ if (ExpPoolBigEntriesInUse < (PoolBigPageTableSize / 8))
+ {
+ /* Shrink the table. */
+ ExpReallocateBigPageTable(OldIrql, TRUE);
+ }
+ else
+ {
+ KeReleaseSpinLock(&ExpLargePoolTableLock, OldIrql);
+ }
return PoolTag;
}