Author: tkreuzer
Date: Thu Mar 29 10:14:47 2012
New Revision: 56272
URL: http://svn.reactos.org/svn/reactos?rev=56272&view=rev
Log:
[NTOSKRNL]
"while (TRUE);" is probably the worst way of handling critical errors / unhandled failure paths! Replace that with ASSERT(FALSE);
Modified:
trunk/reactos/ntoskrnl/mm/ARM3/section.c
Modified: trunk/reactos/ntoskrnl/mm/ARM3/section.c
URL: http://svn.reactos.org/svn/reactos/trunk/reactos/ntoskrnl/mm/ARM3/section.c…
==============================================================================
--- trunk/reactos/ntoskrnl/mm/ARM3/section.c [iso-8859-1] (original)
+++ trunk/reactos/ntoskrnl/mm/ARM3/section.c [iso-8859-1] Thu Mar 29 10:14:47 2012
@@ -391,8 +391,7 @@
if (ProtoPte >= LastProtoPte)
{
/* But we don't handle this yet */
- UNIMPLEMENTED;
- while (TRUE);
+ ASSERT(FALSE);
}
/* The PTE should be completely clear */
@@ -717,8 +716,7 @@
if (*ViewSize > SectionSize)
{
/* We should probably fail. FIXME TODO */
- UNIMPLEMENTED;
- while (TRUE);
+ ASSERT(FALSE);
}
/* Get the number of 64K buckets required for this mapping */
@@ -729,8 +727,7 @@
if (Buckets >= MI_SYSTEM_VIEW_BUCKET_SIZE)
{
/* We should probably fail */
- UNIMPLEMENTED;
- while (TRUE);
+ ASSERT(FALSE);
}
/* Insert this view into system space and get a base address for it */
Author: tkreuzer
Date: Thu Mar 29 10:07:25 2012
New Revision: 56271
URL: http://svn.reactos.org/svn/reactos?rev=56271&view=rev
Log:
[NTOSKRNL/AMD64]
- Only initialize the nonpged pool after the pfn database was initialized
- "Fix" IRQL for the initialization of non paged pool
Modified:
trunk/reactos/ntoskrnl/mm/amd64/init.c
Modified: trunk/reactos/ntoskrnl/mm/amd64/init.c
URL: http://svn.reactos.org/svn/reactos/trunk/reactos/ntoskrnl/mm/amd64/init.c?r…
==============================================================================
--- trunk/reactos/ntoskrnl/mm/amd64/init.c [iso-8859-1] (original)
+++ trunk/reactos/ntoskrnl/mm/amd64/init.c [iso-8859-1] Thu Mar 29 10:07:25 2012
@@ -351,8 +351,6 @@
MiInitializeNonPagedPool();
MiInitializeNonPagedPoolThresholds();
- /* Initialize the nonpaged pool */
- InitializePool(NonPagedPool, 0);
}
VOID
@@ -655,7 +653,7 @@
PageCount = MxFreeDescriptor->BasePage - BasePage;
MiAddDescriptorToDatabase(BasePage, PageCount, LoaderMemoryData);
- // Reset the descriptor back so we can create the correct memory blocks
+ /* Reset the descriptor back so we can create the correct memory blocks */
*MxFreeDescriptor = MxOldFreeDescriptor;
}
@@ -698,7 +696,16 @@
/* Now process the page tables */
MiBuildPfnDatabaseFromPageTables();
+ /* PFNs are initialized now! */
MiPfnsInitialized = TRUE;
+
+ //KeLowerIrql(OldIrql);
+
+ /* Need to be at DISPATCH_LEVEL for InitializePool */
+ //KeRaiseIrql(DISPATCH_LEVEL, &OldIrql);
+
+ /* Initialize the nonpaged pool */
+ InitializePool(NonPagedPool, 0);
KeLowerIrql(OldIrql);
Author: arty
Date: Thu Mar 29 06:04:34 2012
New Revision: 56269
URL: http://svn.reactos.org/svn/reactos?rev=56269&view=rev
Log:
[NTOSKRNL]
Small edit.
Modified:
trunk/reactos/ntoskrnl/cache/section/data.c
Modified: trunk/reactos/ntoskrnl/cache/section/data.c
URL: http://svn.reactos.org/svn/reactos/trunk/reactos/ntoskrnl/cache/section/dat…
==============================================================================
--- trunk/reactos/ntoskrnl/cache/section/data.c [iso-8859-1] (original)
+++ trunk/reactos/ntoskrnl/cache/section/data.c [iso-8859-1] Thu Mar 29 06:04:34 2012
@@ -68,7 +68,7 @@
handling a page fault, we place the swap entry MM_WAIT_ENTRY in the page table
at the fault address (this works on either the section page table or a process
address space), perform any blocking operations required, then replace the
-entry with
+entry.
*/
Author: arty
Date: Thu Mar 29 06:01:52 2012
New Revision: 56268
URL: http://svn.reactos.org/svn/reactos?rev=56268&view=rev
Log:
[NEWCC]
Add some prose describing this functionality.
Dedicated to timo, chongo, goto and ??=
Just formatting and comments.
Modified:
trunk/reactos/ntoskrnl/cache/copysup.c
trunk/reactos/ntoskrnl/cache/fssup.c
trunk/reactos/ntoskrnl/cache/pinsup.c
Modified: trunk/reactos/ntoskrnl/cache/copysup.c
URL: http://svn.reactos.org/svn/reactos/trunk/reactos/ntoskrnl/cache/copysup.c?r…
==============================================================================
--- trunk/reactos/ntoskrnl/cache/copysup.c [iso-8859-1] (original)
+++ trunk/reactos/ntoskrnl/cache/copysup.c [iso-8859-1] Thu Mar 29 06:01:52 2012
@@ -28,6 +28,16 @@
/* FUNCTIONS ******************************************************************/
+/*
+
+CcCopyRead can be called for a region of any size and alignment, so we must
+crawl the cache space, focusing one cache stripe after another and using
+RtlCopyMemory to copy the input data into the cache. In constrained memory,
+pages faulted into new stripes are often taken from old stripes, causing the
+old stripes to be flushed right away. In the case of many short buffered in
+order writes, like the ones generated by stdio, this can be really efficient.
+
+*/
BOOLEAN
NTAPI
CcCopyRead(IN PFILE_OBJECT FileObject,
Modified: trunk/reactos/ntoskrnl/cache/fssup.c
URL: http://svn.reactos.org/svn/reactos/trunk/reactos/ntoskrnl/cache/fssup.c?rev…
==============================================================================
--- trunk/reactos/ntoskrnl/cache/fssup.c [iso-8859-1] (original)
+++ trunk/reactos/ntoskrnl/cache/fssup.c [iso-8859-1] Thu Mar 29 06:01:52 2012
@@ -27,6 +27,45 @@
CLIENT_ID CcUnmapThreadId, CcLazyWriteThreadId;
FAST_MUTEX GlobalPageOperation;
+/*
+
+A note about private cache maps.
+
+CcInitializeCacheMap and CcUninitializeCacheMap are not meant to be paired,
+although they can work that way.
+
+The actual operation I've gleaned from reading both Jan Kratochvil's writing
+and real filesystems is this:
+
+CcInitializeCacheMap means:
+
+Make the indicated FILE_OBJECT have a private cache map if it doesn't already
+and make it have a shared cache map if it doesn't already.
+
+CcUninitializeCacheMap means:
+
+Take away the private cache map from this FILE_OBJECT. If it's the last
+private cache map corresponding to a specific shared cache map (the one that
+was present in the FILE_OBJECT when it was created), then delete that too,
+flushing all cached information.
+
+Using these simple semantics, filesystems can do all the things they actually
+do:
+
+- Copy out the shared cache map pointer from a newly initialized file object
+and store it in the fcb cache.
+- Copy it back into any file object and call CcInitializeCacheMap to make
+that file object be associated with the caching of all the other siblings.
+- Call CcUninitializeCacheMap on a FILE_OBJECT many times, but have only the
+first one count for each specific FILE_OBJECT.
+- Have the actual last call to CcUninitializeCacheMap (that is, the one that
+causes zero private cache maps to be associated with a shared cache map) to
+delete the cache map and flush.
+
+So private cache map here is a light weight structure that just remembers
+what shared cache map it associates with.
+
+ */
typedef struct _NOCC_PRIVATE_CACHE_MAP
{
LIST_ENTRY ListEntry;
@@ -98,6 +137,19 @@
Map->Callbacks.ReleaseFromLazyWrite(Map->LazyContext);
}
+/*
+
+Cc functions are required to treat alternate streams of a file as the same
+for the purpose of caching, meaning that we must be able to find the shared
+cache map associated with the ``real'' stream associated with a stream file
+object, if one exists. We do that by identifying a private cache map in
+our gamut that has the same volume, device and fscontext as the stream file
+object we're holding. It's heavy but it does work. This can probably be
+improved, although there doesn't seem to be any real association between
+a stream file object and a sibling file object in the file object struct
+itself.
+
+ */
// Must have CcpLock()
PFILE_OBJECT CcpFindOtherStreamFileObject(PFILE_OBJECT FileObject)
{
@@ -141,6 +193,8 @@
PNOCC_PRIVATE_CACHE_MAP PrivateCacheMap = FileObject->PrivateCacheMap;
CcpLock();
+ /* We don't have a shared cache map. First find out if we have a sibling
+ stream file object we can take it from. */
if (!Map && FileObject->Flags & FO_STREAM_FILE)
{
PFILE_OBJECT IdenticalStreamFileObject =
@@ -154,6 +208,7 @@
FileObject, IdenticalStreamFileObject, Map);
}
}
+ /* We still don't have a shared cache map. We need to create one. */
if (!Map)
{
DPRINT("Initializing file object for (%p) %wZ\n", FileObject, &FileObject->FileName);
@@ -170,6 +225,9 @@
InsertTailList(&CcpAllSharedCacheMaps, &Map->Entry);
DPRINT("New Map %x\n", Map);
}
+ /* We don't have a private cache map. Link it with the shared cache map
+ to serve as a held reference. When the list in the shared cache map
+ is empty, we know we can delete it. */
if (!PrivateCacheMap)
{
PrivateCacheMap = ExAllocatePool(NonPagedPool, sizeof(*PrivateCacheMap));
@@ -183,6 +241,14 @@
CcpUnlock();
}
+
+/*
+
+This function is used by NewCC's MM to determine whether any section objects
+for a given file are not cache sections. If that's true, we're not allowed
+to resize the file, although nothing actually prevents us from doing so ;-)
+
+ */
ULONG
NTAPI
@@ -210,18 +276,25 @@
ASSERT(UninitializeEvent == NULL);
+ /* It may not be strictly necessary to flush here, but we do just for
+ kicks. */
if (Map)
CcpFlushCache(Map, NULL, 0, NULL, FALSE);
CcpLock();
+ /* We have a private cache map, so we've been initialized and haven't been
+ * uninitialized. */
if (PrivateCacheMap)
{
ASSERT(!Map || Map == PrivateCacheMap->Map);
ASSERT(PrivateCacheMap->FileObject == FileObject);
RemoveEntryList(&PrivateCacheMap->ListEntry);
+ /* That was the last private cache map. It's time to delete all
+ cache stripes and all aspects of caching on the file. */
if (IsListEmpty(&PrivateCacheMap->Map->PrivateCacheMaps))
{
+ /* Get rid of all the cache stripes. */
while (!IsListEmpty(&PrivateCacheMap->Map->AssociatedBcb))
{
PNOCC_BCB Bcb = CONTAINING_RECORD(PrivateCacheMap->Map->AssociatedBcb.Flink, NOCC_BCB, ThisFileList);
@@ -242,9 +315,19 @@
DPRINT("Uninit complete\n");
+ /* The return from CcUninitializeCacheMap means that 'caching was stopped'.
+ */
return LastMap;
}
+/*
+
+CcSetFileSizes is used to tell the cache manager that the file changed
+size. In our case, we use the internal Mm method MmExtendCacheSection
+to notify Mm that our section potentially changed size, which may mean
+truncating off data.
+
+ */
VOID
NTAPI
CcSetFileSizes(IN PFILE_OBJECT FileObject,
@@ -298,6 +381,13 @@
while (TRUE);
}
+/*
+
+This could be implemented much more intelligently by mapping instances
+of a CoW zero page into the affected regions. We just RtlZeroMemory
+for now.
+
+*/
BOOLEAN
NTAPI
CcZeroData(IN PFILE_OBJECT FileObject,
Modified: trunk/reactos/ntoskrnl/cache/pinsup.c
URL: http://svn.reactos.org/svn/reactos/trunk/reactos/ntoskrnl/cache/pinsup.c?re…
==============================================================================
--- trunk/reactos/ntoskrnl/cache/pinsup.c [iso-8859-1] (original)
+++ trunk/reactos/ntoskrnl/cache/pinsup.c [iso-8859-1] Thu Mar 29 06:01:52 2012
@@ -21,6 +21,73 @@
* This helped me determine that a certain bug was not a memory overwrite. */
//#define PIN_WRITE_ONLY
+/*
+
+Pinsup implements the core of NewCC.
+
+A couple of things about this code:
+
+I wrote this code over the course of about 2 years, often referring to Rajeev
+Nagar's Filesystem Internals book, the MSDN pages on the Cc interface, and
+a few NT filesystems that are open sourced. I went to fairly great lengths to
+achieve a couple of goals.
+
+1) To make a strictly layered facility that relies entirely on Mm to provide
+maps. There were many ways in which data segments in the legacy Mm were unable
+to provide what I needed; page maps were only 4 gig, and all offsets were in
+ULONG, so no mapping at an offset greater than 4 gig was possible. Worse than
+that, due to a convoluted set of dependencies, it would have been impossible to
+support any two mappings farther apart than 4 gig, even if the above was
+corrected. Along with that, the cache system's ownership of some pages was
+integral to the operation of legacy Mm. All of the above problems, along with
+an ambiguity about when the size of a file for mapping purposes is acquired,
+and its inability to allow a file to be resized when any mappings were active
+led me to rewrite data sections (and all other kinds of sections in the
+original version), and use that layer to implement the Cc API without regard
+to any internal, undocumented parts.
+
+2) To write the simplest possible code that implements the Cc interface as
+documented. Again this is without regard to any information that might be
+gained through reverse engineering the real Cc. All conclusions about workings
+of Cc here are mine, any failures are mine, any differences to the documented
+interface were introduced by me due to misreading, misunderstanding or
+misremembering while implementing the code. I also implemented some obvious, but
+not actually specified behaviors of Cc, for example that each cache stripe is
+represented by a distinct BCB that the user can make decisions about as an
+opaque pointer.
+
+3) To make real filesystems work properly.
+
+So about how it works:
+
+CcCacheSections is the collection of cache sections that are currently mapped.
+The cache ranges which are allocated and contain pages is larger, due to the
+addition of sections containing rmaps and page references, but this array
+determines the actual mapped pages on behalf of all mapped files for Cc's use.
+All BCB pointers yielded to a driver are a pointer to one of these cache stripe
+structures. The data structure is specified as opaque and so it contains
+information convenient to NEWCC's implementation here. Free entries are
+summarized in CcpBitmapBuffer, for which bits are set when the entry may be
+safely evicted and redirected for use by another client. Note that the
+reference count for an evictable cache section will generally be 1, since
+we'll keep a reference to wait for any subsequent mapping of the same stripe.
+We use CcCacheClockHand as a hint to start checking free bits at a point that
+walks around the cache stripe list, so that we might evict a different stripe
+every time even if all are awaiting reuse. This is a way to avoid thrashing.
+
+CcpBitmapBuffer is the RTL_BITMAP that allows us to quickly decide what buffer
+to allocate from the mapped buffer set.
+
+CcDeleteEvent is an event used to wait for a cache stripe reference count to
+go to 1, thus making the stripe eligible for eviction. It's used by CcpMapData
+to wait for a free map when we can't fail.
+
+All in all, use of Mm by Cc makes this code into a simple manager that wields
+sections on behalf of filesystems. As such, its code is fairly high level and
+no architecture specific changes should be necessary.
+
+*/
+
/* GLOBALS ********************************************************************/
#define TAG_MAP_SEC TAG('C', 'c', 'S', 'x')
@@ -54,6 +121,14 @@
PDEVICE_OBJECT
NTAPI
MmGetDeviceObjectForFile(IN PFILE_OBJECT FileObject);
+
+/*
+
+Allocate an almost ordinary section object for use by the cache system.
+The special internal SEC_CACHE flag is used to indicate that the section
+should not count when determining whether the file can be resized.
+
+*/
NTSTATUS CcpAllocateSection
(PFILE_OBJECT FileObject,
@@ -94,6 +169,14 @@
BOOLEAN Dirty;
} WORK_QUEUE_WITH_CONTEXT, *PWORK_QUEUE_WITH_CONTEXT;
+/*
+
+Unmap a cache stripe. Note that cache stripes aren't unmapped when their
+last reference disappears. We enter this code only if cache for the file
+is uninitialized in the last file object, or a cache stripe is evicted.
+
+*/
+
VOID
CcpUnmapCache(PVOID Context)
{
@@ -104,6 +187,20 @@
ExFreePool(WorkItem);
DPRINT("Done\n");
}
+
+/*
+
+Somewhat deceptively named function which removes the last reference to a
+cache stripe and completely removes it using CcUnmapCache. This may be
+done either inline (if the Immediate BOOLEAN is set), or using a work item
+at a later time. Whether this is called to unmap immediately is mainly
+determined by whether the caller is calling from a place in filesystem code
+where a deadlock may occur if immediate flushing is required.
+
+It's always safe to reuse the Bcb at CcCacheSections[Start] after calling
+this.
+
+ */
/* Must have acquired the mutex */
VOID CcpDereferenceCache(ULONG Start, BOOLEAN Immediate)
@@ -186,6 +283,18 @@
DPRINT("Done\n");
}
+/*
+
+CcpAllocateCacheSections is called by CcpMapData to obtain a cache stripe,
+possibly evicting an old stripe by calling CcpDereferenceCache in order to
+obtain an empty Bcb.
+
+This function was named plural due to a question I had at the beginning of
+this endeavor about whether a map may span a 256k stripe boundary. It can't
+so this function can only return the index of one Bcb. Returns INVALID_CACHE
+on failure.
+
+ */
/* Needs mutex */
ULONG CcpAllocateCacheSections
(PFILE_OBJECT FileObject,
@@ -198,12 +307,12 @@
DPRINT("AllocateCacheSections: FileObject %x\n", FileObject);
if (!FileObject->SectionObjectPointer)
- return INVALID_CACHE;
+ return INVALID_CACHE;
Map = (PNOCC_CACHE_MAP)FileObject->SectionObjectPointer->SharedCacheMap;
if (!Map)
- return INVALID_CACHE;
+ return INVALID_CACHE;
DPRINT("Allocating Cache Section\n");
@@ -212,34 +321,34 @@
if (i != INVALID_CACHE)
{
- DPRINT("Setting up Bcb #%x\n", i);
-
- Bcb = &CcCacheSections[i];
+ DPRINT("Setting up Bcb #%x\n", i);
+
+ Bcb = &CcCacheSections[i];
- ASSERT(Bcb->RefCount < 2);
-
- if (Bcb->RefCount > 0)
- {
- CcpDereferenceCache(i, FALSE);
- }
-
- ASSERT(!Bcb->RefCount);
- Bcb->RefCount = 1;
-
- DPRINT("Bcb #%x RefCount %d\n", Bcb - CcCacheSections, Bcb->RefCount);
-
- if (!RtlTestBit(CcCacheBitmap, i))
- {
- DPRINT1("Somebody stoeled BCB #%x\n", i);
- }
- ASSERT(RtlTestBit(CcCacheBitmap, i));
-
- DPRINT("Allocated #%x\n", i);
- ASSERT(CcCacheSections[i].RefCount);
+ ASSERT(Bcb->RefCount < 2);
+
+ if (Bcb->RefCount > 0)
+ {
+ CcpDereferenceCache(i, FALSE);
+ }
+
+ ASSERT(!Bcb->RefCount);
+ Bcb->RefCount = 1;
+
+ DPRINT("Bcb #%x RefCount %d\n", Bcb - CcCacheSections, Bcb->RefCount);
+
+ if (!RtlTestBit(CcCacheBitmap, i))
+ {
+ DPRINT1("Somebody stoeled BCB #%x\n", i);
+ }
+ ASSERT(RtlTestBit(CcCacheBitmap, i));
+
+ DPRINT("Allocated #%x\n", i);
+ ASSERT(CcCacheSections[i].RefCount);
}
else
{
- DPRINT1("Failed to allocate cache segment\n");
+ DPRINT1("Failed to allocate cache segment\n");
}
return i;
}
@@ -262,6 +371,14 @@
Bcb->ExclusiveWaiter++;
}
+/*
+
+Cache stripes have an idea of exclusive access, which would be hard to support
+properly in the previous code. In our case, it's fairly easy, since we have
+an event that indicates that the previous exclusive waiter has returned in each
+Bcb.
+
+*/
/* Must not have the mutex */
VOID CcpReferenceCacheExclusive(ULONG Start)
{
@@ -277,7 +394,19 @@
CcpUnlock();
}
-/* Find a map that encompasses the target range */
+/*
+
+Find a map that encompasses the target range. This function does not check
+whether the desired range is partly outside the stripe. This could be
+implemented with a generic table, but we generally aren't carrying around a lot
+of segments at once for a particular file.
+
+When this returns a map for a given file address, then that address is by
+definition already mapped and can be operated on.
+
+Returns a valid index or INVALID_CACHE.
+
+*/
/* Must have the mutex */
ULONG CcpFindMatchingMap(PLIST_ENTRY Head, PLARGE_INTEGER FileOffset, ULONG Length)
{
@@ -301,6 +430,14 @@
return INVALID_CACHE;
}
+
+/*
+
+Internal function that's used by all pinning functions.
+It causes a mapped region to exist and prefaults the pages in it if possible,
+possibly evicting another stripe in order to get our stripe.
+
+*/
BOOLEAN
NTAPI
@@ -364,6 +501,11 @@
DPRINT("File size %08x%08x\n", Map->FileSizes.ValidDataLength.HighPart, Map->FileSizes.ValidDataLength.LowPart);
+ /* Not all files have length, in fact filesystems often use stream file
+ objects for various internal purposes and are loose about the file
+ length, since the filesystem promises itself to write the right number
+ of bytes to the internal stream. In these cases, we just allow the file
+ to have the full stripe worth of space. */
if (Map->FileSizes.ValidDataLength.QuadPart)
{
SectionSize = min(CACHE_STRIPE, Map->FileSizes.ValidDataLength.QuadPart - Target.QuadPart);
@@ -378,6 +520,8 @@
//ASSERT(SectionSize <= CACHE_STRIPE);
CcpUnlock();
+ /* CcpAllocateSection doesn't need the lock, so we'll give other action
+ a chance in here. */
Status = CcpAllocateSection
(FileObject,
SectionSize,
@@ -399,8 +543,9 @@
retry:
/* Returns a reference */
- DPRINT("Allocating cache sections: %wZ\n", &FileObject->FileName);
+ DPRINT("Allocating cache sections: %wZ\n", &FileObject->FileName);
BcbHead = CcpAllocateCacheSections(FileObject, SectionObject);
+ /* XXX todo: we should handle the immediate fail case here, but don't */
if (BcbHead == INVALID_CACHE)
{
ULONG i;
@@ -429,12 +574,18 @@
ViewSize = CACHE_STRIPE;
Bcb = &CcCacheSections[BcbHead];
+ /* MmMapCacheViewInSystemSpaceAtOffset is one of three methods of Mm
+ that are specific to NewCC. In this case, its implementation
+ exactly mirrors MmMapViewInSystemSpace, but allows an offset to
+ be specified. */
Status = MmMapCacheViewInSystemSpaceAtOffset
(SectionObject->Segment,
&Bcb->BaseAddress,
&Target,
&ViewSize);
+ /* Summary: Failure. Dereference our section and tell the user we failed
+ */
if (!NT_SUCCESS(Status))
{
*BcbResult = NULL;
@@ -447,6 +598,9 @@
goto cleanup;
}
+ /* Summary: Success. Put together a valid Bcb and link it with the others
+ * in the NOCC_CACHE_MAP.
+ */
Success = TRUE;
//DPRINT("w1n\n");
@@ -539,6 +693,8 @@
return Result;
}
+/* Used by functions that repin data; CcpPinMappedData does not alter the map,
+ but finds the appropriate stripe and updates the accounting. */
BOOLEAN
NTAPI
CcpPinMappedData(IN PNOCC_CACHE_MAP Map,
@@ -707,6 +863,32 @@
return Result;
}
+
+/*
+
+CcpUnpinData is the internal function that generally handles unpinning data.
+It may be a little confusing, because of the way reference counts are handled.
+
+A reference count of 2 or greater means that the stripe is still fully pinned
+and can't be removed. If the owner had taken an exclusive reference, then
+give one up. Note that it's an error to take more than one exclusive reference
+or to take a non-exclusive reference after an exclusive reference, so detecting
+or handling that case is not considered.
+
+ReleaseBit is unset if we want to detect when a cache stripe would become
+evictable without actually giving up our reference. We might want to do that
+if we were going to flush before formally releasing the cache stripe, although
+that facility is not used meaningfully at this time.
+
+A reference count of exactly 1 means that the stripe could potentially be
+reused, but could also be evicted for another mapping. In general, most
+stripes should be in that state most of the time.
+
+A reference count of zero means that the Bcb is completely unused. That's the
+start state and the state of a Bcb formerly owned by a file that is
+uninitialized.
+
+*/
BOOLEAN
NTAPI
Author: arty
Date: Thu Mar 29 04:43:44 2012
New Revision: 56267
URL: http://svn.reactos.org/svn/reactos?rev=56267&view=rev
Log:
[NTOSKRNL]
Write a lot of general prose about the operation of the Mm parts of NewCC.
Dedicate to timo and chongo.
No code changes.
Modified:
trunk/reactos/ntoskrnl/cache/section/data.c
trunk/reactos/ntoskrnl/cache/section/fault.c
trunk/reactos/ntoskrnl/cache/section/io.c
trunk/reactos/ntoskrnl/cache/section/reqtools.c
trunk/reactos/ntoskrnl/cache/section/sptab.c
trunk/reactos/ntoskrnl/cache/section/swapout.c
trunk/reactos/ntoskrnl/mm/rmap.c
Modified: trunk/reactos/ntoskrnl/cache/section/data.c
URL: http://svn.reactos.org/svn/reactos/trunk/reactos/ntoskrnl/cache/section/dat…
==============================================================================
--- trunk/reactos/ntoskrnl/cache/section/data.c [iso-8859-1] (original)
+++ trunk/reactos/ntoskrnl/cache/section/data.c [iso-8859-1] Thu Mar 29 04:43:44 2012
@@ -42,6 +42,36 @@
* Herve Poussineau
*/
+/*
+
+A note on this code:
+
+Unlike the previous section code, this code does not rely on an active map
+for a page to exist in a data segment. Each mapping contains a large integer
+offset to map at, and the segment always represents the entire section space
+from zero to the maximum long long. This allows us to associate one single
+page map with each file object, and to let each mapping view an offset into
+the overall mapped file. Temporarily unmapping the file has no effect on the
+section membership.
+
+This necessitates a change in the section page table implementation, which is
+now an RtlGenericTable. This will be elaborated more in sptab.c. One upshot
+of this change is that a mapping of a small files takes a bit more than 1/4
+of the size in nonpaged kernel space as it did previously.
+
+When we need other threads that may be competing for the same page fault to
+wait, we have a mechanism separate from PageOps for dealing with that, which
+was suggested by Travis Geiselbrecht after a conversation I had with Alex
+Ionescu. That mechanism is the MM_WAIT_ENTRY, which is the all-ones SWAPENTRY.
+
+When we wish for other threads to know that we're waiting and will finish
+handling a page fault, we place the swap entry MM_WAIT_ENTRY in the page table
+at the fault address (this works on either the section page table or a process
+address space), perform any blocking operations required, then replace the
+entry with
+
+*/
+
/* INCLUDES *****************************************************************/
#include <ntoskrnl.h>
@@ -163,6 +193,17 @@
return STATUS_SUCCESS;
}
+/*
+
+MiFlushMappedSection
+
+Called from cache code to cause dirty pages of a section
+to be written back. This doesn't affect the mapping.
+
+BaseOffset is the base at which to start writing in file space.
+FileSize is the length of the file as understood by the cache.
+
+ */
NTSTATUS
NTAPI
_MiFlushMappedSection(PVOID BaseAddress,
@@ -299,6 +340,13 @@
return Status;
}
+/*
+
+This deletes a segment entirely including its page map.
+It must have been unmapped in every address space.
+
+ */
+
VOID
NTAPI
MmFinalizeSegment(PMM_SECTION_SEGMENT Segment)
@@ -346,7 +394,7 @@
ULONG AllocationAttributes,
PFILE_OBJECT FileObject)
/*
- * Create a section backed by a data file
+ * Create a section backed by a data file.
*/
{
PROS_SECTION_OBJECT Section;
@@ -641,6 +689,13 @@
return STATUS_SUCCESS;
}
+/*
+
+Completely remove the page at FileOffset in Segment. The page must not
+be mapped.
+
+*/
+
VOID
NTAPI
MiFreeSegmentPage(PMM_SECTION_SEGMENT Segment,
Modified: trunk/reactos/ntoskrnl/cache/section/fault.c
URL: http://svn.reactos.org/svn/reactos/trunk/reactos/ntoskrnl/cache/section/fau…
==============================================================================
--- trunk/reactos/ntoskrnl/cache/section/fault.c [iso-8859-1] (original)
+++ trunk/reactos/ntoskrnl/cache/section/fault.c [iso-8859-1] Thu Mar 29 04:43:44 2012
@@ -43,6 +43,34 @@
* Herve Poussineau
*/
+/*
+
+I've generally organized fault handling code in newmm as handlers that run
+under a single lock acquisition, check the state, and either take necessary
+action atomically, or place a wait entry and return a continuation to the
+caller. This lends itself to code that has a simple, structured form,
+doesn't make assumptions about lock taking and breaking, and provides an
+obvious, graphic separation between code that may block and code that isn't
+allowed to. This file contains the non-blocking half.
+
+In order to request a blocking operation to happen outside locks, place a
+function pointer in the provided MM_REQUIRED_RESOURCES struct and return
+STATUS_MORE_PROCESSING_REQUIRED. The function indicated will receive the
+provided struct and take action outside of any mm related locks and at
+PASSIVE_LEVEL. The same fault handler will be called again after the
+blocking operation succeeds. In this way, the fault handler can accumulate
+state, but will freely work while competing with other threads.
+
+Fault handlers in this file should check for an MM_WAIT_ENTRY in a page
+table they're using and return STATUS_SUCCESS + 1 if it's found. In that
+case, the caller will wait on the wait entry event until the competing thread
+is finished, and recall this handler in the current thread.
+
+Another thing to note here is that we require mappings to exactly mirror
+rmaps, so each mapping should be immediately followed by an rmap addition.
+
+*/
+
/* INCLUDES *****************************************************************/
#include <ntoskrnl.h>
@@ -55,6 +83,22 @@
extern KEVENT MmWaitPageEvent;
extern PMMWSL MmWorkingSetList;
+
+/*
+
+Multiple stage handling of a not-present fault in a data section.
+
+Required->State is used to accumulate flags that indicate the next action
+the handler should take.
+
+State & 2 is currently used to indicate that the page acquired by a previous
+callout is a global page to the section and should be placed in the section
+page table.
+
+Note that the primitive tail recursion done here reaches the base case when
+the page is present.
+
+*/
NTSTATUS
NTAPI
@@ -168,6 +212,10 @@
}
else if (MM_IS_WAIT_PTE(Entry))
{
+ // Whenever MM_WAIT_ENTRY is required as a swap entry, we need to
+ // ask the fault handler to wait until we should continue. Rathern
+ // than recopy this boilerplate code everywhere, we just ask them
+ // to wait.
MmUnlockSectionSegment(Segment);
return STATUS_SUCCESS + 1;
}
@@ -254,6 +302,18 @@
MiUnmapPageInHyperSpace(Process, TempAddress, Irql);
return STATUS_SUCCESS;
}
+
+/*
+
+This function is deceptively named, in that it does the actual work of handling
+access faults on data sections. In the case of the code that's present here,
+we don't allow cow sections, but we do need this to unset the initial
+PAGE_READONLY condition of pages faulted into the cache so that we can add
+a dirty bit in the section page table on the first modification.
+
+In the ultimate form of this code, CoW is reenabled.
+
+*/
NTSTATUS
NTAPI
@@ -344,6 +404,8 @@
else
return STATUS_SUCCESS; // Nonwait swap entry ... handle elsewhere
}
+ /* Call out to acquire a page to copy to. We'll be re-called when
+ * the page has been allocated. */
Required->Page[1] = MmGetPfnForProcess(Process, Address);
Required->Consumer = MC_CACHE;
Required->Amount = 1;
@@ -402,6 +464,15 @@
KEVENT Wait;
AcquireResource DoAcquisition;
} WORK_QUEUE_WITH_CONTEXT, *PWORK_QUEUE_WITH_CONTEXT;
+
+/*
+
+This is the work item used to do blocking resource acquisition when a fault
+handler returns STATUS_MORE_PROCESSING_REQUIRED. It's used to allow resource
+acquisition to take place on a different stack, and outside of any locks used
+by fault handling, making recursive fault handling possible when required.
+
+*/
VOID
NTAPI
@@ -414,6 +485,38 @@
DPRINT("Status %x\n", WorkItem->Status);
KeSetEvent(&WorkItem->Wait, IO_NO_INCREMENT, FALSE);
}
+
+/*
+
+This code seperates the action of fault handling into an upper and lower
+handler to allow the inner handler to optionally be called in work item
+if the stack is getting too deep. My experiments show that the third
+recursive page fault taken at PASSIVE_LEVEL must be shunted away to a
+worker thread. In the ultimate form of this code, the primary fault handler
+makes this decision by using a thread-local counter to detect a too-deep
+fault stack and call the inner fault handler in a worker thread if required.
+
+Note that faults are taken at passive level and have access to ordinary
+driver entry points such as those that read and write files, and filesystems
+should use paged structures whenever possible. This makes recursive faults
+both a perfectly normal occurrence, and a worthwhile case to handle.
+
+The code below will repeatedly call MiCowSectionPage as long as it returns
+either STATUS_SUCCESS + 1 or STATUS_MORE_PROCESSING_REQUIRED. In the more
+processing required case, we call out to a blocking resource acquisition
+function and then recall the fault handler with the shared state represented
+by the MM_REQUIRED_RESOURCES struct.
+
+In the other case, we wait on the wait entry event and recall the handler.
+Each time the wait entry event is signalled, one thread has removed an
+MM_WAIT_ENTRY from a page table.
+
+In the ultimate form of this code, there is a single system wide fault handler
+for each of access fault and not present and each memory area contains a
+function pointer that indicates the active fault handler. Since the mm code
+in reactos is currently fragmented, I didn't bring this change to trunk.
+
+*/
NTSTATUS
NTAPI
@@ -564,6 +667,17 @@
return Status;
}
+/*
+
+This is the outer fault handler mentioned in the description of
+MmpSectionAccsesFaultInner. It increments a fault depth count in the current
+thread.
+
+In the ultimate form of this code, the lower fault handler will optionally
+use the count to keep the kernel stack from overflowing.
+
+*/
+
NTSTATUS
NTAPI
MmAccessFaultCacheSection(KPROCESSOR_MODE Mode,
@@ -612,6 +726,16 @@
return Status;
}
+
+/*
+
+As above, this code separates the active part of fault handling from a carrier
+that can use the thread's active fault count to determine whether a work item
+is required. Also as above, this function repeatedly calls the active not
+present fault handler until a clear success or failure is received, using a
+return of STATUS_MORE_PROCESSING_REQUIRED or STATUS_SUCCESS + 1.
+
+*/
NTSTATUS
NTAPI
@@ -765,6 +889,14 @@
return Status;
}
+/*
+
+Call the inner not present fault handler, keeping track of the fault count.
+In the ultimate form of this code, optionally use a worker thread to handle
+the fault in order to sidestep stack overflow in the multiple fault case.
+
+*/
+
NTSTATUS
NTAPI
MmNotPresentFaultCacheSection(KPROCESSOR_MODE Mode,
Modified: trunk/reactos/ntoskrnl/cache/section/io.c
URL: http://svn.reactos.org/svn/reactos/trunk/reactos/ntoskrnl/cache/section/io.…
==============================================================================
--- trunk/reactos/ntoskrnl/cache/section/io.c [iso-8859-1] (original)
+++ trunk/reactos/ntoskrnl/cache/section/io.c [iso-8859-1] Thu Mar 29 04:43:44 2012
@@ -64,9 +64,13 @@
return IoGetRelatedDeviceObject(FileObject);
}
-/* Note:
- This completion function is really required. Paging io completion does almost
- nothing, including freeing the mdls. */
+/*
+
+Note:
+This completion function is really required. Paging io completion does almost
+nothing, including freeing the mdls.
+
+*/
NTSTATUS
NTAPI
MiSimpleReadComplete(PDEVICE_OBJECT DeviceObject,
@@ -94,6 +98,15 @@
return STATUS_SUCCESS;
}
+
+/*
+
+MiSimpleRead is a convenience function that provides either paging or non
+paging reads. The caching and mm systems use this in paging mode, where
+a completion function is required as above. The Paging BOOLEAN determines
+whether the read is issued as a paging read or as an ordinary buffered read.
+
+*/
NTSTATUS
NTAPI
@@ -177,6 +190,13 @@
return Status;
}
+/*
+
+Convenience function for writing from kernel space. This issues a paging
+write in all cases.
+
+*/
+
NTSTATUS
NTAPI
_MiSimpleWrite(PFILE_OBJECT FileObject,
@@ -259,6 +279,15 @@
extern KEVENT MpwThreadEvent;
FAST_MUTEX MiWriteMutex;
+/*
+
+Function which uses MiSimpleWrite to write back a single page to a file.
+The page in question does not need to be mapped. This function could be
+made a bit more efficient by avoiding the copy and making a system space
+mdl.
+
+*/
+
NTSTATUS
NTAPI
_MiWriteBackPage(PFILE_OBJECT FileObject,
Modified: trunk/reactos/ntoskrnl/cache/section/reqtools.c
URL: http://svn.reactos.org/svn/reactos/trunk/reactos/ntoskrnl/cache/section/req…
==============================================================================
--- trunk/reactos/ntoskrnl/cache/section/reqtools.c [iso-8859-1] (original)
+++ trunk/reactos/ntoskrnl/cache/section/reqtools.c [iso-8859-1] Thu Mar 29 04:43:44 2012
@@ -42,6 +42,13 @@
* Herve Poussineau
*/
+/*
+ This file contains functions used by fault.c to do blocking resource
+ acquisition. To call one of these functions, fill out your
+ MM_REQUIRED_RESOURCES with a pointer to the desired function and configure
+ the other members as below.
+ */
+
/* INCLUDES *****************************************************************/
#include <ntoskrnl.h>
@@ -55,6 +62,22 @@
NTAPI
MmBuildMdlFromPages(PMDL Mdl, PPFN_NUMBER Pages);
+/*
+
+Blocking function to acquire zeroed pages from the balancer.
+
+Upon entry:
+
+Required->Amount: Number of pages to acquire
+Required->Consumer: consumer to charge the page to
+
+Upon return:
+
+Required->Pages[0..Amount]: Allocated pages.
+
+The function fails unless all requested pages can be allocated.
+
+ */
NTSTATUS
NTAPI
MiGetOnePage(PMMSUPPORT AddressSpace,
@@ -85,6 +108,26 @@
return Status;
}
+/*
+
+Blocking function to read (part of) a page from a file.
+
+Upon entry:
+
+Required->Context: a FILE_OBJECT to read
+Required->Consumer: consumer to charge the page to
+Required->FileOffset: Offset to read at
+Required->Amount: Number of bytes to read (0 -> 4096)
+
+Upon return:
+
+Required->Page[Required->Offset]: The allocated and read in page
+
+The indicated page is filled to Required->Amount with file data and zeroed
+afterward.
+
+ */
+
NTSTATUS
NTAPI
MiReadFilePage(PMMSUPPORT AddressSpace,
@@ -158,6 +201,21 @@
return STATUS_SUCCESS;
}
+/*
+
+Blocking function to read a swap page into a memory page.
+
+Upon entry:
+
+Required->Consumer: consumer to charge the page to
+Required->SwapEntry: swap entry to use
+
+Upon return:
+
+Required->Page[Required->Offset]: Populated page
+
+*/
+
NTSTATUS
NTAPI
MiSwapInPage(PMMSUPPORT AddressSpace,
@@ -192,6 +250,22 @@
return Status;
}
+
+/*
+
+A way to write a page without a lock acquired using the same blocking mechanism
+as resource acquisition.
+
+Upon entry:
+
+Required->Page[Required->Offset]: Page to write
+Required->Context: FILE_OBJECT to write to
+Required->FileOffset: offset to write at
+
+This always does a paging write with whole page size. Note that paging IO
+doesn't change the valid data length of a file.
+
+*/
NTSTATUS
NTAPI
Modified: trunk/reactos/ntoskrnl/cache/section/sptab.c
URL: http://svn.reactos.org/svn/reactos/trunk/reactos/ntoskrnl/cache/section/spt…
==============================================================================
--- trunk/reactos/ntoskrnl/cache/section/sptab.c [iso-8859-1] (original)
+++ trunk/reactos/ntoskrnl/cache/section/sptab.c [iso-8859-1] Thu Mar 29 04:43:44 2012
@@ -22,6 +22,34 @@
*
* PROGRAMMERS: arty
*/
+
+/*
+
+This file implements the section page table. It relies on rtl generic table
+functionality to provide access to 256-page chunks. Calls to
+MiSetPageEntrySectionSegment and MiGetPageEntrySectionSegment must be
+synchronized by holding the segment lock.
+
+Each page table entry is a ULONG as in x86.
+
+Bit 1 is used as a swap entry indication as in the main page table.
+Bit 2 is used as a dirty indication. A dirty page will eventually be written
+back to the file.
+Bits 3-11 are used as a map count in the legacy mm code. Note that zero is
+illegal, as the legacy code does not take advantage of segment rmaps.
+Therefore, every segment page is mapped in at least one address space, and
+MmUnsharePageEntry is quite complicated. In addition, the page may also be
+owned by the legacy cache manager, giving an implied additional reference.
+Upper bits are a PFN_NUMBER.
+
+These functions, in addition to maintaining the segment page table also
+automatically maintain the segment rmap by calling MmSetSectionAssociation
+and MmDeleteSectionAssociation. Segment rmaps are discussed in rmap.c. The
+upshot is that it is impossible to have a page properly registered in a segment
+page table and not also found in a segment rmap that can be found from the
+paging machinery.
+
+*/
/* INCLUDES *****************************************************************/
@@ -232,6 +260,17 @@
return Result;
}
+/*
+
+Destroy the rtl generic table that serves as the section's page table. Call
+the FreePage function for each non-zero entry in the section page table as
+we go. Note that the page table is still technically valid until after all
+pages are destroyed, as we don't finally destroy the table until we've freed
+each slice. There is no order guarantee for deletion of individual elements
+although it's in-order as written now.
+
+*/
+
VOID
NTAPI
MmFreePageTablesSectionSegment(PMM_SECTION_SEGMENT Segment,
@@ -271,6 +310,21 @@
DPRINT("Done\n");
}
+/*
+
+Retrieves the MM_SECTION_SEGMENT and fills in the LARGE_INTEGER Offset given
+by the caller that corresponds to the page specified. This uses
+MmGetSegmentRmap to find the rmap belonging to the segment itself, and uses
+the result as a pointer to a 256-entry page table structure. The rmap also
+includes 8 bits of offset information indicating one of 256 page entries that
+the rmap corresponds to. This information together gives us an exact offset
+into the file, as well as the MM_SECTION_SEGMENT pointer stored in the page
+table slice.
+
+NULL is returned if there is no segment rmap for the page.
+
+*/
+
PMM_SECTION_SEGMENT
NTAPI
MmGetSectionAssociation(PFN_NUMBER Page,
Modified: trunk/reactos/ntoskrnl/cache/section/swapout.c
URL: http://svn.reactos.org/svn/reactos/trunk/reactos/ntoskrnl/cache/section/swa…
==============================================================================
--- trunk/reactos/ntoskrnl/cache/section/swapout.c [iso-8859-1] (original)
+++ trunk/reactos/ntoskrnl/cache/section/swapout.c [iso-8859-1] Thu Mar 29 04:43:44 2012
@@ -43,6 +43,21 @@
* Herve Poussineau
*/
+/*
+
+This file implements page out infrastructure for cache type sections. This
+is implemented a little differently from the legacy mm because mapping in an
+address space and membership in a segment are considered separate.
+
+The general strategy here is to try to remove all mappings as gently as
+possible, then to remove the page entry from the section itself as a final
+step. If at any time during the page out operation, the page is mapped in
+a new address space by a competing thread, the operation will abort before
+the segment page is finally removed, and the page will be naturally faulted
+back into any address spaces required in the normal way.
+
+*/
+
/* INCLUDES *****************************************************************/
#include <ntoskrnl.h>
@@ -58,6 +73,15 @@
FAST_MUTEX MiGlobalPageOperation;
+/*
+
+MmWithdrawSectionPage removes a page entry from the section segment, replacing
+it with a wait entry. The caller must replace the wait entry with a 0, when
+any required writing is done. The wait entry must remain until the page is
+written to protect against cases where a fault brings a stale copy of the page
+back before writing is complete.
+
+*/
PFN_NUMBER
NTAPI
MmWithdrawSectionPage(PMM_SECTION_SEGMENT Segment,
@@ -119,6 +143,22 @@
return 0;
}
}
+
+/*
+
+This function determines whether the segment holds the very last reference to
+the page being considered and if so, writes it back or discards it as
+appropriate. One small niggle here is that we might be holding the last
+reference to the section segment associated with this page. That happens
+when the segment is destroyed at the same time that an active swap operation
+is occurring, and all maps were already withdrawn. In that case, it's our
+responsibility to finalize the segment.
+
+Note that in the current code, WriteZero is always TRUE because the section
+always backs a file. In the ultimate form of this code, it also writes back
+pages without necessarily evicting them. In reactos' trunk, this is vestigial.
+
+*/
NTSTATUS
NTAPI
@@ -219,6 +259,20 @@
DPRINT("Status %x\n", Status);
return Status;
}
+
+/*
+
+The slightly misnamed MmPageOutCacheSection removes a page from an address
+space in the manner of fault handlers found in fault.c. In the ultimate form
+of the code, this is one of the function pointers stored in a memory area
+to control how pages in that memory area are managed.
+
+Also misleading is the call to MmReleasePageMemoryConsumer, which releases
+the reference held by this address space only. After all address spaces
+have had MmPageOutCacheSection succeed on them for the indicated page,
+then paging out of a cache page can continue.
+
+*/
NTSTATUS
NTAPI
@@ -257,12 +311,32 @@
MmDeleteVirtualMapping(Process, Address, FALSE, Dirty, &OurPage);
ASSERT(OurPage == Required->Page[0]);
+ /* Note: this releases the reference held by this address space only. */
MmReleasePageMemoryConsumer(MC_CACHE, Required->Page[0]);
MmUnlockSectionSegment(Segment);
MiSetPageEvent(Process, Address);
return STATUS_SUCCESS;
}
+
+/*
+
+This function is called by rmap when spare pages are needed by the balancer.
+It attempts first to release the page from every address space in which it
+appears, and, after a final check that no competing thread has mapped the
+page again, uses MmFinalizeSectionPageOut to completely evict the page. If
+that's successful, then a suitable non-page map will be left in the segment
+page table, otherwise, the original page is replaced in the section page
+map. Failure may result from a variety of conditions, but always leaves
+the page mapped.
+
+This code is like the other fault handlers, in that MmPageOutCacheSection has
+the option of returning either STATUS_SUCCESS + 1 to wait for a wait entry
+to disappear or to use the blocking callout facility by returning
+STATUS_MORE_PROCESSING_REQUIRED and placing a pointer to a function from
+reqtools.c in the MM_REQUIRED_RESOURCES struct.
+
+*/
NTSTATUS
NTAPI
Modified: trunk/reactos/ntoskrnl/mm/rmap.c
URL: http://svn.reactos.org/svn/reactos/trunk/reactos/ntoskrnl/mm/rmap.c?rev=562…
==============================================================================
--- trunk/reactos/ntoskrnl/mm/rmap.c [iso-8859-1] (original)
+++ trunk/reactos/ntoskrnl/mm/rmap.c [iso-8859-1] Thu Mar 29 04:43:44 2012
@@ -418,6 +418,20 @@
KeBugCheck(MEMORY_MANAGEMENT);
}
+/*
+
+Return the process pointer given when a previous call to MmInsertRmap was
+called with a process and address pointer that conform to the segment rmap
+schema. In short, this requires the address part to be 0xffffff00 + n
+where n is between 0 and 255. When such an rmap exists, it specifies a
+segment rmap in which the process part is a pointer to a slice of a section
+page table, and the low 8 bits of the address represent a page index in the
+page table slice. Together, this information is used by
+MmGetSectionAssociation to determine which page entry points to this page in
+the segment page table.
+
+*/
+
PVOID
NTAPI
MmGetSegmentRmap(PFN_NUMBER Page, PULONG RawOffset)
@@ -445,6 +459,12 @@
return NULL;
}
+/*
+
+Remove the section rmap associated with the indicated page, if it exists.
+
+*/
+
VOID
NTAPI
MmDeleteSectionAssociation(PFN_NUMBER Page)