https://git.reactos.org/?p=reactos.git;a=commitdiff;h=9a07c71eef55f16fd64ad…
commit 9a07c71eef55f16fd64ad01821d7798f8f89944b
Author: Pierre Schweitzer <pierre(a)reactos.org>
AuthorDate: Tue Jan 23 22:56:23 2018 +0100
Commit: Pierre Schweitzer <pierre(a)reactos.org>
CommitDate: Tue Jan 23 23:25:26 2018 +0100
[NTOSKRNL] Implement support for deferred writes in Cc.
Namely, implement CcCanIWrite() (very basic, and likely wrong).
And implement CcDeferWrite() which will queue the write operation.
In CciLazyWriter() (which may be renamed CcWorkerThread() ;-)),
handle the queued write operations one by one. This is likely
not to be accurate, but, given we have only one FS supporting
this for now (NFS / RDBSS / Shares), this is OK.
CORE-14235
---
ntoskrnl/cc/copy.c | 53 ++++++++++++++++++++++++++++++++++++++++--
ntoskrnl/cc/view.c | 35 ++++++++++++++++++++++++++++
ntoskrnl/include/internal/cc.h | 15 ++++++++++++
3 files changed, 101 insertions(+), 2 deletions(-)
diff --git a/ntoskrnl/cc/copy.c b/ntoskrnl/cc/copy.c
index dadae020c7..4c68bf036b 100644
--- a/ntoskrnl/cc/copy.c
+++ b/ntoskrnl/cc/copy.c
@@ -378,6 +378,22 @@ CcCanIWrite (
    CCTRACE(CC_API_DEBUG, "FileObject=%p BytesToWrite=%lu Wait=%d Retrying=%d\n",
        FileObject, BytesToWrite, Wait, Retrying);
+ /* We cannot write if dirty pages count is above threshold */
+ if (CcTotalDirtyPages > CcDirtyPageThreshold)
+ {
+ return FALSE;
+ }
+
+ /* We cannot write if dirty pages count will bring us above the threshold
+ * XXX: Might not be accurate
+ */
+ if (CcTotalDirtyPages + (BytesToWrite / PAGE_SIZE) > CcDirtyPageThreshold)
+ {
+ return FALSE;
+ }
+
+ /* FIXME: Handle per-file threshold */
+
return TRUE;
}
@@ -442,7 +458,7 @@ CcCopyWrite (
}
/*
- * @unimplemented
+ * @implemented
*/
VOID
NTAPI
@@ -454,10 +470,43 @@ CcDeferWrite (
IN ULONG BytesToWrite,
IN BOOLEAN Retrying)
{
+ PROS_DEFERRED_WRITE_CONTEXT Context;
+
    CCTRACE(CC_API_DEBUG, "FileObject=%p PostRoutine=%p Context1=%p Context2=%p BytesToWrite=%lu Retrying=%d\n",
        FileObject, PostRoutine, Context1, Context2, BytesToWrite, Retrying);
- PostRoutine(Context1, Context2);
+ /* Try to allocate a context for queueing the write operation */
+ Context = ExAllocatePoolWithTag(NonPagedPool, sizeof(ROS_DEFERRED_WRITE_CONTEXT), 'CcDw');
+ /* If it failed, immediately execute the operation! */
+ if (Context == NULL)
+ {
+ PostRoutine(Context1, Context2);
+ return;
+ }
+
+ /* Otherwise, initialize the context */
+ Context->FileObject = FileObject;
+ Context->PostRoutine = PostRoutine;
+ Context->Context1 = Context1;
+ Context->Context2 = Context2;
+ Context->BytesToWrite = BytesToWrite;
+ Context->Retrying = Retrying;
+
+ /* And queue it */
+ if (Retrying)
+ {
+ /* To the top, if that's a retry */
+ ExInterlockedInsertHeadList(&CcDeferredWrites,
+ &Context->CcDeferredWritesEntry,
+ &CcDeferredWriteSpinLock);
+ }
+ else
+ {
+ /* To the bottom, if that's a first time */
+ ExInterlockedInsertTailList(&CcDeferredWrites,
+ &Context->CcDeferredWritesEntry,
+ &CcDeferredWriteSpinLock);
+ }
}
/*
diff --git a/ntoskrnl/cc/view.c b/ntoskrnl/cc/view.c
index 465fffa595..61036b0d99 100644
--- a/ntoskrnl/cc/view.c
+++ b/ntoskrnl/cc/view.c
@@ -60,9 +60,13 @@ ULONG CcLazyWriteIos = 0;
/* Internal vars (MS):
* - Threshold above which lazy writer will start action
* - Amount of dirty pages
+ * - List for deferred writes
+ * - Spinlock when dealing with the deferred list
*/
ULONG CcDirtyPageThreshold = 0;
ULONG CcTotalDirtyPages = 0;
+LIST_ENTRY CcDeferredWrites;
+KSPIN_LOCK CcDeferredWriteSpinLock;
/* Internal vars (ROS):
* - Event to notify lazy writer to shutdown
@@ -308,6 +312,7 @@ CciLazyWriter(PVOID Unused)
while (TRUE)
{
NTSTATUS Status;
+ PLIST_ENTRY ListEntry;
ULONG Target, Count = 0;
/* One per second or until we have to stop */
@@ -342,6 +347,34 @@ CciLazyWriter(PVOID Unused)
/* Inform people waiting on us that we're done */
KeSetEvent(&iLazyWriterNotify, IO_DISK_INCREMENT, FALSE);
+
+ /* Likely not optimal, but let's handle one deferred write now! */
+ ListEntry = ExInterlockedRemoveHeadList(&CcDeferredWrites, &CcDeferredWriteSpinLock);
+ if (ListEntry != NULL)
+ {
+ PROS_DEFERRED_WRITE_CONTEXT Context;
+
+ /* Extract the context */
+ Context = CONTAINING_RECORD(ListEntry, ROS_DEFERRED_WRITE_CONTEXT, CcDeferredWritesEntry);
+
+ /* Can we write now? */
+ if (CcCanIWrite(Context->FileObject, Context->BytesToWrite, FALSE, Context->Retrying))
+ {
+ /* Yes! Do it, and destroy the associated context */
+ Context->PostRoutine(Context->Context1, Context->Context2);
+ ExFreePoolWithTag(Context, 'CcDw');
+ }
+ else
+ {
+ /* Otherwise, requeue it, but in tail, so that it doesn't block others
+ * This is clearly to improve, but given the poor algorithm used now
+ * It's better than nothing!
+ */
+ ExInterlockedInsertTailList(&CcDeferredWrites,
+ &Context->CcDeferredWritesEntry,
+ &CcDeferredWriteSpinLock);
+ }
+ }
}
}
@@ -1358,6 +1391,8 @@ CcInitView (
InitializeListHead(&DirtyVacbListHead);
InitializeListHead(&VacbLruListHead);
+ InitializeListHead(&CcDeferredWrites);
+ KeInitializeSpinLock(&CcDeferredWriteSpinLock);
KeInitializeGuardedMutex(&ViewLock);
ExInitializeNPagedLookasideList(&iBcbLookasideList,
NULL,
diff --git a/ntoskrnl/include/internal/cc.h b/ntoskrnl/include/internal/cc.h
index cc62381aad..05eb53fec7 100644
--- a/ntoskrnl/include/internal/cc.h
+++ b/ntoskrnl/include/internal/cc.h
@@ -41,6 +41,10 @@
//
extern ULONG CcRosTraceLevel;
extern LIST_ENTRY DirtyVacbListHead;
+extern ULONG CcDirtyPageThreshold;
+extern ULONG CcTotalDirtyPages;
+extern LIST_ENTRY CcDeferredWrites;
+extern KSPIN_LOCK CcDeferredWriteSpinLock;
typedef struct _PF_SCENARIO_ID
{
@@ -193,6 +197,17 @@ typedef struct _ROS_VACB
/* Pointer to the next VACB in a chain. */
} ROS_VACB, *PROS_VACB;
+typedef struct _ROS_DEFERRED_WRITE_CONTEXT
+{
+ LIST_ENTRY CcDeferredWritesEntry;
+ PFILE_OBJECT FileObject;
+ PCC_POST_DEFERRED_WRITE PostRoutine;
+ PVOID Context1;
+ PVOID Context2;
+ ULONG BytesToWrite;
+ BOOLEAN Retrying;
+} ROS_DEFERRED_WRITE_CONTEXT, *PROS_DEFERRED_WRITE_CONTEXT;
+
typedef struct _INTERNAL_BCB
{
/* Lock */