--- trunk/reactos/ntoskrnl/cc/view.c 2005-08-13 13:13:05 UTC (rev 17366)
+++ trunk/reactos/ntoskrnl/cc/view.c 2005-08-13 13:16:16 UTC (rev 17367)
@@ -81,12 +81,81 @@
#error Unknown compiler for alloca intrinsic stack allocation "function"
#endif
+#if defined(DBG) || defined(KDBG)
+static void CcRosCacheSegmentIncRefCount ( PCACHE_SEGMENT cs )
+{
+ ++cs->ReferenceCount;
+ if ( cs->Bcb->Trace )
+ {
+ DPRINT1("CacheSegment %p ++RefCount=%d, Dirty %d, PageOut %d\n",
+ cs, cs->ReferenceCount, cs->Dirty, cs->PageOut );
+ }
+}
+static void CcRosCacheSegmentDecRefCount ( PCACHE_SEGMENT cs )
+{
+ --cs->ReferenceCount;
+ if ( cs->Bcb->Trace )
+ {
+ DPRINT1("CacheSegment %p --RefCount=%d, Dirty %d, PageOut %d\n",
+ cs, cs->ReferenceCount, cs->Dirty, cs->PageOut );
+ }
+}
+#else
+#define CcRosCacheSegmentIncRefCount(cs) (++((cs)->ReferenceCount))
+#define CcRosCacheSegmentDecRefCount(cs) (--((cs)->ReferenceCount))
+#endif
NTSTATUS
CcRosInternalFreeCacheSegment(PCACHE_SEGMENT CacheSeg);
/* FUNCTIONS *****************************************************************/
+VOID
+STDCALL
+CcRosTraceCacheMap (
+ PBCB Bcb,
+ BOOLEAN Trace )
+{
+#if defined(DBG) || defined(KDBG)
+ KIRQL oldirql;
+ PLIST_ENTRY current_entry;
+ PCACHE_SEGMENT current;
+
+ if ( !Bcb )
+ return;
+
+ Bcb->Trace = Trace;
+
+ if ( Trace )
+ {
+ DPRINT1("Enabling Tracing for CacheMap 0x%p:\n", Bcb );
+
+ ExAcquireFastMutex(&ViewLock);
+ KeAcquireSpinLock(&Bcb->BcbLock, &oldirql);
+
+ current_entry = Bcb->BcbSegmentListHead.Flink;
+ while (current_entry != &Bcb->BcbSegmentListHead)
+ {
+ current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT, BcbSegmentListEntry);
+ current_entry = current_entry->Flink;
+
+ DPRINT1(" CacheSegment 0x%p enabled, RefCount %d, Dirty %d, PageOut %d\n",
+ current, current->ReferenceCount, current->Dirty, current->PageOut );
+ }
+ KeReleaseSpinLock(&Bcb->BcbLock, oldirql);
+ ExReleaseFastMutex(&ViewLock);
+ }
+ else
+ {
+ DPRINT1("Disabling Tracing for CacheMap 0x%p:\n", Bcb );
+ }
+
+#else
+ Bcb = Bcb;
+ Trace = Trace;
+#endif
+}
+
NTSTATUS
CcRosFlushCacheSegment(PCACHE_SEGMENT CacheSegment)
{
@@ -100,7 +169,7 @@
CacheSegment->Dirty = FALSE;
RemoveEntryList(&CacheSegment->DirtySegmentListEntry);
DirtyPageCount -= CacheSegment->Bcb->CacheSegmentSize / PAGE_SIZE;
- CacheSegment->ReferenceCount--;
+ CcRosCacheSegmentDecRefCount ( CacheSegment );
KeReleaseSpinLock(&CacheSegment->Bcb->BcbLock, oldIrql);
ExReleaseFastMutex(&ViewLock);
}
@@ -243,7 +312,7 @@
ULONG i;
NTSTATUS Status;
- current->ReferenceCount++;
+ CcRosCacheSegmentIncRefCount(current);
last = current;
current->PageOut = TRUE;
KeReleaseSpinLock(&current->Bcb->BcbLock, oldIrql);
@@ -260,8 +329,8 @@
}
ExAcquireFastMutex(&ViewLock);
KeAcquireSpinLock(&current->Bcb->BcbLock, &oldIrql);
- current->ReferenceCount--;
- current->PageOut = FALSE;
+ CcRosCacheSegmentDecRefCount(current);
+ current->PageOut = FALSE;
KeReleaseSpinLock(&current->Bcb->BcbLock, oldIrql);
current_entry = &current->CacheSegmentLRUListEntry;
continue;
@@ -315,14 +384,14 @@
CacheSeg->MappedCount++;
}
KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
- CacheSeg->ReferenceCount--;
+ CcRosCacheSegmentDecRefCount(CacheSeg);
if (Mapped && CacheSeg->MappedCount == 1)
{
- CacheSeg->ReferenceCount++;
+ CcRosCacheSegmentIncRefCount(CacheSeg);
}
if (!WasDirty && CacheSeg->Dirty)
{
- CacheSeg->ReferenceCount++;
+ CcRosCacheSegmentIncRefCount(CacheSeg);
}
KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
ExReleaseFastMutex(&ViewLock);
@@ -351,7 +420,7 @@
if (current->FileOffset <= FileOffset &&
(current->FileOffset + Bcb->CacheSegmentSize) > FileOffset)
{
- current->ReferenceCount++;
+ CcRosCacheSegmentIncRefCount(current);
KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
ExAcquireFastMutex(&current->Lock);
return(current);
@@ -387,7 +456,7 @@
else
{
KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
- CacheSeg->ReferenceCount--;
+ CcRosCacheSegmentDecRefCount(CacheSeg);
KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
}
@@ -430,14 +499,14 @@
}
KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
- CacheSeg->ReferenceCount--;
+ CcRosCacheSegmentDecRefCount(CacheSeg);
if (!WasDirty && NowDirty)
{
- CacheSeg->ReferenceCount++;
+ CcRosCacheSegmentIncRefCount(CacheSeg);
}
if (CacheSeg->MappedCount == 0)
{
- CacheSeg->ReferenceCount--;
+ CcRosCacheSegmentDecRefCount(CacheSeg);
}
KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
@@ -480,6 +549,12 @@
current->PageOut = FALSE;
current->FileOffset = ROUND_DOWN(FileOffset, Bcb->CacheSegmentSize);
current->Bcb = Bcb;
+#if defined(DBG) || defined(KDBG)
+ if ( Bcb->Trace )
+ {
+ DPRINT1("CacheMap 0x%p: new Cache Segment: 0x%p\n", Bcb, current );
+ }
+#endif
current->MappedCount = 0;
current->DirtySegmentListEntry.Flink = NULL;
current->DirtySegmentListEntry.Blink = NULL;
@@ -504,8 +579,17 @@
if (current->FileOffset <= FileOffset &&
(current->FileOffset + Bcb->CacheSegmentSize) > FileOffset)
{
- current->ReferenceCount++;
+ CcRosCacheSegmentIncRefCount(current);
KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
+#if defined(DBG) || defined(KDBG)
+ if ( Bcb->Trace )
+ {
+ DPRINT1("CacheMap 0x%p: deleting newly created Cache Segment 0x%p ( found existing one 0x%p )\n",
+ Bcb,
+ (*CacheSeg),
+ current );
+ }
+#endif
ExReleaseFastMutex(&(*CacheSeg)->Lock);
ExReleaseFastMutex(&ViewLock);
ExFreeToNPagedLookasideList(&CacheSegLookasideList, *CacheSeg);
@@ -761,6 +845,12 @@
KIRQL oldIrql;
#endif
DPRINT("Freeing cache segment 0x%p\n", CacheSeg);
+#if defined(DBG) || defined(KDBG)
+ if ( CacheSeg->Bcb->Trace )
+ {
+ DPRINT1("CacheMap 0x%p: deleting Cache Segment: 0x%p\n", CacheSeg->Bcb, CacheSeg );
+ }
+#endif
#ifdef CACHE_BITMAP
RegionSize = CacheSeg->Bcb->CacheSegmentSize / PAGE_SIZE;
@@ -878,7 +968,7 @@
}
KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
ExReleaseFastMutex(&current->Lock);
- current->ReferenceCount--;
+ CcRosCacheSegmentDecRefCount(current);
KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
}
@@ -953,6 +1043,9 @@
}
InsertHeadList(&FreeList, &current->BcbSegmentListEntry);
}
+#if defined(DBG) || defined(KDBG)
+ Bcb->Trace = FALSE;
+#endif
KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
ExReleaseFastMutex(&ViewLock);