Author: tkreuzer
Date: Sat Jun 4 12:33:54 2011
New Revision: 52078
URL: http://svn.reactos.org/svn/reactos?rev=52078&view=rev
Log:
[NTOSKRNL/HAL]
- Add explicit memory barriers to the KxAcquireSpinLock and KxReleaseSpinLock inline functions,
and to KeTryToAcquireQueuedSpinLock and KeTryToAcquireQueuedSpinLockRaiseToSynch in the UP case.
This prevents the compiler from reordering memory access instructions across the
boundaries of these functions, even when they are inlined (see the sketch below).
- Use the inline functions in the x64 spinlock functions as well.
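
For illustration, a minimal sketch of the resulting UP-case inline helpers. This is
simplified from ntoskrnl/include/internal/spinlock.h and assumes the usual FORCEINLINE
qualifiers; the real header also carries the SMP implementations behind the #else.

/* Sketch only: UP build (CONFIG_SMP not defined), simplified */
FORCEINLINE
VOID
KxAcquireSpinLock(IN PKSPIN_LOCK SpinLock)
{
    /* On UP builds, spinlocks don't exist at IRQL >= DISPATCH */
    UNREFERENCED_PARAMETER(SpinLock);

    /* Compiler-only barrier: emits no instruction, but keeps the optimizer
       from moving loads/stores across the (inlined) acquire */
    _ReadWriteBarrier();
}

FORCEINLINE
VOID
KxReleaseSpinLock(IN PKSPIN_LOCK SpinLock)
{
    /* On UP builds, spinlocks don't exist at IRQL >= DISPATCH */
    UNREFERENCED_PARAMETER(SpinLock);

    /* Same compiler-only barrier on the release side */
    _ReadWriteBarrier();
}

_ReadWriteBarrier() is the MSVC intrinsic for a compiler fence; it costs nothing at
run time, which is why it can be added unconditionally to the UP path.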
Modified:
trunk/reactos/hal/halx86/generic/spinlock.c
trunk/reactos/ntoskrnl/include/internal/spinlock.h
trunk/reactos/ntoskrnl/ke/amd64/spinlock.c
Modified: trunk/reactos/hal/halx86/generic/spinlock.c
URL: http://svn.reactos.org/svn/reactos/trunk/reactos/hal/halx86/generic/spinloc…
==============================================================================
--- trunk/reactos/hal/halx86/generic/spinlock.c [iso-8859-1] (original)
+++ trunk/reactos/hal/halx86/generic/spinlock.c [iso-8859-1] Sat Jun 4 12:33:54 2011
@@ -188,6 +188,10 @@
/* Simply raise to synch */
KeRaiseIrql(SYNCH_LEVEL, OldIrql);
+ /* Add an explicit memory barrier to prevent the compiler from reordering
+ memory accesses across the borders of spinlocks */
+ _ReadWriteBarrier();
+
/* Always return true on UP Machines */
return TRUE;
}
@@ -208,6 +212,10 @@
/* Simply raise to dispatch */
KeRaiseIrql(DISPATCH_LEVEL, OldIrql);
+ /* Add an explicit memory barrier to prevent the compiler from reordering
+ memory accesses across the borders of spinlocks */
+ _ReadWriteBarrier();
+
/* Always return true on UP Machines */
return TRUE;
}
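
Why this matters for the UP try-acquire paths: once acquiring collapses to little more
than an IRQL raise, only the barrier guarantees that accesses to protected data stay
between acquire and release. A hypothetical kernel-side caller, names invented purely
for illustration:

/* Hypothetical caller, for illustration only */
static LONG ExampleSharedCounter;   /* imagined data protected by the queued lock */

VOID
ExampleTryUpdate(VOID)
{
    KIRQL OldIrql;

    if (KeTryToAcquireQueuedSpinLock(LockQueueDispatcherLock, &OldIrql))
    {
        /* The _ReadWriteBarrier() in the acquire path keeps the compiler from
           hoisting this access above the lock, even if the acquire ends up
           inlined or link-time optimized into the caller */
        ExampleSharedCounter++;

        KeReleaseQueuedSpinLock(LockQueueDispatcherLock, OldIrql);
    }
}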
Modified: trunk/reactos/ntoskrnl/include/internal/spinlock.h
URL: http://svn.reactos.org/svn/reactos/trunk/reactos/ntoskrnl/include/internal/…
==============================================================================
--- trunk/reactos/ntoskrnl/include/internal/spinlock.h [iso-8859-1] (original)
+++ trunk/reactos/ntoskrnl/include/internal/spinlock.h [iso-8859-1] Sat Jun 4 12:33:54 2011
@@ -21,6 +21,10 @@
{
/* On UP builds, spinlocks don't exist at IRQL >= DISPATCH */
UNREFERENCED_PARAMETER(SpinLock);
+
+ /* Add an explicit memory barrier to prevent the compiler from reordering
+ memory accesses across the borders of spinlocks */
+ _ReadWriteBarrier();
}
//
@@ -32,6 +36,10 @@
{
/* On UP builds, spinlocks don't exist at IRQL >= DISPATCH */
UNREFERENCED_PARAMETER(SpinLock);
+
+ /* Add an explicit memory barrier to prevent the compiler from reordering
+ memory accesses across the borders of spinlocks */
+ _ReadWriteBarrier();
}
#else
Modified: trunk/reactos/ntoskrnl/ke/amd64/spinlock.c
URL: http://svn.reactos.org/svn/reactos/trunk/reactos/ntoskrnl/ke/amd64/spinlock…
==============================================================================
--- trunk/reactos/ntoskrnl/ke/amd64/spinlock.c [iso-8859-1] (original)
+++ trunk/reactos/ntoskrnl/ke/amd64/spinlock.c [iso-8859-1] Sat Jun 4 12:33:54 2011
@@ -23,14 +23,14 @@
KIRQL
KeAcquireSpinLockRaiseToSynch(PKSPIN_LOCK SpinLock)
{
-#ifndef CONFIG_SMP
- KIRQL OldIrql;
- /* Simply raise to dispatch */
- KeRaiseIrql(DISPATCH_LEVEL, &OldIrql);
- return OldIrql;
-#else
- UNIMPLEMENTED;
-#endif
+ KIRQL OldIrql;
+
+ /* Raise to sync */
+ KeRaiseIrql(SYNCH_LEVEL, &OldIrql);
+
+ /* Acquire the lock and return */
+ KxAcquireSpinLock(SpinLock);
+ return OldIrql;
}
/*
@@ -40,14 +40,14 @@
NTAPI
KeAcquireSpinLockRaiseToDpc(PKSPIN_LOCK SpinLock)
{
-#ifndef CONFIG_SMP
- KIRQL OldIrql;
- /* Simply raise to dispatch */
+ KIRQL OldIrql;
+
+ /* Raise to dispatch */
KeRaiseIrql(DISPATCH_LEVEL, &OldIrql);
- return OldIrql;
-#else
- UNIMPLEMENTED;
-#endif
+
+ /* Acquire the lock and return */
+ KxAcquireSpinLock(SpinLock);
+ return OldIrql;
}
/*
@@ -58,12 +58,9 @@
KeReleaseSpinLock(PKSPIN_LOCK SpinLock,
KIRQL OldIrql)
{
-#ifndef CONFIG_SMP
- /* Simply lower IRQL back */
+ /* Release the lock and lower IRQL back */
+ KxReleaseSpinLock(SpinLock);
KeLowerIrql(OldIrql);
-#else
- UNIMPLEMENTED;
-#endif
}
/*
@@ -72,14 +69,14 @@
KIRQL
KeAcquireQueuedSpinLock(IN KSPIN_LOCK_QUEUE_NUMBER LockNumber)
{
-#ifndef CONFIG_SMP
- KIRQL OldIrql;
- /* Simply raise to dispatch */
+ KIRQL OldIrql;
+
+ /* Raise to dispatch */
KeRaiseIrql(DISPATCH_LEVEL, &OldIrql);
- return OldIrql;
-#else
- UNIMPLEMENTED;
-#endif
+
+ /* Acquire the lock */
+ KxAcquireSpinLock(KeGetCurrentPrcb()->LockQueue[LockNumber].Lock); // HACK
+ return OldIrql;
}
/*
@@ -88,14 +85,14 @@
KIRQL
KeAcquireQueuedSpinLockRaiseToSynch(IN KSPIN_LOCK_QUEUE_NUMBER LockNumber)
{
-#ifndef CONFIG_SMP
- KIRQL OldIrql;
- /* Simply raise to dispatch */
- KeRaiseIrql(DISPATCH_LEVEL, &OldIrql);
- return OldIrql;
-#else
- UNIMPLEMENTED;
-#endif
+ KIRQL OldIrql;
+
+ /* Raise to synch */
+ KeRaiseIrql(SYNCH_LEVEL, &OldIrql);
+
+ /* Acquire the lock */
+ KxAcquireSpinLock(KeGetCurrentPrcb()->LockQueue[LockNumber].Lock); // HACK
+ return OldIrql;
}
/*
@@ -105,13 +102,17 @@
KeAcquireInStackQueuedSpinLock(IN PKSPIN_LOCK SpinLock,
IN PKLOCK_QUEUE_HANDLE LockHandle)
{
-#ifndef CONFIG_SMP
- /* Simply raise to dispatch */
+ /* Set up the lock */
+ LockHandle->LockQueue.Next = NULL;
+ LockHandle->LockQueue.Lock = SpinLock;
+
+ /* Raise to dispatch */
KeRaiseIrql(DISPATCH_LEVEL, &LockHandle->OldIrql);
-#else
- UNIMPLEMENTED;
-#endif
-}
+
+ /* Acquire the lock */
+ KxAcquireSpinLock(LockHandle->LockQueue.Lock); // HACK
+}
+
/*
* @implemented
@@ -120,13 +121,17 @@
KeAcquireInStackQueuedSpinLockRaiseToSynch(IN PKSPIN_LOCK SpinLock,
IN PKLOCK_QUEUE_HANDLE LockHandle)
{
-#ifndef CONFIG_SMP
- /* Simply raise to synch */
+ /* Set up the lock */
+ LockHandle->LockQueue.Next = NULL;
+ LockHandle->LockQueue.Lock = SpinLock;
+
+ /* Raise to synch */
KeRaiseIrql(SYNCH_LEVEL, &LockHandle->OldIrql);
-#else
- UNIMPLEMENTED;
-#endif
-}
+
+ /* Acquire the lock */
+ KxAcquireSpinLock(LockHandle->LockQueue.Lock); // HACK
+}
+
/*
* @implemented
@@ -135,27 +140,25 @@
KeReleaseQueuedSpinLock(IN KSPIN_LOCK_QUEUE_NUMBER LockNumber,
IN KIRQL OldIrql)
{
-#ifndef CONFIG_SMP
+ /* Release the lock */
+ KxReleaseSpinLock(KeGetCurrentPrcb()->LockQueue[LockNumber].Lock); // HACK
+
+ /* Lower IRQL back */
+ KeLowerIrql(OldIrql);
+}
+
+
+/*
+ * @implemented
+ */
+VOID
+KeReleaseInStackQueuedSpinLock(IN PKLOCK_QUEUE_HANDLE LockHandle)
+{
/* Simply lower IRQL back */
- KeLowerIrql(OldIrql);
-#else
- UNIMPLEMENTED;
-#endif
-}
-
-/*
- * @implemented
- */
-VOID
-KeReleaseInStackQueuedSpinLock(IN PKLOCK_QUEUE_HANDLE LockHandle)
-{
-#ifndef CONFIG_SMP
- /* Simply lower IRQL back */
+ KxReleaseSpinLock(LockHandle->LockQueue.Lock); // HACK
KeLowerIrql(LockHandle->OldIrql);
-#else
- UNIMPLEMENTED;
-#endif
-}
+}
+
/*
* @implemented
@@ -167,11 +170,16 @@
#ifndef CONFIG_SMP
/* Simply raise to dispatch */
KeRaiseIrql(DISPATCH_LEVEL, OldIrql);
+
+ /* Add an explicit memory barrier to prevent the compiler from reordering
+ memory accesses across the borders of spinlocks */
+ _ReadWriteBarrier();
/* Always return true on UP Machines */
return TRUE;
#else
UNIMPLEMENTED;
+ ASSERT(FALSE);
#endif
}
@@ -185,11 +193,16 @@
#ifndef CONFIG_SMP
/* Simply raise to dispatch */
KeRaiseIrql(DISPATCH_LEVEL, OldIrql);
+
+ /* Add an explicit memory barrier to prevent the compiler from reordering
+ memory accesses across the borders of spinlocks */
+ _ReadWriteBarrier();
/* Always return true on UP Machines */
return TRUE;
#else
UNIMPLEMENTED;
+ ASSERT(FALSE);
#endif
}
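
For completeness, a hedged usage sketch of the rewritten x64 in-stack queued path.
ExampleLock, ExampleListHead and ExampleInsert are invented for this sketch; the API
calls are the standard ones touched above.

/* Hypothetical caller of the rewritten x64 in-stack queued lock path */
static KSPIN_LOCK ExampleLock;       /* invented for this sketch */
static LIST_ENTRY ExampleListHead;   /* imagined list protected by ExampleLock */

VOID
ExampleInsert(IN PLIST_ENTRY Entry)
{
    KLOCK_QUEUE_HANDLE LockHandle;

    /* Raises to DISPATCH_LEVEL, then acquires via KxAcquireSpinLock */
    KeAcquireInStackQueuedSpinLock(&ExampleLock, &LockHandle);

    /* Stays inside the critical section thanks to the Kx barriers */
    InsertTailList(&ExampleListHead, Entry);

    /* Releases via KxReleaseSpinLock, then lowers IRQL */
    KeReleaseInStackQueuedSpinLock(&LockHandle);
}

With this change, the same calls also work on SMP x64 builds, since they now reach the
real Kx implementations instead of the old UNIMPLEMENTED stubs.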