Can you explain why the change from unsigned to signed should make a
difference when it's used with inline assembly?
And why add prototypes when the functions are already defined as inline
functions?
On 10.08.2013 23:17, akhaldi(a)svn.reactos.org wrote:
Author: akhaldi
Date: Sat Aug 10 21:17:38 2013
New Revision: 59692
URL:
http://svn.reactos.org/svn/reactos?rev=59692&view=rev
Log:
[CRT/INTRIN_X86]
* Fix __ll_rshift() and __ull_rshift(). Thanks to Filip Navara for his help with the
issue.
* Properly mark int64part as unsigned __int64 in _byteswap_uint64().
* Add missing prototypes.
Modified:
trunk/reactos/include/crt/mingw32/intrin_x86.h
Modified: trunk/reactos/include/crt/mingw32/intrin_x86.h
URL:
http://svn.reactos.org/svn/reactos/trunk/reactos/include/crt/mingw32/intrin…
==============================================================================
--- trunk/reactos/include/crt/mingw32/intrin_x86.h [iso-8859-1] (original)
+++ trunk/reactos/include/crt/mingw32/intrin_x86.h [iso-8859-1] Sat Aug 10 21:17:38 2013
@@ -88,6 +88,14 @@
/*** Memory barriers ***/
+__INTRIN_INLINE void _ReadWriteBarrier(void);
+__INTRIN_INLINE void _mm_mfence(void);
+__INTRIN_INLINE void _mm_lfence(void);
+__INTRIN_INLINE void _mm_sfence(void);
+#ifdef __x86_64__
+__INTRIN_INLINE void __faststorefence(void);
+#endif
+
__INTRIN_INLINE void _ReadWriteBarrier(void)
{
__asm__ __volatile__("" : : : "memory");
@@ -127,7 +135,46 @@
/*** Atomic operations ***/
+__INTRIN_INLINE long _InterlockedAddLargeStatistic(volatile long long * const Addend,
const long Value);
+__INTRIN_INLINE unsigned char _interlockedbittestandreset(volatile long * a, const long
b);
+__INTRIN_INLINE unsigned char _interlockedbittestandset(volatile long * a, const long
b);
+#if defined(_M_AMD64)
+__INTRIN_INLINE unsigned char _interlockedbittestandreset64(volatile long long * a,
const long long b);
+__INTRIN_INLINE unsigned char _interlockedbittestandset64(volatile long long * a, const
long long b);
+#endif
+
#if (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__) > 40100
+
+__INTRIN_INLINE char _InterlockedCompareExchange8(volatile char * const Destination,
const char Exchange, const char Comperand);
+__INTRIN_INLINE short _InterlockedCompareExchange16(volatile short * const Destination,
const short Exchange, const short Comperand);
+__INTRIN_INLINE long _InterlockedCompareExchange(volatile long * const Destination,
const long Exchange, const long Comperand);
+__INTRIN_INLINE void * _InterlockedCompareExchangePointer(void * volatile * const
Destination, void * const Exchange, void * const Comperand);
+__INTRIN_INLINE long _InterlockedExchange(volatile long * const Target, const long
Value);
+__INTRIN_INLINE void * _InterlockedExchangePointer(void * volatile * const Target, void
* const Value);
+__INTRIN_INLINE long _InterlockedExchangeAdd16(volatile short * const Addend, const
short Value);
+__INTRIN_INLINE long _InterlockedExchangeAdd(volatile long * const Addend, const long
Value);
+__INTRIN_INLINE char _InterlockedAnd8(volatile char * const value, const char mask);
+__INTRIN_INLINE short _InterlockedAnd16(volatile short * const value, const short
mask);
+__INTRIN_INLINE long _InterlockedAnd(volatile long * const value, const long mask);
+__INTRIN_INLINE char _InterlockedOr8(volatile char * const value, const char mask);
+__INTRIN_INLINE short _InterlockedOr16(volatile short * const value, const short mask);
+__INTRIN_INLINE long _InterlockedOr(volatile long * const value, const long mask);
+__INTRIN_INLINE char _InterlockedXor8(volatile char * const value, const char mask);
+__INTRIN_INLINE short _InterlockedXor16(volatile short * const value, const short
mask);
+__INTRIN_INLINE long _InterlockedXor(volatile long * const value, const long mask);
+__INTRIN_INLINE long _InterlockedDecrement(volatile long * const lpAddend);
+__INTRIN_INLINE long _InterlockedIncrement(volatile long * const lpAddend);
+__INTRIN_INLINE short _InterlockedDecrement16(volatile short * const lpAddend);
+__INTRIN_INLINE short _InterlockedIncrement16(volatile short * const lpAddend);
+#if defined(_M_AMD64)
+__INTRIN_INLINE long long _InterlockedExchange64(volatile long long * const Target,
const long long Value);
+__INTRIN_INLINE long long _InterlockedExchangeAdd64(volatile long long * const Addend,
const long long Value);
+__INTRIN_INLINE long long _InterlockedAnd64(volatile long long * const value, const long
long mask);
+__INTRIN_INLINE long long _InterlockedOr64(volatile long long * const value, const long
long mask);
+__INTRIN_INLINE long long _InterlockedXor64(volatile long long * const value, const long
long mask);
+__INTRIN_INLINE long long _InterlockedDecrement64(volatile long long * const lpAddend);
+__INTRIN_INLINE long long _InterlockedIncrement64(volatile long long * const lpAddend);
+#endif
__INTRIN_INLINE char _InterlockedCompareExchange8(volatile char * const Destination,
const char Exchange, const char Comperand)
{
@@ -287,7 +334,33 @@
}
#endif
-#else
+#else /* (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__) > 40100 */
+
+__INTRIN_INLINE char _InterlockedCompareExchange8(volatile char * const Destination,
const char Exchange, const char Comperand);
+__INTRIN_INLINE short _InterlockedCompareExchange16(volatile short * const Destination,
const short Exchange, const short Comperand);
+__INTRIN_INLINE long _InterlockedCompareExchange(volatile long * const Destination,
const long Exchange, const long Comperand);
+__INTRIN_INLINE void * _InterlockedCompareExchangePointer(void * volatile * const
Destination, void * const Exchange, void * const Comperand);
+__INTRIN_INLINE long _InterlockedExchange(volatile long * const Target, const long
Value);
+__INTRIN_INLINE void * _InterlockedExchangePointer(void * volatile * const Target, void
* const Value);
+__INTRIN_INLINE long _InterlockedExchangeAdd16(volatile short * const Addend, const
short Value);
+__INTRIN_INLINE long _InterlockedExchangeAdd(volatile long * const Addend, const long
Value);
+__INTRIN_INLINE char _InterlockedAnd8(volatile char * const value, const char mask);
+__INTRIN_INLINE short _InterlockedAnd16(volatile short * const value, const short
mask);
+__INTRIN_INLINE long _InterlockedAnd(volatile long * const value, const long mask);
+__INTRIN_INLINE char _InterlockedOr8(volatile char * const value, const char mask);
+__INTRIN_INLINE short _InterlockedOr16(volatile short * const value, const short mask);
+__INTRIN_INLINE long _InterlockedOr(volatile long * const value, const long mask);
+__INTRIN_INLINE char _InterlockedXor8(volatile char * const value, const char mask);
+__INTRIN_INLINE short _InterlockedXor16(volatile short * const value, const short
mask);
+__INTRIN_INLINE long _InterlockedXor(volatile long * const value, const long mask);
+__INTRIN_INLINE long _InterlockedDecrement(volatile long * const lpAddend);
+__INTRIN_INLINE long _InterlockedIncrement(volatile long * const lpAddend);
+__INTRIN_INLINE short _InterlockedDecrement16(volatile short * const lpAddend);
+__INTRIN_INLINE short _InterlockedIncrement16(volatile short * const lpAddend);
+#if defined(_M_AMD64)
+__INTRIN_INLINE long long _InterlockedDecrement64(volatile long long * const lpAddend);
+__INTRIN_INLINE long long _InterlockedIncrement64(volatile long long * const lpAddend);
+#endif
__INTRIN_INLINE char _InterlockedCompareExchange8(volatile char * const Destination,
const char Exchange, const char Comperand)
{
@@ -530,10 +603,11 @@
}
#endif
-#endif
+#endif /* (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__) > 40100 */
#if (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__) > 40100
&& defined(__x86_64__)
+__INTRIN_INLINE long long _InterlockedCompareExchange64(volatile long long * const
Destination, const long long Exchange, const long long Comperand);
__INTRIN_INLINE long long _InterlockedCompareExchange64(volatile long long * const
Destination, const long long Exchange, const long long Comperand)
{
return __sync_val_compare_and_swap(Destination, Comperand, Exchange);
@@ -541,6 +615,7 @@
#else
+__INTRIN_INLINE long long _InterlockedCompareExchange64(volatile long long * const
Destination, const long long Exchange, const long long Comperand);
__INTRIN_INLINE long long _InterlockedCompareExchange64(volatile long long * const
Destination, const long long Exchange, const long long Comperand)
{
long long retval = Comperand;
@@ -609,6 +684,19 @@
#endif
/*** String operations ***/
+
+__INTRIN_INLINE void __stosb(unsigned char * Dest, const unsigned char Data, size_t
Count);
+__INTRIN_INLINE void __stosw(unsigned short * Dest, const unsigned short Data, size_t
Count);
+__INTRIN_INLINE void __stosd(unsigned long * Dest, const unsigned long Data, size_t
Count);
+__INTRIN_INLINE void __movsb(unsigned char * Destination, const unsigned char * Source,
size_t Count);
+__INTRIN_INLINE void __movsw(unsigned short * Destination, const unsigned short *
Source, size_t Count);
+__INTRIN_INLINE void __movsd(unsigned long * Destination, const unsigned long * Source,
size_t Count);
+#ifdef _M_AMD64
+__INTRIN_INLINE void __stosq(unsigned __int64 * Dest, const unsigned __int64 Data,
size_t Count);
+__INTRIN_INLINE void __movsq(unsigned long * Destination, const unsigned long * Source,
size_t Count);
+#endif
+
+
/* NOTE: we don't set a memory clobber in the __stosX functions because Visual C++
doesn't */
__INTRIN_INLINE void __stosb(unsigned char * Dest, const unsigned char Data, size_t
Count)
{
@@ -695,7 +783,25 @@
#endif
#if defined(_M_AMD64)
+
/*** GS segment addressing ***/
+
+__INTRIN_INLINE void __writegsbyte(const unsigned long Offset, const unsigned char
Data);
+__INTRIN_INLINE void __writegsword(const unsigned long Offset, const unsigned short
Data);
+__INTRIN_INLINE void __writegsdword(const unsigned long Offset, const unsigned long
Data);
+__INTRIN_INLINE void __writegsqword(const unsigned long Offset, const unsigned __int64
Data);
+__INTRIN_INLINE unsigned char __readgsbyte(const unsigned long Offset);
+__INTRIN_INLINE unsigned short __readgsword(const unsigned long Offset);
+__INTRIN_INLINE unsigned long __readgsdword(const unsigned long Offset);
+__INTRIN_INLINE unsigned __int64 __readgsqword(const unsigned long Offset);
+__INTRIN_INLINE void __incgsbyte(const unsigned long Offset);
+__INTRIN_INLINE void __incgsword(const unsigned long Offset);
+__INTRIN_INLINE void __incgsdword(const unsigned long Offset);
+__INTRIN_INLINE void __addgsbyte(const unsigned long Offset, const unsigned char Data);
+__INTRIN_INLINE void __addgsword(const unsigned long Offset, const unsigned short
Data);
+__INTRIN_INLINE void __addgsdword(const unsigned long Offset, const unsigned int Data);
+__INTRIN_INLINE void __addgsqword(const unsigned long Offset, const unsigned __int64
Data);
+
__INTRIN_INLINE void __writegsbyte(const unsigned long Offset, const unsigned char
Data)
{
@@ -780,8 +886,24 @@
__asm__ __volatile__("addq %k[Data], %%gs:%a[Offset]" : : [Offset]
"ir" (Offset), [Data] "ir" (Data) : "memory");
}
-#else
+#else /* defined(_M_AMD64) */
+
/*** FS segment addressing ***/
+
+__INTRIN_INLINE void __writefsbyte(const unsigned long Offset, const unsigned char
Data);
+__INTRIN_INLINE void __writefsword(const unsigned long Offset, const unsigned short
Data);
+__INTRIN_INLINE void __writefsdword(const unsigned long Offset, const unsigned long
Data);
+__INTRIN_INLINE unsigned char __readfsbyte(const unsigned long Offset);
+__INTRIN_INLINE unsigned short __readfsword(const unsigned long Offset);
+__INTRIN_INLINE unsigned long __readfsdword(const unsigned long Offset);
+__INTRIN_INLINE void __incfsbyte(const unsigned long Offset);
+__INTRIN_INLINE void __incfsword(const unsigned long Offset);
+__INTRIN_INLINE void __incfsdword(const unsigned long Offset);
+__INTRIN_INLINE void __addfsbyte(const unsigned long Offset, const unsigned char Data);
+__INTRIN_INLINE void __addfsword(const unsigned long Offset, const unsigned short
Data);
+__INTRIN_INLINE void __addfsdword(const unsigned long Offset, const unsigned int Data);
+
+
__INTRIN_INLINE void __writefsbyte(const unsigned long Offset, const unsigned char
Data)
{
__asm__ __volatile__("movb %b[Data], %%fs:%a[Offset]" : : [Offset]
"ir" (Offset), [Data] "iq" (Data) : "memory");
@@ -857,10 +979,39 @@
else
__asm__ __volatile__("addl %k[Data], %%fs:%a[Offset]" : : [Offset]
"ir" (Offset), [Data] "iq" (Data) : "memory");
}
-#endif
+
+#endif /* defined(_M_AMD64) */
/*** Bit manipulation ***/
+
+__INTRIN_INLINE unsigned char _BitScanForward(unsigned long * const Index, const
unsigned long Mask);
+__INTRIN_INLINE unsigned char _BitScanReverse(unsigned long * const Index, const
unsigned long Mask);
+__INTRIN_INLINE unsigned char _bittest(const long * const a, const long b);
+#ifdef _M_AMD64
+__INTRIN_INLINE unsigned char _bittest64(const __int64 * const a, const __int64 b);
+#endif
+__INTRIN_INLINE unsigned char _bittestandcomplement(long * const a, const long b);
+__INTRIN_INLINE unsigned char _bittestandreset(long * const a, const long b);
+__INTRIN_INLINE unsigned char _bittestandset(long * const a, const long b);
+__INTRIN_INLINE unsigned char _rotl8(unsigned char value, unsigned char shift);
+__INTRIN_INLINE unsigned short _rotl16(unsigned short value, unsigned char shift);
+__INTRIN_INLINE unsigned int _rotl(unsigned int value, int shift);
+__INTRIN_INLINE unsigned int _rotr(unsigned int value, int shift);
+__INTRIN_INLINE unsigned char _rotr8(unsigned char value, unsigned char shift);
+__INTRIN_INLINE unsigned short _rotr16(unsigned short value, unsigned char shift);
+__INTRIN_INLINE unsigned long long __ll_lshift(const unsigned long long Mask, const int
Bit);
+__INTRIN_INLINE long long __ll_rshift(const long long Mask, const int Bit);
+__INTRIN_INLINE unsigned long long __ull_rshift(const unsigned long long Mask, int
Bit);
+__INTRIN_INLINE unsigned short _byteswap_ushort(unsigned short value);
+__INTRIN_INLINE unsigned long _byteswap_ulong(unsigned long value);
+#ifdef _M_AMD64
+__INTRIN_INLINE unsigned __int64 _byteswap_uint64(unsigned __int64 value);
+#else
+__INTRIN_INLINE unsigned __int64 _byteswap_uint64(unsigned __int64 value);
+#endif
+
+
__INTRIN_INLINE unsigned char _BitScanForward(unsigned long * const Index, const
unsigned long Mask)
{
__asm__("bsfl %[Mask], %[Index]" : [Index] "=r" (*Index) : [Mask]
"mr" (Mask));
@@ -1001,11 +1152,11 @@
__INTRIN_INLINE long long __ll_rshift(const long long Mask, const int Bit)
{
- unsigned long long retval = Mask;
+ long long retval = Mask;
__asm__
(
- "shldl %b[Bit], %%eax, %%edx; sarl %b[Bit], %%eax" :
+ "shrdl %b[Bit], %%edx, %%eax; sarl %b[Bit], %%edx" :
"+A" (retval) :
[Bit] "Nc" ((unsigned char)((unsigned long)Bit) & 0xFF)
);
@@ -1019,7 +1170,7 @@
__asm__
(
- "shrdl %b[Bit], %%eax, %%edx; shrl %b[Bit], %%eax" :
+ "shrdl %b[Bit], %%edx, %%eax; shrl %b[Bit], %%edx" :
"+A" (retval) :
[Bit] "Nc" ((unsigned char)((unsigned long)Bit) & 0xFF)
);
@@ -1052,7 +1203,7 @@
__INTRIN_INLINE unsigned __int64 _byteswap_uint64(unsigned __int64 value)
{
union {
- __int64 int64part;
+ unsigned __int64 int64part;
struct {
unsigned long lowpart;
unsigned long hipart;
@@ -1067,6 +1218,15 @@
#endif
/*** 64-bit math ***/
+
+__INTRIN_INLINE long long __emul(const int a, const int b);
+__INTRIN_INLINE unsigned long long __emulu(const unsigned int a, const unsigned int b);
+#ifdef _M_AMD64
+__INTRIN_INLINE __int64 __mulh(__int64 a, __int64 b);
+__INTRIN_INLINE unsigned __int64 __umulh(unsigned __int64 a, unsigned __int64 b);
+#endif
+
+
__INTRIN_INLINE long long __emul(const int a, const int b)
{
long long retval;
@@ -1100,6 +1260,27 @@
#endif
/*** Port I/O ***/
+
+__INTRIN_INLINE unsigned char __inbyte(const unsigned short Port);
+__INTRIN_INLINE unsigned short __inword(const unsigned short Port);
+__INTRIN_INLINE unsigned long __indword(const unsigned short Port);
+__INTRIN_INLINE void __inbytestring(unsigned short Port, unsigned char * Buffer,
unsigned long Count);
+__INTRIN_INLINE void __inwordstring(unsigned short Port, unsigned short * Buffer,
unsigned long Count);
+__INTRIN_INLINE void __indwordstring(unsigned short Port, unsigned long * Buffer,
unsigned long Count);
+__INTRIN_INLINE void __outbyte(unsigned short const Port, const unsigned char Data);
+__INTRIN_INLINE void __outword(unsigned short const Port, const unsigned short Data);
+__INTRIN_INLINE void __outdword(unsigned short const Port, const unsigned long Data);
+__INTRIN_INLINE void __outbytestring(unsigned short const Port, const unsigned char *
const Buffer, const unsigned long Count);
+__INTRIN_INLINE void __outwordstring(unsigned short const Port, const unsigned short *
const Buffer, const unsigned long Count);
+__INTRIN_INLINE void __outdwordstring(unsigned short const Port, const unsigned long *
const Buffer, const unsigned long Count);
+__INTRIN_INLINE int _inp(unsigned short Port);
+__INTRIN_INLINE unsigned short _inpw(unsigned short Port);
+__INTRIN_INLINE unsigned long _inpd(unsigned short Port);
+__INTRIN_INLINE int _outp(unsigned short Port, int databyte);
+__INTRIN_INLINE unsigned short _outpw(unsigned short Port, unsigned short dataword);
+__INTRIN_INLINE unsigned long _outpd(unsigned short Port, unsigned long dataword);
+
+
__INTRIN_INLINE unsigned char __inbyte(const unsigned short Port)
{
unsigned char byte;
@@ -1201,7 +1382,7 @@
__INTRIN_INLINE int _outp(unsigned short Port, int databyte)
{
- __outbyte(Port, databyte);
+ __outbyte(Port, (unsigned char)databyte);
return databyte;
}
@@ -1219,6 +1400,13 @@
/*** System information ***/
+
+__INTRIN_INLINE void __cpuid(int CPUInfo [], const int InfoType);
+__INTRIN_INLINE unsigned long long __rdtsc(void);
+__INTRIN_INLINE void __writeeflags(uintptr_t Value);
+__INTRIN_INLINE uintptr_t __readeflags(void);
+
+
__INTRIN_INLINE void __cpuid(int CPUInfo[], const int InfoType)
{
__asm__ __volatile__("cpuid" : "=a" (CPUInfo[0]), "=b"
(CPUInfo[1]), "=c" (CPUInfo[2]), "=d" (CPUInfo[3]) : "a"
(InfoType));
@@ -1250,9 +1438,16 @@
}
/*** Interrupts ***/
+
+__INTRIN_INLINE void __int2c(void);
+__INTRIN_INLINE void _disable(void);
+__INTRIN_INLINE void _enable(void);
+__INTRIN_INLINE void __halt(void);
+
#ifdef __clang__
#define __debugbreak() __asm__("int $3")
#else
+__INTRIN_INLINE void __debugbreak(void);
__INTRIN_INLINE void __debugbreak(void)
{
__asm__("int $3");
@@ -1281,7 +1476,34 @@
/*** Protected memory management ***/
+__INTRIN_INLINE void __invlpg(void * const Address);
#ifdef _M_AMD64
+__INTRIN_INLINE void __writecr0(const unsigned __int64 Data);
+__INTRIN_INLINE void __writecr3(const unsigned __int64 Data);
+__INTRIN_INLINE void __writecr4(const unsigned __int64 Data);
+__INTRIN_INLINE void __writecr8(const unsigned __int64 Data);
+__INTRIN_INLINE unsigned __int64 __readcr0(void);
+__INTRIN_INLINE unsigned __int64 __readcr2(void);
+__INTRIN_INLINE unsigned __int64 __readcr3(void);
+__INTRIN_INLINE unsigned __int64 __readcr4(void);
+__INTRIN_INLINE unsigned __int64 __readcr8(void);
+__INTRIN_INLINE unsigned __int64 __readdr(unsigned int reg);
+__INTRIN_INLINE void __writedr(unsigned reg, unsigned __int64 value);
+#else /* _M_AMD64 */
+__INTRIN_INLINE void __writecr0(const unsigned int Data);
+__INTRIN_INLINE void __writecr3(const unsigned int Data);
+__INTRIN_INLINE void __writecr4(const unsigned int Data);
+__INTRIN_INLINE unsigned long __readcr0(void);
+__INTRIN_INLINE unsigned long __readcr2(void);
+__INTRIN_INLINE unsigned long __readcr3(void);
+__INTRIN_INLINE unsigned long __readcr4(void);
+__INTRIN_INLINE unsigned int __readdr(unsigned int reg);
+__INTRIN_INLINE void __writedr(unsigned reg, unsigned int value);
+#endif /* _M_AMD64 */
+
+
+#ifdef _M_AMD64
+
__INTRIN_INLINE void __writecr0(const unsigned __int64 Data)
{
__asm__("mov %[Data], %%cr0" : : [Data] "r" (Data) :
"memory");
@@ -1336,7 +1558,9 @@
__asm__ __volatile__("movq %%cr8, %q[value]" : [value] "=r"
(value));
return value;
}
-#else
+
+#else /* _M_AMD64 */
+
__INTRIN_INLINE void __writecr0(const unsigned int Data)
{
__asm__("mov %[Data], %%cr0" : : [Data] "r" (Data) :
"memory");
@@ -1379,9 +1603,11 @@
__asm__ __volatile__("mov %%cr4, %[value]" : [value] "=r"
(value));
return value;
}
-#endif
+
+#endif /* _M_AMD64 */
#ifdef _M_AMD64
+
__INTRIN_INLINE unsigned __int64 __readdr(unsigned int reg)
{
unsigned __int64 value;
@@ -1445,7 +1671,9 @@
break;
}
}
-#else
+
+#else /* _M_AMD64 */
+
__INTRIN_INLINE unsigned int __readdr(unsigned int reg)
{
unsigned int value;
@@ -1509,7 +1737,8 @@
break;
}
}
-#endif
+
+#endif /* _M_AMD64 */
__INTRIN_INLINE void __invlpg(void * const Address)
{
@@ -1518,6 +1747,16 @@
/*** System operations ***/
+
+__INTRIN_INLINE unsigned long long __readmsr(const int reg);
+__INTRIN_INLINE void __writemsr(const unsigned long Register, const unsigned long long
Value);
+__INTRIN_INLINE unsigned long long __readpmc(const int counter);
+__INTRIN_INLINE unsigned long __segmentlimit(const unsigned long a);
+__INTRIN_INLINE void __wbinvd(void);
+__INTRIN_INLINE void __lidt(void *Source);
+__INTRIN_INLINE void __sidt(void *Destination);
+
+
__INTRIN_INLINE unsigned long long __readmsr(const int reg)
{
#ifdef _M_AMD64
@@ -1572,6 +1811,9 @@
/*** Misc operations ***/
+__INTRIN_INLINE void _mm_pause(void);
+__INTRIN_INLINE void __nop(void);
+
__INTRIN_INLINE void _mm_pause(void)
{
__asm__ __volatile__("pause" : : : "memory");