Author: tkreuzer
Date: Sat Mar 7 13:22:42 2015
New Revision: 66598
URL:
http://svn.reactos.org/svn/reactos?rev=66598&view=rev
Log:
[INTRIN]
Add some missing intrinsics for GCC (_InterlockedExchangeAdd8, __incgsqword,
_BitScanForward64, _BitScanReverse64, _bittest64, _bittestandset64, _bittestandreset64,
_bittestandcomplement64, _rotr64, _lrotl, _lrotr, __lzcnt, __lzcnt16, __popcnt,
__popcnt16, __lzcnt64, __popcnt64, __cpuidex, __ud2, __writecr8/__readcr8 (x86))
Remove useless top-level const modifiers from parameters (they only tell the
compiler that the variable cannot be modified inside the function body; they do
not tell the compiler whether the argument is a compile-time constant!)
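As a rough illustration of the const point and of two of the newly added
intrinsics (a hypothetical caller, not part of this commit; the helper names
are made up for the example):

    #include <intrin.h>

    /* 'value' is a plain runtime argument; declaring it 'const char value'
       would only forbid modifying it inside the body - it would not make it
       a compile-time constant or change the generated code. */
    static char bump_by(volatile char *counter, char value)
    {
        /* returns the previous value, like the other ExchangeAdd variants */
        return _InterlockedExchangeAdd8(counter, value);
    }

    /* x86-64 only: index of the lowest set bit, or 64 if none is set */
    static unsigned long lowest_bit(unsigned long long mask)
    {
        unsigned long index;
        return _BitScanForward64(&index, mask) ? index : 64;
    }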
Modified:
trunk/reactos/include/crt/intrin.h
trunk/reactos/include/crt/mingw32/intrin_x86.h
Modified: trunk/reactos/include/crt/intrin.h
URL:
http://svn.reactos.org/svn/reactos/trunk/reactos/include/crt/intrin.h?rev=6…
==============================================================================
--- trunk/reactos/include/crt/intrin.h [iso-8859-1] (original)
+++ trunk/reactos/include/crt/intrin.h [iso-8859-1] Sat Mar 7 13:22:42 2015
@@ -581,6 +581,7 @@
unsigned long __readcr2(void);
unsigned long __readcr3(void);
unsigned long __readcr4(void);
+unsigned long __readcr8(void);
unsigned int __readdr(unsigned int);
unsigned int __readeflags(void);
unsigned char __readfsbyte(unsigned long);
@@ -589,6 +590,7 @@
void __writecr0(unsigned int);
void __writecr3(unsigned int);
void __writecr4(unsigned int);
+void __writecr8(unsigned int);
void __writedr(unsigned int, unsigned int);
void __writeeflags(unsigned int);
void __writefsbyte(unsigned long, unsigned char);
Modified: trunk/reactos/include/crt/mingw32/intrin_x86.h
URL:
http://svn.reactos.org/svn/reactos/trunk/reactos/include/crt/mingw32/intrin…
==============================================================================
--- trunk/reactos/include/crt/mingw32/intrin_x86.h [iso-8859-1] (original)
+++ trunk/reactos/include/crt/mingw32/intrin_x86.h [iso-8859-1] Sat Mar 7 13:22:42 2015
@@ -70,6 +70,7 @@
#endif
/*** memcopy must be memmove ***/
+void* memmove(void* dest, const void* source, size_t num);
__INTRIN_INLINE void* memcpy(void* dest, const void* source, size_t num)
{
return memmove(dest, source, num);
@@ -129,38 +130,38 @@
#if (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__) > 40100
-__INTRIN_INLINE char _InterlockedCompareExchange8(volatile char * const Destination, const char Exchange, const char Comperand)
+__INTRIN_INLINE char _InterlockedCompareExchange8(volatile char * Destination, char Exchange, char Comperand)
{
return __sync_val_compare_and_swap(Destination, Comperand, Exchange);
}
-__INTRIN_INLINE short _InterlockedCompareExchange16(volatile short * const Destination, const short Exchange, const short Comperand)
+__INTRIN_INLINE short _InterlockedCompareExchange16(volatile short * Destination, short Exchange, short Comperand)
{
return __sync_val_compare_and_swap(Destination, Comperand, Exchange);
}
#ifndef __clang__
-__INTRIN_INLINE long _InterlockedCompareExchange(volatile long * const Destination, const long Exchange, const long Comperand)
+__INTRIN_INLINE long _InterlockedCompareExchange(volatile long * Destination, long Exchange, long Comperand)
{
return __sync_val_compare_and_swap(Destination, Comperand, Exchange);
}
-__INTRIN_INLINE void * _InterlockedCompareExchangePointer(void * volatile * const Destination, void * const Exchange, void * const Comperand)
+__INTRIN_INLINE void * _InterlockedCompareExchangePointer(void * volatile * Destination, void * Exchange, void * Comperand)
{
return (void *)__sync_val_compare_and_swap(Destination, Comperand, Exchange);
}
#endif
-__INTRIN_INLINE char _InterlockedExchange8(volatile char * const Target, const char Value)
+__INTRIN_INLINE char _InterlockedExchange8(volatile char * Target, char Value)
{
/* NOTE: __sync_lock_test_and_set would be an acquire barrier, so we force a full
barrier */
__sync_synchronize();
return __sync_lock_test_and_set(Target, Value);
}
-__INTRIN_INLINE short _InterlockedExchange16(volatile short * const Target, const short Value)
+__INTRIN_INLINE short _InterlockedExchange16(volatile short * Target, short Value)
{
/* NOTE: __sync_lock_test_and_set would be an acquire barrier, so we force a full
barrier */
__sync_synchronize();
@@ -169,14 +170,14 @@
#ifndef __clang__
-__INTRIN_INLINE long _InterlockedExchange(volatile long * const Target, const long Value)
+__INTRIN_INLINE long _InterlockedExchange(volatile long * Target, long Value)
{
/* NOTE: __sync_lock_test_and_set would be an acquire barrier, so we force a full
barrier */
__sync_synchronize();
return __sync_lock_test_and_set(Target, Value);
}
-__INTRIN_INLINE void * _InterlockedExchangePointer(void * volatile * const Target, void * const Value)
+__INTRIN_INLINE void * _InterlockedExchangePointer(void * volatile * Target, void * Value)
{
/* NOTE: __sync_lock_test_and_set would be an acquire barrier, so we force a full
barrier */
__sync_synchronize();
@@ -185,8 +186,8 @@
#endif
-#if defined(_M_AMD64)
-__INTRIN_INLINE long long _InterlockedExchange64(volatile long long * const Target, const long long Value)
+#if defined(__x86_64__)
+__INTRIN_INLINE long long _InterlockedExchange64(volatile long long * Target, long long Value)
{
/* NOTE: __sync_lock_test_and_set would be an acquire barrier, so we force a full
barrier */
__sync_synchronize();
@@ -194,120 +195,125 @@
}
#endif
-__INTRIN_INLINE short _InterlockedExchangeAdd16(volatile short * const Addend, const short Value)
+__INTRIN_INLINE char _InterlockedExchangeAdd8(char volatile * Addend, char Value)
{
return __sync_fetch_and_add(Addend, Value);
}
+__INTRIN_INLINE short _InterlockedExchangeAdd16(volatile short * Addend, short Value)
+{
+ return __sync_fetch_and_add(Addend, Value);
+}
+
#ifndef __clang__
-__INTRIN_INLINE long _InterlockedExchangeAdd(volatile long * const Addend, const long Value)
+__INTRIN_INLINE long _InterlockedExchangeAdd(volatile long * Addend, long Value)
{
return __sync_fetch_and_add(Addend, Value);
}
#endif
-#if defined(_M_AMD64)
-__INTRIN_INLINE long long _InterlockedExchangeAdd64(volatile long long * const Addend, const long long Value)
+#if defined(__x86_64__)
+__INTRIN_INLINE long long _InterlockedExchangeAdd64(volatile long long * Addend, long long Value)
{
return __sync_fetch_and_add(Addend, Value);
}
#endif
-__INTRIN_INLINE char _InterlockedAnd8(volatile char * const value, const char mask)
+__INTRIN_INLINE char _InterlockedAnd8(volatile char * value, char mask)
{
return __sync_fetch_and_and(value, mask);
}
-__INTRIN_INLINE short _InterlockedAnd16(volatile short * const value, const short mask)
+__INTRIN_INLINE short _InterlockedAnd16(volatile short * value, short mask)
{
return __sync_fetch_and_and(value, mask);
}
-__INTRIN_INLINE long _InterlockedAnd(volatile long * const value, const long mask)
+__INTRIN_INLINE long _InterlockedAnd(volatile long * value, long mask)
{
return __sync_fetch_and_and(value, mask);
}
-#if defined(_M_AMD64)
-__INTRIN_INLINE long long _InterlockedAnd64(volatile long long * const value, const long long mask)
+#if defined(__x86_64__)
+__INTRIN_INLINE long long _InterlockedAnd64(volatile long long * value, long long mask)
{
return __sync_fetch_and_and(value, mask);
}
#endif
-__INTRIN_INLINE char _InterlockedOr8(volatile char * const value, const char mask)
+__INTRIN_INLINE char _InterlockedOr8(volatile char * value, char mask)
{
return __sync_fetch_and_or(value, mask);
}
-__INTRIN_INLINE short _InterlockedOr16(volatile short * const value, const short mask)
+__INTRIN_INLINE short _InterlockedOr16(volatile short * value, short mask)
{
return __sync_fetch_and_or(value, mask);
}
-__INTRIN_INLINE long _InterlockedOr(volatile long * const value, const long mask)
+__INTRIN_INLINE long _InterlockedOr(volatile long * value, long mask)
{
return __sync_fetch_and_or(value, mask);
}
-#if defined(_M_AMD64)
-__INTRIN_INLINE long long _InterlockedOr64(volatile long long * const value, const long long mask)
+#if defined(__x86_64__)
+__INTRIN_INLINE long long _InterlockedOr64(volatile long long * value, long long mask)
{
return __sync_fetch_and_or(value, mask);
}
#endif
-__INTRIN_INLINE char _InterlockedXor8(volatile char * const value, const char mask)
+__INTRIN_INLINE char _InterlockedXor8(volatile char * value, char mask)
{
return __sync_fetch_and_xor(value, mask);
}
-__INTRIN_INLINE short _InterlockedXor16(volatile short * const value, const short mask)
+__INTRIN_INLINE short _InterlockedXor16(volatile short * value, short mask)
{
return __sync_fetch_and_xor(value, mask);
}
-__INTRIN_INLINE long _InterlockedXor(volatile long * const value, const long mask)
+__INTRIN_INLINE long _InterlockedXor(volatile long * value, long mask)
{
return __sync_fetch_and_xor(value, mask);
}
-#if defined(_M_AMD64)
-__INTRIN_INLINE long long _InterlockedXor64(volatile long long * const value, const long long mask)
+#if defined(__x86_64__)
+__INTRIN_INLINE long long _InterlockedXor64(volatile long long * value, long long mask)
{
return __sync_fetch_and_xor(value, mask);
}
#endif
#ifndef __clang__
-__INTRIN_INLINE long _InterlockedDecrement(volatile long * const lpAddend)
+__INTRIN_INLINE long _InterlockedDecrement(volatile long * lpAddend)
{
return __sync_sub_and_fetch(lpAddend, 1);
}
-__INTRIN_INLINE long _InterlockedIncrement(volatile long * const lpAddend)
+__INTRIN_INLINE long _InterlockedIncrement(volatile long * lpAddend)
{
return __sync_add_and_fetch(lpAddend, 1);
}
#endif
-__INTRIN_INLINE short _InterlockedDecrement16(volatile short * const lpAddend)
+__INTRIN_INLINE short _InterlockedDecrement16(volatile short * lpAddend)
{
return __sync_sub_and_fetch(lpAddend, 1);
}
-__INTRIN_INLINE short _InterlockedIncrement16(volatile short * const lpAddend)
+__INTRIN_INLINE short _InterlockedIncrement16(volatile short * lpAddend)
{
return __sync_add_and_fetch(lpAddend, 1);
}
-#if defined(_M_AMD64)
-__INTRIN_INLINE long long _InterlockedDecrement64(volatile long long * const lpAddend)
+#if defined(__x86_64__)
+__INTRIN_INLINE long long _InterlockedDecrement64(volatile long long * lpAddend)
{
return __sync_sub_and_fetch(lpAddend, 1);
}
-__INTRIN_INLINE long long _InterlockedIncrement64(volatile long long * const lpAddend)
+__INTRIN_INLINE long long _InterlockedIncrement64(volatile long long * lpAddend)
{
return __sync_add_and_fetch(lpAddend, 1);
}
@@ -315,77 +321,84 @@
#else /* (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__) > 40100 */
-__INTRIN_INLINE char _InterlockedCompareExchange8(volatile char * const Destination, const char Exchange, const char Comperand)
+__INTRIN_INLINE char _InterlockedCompareExchange8(volatile char * Destination, char Exchange, char Comperand)
{
char retval = Comperand;
__asm__("lock; cmpxchgb %b[Exchange], %[Destination]" : [retval]
"+a" (retval) : [Destination] "m" (*Destination), [Exchange]
"q" (Exchange) : "memory");
return retval;
}
-__INTRIN_INLINE short _InterlockedCompareExchange16(volatile short * const Destination, const short Exchange, const short Comperand)
+__INTRIN_INLINE short _InterlockedCompareExchange16(volatile short * Destination, short Exchange, short Comperand)
{
short retval = Comperand;
__asm__("lock; cmpxchgw %w[Exchange], %[Destination]" : [retval]
"+a" (retval) : [Destination] "m" (*Destination), [Exchange]
"q" (Exchange): "memory");
return retval;
}
-__INTRIN_INLINE long _InterlockedCompareExchange(volatile long * const Destination, const long Exchange, const long Comperand)
+__INTRIN_INLINE long _InterlockedCompareExchange(volatile long * Destination, long Exchange, long Comperand)
{
long retval = Comperand;
__asm__("lock; cmpxchgl %k[Exchange], %[Destination]" : [retval]
"+a" (retval) : [Destination] "m" (*Destination), [Exchange]
"q" (Exchange): "memory");
return retval;
}
-__INTRIN_INLINE void * _InterlockedCompareExchangePointer(void * volatile * const Destination, void * const Exchange, void * const Comperand)
+__INTRIN_INLINE void * _InterlockedCompareExchangePointer(void * volatile * Destination, void * Exchange, void * Comperand)
{
void * retval = (void *)Comperand;
__asm__("lock; cmpxchgl %k[Exchange], %[Destination]" : [retval]
"=a" (retval) : "[retval]" (retval), [Destination] "m"
(*Destination), [Exchange] "q" (Exchange) : "memory");
return retval;
}
-__INTRIN_INLINE char _InterlockedExchange8(volatile char * const Target, const char Value)
+__INTRIN_INLINE char _InterlockedExchange8(volatile char * Target, char Value)
{
char retval = Value;
__asm__("xchgb %[retval], %[Target]" : [retval] "+r" (retval) :
[Target] "m" (*Target) : "memory");
return retval;
}
-__INTRIN_INLINE short _InterlockedExchange16(volatile short * const Target, const short Value)
+__INTRIN_INLINE short _InterlockedExchange16(volatile short * Target, short Value)
{
short retval = Value;
__asm__("xchgw %[retval], %[Target]" : [retval] "+r" (retval) :
[Target] "m" (*Target) : "memory");
return retval;
}
-__INTRIN_INLINE long _InterlockedExchange(volatile long * const Target, const long Value)
+__INTRIN_INLINE long _InterlockedExchange(volatile long * Target, long Value)
{
long retval = Value;
__asm__("xchgl %[retval], %[Target]" : [retval] "+r" (retval) :
[Target] "m" (*Target) : "memory");
return retval;
}
-__INTRIN_INLINE void * _InterlockedExchangePointer(void * volatile * const Target, void * const Value)
+__INTRIN_INLINE void * _InterlockedExchangePointer(void * volatile * Target, void * Value)
{
void * retval = Value;
__asm__("xchgl %[retval], %[Target]" : [retval] "+r" (retval) :
[Target] "m" (*Target) : "memory");
return retval;
}
-__INTRIN_INLINE long _InterlockedExchangeAdd16(volatile short * const Addend, const short Value)
-{
- long retval = Value;
+__INTRIN_INLINE char _InterlockedExchangeAdd8(char volatile * Addend, char Value)
+{
+ char retval = Value;
+ __asm__("lock; xaddb %[retval], %[Addend]" : [retval] "+r" (retval)
: [Addend] "m" (*Addend) : "memory");
+ return retval;
+}
+
+__INTRIN_INLINE short _InterlockedExchangeAdd16(volatile short * Addend, short Value)
+{
+ short retval = Value;
__asm__("lock; xaddw %[retval], %[Addend]" : [retval] "+r" (retval)
: [Addend] "m" (*Addend) : "memory");
return retval;
}
-__INTRIN_INLINE long _InterlockedExchangeAdd(volatile long * const Addend, const long Value)
+__INTRIN_INLINE long _InterlockedExchangeAdd(volatile long * Addend, long Value)
{
long retval = Value;
__asm__("lock; xaddl %[retval], %[Addend]" : [retval] "+r" (retval)
: [Addend] "m" (*Addend) : "memory");
return retval;
}
-__INTRIN_INLINE char _InterlockedAnd8(volatile char * const value, const char mask)
+__INTRIN_INLINE char _InterlockedAnd8(volatile char * value, char mask)
{
char x;
char y;
@@ -402,7 +415,7 @@
return y;
}
-__INTRIN_INLINE short _InterlockedAnd16(volatile short * const value, const short mask)
+__INTRIN_INLINE short _InterlockedAnd16(volatile short * value, short mask)
{
short x;
short y;
@@ -419,7 +432,7 @@
return y;
}
-__INTRIN_INLINE long _InterlockedAnd(volatile long * const value, const long mask)
+__INTRIN_INLINE long _InterlockedAnd(volatile long * value, long mask)
{
long x;
long y;
@@ -436,7 +449,7 @@
return y;
}
-__INTRIN_INLINE char _InterlockedOr8(volatile char * const value, const char mask)
+__INTRIN_INLINE char _InterlockedOr8(volatile char * value, char mask)
{
char x;
char y;
@@ -453,7 +466,7 @@
return y;
}
-__INTRIN_INLINE short _InterlockedOr16(volatile short * const value, const short mask)
+__INTRIN_INLINE short _InterlockedOr16(volatile short * value, short mask)
{
short x;
short y;
@@ -470,7 +483,7 @@
return y;
}
-__INTRIN_INLINE long _InterlockedOr(volatile long * const value, const long mask)
+__INTRIN_INLINE long _InterlockedOr(volatile long * value, long mask)
{
long x;
long y;
@@ -487,7 +500,7 @@
return y;
}
-__INTRIN_INLINE char _InterlockedXor8(volatile char * const value, const char mask)
+__INTRIN_INLINE char _InterlockedXor8(volatile char * value, char mask)
{
char x;
char y;
@@ -504,7 +517,7 @@
return y;
}
-__INTRIN_INLINE short _InterlockedXor16(volatile short * const value, const short mask)
+__INTRIN_INLINE short _InterlockedXor16(volatile short * value, short mask)
{
short x;
short y;
@@ -521,7 +534,7 @@
return y;
}
-__INTRIN_INLINE long _InterlockedXor(volatile long * const value, const long mask)
+__INTRIN_INLINE long _InterlockedXor(volatile long * value, long mask)
{
long x;
long y;
@@ -538,33 +551,33 @@
return y;
}
-__INTRIN_INLINE long _InterlockedDecrement(volatile long * const lpAddend)
+__INTRIN_INLINE long _InterlockedDecrement(volatile long * lpAddend)
{
return _InterlockedExchangeAdd(lpAddend, -1) - 1;
}
-__INTRIN_INLINE long _InterlockedIncrement(volatile long * const lpAddend)
+__INTRIN_INLINE long _InterlockedIncrement(volatile long * lpAddend)
{
return _InterlockedExchangeAdd(lpAddend, 1) + 1;
}
-__INTRIN_INLINE short _InterlockedDecrement16(volatile short * const lpAddend)
+__INTRIN_INLINE short _InterlockedDecrement16(volatile short * lpAddend)
{
return _InterlockedExchangeAdd16(lpAddend, -1) - 1;
}
-__INTRIN_INLINE short _InterlockedIncrement16(volatile short * const lpAddend)
+__INTRIN_INLINE short _InterlockedIncrement16(volatile short * lpAddend)
{
return _InterlockedExchangeAdd16(lpAddend, 1) + 1;
}
-#if defined(_M_AMD64)
-__INTRIN_INLINE long long _InterlockedDecrement64(volatile long long * const lpAddend)
+#if defined(__x86_64__)
+__INTRIN_INLINE long long _InterlockedDecrement64(volatile long long * lpAddend)
{
return _InterlockedExchangeAdd64(lpAddend, -1) - 1;
}
-__INTRIN_INLINE long long _InterlockedIncrement64(volatile long long * const lpAddend)
+__INTRIN_INLINE long long _InterlockedIncrement64(volatile long long * lpAddend)
{
return _InterlockedExchangeAdd64(lpAddend, 1) + 1;
}
@@ -574,14 +587,14 @@
#if (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__) > 40100 &&
defined(__x86_64__)
-__INTRIN_INLINE long long _InterlockedCompareExchange64(volatile long long * const Destination, const long long Exchange, const long long Comperand)
+__INTRIN_INLINE long long _InterlockedCompareExchange64(volatile long long * Destination, long long Exchange, long long Comperand)
{
return __sync_val_compare_and_swap(Destination, Comperand, Exchange);
}
#else
-__INTRIN_INLINE long long _InterlockedCompareExchange64(volatile long long * const Destination, const long long Exchange, const long long Comperand)
+__INTRIN_INLINE long long _InterlockedCompareExchange64(volatile long long * Destination, long long Exchange, long long Comperand)
{
long long retval = Comperand;
@@ -600,7 +613,8 @@
#endif
-__INTRIN_INLINE long _InterlockedAddLargeStatistic(volatile long long * const Addend, const long Value)
+#ifdef __i386__
+__INTRIN_INLINE long _InterlockedAddLargeStatistic(volatile long long * Addend, long Value)
{
__asm__
(
@@ -615,16 +629,17 @@
return Value;
}
-
-__INTRIN_INLINE unsigned char _interlockedbittestandreset(volatile long * a, const long b)
+#endif /* __i386__ */
+
+__INTRIN_INLINE unsigned char _interlockedbittestandreset(volatile long * a, long b)
{
unsigned char retval;
__asm__("lock; btrl %[b], %[a]; setb %b[retval]" : [retval] "=q"
(retval), [a] "+m" (*a) : [b] "Ir" (b) : "memory");
return retval;
}
-#if defined(_M_AMD64)
-__INTRIN_INLINE unsigned char _interlockedbittestandreset64(volatile long long * a, const long long b)
+#if defined(__x86_64__)
+__INTRIN_INLINE unsigned char _interlockedbittestandreset64(volatile long long * a, long long b)
{
unsigned char retval;
__asm__("lock; btrq %[b], %[a]; setb %b[retval]" : [retval] "=r"
(retval), [a] "+m" (*a) : [b] "Ir" (b) : "memory");
@@ -632,15 +647,15 @@
}
#endif
-__INTRIN_INLINE unsigned char _interlockedbittestandset(volatile long * a, const long b)
+__INTRIN_INLINE unsigned char _interlockedbittestandset(volatile long * a, long b)
{
unsigned char retval;
__asm__("lock; btsl %[b], %[a]; setc %b[retval]" : [retval] "=q"
(retval), [a] "+m" (*a) : [b] "Ir" (b) : "memory");
return retval;
}
-#if defined(_M_AMD64)
-__INTRIN_INLINE unsigned char _interlockedbittestandset64(volatile long long * a, const long long b)
+#if defined(__x86_64__)
+__INTRIN_INLINE unsigned char _interlockedbittestandset64(volatile long long * a, long long b)
{
unsigned char retval;
__asm__("lock; btsq %[b], %[a]; setc %b[retval]" : [retval] "=r"
(retval), [a] "+m" (*a) : [b] "Ir" (b) : "memory");
@@ -651,7 +666,7 @@
/*** String operations ***/
/* NOTE: we don't set a memory clobber in the __stosX functions because Visual C++
doesn't */
-__INTRIN_INLINE void __stosb(unsigned char * Dest, const unsigned char Data, size_t Count)
+__INTRIN_INLINE void __stosb(unsigned char * Dest, unsigned char Data, size_t Count)
{
__asm__ __volatile__
(
@@ -661,7 +676,7 @@
);
}
-__INTRIN_INLINE void __stosw(unsigned short * Dest, const unsigned short Data, size_t Count)
+__INTRIN_INLINE void __stosw(unsigned short * Dest, unsigned short Data, size_t Count)
{
__asm__ __volatile__
(
@@ -671,7 +686,7 @@
);
}
-__INTRIN_INLINE void __stosd(unsigned long * Dest, const unsigned long Data, size_t Count)
+__INTRIN_INLINE void __stosd(unsigned long * Dest, unsigned long Data, size_t Count)
{
__asm__ __volatile__
(
@@ -681,8 +696,8 @@
);
}
-#ifdef _M_AMD64
-__INTRIN_INLINE void __stosq(unsigned __int64 * Dest, const unsigned __int64 Data, size_t Count)
+#ifdef __x86_64__
+__INTRIN_INLINE void __stosq(unsigned long long * Dest, unsigned long long Data, size_t Count)
{
__asm__ __volatile__
(
@@ -723,7 +738,7 @@
);
}
-#ifdef _M_AMD64
+#ifdef __x86_64__
__INTRIN_INLINE void __movsq(unsigned long * Destination, const unsigned long * Source,
size_t Count)
{
__asm__ __volatile__
@@ -735,7 +750,7 @@
}
#endif
-#if defined(_M_AMD64)
+#if defined(__x86_64__)
/*** GS segment addressing ***/
@@ -754,7 +769,7 @@
__asm__ __volatile__("movl %k[Data], %%gs:%a[Offset]" : : [Offset]
"ir" (Offset), [Data] "ir" (Data) : "memory");
}
-__INTRIN_INLINE void __writegsqword(unsigned long Offset, unsigned __int64 Data)
+__INTRIN_INLINE void __writegsqword(unsigned long Offset, unsigned long long Data)
{
__asm__ __volatile__("movq %q[Data], %%gs:%a[Offset]" : : [Offset]
"ir" (Offset), [Data] "ir" (Data) : "memory");
}
@@ -780,9 +795,9 @@
return value;
}
-__INTRIN_INLINE unsigned __int64 __readgsqword(unsigned long Offset)
-{
- unsigned __int64 value;
+__INTRIN_INLINE unsigned long long __readgsqword(unsigned long Offset)
+{
+ unsigned long long value;
__asm__ __volatile__("movq %%gs:%a[Offset], %q[value]" : [value]
"=r" (value) : [Offset] "ir" (Offset));
return value;
}
@@ -802,6 +817,11 @@
__asm__ __volatile__("incl %%gs:%a[Offset]" : : [Offset] "ir"
(Offset) : "memory");
}
+__INTRIN_INLINE void __incgsqword(unsigned long Offset)
+{
+ __asm__ __volatile__("incq %%gs:%a[Offset]" : : [Offset] "ir"
(Offset) : "memory");
+}
+
__INTRIN_INLINE void __addgsbyte(unsigned long Offset, unsigned char Data)
{
__asm__ __volatile__("addb %b[Data], %%gs:%a[Offset]" : : [Offset]
"ir" (Offset), [Data] "ir" (Data) : "memory");
@@ -817,12 +837,12 @@
__asm__ __volatile__("addl %k[Data], %%gs:%a[Offset]" : : [Offset]
"ir" (Offset), [Data] "ir" (Data) : "memory");
}
-__INTRIN_INLINE void __addgsqword(unsigned long Offset, unsigned __int64 Data)
+__INTRIN_INLINE void __addgsqword(unsigned long Offset, unsigned long long Data)
{
__asm__ __volatile__("addq %k[Data], %%gs:%a[Offset]" : : [Offset]
"ir" (Offset), [Data] "ir" (Data) : "memory");
}
-#else /* defined(_M_AMD64) */
+#else /* defined(__x86_64__) */
/*** FS segment addressing ***/
@@ -902,25 +922,25 @@
__asm__ __volatile__("addl %k[Data], %%fs:%a[Offset]" : : [Offset]
"ir" (Offset), [Data] "iq" (Data) : "memory");
}
-#endif /* defined(_M_AMD64) */
+#endif /* defined(__x86_64__) */
/*** Bit manipulation ***/
-__INTRIN_INLINE unsigned char _BitScanForward(unsigned long * const Index, const unsigned long Mask)
+__INTRIN_INLINE unsigned char _BitScanForward(unsigned long * Index, unsigned long Mask)
{
__asm__("bsfl %[Mask], %[Index]" : [Index] "=r" (*Index) : [Mask]
"mr" (Mask));
return Mask ? 1 : 0;
}
-__INTRIN_INLINE unsigned char _BitScanReverse(unsigned long * const Index, const unsigned long Mask)
+__INTRIN_INLINE unsigned char _BitScanReverse(unsigned long * Index, unsigned long Mask)
{
__asm__("bsrl %[Mask], %[Index]" : [Index] "=r" (*Index) : [Mask]
"mr" (Mask));
return Mask ? 1 : 0;
}
/* NOTE: again, the bizarre implementation follows Visual C++ */
-__INTRIN_INLINE unsigned char _bittest(const long * const a, const long b)
+__INTRIN_INLINE unsigned char _bittest(const long * a, long b)
{
unsigned char retval;
@@ -932,8 +952,20 @@
return retval;
}
-#ifdef _M_AMD64
-__INTRIN_INLINE unsigned char _bittest64(const __int64 * const a, const __int64 b)
+#ifdef __x86_64__
+__INTRIN_INLINE unsigned char _BitScanForward64(unsigned long * Index, unsigned long long Mask)
+{
+ __asm__("bsfq %[Mask], %[Index]" : [Index] "=r" (*Index) : [Mask] "mr" (Mask));
+ return Mask ? 1 : 0;
+}
+
+__INTRIN_INLINE unsigned char _BitScanReverse64(unsigned long * Index, unsigned long long Mask)
+{
+ __asm__("bsrq %[Mask], %[Index]" : [Index] "=r" (*Index) : [Mask] "mr" (Mask));
+ return Mask ? 1 : 0;
+}
+
+__INTRIN_INLINE unsigned char _bittest64(const long long * a, long long b)
{
unsigned char retval;
@@ -946,7 +978,7 @@
}
#endif
-__INTRIN_INLINE unsigned char _bittestandcomplement(long * const a, const long b)
+__INTRIN_INLINE unsigned char _bittestandcomplement(long * a, long b)
{
unsigned char retval;
@@ -958,7 +990,7 @@
return retval;
}
-__INTRIN_INLINE unsigned char _bittestandreset(long * const a, const long b)
+__INTRIN_INLINE unsigned char _bittestandreset(long * a, long b)
{
unsigned char retval;
@@ -970,7 +1002,7 @@
return retval;
}
-__INTRIN_INLINE unsigned char _bittestandset(long * const a, const long b)
+__INTRIN_INLINE unsigned char _bittestandset(long * a, long b)
{
unsigned char retval;
@@ -982,6 +1014,46 @@
return retval;
}
+#ifdef __x86_64__
+
+__INTRIN_INLINE unsigned char _bittestandset64(long long * a, long long b)
+{
+ unsigned char retval;
+
+ if(__builtin_constant_p(b))
+ __asm__("btsq %[b], %[a]; setb %b[retval]" : [a] "+mr" (*(a + (b / 64))), [retval] "=q" (retval) : [b] "Ir" (b % 64));
+ else
+ __asm__("btsq %[b], %[a]; setb %b[retval]" : [a] "+m" (*a), [retval] "=q" (retval) : [b] "r" (b));
+
+ return retval;
+}
+
+__INTRIN_INLINE unsigned char _bittestandreset64(long long * a, long long b)
+{
+ unsigned char retval;
+
+ if(__builtin_constant_p(b))
+ __asm__("btrq %[b], %[a]; setb %b[retval]" : [a] "+mr" (*(a + (b / 64))), [retval] "=q" (retval) : [b] "Ir" (b % 64));
+ else
+ __asm__("btrq %[b], %[a]; setb %b[retval]" : [a] "+m" (*a), [retval] "=q" (retval) : [b] "r" (b));
+
+ return retval;
+}
+
+__INTRIN_INLINE unsigned char _bittestandcomplement64(long long * a, long long b)
+{
+ unsigned char retval;
+
+ if(__builtin_constant_p(b))
+ __asm__("btcq %[b], %[a]; setb %b[retval]" : [a] "+mr" (*(a + (b / 64))), [retval] "=q" (retval) : [b] "Ir" (b % 64));
+ else
+ __asm__("btcq %[b], %[a]; setb %b[retval]" : [a] "+m" (*a), [retval] "=q" (retval) : [b] "r" (b));
+
+ return retval;
+}
+
+#endif
+
__INTRIN_INLINE unsigned char _rotl8(unsigned char value, unsigned char shift)
{
unsigned char retval;
@@ -998,44 +1070,73 @@
__INTRIN_INLINE unsigned int _rotl(unsigned int value, int shift)
{
+ unsigned int retval;
+ __asm__("roll %b[shift], %k[retval]" : [retval] "=rm" (retval) :
"[retval]" (value), [shift] "Nc" (shift));
+ return retval;
+}
+
+#ifdef __x86_64__
+__INTRIN_INLINE unsigned long long _rotl64(unsigned long long value, int shift)
+{
+ unsigned long long retval;
+ __asm__("rolq %b[shift], %k[retval]" : [retval] "=rm" (retval) :
"[retval]" (value), [shift] "Nc" (shift));
+ return retval;
+}
+#else
+__INTRIN_INLINE unsigned long long _rotl64(unsigned long long value, int shift)
+{
+ /* FIXME: this is probably not optimal */
+ return (value << shift) | (value >> (64 - shift));
+}
+#endif
+
+__INTRIN_INLINE unsigned int _rotr(unsigned int value, int shift)
+{
+ unsigned int retval;
+ __asm__("rorl %b[shift], %k[retval]" : [retval] "=rm" (retval) :
"[retval]" (value), [shift] "Nc" (shift));
+ return retval;
+}
+
+__INTRIN_INLINE unsigned char _rotr8(unsigned char value, unsigned char shift)
+{
+ unsigned char retval;
+ __asm__("rorb %b[shift], %b[retval]" : [retval] "=qm" (retval) :
"[retval]" (value), [shift] "Nc" (shift));
+ return retval;
+}
+
+__INTRIN_INLINE unsigned short _rotr16(unsigned short value, unsigned char shift)
+{
+ unsigned short retval;
+ __asm__("rorw %b[shift], %w[retval]" : [retval] "=rm" (retval) :
"[retval]" (value), [shift] "Nc" (shift));
+ return retval;
+}
+
+#ifdef __x86_64__
+__INTRIN_INLINE unsigned long long _rotr64(unsigned long long value, int shift)
+{
+ unsigned long long retval;
+ __asm__("rorq %b[shift], %k[retval]" : [retval] "=rm" (retval) :
"[retval]" (value), [shift] "Nc" (shift));
+ return retval;
+}
+#else
+__INTRIN_INLINE unsigned long long _rotr64(unsigned long long value, int shift)
+{
+ /* FIXME: this is probably not optimal */
+ return (value >> shift) | (value << (64 - shift));
+}
+#endif
+
+__INTRIN_INLINE unsigned long __cdecl _lrotl(unsigned long value, int shift)
+{
unsigned long retval;
__asm__("roll %b[shift], %k[retval]" : [retval] "=rm" (retval) :
"[retval]" (value), [shift] "Nc" (shift));
return retval;
}
-#ifdef _M_AMD64
-__INTRIN_INLINE unsigned __int64 _rotl64(unsigned __int64 value, int shift)
-{
- unsigned __int64 retval;
- __asm__("rolq %b[shift], %k[retval]" : [retval] "=rm" (retval) :
"[retval]" (value), [shift] "Nc" (shift));
- return retval;
-}
-#else
-__INTRIN_INLINE unsigned __int64 _rotl64(unsigned __int64 value, int shift)
-{
- /* FIXME: this is probably not optimal */
- return (value << shift) | (value >> (64 - shift));
-}
-#endif
-
-__INTRIN_INLINE unsigned int _rotr(unsigned int value, int shift)
+__INTRIN_INLINE unsigned long __cdecl _lrotr(unsigned long value, int shift)
{
unsigned long retval;
__asm__("rorl %b[shift], %k[retval]" : [retval] "=rm" (retval) :
"[retval]" (value), [shift] "Nc" (shift));
- return retval;
-}
-
-__INTRIN_INLINE unsigned char _rotr8(unsigned char value, unsigned char shift)
-{
- unsigned char retval;
- __asm__("rorb %b[shift], %b[retval]" : [retval] "=qm" (retval) :
"[retval]" (value), [shift] "Nc" (shift));
- return retval;
-}
-
-__INTRIN_INLINE unsigned short _rotr16(unsigned short value, unsigned char shift)
-{
- unsigned short retval;
- __asm__("rorw %b[shift], %w[retval]" : [retval] "=rm" (retval) :
"[retval]" (value), [shift] "Nc" (shift));
return retval;
}
@@ -1046,7 +1147,7 @@
just confuses it. Also we declare Bit as an int and then truncate it to
match Visual C++ behavior
*/
-__INTRIN_INLINE unsigned long long __ll_lshift(const unsigned long long Mask, const int Bit)
+__INTRIN_INLINE unsigned long long __ll_lshift(unsigned long long Mask, int Bit)
{
unsigned long long retval = Mask;
@@ -1060,7 +1161,7 @@
return retval;
}
-__INTRIN_INLINE long long __ll_rshift(const long long Mask, const int Bit)
+__INTRIN_INLINE long long __ll_rshift(long long Mask, int Bit)
{
long long retval = Mask;
@@ -1074,7 +1175,7 @@
return retval;
}
-__INTRIN_INLINE unsigned long long __ull_rshift(const unsigned long long Mask, int Bit)
+__INTRIN_INLINE unsigned long long __ull_rshift(unsigned long long Mask, int Bit)
{
unsigned long long retval = Mask;
@@ -1102,18 +1203,18 @@
return retval;
}
-#ifdef _M_AMD64
-__INTRIN_INLINE unsigned __int64 _byteswap_uint64(unsigned __int64 value)
-{
- unsigned __int64 retval;
+#ifdef __x86_64__
+__INTRIN_INLINE unsigned long long _byteswap_uint64(unsigned long long value)
+{
+ unsigned long long retval;
__asm__("bswapq %[retval]" : [retval] "=r" (retval) :
"[retval]" (value));
return retval;
}
#else
-__INTRIN_INLINE unsigned __int64 _byteswap_uint64(unsigned __int64 value)
+__INTRIN_INLINE unsigned long long _byteswap_uint64(unsigned long long value)
{
union {
- unsigned __int64 int64part;
+ unsigned long long int64part;
struct {
unsigned long lowpart;
unsigned long hipart;
@@ -1127,34 +1228,71 @@
}
#endif
+__INTRIN_INLINE unsigned int __lzcnt(unsigned int value)
+{
+ return __builtin_clz(value);
+}
+
+__INTRIN_INLINE unsigned short __lzcnt16(unsigned short value)
+{
+ return __builtin_clz(value);
+}
+
+__INTRIN_INLINE unsigned int __popcnt(unsigned int value)
+{
+ return __builtin_popcount(value);
+}
+
+__INTRIN_INLINE unsigned short __popcnt16(unsigned short value)
+{
+ return __builtin_popcount(value);
+}
+
+#ifdef __x86_64__
+__INTRIN_INLINE unsigned long long __lzcnt64(unsigned long long value)
+{
+ return __builtin_clzll(value);
+}
+
+__INTRIN_INLINE unsigned long long __popcnt64(unsigned long long value)
+{
+ return __builtin_popcountll(value);
+}
+#endif
+
/*** 64-bit math ***/
-__INTRIN_INLINE long long __emul(const int a, const int b)
+__INTRIN_INLINE long long __emul(int a, int b)
{
long long retval;
__asm__("imull %[b]" : "=A" (retval) : [a] "a" (a), [b]
"rm" (b));
return retval;
}
-__INTRIN_INLINE unsigned long long __emulu(const unsigned int a, const unsigned int b)
+__INTRIN_INLINE unsigned long long __emulu(unsigned int a, unsigned int b)
{
unsigned long long retval;
__asm__("mull %[b]" : "=A" (retval) : [a] "a" (a), [b]
"rm" (b));
return retval;
}
-#ifdef _M_AMD64
-
-__INTRIN_INLINE __int64 __mulh(__int64 a, __int64 b)
-{
- __int64 retval;
+__INTRIN_INLINE long long __cdecl _abs64(long long value)
+{
+ return (value >= 0) ? value : -value;
+}
+
+#ifdef __x86_64__
+
+__INTRIN_INLINE long long __mulh(long long a, long long b)
+{
+ long long retval;
__asm__("imulq %[b]" : "=d" (retval) : [a] "a" (a), [b]
"rm" (b));
return retval;
}
-__INTRIN_INLINE unsigned __int64 __umulh(unsigned __int64 a, unsigned __int64 b)
-{
- unsigned __int64 retval;
+__INTRIN_INLINE unsigned long long __umulh(unsigned long long a, unsigned long long b)
+{
+ unsigned long long retval;
__asm__("mulq %[b]" : "=d" (retval) : [a] "a" (a), [b]
"rm" (b));
return retval;
}
@@ -1163,21 +1301,21 @@
/*** Port I/O ***/
-__INTRIN_INLINE unsigned char __inbyte(const unsigned short Port)
+__INTRIN_INLINE unsigned char __inbyte(unsigned short Port)
{
unsigned char byte;
__asm__ __volatile__("inb %w[Port], %b[byte]" : [byte] "=a" (byte) :
[Port] "Nd" (Port));
return byte;
}
-__INTRIN_INLINE unsigned short __inword(const unsigned short Port)
+__INTRIN_INLINE unsigned short __inword(unsigned short Port)
{
unsigned short word;
__asm__ __volatile__("inw %w[Port], %w[word]" : [word] "=a" (word) :
[Port] "Nd" (Port));
return word;
}
-__INTRIN_INLINE unsigned long __indword(const unsigned short Port)
+__INTRIN_INLINE unsigned long __indword(unsigned short Port)
{
unsigned long dword;
__asm__ __volatile__("inl %w[Port], %k[dword]" : [dword] "=a"
(dword) : [Port] "Nd" (Port));
@@ -1217,32 +1355,32 @@
);
}
-__INTRIN_INLINE void __outbyte(unsigned short const Port, const unsigned char Data)
+__INTRIN_INLINE void __outbyte(unsigned short Port, unsigned char Data)
{
__asm__ __volatile__("outb %b[Data], %w[Port]" : : [Port] "Nd"
(Port), [Data] "a" (Data));
}
-__INTRIN_INLINE void __outword(unsigned short const Port, const unsigned short Data)
+__INTRIN_INLINE void __outword(unsigned short Port, unsigned short Data)
{
__asm__ __volatile__("outw %w[Data], %w[Port]" : : [Port] "Nd"
(Port), [Data] "a" (Data));
}
-__INTRIN_INLINE void __outdword(unsigned short const Port, const unsigned long Data)
+__INTRIN_INLINE void __outdword(unsigned short Port, unsigned long Data)
{
__asm__ __volatile__("outl %k[Data], %w[Port]" : : [Port] "Nd"
(Port), [Data] "a" (Data));
}
-__INTRIN_INLINE void __outbytestring(unsigned short Port, unsigned char * const Buffer, const unsigned long Count)
+__INTRIN_INLINE void __outbytestring(unsigned short Port, unsigned char * Buffer, unsigned long Count)
{
__asm__ __volatile__("rep; outsb" : : [Port] "d" (Port), [Buffer]
"S" (Buffer), "c" (Count));
}
-__INTRIN_INLINE void __outwordstring(unsigned short Port, unsigned short * const Buffer, const unsigned long Count)
+__INTRIN_INLINE void __outwordstring(unsigned short Port, unsigned short * Buffer, unsigned long Count)
{
__asm__ __volatile__("rep; outsw" : : [Port] "d" (Port), [Buffer]
"S" (Buffer), "c" (Count));
}
-__INTRIN_INLINE void __outdwordstring(unsigned short Port, unsigned long * const Buffer, const unsigned long Count)
+__INTRIN_INLINE void __outdwordstring(unsigned short Port, unsigned long * Buffer, unsigned long Count)
{
__asm__ __volatile__("rep; outsl" : : [Port] "d" (Port), [Buffer]
"S" (Buffer), "c" (Count));
}
@@ -1283,14 +1421,19 @@
/*** System information ***/
-__INTRIN_INLINE void __cpuid(int CPUInfo[], const int InfoType)
+__INTRIN_INLINE void __cpuid(int CPUInfo[4], int InfoType)
{
__asm__ __volatile__("cpuid" : "=a" (CPUInfo[0]), "=b"
(CPUInfo[1]), "=c" (CPUInfo[2]), "=d" (CPUInfo[3]) : "a"
(InfoType));
}
+__INTRIN_INLINE void __cpuidex(int CPUInfo[4], int InfoType, int ECXValue)
+{
+ __asm__ __volatile__("cpuid" : "=a" (CPUInfo[0]), "=b"
(CPUInfo[1]), "=c" (CPUInfo[2]), "=d" (CPUInfo[3]) : "a"
(InfoType), "c" (ECXValue));
+}
+
__INTRIN_INLINE unsigned long long __rdtsc(void)
{
-#ifdef _M_AMD64
+#ifdef __x86_64__
unsigned long long low, high;
__asm__ __volatile__("rdtsc" : "=a"(low), "=d"(high));
return low | (high << 32);
@@ -1324,6 +1467,11 @@
}
#endif
+__INTRIN_INLINE void __ud2(void)
+{
+ __asm__("ud2");
+}
+
__INTRIN_INLINE void __int2c(void)
{
__asm__("int $0x2c");
@@ -1353,78 +1501,83 @@
/*** Protected memory management ***/
-#ifdef _M_AMD64
-
-__INTRIN_INLINE void __writecr0(const unsigned __int64 Data)
+#ifdef __x86_64__
+
+__INTRIN_INLINE void __writecr0(unsigned long long Data)
{
__asm__("mov %[Data], %%cr0" : : [Data] "r" (Data) :
"memory");
}
-__INTRIN_INLINE void __writecr3(const unsigned __int64 Data)
+__INTRIN_INLINE void __writecr3(unsigned long long Data)
{
__asm__("mov %[Data], %%cr3" : : [Data] "r" (Data) :
"memory");
}
-__INTRIN_INLINE void __writecr4(const unsigned __int64 Data)
+__INTRIN_INLINE void __writecr4(unsigned long long Data)
{
__asm__("mov %[Data], %%cr4" : : [Data] "r" (Data) :
"memory");
}
-__INTRIN_INLINE void __writecr8(const unsigned __int64 Data)
+__INTRIN_INLINE void __writecr8(unsigned long long Data)
{
__asm__("mov %[Data], %%cr8" : : [Data] "r" (Data) :
"memory");
}
-__INTRIN_INLINE unsigned __int64 __readcr0(void)
-{
- unsigned __int64 value;
+__INTRIN_INLINE unsigned long long __readcr0(void)
+{
+ unsigned long long value;
__asm__ __volatile__("mov %%cr0, %[value]" : [value] "=r" (value));
return value;
}
-__INTRIN_INLINE unsigned __int64 __readcr2(void)
-{
- unsigned __int64 value;
+__INTRIN_INLINE unsigned long long __readcr2(void)
+{
+ unsigned long long value;
__asm__ __volatile__("mov %%cr2, %[value]" : [value] "=r" (value));
return value;
}
-__INTRIN_INLINE unsigned __int64 __readcr3(void)
-{
- unsigned __int64 value;
+__INTRIN_INLINE unsigned long long __readcr3(void)
+{
+ unsigned long long value;
__asm__ __volatile__("mov %%cr3, %[value]" : [value] "=r" (value));
return value;
}
-__INTRIN_INLINE unsigned __int64 __readcr4(void)
-{
- unsigned __int64 value;
+__INTRIN_INLINE unsigned long long __readcr4(void)
+{
+ unsigned long long value;
__asm__ __volatile__("mov %%cr4, %[value]" : [value] "=r" (value));
return value;
}
-__INTRIN_INLINE unsigned __int64 __readcr8(void)
-{
- unsigned __int64 value;
+__INTRIN_INLINE unsigned long long __readcr8(void)
+{
+ unsigned long long value;
__asm__ __volatile__("movq %%cr8, %q[value]" : [value] "=r"
(value));
return value;
}
-#else /* _M_AMD64 */
-
-__INTRIN_INLINE void __writecr0(const unsigned int Data)
+#else /* __x86_64__ */
+
+__INTRIN_INLINE void __writecr0(unsigned int Data)
{
__asm__("mov %[Data], %%cr0" : : [Data] "r" (Data) :
"memory");
}
-__INTRIN_INLINE void __writecr3(const unsigned int Data)
+__INTRIN_INLINE void __writecr3(unsigned int Data)
{
__asm__("mov %[Data], %%cr3" : : [Data] "r" (Data) :
"memory");
}
-__INTRIN_INLINE void __writecr4(const unsigned int Data)
+__INTRIN_INLINE void __writecr4(unsigned int Data)
{
__asm__("mov %[Data], %%cr4" : : [Data] "r" (Data) :
"memory");
+}
+
+__INTRIN_INLINE void __writecr8(unsigned int Data)
+{
+ __asm__("mov %[Data], %%cr8" : : [Data] "r" (Data) :
"memory");
}
__INTRIN_INLINE unsigned long __readcr0(void)
@@ -1455,13 +1608,20 @@
return value;
}
-#endif /* _M_AMD64 */
-
-#ifdef _M_AMD64
-
-__INTRIN_INLINE unsigned __int64 __readdr(unsigned int reg)
-{
- unsigned __int64 value;
+__INTRIN_INLINE unsigned long __readcr8(void)
+{
+ unsigned long value;
+ __asm__ __volatile__("mov %%cr8, %[value]" : [value] "=r" (value));
+ return value;
+}
+
+#endif /* __x86_64__ */
+
+#ifdef __x86_64__
+
+__INTRIN_INLINE unsigned long long __readdr(unsigned int reg)
+{
+ unsigned long long value;
switch (reg)
{
case 0:
@@ -1492,7 +1652,7 @@
return value;
}
-__INTRIN_INLINE void __writedr(unsigned reg, unsigned __int64 value)
+__INTRIN_INLINE void __writedr(unsigned reg, unsigned long long value)
{
switch (reg)
{
@@ -1523,7 +1683,7 @@
}
}
-#else /* _M_AMD64 */
+#else /* __x86_64__ */
__INTRIN_INLINE unsigned int __readdr(unsigned int reg)
{
@@ -1589,9 +1749,9 @@
}
}
-#endif /* _M_AMD64 */
-
-__INTRIN_INLINE void __invlpg(void * const Address)
+#endif /* __x86_64__ */
+
+__INTRIN_INLINE void __invlpg(void *Address)
{
__asm__("invlpg %[Address]" : : [Address] "m" (*((unsigned char
*)(Address))) : "memory");
}
@@ -1601,7 +1761,7 @@
__INTRIN_INLINE unsigned long long __readmsr(unsigned long reg)
{
-#ifdef _M_AMD64
+#ifdef __x86_64__
unsigned long low, high;
__asm__ __volatile__("rdmsr" : "=a" (low), "=d" (high) :
"c" (reg));
return ((unsigned long long)high << 32) | low;
@@ -1612,9 +1772,9 @@
#endif
}
-__INTRIN_INLINE void __writemsr(const unsigned long Register, const unsigned long long Value)
-{
-#ifdef _M_AMD64
+__INTRIN_INLINE void __writemsr(unsigned long Register, unsigned long long Value)
+{
+#ifdef __x86_64__
__asm__ __volatile__("wrmsr" : : "a" (Value), "d" (Value
>> 32), "c" (Register));
#else
__asm__ __volatile__("wrmsr" : : "A" (Value), "c"
(Register));
@@ -1629,7 +1789,7 @@
}
/* NOTE: an immediate value for 'a' will raise an ICE in Visual C++ */
-__INTRIN_INLINE unsigned long __segmentlimit(const unsigned long a)
+__INTRIN_INLINE unsigned long __segmentlimit(unsigned long a)
{
unsigned long retval;
__asm__ __volatile__("lsl %[a], %[retval]" : [retval] "=r" (retval)
: [a] "rm" (a));