Replace the `inline` keyword with `__inline` for compiler compatibility (MSVC does not accept plain `inline` in C mode).
Patch by Brezenbak, with small modifications by me.
Modified: trunk/reactos/apps/utils/pice/module/shell.c
Modified: trunk/reactos/boot/freeldr/freeldr/inffile/inffile.c
Modified: trunk/reactos/boot/freeldr/freeldr/math/libgcc2.c
Modified: trunk/reactos/drivers/fs/cdfs/fsctl.c
Modified: trunk/reactos/drivers/lib/oskittcp/include/memtrack.h
Modified: trunk/reactos/drivers/lib/oskittcp/include/oskitfreebsd.h
Modified: trunk/reactos/drivers/lib/undis/include/ndishack.h
Modified: trunk/reactos/drivers/lib/undis/ndis/compat.c
Modified: trunk/reactos/drivers/net/tcpip/include/memtrack.h
Modified: trunk/reactos/drivers/net/tcpip/include/tcpcore.h
Modified: trunk/reactos/hal/halx86/include/apic.h
Modified: trunk/reactos/hal/halx86/mp/apic.c
Modified: trunk/reactos/include/win32k/float.h
Modified: trunk/reactos/lib/adns/src/internal.h
Modified: trunk/reactos/lib/advapi32/reg/reg.c
Modified: trunk/reactos/lib/advapi32/sec/sid.c
Modified: trunk/reactos/lib/dbghelp/path.c
Modified: trunk/reactos/lib/dbghelp/stabs.c
Modified: trunk/reactos/lib/dbghelp/stackframe.h
Modified: trunk/reactos/lib/dbghelp/symbol.c
Modified: trunk/reactos/lib/devenum/devenum_private.h
Modified: trunk/reactos/lib/kjs/include/jsint.h
Modified: trunk/reactos/lib/kjs/ksrc/alloc.c
Modified: trunk/reactos/lib/kjs/ksrc/main.c
Modified: trunk/reactos/lib/kjs/src/alloc.c
Modified: trunk/reactos/lib/kjs/src/heap.c
Modified: trunk/reactos/lib/kjs/src/main.c
Modified: trunk/reactos/lib/rtl/qsort.c
Modified: trunk/reactos/regtests/shared/regtests.h
Modified: trunk/reactos/subsys/win32k/dib/dib16bpp.c
Modified: trunk/reactos/subsys/win32k/dib/dib32bpp.c
Modified: trunk/reactos/subsys/win32k/dib/dib8bpp.c
Modified: trunk/reactos/subsys/win32k/eng/clip.c
Modified: trunk/reactos/subsys/win32k/eng/xlate.c
Modified: trunk/reactos/subsys/win32k/include/class.h
Modified: trunk/reactos/subsys/win32k/include/msgqueue.h
Modified: trunk/reactos/subsys/win32k/ntuser/class.c
Modified: trunk/reactos/subsys/win32k/ntuser/object.c
Modified: trunk/reactos/subsys/win32k/objects/bitmaps.c
Modified: trunk/reactos/subsys/win32k/objects/gdiobj.c
Modified: trunk/reactos/subsys/win32k/objects/region.c
Modified: trunk/reactos/subsys/win32k/objects/text.c

Modified: trunk/reactos/apps/utils/pice/module/shell.c
--- trunk/reactos/apps/utils/pice/module/shell.c	2005-11-28 23:27:16 UTC (rev 19733)
+++ trunk/reactos/apps/utils/pice/module/shell.c	2005-11-28 23:35:35 UTC (rev 19734)
@@ -293,7 +293,7 @@
 // bNoCtrlKeys()
 //
 //*************************************************************************
-BOOLEAN inline bNoCtrlKeys(void)
+BOOLEAN __inline bNoCtrlKeys(void)
 {
     return (!bControl && !bAlt && !bShift);
 }

Modified: trunk/reactos/boot/freeldr/freeldr/inffile/inffile.c
--- trunk/reactos/boot/freeldr/freeldr/inffile/inffile.c	2005-11-28 23:27:16 UTC (rev 19733)
+++ trunk/reactos/boot/freeldr/freeldr/inffile/inffile.c	2005-11-28 23:35:35 UTC (rev 19734)
@@ -387,7 +387,7 @@
 
 
 /* push the current state on the parser stack */
-inline static void push_state( struct parser *parser, enum parser_state state )
+__inline static void push_state( struct parser *parser, enum parser_state state )
 {
 //  assert( parser->stack_pos < sizeof(parser->stack)/sizeof(parser->stack[0]) );
   parser->stack[parser->stack_pos++] = state;
@@ -395,7 +395,7 @@
 
 
 /* pop the current state */
-inline static void pop_state( struct parser *parser )
+__inline static void pop_state( struct parser *parser )
 {
 //  assert( parser->stack_pos );
   parser->state = parser->stack[--parser->stack_pos];
@@ -403,7 +403,7 @@
 
 
 /* set the parser state and return the previous one */
-inline static enum parser_state set_state( struct parser *parser, enum parser_state state )
+__inline static enum parser_state set_state( struct parser *parser, enum parser_state state )
 {
   enum parser_state ret = parser->state;
   parser->state = state;
@@ -412,14 +412,14 @@
 
 
 /* check if the pointer points to an end of file */
-inline static int is_eof( struct parser *parser, const CHAR *ptr )
+__inline static int is_eof( struct parser *parser, const CHAR *ptr )
 {
   return (ptr >= parser->end || *ptr == CONTROL_Z);
 }
 
 
 /* check if the pointer points to an end of line */
-inline static int is_eol( struct parser *parser, const CHAR *ptr )
+__inline static int is_eol( struct parser *parser, const CHAR *ptr )
 {
   return (ptr >= parser->end ||
 	  *ptr == CONTROL_Z ||

Modified: trunk/reactos/boot/freeldr/freeldr/math/libgcc2.c
--- trunk/reactos/boot/freeldr/freeldr/math/libgcc2.c	2005-11-28 23:27:16 UTC (rev 19733)
+++ trunk/reactos/boot/freeldr/freeldr/math/libgcc2.c	2005-11-28 23:35:35 UTC (rev 19734)
@@ -60,7 +60,7 @@
 
 #if defined (L_negdi2) || defined (L_divdi3) || defined (L_moddi3)
 #if defined (L_divdi3) || defined (L_moddi3)
-static inline
+static __inline
 #endif
 DWtype
 __negdi2 (DWtype u)
@@ -514,7 +514,7 @@
 
 #if (defined (L_udivdi3) || defined (L_divdi3) || \
      defined (L_umoddi3) || defined (L_moddi3))
-static inline
+static __inline
 #endif
 UDWtype
 __udivmoddi4 (UDWtype n, UDWtype d, UDWtype *rp)

Modified: trunk/reactos/drivers/fs/cdfs/fsctl.c
--- trunk/reactos/drivers/fs/cdfs/fsctl.c	2005-11-28 23:27:16 UTC (rev 19733)
+++ trunk/reactos/drivers/fs/cdfs/fsctl.c	2005-11-28 23:35:35 UTC (rev 19734)
@@ -35,7 +35,7 @@
 
 /* FUNCTIONS ****************************************************************/
 
-static inline
+static __inline
 int msf_to_lba (BYTE m, BYTE s, BYTE f)
 {
    return (((m * 60) + s) * 75 + f) - 150;

Modified: trunk/reactos/drivers/lib/oskittcp/include/memtrack.h
--- trunk/reactos/drivers/lib/oskittcp/include/memtrack.h	2005-11-28 23:27:16 UTC (rev 19733)
+++ trunk/reactos/drivers/lib/oskittcp/include/memtrack.h	2005-11-28 23:35:35 UTC (rev 19734)
@@ -49,12 +49,12 @@
 #define TrackDump() TrackDumpFL(__FILE__,__LINE__)
 VOID TrackTag( DWORD Tag );
 
-static inline PVOID ExAllocatePoolX( POOL_TYPE type, SIZE_T size, PCHAR File, ULONG Line ) {
+static __inline PVOID ExAllocatePoolX( POOL_TYPE type, SIZE_T size, PCHAR File, ULONG Line ) {
     PVOID Out = ExAllocatePool( type, size );
     if( Out ) TrackWithTag( EXALLOC_TAG, Out, File, Line );
     return Out;
 }
-static inline VOID ExFreePoolX( PVOID Data, PCHAR File, ULONG Line ) {
+static __inline VOID ExFreePoolX( PVOID Data, PCHAR File, ULONG Line ) {
     UntrackFL(File, Line, Data);
     ExFreePool(Data);
 }

Modified: trunk/reactos/drivers/lib/oskittcp/include/oskitfreebsd.h
--- trunk/reactos/drivers/lib/oskittcp/include/oskitfreebsd.h	2005-11-28 23:27:16 UTC (rev 19733)
+++ trunk/reactos/drivers/lib/oskittcp/include/oskitfreebsd.h	2005-11-28 23:35:35 UTC (rev 19734)
@@ -16,7 +16,7 @@
 #define bzero(x,y) memset(x,0,y)
 #define bcopy(src,dst,n) memcpy(dst,src,n)
 #ifdef _MSC_VER
-static inline void panic ( const char* fmt, ... )
+static __inline void panic ( const char* fmt, ... )
 {
 	va_list arg;
 	va_start(arg, fmt);

Modified: trunk/reactos/drivers/lib/undis/include/ndishack.h
--- trunk/reactos/drivers/lib/undis/include/ndishack.h	2005-11-28 23:27:16 UTC (rev 19733)
+++ trunk/reactos/drivers/lib/undis/include/ndishack.h	2005-11-28 23:35:35 UTC (rev 19734)
@@ -27,7 +27,7 @@
  *	Entry = Pointer to the entry that is inserted in the lookaside list
  */
 static
-inline
+__inline
 VOID
 ExFreeToNPagedLookasideList (
 	IN	PNPAGED_LOOKASIDE_LIST	Lookaside,
@@ -54,7 +54,7 @@
  *	Address of the allocated list entry
  */
 static
-inline
+__inline
 PVOID
 ExAllocateFromNPagedLookasideList (
 	IN	PNPAGED_LOOKASIDE_LIST	Lookaside

Modified: trunk/reactos/drivers/lib/undis/ndis/compat.c
--- trunk/reactos/drivers/lib/undis/ndis/compat.c	2005-11-28 23:27:16 UTC (rev 19733)
+++ trunk/reactos/drivers/lib/undis/ndis/compat.c	2005-11-28 23:35:35 UTC (rev 19734)
@@ -73,7 +73,7 @@
 }
 
 static
-inline
+__inline
 PSINGLE_LIST_ENTRY
  PopEntrySList(
 	PSLIST_HEADER	ListHead
@@ -93,7 +93,7 @@
 
 
 static
-inline
+__inline
 VOID
 PushEntrySList (
 	PSLIST_HEADER	ListHead,

Modified: trunk/reactos/drivers/net/tcpip/include/memtrack.h
--- trunk/reactos/drivers/net/tcpip/include/memtrack.h	2005-11-28 23:27:16 UTC (rev 19733)
+++ trunk/reactos/drivers/net/tcpip/include/memtrack.h	2005-11-28 23:35:35 UTC (rev 19734)
@@ -40,12 +40,12 @@
 #define TrackDump() TrackDumpFL(__FILE__,__LINE__)
 VOID TrackTag( DWORD Tag );
 
-static inline PVOID ExAllocatePoolX( POOL_TYPE type, SIZE_T size, PCHAR File, ULONG Line ) {
+static __inline PVOID ExAllocatePoolX( POOL_TYPE type, SIZE_T size, PCHAR File, ULONG Line ) {
     PVOID Out = PoolAllocateBuffer( size );
     if( Out ) TrackWithTag( EXALLOC_TAG, Out, File, Line );
     return Out;
 }
-static inline VOID ExFreePoolX( PVOID Data, PCHAR File, ULONG Line ) {
+static __inline VOID ExFreePoolX( PVOID Data, PCHAR File, ULONG Line ) {
     UntrackFL(File, Line, Data);
     PoolFreeBuffer(Data);
 }

Modified: trunk/reactos/drivers/net/tcpip/include/tcpcore.h
--- trunk/reactos/drivers/net/tcpip/include/tcpcore.h	2005-11-28 23:27:16 UTC (rev 19733)
+++ trunk/reactos/drivers/net/tcpip/include/tcpcore.h	2005-11-28 23:35:35 UTC (rev 19734)
@@ -262,7 +262,7 @@
  *	Returns true if the queue is empty, false otherwise.
  */
 
-static inline int skb_queue_empty(struct sk_buff_head *list)
+static __inline int skb_queue_empty(struct sk_buff_head *list)
 {
 	return (list->next == (struct sk_buff *) list);
 }
@@ -275,7 +275,7 @@
  *	to the buffer.
  */
 
-static inline struct sk_buff *skb_get(struct sk_buff *skb)
+static __inline struct sk_buff *skb_get(struct sk_buff *skb)
 {
 	atomic_inc(&skb->users);
 	return skb;
@@ -294,14 +294,14 @@
  *	hit zero.
  */
 
-static inline void kfree_skb(struct sk_buff *skb)
+static __inline void kfree_skb(struct sk_buff *skb)
 {
 	if (atomic_read(&skb->users) == 1 || atomic_dec_and_test(&skb->users))
 		__kfree_skb(skb);
 }
 
 /* Use this if you didn't touch the skb state [for fast switching] */
-static inline void kfree_skb_fast(struct sk_buff *skb)
+static __inline void kfree_skb_fast(struct sk_buff *skb)
 {
 	if (atomic_read(&skb->users) == 1 || atomic_dec_and_test(&skb->users))
 		kfree_skbmem(skb);
@@ -316,7 +316,7 @@
  *	shared data so must not be written to under normal circumstances.
  */
 
-static inline int skb_cloned(struct sk_buff *skb)
+static __inline int skb_cloned(struct sk_buff *skb)
 {
 	return skb->cloned && atomic_read(&skb_shinfo(skb)->dataref) != 1;
 }
@@ -329,7 +329,7 @@
  *	buffer.
  */
 
-static inline int skb_shared(struct sk_buff *skb)
+static __inline int skb_shared(struct sk_buff *skb)
 {
 	return (atomic_read(&skb->users) != 1);
 }
@@ -348,7 +348,7 @@
  *	NULL is returned on a memory allocation failure.
  */
 
-static inline struct sk_buff *skb_share_check(struct sk_buff *skb, int pri)
+static __inline struct sk_buff *skb_share_check(struct sk_buff *skb, int pri)
 {
 	if (skb_shared(skb)) {
 		struct sk_buff *nskb;
@@ -381,7 +381,7 @@
  *	%NULL is returned on a memory allocation failure.
  */
 
-static inline struct sk_buff *skb_unshare(struct sk_buff *skb, int pri)
+static __inline struct sk_buff *skb_unshare(struct sk_buff *skb, int pri)
 {
 	struct sk_buff *nskb;
 	if(!skb_cloned(skb))
@@ -405,7 +405,7 @@
  *	volatile. Use with caution.
  */
 
-static inline struct sk_buff *skb_peek(struct sk_buff_head *list_)
+static __inline struct sk_buff *skb_peek(struct sk_buff_head *list_)
 {
 	struct sk_buff *list = ((struct sk_buff *)list_)->next;
 	if (list == (struct sk_buff *)list_)
@@ -427,7 +427,7 @@
  *	volatile. Use with caution.
  */
 
-static inline struct sk_buff *skb_peek_tail(struct sk_buff_head *list_)
+static __inline struct sk_buff *skb_peek_tail(struct sk_buff_head *list_)
 {
 	struct sk_buff *list = ((struct sk_buff *)list_)->prev;
 	if (list == (struct sk_buff *)list_)
@@ -442,12 +442,12 @@
  *	Return the length of an &sk_buff queue.
  */
 
-static inline __u32 skb_queue_len(struct sk_buff_head *list_)
+static __inline __u32 skb_queue_len(struct sk_buff_head *list_)
 {
 	return(list_->qlen);
 }
 
-static inline void skb_queue_head_init(struct sk_buff_head *list)
+static __inline void skb_queue_head_init(struct sk_buff_head *list)
 {
 	spin_lock_init(&list->lock);
 	list->prev = (struct sk_buff *)list;
@@ -473,7 +473,7 @@
  *	A buffer cannot be placed on two lists at the same time.
  */
 
-static inline void __skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk)
+static __inline void __skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk)
 {
 	struct sk_buff *prev, *next;
 
@@ -500,7 +500,7 @@
  *	A buffer cannot be placed on two lists at the same time.
  */
 
-static inline void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk)
+static __inline void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk)
 {
 	unsigned long flags;
 
@@ -521,7 +521,7 @@
  */
 
 
-static inline void __skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk)
+static __inline void __skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk)
 {
 	struct sk_buff *prev, *next;
 
@@ -547,7 +547,7 @@
  *	A buffer cannot be placed on two lists at the same time.
  */
 
-static inline void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk)
+static __inline void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk)
 {
 	unsigned long flags;
 
@@ -565,7 +565,7 @@
  *	returned or %NULL if the list is empty.
  */
 
-static inline struct sk_buff *__skb_dequeue(struct sk_buff_head *list)
+static __inline struct sk_buff *__skb_dequeue(struct sk_buff_head *list)
 {
 	struct sk_buff *next, *prev, *result;
 
@@ -594,7 +594,7 @@
  *	returned or %NULL if the list is empty.
  */
 
-static inline struct sk_buff *skb_dequeue(struct sk_buff_head *list)
+static __inline struct sk_buff *skb_dequeue(struct sk_buff_head *list)
 {
 	unsigned long flags;
 	struct sk_buff *result;
@@ -609,7 +609,7 @@
  *	Insert a packet on a list.
  */
 
-static inline void __skb_insert(struct sk_buff *newsk,
+static __inline void __skb_insert(struct sk_buff *newsk,
 	struct sk_buff * prev, struct sk_buff *next,
 	struct sk_buff_head * list)
 {
@@ -631,7 +631,7 @@
  *	A buffer cannot be placed on two lists at the same time.
  */
 
-static inline void skb_insert(struct sk_buff *old, struct sk_buff *newsk)
+static __inline void skb_insert(struct sk_buff *old, struct sk_buff *newsk)
 {
 	unsigned long flags;
 
@@ -644,7 +644,7 @@
  *	Place a packet after a given packet in a list.
  */
 
-static inline void __skb_append(struct sk_buff *old, struct sk_buff *newsk)
+static __inline void __skb_append(struct sk_buff *old, struct sk_buff *newsk)
 {
 	__skb_insert(newsk, old, old->next, old->list);
 }
@@ -660,7 +660,7 @@
  */
 
 
-static inline void skb_append(struct sk_buff *old, struct sk_buff *newsk)
+static __inline void skb_append(struct sk_buff *old, struct sk_buff *newsk)
 {
 	unsigned long flags;
 
@@ -674,7 +674,7 @@
  * the list known..
  */
 
-static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
+static __inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
 {
 	struct sk_buff * next, * prev;
 
@@ -701,7 +701,7 @@
  *	destroyed.
  */
 
-static inline void skb_unlink(struct sk_buff *skb)
+static __inline void skb_unlink(struct sk_buff *skb)
 {
 	struct sk_buff_head *list = skb->list;
 
@@ -726,7 +726,7 @@
  *	returned or %NULL if the list is empty.
  */
 
-static inline struct sk_buff *__skb_dequeue_tail(struct sk_buff_head *list)
+static __inline struct sk_buff *__skb_dequeue_tail(struct sk_buff_head *list)
 {
 	struct sk_buff *skb = skb_peek_tail(list);
 	if (skb)
@@ -743,7 +743,7 @@
  *	returned or %NULL if the list is empty.
  */
 
-static inline struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list)
+static __inline struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list)
 {
 	unsigned long flags;
 	struct sk_buff *result;
@@ -754,12 +754,12 @@
 	return result;
 }
 
-static inline int skb_is_nonlinear(const struct sk_buff *skb)
+static __inline int skb_is_nonlinear(const struct sk_buff *skb)
 {
 	return skb->data_len;
 }
 
-static inline int skb_headlen(const struct sk_buff *skb)
+static __inline int skb_headlen(const struct sk_buff *skb)
 {
 	return skb->len - skb->data_len;
 }
@@ -772,7 +772,7 @@
  *	Add data to an sk_buff
  */
 
-static inline unsigned char *__skb_put(struct sk_buff *skb, unsigned int len)
+static __inline unsigned char *__skb_put(struct sk_buff *skb, unsigned int len)
 {
 	unsigned char *tmp=skb->tail;
 	SKB_LINEAR_ASSERT(skb);
@@ -791,7 +791,7 @@
  *	first byte of the extra data is returned.
  */
 
-static inline unsigned char *skb_put(struct sk_buff *skb, unsigned int len)
+static __inline unsigned char *skb_put(struct sk_buff *skb, unsigned int len)
 {
 #if 0
 	unsigned char *tmp=skb->tail;
@@ -807,7 +807,7 @@
 #endif
 }
 
-static inline unsigned char *__skb_push(struct sk_buff *skb, unsigned int len)
+static __inline unsigned char *__skb_push(struct sk_buff *skb, unsigned int len)
 {
 	skb->data-=len;
 	skb->len+=len;
@@ -824,7 +824,7 @@
  *	panic. A pointer to the first byte of the extra data is returned.
  */
 
-static inline unsigned char *skb_push(struct sk_buff *skb, unsigned int len)
+static __inline unsigned char *skb_push(struct sk_buff *skb, unsigned int len)
 {
 #if 0
 	skb->data-=len;
@@ -838,7 +838,7 @@
 #endif
 }
 
-static inline char *__skb_pull(struct sk_buff *skb, unsigned int len)
+static __inline char *__skb_pull(struct sk_buff *skb, unsigned int len)
 {
 	skb->len-=len;
 	if (skb->len < skb->data_len)
@@ -857,7 +857,7 @@
  *	the old data.
  */
 
-static inline unsigned char * skb_pull(struct sk_buff *skb, unsigned int len)
+static __inline unsigned char * skb_pull(struct sk_buff *skb, unsigned int len)
 {
 	if (len > skb->len)
 		return NULL;
@@ -866,7 +866,7 @@
 
 extern unsigned char * __pskb_pull_tail(struct sk_buff *skb, int delta);
 
-static inline char *__pskb_pull(struct sk_buff *skb, unsigned int len)
+static __inline char *__pskb_pull(struct sk_buff *skb, unsigned int len)
 {
 	if (len > skb_headlen(skb) &&
 	    __pskb_pull_tail(skb, len-skb_headlen(skb)) == NULL)
@@ -875,14 +875,14 @@
 	return 	skb->data += len;
 }
 
-static inline unsigned char * pskb_pull(struct sk_buff *skb, unsigned int len)
+static __inline unsigned char * pskb_pull(struct sk_buff *skb, unsigned int len)
 {
 	if (len > skb->len)
 		return NULL;
 	return __pskb_pull(skb,len);
 }
 
-static inline int pskb_may_pull(struct sk_buff *skb, unsigned int len)
+static __inline int pskb_may_pull(struct sk_buff *skb, unsigned int len)
 {
 	if (len <= skb_headlen(skb))
 		return 1;
@@ -898,7 +898,7 @@
  *	Return the number of bytes of free space at the head of an &sk_buff.
  */
 
-static inline int skb_headroom(const struct sk_buff *skb)
+static __inline int skb_headroom(const struct sk_buff *skb)
 {
 	return skb->data-skb->head;
 }
@@ -910,7 +910,7 @@
  *	Return the number of bytes of free space at the tail of an sk_buff
  */
 
-static inline int skb_tailroom(const struct sk_buff *skb)
+static __inline int skb_tailroom(const struct sk_buff *skb)
 {
 	return skb_is_nonlinear(skb) ? 0 : skb->end-skb->tail;
 }
@@ -924,7 +924,7 @@
  *	room. This is only allowed for an empty buffer.
  */
 
-static inline void skb_reserve(struct sk_buff *skb, unsigned int len)
+static __inline void skb_reserve(struct sk_buff *skb, unsigned int len)
 {
 	skb->data+=len;
 	skb->tail+=len;
@@ -932,7 +932,7 @@
 
 extern int ___pskb_trim(struct sk_buff *skb, unsigned int len, int realloc);
 
-static inline void __skb_trim(struct sk_buff *skb, unsigned int len)
+static __inline void __skb_trim(struct sk_buff *skb, unsigned int len)
 {
 	if (!skb->data_len) {
 		skb->len = len;
@@ -951,7 +951,7 @@
  *	the buffer is already under the length specified it is not modified.
  */
 
-static inline void skb_trim(struct sk_buff *skb, unsigned int len)
+static __inline void skb_trim(struct sk_buff *skb, unsigned int len)
 {
 	if (skb->len > len) {
 		__skb_trim(skb, len);
@@ -959,7 +959,7 @@
 }
 
 
-static inline int __pskb_trim(struct sk_buff *skb, unsigned int len)
+static __inline int __pskb_trim(struct sk_buff *skb, unsigned int len)
 {
 	if (!skb->data_len) {
 		skb->len = len;
@@ -970,7 +970,7 @@
 	}
 }
 
-static inline int pskb_trim(struct sk_buff *skb, unsigned int len)
+static __inline int pskb_trim(struct sk_buff *skb, unsigned int len)
 {
 	if (len < skb->len)
 		return __pskb_trim(skb, len);
@@ -987,7 +987,7 @@
  */
 
 
-static inline void skb_orphan(struct sk_buff *skb)
+static __inline void skb_orphan(struct sk_buff *skb)
 {
 	if (skb->destructor)
 		skb->destructor(skb);
@@ -1005,7 +1005,7 @@
  */
 
 
-static inline void skb_queue_purge(struct sk_buff_head *list)
+static __inline void skb_queue_purge(struct sk_buff_head *list)
 {
 	struct sk_buff *skb;
 	while ((skb=skb_dequeue(list))!=NULL)
@@ -1022,7 +1022,7 @@
  */
 
 
-static inline void __skb_queue_purge(struct sk_buff_head *list)
+static __inline void __skb_queue_purge(struct sk_buff_head *list)
 {
 	struct sk_buff *skb;
 	while ((skb=__skb_dequeue(list))!=NULL)
@@ -1042,7 +1042,7 @@
  *	%NULL is returned in there is no free memory.
  */
 
-static inline struct sk_buff *__dev_alloc_skb(unsigned int length,
+static __inline struct sk_buff *__dev_alloc_skb(unsigned int length,
 					      int gfp_mask)
 {
 	struct sk_buff *skb;
@@ -1066,7 +1066,7 @@
  *	allocates memory it can be called from an interrupt.
  */
 
-static inline struct sk_buff *dev_alloc_skb(unsigned int length)
+static __inline struct sk_buff *dev_alloc_skb(unsigned int length)
 {
 #if 0
 	return __dev_alloc_skb(length, GFP_ATOMIC);
@@ -1088,7 +1088,7 @@
  *	and at least @headroom of space at head.
  */
 
-static inline int
+static __inline int
 skb_cow(struct sk_buff *skb, unsigned int headroom)
 {
 #if 0
@@ -1114,7 +1114,7 @@
  *	is returned and the old skb data released.  */
 int skb_linearize(struct sk_buff *skb, int gfp);
 
-static inline void *kmap_skb_frag(const skb_frag_t *frag)
+static __inline void *kmap_skb_frag(const skb_frag_t *frag)
 {
 #if 0
 #ifdef CONFIG_HIGHMEM
@@ -1129,7 +1129,7 @@
 #endif
 }
 
-static inline void kunmap_skb_frag(void *vaddr)
+static __inline void kunmap_skb_frag(void *vaddr)
 {
 #if 0
 	kunmap_atomic(vaddr, KM_SKB_DATA_SOFTIRQ);
@@ -1162,13 +1162,13 @@
 extern void skb_add_mtu(int mtu);
 
 #ifdef CONFIG_NETFILTER
-static inline void
+static __inline void
 nf_conntrack_put(struct nf_ct_info *nfct)
 {
 	if (nfct && atomic_dec_and_test(&nfct->master->use))
 		nfct->master->destroy(nfct->master);
 }
-static inline void
+static __inline void
 nf_conntrack_get(struct nf_ct_info *nfct)
 {
 	if (nfct)
@@ -1733,12 +1733,12 @@
 
 #ifdef __KERNEL__
 
-static inline void dst_hold(struct dst_entry * dst)
+static __inline void dst_hold(struct dst_entry * dst)
 {
 	atomic_inc(&dst->__refcnt);
 }
 
-static inline
+static __inline
 struct dst_entry * dst_clone(struct dst_entry * dst)
 {
 	if (dst)
@@ -1746,7 +1746,7 @@
 	return dst;
 }
 
-static inline
+static __inline
 void dst_release(struct dst_entry * dst)
 {
 	if (dst)
@@ -1757,7 +1757,7 @@
 extern void __dst_free(struct dst_entry * dst);
 extern void dst_destroy(struct dst_entry * dst);
 
-static inline
+static __inline
 void dst_free(struct dst_entry * dst)
 {
 	if (dst->obsolete > 1)
@@ -1769,27 +1769,27 @@
 	__dst_free(dst);
 }
 
-static inline void dst_confirm(struct dst_entry *dst)
+static __inline void dst_confirm(struct dst_entry *dst)
 {
 	if (dst)
 		neigh_confirm(dst->neighbour);
 }
 
-static inline void dst_negative_advice(struct dst_entry **dst_p)
+static __inline void dst_negative_advice(struct dst_entry **dst_p)
 {
 	struct dst_entry * dst = *dst_p;
 	if (dst && dst->ops->negative_advice)
 		*dst_p = dst->ops->negative_advice(dst);
 }
 
-static inline void dst_link_failure(struct sk_buff *skb)
+static __inline void dst_link_failure(struct sk_buff *skb)
 {
 	struct dst_entry * dst = skb->dst;
 	if (dst && dst->ops && dst->ops->link_failure)
 		dst->ops->link_failure(skb);
 }
 
-static inline void dst_set_expires(struct dst_entry *dst, int timeout)
+static __inline void dst_set_expires(struct dst_entry *dst, int timeout)
 {
 	unsigned long expires = jiffies + timeout;
 
@@ -1940,7 +1940,7 @@
 extern struct sock *tcp_v4_lookup_listener(u32 addr, unsigned short hnum, int dif);
 
 /* These are AF independent. */
-static __inline__ int tcp_bhashfn(__u16 lport)
+static __inline int tcp_bhashfn(__u16 lport)
 {
 	return (lport & (tcp_bhash_size - 1));
 }
@@ -1992,7 +1992,7 @@
 
 extern kmem_cache_t *tcp_timewait_cachep;
 
-static inline void tcp_tw_put(struct tcp_tw_bucket *tw)
+static __inline void tcp_tw_put(struct tcp_tw_bucket *tw)
 {
 	if (atomic_dec_and_test(&tw->refcnt)) {
 #ifdef INET_REFCNT_DEBUG
@@ -2048,7 +2048,7 @@
 	 (!((__sk)->bound_dev_if) || ((__sk)->bound_dev_if == (__dif))))
 
 /* These can have wildcards, don't try too hard. */
-static __inline__ int tcp_lhashfn(unsigned short num)
+static __inline int tcp_lhashfn(unsigned short num)
 {
 #if 0
 	return num & (TCP_LHTABLE_SIZE - 1);
@@ -2057,7 +2057,7 @@
 #endif
 }
 
-static __inline__ int tcp_sk_listen_hashfn(struct sock *sk)
+static __inline int tcp_sk_listen_hashfn(struct sock *sk)
 {
 #if 0
 	return tcp_lhashfn(sk->num);
@@ -2341,7 +2341,7 @@
 #define tcp_openreq_alloc()		kmem_cache_alloc(tcp_openreq_cachep, SLAB_ATOMIC)
 #define tcp_openreq_fastfree(req)	kmem_cache_free(tcp_openreq_cachep, req)
 
-static inline void tcp_openreq_free(struct open_request *req)
+static __inline void tcp_openreq_free(struct open_request *req)
 {
 	req->class->destructor(req);
 	tcp_openreq_fastfree(req);
@@ -2477,17 +2477,17 @@
 	TCP_ACK_PUSHED= 4
 };
 
-static inline void tcp_schedule_ack(struct tcp_opt *tp)
+static __inline void tcp_schedule_ack(struct tcp_opt *tp)
 {
 	tp->ack.pending |= TCP_ACK_SCHED;
 }
 
-static inline int tcp_ack_scheduled(struct tcp_opt *tp)
+static __inline int tcp_ack_scheduled(struct tcp_opt *tp)
 {
 	return tp->ack.pending&TCP_ACK_SCHED;
 }
 
-static __inline__ void tcp_dec_quickack_mode(struct tcp_opt *tp)
+static __inline void tcp_dec_quickack_mode(struct tcp_opt *tp)
 {
 	if (tp->ack.quick && --tp->ack.quick == 0) {
 		/* Leaving quickack mode we deflate ATO. */
@@ -2497,12 +2497,12 @@
 
 extern void tcp_enter_quickack_mode(struct tcp_opt *tp);
 
-static __inline__ void tcp_delack_init(struct tcp_opt *tp)
+static __inline void tcp_delack_init(struct tcp_opt *tp)
 {
 	memset(&tp->ack, 0, sizeof(tp->ack));
 }
 
-static inline void tcp_clear_options(struct tcp_opt *tp)
+static __inline void tcp_clear_options(struct tcp_opt *tp)
 {
  	tp->tstamp_ok = tp->sack_ok = tp->wscale_ok = tp->snd_wscale = 0;
 }
@@ -2641,7 +2641,7 @@
 extern int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
 			 sk_read_actor_t recv_actor);
 
-static inline void tcp_clear_xmit_timer(struct sock *sk, int what)
+static __inline void tcp_clear_xmit_timer(struct sock *sk, int what)
 {
 #if 0
 	struct tcp_opt *tp = &sk->tp_pinfo.af_tcp;
@@ -2677,7 +2677,7 @@
 /*
  *	Reset the retransmission timer
  */
-static inline void tcp_reset_xmit_timer(struct sock *sk, int what, unsigned long when)
+static __inline void tcp_reset_xmit_timer(struct sock *sk, int what, unsigned long when)
 {
 #if 0
 	struct tcp_opt *tp = &sk->tp_pinfo.af_tcp;
@@ -2715,7 +2715,7 @@
  * and even PMTU discovery events into account.
  */
 
-static __inline__ unsigned int tcp_current_mss(struct sock *sk)
+static __inline unsigned int tcp_current_mss(struct sock *sk)
 {
 #if 0
 	struct tcp_opt *tp = &sk->tp_pinfo.af_tcp;
@@ -2742,7 +2742,7 @@
  * Underestimations are more easy to detect and fix by tcp_measure_rcv_mss().
  */
 
-static inline void tcp_initialize_rcv_mss(struct sock *sk)
+static __inline void tcp_initialize_rcv_mss(struct sock *sk)
 {
 #if 0
 	struct tcp_opt *tp = &sk->tp_pinfo.af_tcp;
@@ -2756,7 +2756,7 @@
 #endif
 }
 
-static __inline__ void __tcp_fast_path_on(struct tcp_opt *tp, u32 snd_wnd)
+static __inline void __tcp_fast_path_on(struct tcp_opt *tp, u32 snd_wnd)
 {
 #if 0
 	tp->pred_flags = htonl((tp->tcp_header_len << 26) |
@@ -2765,14 +2765,14 @@
 #endif
 }
 
-static __inline__ void tcp_fast_path_on(struct tcp_opt *tp)
+static __inline void tcp_fast_path_on(struct tcp_opt *tp)
 {
 #if 0
 	__tcp_fast_path_on(tp, tp->snd_wnd>>tp->snd_wscale);
 #endif
 }
 
-static inline void tcp_fast_path_check(struct sock *sk, struct tcp_opt *tp)
+static __inline void tcp_fast_path_check(struct sock *sk, struct tcp_opt *tp)
 {
 #if 0
 	if (skb_queue_len(&tp->out_of_order_queue) == 0 &&
@@ -2787,7 +2787,7 @@
  * Rcv_nxt can be after the window if our peer push more data
  * than the offered window.
  */
-static __inline__ u32 tcp_receive_window(struct tcp_opt *tp)
+static __inline u32 tcp_receive_window(struct tcp_opt *tp)
 {
 #if 0
 	s32 win = tp->rcv_wup + tp->rcv_wnd - tp->rcv_nxt;
@@ -2879,7 +2879,7 @@
 /*
  *	Compute minimal free write space needed to queue new packets.
  */
-static inline int tcp_min_write_space(struct sock *sk)
+static __inline int tcp_min_write_space(struct sock *sk)
 {
 #if 0
 	return sk->wmem_queued/2;
@@ -2888,7 +2888,7 @@
 #endif
 }
 
-static inline int tcp_wspace(struct sock *sk)
+static __inline int tcp_wspace(struct sock *sk)
 {
 #if 0
 	return sk->sndbuf - sk->wmem_queued;
@@ -2912,7 +2912,7 @@
  *	"Packets left network, but not honestly ACKed yet" PLUS
  *	"Packets fast retransmitted"
  */
-static __inline__ unsigned int tcp_packets_in_flight(struct tcp_opt *tp)
+static __inline unsigned int tcp_packets_in_flight(struct tcp_opt *tp)
 {
 #if 0
 	return tp->packets_out - tp->left_out + tp->retrans_out;
@@ -2926,7 +2926,7 @@
  * 	one half the current congestion window, but no
  *	less than two segments
  */
-static inline __u32 tcp_recalc_ssthresh(struct tcp_opt *tp)
+static __inline __u32 tcp_recalc_ssthresh(struct tcp_opt *tp)
 {
 #if 0
 	return max(tp->snd_cwnd >> 1U, 2U);
@@ -2939,7 +2939,7 @@
  * The exception is rate halving phase, when cwnd is decreasing towards
  * ssthresh.
  */
-static inline __u32 tcp_current_ssthresh(struct tcp_opt *tp)
+static __inline __u32 tcp_current_ssthresh(struct tcp_opt *tp)
 {
 #if 0
 	if ((1<<tp->ca_state)&(TCPF_CA_CWR|TCPF_CA_Recovery))
@@ -2953,7 +2953,7 @@
 #endif
 }
 
-static inline void tcp_sync_left_out(struct tcp_opt *tp)
+static __inline void tcp_sync_left_out(struct tcp_opt *tp)
 {
 #if 0
 	if (tp->sack_ok && tp->sacked_out >= tp->packets_out - tp->lost_out)
@@ -2966,7 +2966,7 @@
 
 /* Congestion window validation. (RFC2861) */
 
-static inline void tcp_cwnd_validate(struct sock *sk, struct tcp_opt *tp)
+static __inline void tcp_cwnd_validate(struct sock *sk, struct tcp_opt *tp)
 {
 #if 0
 	if (tp->packets_out >= tp->snd_cwnd) {
@@ -2985,7 +2985,7 @@
 }
 
 /* Set slow start threshould and cwnd not falling to slow start */
-static inline void __tcp_enter_cwr(struct tcp_opt *tp)
+static __inline void __tcp_enter_cwr(struct tcp_opt *tp)
 {
 #if 0
 	tp->undo_marker = 0;
@@ -2999,7 +2999,7 @@
 #endif
 }
 
-static inline void tcp_enter_cwr(struct tcp_opt *tp)
+static __inline void tcp_enter_cwr(struct tcp_opt *tp)
 {
 #if 0
 	tp->prior_ssthresh = 0;
@@ -3015,7 +3015,7 @@
 /* Slow start with delack produces 3 packets of burst, so that
  * it is safe "de facto".
  */
-static __inline__ __u32 tcp_max_burst(struct tcp_opt *tp)
+static __inline __u32 tcp_max_burst(struct tcp_opt *tp)
 {
 	return 3;
 }
@@ -3030,7 +3030,7 @@
 #endif
 }
 
-static __inline__ void tcp_minshall_update(struct tcp_opt *tp, int mss, struct sk_buff *skb)
+static __inline void tcp_minshall_update(struct tcp_opt *tp, int mss, struct sk_buff *skb)
 {
 #if 0
 	if (skb->len < mss)
@@ -3046,7 +3046,7 @@
       With Minshall's modification: all sent small packets are ACKed.
  */
 
-static __inline__ int
+static __inline int
 tcp_nagle_check(struct tcp_opt *tp, struct sk_buff *skb, unsigned mss_now, int nonagle)
 {
 #if 0
@@ -3064,7 +3064,7 @@
 /* This checks if the data bearing packet SKB (usually tp->send_head)
  * should be put on the wire right now.
  */
[truncated at 1000 lines; 1316 more skipped]