--- trunk/reactos/drivers/net/tcpip/include/tcpcore.h 2005-11-28 23:27:16 UTC (rev 19733)
+++ trunk/reactos/drivers/net/tcpip/include/tcpcore.h 2005-11-28 23:35:35 UTC (rev 19734)
@@ -262,7 +262,7 @@
* Returns true if the queue is empty, false otherwise.
*/
-static inline int skb_queue_empty(struct sk_buff_head *list)
+static __inline int skb_queue_empty(struct sk_buff_head *list)
{
return (list->next == (struct sk_buff *) list);
}
@@ -275,7 +275,7 @@
* to the buffer.
*/
-static inline struct sk_buff *skb_get(struct sk_buff *skb)
+static __inline struct sk_buff *skb_get(struct sk_buff *skb)
{
atomic_inc(&skb->users);
return skb;
@@ -294,14 +294,14 @@
* hit zero.
*/
-static inline void kfree_skb(struct sk_buff *skb)
+static __inline void kfree_skb(struct sk_buff *skb)
{
if (atomic_read(&skb->users) == 1 || atomic_dec_and_test(&skb->users))
__kfree_skb(skb);
}
/* Use this if you didn't touch the skb state [for fast switching] */
-static inline void kfree_skb_fast(struct sk_buff *skb)
+static __inline void kfree_skb_fast(struct sk_buff *skb)
{
if (atomic_read(&skb->users) == 1 || atomic_dec_and_test(&skb->users))
kfree_skbmem(skb);
@@ -316,7 +316,7 @@
* shared data so must not be written to under normal circumstances.
*/
-static inline int skb_cloned(struct sk_buff *skb)
+static __inline int skb_cloned(struct sk_buff *skb)
{
return skb->cloned && atomic_read(&skb_shinfo(skb)->dataref) != 1;
}
@@ -329,7 +329,7 @@
* buffer.
*/
-static inline int skb_shared(struct sk_buff *skb)
+static __inline int skb_shared(struct sk_buff *skb)
{
return (atomic_read(&skb->users) != 1);
}
@@ -348,7 +348,7 @@
* NULL is returned on a memory allocation failure.
*/
-static inline struct sk_buff *skb_share_check(struct sk_buff *skb, int pri)
+static __inline struct sk_buff *skb_share_check(struct sk_buff *skb, int pri)
{
if (skb_shared(skb)) {
struct sk_buff *nskb;
@@ -381,7 +381,7 @@
* %NULL is returned on a memory allocation failure.
*/
-static inline struct sk_buff *skb_unshare(struct sk_buff *skb, int pri)
+static __inline struct sk_buff *skb_unshare(struct sk_buff *skb, int pri)
{
struct sk_buff *nskb;
if(!skb_cloned(skb))
@@ -405,7 +405,7 @@
* volatile. Use with caution.
*/
-static inline struct sk_buff *skb_peek(struct sk_buff_head *list_)
+static __inline struct sk_buff *skb_peek(struct sk_buff_head *list_)
{
struct sk_buff *list = ((struct sk_buff *)list_)->next;
if (list == (struct sk_buff *)list_)
@@ -427,7 +427,7 @@
* volatile. Use with caution.
*/
-static inline struct sk_buff *skb_peek_tail(struct sk_buff_head *list_)
+static __inline struct sk_buff *skb_peek_tail(struct sk_buff_head *list_)
{
struct sk_buff *list = ((struct sk_buff *)list_)->prev;
if (list == (struct sk_buff *)list_)
@@ -442,12 +442,12 @@
* Return the length of an &sk_buff queue.
*/
-static inline __u32 skb_queue_len(struct sk_buff_head *list_)
+static __inline __u32 skb_queue_len(struct sk_buff_head *list_)
{
return(list_->qlen);
}
-static inline void skb_queue_head_init(struct sk_buff_head *list)
+static __inline void skb_queue_head_init(struct sk_buff_head *list)
{
spin_lock_init(&list->lock);
list->prev = (struct sk_buff *)list;
@@ -473,7 +473,7 @@
* A buffer cannot be placed on two lists at the same time.
*/
-static inline void __skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk)
+static __inline void __skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk)
{
struct sk_buff *prev, *next;
@@ -500,7 +500,7 @@
* A buffer cannot be placed on two lists at the same time.
*/
-static inline void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk)
+static __inline void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk)
{
unsigned long flags;
@@ -521,7 +521,7 @@
*/
-static inline void __skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk)
+static __inline void __skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk)
{
struct sk_buff *prev, *next;
@@ -547,7 +547,7 @@
* A buffer cannot be placed on two lists at the same time.
*/
-static inline void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk)
+static __inline void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk)
{
unsigned long flags;
@@ -565,7 +565,7 @@
* returned or %NULL if the list is empty.
*/
-static inline struct sk_buff *__skb_dequeue(struct sk_buff_head *list)
+static __inline struct sk_buff *__skb_dequeue(struct sk_buff_head *list)
{
struct sk_buff *next, *prev, *result;
@@ -594,7 +594,7 @@
* returned or %NULL if the list is empty.
*/
-static inline struct sk_buff *skb_dequeue(struct sk_buff_head *list)
+static __inline struct sk_buff *skb_dequeue(struct sk_buff_head *list)
{
unsigned long flags;
struct sk_buff *result;
@@ -609,7 +609,7 @@
* Insert a packet on a list.
*/
-static inline void __skb_insert(struct sk_buff *newsk,
+static __inline void __skb_insert(struct sk_buff *newsk,
struct sk_buff * prev, struct sk_buff *next,
struct sk_buff_head * list)
{
@@ -631,7 +631,7 @@
* A buffer cannot be placed on two lists at the same time.
*/
-static inline void skb_insert(struct sk_buff *old, struct sk_buff *newsk)
+static __inline void skb_insert(struct sk_buff *old, struct sk_buff *newsk)
{
unsigned long flags;
@@ -644,7 +644,7 @@
* Place a packet after a given packet in a list.
*/
-static inline void __skb_append(struct sk_buff *old, struct sk_buff *newsk)
+static __inline void __skb_append(struct sk_buff *old, struct sk_buff *newsk)
{
__skb_insert(newsk, old, old->next, old->list);
}
@@ -660,7 +660,7 @@
*/
-static inline void skb_append(struct sk_buff *old, struct sk_buff *newsk)
+static __inline void skb_append(struct sk_buff *old, struct sk_buff *newsk)
{
unsigned long flags;
@@ -674,7 +674,7 @@
 * the list known.
*/
-static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
+static __inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
{
struct sk_buff * next, * prev;
@@ -701,7 +701,7 @@
* destroyed.
*/
-static inline void skb_unlink(struct sk_buff *skb)
+static __inline void skb_unlink(struct sk_buff *skb)
{
struct sk_buff_head *list = skb->list;
@@ -726,7 +726,7 @@
* returned or %NULL if the list is empty.
*/
-static inline struct sk_buff *__skb_dequeue_tail(struct sk_buff_head *list)
+static __inline struct sk_buff *__skb_dequeue_tail(struct sk_buff_head *list)
{
struct sk_buff *skb = skb_peek_tail(list);
if (skb)
@@ -743,7 +743,7 @@
* returned or %NULL if the list is empty.
*/
-static inline struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list)
+static __inline struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list)
{
unsigned long flags;
struct sk_buff *result;
@@ -754,12 +754,12 @@
return result;
}
-static inline int skb_is_nonlinear(const struct sk_buff *skb)
+static __inline int skb_is_nonlinear(const struct sk_buff *skb)
{
return skb->data_len;
}
-static inline int skb_headlen(const struct sk_buff *skb)
+static __inline int skb_headlen(const struct sk_buff *skb)
{
return skb->len - skb->data_len;
}
@@ -772,7 +772,7 @@
* Add data to an sk_buff
*/
-static inline unsigned char *__skb_put(struct sk_buff *skb, unsigned int len)
+static __inline unsigned char *__skb_put(struct sk_buff *skb, unsigned int len)
{
unsigned char *tmp=skb->tail;
SKB_LINEAR_ASSERT(skb);
@@ -791,7 +791,7 @@
* first byte of the extra data is returned.
*/
-static inline unsigned char *skb_put(struct sk_buff *skb, unsigned int len)
+static __inline unsigned char *skb_put(struct sk_buff *skb, unsigned int len)
{
#if 0
unsigned char *tmp=skb->tail;
@@ -807,7 +807,7 @@
#endif
}
-static inline unsigned char *__skb_push(struct sk_buff *skb, unsigned int len)
+static __inline unsigned char *__skb_push(struct sk_buff *skb, unsigned int len)
{
skb->data-=len;
skb->len+=len;
@@ -824,7 +824,7 @@
* panic. A pointer to the first byte of the extra data is returned.
*/
-static inline unsigned char *skb_push(struct sk_buff *skb, unsigned int len)
+static __inline unsigned char *skb_push(struct sk_buff *skb, unsigned int len)
{
#if 0
skb->data-=len;
@@ -838,7 +838,7 @@
#endif
}
-static inline char *__skb_pull(struct sk_buff *skb, unsigned int len)
+static __inline char *__skb_pull(struct sk_buff *skb, unsigned int len)
{
skb->len-=len;
if (skb->len < skb->data_len)
@@ -857,7 +857,7 @@
* the old data.
*/
-static inline unsigned char * skb_pull(struct sk_buff *skb, unsigned int len)
+static __inline unsigned char * skb_pull(struct sk_buff *skb, unsigned int len)
{
if (len > skb->len)
return NULL;
@@ -866,7 +866,7 @@
extern unsigned char * __pskb_pull_tail(struct sk_buff *skb, int delta);
-static inline char *__pskb_pull(struct sk_buff *skb, unsigned int len)
+static __inline char *__pskb_pull(struct sk_buff *skb, unsigned int len)
{
if (len > skb_headlen(skb) &&
__pskb_pull_tail(skb, len-skb_headlen(skb)) == NULL)
@@ -875,14 +875,14 @@
return skb->data += len;
}
-static inline unsigned char * pskb_pull(struct sk_buff *skb, unsigned int len)
+static __inline unsigned char * pskb_pull(struct sk_buff *skb, unsigned int len)
{
if (len > skb->len)
return NULL;
return __pskb_pull(skb,len);
}
-static inline int pskb_may_pull(struct sk_buff *skb, unsigned int len)
+static __inline int pskb_may_pull(struct sk_buff *skb, unsigned int len)
{
if (len <= skb_headlen(skb))
return 1;
@@ -898,7 +898,7 @@
* Return the number of bytes of free space at the head of an &sk_buff.
*/
-static inline int skb_headroom(const struct sk_buff *skb)
+static __inline int skb_headroom(const struct sk_buff *skb)
{
return skb->data-skb->head;
}
@@ -910,7 +910,7 @@
* Return the number of bytes of free space at the tail of an sk_buff
*/
-static inline int skb_tailroom(const struct sk_buff *skb)
+static __inline int skb_tailroom(const struct sk_buff *skb)
{
return skb_is_nonlinear(skb) ? 0 : skb->end-skb->tail;
}
@@ -924,7 +924,7 @@
* room. This is only allowed for an empty buffer.
*/
-static inline void skb_reserve(struct sk_buff *skb, unsigned int len)
+static __inline void skb_reserve(struct sk_buff *skb, unsigned int len)
{
skb->data+=len;
skb->tail+=len;
@@ -932,7 +932,7 @@
extern int ___pskb_trim(struct sk_buff *skb, unsigned int len, int realloc);
-static inline void __skb_trim(struct sk_buff *skb, unsigned int len)
+static __inline void __skb_trim(struct sk_buff *skb, unsigned int len)
{
if (!skb->data_len) {
skb->len = len;
@@ -951,7 +951,7 @@
* the buffer is already under the length specified it is not modified.
*/
-static inline void skb_trim(struct sk_buff *skb, unsigned int len)
+static __inline void skb_trim(struct sk_buff *skb, unsigned int len)
{
if (skb->len > len) {
__skb_trim(skb, len);
@@ -959,7 +959,7 @@
}
-static inline int __pskb_trim(struct sk_buff *skb, unsigned int len)
+static __inline int __pskb_trim(struct sk_buff *skb, unsigned int len)
{
if (!skb->data_len) {
skb->len = len;
@@ -970,7 +970,7 @@
}
}
-static inline int pskb_trim(struct sk_buff *skb, unsigned int len)
+static __inline int pskb_trim(struct sk_buff *skb, unsigned int len)
{
if (len < skb->len)
return __pskb_trim(skb, len);
@@ -987,7 +987,7 @@
*/
-static inline void skb_orphan(struct sk_buff *skb)
+static __inline void skb_orphan(struct sk_buff *skb)
{
if (skb->destructor)
skb->destructor(skb);
@@ -1005,7 +1005,7 @@
*/
-static inline void skb_queue_purge(struct sk_buff_head *list)
+static __inline void skb_queue_purge(struct sk_buff_head *list)
{
struct sk_buff *skb;
while ((skb=skb_dequeue(list))!=NULL)
@@ -1022,7 +1022,7 @@
*/
-static inline void __skb_queue_purge(struct sk_buff_head *list)
+static __inline void __skb_queue_purge(struct sk_buff_head *list)
{
struct sk_buff *skb;
while ((skb=__skb_dequeue(list))!=NULL)
@@ -1042,7 +1042,7 @@
 * %NULL is returned if there is no free memory.
*/
-static inline struct sk_buff *__dev_alloc_skb(unsigned int length,
+static __inline struct sk_buff *__dev_alloc_skb(unsigned int length,
int gfp_mask)
{
struct sk_buff *skb;
@@ -1066,7 +1066,7 @@
* allocates memory it can be called from an interrupt.
*/
-static inline struct sk_buff *dev_alloc_skb(unsigned int length)
+static __inline struct sk_buff *dev_alloc_skb(unsigned int length)
{
#if 0
return __dev_alloc_skb(length, GFP_ATOMIC);
@@ -1088,7 +1088,7 @@
* and at least @headroom of space at head.
*/
-static inline int
+static __inline int
skb_cow(struct sk_buff *skb, unsigned int headroom)
{
#if 0
@@ -1114,7 +1114,7 @@
* is returned and the old skb data released. */
int skb_linearize(struct sk_buff *skb, int gfp);
-static inline void *kmap_skb_frag(const skb_frag_t *frag)
+static __inline void *kmap_skb_frag(const skb_frag_t *frag)
{
#if 0
#ifdef CONFIG_HIGHMEM
@@ -1129,7 +1129,7 @@
#endif
}
-static inline void kunmap_skb_frag(void *vaddr)
+static __inline void kunmap_skb_frag(void *vaddr)
{
#if 0
kunmap_atomic(vaddr, KM_SKB_DATA_SOFTIRQ);
@@ -1162,13 +1162,13 @@
extern void skb_add_mtu(int mtu);
#ifdef CONFIG_NETFILTER
-static inline void
+static __inline void
nf_conntrack_put(struct nf_ct_info *nfct)
{
if (nfct && atomic_dec_and_test(&nfct->master->use))
nfct->master->destroy(nfct->master);
}
-static inline void
+static __inline void
nf_conntrack_get(struct nf_ct_info *nfct)
{
if (nfct)
@@ -1733,12 +1733,12 @@
#ifdef __KERNEL__
-static inline void dst_hold(struct dst_entry * dst)
+static __inline void dst_hold(struct dst_entry * dst)
{
atomic_inc(&dst->__refcnt);
}
-static inline
+static __inline
struct dst_entry * dst_clone(struct dst_entry * dst)
{
if (dst)
@@ -1746,7 +1746,7 @@
return dst;
}
-static inline
+static __inline
void dst_release(struct dst_entry * dst)
{
if (dst)
@@ -1757,7 +1757,7 @@
extern void __dst_free(struct dst_entry * dst);
extern void dst_destroy(struct dst_entry * dst);
-static inline
+static __inline
void dst_free(struct dst_entry * dst)
{
if (dst->obsolete > 1)
@@ -1769,27 +1769,27 @@
__dst_free(dst);
}
-static inline void dst_confirm(struct dst_entry *dst)
+static __inline void dst_confirm(struct dst_entry *dst)
{
if (dst)
neigh_confirm(dst->neighbour);
}
-static inline void dst_negative_advice(struct dst_entry **dst_p)
+static __inline void dst_negative_advice(struct dst_entry **dst_p)
{
struct dst_entry * dst = *dst_p;
if (dst && dst->ops->negative_advice)
*dst_p = dst->ops->negative_advice(dst);
}
-static inline void dst_link_failure(struct sk_buff *skb)
+static __inline void dst_link_failure(struct sk_buff *skb)
{
struct dst_entry * dst = skb->dst;
if (dst && dst->ops && dst->ops->link_failure)
dst->ops->link_failure(skb);
}
-static inline void dst_set_expires(struct dst_entry *dst, int timeout)
+static __inline void dst_set_expires(struct dst_entry *dst, int timeout)
{
unsigned long expires = jiffies + timeout;
@@ -1940,7 +1940,7 @@
extern struct sock *tcp_v4_lookup_listener(u32 addr, unsigned short hnum, int dif);
/* These are AF independent. */
-static __inline__ int tcp_bhashfn(__u16 lport)
+static __inline int tcp_bhashfn(__u16 lport)
{
return (lport & (tcp_bhash_size - 1));
}
@@ -1992,7 +1992,7 @@
extern kmem_cache_t *tcp_timewait_cachep;
-static inline void tcp_tw_put(struct tcp_tw_bucket *tw)
+static __inline void tcp_tw_put(struct tcp_tw_bucket *tw)
{
if (atomic_dec_and_test(&tw->refcnt)) {
#ifdef INET_REFCNT_DEBUG
@@ -2048,7 +2048,7 @@
(!((__sk)->bound_dev_if) || ((__sk)->bound_dev_if == (__dif))))
/* These can have wildcards, don't try too hard. */
-static __inline__ int tcp_lhashfn(unsigned short num)
+static __inline int tcp_lhashfn(unsigned short num)
{
#if 0
return num & (TCP_LHTABLE_SIZE - 1);
@@ -2057,7 +2057,7 @@
#endif
}
-static __inline__ int tcp_sk_listen_hashfn(struct sock *sk)
+static __inline int tcp_sk_listen_hashfn(struct sock *sk)
{
#if 0
return tcp_lhashfn(sk->num);
@@ -2341,7 +2341,7 @@
#define tcp_openreq_alloc() kmem_cache_alloc(tcp_openreq_cachep, SLAB_ATOMIC)
#define tcp_openreq_fastfree(req) kmem_cache_free(tcp_openreq_cachep, req)
-static inline void tcp_openreq_free(struct open_request *req)
+static __inline void tcp_openreq_free(struct open_request *req)
{
req->class->destructor(req);
tcp_openreq_fastfree(req);
@@ -2477,17 +2477,17 @@
TCP_ACK_PUSHED= 4
};
-static inline void tcp_schedule_ack(struct tcp_opt *tp)
+static __inline void tcp_schedule_ack(struct tcp_opt *tp)
{
tp->ack.pending |= TCP_ACK_SCHED;
}
-static inline int tcp_ack_scheduled(struct tcp_opt *tp)
+static __inline int tcp_ack_scheduled(struct tcp_opt *tp)
{
return tp->ack.pending&TCP_ACK_SCHED;
}
-static __inline__ void tcp_dec_quickack_mode(struct tcp_opt *tp)
+static __inline void tcp_dec_quickack_mode(struct tcp_opt *tp)
{
if (tp->ack.quick && --tp->ack.quick == 0) {
/* Leaving quickack mode we deflate ATO. */
@@ -2497,12 +2497,12 @@
extern void tcp_enter_quickack_mode(struct tcp_opt *tp);
-static __inline__ void tcp_delack_init(struct tcp_opt *tp)
+static __inline void tcp_delack_init(struct tcp_opt *tp)
{
memset(&tp->ack, 0, sizeof(tp->ack));
}
-static inline void tcp_clear_options(struct tcp_opt *tp)
+static __inline void tcp_clear_options(struct tcp_opt *tp)
{
tp->tstamp_ok = tp->sack_ok = tp->wscale_ok = tp->snd_wscale = 0;
}
@@ -2641,7 +2641,7 @@
extern int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
sk_read_actor_t recv_actor);
-static inline void tcp_clear_xmit_timer(struct sock *sk, int what)
+static __inline void tcp_clear_xmit_timer(struct sock *sk, int what)
{
#if 0
struct tcp_opt *tp = &sk->tp_pinfo.af_tcp;
@@ -2677,7 +2677,7 @@
/*
* Reset the retransmission timer
*/
-static inline void tcp_reset_xmit_timer(struct sock *sk, int what, unsigned long when)
+static __inline void tcp_reset_xmit_timer(struct sock *sk, int what, unsigned long when)
{
#if 0
struct tcp_opt *tp = &sk->tp_pinfo.af_tcp;
@@ -2715,7 +2715,7 @@
* and even PMTU discovery events into account.
*/
-static __inline__ unsigned int tcp_current_mss(struct sock *sk)
+static __inline unsigned int tcp_current_mss(struct sock *sk)
{
#if 0
struct tcp_opt *tp = &sk->tp_pinfo.af_tcp;
@@ -2742,7 +2742,7 @@
 * Underestimations are easier to detect and fix by tcp_measure_rcv_mss().
*/
-static inline void tcp_initialize_rcv_mss(struct sock *sk)
+static __inline void tcp_initialize_rcv_mss(struct sock *sk)
{
#if 0
struct tcp_opt *tp = &sk->tp_pinfo.af_tcp;
@@ -2756,7 +2756,7 @@
#endif
}
-static __inline__ void __tcp_fast_path_on(struct tcp_opt *tp, u32 snd_wnd)
+static __inline void __tcp_fast_path_on(struct tcp_opt *tp, u32 snd_wnd)
{
#if 0
tp->pred_flags = htonl((tp->tcp_header_len << 26) |
@@ -2765,14 +2765,14 @@
#endif
}
-static __inline__ void tcp_fast_path_on(struct tcp_opt *tp)
+static __inline void tcp_fast_path_on(struct tcp_opt *tp)
{
#if 0
__tcp_fast_path_on(tp, tp->snd_wnd>>tp->snd_wscale);
#endif
}
-static inline void tcp_fast_path_check(struct sock *sk, struct tcp_opt *tp)
+static __inline void tcp_fast_path_check(struct sock *sk, struct tcp_opt *tp)
{
#if 0
if (skb_queue_len(&tp->out_of_order_queue) == 0 &&
@@ -2787,7 +2787,7 @@
 * Rcv_nxt can be after the window if our peer pushes more data
* than the offered window.
*/
-static __inline__ u32 tcp_receive_window(struct tcp_opt *tp)
+static __inline u32 tcp_receive_window(struct tcp_opt *tp)
{
#if 0
s32 win = tp->rcv_wup + tp->rcv_wnd - tp->rcv_nxt;
@@ -2879,7 +2879,7 @@
/*
* Compute minimal free write space needed to queue new packets.
*/
-static inline int tcp_min_write_space(struct sock *sk)
+static __inline int tcp_min_write_space(struct sock *sk)
{
#if 0
return sk->wmem_queued/2;
@@ -2888,7 +2888,7 @@
#endif
}
-static inline int tcp_wspace(struct sock *sk)
+static __inline int tcp_wspace(struct sock *sk)
{
#if 0
return sk->sndbuf - sk->wmem_queued;
@@ -2912,7 +2912,7 @@
* "Packets left network, but not honestly ACKed yet" PLUS
* "Packets fast retransmitted"
*/
-static __inline__ unsigned int tcp_packets_in_flight(struct tcp_opt *tp)
+static __inline unsigned int tcp_packets_in_flight(struct tcp_opt *tp)
{
#if 0
return tp->packets_out - tp->left_out + tp->retrans_out;
@@ -2926,7 +2926,7 @@
* one half the current congestion window, but no
* less than two segments
*/
-static inline __u32 tcp_recalc_ssthresh(struct tcp_opt *tp)
+static __inline __u32 tcp_recalc_ssthresh(struct tcp_opt *tp)
{
#if 0
return max(tp->snd_cwnd >> 1U, 2U);
@@ -2939,7 +2939,7 @@
* The exception is rate halving phase, when cwnd is decreasing towards
* ssthresh.
*/
-static inline __u32 tcp_current_ssthresh(struct tcp_opt *tp)
+static __inline __u32 tcp_current_ssthresh(struct tcp_opt *tp)
{
#if 0
if ((1<<tp->ca_state)&(TCPF_CA_CWR|TCPF_CA_Recovery))
@@ -2953,7 +2953,7 @@
#endif
}
-static inline void tcp_sync_left_out(struct tcp_opt *tp)
+static __inline void tcp_sync_left_out(struct tcp_opt *tp)
{
#if 0
if (tp->sack_ok && tp->sacked_out >= tp->packets_out - tp->lost_out)
@@ -2966,7 +2966,7 @@
/* Congestion window validation. (RFC2861) */
-static inline void tcp_cwnd_validate(struct sock *sk, struct tcp_opt *tp)
+static __inline void tcp_cwnd_validate(struct sock *sk, struct tcp_opt *tp)
{
#if 0
if (tp->packets_out >= tp->snd_cwnd) {
@@ -2985,7 +2985,7 @@
}
/* Set slow start threshold and cwnd not falling to slow start */
-static inline void __tcp_enter_cwr(struct tcp_opt *tp)
+static __inline void __tcp_enter_cwr(struct tcp_opt *tp)
{
#if 0
tp->undo_marker = 0;
@@ -2999,7 +2999,7 @@
#endif
}
-static inline void tcp_enter_cwr(struct tcp_opt *tp)
+static __inline void tcp_enter_cwr(struct tcp_opt *tp)
{
#if 0
tp->prior_ssthresh = 0;
@@ -3015,7 +3015,7 @@
/* Slow start with delack produces 3 packets of burst, so that
* it is safe "de facto".
*/
-static __inline__ __u32 tcp_max_burst(struct tcp_opt *tp)
+static __inline __u32 tcp_max_burst(struct tcp_opt *tp)
{
return 3;
}
@@ -3030,7 +3030,7 @@
#endif
}
-static __inline__ void tcp_minshall_update(struct tcp_opt *tp, int mss, struct sk_buff *skb)
+static __inline void tcp_minshall_update(struct tcp_opt *tp, int mss, struct sk_buff *skb)
{
#if 0
if (skb->len < mss)
@@ -3046,7 +3046,7 @@
With Minshall's modification: all sent small packets are ACKed.
*/
-static __inline__ int
+static __inline int
tcp_nagle_check(struct tcp_opt *tp, struct sk_buff *skb, unsigned mss_now, int nonagle)
{
#if 0
@@ -3064,7 +3064,7 @@
/* This checks if the data bearing packet SKB (usually tp->send_head)
* should be put on the wire right now.
*/
[truncated at 1000 lines; 1316 more skipped]