Commit 582a72da authored by David S. Miller

inetpeer: Introduce inet_peer_address_t.

Currently only the v4 aspect is used, but this will change.
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 98158f5a
include/net/inetpeer.h
@@ -13,10 +13,18 @@
 #include <linux/spinlock.h>
 #include <asm/atomic.h>
 
+typedef struct {
+	union {
+		__be32		a4;
+		__be32		a6[4];
+	};
+	__u16	family;
+} inet_peer_address_t;
+
 struct inet_peer {
 	/* group together avl_left,avl_right,v4daddr to speedup lookups */
 	struct inet_peer __rcu	*avl_left, *avl_right;
-	__be32			v4daddr;	/* peer's address */
+	inet_peer_address_t	daddr;
 	__u32			avl_height;
 	struct list_head	unused;
 	__u32			dtime;		/* the time of last use of not
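The union lets a v4 address occupy the first word of the v6 array, with family as the discriminator. As a rough illustration of the intended use (not part of this commit; the helper names and the use of the standard AF_INET/AF_INET6 constants are assumptions), a caller could fill the new type like this:

	#include <linux/socket.h>	/* AF_INET, AF_INET6 */
	#include <linux/string.h>	/* memcpy */

	/* Hypothetical helpers, not in this commit: tag the union with the
	 * address family so later code can tell v4 and v6 peers apart. */
	static inline void inet_peer_addr_set_v4(inet_peer_address_t *a, __be32 ip)
	{
		a->a4 = ip;		/* occupies a6[0]; a6[1..3] stay unused */
		a->family = AF_INET;
	}

	static inline void inet_peer_addr_set_v6(inet_peer_address_t *a,
						 const __be32 *ip6)
	{
		memcpy(a->a6, ip6, sizeof(a->a6));
		a->family = AF_INET6;
	}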
net/ipv4/inetpeer.c
@@ -63,7 +63,7 @@
  * refcnt: atomically against modifications on other CPU;
  *		usually under some other lock to prevent node disappearing
  * dtime: unused node list lock
- * v4daddr: unchangeable
+ * daddr: unchangeable
  * ip_id_count: atomic value (no lock needed)
  */
@@ -165,9 +165,9 @@ static void unlink_from_unused(struct inet_peer *p)
 	for (u = rcu_dereference_protected(_base->root,		\
 			lockdep_is_held(&_base->lock));		\
 	     u != peer_avl_empty; ) {				\
-		if (_daddr == u->v4daddr)			\
+		if (_daddr == u->daddr.a4)			\
 			break;					\
-		if ((__force __u32)_daddr < (__force __u32)u->v4daddr)	\
+		if ((__force __u32)_daddr < (__force __u32)u->daddr.a4) \
 			v = &u->avl_left;			\
 		else						\
 			v = &u->avl_right;			\
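Note that the lookup still compares only the a4 member, so the AVL tree remains v4-only for now, consistent with the commit message. Once v6 entries exist the comparison has to become family-aware; a minimal sketch of what that could look like (hypothetical, not in this commit):

	#include <linux/string.h>	/* memcmp */

	/* Hypothetical, not in this commit: full-width equality test the
	 * union enables once v6 peers are stored alongside v4 ones. */
	static inline bool inet_peer_addr_equal(const inet_peer_address_t *a,
						const inet_peer_address_t *b)
	{
		if (a->family != b->family)
			return false;
		if (a->family == AF_INET)
			return a->a4 == b->a4;
		return memcmp(a->a6, b->a6, sizeof(a->a6)) == 0;
	}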
@@ -191,7 +191,7 @@ static struct inet_peer *lookup_rcu_bh(__be32 daddr, struct inet_peer_base *base)
 	int count = 0;
 
 	while (u != peer_avl_empty) {
-		if (daddr == u->v4daddr) {
+		if (daddr == u->daddr.a4) {
 			/* Before taking a reference, check if this entry was
 			 * deleted, unlink_from_pool() sets refcnt=-1 to make
 			 * distinction between an unused entry (refcnt=0) and
@@ -201,7 +201,7 @@ static struct inet_peer *lookup_rcu_bh(__be32 daddr, struct inet_peer_base *base)
 				u = NULL;
 			return u;
 		}
-		if ((__force __u32)daddr < (__force __u32)u->v4daddr)
+		if ((__force __u32)daddr < (__force __u32)u->daddr.a4)
 			u = rcu_dereference_bh(u->avl_left);
 		else
 			u = rcu_dereference_bh(u->avl_right);
@@ -354,7 +354,7 @@ static void unlink_from_pool(struct inet_peer *p, struct inet_peer_base *base)
 	if (atomic_cmpxchg(&p->refcnt, 1, -1) == 1) {
 		struct inet_peer __rcu **stack[PEER_MAXDEPTH];
 		struct inet_peer __rcu ***stackptr, ***delp;
-		if (lookup(p->v4daddr, stack, base) != p)
+		if (lookup(p->daddr.a4, stack, base) != p)
 			BUG();
 		delp = stackptr - 1; /* *delp[0] == p */
 		if (p->avl_left == peer_avl_empty_rcu) {
@@ -367,7 +367,7 @@ static void unlink_from_pool(struct inet_peer *p, struct inet_peer_base *base)
 			BUG_ON(rcu_dereference_protected(*stackptr[-1],
 					lockdep_is_held(&base->lock)) != t);
 			**--stackptr = t->avl_left;
-			/* t is removed, t->v4daddr > x->v4daddr for any
+			/* t is removed, t->daddr > x->daddr for any
 			 * x in p->avl_left subtree.
 			 * Put t in the old place of p. */
 			RCU_INIT_POINTER(*delp[0], t);
@@ -479,7 +479,7 @@ struct inet_peer *inet_getpeer(__be32 daddr, int create)
 	}
 	p = create ? kmem_cache_alloc(peer_cachep, GFP_ATOMIC) : NULL;
 	if (p) {
-		p->v4daddr = daddr;
+		p->daddr.a4 = daddr;
 		atomic_set(&p->refcnt, 1);
 		atomic_set(&p->rid, 0);
 		atomic_set(&p->ip_id_count, secure_ip_id(daddr));
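inet_getpeer() keeps its __be32 signature here, so existing callers are unchanged; only the store goes through the new union. A sketched v4 call site, assuming an skb with a valid IP header (illustrative, not taken from this commit):

	/* Sketch of a v4 caller, per the signature in the hunk above. */
	struct inet_peer *peer = inet_getpeer(ip_hdr(skb)->saddr, 1);

	if (peer) {
		/* peer->daddr.a4 now holds the peer's v4 address */
		inet_putpeer(peer);
	}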
net/ipv4/tcp_ipv4.c
@@ -1347,7 +1347,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
 	    tcp_death_row.sysctl_tw_recycle &&
 	    (dst = inet_csk_route_req(sk, req)) != NULL &&
 	    (peer = rt_get_peer((struct rtable *)dst)) != NULL &&
-	    peer->v4daddr == saddr) {
+	    peer->daddr.a4 == saddr) {
 		inet_peer_refcheck(peer);
 		if ((u32)get_seconds() - peer->tcp_ts_stamp < TCP_PAWS_MSL &&
 		    (s32)(peer->tcp_ts - req->ts_recent) >