
Commit

tcp/dccp: fix possible race __inet_lookup_established()
Michal Kubecek and Firo Yang did a very nice analysis of crashes
happening in __inet_lookup_established().

Since a TCP socket can go from TCP_ESTABLISHED to TCP_LISTEN
(via a close()/socket()/listen() cycle) without an RCU grace period,
I should not have changed the listeners' linkage in their hash table.

They must use the nulls protocol (Documentation/RCU/rculist_nulls.txt),
so that a lookup can detect that a socket in a hash list was moved to
another one.
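
For background, here is a minimal sketch of the nulls-protocol lookup this
fix relies on. It is not part of the commit: the function name and arguments
are hypothetical, but sk_nulls_for_each_rcu() and get_nulls_value() are the
real helpers used by __inet_lookup_established(). Because every chain ends in
a distinct nulls value, a reader that was migrated onto another chain detects
the mismatch and restarts instead of missing a live socket.

#include <linux/list_nulls.h>	/* is_a_nulls(), get_nulls_value() */
#include <net/sock.h>		/* struct sock, sk_nulls_for_each_rcu() */

/*
 * Sketch only: "ehash" and the hash-only match are simplified stand-ins for
 * the real established-hash lookup.  Caller must hold rcu_read_lock().
 */
static struct sock *nulls_lookup_sketch(struct hlist_nulls_head *ehash,
					unsigned int ehash_mask,
					unsigned int hash)
{
	unsigned int slot = hash & ehash_mask;
	const struct hlist_nulls_node *node;
	struct sock *sk;

begin:
	sk_nulls_for_each_rcu(sk, node, &ehash[slot]) {
		if (sk->sk_hash == hash)	/* real code also checks addresses/ports */
			return sk;
	}
	/*
	 * Each chain is terminated by a nulls value naming that chain.  If we
	 * did not end on the chain we started from, a socket was moved under
	 * us (e.g. an ESTABLISHED -> LISTEN rehash): restart the walk.
	 */
	if (get_nulls_value(node) != slot)
		goto begin;
	return NULL;
}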

Since we added code in commit d296ba6 ("soreuseport: Resolve
merge conflict for v4/v6 ordering fix"), we have to add
an hlist_nulls_add_tail_rcu() helper.

Fixes: 3b24d85 ("tcp/dccp: do not touch listener sk_refcnt under synflood")
Signed-off-by: Eric Dumazet <[email protected]>
Reported-by: Michal Kubecek <[email protected]>
Reported-by: Firo Yang <[email protected]>
Reviewed-by: Michal Kubecek <[email protected]>
Link: https://lore.kernel.org/netdev/[email protected]/
Signed-off-by: Jakub Kicinski <[email protected]>
Eric Dumazet authored and Jakub Kicinski committed Dec 14, 2019
1 parent 8f9cc1e commit 8dbd76e
Showing 6 changed files with 65 additions and 15 deletions.
37 changes: 37 additions & 0 deletions include/linux/rculist_nulls.h
@@ -100,6 +100,43 @@ static inline void hlist_nulls_add_head_rcu(struct hlist_nulls_node *n,
		first->pprev = &n->next;
}

/**
 * hlist_nulls_add_tail_rcu
 * @n: the element to add to the hash list.
 * @h: the list to add to.
 *
 * Description:
 * Adds the specified element to the specified hlist_nulls,
 * while permitting racing traversals.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_nulls_add_head_rcu()
 * or hlist_nulls_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_nulls_for_each_entry_rcu(), used to prevent memory-consistency
 * problems on Alpha CPUs. Regardless of the type of CPU, the
 * list-traversal primitive must be guarded by rcu_read_lock().
 */
static inline void hlist_nulls_add_tail_rcu(struct hlist_nulls_node *n,
					    struct hlist_nulls_head *h)
{
	struct hlist_nulls_node *i, *last = NULL;

	/* Note: write side code, so rcu accessors are not needed. */
	for (i = h->first; !is_a_nulls(i); i = i->next)
		last = i;

	if (last) {
		n->next = last->next;
		n->pprev = &last->next;
		rcu_assign_pointer(hlist_next_rcu(last), n);
	} else {
		hlist_nulls_add_head_rcu(n, h);
	}
}

/**
 * hlist_nulls_for_each_entry_rcu - iterate over rcu list of given type
 * @tpos: the type * to use as a loop cursor.
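
As a usage note (a sketch, not part of this commit; the demo_bucket type and
demo_add_tail() function are hypothetical): writers appending with the new
hlist_nulls_add_tail_rcu() still serialize against each other with the bucket
lock, much like __inet_hash() does with ilb->lock, while RCU readers may walk
the chain concurrently.

#include <linux/rculist_nulls.h>
#include <linux/spinlock.h>

/* Hypothetical bucket: a nulls list plus the lock that serializes writers. */
struct demo_bucket {
	spinlock_t lock;
	struct hlist_nulls_head nulls_head;
};

static void demo_add_tail(struct demo_bucket *b, struct hlist_nulls_node *n)
{
	/*
	 * Writers must exclude each other; readers walking the chain under
	 * rcu_read_lock() need no lock thanks to the rcu_assign_pointer()
	 * publication inside hlist_nulls_add_tail_rcu().
	 */
	spin_lock(&b->lock);
	hlist_nulls_add_tail_rcu(n, &b->nulls_head);
	spin_unlock(&b->lock);
}
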
12 changes: 9 additions & 3 deletions include/net/inet_hashtables.h
@@ -103,13 +103,19 @@ struct inet_bind_hashbucket {
	struct hlist_head chain;
};

/*
 * Sockets can be hashed in established or listening table
/* Sockets can be hashed in established or listening table.
 * We must use different 'nulls' end-of-chain value for all hash buckets :
 * A socket might transition from ESTABLISH to LISTEN state without
 * RCU grace period. A lookup in ehash table needs to handle this case.
 */
#define LISTENING_NULLS_BASE (1U << 29)
struct inet_listen_hashbucket {
	spinlock_t lock;
	unsigned int count;
	struct hlist_head head;
	union {
		struct hlist_head head;
		struct hlist_nulls_head nulls_head;
	};
};

/* This is for listening sockets, thus all sockets which possess wildcards. */
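
For reference, a small sketch of how the per-bucket nulls value is encoded
and recovered (not from the commit; the demo function is hypothetical, but
INIT_HLIST_NULLS_HEAD(), is_a_nulls() and get_nulls_value() are the real
helpers from include/linux/list_nulls.h): the end-of-chain "pointer" is an
odd value carrying the bucket tag, which is why established and listening
chains can be told apart by a lookup that raced with a rehash.

#include <linux/list_nulls.h>
#include <linux/printk.h>

/* Demo only: tag two buckets the way inet_hashinfo_init() tags slot 3. */
static void demo_nulls_values(void)
{
	struct hlist_nulls_head ehash_bucket, listen_bucket;

	INIT_HLIST_NULLS_HEAD(&ehash_bucket, 3);		/* ehash slot 3 */
	INIT_HLIST_NULLS_HEAD(&listen_bucket, 3 + (1U << 29));	/* listening slot 3 */

	/* An empty chain's first "node" is the nulls marker itself. */
	if (is_a_nulls(ehash_bucket.first))
		pr_info("ehash bucket ends with nulls value %lu\n",
			get_nulls_value(ehash_bucket.first));

	/*
	 * A lookup that started in ehash slot 3 but finishes on a chain whose
	 * nulls value is not 3 knows it strayed onto another chain (here the
	 * listening bucket) and must restart.
	 */
	if (get_nulls_value(listen_bucket.first) != 3)
		pr_info("terminator mismatch -> restart lookup\n");
}
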
5 changes: 5 additions & 0 deletions include/net/sock.h
@@ -722,6 +722,11 @@ static inline void __sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_h
	hlist_nulls_add_head_rcu(&sk->sk_nulls_node, list);
}

static inline void __sk_nulls_add_node_tail_rcu(struct sock *sk, struct hlist_nulls_head *list)
{
	hlist_nulls_add_tail_rcu(&sk->sk_nulls_node, list);
}

static inline void sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list)
{
	sock_hold(sk);
3 changes: 2 additions & 1 deletion net/ipv4/inet_diag.c
@@ -911,11 +911,12 @@ void inet_diag_dump_icsk(struct inet_hashinfo *hashinfo, struct sk_buff *skb,

	for (i = s_i; i < INET_LHTABLE_SIZE; i++) {
		struct inet_listen_hashbucket *ilb;
		struct hlist_nulls_node *node;

		num = 0;
		ilb = &hashinfo->listening_hash[i];
		spin_lock(&ilb->lock);
		sk_for_each(sk, &ilb->head) {
		sk_nulls_for_each(sk, node, &ilb->nulls_head) {
			struct inet_sock *inet = inet_sk(sk);

			if (!net_eq(sock_net(sk), net))
16 changes: 8 additions & 8 deletions net/ipv4/inet_hashtables.c
@@ -516,10 +516,11 @@ static int inet_reuseport_add_sock(struct sock *sk,
				   struct inet_listen_hashbucket *ilb)
{
	struct inet_bind_bucket *tb = inet_csk(sk)->icsk_bind_hash;
	const struct hlist_nulls_node *node;
	struct sock *sk2;
	kuid_t uid = sock_i_uid(sk);

	sk_for_each_rcu(sk2, &ilb->head) {
	sk_nulls_for_each_rcu(sk2, node, &ilb->nulls_head) {
		if (sk2 != sk &&
		    sk2->sk_family == sk->sk_family &&
		    ipv6_only_sock(sk2) == ipv6_only_sock(sk) &&
@@ -555,9 +556,9 @@ int __inet_hash(struct sock *sk, struct sock *osk)
	}
	if (IS_ENABLED(CONFIG_IPV6) && sk->sk_reuseport &&
		sk->sk_family == AF_INET6)
		hlist_add_tail_rcu(&sk->sk_node, &ilb->head);
		__sk_nulls_add_node_tail_rcu(sk, &ilb->nulls_head);
	else
		hlist_add_head_rcu(&sk->sk_node, &ilb->head);
		__sk_nulls_add_node_rcu(sk, &ilb->nulls_head);
	inet_hash2(hashinfo, sk);
	ilb->count++;
	sock_set_flag(sk, SOCK_RCU_FREE);
@@ -606,11 +607,9 @@ void inet_unhash(struct sock *sk)
		reuseport_detach_sock(sk);
	if (ilb) {
		inet_unhash2(hashinfo, sk);
		__sk_del_node_init(sk);
		ilb->count--;
	} else {
		__sk_nulls_del_node_init_rcu(sk);
		ilb->count--;
	}
	__sk_nulls_del_node_init_rcu(sk);
	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
unlock:
	spin_unlock_bh(lock);
@@ -750,7 +749,8 @@ void inet_hashinfo_init(struct inet_hashinfo *h)

	for (i = 0; i < INET_LHTABLE_SIZE; i++) {
		spin_lock_init(&h->listening_hash[i].lock);
		INIT_HLIST_HEAD(&h->listening_hash[i].head);
		INIT_HLIST_NULLS_HEAD(&h->listening_hash[i].nulls_head,
				      i + LISTENING_NULLS_BASE);
		h->listening_hash[i].count = 0;
	}

7 changes: 4 additions & 3 deletions net/ipv4/tcp_ipv4.c
@@ -2147,23 +2147,24 @@ static void *listening_get_next(struct seq_file *seq, void *cur)
	struct tcp_iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);
	struct inet_listen_hashbucket *ilb;
	struct hlist_nulls_node *node;
	struct sock *sk = cur;

	if (!sk) {
get_head:
		ilb = &tcp_hashinfo.listening_hash[st->bucket];
		spin_lock(&ilb->lock);
		sk = sk_head(&ilb->head);
		sk = sk_nulls_head(&ilb->nulls_head);
		st->offset = 0;
		goto get_sk;
	}
	ilb = &tcp_hashinfo.listening_hash[st->bucket];
	++st->num;
	++st->offset;

	sk = sk_next(sk);
	sk = sk_nulls_next(sk);
get_sk:
	sk_for_each_from(sk) {
	sk_nulls_for_each_from(sk, node) {
		if (!net_eq(sock_net(sk), net))
			continue;
		if (sk->sk_family == afinfo->family)
