author     Eric Dumazet <edumazet@google.com>    2016-07-10 11:04:02 +0300
committer  Zefan Li <lizefan@huawei.com>         2016-10-26 18:15:44 +0300
commit     d91a2aa46cbc95c9854d4a444fc6acee444ca655 (patch)
tree       395e62bb99fcaa827da6289d8695d203b07f3d8a
parent     00e9ff5931fe385b9e24e6a49fdfb1ae763984e6 (diff)
download   linux-d91a2aa46cbc95c9854d4a444fc6acee444ca655.tar.xz
tcp: make challenge acks less predictable
commit 75ff39ccc1bd5d3c455b6822ab09e533c551f758 upstream.

Yue Cao claims that current host rate limiting of challenge ACKS (RFC 5961)
could leak enough information to allow a patient attacker to hijack TCP
sessions. He will soon provide details in an academic paper.

This patch increases the default limit from 100 to 1000, and adds some
randomization so that the attacker can no longer hijack sessions without
spending a considerable amount of probes.

Based on initial analysis and patch from Linus.

Note that we also have per socket rate limiting, so it is tempting to
remove the host limit in the future.

v2: randomize the count of challenge acks per second, not the period.

Fixes: 282f23c6ee34 ("tcp: implement RFC 5961 3.2")
Reported-by: Yue Cao <ycao009@ucr.edu>
Signed-off-by: Eric Dumazet <edumazet@google.com>
Suggested-by: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Yuchung Cheng <ycheng@google.com>
Cc: Neal Cardwell <ncardwell@google.com>
Acked-by: Neal Cardwell <ncardwell@google.com>
Acked-by: Yuchung Cheng <ycheng@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
[lizf: Backported to 3.4:
 - adjust context
 - use ACCESS_ONCE instead of WRITE_ONCE/READ_ONCE
 - open-code prandom_u32_max()]
Signed-off-by: Zefan Li <lizefan@huawei.com>
-rw-r--r--  net/ipv4/tcp_input.c  13
1 file changed, 9 insertions(+), 4 deletions(-)
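The behaviour the patch introduces is easier to follow outside diff context. The snippet below is a minimal user-space model of it, assuming a stand-in xorshift PRNG in place of the kernel's random32(); all names, time(), and the PRNG are illustrative only, not the kernel implementation.

/*
 * Minimal user-space model of the randomized per-second challenge-ACK
 * budget described in the commit message. Illustrative only.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

static uint32_t challenge_ack_limit = 1000;	/* new default limit */
static uint32_t challenge_timestamp;		/* current one-second epoch */
static uint32_t challenge_budget;		/* challenge ACKs left this epoch */

/* Full-range 32-bit xorshift PRNG, standing in for random32(). */
static uint32_t prng32(void)
{
	static uint32_t s = 2463534242u;

	s ^= s << 13;
	s ^= s >> 17;
	s ^= s << 5;
	return s;
}

/* Returns true if a challenge ACK may be sent right now. */
static bool challenge_ack_allowed(void)
{
	uint32_t now = (uint32_t)time(NULL);

	if (now != challenge_timestamp) {
		uint32_t half = (challenge_ack_limit + 1) / 2;

		/* New second: the budget becomes half the limit plus a
		 * random value in [0, limit), so its exact size is
		 * unpredictable. */
		challenge_timestamp = now;
		challenge_budget = half + (uint32_t)
			(((uint64_t)prng32() * challenge_ack_limit) >> 32);
	}
	if (challenge_budget > 0) {
		challenge_budget--;
		return true;
	}
	return false;
}

int main(void)
{
	unsigned int sent = 0;

	/* Ask 2000 times; assuming the loop stays within one second,
	 * between 500 and 1499 requests succeed. */
	for (int i = 0; i < 2000; i++)
		if (challenge_ack_allowed())
			sent++;
	printf("challenge ACKs allowed this second: %u\n", sent);
	return 0;
}

With the default limit of 1000 the per-second budget lands somewhere in [500, 1500), so, per the commit message, an attacker can no longer count challenge ACKs to infer the exact rate-limit state.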
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 2d3290496a0a..55b08e09f0c8 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -89,7 +89,7 @@ int sysctl_tcp_adv_win_scale __read_mostly = 1;
 EXPORT_SYMBOL(sysctl_tcp_adv_win_scale);
 
 /* rfc5961 challenge ack rate limiting */
-int sysctl_tcp_challenge_ack_limit = 100;
+int sysctl_tcp_challenge_ack_limit = 1000;
 
 int sysctl_tcp_stdurg __read_mostly;
 int sysctl_tcp_rfc1337 __read_mostly;
@@ -3701,13 +3701,18 @@ static void tcp_send_challenge_ack(struct sock *sk)
 	/* unprotected vars, we dont care of overwrites */
 	static u32 challenge_timestamp;
 	static unsigned int challenge_count;
-	u32 now = jiffies / HZ;
+	u32 count, now = jiffies / HZ;
 
 	if (now != challenge_timestamp) {
+		u32 half = (sysctl_tcp_challenge_ack_limit + 1) >> 1;
+
 		challenge_timestamp = now;
-		challenge_count = 0;
+		ACCESS_ONCE(challenge_count) = half +
+			(u32)(((u64)random32() * sysctl_tcp_challenge_ack_limit) >> 32);
 	}
-	if (++challenge_count <= sysctl_tcp_challenge_ack_limit) {
+	count = ACCESS_ONCE(challenge_count);
+	if (count > 0) {
+		ACCESS_ONCE(challenge_count) = count - 1;
 		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPCHALLENGEACK);
 		tcp_send_ack(sk);
 	}
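As the backport note says, prandom_u32_max() is open-coded here: (u32)(((u64)random32() * limit) >> 32) scales a uniform 32-bit value into [0, limit) with one multiply and no division. A small stand-alone check of that mapping, illustrative only and not kernel code:

#include <stdint.h>
#include <stdio.h>

/* Scale a uniform 32-bit value r into [0, bound): r/2^32 lies in [0, 1),
 * so (r * bound) >> 32 lies in [0, bound). Same trick the patch uses to
 * open-code prandom_u32_max(). */
static uint32_t scale_to_bound(uint32_t r, uint32_t bound)
{
	return (uint32_t)(((uint64_t)r * bound) >> 32);
}

int main(void)
{
	const uint32_t bound = 1000;	/* default sysctl_tcp_challenge_ack_limit */
	const uint32_t samples[] = { 0u, 1u, 0x7fffffffu, 0xdeadbeefu, 0xffffffffu };

	for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("r = %10u -> %u\n", samples[i],
		       scale_to_bound(samples[i], bound));
	/* Every result lies in [0, 1000); 0xffffffff maps to 999. */
	return 0;
}

ACCESS_ONCE() on challenge_count stands in for the upstream READ_ONCE()/WRITE_ONCE(), which 3.4 predates; the variables are deliberately left unlocked, as the "unprotected vars" comment in the hunk notes.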