author     Dave Airlie <airlied@redhat.com>  2020-07-24 01:28:09 +0300
committer  Dave Airlie <airlied@redhat.com>  2020-07-24 01:48:05 +0300
commit     41206a073ceebc517245677a19f52ba6379b33a9 (patch)
tree       2fc35aac6abe32b99058ad55b0fc6e4d449d1056 /net/ipv4/tcp_input.c
parent     206739119508d5ab4b42ab480ff61a7e6cd72d7c (diff)
parent     ba47d845d715a010f7b51f6f89bae32845e6acb7 (diff)
Merge v5.8-rc6 into drm-next
I've got a silent conflict + two trees based on fixes to merge.

Fixes a silent merge with amdgpu

Signed-off-by: Dave Airlie <airlied@redhat.com>
Diffstat (limited to 'net/ipv4/tcp_input.c')
-rw-r--r--  net/ipv4/tcp_input.c  16
1 file changed, 13 insertions(+), 3 deletions(-)
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 12fda8f27b08..9615e72656d1 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -261,7 +261,8 @@ static void tcp_ecn_accept_cwr(struct sock *sk, const struct sk_buff *skb)
* cwnd may be very low (even just 1 packet), so we should ACK
* immediately.
*/
- inet_csk(sk)->icsk_ack.pending |= ICSK_ACK_NOW;
+ if (TCP_SKB_CB(skb)->seq != TCP_SKB_CB(skb)->end_seq)
+ inet_csk(sk)->icsk_ack.pending |= ICSK_ACK_NOW;
}
}
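For reference, TCP_SKB_CB(skb)->end_seq is seq plus the payload length (plus one for SYN/FIN), so a pure ACK has seq == end_seq; the new check forces an immediate ACK only for data-bearing CWR segments. A minimal sketch of the predicate, assuming kernel types; the helper name is illustrative, not from the patch:

#include <linux/types.h>

/* seq != end_seq holds iff the segment consumes sequence space,
 * i.e. it carries payload or a SYN/FIN; it is false for a pure ACK.
 */
static inline bool skb_carries_data(u32 seq, u32 end_seq)
{
	return seq != end_seq;
}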
@@ -3665,6 +3666,15 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
tcp_in_ack_event(sk, ack_ev_flags);
}
+ /* This is a deviation from RFC3168 since it states that:
+ * "When the TCP data sender is ready to set the CWR bit after reducing
+ * the congestion window, it SHOULD set the CWR bit only on the first
+ * new data packet that it transmits."
+ * We accept CWR on pure ACKs to be more robust
+ * with widely-deployed TCP implementations that do this.
+ */
+ tcp_ecn_accept_cwr(sk, skb);
+
/* We passed data and got it acked, remove any soft error
* log. Something worked...
*/
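This hunk moves the tcp_ecn_accept_cwr() call into ACK processing (it is removed from tcp_data_queue() in a later hunk), so a CWR mark is honored even when it arrives on a pure ACK. As a hypothetical userspace illustration of that case, using the tcphdr bitfields from the uapi <linux/tcp.h> (the helper is mine, not the patch's):

#include <linux/tcp.h>	/* struct tcphdr with ack/cwr/syn/fin bitfields */
#include <stdbool.h>

/* RFC 3168 says CWR SHOULD ride on the first new data packet, but
 * some widely-deployed stacks set it on pure ACKs too; the hunk
 * above now accepts those.
 */
static bool pure_ack_with_cwr(const struct tcphdr *th, unsigned int payload_len)
{
	return th->ack && th->cwr && payload_len == 0 && !th->syn && !th->fin;
}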
@@ -4572,6 +4582,7 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
if (unlikely(tcp_try_rmem_schedule(sk, skb, skb->truesize))) {
NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPOFODROP);
+ sk->sk_data_ready(sk);
tcp_drop(sk, skb);
return;
}
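The added sk_data_ready() call (mirrored in the tcp_data_queue() hunk below) wakes a reader blocked on the socket before the out-of-order segment is dropped for lack of receive memory, giving the application a chance to drain the receive queue and make room for the retransmit. A conceptual sketch of that pattern, with invented names, not kernel code:

/* On receive-memory pressure, notify the reader before dropping so
 * the queue can be drained and the retransmit has room to land.
 */
struct rx_sock {
	void (*data_ready)(struct rx_sock *sk);	/* e.g. wakes a blocked read() */
};

static void drop_under_rmem_pressure(struct rx_sock *sk)
{
	sk->data_ready(sk);	/* let the reader free receive memory */
	/* ...count the drop and free the buffer... */
}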
@@ -4800,8 +4811,6 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
skb_dst_drop(skb);
__skb_pull(skb, tcp_hdr(skb)->doff * 4);
- tcp_ecn_accept_cwr(sk, skb);
-
tp->rx_opt.dsack = 0;
/* Queue data for delivery to the user.
@@ -4820,6 +4829,7 @@ queue_and_out:
sk_forced_mem_schedule(sk, skb->truesize);
else if (tcp_try_rmem_schedule(sk, skb, skb->truesize)) {
NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPRCVQDROP);
+ sk->sk_data_ready(sk);
goto drop;
}