path: root/net/mptcp
author		Paolo Abeni <pabeni@redhat.com>	2021-01-20 17:39:12 +0300
committer	Jakub Kicinski <kuba@kernel.org>	2021-01-23 06:21:02 +0300
commit		ec369c3a337fe075a7bd4da88d163d44c62ccbb1 (patch)
tree		9ac9b3773c1571177bf1c24dc3ad97d05de07b6e /net/mptcp
parent		5cf92bbadc585e1bcb710df75293e07b7c846bb6 (diff)
download	linux-ec369c3a337fe075a7bd4da88d163d44c62ccbb1.tar.xz
mptcp: do not queue excessive data on subflows
The current packet scheduler can enqueue up to sndbuf data on each subflow. If the send buffer is large and the subflows are not symmetric, this could lead to suboptimal aggregate bandwidth utilization. Limit the amount of queued data to the maximum send window.

Reviewed-by: Mat Martineau <mathew.j.martineau@linux.intel.com>
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Diffstat (limited to 'net/mptcp')
-rw-r--r--	net/mptcp/protocol.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
index d07e60330df5..e741201acc98 100644
--- a/net/mptcp/protocol.c
+++ b/net/mptcp/protocol.c
@@ -1389,7 +1389,7 @@ static struct sock *mptcp_subflow_get_send(struct mptcp_sock *msk)
			continue;

		nr_active += !subflow->backup;
-		if (!sk_stream_memory_free(subflow->tcp_sock))
+		if (!sk_stream_memory_free(subflow->tcp_sock) || !tcp_sk(ssk)->snd_wnd)
			continue;

		pace = READ_ONCE(ssk->sk_pacing_rate);
@@ -1415,7 +1415,7 @@ static struct sock *mptcp_subflow_get_send(struct mptcp_sock *msk)
	if (send_info[0].ssk) {
		msk->last_snd = send_info[0].ssk;
		msk->snd_burst = min_t(int, MPTCP_SEND_BURST_SIZE,
-				       sk_stream_wspace(msk->last_snd));
+				       tcp_sk(msk->last_snd)->snd_wnd);
		return msk->last_snd;
	}
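
For illustration only (this is not part of the patch), the userspace C sketch below mimics the effect of the two hunks: a subflow with a zero send window is skipped outright, and the burst handed to the chosen subflow is capped by the peer's advertised window rather than by local send-buffer space. The struct fields and the 64 KiB burst constant are hypothetical stand-ins for the kernel's tcp_sock / mptcp_sock state, not the real kernel values.

#include <stdint.h>
#include <stdio.h>

/* Illustrative burst cap; not necessarily the kernel's MPTCP_SEND_BURST_SIZE. */
#define SEND_BURST_SIZE (64 * 1024)

struct subflow {
	const char *name;
	uint32_t snd_wnd;        /* peer-advertised TCP send window */
	long     stream_wspace;  /* free space in the local send buffer */
};

/* Mirrors the first hunk: no free sndbuf space or a zero window means skip. */
static int subflow_sendable(const struct subflow *sf)
{
	return sf->stream_wspace > 0 && sf->snd_wnd > 0;
}

/* Old burst limit: bounded only by local send-buffer space. */
static long burst_old(const struct subflow *sf)
{
	return sf->stream_wspace < SEND_BURST_SIZE ?
	       sf->stream_wspace : SEND_BURST_SIZE;
}

/* New burst limit (second hunk): bounded by the peer's send window, so a
 * subflow with a small window can no longer absorb a huge backlog. */
static long burst_new(const struct subflow *sf)
{
	return sf->snd_wnd < SEND_BURST_SIZE ?
	       (long)sf->snd_wnd : SEND_BURST_SIZE;
}

int main(void)
{
	/* Asymmetric pair: both have large, mostly empty send buffers, but the
	 * slow subflow's peer only advertises a 4 KiB window. */
	struct subflow slow = { "slow", 4 * 1024, 200 * 1024 };
	struct subflow fast = { "fast", 256 * 1024, 200 * 1024 };

	printf("%s: sendable=%d old burst=%ld new burst=%ld\n",
	       slow.name, subflow_sendable(&slow), burst_old(&slow), burst_new(&slow));
	printf("%s: sendable=%d old burst=%ld new burst=%ld\n",
	       fast.name, subflow_sendable(&fast), burst_old(&fast), burst_new(&fast));
	return 0;
}

Under the old limit the slow subflow could be handed the full burst despite its tiny window; with the window-based limit it is capped at 4 KiB, which is the behaviour the commit message describes.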