author    Paolo Abeni <pabeni@redhat.com>    2020-11-27 13:10:27 +0300
committer Jakub Kicinski <kuba@kernel.org>   2020-12-01 04:55:23 +0300
commit    6e628cd3a8f78cb0dfe85353e5e488bda296bedf
tree      22796b798d2a4ed571c1bf4f48e1a7c8576ec0f3  /net/mptcp/options.c
parent    7439d687b79cbbd971c6a170be9aefda4a564be4
mptcp: use mptcp release_cb for delayed tasks
We have some tasks triggered by the subflow receive path which require
access to the msk socket status, specifically: mptcp_clean_una() and
mptcp_push_pending().

We have almost everything in place to defer such tasks to the msk
release_cb when the msk sock is owned.

Since the worker is no longer used to clean the acked data, for fallback
sockets we need to flush the acked data explicitly.

As an added bonus we can move the wake-up code into __mptcp_clean_una(),
greatly simplify mptcp_poll(), and move the timer update under the data
lock.

The worker is now used only to process and send DATA_FIN packets and to
do the MPTCP-level retransmissions.

Acked-by: Florian Westphal <fw@strlen.de>
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
Reviewed-by: Mat Martineau <mathew.j.martineau@linux.intel.com>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
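The release_cb deferral the commit message describes follows a common shape:
a receive-path context that finds the socket owned only records the pending
work in a flag, and the owner drains those flags when it releases the socket.
Below is a minimal user-space sketch of that pattern; every name in it
(fake_sock, DEFER_*, defer_or_run(), release_fake_sock()) is hypothetical,
not the kernel's.

/* Sketch of the release_cb deferral pattern; illustrative only. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

#define DEFER_CLEAN_UNA     (1u << 0)  /* roughly: mptcp_clean_una() */
#define DEFER_PUSH_PENDING  (1u << 1)  /* roughly: mptcp_push_pending() */

struct fake_sock {
	pthread_mutex_t lock;   /* protects .owned and .deferred */
	bool owned;             /* "socket lock" held by some context */
	unsigned int deferred;  /* work recorded while the sock was owned */
};

static void do_clean_una(struct fake_sock *sk)    { (void)sk; puts("clean acked data"); }
static void do_push_pending(struct fake_sock *sk) { (void)sk; puts("push pending data"); }

static void run_deferred(struct fake_sock *sk, unsigned int work)
{
	if (work & DEFER_CLEAN_UNA)
		do_clean_una(sk);
	if (work & DEFER_PUSH_PENDING)
		do_push_pending(sk);
}

/* "release_cb": before giving up ownership, drain whatever work other
 * contexts recorded while we held the sock.
 */
static void release_fake_sock(struct fake_sock *sk)
{
	unsigned int work;

	pthread_mutex_lock(&sk->lock);
	work = sk->deferred;
	sk->deferred = 0;
	pthread_mutex_unlock(&sk->lock);

	run_deferred(sk, work);          /* still the owner here */

	pthread_mutex_lock(&sk->lock);
	sk->owned = false;
	pthread_mutex_unlock(&sk->lock);
}

/* Receive-path caller: run the work directly if the sock is free,
 * otherwise just flag it for the current owner.
 */
static void defer_or_run(struct fake_sock *sk, unsigned int bit)
{
	pthread_mutex_lock(&sk->lock);
	if (sk->owned) {
		sk->deferred |= bit;
		pthread_mutex_unlock(&sk->lock);
		return;
	}
	sk->owned = true;
	pthread_mutex_unlock(&sk->lock);

	run_deferred(sk, bit);
	release_fake_sock(sk);
}

int main(void)
{
	struct fake_sock sk = { PTHREAD_MUTEX_INITIALIZER, false, 0 };

	defer_or_run(&sk, DEFER_CLEAN_UNA);     /* uncontended: runs at once */

	sk.owned = true;                        /* simulate an owned sock */
	defer_or_run(&sk, DEFER_PUSH_PENDING);  /* only records the flag */
	release_fake_sock(&sk);                 /* ...drained at release time */
	return 0;
}

The kernel does the equivalent with atomic flag bits checked from the msk
release callback; the sketch keeps the same shape with a plain mutex.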
Diffstat (limited to 'net/mptcp/options.c')
-rw-r--r--  net/mptcp/options.c | 18
1 file changed, 14 insertions(+), 4 deletions(-)
diff --git a/net/mptcp/options.c b/net/mptcp/options.c
index 3986454a0340..6b7b4b67f18c 100644
--- a/net/mptcp/options.c
+++ b/net/mptcp/options.c
@@ -830,7 +830,7 @@ static u64 expand_ack(u64 old_ack, u64 cur_ack, bool use_64bit)
 }
 
 static void ack_update_msk(struct mptcp_sock *msk,
-			   const struct sock *ssk,
+			   struct sock *ssk,
 			   struct mptcp_options_received *mp_opt)
 {
 	u64 new_wnd_end, new_snd_una, snd_nxt = READ_ONCE(msk->snd_nxt);
@@ -854,8 +854,7 @@ static void ack_update_msk(struct mptcp_sock *msk,
 
 	if (after64(new_wnd_end, msk->wnd_end)) {
 		msk->wnd_end = new_wnd_end;
-		if (mptcp_send_head(sk))
-			mptcp_schedule_work(sk);
+		__mptcp_wnd_updated(sk, ssk);
 	}
 
 	if (after64(new_snd_una, old_snd_una)) {
@@ -915,8 +914,19 @@ void mptcp_incoming_options(struct sock *sk, struct sk_buff *skb)
 	struct mptcp_options_received mp_opt;
 	struct mptcp_ext *mpext;
 
-	if (__mptcp_check_fallback(msk))
+	if (__mptcp_check_fallback(msk)) {
+		/* Keep it simple and unconditionally trigger send data cleanup and
+		 * pending queue spooling. We will need to acquire the data lock
+		 * for more accurate checks, and once the lock is acquired, such
+		 * helpers are cheap.
+		 */
+		mptcp_data_lock(subflow->conn);
+		if (mptcp_send_head(subflow->conn))
+			__mptcp_wnd_updated(subflow->conn, sk);
+		__mptcp_data_acked(subflow->conn);
+		mptcp_data_unlock(subflow->conn);
 		return;
+	}
 
 	mptcp_get_options(skb, &mp_opt);
 	if (!check_fully_established(msk, sk, subflow, skb, &mp_opt))
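On the fallback branch above no further MPTCP option processing runs, so the
patch flushes the acked data and spools the pending queue eagerly under the
msk data lock, as the in-diff comment explains. A rough user-space analogue
of that lock-bracketed flush, with all names hypothetical:

/* Hypothetical analogue of the fallback branch: take the data lock
 * unconditionally and run the helpers, which are cheap once the lock
 * is already held.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t data_lock = PTHREAD_MUTEX_INITIALIZER;
static int send_queue_nonempty = 1;  /* stand-in for mptcp_send_head() */

static void wnd_updated(void) { puts("spool pending send queue"); }
static void data_acked(void)  { puts("clean acked data"); }

static void fallback_flush(void)
{
	pthread_mutex_lock(&data_lock);
	if (send_queue_nonempty)
		wnd_updated();
	data_acked();
	pthread_mutex_unlock(&data_lock);
}

int main(void)
{
	fallback_flush();
	return 0;
}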