Diffstat (limited to 'net/mptcp/subflow.c')
 net/mptcp/subflow.c | 143 +++++++++++++++++++++++++++++++++---------------
 1 file changed, 97 insertions(+), 46 deletions(-)
diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
index dadaf85db720..ba065b66551a 100644
--- a/net/mptcp/subflow.c
+++ b/net/mptcp/subflow.c
@@ -408,9 +408,8 @@ void mptcp_subflow_reset(struct sock *ssk)
 	tcp_send_active_reset(ssk, GFP_ATOMIC);
 	tcp_done(ssk);
-	if (!test_and_set_bit(MPTCP_WORK_CLOSE_SUBFLOW, &mptcp_sk(sk)->flags) &&
-	    schedule_work(&mptcp_sk(sk)->work))
-		return; /* worker will put sk for us */
+	if (!test_and_set_bit(MPTCP_WORK_CLOSE_SUBFLOW, &mptcp_sk(sk)->flags))
+		mptcp_schedule_work(sk);
 	sock_put(sk);
 }
@@ -696,14 +695,6 @@ static bool subflow_hmac_valid(const struct request_sock *req,
 	return !crypto_memneq(hmac, mp_opt->hmac, MPTCPOPT_HMAC_LEN);
 }
-static void mptcp_force_close(struct sock *sk)
-{
-	/* the msk is not yet exposed to user-space, and refcount is 2 */
-	inet_sk_state_store(sk, TCP_CLOSE);
-	sk_common_release(sk);
-	sock_put(sk);
-}
-
 static void subflow_ulp_fallback(struct sock *sk,
 				 struct mptcp_subflow_context *old_ctx)
 {
@@ -724,9 +715,12 @@ void mptcp_subflow_drop_ctx(struct sock *ssk)
 	if (!ctx)
 		return;
-	subflow_ulp_fallback(ssk, ctx);
-	if (ctx->conn)
-		sock_put(ctx->conn);
+	list_del(&mptcp_subflow_ctx(ssk)->node);
+	if (inet_csk(ssk)->icsk_ulp_ops) {
+		subflow_ulp_fallback(ssk, ctx);
+		if (ctx->conn)
+			sock_put(ctx->conn);
+	}
 	kfree_rcu(ctx, rcu);
 }
@@ -755,7 +749,6 @@ static struct sock *subflow_syn_recv_sock(const struct sock *sk,
 	struct mptcp_subflow_request_sock *subflow_req;
 	struct mptcp_options_received mp_opt;
 	bool fallback, fallback_is_fatal;
-	struct sock *new_msk = NULL;
 	struct mptcp_sock *owner;
 	struct sock *child;
@@ -784,14 +777,9 @@ static struct sock *subflow_syn_recv_sock(const struct sock *sk,
 		 * options.
 		 */
 		mptcp_get_options(skb, &mp_opt);
-		if (!(mp_opt.suboptions & OPTIONS_MPTCP_MPC)) {
+		if (!(mp_opt.suboptions & OPTIONS_MPTCP_MPC))
 			fallback = true;
-			goto create_child;
-		}
-		new_msk = mptcp_sk_clone(listener->conn, &mp_opt, req);
-		if (!new_msk)
-			fallback = true;
 	} else if (subflow_req->mp_join) {
 		mptcp_get_options(skb, &mp_opt);
 		if (!(mp_opt.suboptions & OPTIONS_MPTCP_MPJ) ||
@@ -820,23 +808,23 @@ create_child:
 				subflow_add_reset_reason(skb, MPTCP_RST_EMPTCP);
 				goto dispose_child;
 			}
-
-			if (new_msk)
-				mptcp_copy_inaddrs(new_msk, child);
-			mptcp_subflow_drop_ctx(child);
-			goto out;
+			goto fallback;
 		}
 		/* ssk inherits options of listener sk */
 		ctx->setsockopt_seq = listener->setsockopt_seq;
 		if (ctx->mp_capable) {
-			owner = mptcp_sk(new_msk);
+			ctx->conn = mptcp_sk_clone(listener->conn, &mp_opt, req);
+			if (!ctx->conn)
+				goto fallback;
+
+			owner = mptcp_sk(ctx->conn);
 			/* this can't race with mptcp_close(), as the msk is
 			 * not yet exposted to user-space
 			 */
-			inet_sk_state_store((void *)new_msk, TCP_ESTABLISHED);
+			inet_sk_state_store(ctx->conn, TCP_ESTABLISHED);
 			/* record the newly created socket as the first msk
 			 * subflow, but don't link it yet into conn_list
@@ -846,11 +834,9 @@ create_child:
 			/* new mpc subflow takes ownership of the newly
 			 * created mptcp socket
 			 */
-			mptcp_sk(new_msk)->setsockopt_seq = ctx->setsockopt_seq;
+			owner->setsockopt_seq = ctx->setsockopt_seq;
 			mptcp_pm_new_connection(owner, child, 1);
 			mptcp_token_accept(subflow_req, owner);
-			ctx->conn = new_msk;
-			new_msk = NULL;
 			/* set msk addresses early to ensure mptcp_pm_get_local_id()
 			 * uses the correct data
@@ -867,7 +853,7 @@ create_child:
 			 */
 			if (mp_opt.suboptions & OPTION_MPTCP_MPC_ACK) {
 				mptcp_subflow_fully_established(ctx, &mp_opt);
-				mptcp_pm_fully_established(owner, child, GFP_ATOMIC);
+				mptcp_pm_fully_established(owner, child);
 				ctx->pm_notified = 1;
 			}
 		} else if (ctx->mp_join) {
@@ -900,11 +886,6 @@ create_child:
 		}
 	}
-out:
-	/* dispose of the left over mptcp master, if any */
-	if (unlikely(new_msk))
-		mptcp_force_close(new_msk);
-
 	/* check for expected invariant - should never trigger, just help
 	 * catching eariler subtle bugs
 	 */
@@ -922,6 +903,10 @@ dispose_child:
 	/* The last child reference will be released by the caller */
 	return child;
+
+fallback:
+	mptcp_subflow_drop_ctx(child);
+	return child;
 }
 static struct inet_connection_sock_af_ops subflow_specific __ro_after_init;
@@ -1118,8 +1103,8 @@ static enum mapping_status get_mapping_status(struct sock *ssk,
 				skb_ext_del(skb, SKB_EXT_MPTCP);
 				return MAPPING_OK;
 			} else {
-				if (updated && schedule_work(&msk->work))
-					sock_hold((struct sock *)msk);
+				if (updated)
+					mptcp_schedule_work((struct sock *)msk);
 				return MAPPING_DATA_FIN;
 			}
@@ -1222,17 +1207,12 @@ static void mptcp_subflow_discard_data(struct sock *ssk, struct sk_buff *skb,
 /* sched mptcp worker to remove the subflow if no more data is pending */
 static void subflow_sched_work_if_closed(struct mptcp_sock *msk, struct sock *ssk)
 {
-	struct sock *sk = (struct sock *)msk;
-
 	if (likely(ssk->sk_state != TCP_CLOSE))
 		return;
 	if (skb_queue_empty(&ssk->sk_receive_queue) &&
-	    !test_and_set_bit(MPTCP_WORK_CLOSE_SUBFLOW, &msk->flags)) {
-		sock_hold(sk);
-		if (!schedule_work(&msk->work))
-			sock_put(sk);
-	}
+	    !test_and_set_bit(MPTCP_WORK_CLOSE_SUBFLOW, &msk->flags))
+		mptcp_schedule_work((struct sock *)msk);
 }
 static bool subflow_can_fallback(struct mptcp_subflow_context *subflow)
@@ -1825,6 +1805,77 @@ static void subflow_state_change(struct sock *sk)
 	}
 }
+void mptcp_subflow_queue_clean(struct sock *listener_sk, struct sock *listener_ssk)
+{
+	struct request_sock_queue *queue = &inet_csk(listener_ssk)->icsk_accept_queue;
+	struct mptcp_sock *msk, *next, *head = NULL;
+	struct request_sock *req;
+	struct sock *sk;
+
+	/* build a list of all unaccepted mptcp sockets */
+	spin_lock_bh(&queue->rskq_lock);
+	for (req = queue->rskq_accept_head; req; req = req->dl_next) {
+		struct mptcp_subflow_context *subflow;
+		struct sock *ssk = req->sk;
+
+		if (!sk_is_mptcp(ssk))
+			continue;
+
+		subflow = mptcp_subflow_ctx(ssk);
+		if (!subflow || !subflow->conn)
+			continue;
+
+		/* skip if already in list */
+		sk = subflow->conn;
+		msk = mptcp_sk(sk);
+		if (msk->dl_next || msk == head)
+			continue;
+
+		sock_hold(sk);
+		msk->dl_next = head;
+		head = msk;
+	}
+	spin_unlock_bh(&queue->rskq_lock);
+	if (!head)
+		return;
+
+	/* can't acquire the msk socket lock under the subflow one,
+	 * or will cause ABBA deadlock
+	 */
+	release_sock(listener_ssk);
+
+	for (msk = head; msk; msk = next) {
+		sk = (struct sock *)msk;
+
+		lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
+		next = msk->dl_next;
+		msk->dl_next = NULL;
+
+		__mptcp_unaccepted_force_close(sk);
+		release_sock(sk);
+
+		/* lockdep will report a false positive ABBA deadlock
+		 * between cancel_work_sync and the listener socket.
+		 * The involved locks belong to different sockets WRT
+		 * the existing AB chain.
+		 * Using a per socket key is problematic as key
+		 * deregistration requires process context and must be
+		 * performed at socket disposal time, in atomic
+		 * context.
+		 * Just tell lockdep to consider the listener socket
+		 * released here.
+		 */
+		mutex_release(&listener_sk->sk_lock.dep_map, _RET_IP_);
+		mptcp_cancel_work(sk);
+		mutex_acquire(&listener_sk->sk_lock.dep_map, 0, 0, _RET_IP_);
+
+		sock_put(sk);
+	}
+
+	/* we are still under the listener msk socket lock */
+	lock_sock_nested(listener_ssk, SINGLE_DEPTH_NESTING);
+}
+
 static int subflow_ulp_init(struct sock *sk)
 {
 	struct inet_connection_sock *icsk = inet_csk(sk);