mptcp: sched: check both directions for backup
author    Matthieu Baerts (NGI0) <matttbe@kernel.org>
          Sat, 27 Jul 2024 10:01:23 +0000 (12:01 +0200)
committer Paolo Abeni <pabeni@redhat.com>
          Tue, 30 Jul 2024 08:27:29 +0000 (10:27 +0200)
The 'mptcp_subflow_context' structure has two items related to the
backup flags:

 - 'backup': the subflow has been marked as backup by the other peer

 - 'request_bkup': the backup flag has been set by the host

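For reference, a simplified sketch of these two fields, following the
layout of 'struct mptcp_subflow_context' in net/mptcp/protocol.h
(unrelated members are omitted and the exact bitfield positions may
differ between kernel versions):

    struct mptcp_subflow_context {
            /* ... unrelated members omitted ... */
            u32     backup : 1,             /* marked as backup by the peer */
                    request_bkup : 1;       /* backup requested by this host */
            /* ... */
    };
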
Before this patch, the scheduler was only looking at the 'backup' flag.
That can make sense in some cases, but it looks like that's not what we
wanted for general use, because either the path-manager was setting
both of them when sending an MP_PRIO, or the receiver was duplicating
the 'backup' flag in the subflow request.

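In other words, a subflow should be treated as backup when either flag
is set, which is what the local 'backup' variable in the hunk below
computes. As a minimal standalone sketch, using the simplified struct
above (the helper name is hypothetical, not part of this patch):

    /* Hypothetical helper: a subflow counts as backup if either side
     * flagged it; the patch below open-codes this as a local variable.
     */
    static inline bool
    mptcp_subflow_is_backup(const struct mptcp_subflow_context *subflow)
    {
            return subflow->backup || subflow->request_bkup;
    }
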
Note that the use of these two flags in the path-manager is going to be
fixed in the next commits, but this change is needed here to avoid
modifying the behaviour.

Fixes: f296234c98a8 ("mptcp: Add handling of incoming MP_JOIN requests")
Cc: stable@vger.kernel.org
Reviewed-by: Mat Martineau <martineau@kernel.org>
Signed-off-by: Matthieu Baerts (NGI0) <matttbe@kernel.org>
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
include/trace/events/mptcp.h
net/mptcp/protocol.c

diff --git a/include/trace/events/mptcp.h b/include/trace/events/mptcp.h
index 09e72215b9f9bb53ec363d7690e9b87a09d172cb..085b749cdd97e6598dd1c66631f45eda3b73ee14 100644
--- a/include/trace/events/mptcp.h
+++ b/include/trace/events/mptcp.h
@@ -34,7 +34,7 @@ TRACE_EVENT(mptcp_subflow_get_send,
                struct sock *ssk;
 
                __entry->active = mptcp_subflow_active(subflow);
-               __entry->backup = subflow->backup;
+               __entry->backup = subflow->backup || subflow->request_bkup;
 
                if (subflow->tcp_sock && sk_fullsock(subflow->tcp_sock))
                        __entry->free = sk_stream_memory_free(subflow->tcp_sock);
diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
index a26c2c840fd9ca6e1bd90aa57e0a11c122534eca..a2fc54ed68c01130f34087485962dd1abfc1c3d8 100644
--- a/net/mptcp/protocol.c
+++ b/net/mptcp/protocol.c
@@ -1422,13 +1422,15 @@ struct sock *mptcp_subflow_get_send(struct mptcp_sock *msk)
        }
 
        mptcp_for_each_subflow(msk, subflow) {
+               bool backup = subflow->backup || subflow->request_bkup;
+
                trace_mptcp_subflow_get_send(subflow);
                ssk =  mptcp_subflow_tcp_sock(subflow);
                if (!mptcp_subflow_active(subflow))
                        continue;
 
                tout = max(tout, mptcp_timeout_from_subflow(subflow));
-               nr_active += !subflow->backup;
+               nr_active += !backup;
                pace = subflow->avg_pacing_rate;
                if (unlikely(!pace)) {
                        /* init pacing rate from socket */
@@ -1439,9 +1441,9 @@ struct sock *mptcp_subflow_get_send(struct mptcp_sock *msk)
                }
 
                linger_time = div_u64((u64)READ_ONCE(ssk->sk_wmem_queued) << 32, pace);
-               if (linger_time < send_info[subflow->backup].linger_time) {
-                       send_info[subflow->backup].ssk = ssk;
-                       send_info[subflow->backup].linger_time = linger_time;
+               if (linger_time < send_info[backup].linger_time) {
+                       send_info[backup].ssk = ssk;
+                       send_info[backup].linger_time = linger_time;
                }
        }
        __mptcp_set_timeout(sk, tout);
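
For context, 'send_info' is a two-slot array indexed by the subflow's
backup status: slot 0 tracks the lowest-linger-time non-backup subflow,
slot 1 the best backup one (the kernel refers to these slots as
SSK_MODE_ACTIVE and SSK_MODE_BACKUP). Later in mptcp_subflow_get_send(),
outside these hunks, the backup slot is only consulted when 'nr_active'
is zero, i.e. when no non-backup subflow is active. A standalone sketch
of that selection pattern, with simplified types in place of the
kernel's:

    #include <stdbool.h>
    #include <stdint.h>

    struct send_slot {
            void     *ssk;          /* best subflow socket seen so far */
            uint64_t  linger_time;  /* queued bytes scaled by pacing rate */
    };

    /* Track the lowest-linger-time subflow per class: the bool index
     * (0 or 1) selects the non-backup or backup slot without branching.
     */
    static void note_subflow(struct send_slot send_info[2], bool backup,
                             void *ssk, uint64_t linger_time)
    {
            if (linger_time < send_info[backup].linger_time) {
                    send_info[backup].ssk = ssk;
                    send_info[backup].linger_time = linger_time;
            }
    }

    /* Fall back to the best backup subflow only when none is active. */
    static void *pick_subflow(const struct send_slot send_info[2],
                              int nr_active)
    {
            return nr_active ? send_info[0].ssk : send_info[1].ssk;
    }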