Skip to content

Commit

Permalink
mptcp: refactor sndbuf auto-tuning.
Browse files Browse the repository at this point in the history
The MPTCP protocol accounts for the data enqueued on all the subflows
to the main socket send buffer, while the send buffer auto-tuning
algorithm sets the main socket send buffer size as the max size among
the subflows.

That causes bad performance when at least one subflow is sndbuf
limited, e.g. due to very high latency, as the MPTCP scheduler can't
even fill such a buffer.

Change the send-buffer auto-tuning algorithm to compute the main socket
send buffer size as the sum of all the subflows buffer size.

Signed-off-by: Paolo Abeni <pabeni@redhat.com>
  • Loading branch information
Paolo Abeni authored and intel-lab-lkp committed Sep 14, 2023
1 parent 72135d3 commit e712e1e
Show file tree
Hide file tree
Showing 3 changed files with 52 additions and 8 deletions.
25 changes: 24 additions & 1 deletion net/mptcp/protocol.c
Original file line number Diff line number Diff line change
Expand Up @@ -891,6 +891,7 @@ static bool __mptcp_finish_join(struct mptcp_sock *msk, struct sock *ssk)
mptcp_sockopt_sync_locked(msk, ssk);
mptcp_subflow_joined(msk, ssk);
mptcp_stop_tout_timer(sk);
__mptcp_propagate_sndbuf(sk, ssk);
return true;
}

Expand Down Expand Up @@ -2403,6 +2404,7 @@ static void __mptcp_close_ssk(struct sock *sk, struct sock *ssk,
* disconnect should never fail
*/
WARN_ON_ONCE(tcp_disconnect(ssk, 0));
__mptcp_propagate_sndbuf(sk, ssk);
mptcp_subflow_ctx_reset(subflow);
release_sock(ssk);

Expand All @@ -2427,6 +2429,7 @@ static void __mptcp_close_ssk(struct sock *sk, struct sock *ssk,
}

out_release:
__mptcp_update_sndbuf(sk, -subflow->cached_sndbuf);
__mptcp_subflow_error_report(sk, ssk);
release_sock(ssk);

Expand Down Expand Up @@ -3214,7 +3217,7 @@ struct sock *mptcp_sk_clone_init(const struct sock *sk,
* uses the correct data
*/
mptcp_copy_inaddrs(nsk, ssk);
mptcp_propagate_sndbuf(nsk, ssk);
__mptcp_propagate_sndbuf(nsk, ssk);

mptcp_rcv_space_init(msk, ssk);
bh_unlock_sock(nsk);
Expand Down Expand Up @@ -3339,6 +3342,14 @@ void __mptcp_check_push(struct sock *sk, struct sock *ssk)
__set_bit(MPTCP_PUSH_PENDING, &mptcp_sk(sk)->cb_flags);
}

/* Flush into sk_sndbuf the sndbuf changes accumulated in msk->delta_sndbuf
 * while the msk socket was owned by the user (see the MPTCP_TUNE_SNDBUF
 * handling in mptcp_release_cb()), then clear the pending delta.
 * Must run with the msk socket lock held.
 */
static void __mptcp_tune_sndbuf(struct sock *sk)
{
struct mptcp_sock *msk = mptcp_sk(sk);

__mptcp_update_sndbuf(sk, msk->delta_sndbuf);
msk->delta_sndbuf = 0;
}

#define MPTCP_FLAGS_PROCESS_CTX_NEED (BIT(MPTCP_PUSH_PENDING) | \
BIT(MPTCP_RETRANSMIT) | \
BIT(MPTCP_FLUSH_JOIN_LIST))
Expand Down Expand Up @@ -3392,6 +3403,8 @@ static void mptcp_release_cb(struct sock *sk)
__mptcp_set_connected(sk);
if (__test_and_clear_bit(MPTCP_ERROR_REPORT, &msk->cb_flags))
__mptcp_error_report(sk);
if (__test_and_clear_bit(MPTCP_TUNE_SNDBUF, &msk->cb_flags))
__mptcp_tune_sndbuf(sk);
}

__mptcp_update_rmem(sk);
Expand Down Expand Up @@ -3437,6 +3450,15 @@ void mptcp_subflow_process_delegated(struct sock *ssk)
mptcp_data_unlock(sk);
mptcp_subflow_delegated_done(subflow, MPTCP_DELEGATE_SEND);
}
if (test_bit(MPTCP_DELEGATE_SNDBUF, &subflow->delegated_status)) {
mptcp_data_lock(sk);
if (!sock_owned_by_user(sk))
__mptcp_propagate_sndbuf(sk, ssk);
else
__set_bit(MPTCP_TUNE_SNDBUF, &mptcp_sk(sk)->cb_flags);
mptcp_data_unlock(sk);
mptcp_subflow_delegated_done(subflow, MPTCP_DELEGATE_SNDBUF);
}
if (test_bit(MPTCP_DELEGATE_ACK, &subflow->delegated_status)) {
schedule_3rdack_retransmission(ssk);
mptcp_subflow_delegated_done(subflow, MPTCP_DELEGATE_ACK);
Expand Down Expand Up @@ -3523,6 +3545,7 @@ bool mptcp_finish_join(struct sock *ssk)
/* active subflow, already present inside the conn_list */
if (!list_empty(&subflow->node)) {
mptcp_subflow_joined(msk, ssk);
mptcp_propagate_sndbuf(parent, ssk);
return true;
}

Expand Down
32 changes: 27 additions & 5 deletions net/mptcp/protocol.h
Original file line number Diff line number Diff line change
Expand Up @@ -123,6 +123,7 @@
#define MPTCP_RETRANSMIT 4
#define MPTCP_FLUSH_JOIN_LIST 5
#define MPTCP_CONNECTED 6
#define MPTCP_TUNE_SNDBUF 7

struct mptcp_skb_cb {
u64 map_seq;
Expand Down Expand Up @@ -320,6 +321,7 @@ struct mptcp_sock {
u64 rtt_us; /* last maximum rtt of subflows */
} rcvq_space;
u8 scaling_ratio;
int delta_sndbuf;

u32 subflow_id;
u32 setsockopt_seq;
Expand Down Expand Up @@ -446,6 +448,7 @@ DECLARE_PER_CPU(struct mptcp_delegated_action, mptcp_delegated_actions);

#define MPTCP_DELEGATE_SEND 0
#define MPTCP_DELEGATE_ACK 1
#define MPTCP_DELEGATE_SNDBUF 2

/* MPTCP subflow context */
struct mptcp_subflow_context {
Expand Down Expand Up @@ -518,6 +521,7 @@ struct mptcp_subflow_context {

u32 setsockopt_seq;
u32 stale_rcv_tstamp;
int cached_sndbuf;

struct sock *tcp_sock; /* tcp sk backpointer */
struct sock *conn; /* parent mptcp_sock */
Expand Down Expand Up @@ -781,13 +785,31 @@ static inline bool mptcp_data_fin_enabled(const struct mptcp_sock *msk)
READ_ONCE(msk->write_seq) == READ_ONCE(msk->snd_nxt);
}

static inline bool mptcp_propagate_sndbuf(struct sock *sk, struct sock *ssk)
static inline void __mptcp_update_sndbuf(struct sock *sk, int delta)
{
if ((sk->sk_userlocks & SOCK_SNDBUF_LOCK) || ssk->sk_sndbuf <= READ_ONCE(sk->sk_sndbuf))
return false;
if (!(sk->sk_userlocks & SOCK_SNDBUF_LOCK) && delta)
WRITE_ONCE(sk->sk_sndbuf, sk->sk_sndbuf + delta);
}

/* Fold the current ssk sndbuf size into the msk send buffer, accounting
 * only the change since the last propagation so that the msk sndbuf
 * tracks the sum of all subflows' buffer sizes (see commit message).
 * subflow->cached_sndbuf remembers the value already accounted for.
 */
static inline void __mptcp_propagate_sndbuf(struct sock *sk, struct sock *ssk)
{
struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
int ssk_sndbuf = READ_ONCE(ssk->sk_sndbuf);

__mptcp_update_sndbuf(sk, ssk_sndbuf - subflow->cached_sndbuf);
subflow->cached_sndbuf = ssk_sndbuf;
}

static inline void mptcp_propagate_sndbuf(struct sock *sk, struct sock *ssk)
{
struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);

if (likely(READ_ONCE(ssk->sk_sndbuf) == subflow->cached_sndbuf))
return;

WRITE_ONCE(sk->sk_sndbuf, ssk->sk_sndbuf);
return true;
local_bh_disable();
mptcp_subflow_delegate(subflow, MPTCP_DELEGATE_SNDBUF);
local_bh_enable();
}

static inline void mptcp_write_space(struct sock *sk)
Expand Down
3 changes: 1 addition & 2 deletions net/mptcp/subflow.c
Original file line number Diff line number Diff line change
Expand Up @@ -421,6 +421,7 @@ static bool subflow_use_different_dport(struct mptcp_sock *msk, const struct soc

void __mptcp_set_connected(struct sock *sk)
{
__mptcp_propagate_sndbuf(sk, mptcp_sk(sk)->first);
if (sk->sk_state == TCP_SYN_SENT) {
inet_sk_state_store(sk, TCP_ESTABLISHED);
sk->sk_state_change(sk);
Expand Down Expand Up @@ -472,7 +473,6 @@ static void subflow_finish_connect(struct sock *sk, const struct sk_buff *skb)
return;

msk = mptcp_sk(parent);
mptcp_propagate_sndbuf(parent, sk);
subflow->rel_write_seq = 1;
subflow->conn_finished = 1;
subflow->ssn_offset = TCP_SKB_CB(skb)->seq;
Expand Down Expand Up @@ -1728,7 +1728,6 @@ static void subflow_state_change(struct sock *sk)

msk = mptcp_sk(parent);
if (subflow_simultaneous_connect(sk)) {
mptcp_propagate_sndbuf(parent, sk);
mptcp_do_fallback(sk);
mptcp_rcv_space_init(msk, sk);
pr_fallback(msk);
Expand Down

0 comments on commit e712e1e

Please sign in to comment.