mptcp: infinite mapping sending

This patch adds the infinite mapping sending logic.

Add a new flag send_infinite_map in struct mptcp_subflow_context. Set
it true when a single contiguous subflow is in use and the
allow_infinite_fallback flag is true in mptcp_pm_mp_fail_received().

In mptcp_sendmsg_frag(), if this flag is true, call the new function
mptcp_update_infinite_map() to set the infinite mapping.

Add a new flag infinite_map in struct mptcp_ext, set it true in
mptcp_update_infinite_map(), and check this flag in a new helper
mptcp_check_infinite_map().

In mptcp_update_infinite_map(), set data_len to 0, clear the
send_infinite_map flag, then do the fallback (a conceptual sketch of the
resulting DSS follows the change summary below).

In mptcp_established_options(), use the helper mptcp_check_infinite_map()
so that the infinite mapping DSS can still be sent out in fallback mode.

Suggested-by: Paolo Abeni <pabeni@redhat.com>
Signed-off-by: Geliang Tang <geliang.tang@suse.com>
Signed-off-by: Mat Martineau <mathew.j.martineau@linux.intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Author: Geliang Tang, 2022-04-22 14:55:39 -07:00 (committed by David S. Miller)
parent 0530020a7c
commit 1e39e5a32a
5 changed files with 43 additions and 3 deletions
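
For context: an "infinite mapping" is a DSS mapping whose data-level length
is 0; per RFC 8684 it covers all data from the given data sequence number
onward and is what lets the connection fall back to plain TCP on the one
remaining subflow. The stand-alone user-space sketch below (not kernel code;
struct dss_sketch and make_infinite_mapping() are made-up names) illustrates
how the fields of the last mapping differ from a normal one under this patch:

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for the DSS fields carried in struct mptcp_ext. */
struct dss_sketch {
        uint64_t data_seq;     /* data sequence number (DSN) */
        uint32_t subflow_seq;  /* relative subflow sequence number */
        uint16_t data_len;     /* data-level length; 0 == infinite mapping */
        uint16_t csum;         /* DSS checksum; written as 0 when data_len == 0 */
};

/* Mirrors the intent of mptcp_update_infinite_map(): data_len becomes 0 and,
 * with checksums enabled, the checksum field is written as 0 as well.
 */
static void make_infinite_mapping(struct dss_sketch *dss)
{
        dss->data_len = 0;
        dss->csum = 0;
}

int main(void)
{
        struct dss_sketch dss = { .data_seq = 1000, .subflow_seq = 1,
                                  .data_len = 1400, .csum = 0xabcd };

        make_infinite_mapping(&dss);
        printf("DSN=%llu ssn=%u data_len=%u csum=0x%x\n",
               (unsigned long long)dss.data_seq, dss.subflow_seq,
               dss.data_len, dss.csum);
        return 0;
}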

include/net/mptcp.h

@@ -35,7 +35,8 @@ struct mptcp_ext {
 			frozen:1,
 			reset_transient:1;
 	u8		reset_reason:4,
-			csum_reqd:1;
+			csum_reqd:1,
+			infinite_map:1;
 };
 
 #define MPTCP_RM_IDS_MAX	8

net/mptcp/options.c

@@ -825,7 +825,7 @@ bool mptcp_established_options(struct sock *sk, struct sk_buff *skb,
 
 	opts->suboptions = 0;
 
-	if (unlikely(__mptcp_check_fallback(msk)))
+	if (unlikely(__mptcp_check_fallback(msk) && !mptcp_check_infinite_map(skb)))
 		return false;
 
 	if (unlikely(skb && TCP_SKB_CB(skb)->tcp_flags & TCPHDR_RST)) {
@@ -1340,8 +1340,12 @@ void mptcp_write_options(__be32 *ptr, const struct tcp_sock *tp,
 			put_unaligned_be32(mpext->subflow_seq, ptr);
 			ptr += 1;
 			if (opts->csum_reqd) {
+				/* data_len == 0 is reserved for the infinite mapping,
+				 * the checksum will also be set to 0.
+				 */
 				put_unaligned_be32(mpext->data_len << 16 |
-						   mptcp_make_csum(mpext), ptr);
+						   (mpext->data_len ? mptcp_make_csum(mpext) : 0),
+						   ptr);
 			} else {
 				put_unaligned_be32(mpext->data_len << 16 |
 						   TCPOPT_NOP << 8 | TCPOPT_NOP, ptr);

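The mptcp_established_options() change above reads as: once the connection
has fallen back, MPTCP option writing is suppressed, except for the one skb
that still has to carry the infinite-mapping DSS. A minimal sketch of that
decision (hypothetical names, not the kernel API):

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical predicate mirroring the modified fallback check: stop
 * building MPTCP options after fallback, except for the skb flagged as
 * carrying the infinite mapping.
 */
static bool keep_building_mptcp_options(bool fallen_back, bool skb_is_infinite_map)
{
        return !fallen_back || skb_is_infinite_map;
}

int main(void)
{
        printf("%d %d %d\n",
               keep_building_mptcp_options(false, false),  /* 1: normal path */
               keep_building_mptcp_options(true, false),   /* 0: fallback, plain TCP */
               keep_building_mptcp_options(true, true));   /* 1: infinite-map skb */
        return 0;
}
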
net/mptcp/pm.c

@@ -285,7 +285,13 @@ void mptcp_pm_mp_prio_received(struct sock *ssk, u8 bkup)
 
 void mptcp_pm_mp_fail_received(struct sock *sk, u64 fail_seq)
 {
+	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
+	struct mptcp_sock *msk = mptcp_sk(subflow->conn);
+
 	pr_debug("fail_seq=%llu", fail_seq);
+
+	if (!mptcp_has_another_subflow(sk) && READ_ONCE(msk->allow_infinite_fallback))
+		subflow->send_infinite_map = 1;
 }
 
 /* path manager helpers */

net/mptcp/protocol.c

@@ -1229,6 +1229,21 @@ static void mptcp_update_data_checksum(struct sk_buff *skb, int added)
 	mpext->csum = csum_fold(csum_block_add(csum, skb_checksum(skb, offset, added, 0), offset));
 }
 
+static void mptcp_update_infinite_map(struct mptcp_sock *msk,
+				      struct sock *ssk,
+				      struct mptcp_ext *mpext)
+{
+	if (!mpext)
+		return;
+
+	mpext->infinite_map = 1;
+	mpext->data_len = 0;
+
+	mptcp_subflow_ctx(ssk)->send_infinite_map = 0;
+	pr_fallback(msk);
+	__mptcp_do_fallback(msk);
+}
+
 static int mptcp_sendmsg_frag(struct sock *sk, struct sock *ssk,
 			      struct mptcp_data_frag *dfrag,
 			      struct mptcp_sendmsg_info *info)
@@ -1360,6 +1375,8 @@ alloc_skb:
 out:
 	if (READ_ONCE(msk->csum_enabled))
 		mptcp_update_data_checksum(skb, copy);
+	if (mptcp_subflow_ctx(ssk)->send_infinite_map)
+		mptcp_update_infinite_map(msk, ssk, mpext);
 	trace_mptcp_sendmsg_frag(mpext);
 	mptcp_subflow_ctx(ssk)->rel_write_seq += copy;
 	return copy;

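Taken together, the sender-side sequence introduced by this patch is: an
MP_FAIL is received on the only remaining subflow, send_infinite_map is set,
the next fragment sent from mptcp_sendmsg_frag() goes out with data_len 0,
the flag is cleared, and the msk falls back to TCP. A rough user-space
walk-through under those assumptions (sketch_subflow, on_mp_fail() and
on_send_fragment() are invented names, not kernel APIs):

#include <stdbool.h>
#include <stdio.h>

struct sketch_subflow {
        bool send_infinite_map;  /* set on MP_FAIL when it is the only subflow */
        bool fallen_back;        /* connection downgraded to plain TCP */
};

/* Step 1: MP_FAIL received and infinite fallback is still allowed. */
static void on_mp_fail(struct sketch_subflow *sf, bool last_subflow,
                       bool allow_infinite_fallback)
{
        if (last_subflow && allow_infinite_fallback)
                sf->send_infinite_map = true;
}

/* Step 2: the next transmitted fragment becomes the infinite mapping. */
static void on_send_fragment(struct sketch_subflow *sf, unsigned int *data_len)
{
        if (!sf->send_infinite_map)
                return;
        *data_len = 0;                  /* infinite mapping */
        sf->send_infinite_map = false;  /* only sent once */
        sf->fallen_back = true;         /* plain TCP from here on */
}

int main(void)
{
        struct sketch_subflow sf = { false, false };
        unsigned int data_len = 1400;

        on_mp_fail(&sf, true, true);
        on_send_fragment(&sf, &data_len);
        printf("data_len=%u fallen_back=%d\n", data_len, sf.fallen_back);
        return 0;
}
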
net/mptcp/protocol.h

@@ -441,6 +441,7 @@ struct mptcp_subflow_context {
 		send_mp_prio : 1,
 		send_mp_fail : 1,
 		send_fastclose : 1,
+		send_infinite_map : 1,
 		rx_eof : 1,
 		can_ack : 1,        /* only after processing the remote a key */
 		disposable : 1,     /* ctx can be free at ulp release time */
@@ -877,6 +878,17 @@ static inline void mptcp_do_fallback(struct sock *sk)
 
 #define pr_fallback(a)	pr_debug("%s:fallback to TCP (msk=%p)", __func__, a)
 
+static inline bool mptcp_check_infinite_map(struct sk_buff *skb)
+{
+	struct mptcp_ext *mpext;
+
+	mpext = skb ? mptcp_get_ext(skb) : NULL;
+	if (mpext && mpext->infinite_map)
+		return true;
+
+	return false;
+}
+
 static inline bool subflow_simultaneous_connect(struct sock *sk)
 {
 	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);