mirror of https://gitee.com/openkylin/linux.git
mptcp: avoid processing packet if a subflow reset
If check_fully_established() causes a subflow reset, tcp_data_queue() should not
continue to process the packet.

Add a return value to mptcp_incoming_options(): return false if a subflow has
been reset, else return true. Then drop the packet in
tcp_data_queue()/tcp_rcv_state_process() if mptcp_incoming_options() returns false.
Fixes: d582484726 ("mptcp: fix fallback for MP_JOIN subflows")
Signed-off-by: Jianguo Wu <wujianguo@chinatelecom.cn>
Signed-off-by: Mat Martineau <mathew.j.martineau@linux.intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
commit 6787b7e350
parent 8547ea5f52
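In short, the new contract for receive-path callers looks like the sketch below. This is a condensed illustration of the diff that follows, not kernel code as-is; the tcp_data_queue_sketch() wrapper name is made up for illustration, while sk_is_mptcp(), mptcp_incoming_options() and __kfree_skb() are the real helpers used in the patch.

/* Sketch only: mptcp_incoming_options() now reports whether the subflow
 * survived option processing. A false return means check_fully_established()
 * reset the subflow, so the caller must drop the skb instead of queueing it.
 */
static void tcp_data_queue_sketch(struct sock *sk, struct sk_buff *skb)
{
        if (sk_is_mptcp(sk) && !mptcp_incoming_options(sk, skb)) {
                __kfree_skb(skb);       /* subflow already reset: drop the packet */
                return;
        }

        /* ... normal tcp_data_queue() processing continues here ... */
}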
--- a/include/net/mptcp.h
+++ b/include/net/mptcp.h
@@ -105,7 +105,7 @@ bool mptcp_synack_options(const struct request_sock *req, unsigned int *size,
 bool mptcp_established_options(struct sock *sk, struct sk_buff *skb,
                               unsigned int *size, unsigned int remaining,
                               struct mptcp_out_options *opts);
-void mptcp_incoming_options(struct sock *sk, struct sk_buff *skb);
+bool mptcp_incoming_options(struct sock *sk, struct sk_buff *skb);
 
 void mptcp_write_options(__be32 *ptr, const struct tcp_sock *tp,
                          struct mptcp_out_options *opts);
@@ -227,9 +227,10 @@ static inline bool mptcp_established_options(struct sock *sk,
         return false;
 }
 
-static inline void mptcp_incoming_options(struct sock *sk,
+static inline bool mptcp_incoming_options(struct sock *sk,
                                           struct sk_buff *skb)
 {
+        return true;
 }
 
 static inline void mptcp_skb_ext_move(struct sk_buff *to,
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -4247,6 +4247,9 @@ void tcp_reset(struct sock *sk, struct sk_buff *skb)
 {
         trace_tcp_receive_reset(sk);
 
+        /* mptcp can't tell us to ignore reset pkts,
+         * so just ignore the return value of mptcp_incoming_options().
+         */
         if (sk_is_mptcp(sk))
                 mptcp_incoming_options(sk, skb);
 
@@ -4941,8 +4944,13 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
         bool fragstolen;
         int eaten;
 
-        if (sk_is_mptcp(sk))
-                mptcp_incoming_options(sk, skb);
+        /* If a subflow has been reset, the packet should not continue
+         * to be processed, drop the packet.
+         */
+        if (sk_is_mptcp(sk) && !mptcp_incoming_options(sk, skb)) {
+                __kfree_skb(skb);
+                return;
+        }
 
         if (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq) {
                 __kfree_skb(skb);
@@ -6523,8 +6531,11 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb)
         case TCP_CLOSING:
         case TCP_LAST_ACK:
                 if (!before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) {
-                        if (sk_is_mptcp(sk))
-                                mptcp_incoming_options(sk, skb);
+                        /* If a subflow has been reset, the packet should not
+                         * continue to be processed, drop the packet.
+                         */
+                        if (sk_is_mptcp(sk) && !mptcp_incoming_options(sk, skb))
+                                goto discard;
                         break;
                 }
                 fallthrough;
--- a/net/mptcp/options.c
+++ b/net/mptcp/options.c
@@ -1035,7 +1035,8 @@ static bool add_addr_hmac_valid(struct mptcp_sock *msk,
         return hmac == mp_opt->ahmac;
 }
 
-void mptcp_incoming_options(struct sock *sk, struct sk_buff *skb)
+/* Return false if a subflow has been reset, else return true */
+bool mptcp_incoming_options(struct sock *sk, struct sk_buff *skb)
 {
         struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
         struct mptcp_sock *msk = mptcp_sk(subflow->conn);
@@ -1053,12 +1054,16 @@ void mptcp_incoming_options(struct sock *sk, struct sk_buff *skb)
                         __mptcp_check_push(subflow->conn, sk);
                 __mptcp_data_acked(subflow->conn);
                 mptcp_data_unlock(subflow->conn);
-                return;
+                return true;
         }
 
         mptcp_get_options(sk, skb, &mp_opt);
+
+        /* The subflow can be in close state only if check_fully_established()
+         * just sent a reset. If so, tell the caller to ignore the current packet.
+         */
         if (!check_fully_established(msk, sk, subflow, skb, &mp_opt))
-                return;
+                return sk->sk_state != TCP_CLOSE;
 
         if (mp_opt.fastclose &&
             msk->local_key == mp_opt.rcvr_key) {
@@ -1100,7 +1105,7 @@ void mptcp_incoming_options(struct sock *sk, struct sk_buff *skb)
         }
 
         if (!mp_opt.dss)
-                return;
+                return true;
 
         /* we can't wait for recvmsg() to update the ack_seq, otherwise
          * monodirectional flows will stuck
@@ -1119,12 +1124,12 @@ void mptcp_incoming_options(struct sock *sk, struct sk_buff *skb)
                     schedule_work(&msk->work))
                         sock_hold(subflow->conn);
 
-                return;
+                return true;
         }
 
         mpext = skb_ext_add(skb, SKB_EXT_MPTCP);
         if (!mpext)
-                return;
+                return true;
 
         memset(mpext, 0, sizeof(*mpext));
 
@@ -1153,6 +1158,8 @@ void mptcp_incoming_options(struct sock *sk, struct sk_buff *skb)
                 if (mpext->csum_reqd)
                         mpext->csum = mp_opt.csum;
         }
+
+        return true;
 }
 
 static void mptcp_set_rwin(const struct tcp_sock *tp)