mptcp: deliver ssk errors to msk
Currently all errors received on msk subflows are ignored.
We need to catch at least the errors on connect() and
on fallback sockets.

Use a custom sk_error_report callback at subflow level,
and do the real action under the msk socket lock - via
the usual sock_owned_by_user()/release_callback() schema.
Fixes: 6e628cd3a8 ("mptcp: use mptcp release_cb for delayed tasks")
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
Signed-off-by: Mat Martineau <mathew.j.martineau@linux.intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 4c0d2e96ba
commit 15cc104533

3 changed files with 54 additions and 0 deletions
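The mechanism the patch relies on is the usual socket-lock deferral: if the msk is currently owned by a user context, the subflow callback only sets a flag, and the owner performs the real work from the release callback. Below is a minimal userspace analogue of that schema, for illustration only; mini_sock, error_report(), release_sock() and MINI_ERROR_REPORT are hypothetical names, not part of the kernel patch.

/* Userspace sketch of the sock_owned_by_user()/release_cb deferral:
 * when the "socket" is owned, the error is only flagged; the owner
 * processes the flag when it releases the lock.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

#define MINI_ERROR_REPORT (1u << 0)

struct mini_sock {
        pthread_mutex_t data_lock;   /* models mptcp_data_lock() */
        bool owned_by_user;          /* models sock_owned_by_user() */
        unsigned int flags;          /* deferred-work bits */
        int err;
};

static void handle_error(struct mini_sock *sk)
{
        printf("propagating error %d to the msk\n", sk->err);
}

/* models subflow_error_report(): run now, or defer to the lock owner */
static void error_report(struct mini_sock *sk, int err)
{
        pthread_mutex_lock(&sk->data_lock);
        sk->err = err;
        if (!sk->owned_by_user)
                handle_error(sk);
        else
                sk->flags |= MINI_ERROR_REPORT;
        pthread_mutex_unlock(&sk->data_lock);
}

/* models mptcp_release_cb(): process work deferred while the sock was owned */
static void release_sock(struct mini_sock *sk)
{
        pthread_mutex_lock(&sk->data_lock);
        if (sk->flags & MINI_ERROR_REPORT) {
                sk->flags &= ~MINI_ERROR_REPORT;
                handle_error(sk);
        }
        sk->owned_by_user = false;
        pthread_mutex_unlock(&sk->data_lock);
}

int main(void)
{
        struct mini_sock sk = {
                .data_lock = PTHREAD_MUTEX_INITIALIZER,
                .owned_by_user = true,
        };

        error_report(&sk, 104);  /* deferred: user currently owns the sock */
        release_sock(&sk);       /* deferred error is reported here */
        error_report(&sk, 110);  /* handled immediately: sock not owned */
        return 0;
}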
net/mptcp/protocol.c

@@ -2959,6 +2959,8 @@ static void mptcp_release_cb(struct sock *sk)
                 mptcp_push_pending(sk, 0);
                 spin_lock_bh(&sk->sk_lock.slock);
         }
+        if (test_and_clear_bit(MPTCP_ERROR_REPORT, &mptcp_sk(sk)->flags))
+                __mptcp_error_report(sk);
 
         /* clear any wmem reservation and errors */
         __mptcp_update_wmem(sk);

@@ -3355,6 +3357,11 @@ static __poll_t mptcp_poll(struct file *file, struct socket *sock,
         if (sk->sk_shutdown & RCV_SHUTDOWN)
                 mask |= EPOLLIN | EPOLLRDNORM | EPOLLRDHUP;
 
+        /* This barrier is coupled with smp_wmb() in tcp_reset() */
+        smp_rmb();
+        if (sk->sk_err)
+                mask |= EPOLLERR;
+
         return mask;
 }
net/mptcp/protocol.h

@@ -95,6 +95,7 @@
 #define MPTCP_WORK_CLOSE_SUBFLOW 5
 #define MPTCP_PUSH_PENDING 6
 #define MPTCP_CLEAN_UNA 7
+#define MPTCP_ERROR_REPORT 8
 
 static inline bool before64(__u64 seq1, __u64 seq2)
 {

@@ -414,6 +415,7 @@ struct mptcp_subflow_context {
         void (*tcp_data_ready)(struct sock *sk);
         void (*tcp_state_change)(struct sock *sk);
         void (*tcp_write_space)(struct sock *sk);
+        void (*tcp_error_report)(struct sock *sk);
 
         struct rcu_head rcu;
 };

@@ -478,6 +480,7 @@ static inline void mptcp_subflow_tcp_fallback(struct sock *sk,
         sk->sk_data_ready = ctx->tcp_data_ready;
         sk->sk_state_change = ctx->tcp_state_change;
         sk->sk_write_space = ctx->tcp_write_space;
+        sk->sk_error_report = ctx->tcp_error_report;
 
         inet_csk(sk)->icsk_af_ops = ctx->icsk_af_ops;
 }

@@ -505,6 +508,7 @@ bool mptcp_finish_join(struct sock *sk);
 bool mptcp_schedule_work(struct sock *sk);
 void __mptcp_check_push(struct sock *sk, struct sock *ssk);
 void __mptcp_data_acked(struct sock *sk);
+void __mptcp_error_report(struct sock *sk);
 void mptcp_subflow_eof(struct sock *sk);
 bool mptcp_update_rcv_data_fin(struct mptcp_sock *msk, u64 data_fin_seq, bool use_64bit);
 void __mptcp_flush_join_list(struct mptcp_sock *msk);
net/mptcp/subflow.c

@@ -1043,6 +1043,46 @@ static void subflow_write_space(struct sock *ssk)
         /* we take action in __mptcp_clean_una() */
 }
 
+void __mptcp_error_report(struct sock *sk)
+{
+        struct mptcp_subflow_context *subflow;
+        struct mptcp_sock *msk = mptcp_sk(sk);
+
+        mptcp_for_each_subflow(msk, subflow) {
+                struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
+                int err = sock_error(ssk);
+
+                if (!err)
+                        continue;
+
+                /* only propagate errors on fallen-back sockets or
+                 * on MPC connect
+                 */
+                if (sk->sk_state != TCP_SYN_SENT && !__mptcp_check_fallback(msk))
+                        continue;
+
+                inet_sk_state_store(sk, inet_sk_state_load(ssk));
+                sk->sk_err = -err;
+
+                /* This barrier is coupled with smp_rmb() in mptcp_poll() */
+                smp_wmb();
+                sk->sk_error_report(sk);
+                break;
+        }
+}
+
+static void subflow_error_report(struct sock *ssk)
+{
+        struct sock *sk = mptcp_subflow_ctx(ssk)->conn;
+
+        mptcp_data_lock(sk);
+        if (!sock_owned_by_user(sk))
+                __mptcp_error_report(sk);
+        else
+                set_bit(MPTCP_ERROR_REPORT, &mptcp_sk(sk)->flags);
+        mptcp_data_unlock(sk);
+}
+
 static struct inet_connection_sock_af_ops *
 subflow_default_af_ops(struct sock *sk)
 {

@@ -1352,9 +1392,11 @@ static int subflow_ulp_init(struct sock *sk)
         ctx->tcp_data_ready = sk->sk_data_ready;
         ctx->tcp_state_change = sk->sk_state_change;
         ctx->tcp_write_space = sk->sk_write_space;
+        ctx->tcp_error_report = sk->sk_error_report;
         sk->sk_data_ready = subflow_data_ready;
         sk->sk_write_space = subflow_write_space;
         sk->sk_state_change = subflow_state_change;
+        sk->sk_error_report = subflow_error_report;
 out:
         return err;
 }

@@ -1407,6 +1449,7 @@ static void subflow_ulp_clone(const struct request_sock *req,
         new_ctx->tcp_data_ready = old_ctx->tcp_data_ready;
         new_ctx->tcp_state_change = old_ctx->tcp_state_change;
         new_ctx->tcp_write_space = old_ctx->tcp_write_space;
+        new_ctx->tcp_error_report = old_ctx->tcp_error_report;
         new_ctx->rel_write_seq = 1;
         new_ctx->tcp_sock = newsk;