mptcp: fix race in release_cb
If we receive an MPTCP_PUSH_PENDING event from a subflow while
mptcp_release_cb() is serving the previous one, the latter
will be delayed up to the next release_sock(msk).

Address the issue by implementing a test/serve loop for such
events.

Additionally, rename the push helper to __mptcp_push_pending()
to be more consistent with the existing code.
Fixes: 6e628cd3a8 ("mptcp: use mptcp release_cb for delayed tasks")
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
Signed-off-by: Mat Martineau <mathew.j.martineau@linux.intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
commit c2e6048fa1
parent 2948d0a1e5

 net/mptcp/protocol.c | 33 +++++++++++++++++++++------------
 1 file changed, 21 insertions(+), 12 deletions(-)
@@ -1445,7 +1445,7 @@ static void mptcp_push_release(struct sock *sk, struct sock *ssk,
 	release_sock(ssk);
 }
 
-static void mptcp_push_pending(struct sock *sk, unsigned int flags)
+static void __mptcp_push_pending(struct sock *sk, unsigned int flags)
 {
 	struct sock *prev_ssk = NULL, *ssk = NULL;
 	struct mptcp_sock *msk = mptcp_sk(sk);
@@ -1697,14 +1697,14 @@ static int mptcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
 
 wait_for_memory:
 		mptcp_set_nospace(sk);
-		mptcp_push_pending(sk, msg->msg_flags);
+		__mptcp_push_pending(sk, msg->msg_flags);
 		ret = sk_stream_wait_memory(sk, &timeo);
 		if (ret)
 			goto out;
 	}
 
 	if (copied)
-		mptcp_push_pending(sk, msg->msg_flags);
+		__mptcp_push_pending(sk, msg->msg_flags);
 
 out:
 	release_sock(sk);
@@ -2959,13 +2959,14 @@ static void mptcp_release_cb(struct sock *sk)
 {
 	unsigned long flags, nflags;
 
-	/* push_pending may touch wmem_reserved, do it before the later
-	 * cleanup
-	 */
-	if (test_and_clear_bit(MPTCP_CLEAN_UNA, &mptcp_sk(sk)->flags))
-		__mptcp_clean_una(sk);
-	if (test_and_clear_bit(MPTCP_PUSH_PENDING, &mptcp_sk(sk)->flags)) {
-		/* mptcp_push_pending() acquires the subflow socket lock
+	for (;;) {
+		flags = 0;
+		if (test_and_clear_bit(MPTCP_PUSH_PENDING, &mptcp_sk(sk)->flags))
+			flags |= MPTCP_PUSH_PENDING;
+		if (!flags)
+			break;
+
+		/* the following actions acquire the subflow socket lock
 		 *
 		 * 1) can't be invoked in atomic scope
 		 * 2) must avoid ABBA deadlock with msk socket spinlock: the RX
@@ -2974,13 +2975,21 @@ static void mptcp_release_cb(struct sock *sk)
 		 */
 
 		spin_unlock_bh(&sk->sk_lock.slock);
-		mptcp_push_pending(sk, 0);
+		if (flags & MPTCP_PUSH_PENDING)
+			__mptcp_push_pending(sk, 0);
+
+		cond_resched();
 		spin_lock_bh(&sk->sk_lock.slock);
 	}
 
+	if (test_and_clear_bit(MPTCP_CLEAN_UNA, &mptcp_sk(sk)->flags))
+		__mptcp_clean_una(sk);
+
 	if (test_and_clear_bit(MPTCP_ERROR_REPORT, &mptcp_sk(sk)->flags))
 		__mptcp_error_report(sk);
 
-	/* clear any wmem reservation and errors */
+	/* push_pending may touch wmem_reserved, ensure we do the cleanup
+	 * later
+	 */
 	__mptcp_update_wmem(sk);
 	__mptcp_update_rmem(sk);
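
For illustration, the test/serve loop above can be sketched in user space: a
minimal C11 program that models test_and_clear_bit() with an atomic
fetch-and-clear and re-tests the event flag after every serve pass, so a flag
raised while the previous event was being served is handled immediately
instead of waiting for the next release_sock(msk). All names here
(msk_flags, serve_push_pending, release_cb) are hypothetical stand-ins, not
the kernel API.

#include <stdatomic.h>
#include <stdio.h>

#define PUSH_PENDING	(1UL << 0)	/* stand-in for MPTCP_PUSH_PENDING */

static atomic_ulong msk_flags;		/* stand-in for mptcp_sk(sk)->flags */

/* Stand-in for __mptcp_push_pending(); in the kernel this runs with the
 * msk socket spinlock dropped.
 */
static void serve_push_pending(void)
{
	puts("serving PUSH_PENDING");
}

/* Stand-in for the fixed mptcp_release_cb(): loop until no event is
 * pending, so an event raised while the previous one was being served
 * is handled now rather than at the next release_sock(msk).
 */
static void release_cb(void)
{
	for (;;) {
		/* atomic test-and-clear, like test_and_clear_bit() */
		unsigned long flags = atomic_fetch_and(&msk_flags,
				~PUSH_PENDING) & PUSH_PENDING;
		if (!flags)
			break;	/* nothing (re)armed: we are done */

		if (flags & PUSH_PENDING)
			serve_push_pending();
	}
}

int main(void)
{
	atomic_fetch_or(&msk_flags, PUSH_PENDING);
	release_cb();	/* serves once, re-tests the flag, then exits */
	return 0;
}

In the actual patch the same re-test happens with the msk spinlock held, and
the added cond_resched() call bounds the time spent inside the loop.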