linux/fs/netfs/read_retry.c

David Howells 1d0013962d

netfs: Fix a number of read-retry hangs

Fix a number of hangs in the netfslib read-retry code, including:

 (1) netfs_reissue_read() doubles up the getting of references on
     subrequests, thereby leaking the subrequest and causing inode eviction
     to wait indefinitely.  This can lead to the kernel reporting a hang in
     the filesystem's evict_inode().

     Fix this by removing the get from netfs_reissue_read() and adding one
     to netfs_retry_read_subrequests() to deal with the one place that
     didn't double up.

 (2) The loop in netfs_retry_read_subrequests() that retries a sequence of
     failed subrequests doesn't record whether or not it retried the one
     that the "subreq" pointer points to when it leaves the loop.  It may
     not if renegotiation/repreparation of the subrequests means that fewer
     subrequests are needed to span the cumulative range of the sequence.

     Because it doesn't record this, the piece of code that discards
     now-superfluous subrequests doesn't know whether it should discard the
     one "subreq" points to - and so it doesn't.

     Fix this by noting whether the last subreq it examines is superfluous
     and if it is, then getting rid of it and all subsequent subrequests.

     If that one wasn't superfluous, then we would have tried to go
     round the previous loop again and so there can be no further unretried
     subrequests in the sequence.

 (3) netfs_retry_read_subrequests() takes an extra ref on any additional
     subrequests it has to allocate because it ran out of ones it could
     reuse, due to renegotiation/repreparation shrinking the subrequests.

     Fix this by removing that extra ref.

 (4) In netfs_retry_reads(), it was using wait_on_bit() to wait for
     NETFS_SREQ_IN_PROGRESS to be cleared on all subrequests in the
     sequence - but netfs_read_subreq_terminated() is now using a wait
     queue on the request instead and so this wait will never finish.

     Fix this by waiting on the wait queue instead.  To make this work, a
     new flag, NETFS_RREQ_RETRYING, is now set around the wait loop to tell
     the wake-up code to wake up the wait queue rather than requeuing the
     request's work item (a sketch of this decision follows this list).

     Note that this flag replaces the NETFS_RREQ_NEED_RETRY flag which is
     no longer used.

 (5) Whilst not strictly anything to do with the hang,
     netfs_retry_read_subrequests() was also doubly incrementing the
     subreq_counter and re-setting the debug index, leaving a gap in the
     trace.  This is also fixed.
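
To make the wake-up change in (4) a bit more concrete, the decision amounts
to something like the sketch below.  This is an illustration only, not the
actual netfs collector code; the example function name and the use of
system_unbound_wq and rreq->work are assumptions made purely for the sketch:

	static void example_wake_collector(struct netfs_io_request *rreq)
	{
		if (test_bit(NETFS_RREQ_RETRYING, &rreq->flags))
			/* The retry path is parked on the request's wait queue */
			wake_up(&rreq->waitq);
		else
			/* Otherwise kick the request's work item (assumed queue/field) */
			queue_work(system_unbound_wq, &rreq->work);
	}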

One of these hangs was observed with 9p and with cifs.  Others were forced
by manual code injection into fs/afs/file.c.  Firstly, afs_prepare_read()
was created to provide a changing pattern of maximum subrequest sizes:

	static int afs_prepare_read(struct netfs_io_subrequest *subreq)
	{
		struct netfs_io_request *rreq = subreq->rreq;
		if (!S_ISREG(subreq->rreq->inode->i_mode))
			return 0;
		if (subreq->retry_count < 20)
			rreq->io_streams[0].sreq_max_len =
				umax(200, 2222 - subreq->retry_count * 40);
		else
			rreq->io_streams[0].sreq_max_len = 3333;
		return 0;
	}

and pointed to by afs_req_ops.  Then the following:

	struct netfs_io_subrequest *subreq = op->fetch.subreq;
	if (subreq->error == 0 &&
	    S_ISREG(subreq->rreq->inode->i_mode) &&
	    subreq->retry_count < 20) {
		subreq->transferred = subreq->already_done;
		__clear_bit(NETFS_SREQ_HIT_EOF, &subreq->flags);
		__set_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags);
		afs_fetch_data_notify(op);
		return;
	}

was inserted at the beginning of afs_fetch_data_success(), and struct
netfs_io_subrequest was given an extra field, "already_done", that was set
to the value of "subreq->transferred" by netfs_reissue_read().

When reading a 4K file, the subrequests would get gradually smaller; a new
subrequest would be allocated around the 3rd retry and then eventually be
rendered superfluous when the 20th retry was hit and the limit on the first
subrequest was eased.
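
For illustration, and assuming the injected afs_prepare_read() above, the
limit it imposes works out roughly as follows (the umax() floor of 200 is
never reached while retry_count < 20):

	retry_count  0:  2222 -  0 * 40 = 2222
	retry_count 10:  2222 - 10 * 40 = 1822
	retry_count 19:  2222 - 19 * 40 = 1462
	retry_count 20+:                   3333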

Fixes: e2d46f2ec3 ("netfs: Change the read result collector to only use one work item")
Signed-off-by: David Howells <dhowells@redhat.com>
Link: https://lore.kernel.org/r/20250212222402.3618494-2-dhowells@redhat.com
Tested-by: Marc Dionne <marc.dionne@auristor.com>
Tested-by: Steve French <stfrench@microsoft.com>
cc: Ihor Solodrai <ihor.solodrai@pm.me>
cc: Eric Van Hensbergen <ericvh@kernel.org>
cc: Latchesar Ionkov <lucho@ionkov.net>
cc: Dominique Martinet <asmadeus@codewreck.org>
cc: Christian Schoenebeck <linux_oss@crudebyte.com>
cc: Paulo Alcantara <pc@manguebit.com>
cc: Jeff Layton <jlayton@kernel.org>
cc: v9fs@lists.linux.dev
cc: linux-cifs@vger.kernel.org
cc: netfs@lists.linux.dev
cc: linux-fsdevel@vger.kernel.org
Signed-off-by: Christian Brauner <brauner@kernel.org>
2025-02-13 16:00:38 +01:00

// SPDX-License-Identifier: GPL-2.0-only
/* Network filesystem read subrequest retrying.
 *
 * Copyright (C) 2024 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/fs.h>
#include <linux/slab.h>
#include "internal.h"

static void netfs_reissue_read(struct netfs_io_request *rreq,
                               struct netfs_io_subrequest *subreq)
{
	__clear_bit(NETFS_SREQ_MADE_PROGRESS, &subreq->flags);
	__set_bit(NETFS_SREQ_IN_PROGRESS, &subreq->flags);
	subreq->rreq->netfs_ops->issue_read(subreq);
}

/*
 * Go through the list of failed/short reads, retrying all retryable ones. We
 * need to switch failed cache reads to network downloads.
 */
static void netfs_retry_read_subrequests(struct netfs_io_request *rreq)
{
	struct netfs_io_subrequest *subreq;
	struct netfs_io_stream *stream = &rreq->io_streams[0];
	struct list_head *next;

	_enter("R=%x", rreq->debug_id);

	if (list_empty(&stream->subrequests))
		return;

	if (rreq->netfs_ops->retry_request)
		rreq->netfs_ops->retry_request(rreq, NULL);

	/* If there's no renegotiation to do, just resend each retryable subreq
	 * up to the first permanently failed one.
	 */
	if (!rreq->netfs_ops->prepare_read &&
	    !rreq->cache_resources.ops) {
		list_for_each_entry(subreq, &stream->subrequests, rreq_link) {
			if (test_bit(NETFS_SREQ_FAILED, &subreq->flags))
				break;
			if (__test_and_clear_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags)) {
				__clear_bit(NETFS_SREQ_MADE_PROGRESS, &subreq->flags);
				subreq->retry_count++;
				netfs_reset_iter(subreq);
				netfs_get_subrequest(subreq, netfs_sreq_trace_get_resubmit);
				netfs_reissue_read(rreq, subreq);
			}
		}
		return;
	}

	/* Okay, we need to renegotiate all the download requests and flip any
	 * failed cache reads over to being download requests and negotiate
	 * those also. All fully successful subreqs have been removed from the
	 * list and any spare data from those has been donated.
	 *
	 * What we do is decant the list and rebuild it one subreq at a time so
	 * that we don't end up with donations jumping over a gap we're busy
	 * populating with smaller subrequests. In the event that the subreq
	 * we just launched finishes before we insert the next subreq, it'll
	 * fill in rreq->prev_donated instead.
	 *
	 * Note: Alternatively, we could split the tail subrequest right before
	 * we reissue it and fix up the donations under lock.
	 */
	next = stream->subrequests.next;

	do {
		struct netfs_io_subrequest *from, *to, *tmp;
		struct iov_iter source;
		unsigned long long start, len;
		size_t part;
		bool boundary = false, subreq_superfluous = false;

		/* Go through the subreqs and find the next span of contiguous
		 * buffer that we then rejig (cifs, for example, needs the
		 * rsize renegotiating) and reissue.
		 */
		from = list_entry(next, struct netfs_io_subrequest, rreq_link);
		to = from;
		start = from->start + from->transferred;
		len = from->len - from->transferred;

		_debug("from R=%08x[%x] s=%llx ctl=%zx/%zx",
		       rreq->debug_id, from->debug_index,
		       from->start, from->transferred, from->len);

		if (test_bit(NETFS_SREQ_FAILED, &from->flags) ||
		    !test_bit(NETFS_SREQ_NEED_RETRY, &from->flags))
			goto abandon;

		list_for_each_continue(next, &stream->subrequests) {
			subreq = list_entry(next, struct netfs_io_subrequest, rreq_link);
			if (subreq->start + subreq->transferred != start + len ||
			    test_bit(NETFS_SREQ_BOUNDARY, &subreq->flags) ||
			    !test_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags))
				break;
			to = subreq;
			len += to->len;
		}

		_debug(" - range: %llx-%llx %llx", start, start + len - 1, len);

		/* Determine the set of buffers we're going to use. Each
		 * subreq gets a subset of a single overall contiguous buffer.
		 */
		netfs_reset_iter(from);
		source = from->io_iter;
		source.count = len;

		/* Work through the sublist. */
		subreq = from;
		list_for_each_entry_from(subreq, &stream->subrequests, rreq_link) {
			if (!len) {
				subreq_superfluous = true;
				break;
			}
			subreq->source = NETFS_DOWNLOAD_FROM_SERVER;
			subreq->start = start - subreq->transferred;
			subreq->len = len + subreq->transferred;
			__clear_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags);
			__clear_bit(NETFS_SREQ_MADE_PROGRESS, &subreq->flags);
			subreq->retry_count++;
			trace_netfs_sreq(subreq, netfs_sreq_trace_retry);

			/* Renegotiate max_len (rsize) */
			stream->sreq_max_len = subreq->len;
			if (rreq->netfs_ops->prepare_read &&
			    rreq->netfs_ops->prepare_read(subreq) < 0) {
				trace_netfs_sreq(subreq, netfs_sreq_trace_reprep_failed);
				__set_bit(NETFS_SREQ_FAILED, &subreq->flags);
				goto abandon;
			}

			part = umin(len, stream->sreq_max_len);
			if (unlikely(stream->sreq_max_segs))
				part = netfs_limit_iter(&source, 0, part, stream->sreq_max_segs);
			subreq->len = subreq->transferred + part;
			subreq->io_iter = source;
			iov_iter_truncate(&subreq->io_iter, part);
			iov_iter_advance(&source, part);

			len -= part;
			start += part;
			if (!len) {
				if (boundary)
					__set_bit(NETFS_SREQ_BOUNDARY, &subreq->flags);
			} else {
				__clear_bit(NETFS_SREQ_BOUNDARY, &subreq->flags);
			}

			netfs_get_subrequest(subreq, netfs_sreq_trace_get_resubmit);
			netfs_reissue_read(rreq, subreq);
			if (subreq == to) {
				subreq_superfluous = false;
				break;
			}
		}

		/* If we managed to use fewer subreqs, we can discard the
		 * excess; if we used the same number, then we're done.
		 */
		if (!len) {
			if (!subreq_superfluous)
				continue;
			list_for_each_entry_safe_from(subreq, tmp,
						      &stream->subrequests, rreq_link) {
				trace_netfs_sreq(subreq, netfs_sreq_trace_superfluous);
				list_del(&subreq->rreq_link);
				netfs_put_subrequest(subreq, false, netfs_sreq_trace_put_done);
				if (subreq == to)
					break;
			}
			continue;
		}

		/* We ran out of subrequests, so we need to allocate some more
		 * and insert them after.
		 */
		do {
			subreq = netfs_alloc_subrequest(rreq);
			if (!subreq) {
				subreq = to;
				goto abandon_after;
			}
			subreq->source = NETFS_DOWNLOAD_FROM_SERVER;
			subreq->start = start;
			subreq->len = len;
			subreq->stream_nr = stream->stream_nr;
			subreq->retry_count = 1;

			trace_netfs_sreq_ref(rreq->debug_id, subreq->debug_index,
					     refcount_read(&subreq->ref),
					     netfs_sreq_trace_new);

			list_add(&subreq->rreq_link, &to->rreq_link);
			to = list_next_entry(to, rreq_link);
			trace_netfs_sreq(subreq, netfs_sreq_trace_retry);

			stream->sreq_max_len = umin(len, rreq->rsize);
			stream->sreq_max_segs = 0;
			if (unlikely(stream->sreq_max_segs))
				part = netfs_limit_iter(&source, 0, part, stream->sreq_max_segs);

			netfs_stat(&netfs_n_rh_download);
			if (rreq->netfs_ops->prepare_read(subreq) < 0) {
				trace_netfs_sreq(subreq, netfs_sreq_trace_reprep_failed);
				__set_bit(NETFS_SREQ_FAILED, &subreq->flags);
				goto abandon;
			}

			part = umin(len, stream->sreq_max_len);
			subreq->len = subreq->transferred + part;
			subreq->io_iter = source;
			iov_iter_truncate(&subreq->io_iter, part);
			iov_iter_advance(&source, part);

			len -= part;
			start += part;
			if (!len && boundary) {
				__set_bit(NETFS_SREQ_BOUNDARY, &to->flags);
				boundary = false;
			}

			netfs_reissue_read(rreq, subreq);
		} while (len);
	} while (!list_is_head(next, &stream->subrequests));

	return;

	/* If we hit an error, fail all remaining incomplete subrequests */
abandon_after:
	if (list_is_last(&subreq->rreq_link, &stream->subrequests))
		return;
	subreq = list_next_entry(subreq, rreq_link);
abandon:
	list_for_each_entry_from(subreq, &stream->subrequests, rreq_link) {
		if (!subreq->error &&
		    !test_bit(NETFS_SREQ_FAILED, &subreq->flags) &&
		    !test_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags))
			continue;
		subreq->error = -ENOMEM;
		__set_bit(NETFS_SREQ_FAILED, &subreq->flags);
		__clear_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags);
	}
}

/*
 * Retry reads.
 */
void netfs_retry_reads(struct netfs_io_request *rreq)
{
	struct netfs_io_subrequest *subreq;
	struct netfs_io_stream *stream = &rreq->io_streams[0];
	DEFINE_WAIT(myself);

	set_bit(NETFS_RREQ_RETRYING, &rreq->flags);

	/* Wait for all outstanding I/O to quiesce before performing retries as
	 * we may need to renegotiate the I/O sizes.
	 */
	list_for_each_entry(subreq, &stream->subrequests, rreq_link) {
		if (!test_bit(NETFS_SREQ_IN_PROGRESS, &subreq->flags))
			continue;

		trace_netfs_rreq(rreq, netfs_rreq_trace_wait_queue);
		for (;;) {
			prepare_to_wait(&rreq->waitq, &myself, TASK_UNINTERRUPTIBLE);
			if (!test_bit(NETFS_SREQ_IN_PROGRESS, &subreq->flags))
				break;
			trace_netfs_sreq(subreq, netfs_sreq_trace_wait_for);
			schedule();
			trace_netfs_rreq(rreq, netfs_rreq_trace_woke_queue);
		}
		finish_wait(&rreq->waitq, &myself);
	}
	clear_bit(NETFS_RREQ_RETRYING, &rreq->flags);

	trace_netfs_rreq(rreq, netfs_rreq_trace_resubmit);
	netfs_retry_read_subrequests(rreq);
}

/*
 * Unlock any of the pages that haven't been unlocked yet due to abandoned
 * subrequests.
 */
void netfs_unlock_abandoned_read_pages(struct netfs_io_request *rreq)
{
	struct folio_queue *p;

	for (p = rreq->buffer.tail; p; p = p->next) {
		for (int slot = 0; slot < folioq_count(p); slot++) {
			struct folio *folio = folioq_folio(p, slot);

			if (folio && !folioq_is_marked2(p, slot)) {
				trace_netfs_folio(folio, netfs_folio_trace_abandon);
				folio_unlock(folio);
			}
		}
	}
}