fuse: convert readahead to use folios
Currently we're using the __readahead_batch() helper which populates our fuse_args_pages->pages array with pages. Convert this to use the newer folio based pattern which is to call readahead_folio() to get the next folio in the read ahead batch. I've updated the code to use things like folio_size() and to take into account larger folio sizes, but this is purely to make that eventual work easier to do, we currently will not get large folios so this is more future proofing than actual support. [SzM: readahead_folio() won't return NULL (at least for now), so remove the NULL check and the ugly assignment-in-conditional.] Signed-off-by: Josef Bacik <josef@toxicpanda.com> Signed-off-by: Miklos Szeredi <mszeredi@redhat.com>
This commit is contained in:
parent
aaa32429da
commit
3eab9d7bc2
1 changed file with 23 additions and 13 deletions
|
@ -945,7 +945,6 @@ static void fuse_readpages_end(struct fuse_mount *fm, struct fuse_args *args,
|
|||
struct folio *folio = page_folio(ap->pages[i]);
|
||||
|
||||
folio_end_read(folio, !err);
|
||||
folio_put(folio);
|
||||
}
|
||||
if (ia->ff)
|
||||
fuse_file_put(ia->ff, false);
|
||||
|
@ -994,7 +993,7 @@ static void fuse_readahead(struct readahead_control *rac)
|
|||
struct inode *inode = rac->mapping->host;
|
||||
struct fuse_inode *fi = get_fuse_inode(inode);
|
||||
struct fuse_conn *fc = get_fuse_conn(inode);
|
||||
unsigned int i, max_pages, nr_pages = 0;
|
||||
unsigned int max_pages, nr_pages;
|
||||
pgoff_t first = readahead_index(rac);
|
||||
pgoff_t last = first + readahead_count(rac) - 1;
|
||||
|
||||
|
@ -1006,9 +1005,22 @@ static void fuse_readahead(struct readahead_control *rac)
|
|||
max_pages = min_t(unsigned int, fc->max_pages,
|
||||
fc->max_read / PAGE_SIZE);
|
||||
|
||||
for (;;) {
|
||||
/*
|
||||
* This is only accurate the first time through, since readahead_folio()
|
||||
* doesn't update readahead_count() from the previous folio until the
|
||||
* next call. Grab nr_pages here so we know how many pages we're going
|
||||
* to have to process. This means that we will exit here with
|
||||
* readahead_count() == folio_nr_pages(last_folio), but we will have
|
||||
* consumed all of the folios, and read_pages() will call
|
||||
* readahead_folio() again which will clean up the rac.
|
||||
*/
|
||||
nr_pages = readahead_count(rac);
|
||||
|
||||
while (nr_pages) {
|
||||
struct fuse_io_args *ia;
|
||||
struct fuse_args_pages *ap;
|
||||
struct folio *folio;
|
||||
unsigned cur_pages = min(max_pages, nr_pages);
|
||||
|
||||
if (fc->num_background >= fc->congestion_threshold &&
|
||||
rac->ra->async_size >= readahead_count(rac))
|
||||
|
@ -1018,21 +1030,19 @@ static void fuse_readahead(struct readahead_control *rac)
|
|||
*/
|
||||
break;
|
||||
|
||||
nr_pages = readahead_count(rac) - nr_pages;
|
||||
if (nr_pages > max_pages)
|
||||
nr_pages = max_pages;
|
||||
if (nr_pages == 0)
|
||||
break;
|
||||
ia = fuse_io_alloc(NULL, nr_pages);
|
||||
ia = fuse_io_alloc(NULL, cur_pages);
|
||||
if (!ia)
|
||||
return;
|
||||
ap = &ia->ap;
|
||||
nr_pages = __readahead_batch(rac, ap->pages, nr_pages);
|
||||
for (i = 0; i < nr_pages; i++) {
|
||||
ap->descs[i].length = PAGE_SIZE;
|
||||
|
||||
while (ap->num_pages < cur_pages) {
|
||||
folio = readahead_folio(rac);
|
||||
ap->pages[ap->num_pages] = &folio->page;
|
||||
ap->descs[ap->num_pages].length = folio_size(folio);
|
||||
ap->num_pages++;
|
||||
}
|
||||
ap->num_pages = nr_pages;
|
||||
fuse_send_readpages(ia, rac->file);
|
||||
nr_pages -= cur_pages;
|
||||
}
|
||||
}
|
||||
|
||||
|
|
Loading…
Add table
Reference in a new issue