 fs/bcachefs/buckets.c   |  4
 fs/btrfs/extent_io.c    | 20
 fs/fuse/file.c          |  2
 include/linux/pagemap.h | 24
 mm/readahead.c          | 56
 5 files changed, 51 insertions(+), 55 deletions(-)
diff --git a/fs/bcachefs/buckets.c b/fs/bcachefs/buckets.c
index 97a8af31ded1..026851ed761e 100644
--- a/fs/bcachefs/buckets.c
+++ b/fs/bcachefs/buckets.c
@@ -1501,6 +1501,10 @@ void bch2_trans_fs_usage_apply(struct btree_trans *trans,
}
}
+/*
+ * XXX account for compressed, uncompressed, incompressible sectors
+ */
+
/* trans_mark: */
static struct btree_iter *trans_get_update(struct btree_trans *trans,
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 60278e52c37a..27ca6e53c693 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -4389,28 +4389,20 @@ void extent_readahead(struct readahead_control *rac)
{
struct bio *bio = NULL;
unsigned long bio_flags = 0;
- struct page *pagepool[16];
struct extent_map *em_cached = NULL;
u64 prev_em_start = (u64)-1;
- int nr;
- while ((nr = readahead_page_batch(rac, pagepool))) {
- u64 contig_start = page_offset(pagepool[0]);
- u64 contig_end = page_offset(pagepool[nr - 1]) + PAGE_SIZE - 1;
+ u64 contig_start = (u64) rac->index << PAGE_SHIFT;
+ u64 contig_end = ((u64) (rac->index + rac->nr) << PAGE_SHIFT) - 1;
- ASSERT(contig_start + nr * PAGE_SIZE - 1 == contig_end);
-
- contiguous_readpages(pagepool, nr, contig_start, contig_end,
- &em_cached, &bio, &bio_flags, &prev_em_start);
- }
+ contiguous_readpages(rac->pagevec, rac->nr, contig_start, contig_end,
+ &em_cached, &bio, &bio_flags, &prev_em_start);
if (em_cached)
free_extent_map(em_cached);
- if (bio) {
- if (submit_one_bio(bio, 0, bio_flags))
- return;
- }
+ if (bio)
+ submit_one_bio(bio, 0, bio_flags);
}
/*
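[Note: the two shifts in extent_readahead() above compute the same byte range the old page_offset()-based code derived from the first and last page of each batch. A minimal standalone sketch of the arithmetic; the helper names are illustrative, not part of the patch:

	/* Byte offset of the first page in the request: index * PAGE_SIZE. */
	static inline u64 ra_contig_start(const struct readahead_control *rac)
	{
		return (u64)rac->index << PAGE_SHIFT;
	}

	/* Last byte covered by the request, inclusive. */
	static inline u64 ra_contig_end(const struct readahead_control *rac)
	{
		return ((u64)(rac->index + rac->nr) << PAGE_SHIFT) - 1;
	}
]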
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index 83d917f7e542..bc384cdca6b0 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -958,7 +958,7 @@ static void fuse_readahead(struct readahead_control *rac)
return;
ap = &ia->ap;
nr_pages = __readahead_batch(rac, ap->pages, nr_pages);
- for (i = 0; i < nr_pages; i++) {
+ for (i = 0; i < rac->nr; i++) {
fuse_wait_on_page_writeback(inode,
readahead_index(rac) + i);
ap->descs[i].length = PAGE_SIZE;
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 25cadac5e90d..4679ef2b4584 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -689,10 +689,12 @@ void page_cache_readahead_unbounded(struct address_space *, struct file *,
struct readahead_control {
struct file *file;
struct address_space *mapping;
-/* private: use the readahead_* accessors instead */
- pgoff_t _index;
- unsigned int _nr_pages;
- unsigned int _batch_count;
+
+ pgoff_t index;
+ unsigned int nr;
+ unsigned int size;
+ struct page **pagevec;
+ struct page *pagevec_onstack[8];
};
/**
@@ -762,20 +764,6 @@ static inline unsigned int __readahead_batch(struct readahead_control *rac,
}
/**
- * readahead_page_batch - Get a batch of pages to read.
- * @rac: The current readahead request.
- * @array: An array of pointers to struct page.
- *
- * Context: The pages are locked and have an elevated refcount. The caller
- * should decreases the refcount once the page has been submitted for I/O
- * and unlock the page once all I/O to that page has completed.
- * Return: The number of pages placed in the array. 0 indicates the request
- * is complete.
- */
-#define readahead_page_batch(rac, array) \
- __readahead_batch(rac, array, ARRAY_SIZE(array))
-
-/**
* readahead_pos - The byte offset into the file of this readahead request.
* @rac: The readahead request.
*/
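[Note: with _index and _nr_pages renamed to the public index and nr, and the pages themselves exposed through pagevec, a ->readahead implementation can walk the whole batch directly instead of pulling pages out one at a time. A minimal sketch of such a consumer, assuming this patch's contract that pages arrive locked with an elevated refcount; myfs_readahead and myfs_read_page are hypothetical:

	static void myfs_readahead(struct readahead_control *rac)
	{
		unsigned int i;

		for (i = 0; i < rac->nr; i++) {
			/*
			 * Each page is locked and holds a reference; the
			 * implementation starts I/O on it, and anything it
			 * leaves behind is unlocked and released by the
			 * caller's cleanup loop.
			 */
			myfs_read_page(rac->mapping, rac->pagevec[i]);
		}
	}
]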
diff --git a/mm/readahead.c b/mm/readahead.c
index d9429b3b1fde..421cb76478a7 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -207,52 +207,55 @@ static void readpages_page_cache_readahead_unbounded(struct address_space *mappi
/* New path: .readahead */
-static void readahead_read_pages(struct readahead_control *rac, bool skip_page)
+static void readahead_read_pages(struct readahead_control *rac)
{
const struct address_space_operations *aops = rac->mapping->a_ops;
struct page *page;
struct blk_plug plug;
if (!readahead_count(rac))
- goto out;
+ return;
blk_start_plug(&plug);
-
aops->readahead(rac);
+ blk_finish_plug(&plug);
+
/* Clean up the remaining pages */
while ((page = readahead_page(rac))) {
unlock_page(page);
put_page(page);
}
- blk_finish_plug(&plug);
BUG_ON(readahead_count(rac));
-
-out:
- if (skip_page)
- rac->_index++;
}
static void readahead_page_cache_readahead_unbounded(struct address_space *mapping,
struct file *file, pgoff_t index, unsigned long nr_to_read,
unsigned long lookahead_size)
{
+ pgoff_t end_index = index + nr_to_read;
+ pgoff_t lookahead_index = index + nr_to_read - lookahead_size;
gfp_t gfp_mask = readahead_gfp_mask(mapping);
struct readahead_control rac = {
.mapping = mapping,
.file = file,
-		._index = index,
+		.index = index,
};
-	unsigned long i;
- /*
- * Preallocate as many pages as we will need.
- */
- for (i = 0; i < nr_to_read; i++) {
- struct page *page = xa_load(&mapping->i_pages, index + i);
+ if (nr_to_read > ARRAY_SIZE(rac.pagevec_onstack))
+ rac.pagevec = kmalloc_array(nr_to_read, sizeof(void *), gfp_mask);
- BUG_ON(index + i != rac._index + rac._nr_pages);
+ if (rac.pagevec) {
+ rac.size = nr_to_read;
+ } else {
+ rac.pagevec = rac.pagevec_onstack;
+ rac.size = ARRAY_SIZE(rac.pagevec_onstack);
+ }
+
+	while ((index = rac.index + rac.nr) < end_index) {
+		struct page *page = xa_load(&rac.mapping->i_pages, index);
if (page && !xa_is_value(page)) {
/*
@@ -263,7 +266,8 @@ static void readahead_page_cache_readahead_unbounded(struct address_space *mappi
* have a stable reference to this page, and it's
* not worth getting one just for that.
*/
- readahead_read_pages(&rac, true);
+		readahead_read_pages(&rac);
+		rac.index++;
continue;
}
@@ -271,15 +275,20 @@ static void readahead_page_cache_readahead_unbounded(struct address_space *mappi
if (!page)
break;
- if (add_to_page_cache_lru(page, mapping, index + i,
- gfp_mask) < 0) {
+		if (add_to_page_cache_lru(page, rac.mapping, index,
+ gfp_mask) < 0) {
put_page(page);
- readahead_read_pages(&rac, true);
+		readahead_read_pages(&rac);
+		rac.index++;
continue;
}
- if (i == nr_to_read - lookahead_size)
+ if (index == lookahead_index)
SetPageReadahead(page);
- rac._nr_pages++;
+
+		rac.pagevec[rac.nr++] = page;
+
+		if (rac.nr == rac.size)
+			readahead_read_pages(&rac);
}
/*
@@ -287,7 +296,10 @@ static void readahead_page_cache_readahead_unbounded(struct address_space *mappi
* uptodate then the caller will launch readpage again, and
* will then handle the error.
*/
- readahead_read_pages(&rac, false);
+	readahead_read_pages(&rac);
+
+ if (rac.pagevec != rac.pagevec_onstack)
+ kfree(rac.pagevec);
}
/**
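[Note: the pagevec handling in readahead_page_cache_readahead_unbounded() is a stack-or-heap pattern: try a heap allocation sized for the whole request, and fall back to the small on-stack array, with a correspondingly smaller batch size and a flush via readahead_read_pages() whenever it fills, if the allocation fails. Reduced to its essentials, under the same assumptions as the patch; the names are illustrative:

	struct page *onstack[8];
	struct page **vec = NULL;
	unsigned int size = nr_to_read;

	if (nr_to_read > ARRAY_SIZE(onstack))
		vec = kmalloc_array(nr_to_read, sizeof(*vec), gfp_mask);
	if (!vec) {
		/* Allocation failed or was unnecessary: fall back to
		 * the on-stack array and work in smaller batches. */
		vec = onstack;
		size = ARRAY_SIZE(onstack);
	}

	/* ... fill vec, flushing each time it reaches size ... */

	if (vec != onstack)
		kfree(vec);
]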