summary refs log tree commit diff
diff options
context:
space:
mode:
-rw-r--r--fs/9p/cache.c27
-rw-r--r--fs/afs/cache.c28
-rw-r--r--fs/afs/write.c32
-rw-r--r--fs/btrfs/extent_io.c214
-rw-r--r--fs/ceph/cache.c27
-rw-r--r--fs/cifs/cache.c27
-rw-r--r--fs/dax.c35
-rw-r--r--fs/fscache/page.c24
-rw-r--r--fs/nfs/fscache-index.c29
-rw-r--r--fs/nilfs2/btree.c28
-rw-r--r--fs/nilfs2/page.c63
-rw-r--r--fs/nilfs2/segment.c70
12 files changed, 198 insertions(+), 406 deletions(-)
diff --git a/fs/9p/cache.c b/fs/9p/cache.c
index 103ca5e1267b..87ebff7ad7b5 100644
--- a/fs/9p/cache.c
+++ b/fs/9p/cache.c
@@ -154,29 +154,12 @@ fscache_checkaux v9fs_cache_inode_check_aux(void *cookie_netfs_data,
static void v9fs_cache_inode_now_uncached(void *cookie_netfs_data)
{
struct v9fs_inode *v9inode = cookie_netfs_data;
- struct pagevec pvec;
- pgoff_t first;
- int loop, nr_pages;
+ struct pagecache_iter iter;
+ struct page *page;
- pagevec_init(&pvec, 0);
- first = 0;
-
- for (;;) {
- nr_pages = pagevec_lookup(&pvec, v9inode->vfs_inode.i_mapping,
- first,
- PAGEVEC_SIZE - pagevec_count(&pvec));
- if (!nr_pages)
- break;
-
- for (loop = 0; loop < nr_pages; loop++)
- ClearPageFsCache(pvec.pages[loop]);
-
- first = pvec.pages[nr_pages - 1]->index + 1;
-
- pvec.nr = nr_pages;
- pagevec_release(&pvec);
- cond_resched();
- }
+ for_each_pagecache_page(&iter, v9inode->vfs_inode.i_mapping,
+ 0, ULONG_MAX, page)
+ ClearPageFsCache(page);
}
const struct fscache_cookie_def v9fs_cache_inode_index_def = {
diff --git a/fs/afs/cache.c b/fs/afs/cache.c
index 577763c3d88b..639cce1f11ef 100644
--- a/fs/afs/cache.c
+++ b/fs/afs/cache.c
@@ -370,33 +370,15 @@ static enum fscache_checkaux afs_vnode_cache_check_aux(void *cookie_netfs_data,
static void afs_vnode_cache_now_uncached(void *cookie_netfs_data)
{
struct afs_vnode *vnode = cookie_netfs_data;
- struct pagevec pvec;
- pgoff_t first;
- int loop, nr_pages;
+ struct pagecache_iter iter;
+ struct page *page;
_enter("{%x,%x,%Lx}",
vnode->fid.vnode, vnode->fid.unique, vnode->status.data_version);
- pagevec_init(&pvec, 0);
- first = 0;
-
- for (;;) {
- /* grab a bunch of pages to clean */
- nr_pages = pagevec_lookup(&pvec, vnode->vfs_inode.i_mapping,
- first,
- PAGEVEC_SIZE - pagevec_count(&pvec));
- if (!nr_pages)
- break;
-
- for (loop = 0; loop < nr_pages; loop++)
- ClearPageFsCache(pvec.pages[loop]);
-
- first = pvec.pages[nr_pages - 1]->index + 1;
-
- pvec.nr = nr_pages;
- pagevec_release(&pvec);
- cond_resched();
- }
+ for_each_pagecache_page(&iter, vnode->vfs_inode.i_mapping,
+ 0, ULONG_MAX, page)
+ ClearPageFsCache(page);
_leave("");
}
diff --git a/fs/afs/write.c b/fs/afs/write.c
index dfef94f70667..537ac2b55eb2 100644
--- a/fs/afs/write.c
+++ b/fs/afs/write.c
@@ -277,33 +277,19 @@ int afs_write_end(struct file *file, struct address_space *mapping,
static void afs_kill_pages(struct afs_vnode *vnode, bool error,
pgoff_t first, pgoff_t last)
{
- struct pagevec pv;
- unsigned count, loop;
+ struct pagecache_iter iter;
+ struct page *page;
_enter("{%x:%u},%lx-%lx",
vnode->fid.vid, vnode->fid.vnode, first, last);
- pagevec_init(&pv, 0);
-
- do {
- _debug("kill %lx-%lx", first, last);
-
- count = last - first + 1;
- if (count > PAGEVEC_SIZE)
- count = PAGEVEC_SIZE;
- pv.nr = find_get_pages_contig(vnode->vfs_inode.i_mapping,
- first, count, pv.pages);
- ASSERTCMP(pv.nr, ==, count);
-
- for (loop = 0; loop < count; loop++) {
- ClearPageUptodate(pv.pages[loop]);
- if (error)
- SetPageError(pv.pages[loop]);
- end_page_writeback(pv.pages[loop]);
- }
-
- __pagevec_release(&pv);
- } while (first < last);
+ for_each_pagecache_page_contig(&iter, vnode->vfs_inode.i_mapping,
+ first, last, page) {
+ ClearPageUptodate(page);
+ if (error)
+ SetPageError(page);
+ end_page_writeback(page);
+ }
_leave("");
}
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 76a0c8597d98..5d166944b086 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -3792,14 +3792,13 @@ int btree_write_cache_pages(struct address_space *mapping,
int ret = 0;
int done = 0;
int nr_to_write_done = 0;
- struct pagevec pvec;
- int nr_pages;
+ struct pagecache_iter iter;
+ struct page *page;
pgoff_t index;
pgoff_t end; /* Inclusive */
int scanned = 0;
int tag;
- pagevec_init(&pvec, 0);
if (wbc->range_cyclic) {
index = mapping->writeback_index; /* Start from prev offset */
end = -1;
@@ -3815,76 +3814,68 @@ int btree_write_cache_pages(struct address_space *mapping,
retry:
if (wbc->sync_mode == WB_SYNC_ALL)
tag_pages_for_writeback(mapping, index, end);
- while (!done && !nr_to_write_done && (index <= end) &&
- (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
- min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1))) {
- unsigned i;
-
- scanned = 1;
- for (i = 0; i < nr_pages; i++) {
- struct page *page = pvec.pages[i];
- if (!PagePrivate(page))
- continue;
-
- if (!wbc->range_cyclic && page->index > end) {
- done = 1;
- break;
- }
+ for_each_pagecache_tag(&iter, mapping, tag, index, ULONG_MAX, page) {
+ if (!PagePrivate(page))
+ continue;
- spin_lock(&mapping->private_lock);
- if (!PagePrivate(page)) {
- spin_unlock(&mapping->private_lock);
- continue;
- }
+ if (!wbc->range_cyclic && page->index > end) {
+ done = 1;
+ break;
+ }
- eb = (struct extent_buffer *)page->private;
+ spin_lock(&mapping->private_lock);
+ if (!PagePrivate(page)) {
+ spin_unlock(&mapping->private_lock);
+ continue;
+ }
- /*
- * Shouldn't happen and normally this would be a BUG_ON
- * but no sense in crashing the users box for something
- * we can survive anyway.
- */
- if (WARN_ON(!eb)) {
- spin_unlock(&mapping->private_lock);
- continue;
- }
+ eb = (struct extent_buffer *)page->private;
- if (eb == prev_eb) {
- spin_unlock(&mapping->private_lock);
- continue;
- }
+ /*
+ * Shouldn't happen and normally this would be a BUG_ON
+ * but no sense in crashing the users box for something
+ * we can survive anyway.
+ */
+ if (WARN_ON(!eb)) {
+ spin_unlock(&mapping->private_lock);
+ continue;
+ }
- ret = atomic_inc_not_zero(&eb->refs);
+ if (eb == prev_eb) {
spin_unlock(&mapping->private_lock);
- if (!ret)
- continue;
+ continue;
+ }
- prev_eb = eb;
- ret = lock_extent_buffer_for_io(eb, fs_info, &epd);
- if (!ret) {
- free_extent_buffer(eb);
- continue;
- }
+ ret = atomic_inc_not_zero(&eb->refs);
+ spin_unlock(&mapping->private_lock);
+ if (!ret)
+ continue;
- ret = write_one_eb(eb, fs_info, wbc, &epd);
- if (ret) {
- done = 1;
- free_extent_buffer(eb);
- break;
- }
+ prev_eb = eb;
+ ret = lock_extent_buffer_for_io(eb, fs_info, &epd);
+ if (!ret) {
free_extent_buffer(eb);
+ continue;
+ }
- /*
- * the filesystem may choose to bump up nr_to_write.
- * We have to make sure to honor the new nr_to_write
- * at any time
- */
- nr_to_write_done = wbc->nr_to_write <= 0;
+ ret = write_one_eb(eb, fs_info, wbc, &epd);
+ if (ret) {
+ done = 1;
+ free_extent_buffer(eb);
+ break;
}
- pagevec_release(&pvec);
- cond_resched();
+ free_extent_buffer(eb);
+
+ /*
+ * the filesystem may choose to bump up nr_to_write.
+ * We have to make sure to honor the new nr_to_write
+ * at any time
+ */
+ nr_to_write_done = wbc->nr_to_write <= 0;
}
+ pagecache_iter_release(&iter);
+
if (!scanned && !done) {
/*
* We hit the last page and there is more work to be done: wrap
@@ -3924,8 +3915,8 @@ static int extent_write_cache_pages(struct extent_io_tree *tree,
int done = 0;
int err = 0;
int nr_to_write_done = 0;
- struct pagevec pvec;
- int nr_pages;
+ struct pagecache_iter iter;
+ struct page *page;
pgoff_t index;
pgoff_t end; /* Inclusive */
int scanned = 0;
@@ -3943,7 +3934,6 @@ static int extent_write_cache_pages(struct extent_io_tree *tree,
if (!igrab(inode))
return 0;
- pagevec_init(&pvec, 0);
if (wbc->range_cyclic) {
index = mapping->writeback_index; /* Start from prev offset */
end = -1;
@@ -3959,69 +3949,59 @@ static int extent_write_cache_pages(struct extent_io_tree *tree,
retry:
if (wbc->sync_mode == WB_SYNC_ALL)
tag_pages_for_writeback(mapping, index, end);
- while (!done && !nr_to_write_done && (index <= end) &&
- (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
- min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1))) {
- unsigned i;
-
- scanned = 1;
- for (i = 0; i < nr_pages; i++) {
- struct page *page = pvec.pages[i];
-
- /*
- * At this point we hold neither mapping->tree_lock nor
- * lock on the page itself: the page may be truncated or
- * invalidated (changing page->mapping to NULL), or even
- * swizzled back from swapper_space to tmpfs file
- * mapping
- */
- if (!trylock_page(page)) {
- flush_fn(data);
- lock_page(page);
- }
- if (unlikely(page->mapping != mapping)) {
- unlock_page(page);
- continue;
- }
+ for_each_pagecache_tag(&iter, mapping, tag, index, ULONG_MAX, page) {
+ /*
+ * At this point we hold neither mapping->tree_lock nor lock on
+ * the page itself: the page may be truncated or invalidated
+ * (changing page->mapping to NULL), or even swizzled back from
+ * swapper_space to tmpfs file mapping
+ */
+ if (!trylock_page(page)) {
+ flush_fn(data);
+ lock_page(page);
+ }
- if (!wbc->range_cyclic && page->index > end) {
- done = 1;
- unlock_page(page);
- continue;
- }
+ if (unlikely(page->mapping != mapping)) {
+ unlock_page(page);
+ continue;
+ }
- if (wbc->sync_mode != WB_SYNC_NONE) {
- if (PageWriteback(page))
- flush_fn(data);
- wait_on_page_writeback(page);
- }
+ if (!wbc->range_cyclic && page->index > end) {
+ done = 1;
+ unlock_page(page);
+ continue;
+ }
- if (PageWriteback(page) ||
- !clear_page_dirty_for_io(page)) {
- unlock_page(page);
- continue;
- }
+ if (wbc->sync_mode != WB_SYNC_NONE) {
+ if (PageWriteback(page))
+ flush_fn(data);
+ wait_on_page_writeback(page);
+ }
- ret = (*writepage)(page, wbc, data);
+ if (PageWriteback(page) ||
+ !clear_page_dirty_for_io(page)) {
+ unlock_page(page);
+ continue;
+ }
- if (unlikely(ret == AOP_WRITEPAGE_ACTIVATE)) {
- unlock_page(page);
- ret = 0;
- }
- if (!err && ret < 0)
- err = ret;
+ ret = (*writepage)(page, wbc, data);
- /*
- * the filesystem may choose to bump up nr_to_write.
- * We have to make sure to honor the new nr_to_write
- * at any time
- */
- nr_to_write_done = wbc->nr_to_write <= 0;
+ if (unlikely(ret == AOP_WRITEPAGE_ACTIVATE)) {
+ unlock_page(page);
+ ret = 0;
}
- pagevec_release(&pvec);
- cond_resched();
+ if (!err && ret < 0)
+ err = ret;
+
+ /*
+ * the filesystem may choose to bump up nr_to_write. We have
+ * to make sure to honor the new nr_to_write at any time
+ */
+ nr_to_write_done = wbc->nr_to_write <= 0;
}
+ pagecache_iter_release(&iter);
+
if (!scanned && !done && !err) {
/*
* We hit the last page and there is more work to be done: wrap
diff --git a/fs/ceph/cache.c b/fs/ceph/cache.c
index a351480dbabc..8b3ac70b855b 100644
--- a/fs/ceph/cache.c
+++ b/fs/ceph/cache.c
@@ -144,31 +144,14 @@ static enum fscache_checkaux ceph_fscache_inode_check_aux(
static void ceph_fscache_inode_now_uncached(void* cookie_netfs_data)
{
struct ceph_inode_info* ci = cookie_netfs_data;
- struct pagevec pvec;
- pgoff_t first;
- int loop, nr_pages;
-
- pagevec_init(&pvec, 0);
- first = 0;
+ struct pagecache_iter iter;
+ struct page *page;
dout("ceph inode 0x%p now uncached", ci);
- while (1) {
- nr_pages = pagevec_lookup(&pvec, ci->vfs_inode.i_mapping, first,
- PAGEVEC_SIZE - pagevec_count(&pvec));
-
- if (!nr_pages)
- break;
-
- for (loop = 0; loop < nr_pages; loop++)
- ClearPageFsCache(pvec.pages[loop]);
-
- first = pvec.pages[nr_pages - 1]->index + 1;
-
- pvec.nr = nr_pages;
- pagevec_release(&pvec);
- cond_resched();
- }
+ for_each_pagecache_page(&iter, ci->vfs_inode.i_mapping,
+ 0, ULONG_MAX, page)
+ ClearPageFsCache(page);
}
static const struct fscache_cookie_def ceph_fscache_inode_object_def = {
diff --git a/fs/cifs/cache.c b/fs/cifs/cache.c
index 6c665bf4a27c..3d3b22a158b7 100644
--- a/fs/cifs/cache.c
+++ b/fs/cifs/cache.c
@@ -295,31 +295,14 @@ fscache_checkaux cifs_fscache_inode_check_aux(void *cookie_netfs_data,
static void cifs_fscache_inode_now_uncached(void *cookie_netfs_data)
{
struct cifsInodeInfo *cifsi = cookie_netfs_data;
- struct pagevec pvec;
- pgoff_t first;
- int loop, nr_pages;
-
- pagevec_init(&pvec, 0);
- first = 0;
+ struct pagecache_iter iter;
+ struct page *page;
cifs_dbg(FYI, "%s: cifs inode 0x%p now uncached\n", __func__, cifsi);
- for (;;) {
- nr_pages = pagevec_lookup(&pvec,
- cifsi->vfs_inode.i_mapping, first,
- PAGEVEC_SIZE - pagevec_count(&pvec));
- if (!nr_pages)
- break;
-
- for (loop = 0; loop < nr_pages; loop++)
- ClearPageFsCache(pvec.pages[loop]);
-
- first = pvec.pages[nr_pages - 1]->index + 1;
-
- pvec.nr = nr_pages;
- pagevec_release(&pvec);
- cond_resched();
- }
+ for_each_pagecache_page(&iter, cifsi->vfs_inode.i_mapping,
+ 0, ULONG_MAX, page)
+ ClearPageFsCache(page);
}
const struct fscache_cookie_def cifs_fscache_inode_object_def = {
diff --git a/fs/dax.c b/fs/dax.c
index 90322eb7498c..ac8f96b80fa3 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -493,11 +493,10 @@ int dax_writeback_mapping_range(struct address_space *mapping,
struct block_device *bdev, struct writeback_control *wbc)
{
struct inode *inode = mapping->host;
- pgoff_t start_index, end_index, pmd_index;
- pgoff_t indices[PAGEVEC_SIZE];
- struct pagevec pvec;
- bool done = false;
- int i, ret = 0;
+ pgoff_t start_index, end_index, pmd_index, index;
+ struct pagecache_iter iter;
+ struct page *page;
+ int ret;
void *entry;
if (WARN_ON_ONCE(inode->i_blkbits != PAGE_SHIFT))
@@ -520,27 +519,15 @@ int dax_writeback_mapping_range(struct address_space *mapping,
tag_pages_for_writeback(mapping, start_index, end_index);
- pagevec_init(&pvec, 0);
- while (!done) {
- pvec.nr = find_get_entries_tag(mapping, start_index,
- PAGECACHE_TAG_TOWRITE, PAGEVEC_SIZE,
- pvec.pages, indices);
-
- if (pvec.nr == 0)
- break;
-
- for (i = 0; i < pvec.nr; i++) {
- if (indices[i] > end_index) {
- done = true;
- break;
- }
-
- ret = dax_writeback_one(bdev, mapping, indices[i],
- pvec.pages[i]);
- if (ret < 0)
- return ret;
+ for_each_pagecache_entry_tag(&iter, mapping, PAGECACHE_TAG_TOWRITE,
+ start_index, end_index, page, index) {
+ ret = dax_writeback_one(bdev, mapping, index, page);
+ if (ret < 0) {
+ pagecache_iter_release(&iter);
+ return ret;
}
}
+
wmb_pmem();
return 0;
}
diff --git a/fs/fscache/page.c b/fs/fscache/page.c
index 6b35fc4860a0..49f562a82fb0 100644
--- a/fs/fscache/page.c
+++ b/fs/fscache/page.c
@@ -1162,9 +1162,8 @@ void __fscache_uncache_all_inode_pages(struct fscache_cookie *cookie,
struct inode *inode)
{
struct address_space *mapping = inode->i_mapping;
- struct pagevec pvec;
- pgoff_t next;
- int i;
+ struct pagecache_iter iter;
+ struct page *page;
_enter("%p,%p", cookie, inode);
@@ -1173,22 +1172,11 @@ void __fscache_uncache_all_inode_pages(struct fscache_cookie *cookie,
return;
}
- pagevec_init(&pvec, 0);
- next = 0;
- do {
- if (!pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE))
- break;
- for (i = 0; i < pagevec_count(&pvec); i++) {
- struct page *page = pvec.pages[i];
- next = page->index;
- if (PageFsCache(page)) {
- __fscache_wait_on_page_write(cookie, page);
- __fscache_uncache_page(cookie, page);
- }
+ for_each_pagecache_page(&iter, mapping, 0, ULONG_MAX, page)
+ if (PageFsCache(page)) {
+ __fscache_wait_on_page_write(cookie, page);
+ __fscache_uncache_page(cookie, page);
}
- pagevec_release(&pvec);
- cond_resched();
- } while (++next);
_leave("");
}
diff --git a/fs/nfs/fscache-index.c b/fs/nfs/fscache-index.c
index 777b055063f6..4ea00cbdbb4a 100644
--- a/fs/nfs/fscache-index.c
+++ b/fs/nfs/fscache-index.c
@@ -261,33 +261,14 @@ enum fscache_checkaux nfs_fscache_inode_check_aux(void *cookie_netfs_data,
static void nfs_fscache_inode_now_uncached(void *cookie_netfs_data)
{
struct nfs_inode *nfsi = cookie_netfs_data;
- struct pagevec pvec;
- pgoff_t first;
- int loop, nr_pages;
-
- pagevec_init(&pvec, 0);
- first = 0;
+ struct pagecache_iter iter;
+ struct page *page;
dprintk("NFS: nfs_inode_now_uncached: nfs_inode 0x%p\n", nfsi);
- for (;;) {
- /* grab a bunch of pages to unmark */
- nr_pages = pagevec_lookup(&pvec,
- nfsi->vfs_inode.i_mapping,
- first,
- PAGEVEC_SIZE - pagevec_count(&pvec));
- if (!nr_pages)
- break;
-
- for (loop = 0; loop < nr_pages; loop++)
- ClearPageFsCache(pvec.pages[loop]);
-
- first = pvec.pages[nr_pages - 1]->index + 1;
-
- pvec.nr = nr_pages;
- pagevec_release(&pvec);
- cond_resched();
- }
+ for_each_pagecache_page(&iter, nfsi->vfs_inode.i_mapping,
+ 0, ULONG_MAX, page)
+ ClearPageFsCache(page);
}
/*
diff --git a/fs/nilfs2/btree.c b/fs/nilfs2/btree.c
index 3a3821b00486..2ebd22f680ae 100644
--- a/fs/nilfs2/btree.c
+++ b/fs/nilfs2/btree.c
@@ -2135,30 +2135,24 @@ static void nilfs_btree_lookup_dirty_buffers(struct nilfs_bmap *btree,
{
struct address_space *btcache = &NILFS_BMAP_I(btree)->i_btnode_cache;
struct list_head lists[NILFS_BTREE_LEVEL_MAX];
- struct pagevec pvec;
+ struct pagecache_iter iter;
+ struct page *page;
struct buffer_head *bh, *head;
- pgoff_t index = 0;
- int level, i;
+ int level;
for (level = NILFS_BTREE_LEVEL_NODE_MIN;
level < NILFS_BTREE_LEVEL_MAX;
level++)
INIT_LIST_HEAD(&lists[level]);
- pagevec_init(&pvec, 0);
-
- while (pagevec_lookup_tag(&pvec, btcache, &index, PAGECACHE_TAG_DIRTY,
- PAGEVEC_SIZE)) {
- for (i = 0; i < pagevec_count(&pvec); i++) {
- bh = head = page_buffers(pvec.pages[i]);
- do {
- if (buffer_dirty(bh))
- nilfs_btree_add_dirty_buffer(btree,
- lists, bh);
- } while ((bh = bh->b_this_page) != head);
- }
- pagevec_release(&pvec);
- cond_resched();
+ for_each_pagecache_tag(&iter, btcache, PAGECACHE_TAG_DIRTY,
+ 0, ULONG_MAX, page) {
+ bh = head = page_buffers(page);
+ do {
+ if (buffer_dirty(bh))
+ nilfs_btree_add_dirty_buffer(btree,
+ lists, bh);
+ } while ((bh = bh->b_this_page) != head);
}
for (level = NILFS_BTREE_LEVEL_NODE_MIN;
diff --git a/fs/nilfs2/page.c b/fs/nilfs2/page.c
index c20df77eff99..addab02537d8 100644
--- a/fs/nilfs2/page.c
+++ b/fs/nilfs2/page.c
@@ -255,20 +255,12 @@ static void nilfs_copy_page(struct page *dst, struct page *src, int copy_dirty)
int nilfs_copy_dirty_pages(struct address_space *dmap,
struct address_space *smap)
{
- struct pagevec pvec;
- unsigned int i;
- pgoff_t index = 0;
+ struct pagecache_iter iter;
+ struct page *page, *dpage;
int err = 0;
- pagevec_init(&pvec, 0);
-repeat:
- if (!pagevec_lookup_tag(&pvec, smap, &index, PAGECACHE_TAG_DIRTY,
- PAGEVEC_SIZE))
- return 0;
-
- for (i = 0; i < pagevec_count(&pvec); i++) {
- struct page *page = pvec.pages[i], *dpage;
-
+ for_each_pagecache_tag(&iter, smap, PAGECACHE_TAG_DIRTY,
+ 0, ULONG_MAX, page) {
lock_page(page);
if (unlikely(!PageDirty(page)))
NILFS_PAGE_BUG(page, "inconsistent dirty state");
@@ -291,11 +283,8 @@ repeat:
page_cache_release(dpage);
unlock_page(page);
}
- pagevec_release(&pvec);
- cond_resched();
+ pagecache_iter_release(&iter);
- if (likely(!err))
- goto repeat;
return err;
}
@@ -310,20 +299,11 @@ repeat:
void nilfs_copy_back_pages(struct address_space *dmap,
struct address_space *smap)
{
- struct pagevec pvec;
- unsigned int i, n;
- pgoff_t index = 0;
+ struct pagecache_iter iter;
+ struct page *page, *dpage;
int err;
- pagevec_init(&pvec, 0);
-repeat:
- n = pagevec_lookup(&pvec, smap, index, PAGEVEC_SIZE);
- if (!n)
- return;
- index = pvec.pages[n - 1]->index + 1;
-
- for (i = 0; i < pagevec_count(&pvec); i++) {
- struct page *page = pvec.pages[i], *dpage;
+ for_each_pagecache_page(&iter, smap, 0, ULONG_MAX, page) {
pgoff_t offset = page->index;
lock_page(page);
@@ -363,10 +343,6 @@ repeat:
}
unlock_page(page);
}
- pagevec_release(&pvec);
- cond_resched();
-
- goto repeat;
}
/**
@@ -376,23 +352,14 @@ repeat:
*/
void nilfs_clear_dirty_pages(struct address_space *mapping, bool silent)
{
- struct pagevec pvec;
- unsigned int i;
- pgoff_t index = 0;
-
- pagevec_init(&pvec, 0);
-
- while (pagevec_lookup_tag(&pvec, mapping, &index, PAGECACHE_TAG_DIRTY,
- PAGEVEC_SIZE)) {
- for (i = 0; i < pagevec_count(&pvec); i++) {
- struct page *page = pvec.pages[i];
+ struct pagecache_iter iter;
+ struct page *page;
- lock_page(page);
- nilfs_clear_dirty_page(page, silent);
- unlock_page(page);
- }
- pagevec_release(&pvec);
- cond_resched();
+ for_each_pagecache_tag(&iter, mapping, PAGECACHE_TAG_DIRTY,
+ 0, ULONG_MAX, page) {
+ lock_page(page);
+ nilfs_clear_dirty_page(page, silent);
+ unlock_page(page);
}
}
diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c
index 3b65adaae7e4..db6c9ad1b37f 100644
--- a/fs/nilfs2/segment.c
+++ b/fs/nilfs2/segment.c
@@ -688,10 +688,11 @@ static size_t nilfs_lookup_dirty_data_buffers(struct inode *inode,
loff_t start, loff_t end)
{
struct address_space *mapping = inode->i_mapping;
- struct pagevec pvec;
+ struct pagecache_iter iter;
+ struct page *page;
+ struct buffer_head *bh, *head;
pgoff_t index = 0, last = ULONG_MAX;
size_t ndirties = 0;
- int i;
if (unlikely(start != 0 || end != LLONG_MAX)) {
/*
@@ -702,21 +703,9 @@ static size_t nilfs_lookup_dirty_data_buffers(struct inode *inode,
index = start >> PAGE_SHIFT;
last = end >> PAGE_SHIFT;
}
- pagevec_init(&pvec, 0);
- repeat:
- if (unlikely(index > last) ||
- !pagevec_lookup_tag(&pvec, mapping, &index, PAGECACHE_TAG_DIRTY,
- min_t(pgoff_t, last - index,
- PAGEVEC_SIZE - 1) + 1))
- return ndirties;
-
- for (i = 0; i < pagevec_count(&pvec); i++) {
- struct buffer_head *bh, *head;
- struct page *page = pvec.pages[i];
-
- if (unlikely(page->index > last))
- break;
+ for_each_pagecache_tag(&iter, mapping, PAGECACHE_TAG_DIRTY,
+ 0, last, page) {
lock_page(page);
if (!page_has_buffers(page))
create_empty_buffers(page, 1 << inode->i_blkbits, 0);
@@ -729,16 +718,12 @@ static size_t nilfs_lookup_dirty_data_buffers(struct inode *inode,
get_bh(bh);
list_add_tail(&bh->b_assoc_buffers, listp);
ndirties++;
- if (unlikely(ndirties >= nlimit)) {
- pagevec_release(&pvec);
- cond_resched();
- return ndirties;
- }
+ if (unlikely(ndirties >= nlimit))
+ break;
} while (bh = bh->b_this_page, bh != head);
}
- pagevec_release(&pvec);
- cond_resched();
- goto repeat;
+ pagecache_iter_release(&iter);
+ return ndirties;
}
static void nilfs_lookup_dirty_node_buffers(struct inode *inode,
@@ -746,29 +731,22 @@ static void nilfs_lookup_dirty_node_buffers(struct inode *inode,
{
struct nilfs_inode_info *ii = NILFS_I(inode);
struct address_space *mapping = &ii->i_btnode_cache;
- struct pagevec pvec;
+ struct pagecache_iter iter;
+ struct page *page;
struct buffer_head *bh, *head;
- unsigned int i;
- pgoff_t index = 0;
-
- pagevec_init(&pvec, 0);
-
- while (pagevec_lookup_tag(&pvec, mapping, &index, PAGECACHE_TAG_DIRTY,
- PAGEVEC_SIZE)) {
- for (i = 0; i < pagevec_count(&pvec); i++) {
- bh = head = page_buffers(pvec.pages[i]);
- do {
- if (buffer_dirty(bh) &&
- !buffer_async_write(bh)) {
- get_bh(bh);
- list_add_tail(&bh->b_assoc_buffers,
- listp);
- }
- bh = bh->b_this_page;
- } while (bh != head);
- }
- pagevec_release(&pvec);
- cond_resched();
+
+ for_each_pagecache_tag(&iter, mapping, PAGECACHE_TAG_DIRTY,
+ 0, ULONG_MAX, page) {
+ bh = head = page_buffers(page);
+ do {
+ if (buffer_dirty(bh) &&
+ !buffer_async_write(bh)) {
+ get_bh(bh);
+ list_add_tail(&bh->b_assoc_buffers,
+ listp);
+ }
+ bh = bh->b_this_page;
+ } while (bh != head);
}
}