author     Kent Overstreet <kent.overstreet@gmail.com>   2016-09-19 17:11:53 -0800
committer  Kent Overstreet <kent.overstreet@gmail.com>   2016-10-07 12:37:09 -0800
commit     741afff52574436a533d4ba74360dd4f0014273a (patch)
tree       95c8e12731dbb9569a4a96183872796d4a86a821
parent     70ddc2326b6472649f50cc59902dea3e29ff48b5 (diff)
mm: Real pagecache iterators
Introduce for_each_pagecache_page() and related macros, with the goal of
replacing most/all uses of pagevec_lookup().
For the most part this shouldn't be a functional change. The one functional
difference with the new macros is that they now take an @end parameter, so we're
able to avoid grabbing pages in __find_get_pages() that we'll never use.
This patch only does some of the conversions - the ones I was able to easily test
myself. The conversions are mechanical, but tricky enough that they generally
warrant testing.
Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Al Viro <viro@zeniv.linux.org.uk>
-rw-r--r--   include/linux/pagevec.h  |  67
-rw-r--r--   mm/filemap.c             |  76
-rw-r--r--   mm/page-writeback.c      | 148
-rw-r--r--   mm/swap.c                |  33
-rw-r--r--   mm/truncate.c            | 279
5 files changed, 284 insertions, 319 deletions
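
For illustration, the conversion pattern the new macros enable looks roughly like the
sketch below. It mirrors the __filemap_fdatawait_range() change in this patch; the
function name wait_range_writeback() is invented for the example, while
struct pagecache_iter, for_each_pagecache_tag() and PAGECACHE_TAG_WRITEBACK are the
symbols this patch adds or existing kernel symbols.

	#include <linux/fs.h>
	#include <linux/pagemap.h>
	#include <linux/pagevec.h>

	/* Sketch only: wait for writeback to finish on pages in [start, end] */
	static int wait_range_writeback(struct address_space *mapping,
					pgoff_t start, pgoff_t end)
	{
		struct pagecache_iter iter;	/* batches up to PAGEVEC_SIZE page refs */
		struct page *page;
		int ret = 0;

		/* only pages tagged for writeback in [start, end] are returned */
		for_each_pagecache_tag(&iter, mapping, PAGECACHE_TAG_WRITEBACK,
				       start, end, page) {
			wait_on_page_writeback(page);
			if (TestClearPageError(page))
				ret = -EIO;
		}
		/* loop terminated normally, so the iterator already dropped its refs */

		return ret;
	}

Compared with the old pagevec_lookup_tag() loop, the caller no longer manages the
pagevec, re-checks page->index against the end of the range, or sprinkles
pagevec_release()/cond_resched() by hand - pagecache_iter_next() does that per batch.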
diff --git a/include/linux/pagevec.h b/include/linux/pagevec.h
index b45d391b4540..e60d74148d0b 100644
--- a/include/linux/pagevec.h
+++ b/include/linux/pagevec.h
@@ -22,10 +22,6 @@ struct pagevec {
 
 void __pagevec_release(struct pagevec *pvec);
 void __pagevec_lru_add(struct pagevec *pvec);
-unsigned pagevec_lookup_entries(struct pagevec *pvec,
-				struct address_space *mapping,
-				pgoff_t start, unsigned nr_entries,
-				pgoff_t *indices);
 void pagevec_remove_exceptionals(struct pagevec *pvec);
 unsigned pagevec_lookup(struct pagevec *pvec, struct address_space *mapping,
 		pgoff_t start, unsigned nr_pages);
@@ -69,4 +65,67 @@ static inline void pagevec_release(struct pagevec *pvec)
 		__pagevec_release(pvec);
 }
 
+struct pagecache_iter {
+	unsigned	nr;
+	unsigned	idx;
+	pgoff_t		index;
+	struct page	*pages[PAGEVEC_SIZE];
+	pgoff_t		indices[PAGEVEC_SIZE];
+};
+
+static inline void pagecache_iter_init(struct pagecache_iter *iter,
+				       pgoff_t start)
+{
+	iter->nr = 0;
+	iter->idx = 0;
+	iter->index = start;
+}
+
+void __pagecache_iter_release(struct pagecache_iter *iter);
+
+/**
+ * pagecache_iter_release - release cached pages from pagacache_iter
+ *
+ * Must be called if breaking out of for_each_pagecache_page() etc. early - not
+ * needed if pagecache_iter_next() returned NULL and loop terminated normally
+ */
+static inline void pagecache_iter_release(struct pagecache_iter *iter)
+{
+	if (iter->nr)
+		__pagecache_iter_release(iter);
+}
+
+struct page *pagecache_iter_next(struct pagecache_iter *iter,
+				 struct address_space *mapping,
+				 pgoff_t end, pgoff_t *index,
+				 unsigned flags);
+
+#define __pagecache_iter_for_each(_iter, _mapping, _start, _end,	\
+				  _page, _index, _flags)		\
+	for (pagecache_iter_init((_iter), (_start));			\
+	     ((_page) = pagecache_iter_next((_iter), (_mapping),	\
+					    (_end), (_index), (_flags)));)
+
+#define for_each_pagecache_page(_iter, _mapping, _start, _end, _page)	\
+	__pagecache_iter_for_each((_iter), (_mapping), (_start), (_end),\
+				  (_page), NULL, 0)
+
+#define for_each_pagecache_page_contig(_iter, _mapping, _start, _end, _page)\
+	__pagecache_iter_for_each((_iter), (_mapping), (_start), (_end),\
+				  (_page), NULL, RADIX_TREE_ITER_CONTIG)
+
+#define for_each_pagecache_tag(_iter, _mapping, _tag, _start, _end, _page)\
+	__pagecache_iter_for_each((_iter), (_mapping), (_start), (_end),\
+				  (_page), NULL, RADIX_TREE_ITER_TAGGED|(_tag))
+
+#define for_each_pagecache_entry(_iter, _mapping, _start, _end, _page, _index)\
+	__pagecache_iter_for_each((_iter), (_mapping), (_start), (_end),\
+				  (_page), &(_index), RADIX_TREE_ITER_EXCEPTIONAL)
+
+#define for_each_pagecache_entry_tag(_iter, _mapping, _tag,		\
+				     _start, _end, _page, _index)	\
+	__pagecache_iter_for_each((_iter), (_mapping), (_start), (_end),\
+				  (_page), &(_index), RADIX_TREE_ITER_EXCEPTIONAL|\
+				  RADIX_TREE_ITER_TAGGED|(_tag))
+
 #endif /* _LINUX_PAGEVEC_H */
diff --git a/mm/filemap.c b/mm/filemap.c
index 37ed2a7fb8cf..4ec9e9b69bc5 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -480,35 +480,20 @@ EXPORT_SYMBOL(filemap_flush);
 static int __filemap_fdatawait_range(struct address_space *mapping,
 				     loff_t start_byte, loff_t end_byte)
 {
-	pgoff_t index = start_byte >> PAGE_SHIFT;
+	pgoff_t start = start_byte >> PAGE_SHIFT;
 	pgoff_t end = end_byte >> PAGE_SHIFT;
-	struct pagevec pvec;
-	int nr_pages;
+	struct pagecache_iter iter;
+	struct page *page;
 	int ret = 0;
 
 	if (end_byte < start_byte)
 		goto out;
 
-	pagevec_init(&pvec, 0);
-	while ((index <= end) &&
-	       (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
-			PAGECACHE_TAG_WRITEBACK,
-			min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1)) != 0) {
-		unsigned i;
-
-		for (i = 0; i < nr_pages; i++) {
-			struct page *page = pvec.pages[i];
-
-			/* until radix tree lookup accepts end_index */
-			if (page->index > end)
-				continue;
-
-			wait_on_page_writeback(page);
-			if (TestClearPageError(page))
-				ret = -EIO;
-		}
-		pagevec_release(&pvec);
-		cond_resched();
+	for_each_pagecache_tag(&iter, mapping, PAGECACHE_TAG_WRITEBACK,
+			       start, end, page) {
+		wait_on_page_writeback(page);
+		if (TestClearPageError(page))
+			ret = -EIO;
 	}
 out:
 	return ret;
@@ -1423,6 +1408,51 @@ no_entry:
 }
 EXPORT_SYMBOL(__find_get_pages);
 
+void __pagecache_iter_release(struct pagecache_iter *iter)
+{
+	lru_add_drain();
+	release_pages(iter->pages, iter->nr, 0);
+	iter->nr = 0;
+	iter->idx = 0;
+}
+EXPORT_SYMBOL(__pagecache_iter_release);
+
+/**
+ * pagecache_iter_next - get next page from pagecache iterator and advance
+ * iterator
+ * @iter:	The iterator to advance
+ * @mapping:	The address_space to search
+ * @end:	Page cache index to stop at (inclusive)
+ * @index:	if non NULL, index of page or entry will be returned here
+ * @flags:	radix tree iter flags and tag for __find_get_pages()
+ */
+struct page *pagecache_iter_next(struct pagecache_iter *iter,
+				 struct address_space *mapping,
+				 pgoff_t end, pgoff_t *index,
+				 unsigned flags)
+{
+	struct page *page;
+
+	if (iter->idx >= iter->nr) {
+		pagecache_iter_release(iter);
+		cond_resched();
+
+		iter->nr = __find_get_pages(mapping, iter->index, end,
+					    PAGEVEC_SIZE, iter->pages,
+					    iter->indices, flags);
+		if (!iter->nr)
+			return NULL;
+	}
+
+	iter->index = iter->indices[iter->idx] + 1;
+	if (index)
+		*index = iter->indices[iter->idx];
+	page = iter->pages[iter->idx];
+	iter->idx++;
+	return page;
+}
+EXPORT_SYMBOL(pagecache_iter_next);
+
 /*
  * CD/DVDs are error prone. When a medium error occurs, the driver may fail
  * a _large_ part of the i/o request. Imagine the worst scenario:
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 76c944af8f18..e95a84e41840 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -2188,10 +2188,10 @@ int write_cache_pages(struct address_space *mapping,
 		      struct writeback_control *wbc, writepage_t writepage,
 		      void *data)
 {
+	struct pagecache_iter iter;
+	struct page *page;
 	int ret = 0;
 	int done = 0;
-	struct pagevec pvec;
-	int nr_pages;
 	pgoff_t uninitialized_var(writeback_index);
 	pgoff_t index;
 	pgoff_t end;		/* Inclusive */
@@ -2200,7 +2200,6 @@ int write_cache_pages(struct address_space *mapping,
 	int range_whole = 0;
 	int tag;
 
-	pagevec_init(&pvec, 0);
 	if (wbc->range_cyclic) {
 		writeback_index = mapping->writeback_index; /* prev offset */
 		index = writeback_index;
@@ -2223,105 +2222,80 @@ int write_cache_pages(struct address_space *mapping,
 retry:
 	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
 		tag_pages_for_writeback(mapping, index, end);
-	done_index = index;
-	while (!done && (index <= end)) {
-		int i;
-
-		nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
-			min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
-		if (nr_pages == 0)
-			break;
 
-		for (i = 0; i < nr_pages; i++) {
-			struct page *page = pvec.pages[i];
-
-			/*
-			 * At this point, the page may be truncated or
-			 * invalidated (changing page->mapping to NULL), or
-			 * even swizzled back from swapper_space to tmpfs file
-			 * mapping. However, page->index will not change
-			 * because we have a reference on the page.
-			 */
-			if (page->index > end) {
-				/*
-				 * can't be range_cyclic (1st pass) because
-				 * end == -1 in that case.
-				 */
-				done = 1;
-				break;
-			}
+	done_index = index;
 
-			done_index = page->index;
+	for_each_pagecache_tag(&iter, mapping, tag, index, end, page) {
+		done_index = page->index;
 
-			lock_page(page);
+		lock_page(page);
 
-			/*
-			 * Page truncated or invalidated. We can freely skip it
-			 * then, even for data integrity operations: the page
-			 * has disappeared concurrently, so there could be no
-			 * real expectation of this data interity operation
-			 * even if there is now a new, dirty page at the same
-			 * pagecache address.
-			 */
-			if (unlikely(page->mapping != mapping)) {
+		/*
+		 * Page truncated or invalidated. We can freely skip it
+		 * then, even for data integrity operations: the page
+		 * has disappeared concurrently, so there could be no
+		 * real expectation of this data interity operation
+		 * even if there is now a new, dirty page at the same
+		 * pagecache address.
+		 */
+		if (unlikely(page->mapping != mapping)) {
 continue_unlock:
-				unlock_page(page);
-				continue;
-			}
-
-			if (!PageDirty(page)) {
-				/* someone wrote it for us */
-				goto continue_unlock;
-			}
+			unlock_page(page);
+			continue;
+		}
 
-			if (PageWriteback(page)) {
-				if (wbc->sync_mode != WB_SYNC_NONE)
-					wait_on_page_writeback(page);
-				else
-					goto continue_unlock;
-			}
+		if (!PageDirty(page)) {
+			/* someone wrote it for us */
+			goto continue_unlock;
+		}
 
-			BUG_ON(PageWriteback(page));
-			if (!clear_page_dirty_for_io(page))
+		if (PageWriteback(page)) {
+			if (wbc->sync_mode != WB_SYNC_NONE)
+				wait_on_page_writeback(page);
+			else
 				goto continue_unlock;
+		}
 
-			trace_wbc_writepage(wbc, inode_to_bdi(mapping->host));
-			ret = (*writepage)(page, wbc, data);
-			if (unlikely(ret)) {
-				if (ret == AOP_WRITEPAGE_ACTIVATE) {
-					unlock_page(page);
-					ret = 0;
-				} else {
-					/*
-					 * done_index is set past this page,
-					 * so media errors will not choke
-					 * background writeout for the entire
-					 * file. This has consequences for
-					 * range_cyclic semantics (ie. it may
-					 * not be suitable for data integrity
-					 * writeout).
-					 */
-					done_index = page->index + 1;
-					done = 1;
-					break;
-				}
-			}
+		BUG_ON(PageWriteback(page));
+		if (!clear_page_dirty_for_io(page))
+			goto continue_unlock;
 
-			/*
-			 * We stop writing back only if we are not doing
-			 * integrity sync. In case of integrity sync we have to
-			 * keep going until we have written all the pages
-			 * we tagged for writeback prior to entering this loop.
-			 */
-			if (--wbc->nr_to_write <= 0 &&
-			    wbc->sync_mode == WB_SYNC_NONE) {
+		trace_wbc_writepage(wbc, inode_to_bdi(mapping->host));
+		ret = (*writepage)(page, wbc, data);
+		if (unlikely(ret)) {
+			if (ret == AOP_WRITEPAGE_ACTIVATE) {
+				unlock_page(page);
+				ret = 0;
+			} else {
+				/*
+				 * done_index is set past this page,
+				 * so media errors will not choke
+				 * background writeout for the entire
+				 * file. This has consequences for
+				 * range_cyclic semantics (ie. it may
+				 * not be suitable for data integrity
+				 * writeout).
+				 */
+				done_index = page->index + 1;
 				done = 1;
 				break;
 			}
 		}
-		pagevec_release(&pvec);
-		cond_resched();
+
+		/*
+		 * We stop writing back only if we are not doing
+		 * integrity sync. In case of integrity sync we have to
+		 * keep going until we have written all the pages
+		 * we tagged for writeback prior to entering this loop.
+		 */
+		if (--wbc->nr_to_write <= 0 &&
+		    wbc->sync_mode == WB_SYNC_NONE) {
+			done = 1;
+			break;
+		}
 	}
+	pagecache_iter_release(&iter);
+
 	if (!cycled && !done) {
 		/*
 		 * range_cyclic:
diff --git a/mm/swap.c b/mm/swap.c
index 75c63bb2a1da..9697f8ccb7bc 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -738,6 +738,9 @@ void release_pages(struct page **pages, int nr, bool cold)
 	for (i = 0; i < nr; i++) {
 		struct page *page = pages[i];
 
+		if (radix_tree_exceptional_entry(page))
+			continue;
+
 		/*
 		 * Make sure the IRQ-safe lock-holding time does not get
 		 * excessive with a continuous string of pages from the
@@ -882,36 +885,6 @@ void __pagevec_lru_add(struct pagevec *pvec)
 EXPORT_SYMBOL(__pagevec_lru_add);
 
 /**
- * pagevec_lookup_entries - gang pagecache lookup
- * @pvec:	Where the resulting entries are placed
- * @mapping:	The address_space to search
- * @start:	The starting entry index
- * @nr_entries:	The maximum number of entries
- * @indices:	The cache indices corresponding to the entries in @pvec
- *
- * pagevec_lookup_entries() will search for and return a group of up
- * to @nr_entries pages and shadow entries in the mapping.  All
- * entries are placed in @pvec.  pagevec_lookup_entries() takes a
- * reference against actual pages in @pvec.
- *
- * The search returns a group of mapping-contiguous entries with
- * ascending indexes.  There may be holes in the indices due to
- * not-present entries.
- *
- * pagevec_lookup_entries() returns the number of entries which were
- * found.
- */
-unsigned pagevec_lookup_entries(struct pagevec *pvec,
-				struct address_space *mapping,
-				pgoff_t start, unsigned nr_pages,
-				pgoff_t *indices)
-{
-	pvec->nr = find_get_entries(mapping, start, nr_pages,
-				    pvec->pages, indices);
-	return pagevec_count(pvec);
-}
-
-/**
  * pagevec_remove_exceptionals - pagevec exceptionals pruning
  * @pvec:	The pagevec to prune
  *
diff --git a/mm/truncate.c b/mm/truncate.c
index a01cce450a26..0a137550d071 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -233,10 +233,10 @@ void truncate_inode_pages_range(struct address_space *mapping,
 	pgoff_t		end;		/* exclusive */
 	unsigned int	partial_start;	/* inclusive */
 	unsigned int	partial_end;	/* exclusive */
-	struct pagevec	pvec;
-	pgoff_t		indices[PAGEVEC_SIZE];
-	pgoff_t		index;
-	int		i;
+	struct pagecache_iter iter;
+	struct page	*page;
+	pgoff_t		index;
+	bool		found;
 
 	cleancache_invalidate_inode(mapping);
 	if (mapping->nrpages == 0 && mapping->nrexceptional == 0)
@@ -252,51 +252,36 @@ void truncate_inode_pages_range(struct address_space *mapping,
 	 * start of the range and 'partial_end' at the end of the range.
 	 * Note that 'end' is exclusive while 'lend' is inclusive.
 	 */
-	start = (lstart + PAGE_SIZE - 1) >> PAGE_SHIFT;
+	start = round_up(lstart, PAGE_SIZE) >> PAGE_SHIFT;
 	if (lend == -1)
 		/*
-		 * lend == -1 indicates end-of-file so we have to set 'end'
-		 * to the highest possible pgoff_t and since the type is
-		 * unsigned we're using -1.
+		 * lend == -1 indicates end-of-file so we have to set 'end' to
+		 * the highest possible pgoff_t
 		 */
-		end = -1;
+		end = ULONG_MAX;
 	else
 		end = (lend + 1) >> PAGE_SHIFT;
 
-	pagevec_init(&pvec, 0);
-	index = start;
-	while (index < end && pagevec_lookup_entries(&pvec, mapping, index,
-			min(end - index, (pgoff_t)PAGEVEC_SIZE),
-			indices)) {
-		for (i = 0; i < pagevec_count(&pvec); i++) {
-			struct page *page = pvec.pages[i];
-
-			/* We rely upon deletion not changing page->index */
-			index = indices[i];
-			if (index >= end)
-				break;
+	if (start >= end)
+		goto do_partial;
 
-			if (radix_tree_exceptional_entry(page)) {
-				clear_exceptional_entry(mapping, index, page);
-				continue;
-			}
+	for_each_pagecache_entry(&iter, mapping, start, end - 1, page, index) {
+		if (radix_tree_exceptional_entry(page)) {
+			clear_exceptional_entry(mapping, index, page);
+			continue;
+		}
 
-			if (!trylock_page(page))
-				continue;
-			WARN_ON(page_to_pgoff(page) != index);
-			if (PageWriteback(page)) {
-				unlock_page(page);
-				continue;
-			}
-			truncate_inode_page(mapping, page);
+		if (!trylock_page(page))
+			continue;
+		WARN_ON(page_to_pgoff(page) != index);
+		if (PageWriteback(page)) {
 			unlock_page(page);
+			continue;
 		}
-		pagevec_remove_exceptionals(&pvec);
-		pagevec_release(&pvec);
-		cond_resched();
-		index++;
+		truncate_inode_page(mapping, page);
+		unlock_page(page);
 	}
-
+do_partial:
 	if (partial_start) {
 		struct page *page = find_lock_page(mapping, start - 1);
 		if (page) {
@@ -336,34 +321,12 @@ void truncate_inode_pages_range(struct address_space *mapping,
 	if (start >= end)
 		return;
 
-	index = start;
-	for ( ; ; ) {
-		cond_resched();
-		if (!pagevec_lookup_entries(&pvec, mapping, index,
-			min(end - index, (pgoff_t)PAGEVEC_SIZE), indices)) {
-			/* If all gone from start onwards, we're done */
-			if (index == start)
-				break;
-			/* Otherwise restart to make sure all gone */
-			index = start;
-			continue;
-		}
-		if (index == start && indices[0] >= end) {
-			/* All gone out of hole to be punched, we're done */
-			pagevec_remove_exceptionals(&pvec);
-			pagevec_release(&pvec);
-			break;
-		}
-		for (i = 0; i < pagevec_count(&pvec); i++) {
-			struct page *page = pvec.pages[i];
-
-			/* We rely upon deletion not changing page->index */
-			index = indices[i];
-			if (index >= end) {
-				/* Restart punch to make sure all gone */
-				index = start - 1;
-				break;
-			}
+	do {
+		found = false;
+
+		for_each_pagecache_entry(&iter, mapping, start,
+					 end - 1, page, index) {
+			found = true;
 
 			if (radix_tree_exceptional_entry(page)) {
 				clear_exceptional_entry(mapping, index, page);
@@ -376,10 +339,8 @@
 			truncate_inode_page(mapping, page);
 			unlock_page(page);
 		}
-		pagevec_remove_exceptionals(&pvec);
-		pagevec_release(&pvec);
-		index++;
-	}
+	} while (found);
+
 	cleancache_invalidate_inode(mapping);
 }
 EXPORT_SYMBOL(truncate_inode_pages_range);
@@ -465,62 +426,46 @@ EXPORT_SYMBOL(truncate_inode_pages_final);
 unsigned long invalidate_mapping_pages(struct address_space *mapping,
 		pgoff_t start, pgoff_t end)
 {
-	pgoff_t indices[PAGEVEC_SIZE];
-	struct pagevec pvec;
-	pgoff_t index = start;
+	struct pagecache_iter iter;
+	struct page *page;
+	pgoff_t index;
 	unsigned long ret;
 	unsigned long count = 0;
-	int i;
-
-	pagevec_init(&pvec, 0);
-	while (index <= end && pagevec_lookup_entries(&pvec, mapping, index,
-			min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1,
-			indices)) {
-		for (i = 0; i < pagevec_count(&pvec); i++) {
-			struct page *page = pvec.pages[i];
-
-			/* We rely upon deletion not changing page->index */
-			index = indices[i];
-			if (index > end)
-				break;
-
-			if (radix_tree_exceptional_entry(page)) {
-				clear_exceptional_entry(mapping, index, page);
-				continue;
-			}
 
-			if (!trylock_page(page))
-				continue;
+	for_each_pagecache_entry(&iter, mapping, start, end, page, index) {
+		if (radix_tree_exceptional_entry(page)) {
+			clear_exceptional_entry(mapping, index, page);
+			continue;
+		}
 
-			WARN_ON(page_to_pgoff(page) != index);
+		if (!trylock_page(page))
+			continue;
 
-			/* Middle of THP: skip */
-			if (PageTransTail(page)) {
-				unlock_page(page);
-				continue;
-			} else if (PageTransHuge(page)) {
-				index += HPAGE_PMD_NR - 1;
-				i += HPAGE_PMD_NR - 1;
-				/* 'end' is in the middle of THP */
-				if (index == round_down(end, HPAGE_PMD_NR))
-					continue;
-			}
+		WARN_ON(page_to_pgoff(page) != index);
 
-			ret = invalidate_inode_page(page);
+		/* Middle of THP: skip */
+		if (PageTransTail(page)) {
 			unlock_page(page);
-			/*
-			 * Invalidation is a hint that the page is no longer
-			 * of interest and try to speed up its reclaim.
-			 */
-			if (!ret)
-				deactivate_file_page(page);
-			count += ret;
+			continue;
+		} else if (PageTransHuge(page)) {
+			index += HPAGE_PMD_NR - 1;
+			iter.idx += HPAGE_PMD_NR - 1;
+			/* 'end' is in the middle of THP */
+			if (index == round_down(end, HPAGE_PMD_NR))
+				continue;
 		}
-		pagevec_remove_exceptionals(&pvec);
-		pagevec_release(&pvec);
-		cond_resched();
-		index++;
+
+		ret = invalidate_inode_page(page);
+		unlock_page(page);
+		/*
+		 * Invalidation is a hint that the page is no longer
+		 * of interest and try to speed up its reclaim.
+		 */
+		if (!ret)
+			deactivate_file_page(page);
+		count += ret;
 	}
+
 	return count;
 }
 EXPORT_SYMBOL(invalidate_mapping_pages);
@@ -584,75 +529,59 @@ static int do_launder_page(struct address_space *mapping, struct page *page)
 int invalidate_inode_pages2_range(struct address_space *mapping,
 				  pgoff_t start, pgoff_t end)
 {
-	pgoff_t indices[PAGEVEC_SIZE];
-	struct pagevec pvec;
+	struct pagecache_iter iter;
+	struct page *page;
 	pgoff_t index;
-	int i;
 	int ret = 0;
 	int ret2 = 0;
 	int did_range_unmap = 0;
 
 	cleancache_invalidate_inode(mapping);
-	pagevec_init(&pvec, 0);
-	index = start;
-	while (index <= end && pagevec_lookup_entries(&pvec, mapping, index,
-			min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1,
-			indices)) {
-		for (i = 0; i < pagevec_count(&pvec); i++) {
-			struct page *page = pvec.pages[i];
-
-			/* We rely upon deletion not changing page->index */
-			index = indices[i];
-			if (index > end)
-				break;
-
-			if (radix_tree_exceptional_entry(page)) {
-				clear_exceptional_entry(mapping, index, page);
-				continue;
-			}
 
-			lock_page(page);
-			WARN_ON(page_to_pgoff(page) != index);
-			if (page->mapping != mapping) {
-				unlock_page(page);
-				continue;
-			}
-			wait_on_page_writeback(page);
-			if (page_mapped(page)) {
-				if (!did_range_unmap) {
-					/*
-					 * Zap the rest of the file in one hit.
-					 */
-					unmap_mapping_range(mapping,
-					   (loff_t)index << PAGE_SHIFT,
-					   (loff_t)(1 + end - index)
-							 << PAGE_SHIFT,
-					    0);
-					did_range_unmap = 1;
-				} else {
-					/*
-					 * Just zap this page
-					 */
-					unmap_mapping_range(mapping,
-					   (loff_t)index << PAGE_SHIFT,
-					   PAGE_SIZE, 0);
-				}
-			}
-			BUG_ON(page_mapped(page));
-			ret2 = do_launder_page(mapping, page);
-			if (ret2 == 0) {
-				if (!invalidate_complete_page2(mapping, page))
-					ret2 = -EBUSY;
-			}
-			if (ret2 < 0)
-				ret = ret2;
+	for_each_pagecache_entry(&iter, mapping, start, end, page, index) {
+		if (radix_tree_exceptional_entry(page)) {
+			clear_exceptional_entry(mapping, index, page);
+			continue;
+		}
+
+		lock_page(page);
+		WARN_ON(page_to_pgoff(page) != index);
+		if (page->mapping != mapping) {
 			unlock_page(page);
+			continue;
 		}
-		pagevec_remove_exceptionals(&pvec);
-		pagevec_release(&pvec);
-		cond_resched();
-		index++;
+		wait_on_page_writeback(page);
+		if (page_mapped(page)) {
+			if (!did_range_unmap) {
+				/*
+				 * Zap the rest of the file in one hit.
+				 */
+				unmap_mapping_range(mapping,
+				   (loff_t)index << PAGE_SHIFT,
+				   (loff_t)(1 + end - index)
+						 << PAGE_SHIFT,
+				    0);
+				did_range_unmap = 1;
+			} else {
+				/*
+				 * Just zap this page
+				 */
+				unmap_mapping_range(mapping,
+				   (loff_t)index << PAGE_SHIFT,
+				   PAGE_SIZE, 0);
+			}
+		}
+		BUG_ON(page_mapped(page));
+		ret2 = do_launder_page(mapping, page);
+		if (ret2 == 0) {
+			if (!invalidate_complete_page2(mapping, page))
+				ret2 = -EBUSY;
+		}
+		if (ret2 < 0)
+			ret = ret2;
+		unlock_page(page);
 	}
+
 	cleancache_invalidate_inode(mapping);
 	return ret;
 }
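
One caller-facing rule worth highlighting from the new pagevec.h comment: if a loop
breaks out early, pagecache_iter_release() must be called by hand, because the
iterator may still hold page references; when pagecache_iter_next() returns NULL and
the loop ends normally, the release is not needed (and calling it anyway is a no-op,
which is the pattern write_cache_pages() uses above). A sketch under those
assumptions - the helper name find_first_dirty() is invented for the example:

	/* Sketch only: return the index of the first dirty page in [start, end] */
	static pgoff_t find_first_dirty(struct address_space *mapping,
					pgoff_t start, pgoff_t end)
	{
		struct pagecache_iter iter;
		struct page *page;
		pgoff_t found = (pgoff_t)-1;	/* sentinel: nothing found */

		for_each_pagecache_tag(&iter, mapping, PAGECACHE_TAG_DIRTY,
				       start, end, page) {
			found = page->index;
			break;		/* early exit: iter may still hold page refs */
		}
		pagecache_iter_release(&iter);	/* required after an early break */

		return found;
	}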