Commit metadata

- Author:    Kent Overstreet <kent.overstreet@gmail.com>  (2018-08-22 20:03:26 -0400)
- Committer: Kent Overstreet <kent.overstreet@gmail.com>  (2020-05-06 17:14:15 -0400)
- Commit:    5eb2111fca77777e9e211adfa59d48515520b295 (patch)
- Tree:      59079fc8c74fa7b51f463c41daf12ed583015034
- Parent:    d2e0f799bec259aa7feec69659ba702ef1c1a014 (diff)
add_to_page_cache_vec()
-rw-r--r--  mm/filemap.c  | 129
1 file changed, 84 insertions(+), 45 deletions(-)
diff --git a/mm/filemap.c b/mm/filemap.c index 0b05f376c71e..b82fd3be69ca 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -946,65 +946,99 @@ int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask) } EXPORT_SYMBOL_GPL(replace_page_cache_page); -static int __add_to_page_cache(struct page *page, - struct address_space *mapping, - pgoff_t offset, gfp_t gfp_mask, - void **shadowp) +static int add_to_page_cache_vec(struct page **pages, + unsigned nr_pages, + struct address_space *mapping, + pgoff_t offset, gfp_t gfp_mask, + void *shadow[]) { - int huge = PageHuge(page); struct mem_cgroup *memcg; - int error; + int i, nr_charged = 0, nr_added = 0, error; - VM_BUG_ON_PAGE(!PageLocked(page), page); - VM_BUG_ON_PAGE(PageSwapBacked(page), page); + for (i = 0; i < nr_pages; i++) { + struct page *page = pages[i]; - if (current->pagecache_lock != &mapping->add_lock) - pagecache_add_get(&mapping->add_lock); + VM_BUG_ON_PAGE(PageSwapBacked(page), page); + } + + for (; nr_charged < nr_pages; nr_charged++) { + struct page *page = pages[nr_charged]; + + if (PageHuge(page)) + continue; - if (!huge) { error = mem_cgroup_try_charge(page, current->mm, gfp_mask, &memcg, false); if (error) - goto err; + break; + } + + for (i = 0; i < nr_charged; i++) { + struct page *page = pages[i]; + + __SetPageLocked(page); + get_page(page); + page->mapping = mapping; + page->index = offset + i; } - error = radix_tree_maybe_preload(gfp_mask & GFP_RECLAIM_MASK); - if (error) - goto err_uncharge; + if (current->pagecache_lock != &mapping->add_lock) + pagecache_add_get(&mapping->add_lock); - __SetPageLocked(page); - get_page(page); - page->mapping = mapping; - page->index = offset; + while (nr_added < nr_charged) { + error = radix_tree_maybe_preload(gfp_mask & GFP_RECLAIM_MASK); + if (error) + break; - xa_lock_irq(&mapping->i_pages); - error = page_cache_tree_insert(mapping, page, shadowp); - radix_tree_preload_end(); - if (unlikely(error)) - goto err_insert; + 
xa_lock_irq(&mapping->i_pages); + error = page_cache_tree_insert_vec(mapping, + pages + nr_added, + nr_charged - nr_added, + shadow + nr_added, + offset + nr_added); + if (error < 0) { + xa_unlock_irq(&mapping->i_pages); + radix_tree_preload_end(); + break; + } + + while (error--) { + struct page *page = pages[nr_added++]; + + /* hugetlb pages do not participate in page cache accounting. */ + if (!PageHuge(page)) + __inc_node_page_state(page, NR_FILE_PAGES); + } + + xa_unlock_irq(&mapping->i_pages); + radix_tree_preload_end(); + } - /* hugetlb pages do not participate in page cache accounting. */ - if (!huge) - __inc_node_page_state(page, NR_FILE_PAGES); - xa_unlock_irq(&mapping->i_pages); - if (!huge) - mem_cgroup_commit_charge(page, memcg, false, false); - trace_mm_filemap_add_to_page_cache(page); -err: if (current->pagecache_lock != &mapping->add_lock) pagecache_add_put(&mapping->add_lock); - return error; -err_insert: - page->mapping = NULL; - /* Leave page->index set: truncation relies upon it */ - xa_unlock_irq(&mapping->i_pages); - put_page(page); - __ClearPageLocked(page); -err_uncharge: - if (!huge) - mem_cgroup_cancel_charge(page, memcg, false); - goto err; + for (i = 0; i < nr_added; i++) { + struct page *page = pages[i]; + + if (!PageHuge(page)) + mem_cgroup_commit_charge(page, memcg, false, false); + + trace_mm_filemap_add_to_page_cache(page); + } + + for (i = nr_added; i < nr_charged; i++) { + struct page *page = pages[i]; + + if (!PageHuge(page)) + mem_cgroup_cancel_charge(page, memcg, false); + + /* Leave page->index set: truncation relies upon it */ + page->mapping = NULL; + put_page(page); + __ClearPageLocked(page); + } + + return nr_added ?: error; } /** @@ -1021,7 +1055,11 @@ err_uncharge: int add_to_page_cache(struct page *page, struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask) { - return __add_to_page_cache(page, mapping, offset, gfp_mask, NULL); + int ret = add_to_page_cache_vec(&page, 1, mapping, offset, + gfp_mask, NULL); + if 
(ret < 0) + return ret; + return 0; } EXPORT_SYMBOL(add_to_page_cache); @@ -1031,7 +1069,8 @@ int add_to_page_cache_lru(struct page *page, struct address_space *mapping, void *shadow = NULL; int ret; - ret = __add_to_page_cache(page, mapping, offset, gfp_mask, &shadow); + ret = add_to_page_cache_vec(&page, 1, mapping, offset, + gfp_mask, &shadow); if (unlikely(ret)) return ret; |