summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorKent Overstreet <kent.overstreet@gmail.com>2018-08-28 18:36:42 -0400
committerKent Overstreet <kent.overstreet@gmail.com>2020-05-06 17:14:15 -0400
commit458f2a536b8ae29256746aa382cd13aa634262c2 (patch)
treebc942df5dd0e44f5a47aa702b03951866f5dacd7
parent01d2b5f0a13e7217ed6a6b7a405931da7e62fef7 (diff)
fs: kill add_to_page_cache_locked()
It no longer has any users, so remove it.
-rw-r--r--include/linux/pagemap.h20
-rw-r--r--mm/filemap.c66
2 files changed, 34 insertions, 52 deletions
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index b1bd2186e6d2..b61fab5a1317 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -613,8 +613,8 @@ static inline int fault_in_pages_readable(const char __user *uaddr, int size)
return 0;
}
-int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
- pgoff_t index, gfp_t gfp_mask);
+int add_to_page_cache(struct page *page, struct address_space *mapping,
+ pgoff_t index, gfp_t gfp_mask);
int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
pgoff_t index, gfp_t gfp_mask);
extern void delete_from_page_cache(struct page *page);
@@ -623,22 +623,6 @@ int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask);
void delete_from_page_cache_batch(struct address_space *mapping,
struct pagevec *pvec);
-/*
- * Like add_to_page_cache_locked, but used to add newly allocated pages:
- * the page is new, so we can just run __SetPageLocked() against it.
- */
-static inline int add_to_page_cache(struct page *page,
- struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask)
-{
- int error;
-
- __SetPageLocked(page);
- error = add_to_page_cache_locked(page, mapping, offset, gfp_mask);
- if (unlikely(error))
- __ClearPageLocked(page);
- return error;
-}
-
static inline unsigned long dir_pages(struct inode *inode)
{
return (unsigned long)(inode->i_size + PAGE_SIZE - 1) >>
diff --git a/mm/filemap.c b/mm/filemap.c
index df727c1ecaa0..4b091be17905 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -913,10 +913,10 @@ int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
}
EXPORT_SYMBOL_GPL(replace_page_cache_page);
-static int __add_to_page_cache_locked(struct page *page,
- struct address_space *mapping,
- pgoff_t offset, gfp_t gfp_mask,
- void **shadowp)
+static int __add_to_page_cache(struct page *page,
+ struct address_space *mapping,
+ pgoff_t offset, gfp_t gfp_mask,
+ void **shadowp)
{
int huge = PageHuge(page);
struct mem_cgroup *memcg;
@@ -939,6 +939,7 @@ static int __add_to_page_cache_locked(struct page *page,
if (error)
goto err_uncharge;
+ __SetPageLocked(page);
get_page(page);
page->mapping = mapping;
page->index = offset;
@@ -966,6 +967,7 @@ err_insert:
/* Leave page->index set: truncation relies upon it */
xa_unlock_irq(&mapping->i_pages);
put_page(page);
+ __ClearPageLocked(page);
err_uncharge:
if (!huge)
mem_cgroup_cancel_charge(page, memcg, false);
@@ -973,22 +975,22 @@ err_uncharge:
}
/**
- * add_to_page_cache_locked - add a locked page to the pagecache
+ * add_to_page_cache - add a newly allocated page to the pagecache
* @page: page to add
* @mapping: the page's address_space
* @offset: page index
* @gfp_mask: page allocation mode
*
- * This function is used to add a page to the pagecache. It must be locked.
- * This function does not add the page to the LRU. The caller must do that.
+ * This function is used to add a page to the pagecache. It must be newly
+ * allocated. This function does not add the page to the LRU. The caller must
+ * do that.
*/
-int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
- pgoff_t offset, gfp_t gfp_mask)
+int add_to_page_cache(struct page *page, struct address_space *mapping,
+ pgoff_t offset, gfp_t gfp_mask)
{
- return __add_to_page_cache_locked(page, mapping, offset,
- gfp_mask, NULL);
+ return __add_to_page_cache(page, mapping, offset, gfp_mask, NULL);
}
-EXPORT_SYMBOL(add_to_page_cache_locked);
+EXPORT_SYMBOL(add_to_page_cache);
int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
pgoff_t offset, gfp_t gfp_mask)
@@ -996,29 +998,25 @@ int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
void *shadow = NULL;
int ret;
- __SetPageLocked(page);
- ret = __add_to_page_cache_locked(page, mapping, offset,
- gfp_mask, &shadow);
+ ret = __add_to_page_cache(page, mapping, offset, gfp_mask, &shadow);
if (unlikely(ret))
- __ClearPageLocked(page);
- else {
- /*
- * The page might have been evicted from cache only
- * recently, in which case it should be activated like
- * any other repeatedly accessed page.
- * The exception is pages getting rewritten; evicting other
- * data from the working set, only to cache data that will
- * get overwritten with something else, is a waste of memory.
- */
- if (!(gfp_mask & __GFP_WRITE) &&
- shadow && workingset_refault(shadow)) {
- SetPageActive(page);
- workingset_activation(page);
- } else
- ClearPageActive(page);
- lru_cache_add(page);
- }
- return ret;
+ return ret;
+
+ /*
+ * The page might have been evicted from cache only recently, in which
+ * case it should be activated like any other repeatedly accessed page.
+ * The exception is pages getting rewritten; evicting other data from
+ * the working set, only to cache data that will get overwritten with
+ * something else, is a waste of memory.
+ */
+ if (!(gfp_mask & __GFP_WRITE) &&
+ shadow && workingset_refault(shadow)) {
+ SetPageActive(page);
+ workingset_activation(page);
+ } else
+ ClearPageActive(page);
+ lru_cache_add(page);
+ return 0;
}
EXPORT_SYMBOL_GPL(add_to_page_cache_lru);