author	Kent Overstreet <kent.overstreet@gmail.com>	2019-11-29 13:15:57 -0500
committer	Kent Overstreet <kent.overstreet@gmail.com>	2020-05-06 17:14:17 -0400
commit	a94d2540036e2a9b6da593e497efaa64b08c3f54 (patch)
tree	f22cdc337c50b43f737edc7ad745739e9c354f6d
parent	30b096e91ef276e27e5a9bdb74d87bb9f3952a97 (diff)
Revert "mm: pagecache add lock"
This reverts commit 8675f74ac343b24c14b3991dc6f999c138723eec.
Diffstat:
-rw-r--r--	fs/inode.c		 1
-rw-r--r--	include/linux/fs.h	24
-rw-r--r--	include/linux/sched.h	 4
-rw-r--r--	init/init_task.c	 1
-rw-r--r--	mm/filemap.c		82
5 files changed, 1 insertion(+), 111 deletions(-)
diff --git a/fs/inode.c b/fs/inode.c
index 6dec6f6336ce..36792cd800e5 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -351,7 +351,6 @@ EXPORT_SYMBOL(inc_nlink);
 static void __address_space_init_once(struct address_space *mapping)
 {
 	INIT_RADIX_TREE(&mapping->i_pages, GFP_ATOMIC | __GFP_ACCOUNT);
-	pagecache_lock_init(&mapping->add_lock);
 	init_rwsem(&mapping->i_mmap_rwsem);
 	INIT_LIST_HEAD(&mapping->private_list);
 	spin_lock_init(&mapping->private_lock);
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 207ccf1b9bfe..73dc9d477447 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -412,28 +412,6 @@ int pagecache_write_end(struct file *, struct address_space *mapping,
 				loff_t pos, unsigned len, unsigned copied,
 				struct page *page, void *fsdata);
 
-/*
- * Two-state lock - can be taken for add or block - both states are shared,
- * like read side of rwsem, but conflict with other state:
- */
-struct pagecache_lock {
-	atomic_long_t		v;
-	wait_queue_head_t	wait;
-};
-
-static inline void pagecache_lock_init(struct pagecache_lock *lock)
-{
-	atomic_long_set(&lock->v, 0);
-	init_waitqueue_head(&lock->wait);
-}
-
-void pagecache_add_put(struct pagecache_lock *);
-void pagecache_add_get(struct pagecache_lock *);
-void __pagecache_block_put(struct pagecache_lock *);
-void __pagecache_block_get(struct pagecache_lock *);
-void pagecache_block_put(struct pagecache_lock *);
-void pagecache_block_get(struct pagecache_lock *);
-
 struct address_space {
 	struct inode		*host;		/* owner: inode, block_device */
 	struct radix_tree_root	i_pages;	/* cached pages */
@@ -452,8 +430,6 @@ struct address_space {
 	struct list_head	private_list;	/* for use by the address_space */
 	void			*private_data;	/* ditto */
 	errseq_t		wb_err;
-	struct pagecache_lock	add_lock
-		____cacheline_aligned_in_smp;	/* protects adding new pages */
 } __attribute__((aligned(sizeof(long)))) __randomize_layout;
 	/*
 	 * On most architectures that alignment is already the case; but
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 80629ad3d05a..c69f308f3a53 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -41,7 +41,6 @@ struct io_context;
 struct mempolicy;
 struct nameidata;
 struct nsproxy;
-struct pagecache_lock;
 struct perf_event_context;
 struct pid_namespace;
 struct pipe_inode_info;
@@ -941,9 +940,6 @@ struct task_struct {
 	unsigned int			in_ubsan;
 #endif
 
-	/* currently held lock, for avoiding recursing in fault path: */
-	struct pagecache_lock		*pagecache_lock;
-
 	/* Journalling filesystem info: */
 	void				*journal_info;
 
diff --git a/init/init_task.c b/init/init_task.c
index f4be590da394..5aebe3be4d7c 100644
--- a/init/init_task.c
+++ b/init/init_task.c
@@ -114,7 +114,6 @@ struct task_struct init_task
 	},
 	.blocked	= {{0}},
 	.alloc_lock	= __SPIN_LOCK_UNLOCKED(init_task.alloc_lock),
-	.pagecache_lock = NULL,
 	.journal_info	= NULL,
 	INIT_CPU_TIMERS(init_task)
 	.pi_lock	= __RAW_SPIN_LOCK_UNLOCKED(init_task.pi_lock),
diff --git a/mm/filemap.c b/mm/filemap.c
index 37e16bd7b0fd..7e0dee304b54 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -111,73 +111,6 @@
  *   ->tasklist_lock            (memory_failure, collect_procs_ao)
  */
 
-static void __pagecache_lock_put(struct pagecache_lock *lock, long i)
-{
-	BUG_ON(atomic_long_read(&lock->v) == 0);
-
-	if (atomic_long_sub_return_release(i, &lock->v) == 0)
-		wake_up_all(&lock->wait);
-}
-
-static bool __pagecache_lock_tryget(struct pagecache_lock *lock, long i)
-{
-	long v = atomic_long_read(&lock->v), old;
-
-	do {
-		old = v;
-
-		if (i > 0 ? v < 0 : v > 0)
-			return false;
-	} while ((v = atomic_long_cmpxchg_acquire(&lock->v,
-					old, old + i)) != old);
-	return true;
-}
-
-static void __pagecache_lock_get(struct pagecache_lock *lock, long i)
-{
-	wait_event(lock->wait, __pagecache_lock_tryget(lock, i));
-}
-
-void pagecache_add_put(struct pagecache_lock *lock)
-{
-	__pagecache_lock_put(lock, 1);
-}
-EXPORT_SYMBOL(pagecache_add_put);
-
-void pagecache_add_get(struct pagecache_lock *lock)
-{
-	__pagecache_lock_get(lock, 1);
-}
-EXPORT_SYMBOL(pagecache_add_get);
-
-void __pagecache_block_put(struct pagecache_lock *lock)
-{
-	__pagecache_lock_put(lock, -1);
-}
-EXPORT_SYMBOL(__pagecache_block_put);
-
-void __pagecache_block_get(struct pagecache_lock *lock)
-{
-	__pagecache_lock_get(lock, -1);
-}
-EXPORT_SYMBOL(__pagecache_block_get);
-
-void pagecache_block_put(struct pagecache_lock *lock)
-{
-	BUG_ON(current->pagecache_lock != lock);
-	current->pagecache_lock = NULL;
-	__pagecache_lock_put(lock, -1);
-}
-EXPORT_SYMBOL(pagecache_block_put);
-
-void pagecache_block_get(struct pagecache_lock *lock)
-{
-	__pagecache_lock_get(lock, -1);
-	BUG_ON(current->pagecache_lock);
-	current->pagecache_lock = lock;
-}
-EXPORT_SYMBOL(pagecache_block_get);
-
 static int page_cache_tree_insert_vec(struct address_space *mapping,
 				      struct page *pages[],
 				      unsigned nr_pages,
@@ -982,9 +915,6 @@ static int add_to_page_cache_vec(struct page **pages,
 		page->index = offset + i;
 	}
 
-	if (current->pagecache_lock != &mapping->add_lock)
-		pagecache_add_get(&mapping->add_lock);
-
 	while (nr_added < nr_charged) {
 		error = radix_tree_maybe_preload(gfp_mask & GFP_RECLAIM_MASK);
 		if (error)
@@ -1014,9 +944,6 @@ static int add_to_page_cache_vec(struct page **pages,
 		radix_tree_preload_end();
 	}
 
-	if (current->pagecache_lock != &mapping->add_lock)
-		pagecache_add_put(&mapping->add_lock);
-
 	for (i = 0; i < nr_added; i++) {
 		struct page *page = pages[i];
 
@@ -2772,14 +2699,7 @@ vm_fault_t filemap_fault(struct vm_fault *vmf)
 	 * Do we have something in the page cache already?
 	 */
 	page = find_get_page(mapping, offset);
-	if (unlikely(current->pagecache_lock == &mapping->add_lock)) {
-		/*
-		 * fault from e.g. dio -> get_user_pages() - _don't_ want to do
-		 * readahead, only read in page we need:
-		 */
-		if (!page)
-			goto no_cached_page;
-	} else if (likely(page) && !(vmf->flags & FAULT_FLAG_TRIED)) {
+	if (likely(page) && !(vmf->flags & FAULT_FLAG_TRIED)) {
 		/*
 		 * We found the page, so try async readahead before
 		 * waiting for the lock.
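
For context on what the revert removes: pagecache_lock is a two-state shared lock built around a single counter v. The counter goes positive while one or more "add" holders are in, negative while one or more "block" holders are in; holders of the same state share the lock, and each side waits for the other to drain back to zero. The sketch below is a minimal userspace analogue of those semantics, not the kernel implementation: the struct and function names are illustrative, and a pthread mutex/condvar pair stands in for the kernel's lockless cmpxchg loop and wait_event().

/*
 * Userspace sketch of the reverted two-state lock semantics
 * (illustrative only -- not kernel code).
 *
 *   v > 0:  held shared in the "add" state by v holders
 *   v < 0:  held shared in the "block" state by -v holders
 *   v == 0: unlocked; either side may take it
 */
#include <pthread.h>

struct two_state_lock {
	long		v;
	pthread_mutex_t	mtx;
	pthread_cond_t	wait;
};

#define TWO_STATE_LOCK_INIT \
	{ 0, PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER }

/* i == +1 takes the "add" state, i == -1 the "block" state */
static void two_state_get(struct two_state_lock *lock, long i)
{
	pthread_mutex_lock(&lock->mtx);
	/* same state is shared; wait only while the opposite state is held */
	while (i > 0 ? lock->v < 0 : lock->v > 0)
		pthread_cond_wait(&lock->wait, &lock->mtx);
	lock->v += i;
	pthread_mutex_unlock(&lock->mtx);
}

static void two_state_put(struct two_state_lock *lock, long i)
{
	pthread_mutex_lock(&lock->mtx);
	lock->v -= i;
	if (lock->v == 0)
		/* last holder out: wake waiters for the opposite state */
		pthread_cond_broadcast(&lock->wait);
	pthread_mutex_unlock(&lock->mtx);
}

In the reverted patch, add_to_page_cache_vec() took the "add" side around page-cache insertion, while callers that needed to keep new pages out of the page cache (e.g. dio, via pagecache_block_get()) took the "block" side; current->pagecache_lock recorded the held lock so that a fault taken from dio -> get_user_pages() could recognize the recursion and skip readahead rather than deadlock, which is exactly the special case removed from filemap_fault() above.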