author     Liam R. Howlett <Liam.Howlett@Oracle.com>    2020-07-24 15:06:25 -0400
committer  Liam R. Howlett <Liam.Howlett@Oracle.com>    2020-07-25 21:07:09 -0400
commit     d1c33367447884830b4087a429a814c2d09d88dd (patch)
tree       366d2ead2c052acf17656fd416fd0d5c559b787a
parent     7fd58758bd618605182f7b22ff9942a947539a1c (diff)
mm/mmap and friends: Remove rb tree    (howlett/maple/20200727)
Remove the RB tree and start using the maple tree for vm_area_struct tracking.

Signed-off-by: Liam R. Howlett <Liam.Howlett@Oracle.com>
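[ Illustrative sketch (not a hunk from this diff): the visible effect on
  callers is that the rbtree insertion state (rb_link/rb_parent) disappears,
  because the maple tree is indexed directly by the VMA's address range.
  Using the find_vma_links()/range_has_overlap() and vma_link() signatures as
  they appear in this patch, insertion-style callers go from roughly:

	struct vm_area_struct *prev;
	struct rb_node **rb_link, *rb_parent;

	/* Walk mm->mm_rb for prev plus the rbtree insertion point. */
	if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent))
		return -ENOMEM;			/* an existing VMA overlaps */
	vma_link(mm, vma, prev, rb_link, rb_parent);

  to:

	struct vm_area_struct *prev;

	/* Ask the maple tree whether anything overlaps; only prev is still needed. */
	if (range_has_overlap(mm, addr, addr + len, &prev))
		return -ENOMEM;
	vma_link(mm, vma, prev);

  The same simplification shows up below in munmap_vma_range(),
  __insert_vm_struct(), insert_vm_struct() and copy_vma(). ]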
-rw-r--r--  drivers/firmware/efi/efi.c  |   1
-rw-r--r--  include/linux/mm_types.h    |   1
-rw-r--r--  kernel/fork.c               |   8
-rw-r--r--  mm/init-mm.c                |   2
-rw-r--r--  mm/mmap.c                   | 492
-rw-r--r--  mm/util.c                   |   8
6 files changed, 105 insertions, 407 deletions
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
index 5114cae4ec97..8eaf97febbc8 100644
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
@@ -51,7 +51,6 @@ static unsigned long __initdata mem_reserve = EFI_INVALID_TABLE_ADDR;
static unsigned long __initdata rt_prop = EFI_INVALID_TABLE_ADDR;
struct mm_struct efi_mm = {
- .mm_rb = RB_ROOT,
.mm_users = ATOMIC_INIT(2),
.mm_count = ATOMIC_INIT(1),
MMAP_LOCK_INITIALIZER(efi_mm)
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 90021d0c0530..104d45ba921d 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -386,7 +386,6 @@ struct mm_struct {
struct {
struct vm_area_struct *mmap; /* list of VMAs */
struct maple_tree mm_mt;
- struct rb_root mm_rb;
u64 vmacache_seqnum; /* per-thread vmacache */
#ifdef CONFIG_MMU
unsigned long (*get_unmapped_area) (struct file *filp,
diff --git a/kernel/fork.c b/kernel/fork.c
index 04b480e532b2..ee4d44519441 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -486,7 +486,6 @@ static __latent_entropy int dup_mmap(struct mm_struct *mm,
struct mm_struct *oldmm)
{
struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
- struct rb_node **rb_link, *rb_parent;
int retval;
unsigned long charge;
MA_STATE(old_mas, &oldmm->mm_mt, 0, 0);
@@ -512,8 +511,6 @@ static __latent_entropy int dup_mmap(struct mm_struct *mm,
mm->exec_vm = oldmm->exec_vm;
mm->stack_vm = oldmm->stack_vm;
- rb_link = &mm->mm_rb.rb_node;
- rb_parent = NULL;
pprev = &mm->mmap;
retval = ksm_fork(mm, oldmm);
if (retval)
@@ -609,10 +606,6 @@ static __latent_entropy int dup_mmap(struct mm_struct *mm,
tmp->vm_prev = prev;
prev = tmp;
- __vma_link_rb(mm, tmp, rb_link, rb_parent);
- rb_link = &tmp->vm_rb.rb_right;
- rb_parent = &tmp->vm_rb;
-
/* Link the vma into the MT */
vma_store(mm, tmp);
@@ -1034,7 +1027,6 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p,
struct user_namespace *user_ns)
{
mm->mmap = NULL;
- mm->mm_rb = RB_ROOT;
mt_init_flags(&mm->mm_mt, MAPLE_ALLOC_RANGE);
mm->vmacache_seqnum = 0;
atomic_set(&mm->mm_users, 1);
diff --git a/mm/init-mm.c b/mm/init-mm.c
index 27229044a070..49d803fc4cde 100644
--- a/mm/init-mm.c
+++ b/mm/init-mm.c
@@ -1,6 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/mm_types.h>
-#include <linux/rbtree.h>
#include <linux/maple_tree.h>
#include <linux/rwsem.h>
#include <linux/spinlock.h>
@@ -28,7 +27,6 @@
* and size this cpu_bitmask to NR_CPUS.
*/
struct mm_struct init_mm = {
- .mm_rb = RB_ROOT,
.mm_mt = MTREE_INIT(mm_mt, MAPLE_ALLOC_RANGE),
.pgd = swapper_pg_dir,
.mm_users = ATOMIC_INIT(2),
diff --git a/mm/mmap.c b/mm/mmap.c
index c2d1fac3ba53..50cbb1150e0b 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -38,7 +38,6 @@
#include <linux/audit.h>
#include <linux/khugepaged.h>
#include <linux/uprobes.h>
-#include <linux/rbtree_augmented.h>
#include <linux/notifier.h>
#include <linux/memory.h>
#include <linux/printk.h>
@@ -289,93 +288,6 @@ out:
return retval;
}
-static inline unsigned long vma_compute_gap(struct vm_area_struct *vma)
-{
- unsigned long gap, prev_end;
-
- /*
- * Note: in the rare case of a VM_GROWSDOWN above a VM_GROWSUP, we
- * allow two stack_guard_gaps between them here, and when choosing
- * an unmapped area; whereas when expanding we only require one.
- * That's a little inconsistent, but keeps the code here simpler.
- */
- gap = vm_start_gap(vma);
- if (vma->vm_prev) {
- prev_end = vm_end_gap(vma->vm_prev);
- if (gap > prev_end)
- gap -= prev_end;
- else
- gap = 0;
- }
- return gap;
-}
-
-#ifdef CONFIG_DEBUG_VM_RB
-static unsigned long vma_compute_subtree_gap(struct vm_area_struct *vma)
-{
- unsigned long max = vma_compute_gap(vma), subtree_gap;
- if (vma->vm_rb.rb_left) {
- subtree_gap = rb_entry(vma->vm_rb.rb_left,
- struct vm_area_struct, vm_rb)->rb_subtree_gap;
- if (subtree_gap > max)
- max = subtree_gap;
- }
- if (vma->vm_rb.rb_right) {
- subtree_gap = rb_entry(vma->vm_rb.rb_right,
- struct vm_area_struct, vm_rb)->rb_subtree_gap;
- if (subtree_gap > max)
- max = subtree_gap;
- }
- return max;
-}
-
-static int browse_rb(struct mm_struct *mm)
-{
- struct rb_root *root = &mm->mm_rb;
- int i = 0, j, bug = 0;
- struct rb_node *nd, *pn = NULL;
- unsigned long prev = 0, pend = 0;
-
- for (nd = rb_first(root); nd; nd = rb_next(nd)) {
- struct vm_area_struct *vma;
- vma = rb_entry(nd, struct vm_area_struct, vm_rb);
- if (vma->vm_start < prev) {
- pr_emerg("vm_start %lx < prev %lx\n",
- vma->vm_start, prev);
- bug = 1;
- }
- if (vma->vm_start < pend) {
- pr_emerg("vm_start %lx < pend %lx\n",
- vma->vm_start, pend);
- bug = 1;
- }
- if (vma->vm_start > vma->vm_end) {
- pr_emerg("vm_start %lx > vm_end %lx\n",
- vma->vm_start, vma->vm_end);
- bug = 1;
- }
- spin_lock(&mm->page_table_lock);
- if (vma->rb_subtree_gap != vma_compute_subtree_gap(vma)) {
- pr_emerg("free gap %lx, correct %lx\n",
- vma->rb_subtree_gap,
- vma_compute_subtree_gap(vma));
- bug = 1;
- }
- spin_unlock(&mm->page_table_lock);
- i++;
- pn = nd;
- prev = vma->vm_start;
- pend = vma->vm_end;
- }
- j = 0;
- for (nd = pn; nd; nd = rb_prev(nd))
- j++;
- if (i != j) {
- pr_emerg("backwards %d, forwards %d\n", j, i);
- bug = 1;
- }
- return bug ? -1 : i;
-}
#if defined(CONFIG_DEBUG_MAPLE_TREE)
extern void mt_dump(const struct maple_tree *mt);
@@ -403,17 +315,25 @@ static void validate_mm_mt(struct mm_struct *mm)
dump_stack();
#ifdef CONFIG_DEBUG_VM
dump_vma(vma_mt);
- pr_emerg("and next in rb\n");
+ pr_emerg("and vm_next\n");
dump_vma(vma->vm_next);
-#endif
+#endif // CONFIG_DEBUG_VM
pr_emerg("mt piv: %px %lu - %lu\n", vma_mt,
mas.index, mas.last);
pr_emerg("mt vma: %px %lu - %lu\n", vma_mt,
vma_mt->vm_start, vma_mt->vm_end);
- pr_emerg("rb vma: %px %lu - %lu\n", vma,
+ if (vma->vm_prev) {
+ pr_emerg("ll prev: %px %lu - %lu\n",
+ vma->vm_prev, vma->vm_prev->vm_start,
+ vma->vm_prev->vm_end);
+ }
+ pr_emerg("ll vma: %px %lu - %lu\n", vma,
vma->vm_start, vma->vm_end);
- pr_emerg("rb->next = %px %lu - %lu\n", vma->vm_next,
- vma->vm_next->vm_start, vma->vm_next->vm_end);
+ if (vma->vm_next) {
+ pr_emerg("ll next: %px %lu - %lu\n",
+ vma->vm_next, vma->vm_next->vm_start,
+ vma->vm_next->vm_end);
+ }
mt_dump(mas.tree);
if (vma_mt->vm_end != mas.last + 1) {
@@ -439,20 +359,6 @@ static void validate_mm_mt(struct mm_struct *mm)
rcu_read_unlock();
mt_validate(&mm->mm_mt);
}
-#endif
-static void validate_mm_rb(struct rb_root *root, struct vm_area_struct *ignore)
-{
- struct rb_node *nd;
-
- for (nd = rb_first(root); nd; nd = rb_next(nd)) {
- struct vm_area_struct *vma;
- vma = rb_entry(nd, struct vm_area_struct, vm_rb);
- VM_BUG_ON_VMA(vma != ignore &&
- vma->rb_subtree_gap != vma_compute_subtree_gap(vma),
- vma);
- }
-}
-
static void validate_mm(struct mm_struct *mm)
{
int bug = 0;
@@ -460,6 +366,8 @@ static void validate_mm(struct mm_struct *mm)
unsigned long highest_address = 0;
struct vm_area_struct *vma = mm->mmap;
+ validate_mm_mt(mm);
+
while (vma) {
struct anon_vma *anon_vma = vma->anon_vma;
struct anon_vma_chain *avc;
@@ -484,82 +392,12 @@ static void validate_mm(struct mm_struct *mm)
mm->highest_vm_end, highest_address);
bug = 1;
}
- i = browse_rb(mm);
- if (i != mm->map_count) {
- if (i != -1)
- pr_emerg("map_count %d rb %d\n", mm->map_count, i);
- bug = 1;
- }
VM_BUG_ON_MM(bug, mm);
}
-#else
-#define validate_mm_rb(root, ignore) do { } while (0)
+#else // !CONFIG_DEBUG_MAPLE_TREE
#define validate_mm_mt(root) do { } while (0)
#define validate_mm(mm) do { } while (0)
-#endif
-
-RB_DECLARE_CALLBACKS_MAX(static, vma_gap_callbacks,
- struct vm_area_struct, vm_rb,
- unsigned long, rb_subtree_gap, vma_compute_gap)
-
-/*
- * Update augmented rbtree rb_subtree_gap values after vma->vm_start or
- * vma->vm_prev->vm_end values changed, without modifying the vma's position
- * in the rbtree.
- */
-static void vma_gap_update(struct vm_area_struct *vma)
-{
- /*
- * As it turns out, RB_DECLARE_CALLBACKS_MAX() already created
- * a callback function that does exactly what we want.
- */
- vma_gap_callbacks_propagate(&vma->vm_rb, NULL);
-}
-
-static inline void vma_rb_insert(struct vm_area_struct *vma,
- struct rb_root *root)
-{
- /* All rb_subtree_gap values must be consistent prior to insertion */
- validate_mm_rb(root, NULL);
-
- rb_insert_augmented(&vma->vm_rb, root, &vma_gap_callbacks);
-}
-
-static void __vma_rb_erase(struct vm_area_struct *vma, struct rb_root *root)
-{
- /*
- * Note rb_erase_augmented is a fairly large inline function,
- * so make sure we instantiate it only once with our desired
- * augmented rbtree callbacks.
- */
- rb_erase_augmented(&vma->vm_rb, root, &vma_gap_callbacks);
-}
-
-static __always_inline void vma_rb_erase_ignore(struct vm_area_struct *vma,
- struct rb_root *root,
- struct vm_area_struct *ignore)
-{
- /*
- * All rb_subtree_gap values must be consistent prior to erase,
- * with the possible exception of the "next" vma being erased if
- * next->vm_start was reduced.
- */
- validate_mm_rb(root, ignore);
-
- __vma_rb_erase(vma, root);
-}
-
-static __always_inline void vma_rb_erase(struct vm_area_struct *vma,
- struct rb_root *root)
-{
- /*
- * All rb_subtree_gap values must be consistent prior to erase,
- * with the possible exception of the vma being erased.
- */
- validate_mm_rb(root, vma);
-
- __vma_rb_erase(vma, root);
-}
+#endif // CONFIG_DEBUG_MAPLE_TREE
/*
* vma has some anon_vma assigned, and is already inserted on that
@@ -593,38 +431,29 @@ anon_vma_interval_tree_post_update_vma(struct vm_area_struct *vma)
anon_vma_interval_tree_insert(avc, &avc->anon_vma->rb_root);
}
-static int find_vma_links(struct mm_struct *mm, unsigned long addr,
- unsigned long end, struct vm_area_struct **pprev,
- struct rb_node ***rb_link, struct rb_node **rb_parent)
+/* Private
+ * range_has_overlap() - Check the @start - @end range for overlapping VMAs and
 * set up a pointer to the previous VMA
+ *
+ * @mm - the mm struct
+ * @start - the start address of the range
+ * @end - the end address of the range
+ * @pprev - the pointer to the pointer of the previous VMA
+ *
+ * Returns: True if there is an overlapping VMA, false otherwise
+ */
+static bool range_has_overlap(struct mm_struct *mm, unsigned long start,
+ unsigned long end, struct vm_area_struct **pprev)
{
- struct rb_node **__rb_link, *__rb_parent, *rb_prev;
-
- __rb_link = &mm->mm_rb.rb_node;
- rb_prev = __rb_parent = NULL;
-
- while (*__rb_link) {
- struct vm_area_struct *vma_tmp;
+ struct vm_area_struct *existing;
- __rb_parent = *__rb_link;
- vma_tmp = rb_entry(__rb_parent, struct vm_area_struct, vm_rb);
-
- if (vma_tmp->vm_end > addr) {
- /* Fail if an existing vma overlaps the area */
- if (vma_tmp->vm_start < end)
- return -ENOMEM;
- __rb_link = &__rb_parent->rb_left;
- } else {
- rb_prev = __rb_parent;
- __rb_link = &__rb_parent->rb_right;
- }
- }
+ MA_STATE(mas, &mm->mm_mt, start, start);
+ existing = mas_find(&mas, end - 1);
+ if (mas.node == MAS_NONE)
+ mas_reset(&mas);
- *pprev = NULL;
- if (rb_prev)
- *pprev = rb_entry(rb_prev, struct vm_area_struct, vm_rb);
- *rb_link = __rb_link;
- *rb_parent = __rb_parent;
- return 0;
+ *pprev = mas_prev(&mas, 0);
+ return existing ? true : false;
}
/* Private
@@ -655,83 +484,43 @@ static inline struct vm_area_struct *next_vma_or_first(struct mm_struct *mm,
* @start: The start address.
* @len: The length of the range.
* @pprev: pointer to the pointer that will be set to previous vm_area_struct
- * @rb_link - the rb_node
- * @rb_parent - the parent rb_node
*
* Returns: -ENOMEM on munmap failure or 0 on success.
*/
static inline int
munmap_vma_range(struct mm_struct *mm, unsigned long start, unsigned long len,
- struct vm_area_struct **pprev, struct rb_node ***rb_link,
- struct rb_node **rb_parent, struct list_head *uf)
+ struct vm_area_struct **pprev, struct list_head *uf)
{
-
- while (find_vma_links(mm, start, start + len, pprev, rb_link, rb_parent))
+ // Needs optimization.
+ while (range_has_overlap(mm, start, start + len, pprev))
if (do_munmap(mm, start, len, uf))
return -ENOMEM;
-
return 0;
}
static unsigned long count_vma_pages_range(struct mm_struct *mm,
unsigned long addr, unsigned long end)
{
unsigned long nr_pages = 0;
- unsigned long nr_mt_pages = 0;
struct vm_area_struct *vma;
+ unsigned long vm_start, vm_end;
/* Find first overlaping mapping */
vma = find_vma_intersection(mm, addr, end);
if (!vma)
return 0;
- nr_pages = (min(end, vma->vm_end) -
- max(addr, vma->vm_start)) >> PAGE_SHIFT;
-
- /* Iterate over the rest of the overlaps */
- for (vma = vma->vm_next; vma; vma = vma->vm_next) {
- unsigned long overlap_len;
-
- if (vma->vm_start > end)
- break;
-
- overlap_len = min(end, vma->vm_end) - vma->vm_start;
- nr_pages += overlap_len >> PAGE_SHIFT;
- }
+ vm_start = vma->vm_start;
+ vm_end = vma->vm_end;
- mt_for_each(&mm->mm_mt, vma, addr, end) {
- nr_mt_pages +=
- (min(end, vma->vm_end) - vma->vm_start) >> PAGE_SHIFT;
- }
+ nr_pages = (min(end, vm_end) - max(addr, vm_start)) >> PAGE_SHIFT;
- VM_BUG_ON_MM(nr_pages != nr_mt_pages, mm);
+ /* Iterate over the rest of the overlaps */
+ mt_for_each(&mm->mm_mt, vma, addr, end)
+ nr_pages += (min(end, vm_end) - vm_start) >> PAGE_SHIFT;
return nr_pages;
}
-void __vma_link_rb(struct mm_struct *mm, struct vm_area_struct *vma,
- struct rb_node **rb_link, struct rb_node *rb_parent)
-{
- /* Update tracking information for the gap following the new vma. */
- if (vma->vm_next)
- vma_gap_update(vma->vm_next);
- else
- mm->highest_vm_end = vm_end_gap(vma);
-
- /*
- * vma->vm_prev wasn't known when we followed the rbtree to find the
- * correct insertion point for that vma. As a result, we could not
- * update the vma vm_rb parents rb_subtree_gap values on the way down.
- * So, we first insert the vma with a zero rb_subtree_gap value
- * (to be consistent with what we did on the way down), and then
- * immediately update the gap to the correct value. Finally we
- * rebalance the rbtree after all augmented values have been set.
- */
- rb_link_node(&vma->vm_rb, rb_parent, rb_link);
- vma->rb_subtree_gap = 0;
- vma_gap_update(vma);
- vma_rb_insert(vma, &mm->mm_rb);
-}
-
static void __vma_link_file(struct vm_area_struct *vma)
{
struct file *file;
@@ -799,17 +588,14 @@ void vma_store(struct mm_struct *mm, struct vm_area_struct *vma)
static void
__vma_link(struct mm_struct *mm, struct vm_area_struct *vma,
- struct vm_area_struct *prev, struct rb_node **rb_link,
- struct rb_node *rb_parent)
+ struct vm_area_struct *prev)
{
vma_mt_store(mm, vma);
__vma_link_list(mm, vma, prev);
- __vma_link_rb(mm, vma, rb_link, rb_parent);
}
static void vma_link(struct mm_struct *mm, struct vm_area_struct *vma,
- struct vm_area_struct *prev, struct rb_node **rb_link,
- struct rb_node *rb_parent)
+ struct vm_area_struct *prev)
{
struct address_space *mapping = NULL;
@@ -818,7 +604,7 @@ static void vma_link(struct mm_struct *mm, struct vm_area_struct *vma,
i_mmap_lock_write(mapping);
}
- __vma_link(mm, vma, prev, rb_link, rb_parent);
+ __vma_link(mm, vma, prev);
__vma_link_file(vma);
if (mapping)
@@ -830,30 +616,18 @@ static void vma_link(struct mm_struct *mm, struct vm_area_struct *vma,
/*
* Helper for vma_adjust() in the split_vma insert case: insert a vma into the
- * mm's list and rbtree. It has already been inserted into the interval tree.
+ * mm's list and the mm tree. It has already been inserted into the interval tree.
*/
static void __insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
{
struct vm_area_struct *prev;
- struct rb_node **rb_link, *rb_parent;
- if (find_vma_links(mm, vma->vm_start, vma->vm_end,
- &prev, &rb_link, &rb_parent))
+ if (range_has_overlap(mm, vma->vm_start, vma->vm_end, &prev))
BUG();
- __vma_link(mm, vma, prev, rb_link, rb_parent);
+ __vma_link(mm, vma, prev);
mm->map_count++;
}
-static __always_inline void __vma_unlink_common(struct mm_struct *mm,
- struct vm_area_struct *vma,
- struct vm_area_struct *ignore)
-{
- vma_rb_erase_ignore(vma, &mm->mm_rb, ignore);
- __vma_unlink_list(mm, vma);
- /* Kill the cache */
- vmacache_invalidate(mm);
-}
-
/*
* We cannot adjust vm_start, vm_end, vm_pgoff fields of a vma that
* is already present in an i_mmap tree without adjusting the tree.
@@ -875,9 +649,6 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
long adjust_next = 0;
int remove_next = 0;
- validate_mm(mm);
- validate_mm_mt(mm);
-
if (next && !insert) {
struct vm_area_struct *exporter = NULL, *importer = NULL;
@@ -1038,25 +809,9 @@ again:
}
if (remove_next) {
- /*
- * vma_merge has merged next into vma, and needs
- * us to remove next before dropping the locks.
- * Since we have expanded over this vma, the maple tree will
- * have overwritten by storing the value
- */
- if (remove_next != 3)
- __vma_unlink_common(mm, next, next);
- else
- /*
- * vma is not before next if they've been
- * swapped.
- *
- * pre-swap() next->vm_start was reduced so
- * tell validate_mm_rb to ignore pre-swap()
- * "next" (which is stored in post-swap()
- * "vma").
- */
- __vma_unlink_common(mm, next, vma);
+ __vma_unlink_list(mm, next);
+ /* Kill the cache */
+ vmacache_invalidate(mm);
if (file)
__remove_shared_vm_struct(next, file, mapping);
} else if (insert) {
@@ -1066,15 +821,8 @@ again:
* (it may either follow vma or precede it).
*/
__insert_vm_struct(mm, insert);
- } else {
- if (start_changed)
- vma_gap_update(vma);
- if (end_changed) {
- if (!next)
- mm->highest_vm_end = vm_end_gap(vma);
- else if (!adjust_next)
- vma_gap_update(next);
- }
+ } else if (end_changed && !next) {
+ mm->highest_vm_end = vm_end_gap(vma);
}
if (anon_vma) {
@@ -1133,10 +881,7 @@ again:
remove_next = 1;
end = next->vm_end;
goto again;
- }
- else if (next)
- vma_gap_update(next);
- else {
+ } else if (!next) {
/*
* If remove_next == 2 we obviously can't
* reach this path.
@@ -1163,8 +908,6 @@ again:
uprobe_mmap(insert);
validate_mm(mm);
- validate_mm_mt(mm);
-
return 0;
}
@@ -1311,7 +1054,6 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
struct vm_area_struct *area, *next;
int err;
- validate_mm_mt(mm);
/*
* We later require that vma->vm_flags == vm_flags,
* so this tests vma->vm_flags & VM_SPECIAL, too.
@@ -1387,7 +1129,6 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
khugepaged_enter_vma_merge(area, vm_flags);
return area;
}
- validate_mm_mt(mm);
return NULL;
}
@@ -1558,6 +1299,7 @@ unsigned long do_mmap(struct file *file, unsigned long addr,
struct mm_struct *mm = current->mm;
int pkey = 0;
+ validate_mm(mm);
*populate = 0;
if (!len)
@@ -1877,10 +1619,8 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma, *prev;
int error;
- struct rb_node **rb_link, *rb_parent;
unsigned long charged = 0;
- validate_mm_mt(mm);
/* Check against address space limit. */
if (!may_expand_vm(mm, vm_flags, len >> PAGE_SHIFT)) {
unsigned long nr_pages;
@@ -1896,8 +1636,8 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
return -ENOMEM;
}
- /* Clear old maps, set up prev, rb_link, rb_parent, and uf */
- if (munmap_vma_range(mm, addr, len, &prev, &rb_link, &rb_parent, uf))
+ /* Clear old maps, set up prev and uf */
+ if (munmap_vma_range(mm, addr, len, &prev, uf))
return -ENOMEM;
/*
* Private writable mapping: check memory availability
@@ -1960,8 +1700,8 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
*
* Answer: Yes, several device drivers can do it in their
* f_op->mmap method. -DaveM
- * Bug: If addr is changed, prev, rb_link, rb_parent should
- * be updated for vma_link()
+ * Bug: If addr is changed, prev and the maple tree data should
+ * be updated for vma_link()
*/
WARN_ON_ONCE(addr != vma->vm_start);
@@ -1975,7 +1715,7 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
vma_set_anonymous(vma);
}
- vma_link(mm, vma, prev, rb_link, rb_parent);
+ vma_link(mm, vma, prev);
/* Once vma denies write, undo our temporary denial count */
if (file) {
if (vm_flags & VM_SHARED)
@@ -2011,7 +1751,6 @@ out:
vma_set_page_prot(vma);
- validate_mm_mt(mm);
return addr;
unmap_and_free_vma:
@@ -2031,7 +1770,6 @@ free_vma:
unacct_error:
if (charged)
vm_unacct_memory(charged);
- validate_mm_mt(mm);
return error;
}
@@ -2049,7 +1787,6 @@ static unsigned long unmapped_area(struct vm_unmapped_area_info *info)
unsigned long length, gap;
MA_STATE(mas, &current->mm->mm_mt, 0, 0);
- validate_mm(current->mm);
/* Adjust search length to account for worst case alignment overhead */
length = info->length + info->align_mask;
@@ -2082,7 +1819,6 @@ static unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info)
unsigned long length, gap;
MA_STATE(mas, &current->mm->mm_mt, 0, 0);
- validate_mm_mt(current->mm);
/* Adjust search length to account for worst case alignment overhead */
length = info->length + info->align_mask;
if (length < info->length)
@@ -2323,7 +2059,7 @@ find_vma_prev(struct mm_struct *mm, unsigned long addr,
rcu_read_lock();
vma = mas_find(&mas, ULONG_MAX);
- if (!vma)
+ if (mas.node == MAS_NONE)
mas_reset(&mas);
*pprev = mas_prev(&mas, 0);
@@ -2389,7 +2125,6 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
unsigned long gap_addr;
int error = 0;
- validate_mm_mt(mm);
if (!(vma->vm_flags & VM_GROWSUP))
return -EFAULT;
@@ -2436,15 +2171,13 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
error = acct_stack_growth(vma, size, grow);
if (!error) {
/*
- * vma_gap_update() doesn't support concurrent
- * updates, but we only hold a shared mmap_lock
- * lock here, so we need to protect against
- * concurrent vma expansions.
- * anon_vma_lock_write() doesn't help here, as
- * we don't guarantee that all growable vmas
- * in a mm share the same root anon vma.
- * So, we reuse mm->page_table_lock to guard
- * against concurrent vma expansions.
+ * We only hold a shared mmap_lock lock here, so
+ * we need to protect against concurrent vma
+ * expansions. anon_vma_lock_write() doesn't
+ * help here, as we don't guarantee that all
+ * growable vmas in a mm share the same root
+ * anon vma. So, we reuse mm->page_table_lock
+ * to guard against concurrent vma expansions.
*/
spin_lock(&mm->page_table_lock);
if (vma->vm_flags & VM_LOCKED)
@@ -2452,10 +2185,9 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
vm_stat_account(mm, vma->vm_flags, grow);
anon_vma_interval_tree_pre_update_vma(vma);
vma->vm_end = address;
+ vma_mt_store(mm, vma);
anon_vma_interval_tree_post_update_vma(vma);
- if (vma->vm_next)
- vma_gap_update(vma->vm_next);
- else
+ if (!vma->vm_next)
mm->highest_vm_end = vm_end_gap(vma);
spin_unlock(&mm->page_table_lock);
@@ -2466,7 +2198,6 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
anon_vma_unlock_write(vma->anon_vma);
khugepaged_enter_vma_merge(vma, vma->vm_flags);
validate_mm(mm);
- validate_mm_mt(mm);
return error;
}
#endif /* CONFIG_STACK_GROWSUP || CONFIG_IA64 */
@@ -2481,7 +2212,6 @@ int expand_downwards(struct vm_area_struct *vma,
struct vm_area_struct *prev;
int error = 0;
- validate_mm(mm);
address &= PAGE_MASK;
if (address < mmap_min_addr)
return -EPERM;
@@ -2518,15 +2248,13 @@ int expand_downwards(struct vm_area_struct *vma,
error = acct_stack_growth(vma, size, grow);
if (!error) {
/*
- * vma_gap_update() doesn't support concurrent
- * updates, but we only hold a shared mmap_lock
- * lock here, so we need to protect against
- * concurrent vma expansions.
- * anon_vma_lock_write() doesn't help here, as
- * we don't guarantee that all growable vmas
- * in a mm share the same root anon vma.
- * So, we reuse mm->page_table_lock to guard
- * against concurrent vma expansions.
+ * We only hold a shared mmap_lock lock here, so
+ * we need to protect against concurrent vma
+ * expansions. anon_vma_lock_write() doesn't
+ * help here, as we don't guarantee that all
+ * growable vmas in a mm share the same root
+ * anon vma. So, we reuse mm->page_table_lock
+ * to guard against concurrent vma expansions.
*/
spin_lock(&mm->page_table_lock);
if (vma->vm_flags & VM_LOCKED)
@@ -2538,7 +2266,6 @@ int expand_downwards(struct vm_area_struct *vma,
/* Overwrite old entry in mtree. */
vma_mt_store(mm, vma);
anon_vma_interval_tree_post_update_vma(vma);
- vma_gap_update(vma);
spin_unlock(&mm->page_table_lock);
perf_event_mmap(vma);
@@ -2682,17 +2409,15 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
insertion_point = (prev ? &prev->vm_next : &mm->mmap);
vma->vm_prev = NULL;
do {
- vma_rb_erase(vma, &mm->mm_rb);
vma_mt_erase(mm, vma);
mm->map_count--;
tail_vma = vma;
vma = vma->vm_next;
} while (vma && vma->vm_start < end);
*insertion_point = vma;
- if (vma) {
+ if (vma)
vma->vm_prev = prev;
- vma_gap_update(vma);
- } else
+ else
mm->highest_vm_end = prev ? vm_end_gap(prev) : 0;
tail_vma->vm_next = NULL;
@@ -2798,11 +2523,7 @@ int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len,
if (len == 0)
return -EINVAL;
- /*
- * arch_unmap() might do unmaps itself. It must be called
- * and finish any rbtree manipulation before this code
- * runs and also starts to manipulate the rbtree.
- */
+ /* arch_unmap() might do unmaps itself. */
arch_unmap(mm, start, end);
/* Find the first overlapping VMA */
@@ -2810,7 +2531,7 @@ int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len,
if (!vma)
return 0;
prev = vma->vm_prev;
- /* we have start < vma->vm_end */
+ /* we have start < vma->vm_end */
/* if it doesn't overlap, we have nothing.. */
if (vma->vm_start >= end)
@@ -2879,7 +2600,7 @@ int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len,
}
}
- /* Detach vmas from rbtree */
+ /* Detach vmas from the MM linked list and remove from the mm tree */
detach_vmas_to_be_unmapped(mm, vma, prev, end);
if (downgrade)
@@ -3042,11 +2763,11 @@ out:
* anonymous maps. eventually we may be able to do some
* brk-specific accounting here.
*/
-static int do_brk_flags(unsigned long addr, unsigned long len, unsigned long flags, struct list_head *uf)
+static int do_brk_flags(unsigned long addr, unsigned long len,
+ unsigned long flags, struct list_head *uf)
{
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma, *prev;
- struct rb_node **rb_link, *rb_parent;
pgoff_t pgoff = addr >> PAGE_SHIFT;
int error;
unsigned long mapped_addr;
@@ -3065,8 +2786,8 @@ static int do_brk_flags(unsigned long addr, unsigned long len, unsigned long fla
if (error)
return error;
- /* Clear old maps, set up prev, rb_link, rb_parent, and uf */
- if (munmap_vma_range(mm, addr, len, &prev, &rb_link, &rb_parent, uf))
+ /* Clear old maps, set up prev and uf */
+ if (munmap_vma_range(mm, addr, len, &prev, uf))
return -ENOMEM;
/* Check against address space limits *after* clearing old maps... */
@@ -3100,7 +2821,7 @@ static int do_brk_flags(unsigned long addr, unsigned long len, unsigned long fla
vma->vm_pgoff = pgoff;
vma->vm_flags = flags;
vma->vm_page_prot = vm_get_page_prot(flags);
- vma_link(mm, vma, prev, rb_link, rb_parent);
+ vma_link(mm, vma, prev);
out:
perf_event_mmap(vma);
mm->total_vm += len >> PAGE_SHIFT;
@@ -3225,26 +2946,10 @@ void exit_mmap(struct mm_struct *mm)
int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
{
struct vm_area_struct *prev;
- struct rb_node **rb_link, *rb_parent;
- unsigned long start = vma->vm_start;
- struct vm_area_struct *overlap = NULL;
- if (find_vma_links(mm, vma->vm_start, vma->vm_end,
- &prev, &rb_link, &rb_parent))
+ if (range_has_overlap(mm, vma->vm_start, vma->vm_end, &prev))
return -ENOMEM;
- overlap = mt_find(&mm->mm_mt, &start, vma->vm_end - 1);
- if (overlap) {
-
- pr_err("Found vma ending at %lu\n", start - 1);
- pr_err("vma : %lu => %lu-%lu\n", (unsigned long)overlap,
- overlap->vm_start, overlap->vm_end - 1);
-#if defined(CONFIG_DEBUG_MAPLE_TREE)
- mt_dump(&mm->mm_mt);
-#endif
- BUG();
- }
-
if ((vma->vm_flags & VM_ACCOUNT) &&
security_vm_enough_memory_mm(mm, vma_pages(vma)))
return -ENOMEM;
@@ -3266,7 +2971,7 @@ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
vma->vm_pgoff = vma->vm_start >> PAGE_SHIFT;
}
- vma_link(mm, vma, prev, rb_link, rb_parent);
+ vma_link(mm, vma, prev);
return 0;
}
@@ -3282,9 +2987,7 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
unsigned long vma_start = vma->vm_start;
struct mm_struct *mm = vma->vm_mm;
struct vm_area_struct *new_vma, *prev;
- struct rb_node **rb_link, *rb_parent;
bool faulted_in_anon_vma = true;
- unsigned long index = addr;
validate_mm_mt(mm);
/*
@@ -3296,10 +2999,9 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
faulted_in_anon_vma = false;
}
- if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent))
+ if (range_has_overlap(mm, addr, addr + len, &prev))
return NULL; /* should never get here */
- if (mt_find(&mm->mm_mt, &index, addr+len - 1))
- BUG();
+
new_vma = vma_merge(mm, prev, addr, addr + len, vma->vm_flags,
vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma),
vma->vm_userfaultfd_ctx);
@@ -3340,7 +3042,7 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
get_file(new_vma->vm_file);
if (new_vma->vm_ops && new_vma->vm_ops->open)
new_vma->vm_ops->open(new_vma);
- vma_link(mm, new_vma, prev, rb_link, rb_parent);
+ vma_link(mm, new_vma, prev);
*need_rmap_locks = false;
}
validate_mm_mt(mm);
diff --git a/mm/util.c b/mm/util.c
index c63c8e47be57..c9fae684912f 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -286,6 +286,8 @@ void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
vma->vm_next = next;
if (next)
next->vm_prev = vma;
+ else
+ mm->highest_vm_end = vm_end_gap(vma);
}
void __vma_unlink_list(struct mm_struct *mm, struct vm_area_struct *vma)
@@ -300,6 +302,12 @@ void __vma_unlink_list(struct mm_struct *mm, struct vm_area_struct *vma)
mm->mmap = next;
if (next)
next->vm_prev = prev;
+ else {
+ if (prev)
+ mm->highest_vm_end = vm_end_gap(prev);
+ else
+ mm->highest_vm_end = 0;
+ }
}
/* Check if the vma is being used as a stack by this task */
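[ Illustrative sketch (not a hunk from this diff): with VMAs stored by address
  range, walks over a range no longer need to chain through vma->vm_next;
  mt_for_each() visits every VMA overlapping the range directly, as
  count_vma_pages_range() in mm/mmap.c now does. A minimal form, assuming
  unsigned long addr/end bound the half-open range and mm->mm_mt was
  initialised with MAPLE_ALLOC_RANGE as in this patch:

	struct vm_area_struct *vma;
	unsigned long nr_pages = 0;

	/* Count the pages of each VMA that overlap [addr, end). */
	mt_for_each(&mm->mm_mt, vma, addr, end)
		nr_pages += (min(end, vma->vm_end) -
			     max(addr, vma->vm_start)) >> PAGE_SHIFT;
 ]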