// SPDX-License-Identifier: GPL-2.0-only
/*
 * mm/interval_tree.c - interval tree for mapping->i_mmap
 *
 * Copyright (C) 2012, Michel Lespinasse <walken@google.com>
 */

#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/rmap.h>
#include <linux/interval_tree_generic.h>

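/*
 * The i_mmap tree is keyed by file page offsets: each VMA covers the
 * closed interval [vm_pgoff, vm_pgoff + vma_pages() - 1], as computed
 * by the two helpers below.
 */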
static inline unsigned long vma_start_pgoff(struct vm_area_struct *v)
{
	return v->vm_pgoff;
}

static inline unsigned long vma_last_pgoff(struct vm_area_struct *v)
{
	return v->vm_pgoff + vma_pages(v) - 1;
}

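/*
 * Build the augmented rbtree operations for the i_mmap tree.  This
 * macro expands into vma_interval_tree_insert(), _remove(),
 * _iter_first() and _iter_next(), plus the vma_interval_tree_augment
 * callbacks used by vma_interval_tree_insert_after() below.
 *
 * A minimal lookup sketch, using the vma_interval_tree_foreach()
 * wrapper from linux/mm.h; first/last are hypothetical pgoff bounds
 * and handle_overlap() is a stand-in shown for illustration only:
 *
 *	struct vm_area_struct *vma;
 *	vma_interval_tree_foreach(vma, &mapping->i_mmap, first, last)
 *		handle_overlap(vma);	// every VMA overlapping [first, last]
 */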
INTERVAL_TREE_DEFINE(struct vm_area_struct, shared.rb,
		     unsigned long, shared.rb_subtree_last,
		     vma_start_pgoff, vma_last_pgoff, /* empty */, vma_interval_tree)

/* Insert node immediately after prev in the interval tree */
void vma_interval_tree_insert_after(struct vm_area_struct *node,
				    struct vm_area_struct *prev,
				    struct rb_root_cached *root)
{
	struct rb_node **link;
	struct vm_area_struct *parent;
	unsigned long last = vma_last_pgoff(node);

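	/*
	 * The caller must hand in a node with exactly the same start
	 * pgoff as prev, e.g. a copy of prev such as dup_mmap() makes.
	 */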
	VM_BUG_ON_VMA(vma_start_pgoff(node) != vma_start_pgoff(prev), node);

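	/*
	 * Link node at prev's in-order successor position: as prev's
	 * right child if that slot is free, otherwise as the leftmost
	 * node of prev's right subtree.  Propagate the new endpoint
	 * into the cached rb_subtree_last of every node walked on the
	 * way down.
	 */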
	if (!prev->shared.rb.rb_right) {
		parent = prev;
		link = &prev->shared.rb.rb_right;
	} else {
		parent = rb_entry(prev->shared.rb.rb_right,
				  struct vm_area_struct, shared.rb);
		if (parent->shared.rb_subtree_last < last)
			parent->shared.rb_subtree_last = last;
		while (parent->shared.rb.rb_left) {
			parent = rb_entry(parent->shared.rb.rb_left,
					  struct vm_area_struct, shared.rb);
			if (parent->shared.rb_subtree_last < last)
				parent->shared.rb_subtree_last = last;
		}
		link = &parent->shared.rb.rb_left;
	}

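	/*
	 * The new node is a leaf, so its subtree_last is its own last;
	 * rb_insert_augmented() keeps the cached values consistent
	 * while rebalancing.
	 */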
	node->shared.rb_subtree_last = last;
	rb_link_node(&node->shared.rb, &parent->shared.rb, link);
	rb_insert_augmented(&node->shared.rb, &root->rb_root,
			    &vma_interval_tree_augment);
}

static inline unsigned long avc_start_pgoff(struct anon_vma_chain *avc)
{
	return vma_start_pgoff(avc->vma);
}

static inline unsigned long avc_last_pgoff(struct anon_vma_chain *avc)
{
	return vma_last_pgoff(avc->vma);
}

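/*
 * The same tree flavor again, this time for the anon_vma_chain nodes
 * hanging off an anon_vma.  The generated functions are static inline
 * and double-underscore prefixed; the wrappers below provide the
 * out-of-line entry points declared in linux/rmap.h and hook in the
 * CONFIG_DEBUG_VM_RB checks.
 */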
INTERVAL_TREE_DEFINE(struct anon_vma_chain, rb, unsigned long, rb_subtree_last,
		     avc_start_pgoff, avc_last_pgoff,
		     static inline, __anon_vma_interval_tree)

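/*
 * With CONFIG_DEBUG_VM_RB, cache the interval endpoints at insert time
 * so that anon_vma_interval_tree_verify() can detect a VMA whose pgoff
 * or size changed while it was still linked into the tree.
 */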
void anon_vma_interval_tree_insert(struct anon_vma_chain *node,
				   struct rb_root_cached *root)
{
#ifdef CONFIG_DEBUG_VM_RB
	node->cached_vma_start = avc_start_pgoff(node);
	node->cached_vma_last = avc_last_pgoff(node);
#endif
	__anon_vma_interval_tree_insert(node, root);
}

void anon_vma_interval_tree_remove(struct anon_vma_chain *node,
				   struct rb_root_cached *root)
{
	__anon_vma_interval_tree_remove(node, root);
}

struct anon_vma_chain *
anon_vma_interval_tree_iter_first(struct rb_root_cached *root,
				  unsigned long first, unsigned long last)
{
	return __anon_vma_interval_tree_iter_first(root, first, last);
}

struct anon_vma_chain *
anon_vma_interval_tree_iter_next(struct anon_vma_chain *node,
				 unsigned long first, unsigned long last)
{
	return __anon_vma_interval_tree_iter_next(node, first, last);
}

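/*
 * Debug check: the node's interval must match what was cached when it
 * was inserted.
 */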
#ifdef CONFIG_DEBUG_VM_RB
void anon_vma_interval_tree_verify(struct anon_vma_chain *node)
{
	WARN_ON_ONCE(node->cached_vma_start != avc_start_pgoff(node));
	WARN_ON_ONCE(node->cached_vma_last != avc_last_pgoff(node));
}
#endif