/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_KSM_H
#define __LINUX_KSM_H
/*
 * Memory merging support.
 *
 * This code enables dynamic sharing of identical pages found in different
 * memory areas, even if they are not shared by fork().
 */
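
/*
 * Illustrative userspace usage (a hedged sketch; nothing below is part
 * of this header): a process opts a range into KSM scanning with
 * madvise(2) and MADV_MERGEABLE, e.g.
 *
 *	void *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	madvise(buf, len, MADV_MERGEABLE);
 *
 * ksmd then scans such areas and merges pages with identical content
 * into a single write-protected page, broken back out on write (COW).
 */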

#include <linux/bitops.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/sched.h>
#include <linux/sched/coredump.h>

struct stable_node;
struct mem_cgroup;

#ifdef CONFIG_KSM
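/*
 * Entry point for madvise(MADV_MERGEABLE / MADV_UNMERGEABLE): updates
 * *vm_flags for the range, and on a first MADV_MERGEABLE registers the
 * mm with ksmd via __ksm_enter().
 */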
int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
		unsigned long end, int advice, unsigned long *vm_flags);
int __ksm_enter(struct mm_struct *mm);
void __ksm_exit(struct mm_struct *mm);

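/*
 * Called from dup_mmap() at fork(): if the parent mm was registered
 * with ksmd (MMF_VM_MERGEABLE set), register the new mm as well, so
 * its mergeable areas continue to be scanned.
 */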
static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
{
	if (test_bit(MMF_VM_MERGEABLE, &oldmm->flags))
		return __ksm_enter(mm);
	return 0;
}

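/*
 * Called on mm teardown (__mmput()): unregister the mm from ksmd and
 * free its rmap_items if it was ever registered.
 */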
static inline void ksm_exit(struct mm_struct *mm)
{
	if (test_bit(MMF_VM_MERGEABLE, &mm->flags))
		__ksm_exit(mm);
}

/*
 * When do_swap_page() first faults in from swap what used to be a KSM page,
 * no problem, it will be assigned to this vma's anon_vma; but thereafter,
 * it might be faulted into a different anon_vma (or perhaps to a different
 * offset in the same anon_vma). do_swap_page() cannot do all the locking
 * needed to reconstitute a cross-anon_vma KSM page: for now it has to make
 * a copy, and leave remerging the pages to a later pass of ksmd.
 *
 * We'd like to make this conditional on vma->vm_flags & VM_MERGEABLE,
 * but what if the vma was unmerged while the page was swapped out?
 */
struct page *ksm_might_need_to_copy(struct page *page,
			struct vm_area_struct *vma, unsigned long address);
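
/*
 * A hedged sketch of the expected swap-in call pattern (do_swap_page()
 * is the real caller; the error label below is illustrative). A NULL
 * return means a private copy was needed but allocation failed:
 *
 *	page = ksm_might_need_to_copy(page, vma, vmf->address);
 *	if (unlikely(!page)) {
 *		ret = VM_FAULT_OOM;
 *		goto out_page;
 *	}
 *	(page is now either the original page or a fresh anonymous copy)
 */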
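/*
 * Hooks used by the rmap and page-migration code for KSM pages:
 * rmap_walk_ksm() walks every mapping of a KSM page for the generic
 * rmap_walk() machinery, ksm_migrate_page() moves the stable-tree
 * linkage over to the new page on migration, and reuse_ksm_page()
 * lets the write-fault path reuse a KSM page in place when it is
 * mapped only once.
 */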
void rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc);
void ksm_migrate_page(struct page *newpage, struct page *oldpage);
bool reuse_ksm_page(struct page *page,
			struct vm_area_struct *vma, unsigned long address);

#else  /* !CONFIG_KSM */

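/* With CONFIG_KSM disabled, these stubs let callers compile to no-ops. */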
static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
{
	return 0;
}

static inline void ksm_exit(struct mm_struct *mm)
{
}

#ifdef CONFIG_MMU
static inline int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
		unsigned long end, int advice, unsigned long *vm_flags)
{
	return 0;
}

static inline struct page *ksm_might_need_to_copy(struct page *page,
			struct vm_area_struct *vma, unsigned long address)
{
	return page;
}

static inline void rmap_walk_ksm(struct page *page,
			struct rmap_walk_control *rwc)
{
}

static inline void ksm_migrate_page(struct page *newpage, struct page *oldpage)
{
}
static inline bool reuse_ksm_page(struct page *page,
			struct vm_area_struct *vma, unsigned long address)
{
	return false;
}
#endif /* CONFIG_MMU */
#endif /* !CONFIG_KSM */

#endif /* __LINUX_KSM_H */