/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_KHUGEPAGED_H
#define _LINUX_KHUGEPAGED_H

#include <linux/sched/coredump.h> /* MMF_VM_HUGEPAGE */


#ifdef CONFIG_TRANSPARENT_HUGEPAGE
extern struct attribute_group khugepaged_attr_group;

extern int khugepaged_init(void);
extern void khugepaged_destroy(void);
extern int start_stop_khugepaged(void);
extern int __khugepaged_enter(struct mm_struct *mm);
extern void __khugepaged_exit(struct mm_struct *mm);
extern int khugepaged_enter_vma_merge(struct vm_area_struct *vma,
				      unsigned long vm_flags);

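/*
 * Mode tests against the global transparent_hugepage_flags bitmap:
 * khugepaged_enabled() is true in both the "always" and "madvise"
 * modes, khugepaged_always()/khugepaged_req_madv() distinguish the two,
 * and khugepaged_defrag() reflects the khugepaged defrag setting.
 */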
#define khugepaged_enabled()					\
	(transparent_hugepage_flags &				\
	 ((1<<TRANSPARENT_HUGEPAGE_FLAG) |			\
	  (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG)))
#define khugepaged_always()					\
	(transparent_hugepage_flags &				\
	 (1<<TRANSPARENT_HUGEPAGE_FLAG))
#define khugepaged_req_madv()					\
	(transparent_hugepage_flags &				\
	 (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG))
#define khugepaged_defrag()					\
	(transparent_hugepage_flags &				\
	 (1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG))

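/*
 * Propagate khugepaged registration across fork(): if the parent mm was
 * registered with khugepaged (MMF_VM_HUGEPAGE set), register the new mm
 * as well.
 */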
static inline int khugepaged_fork(struct mm_struct *mm, struct mm_struct *oldmm)
{
	if (test_bit(MMF_VM_HUGEPAGE, &oldmm->flags))
		return __khugepaged_enter(mm);
	return 0;
}

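/*
 * Unregister the mm from khugepaged if it was previously registered
 * (MMF_VM_HUGEPAGE set).
 */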
static inline void khugepaged_exit(struct mm_struct *mm)
{
	if (test_bit(MMF_VM_HUGEPAGE, &mm->flags))
		__khugepaged_exit(mm);
}

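/*
 * Register the mm with khugepaged when this VMA may hold collapsible
 * memory: huge pages are enabled globally, or requested for this VMA
 * via madvise (VM_HUGEPAGE), and neither VM_NOHUGEPAGE nor
 * MMF_DISABLE_THP forbids it.  Returns -ENOMEM if registration fails.
 */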
static inline int khugepaged_enter(struct vm_area_struct *vma,
				   unsigned long vm_flags)
{
	if (!test_bit(MMF_VM_HUGEPAGE, &vma->vm_mm->flags))
		if ((khugepaged_always() ||
		     (khugepaged_req_madv() && (vm_flags & VM_HUGEPAGE))) &&
		    !(vm_flags & VM_NOHUGEPAGE) &&
		    !test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
			if (__khugepaged_enter(vma->vm_mm))
				return -ENOMEM;
	return 0;
}
#else /* CONFIG_TRANSPARENT_HUGEPAGE */
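/* With THP compiled out, all of these hooks degrade to no-ops. */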
static inline int khugepaged_fork(struct mm_struct *mm, struct mm_struct *oldmm)
{
	return 0;
}
static inline void khugepaged_exit(struct mm_struct *mm)
{
}
static inline int khugepaged_enter(struct vm_area_struct *vma,
				   unsigned long vm_flags)
{
	return 0;
}
static inline int khugepaged_enter_vma_merge(struct vm_area_struct *vma,
					     unsigned long vm_flags)
{
	return 0;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#endif /* _LINUX_KHUGEPAGED_H */