#ifndef _LINUX_COMPACTION_H
#define _LINUX_COMPACTION_H

/* Return values for compact_zone() and try_to_compact_pages() */
/* compaction didn't start as it was deferred due to past failures */
#define COMPACT_DEFERRED	0
/* compaction didn't start as it was not possible or direct reclaim was more suitable */
#define COMPACT_SKIPPED		1
/* compaction should continue to another pageblock */
#define COMPACT_CONTINUE	2
/* direct compaction partially compacted a zone and there are suitable pages */
#define COMPACT_PARTIAL		3
/* The full zone was compacted */
#define COMPACT_COMPLETE	4

/* Used to signal whether compaction detected need_resched() or lock contention */
/* No contention detected */
#define COMPACT_CONTENDED_NONE	0
/* Either need_resched() was true or a fatal signal was pending */
#define COMPACT_CONTENDED_SCHED	1
/* Zone lock or lru_lock was contended in async compaction */
#define COMPACT_CONTENDED_LOCK	2

#ifdef CONFIG_COMPACTION
extern int sysctl_compact_memory;
extern int sysctl_compaction_handler(struct ctl_table *table, int write,
			void __user *buffer, size_t *length, loff_t *ppos);
extern int sysctl_extfrag_threshold;
extern int sysctl_extfrag_handler(struct ctl_table *table, int write,
			void __user *buffer, size_t *length, loff_t *ppos);

extern int fragmentation_index(struct zone *zone, unsigned int order);
extern unsigned long try_to_compact_pages(struct zonelist *zonelist,
			int order, gfp_t gfp_mask, nodemask_t *mask,
			enum migrate_mode mode, int *contended,
			int alloc_flags, int classzone_idx,
			struct zone **candidate_zone);
extern void compact_pgdat(pg_data_t *pgdat, int order);
extern void reset_isolation_suitable(pg_data_t *pgdat);
extern unsigned long compaction_suitable(struct zone *zone, int order,
			int alloc_flags, int classzone_idx);

/* Do not skip compaction more than 64 times */
#define COMPACT_MAX_DEFER_SHIFT 6

/*
 * Compaction is deferred when compaction fails to result in a page
 * allocation success. 1 << compact_defer_shift compactions are skipped up
 * to a limit of 1 << COMPACT_MAX_DEFER_SHIFT.
 */
static inline void defer_compaction(struct zone *zone, int order)
{
	zone->compact_considered = 0;
	zone->compact_defer_shift++;

	if (order < zone->compact_order_failed)
		zone->compact_order_failed = order;

	if (zone->compact_defer_shift > COMPACT_MAX_DEFER_SHIFT)
		zone->compact_defer_shift = COMPACT_MAX_DEFER_SHIFT;
}

/* Returns true if compaction should be skipped this time */
static inline bool compaction_deferred(struct zone *zone, int order)
{
	unsigned long defer_limit = 1UL << zone->compact_defer_shift;

	if (order < zone->compact_order_failed)
		return false;

	/* Avoid possible overflow */
	if (++zone->compact_considered > defer_limit)
		zone->compact_considered = defer_limit;

	return zone->compact_considered < defer_limit;
}

/*
 * Update defer tracking counters after successful compaction of given order,
 * which means an allocation either succeeded (alloc_success == true) or is
 * expected to succeed.
 */
static inline void compaction_defer_reset(struct zone *zone, int order,
		bool alloc_success)
{
	if (alloc_success) {
		zone->compact_considered = 0;
		zone->compact_defer_shift = 0;
	}
	if (order >= zone->compact_order_failed)
		zone->compact_order_failed = order + 1;
}
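
/*
 * Illustrative sketch of how the deferral helpers above fit together in a
 * direct-compaction caller. The surrounding control flow is hypothetical;
 * only the helper calls come from this header:
 *
 *	if (!compaction_deferred(zone, order)) {
 *		run compaction, then retry the order-sized allocation;
 *		if (the allocation succeeded)
 *			compaction_defer_reset(zone, order, true);
 *		else
 *			defer_compaction(zone, order);
 *	}
 *
 * Each defer_compaction() call doubles the number of later attempts that
 * compaction_deferred() will skip, capped at 1 << COMPACT_MAX_DEFER_SHIFT
 * (i.e. 64), and a later success resets the backoff via
 * compaction_defer_reset().
 */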

/* Returns true if restarting compaction after many failures */
static inline bool compaction_restarting(struct zone *zone, int order)
{
	if (order < zone->compact_order_failed)
		return false;

	return zone->compact_defer_shift == COMPACT_MAX_DEFER_SHIFT &&
		zone->compact_considered >= 1UL << zone->compact_defer_shift;
}
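
/*
 * Illustrative note (an assumption about the caller, not something this
 * header defines): the point where compaction_restarting() becomes true is
 * a natural moment for the compaction core to drop its cached per-pageblock
 * "skip" hints, e.g. via reset_isolation_suitable() declared above, before
 * rescanning the zone from scratch.
 */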

#else
static inline unsigned long try_to_compact_pages(struct zonelist *zonelist,
			int order, gfp_t gfp_mask, nodemask_t *nodemask,
			enum migrate_mode mode, int *contended,
			int alloc_flags, int classzone_idx,
			struct zone **candidate_zone)
{
	return COMPACT_CONTINUE;
}

static inline void compact_pgdat(pg_data_t *pgdat, int order)
{
}

static inline void reset_isolation_suitable(pg_data_t *pgdat)
{
}

static inline unsigned long compaction_suitable(struct zone *zone, int order,
			int alloc_flags, int classzone_idx)
{
	return COMPACT_SKIPPED;
}

static inline void defer_compaction(struct zone *zone, int order)
{
}

static inline bool compaction_deferred(struct zone *zone, int order)
{
	return true;
}

#endif /* CONFIG_COMPACTION */

#if defined(CONFIG_COMPACTION) && defined(CONFIG_SYSFS) && defined(CONFIG_NUMA)
extern int compaction_register_node(struct node *node);
extern void compaction_unregister_node(struct node *node);

#else

static inline int compaction_register_node(struct node *node)
{
	return 0;
}

static inline void compaction_unregister_node(struct node *node)
{
}
#endif /* CONFIG_COMPACTION && CONFIG_SYSFS && CONFIG_NUMA */

#endif /* _LINUX_COMPACTION_H */