#ifndef _LINUX_COMPACTION_H
#define _LINUX_COMPACTION_H

/* Return values for compact_zone() and try_to_compact_pages() */
/* compaction didn't start as it was deferred due to past failures */
#define COMPACT_DEFERRED	0
/* compaction didn't start as it was not possible or direct reclaim was more suitable */
#define COMPACT_SKIPPED		1
/* compaction should continue to another pageblock */
#define COMPACT_CONTINUE	2
/* direct compaction partially compacted a zone and there are suitable pages */
#define COMPACT_PARTIAL		3
/* The full zone was compacted */
#define COMPACT_COMPLETE	4
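/*
 * Illustrative sketch (not part of the original header): a direct-compaction
 * caller would typically interpret these values roughly as below. The control
 * flow and variable names here are assumptions for illustration only.
 *
 *	switch (try_to_compact_pages(zonelist, order, gfp_mask, nodemask,
 *				     MIGRATE_ASYNC, &contended, &zone)) {
 *	case COMPACT_PARTIAL:
 *		// a page of the requested order is likely free now;
 *		// retry the allocation before falling back to reclaim
 *		break;
 *	case COMPACT_DEFERRED:
 *	case COMPACT_SKIPPED:
 *		// compaction was not attempted; fall back to direct reclaim
 *		break;
 *	}
 */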

#ifdef CONFIG_COMPACTION
extern int sysctl_compact_memory;
extern int sysctl_compaction_handler(struct ctl_table *table, int write,
			void __user *buffer, size_t *length, loff_t *ppos);
extern int sysctl_extfrag_threshold;
extern int sysctl_extfrag_handler(struct ctl_table *table, int write,
			void __user *buffer, size_t *length, loff_t *ppos);
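/*
 * Usage note (added for illustration, not part of the original header):
 * these handlers are assumed to back the /proc/sys/vm/compact_memory and
 * /proc/sys/vm/extfrag_threshold sysctls, so compaction of all zones can
 * typically be requested from userspace with something like:
 *
 *	echo 1 > /proc/sys/vm/compact_memory
 */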

extern int fragmentation_index(struct zone *zone, unsigned int order);
extern unsigned long try_to_compact_pages(struct zonelist *zonelist,
			int order, gfp_t gfp_mask, nodemask_t *mask,
			enum migrate_mode mode, bool *contended,
			struct zone **candidate_zone);
extern void compact_pgdat(pg_data_t *pgdat, int order);
extern void reset_isolation_suitable(pg_data_t *pgdat);
extern unsigned long compaction_suitable(struct zone *zone, int order);

/* Do not skip compaction more than 64 times */
#define COMPACT_MAX_DEFER_SHIFT 6

/*
 * Compaction is deferred when compaction fails to result in a page
 * allocation success. 1 << compact_defer_shift compactions are skipped up
 * to a limit of 1 << COMPACT_MAX_DEFER_SHIFT
 */
static inline void defer_compaction(struct zone *zone, int order)
{
	zone->compact_considered = 0;
	zone->compact_defer_shift++;

	if (order < zone->compact_order_failed)
		zone->compact_order_failed = order;

	if (zone->compact_defer_shift > COMPACT_MAX_DEFER_SHIFT)
		zone->compact_defer_shift = COMPACT_MAX_DEFER_SHIFT;
}
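/*
 * Worked example (added for illustration): each call to defer_compaction()
 * bumps compact_defer_shift, so the defer limit used by compaction_deferred()
 * below grows as 1 << 1, 1 << 2, ... and is capped at COMPACT_MAX_DEFER_SHIFT:
 *
 *	consecutive failures:  1   2   3   4   5   6   7+
 *	defer limit:           2   4   8  16  32  64  64
 *
 * compaction_deferred() keeps returning true until compact_considered
 * reaches that limit, so at the maximum backoff compaction is retried
 * roughly every 64 considered requests for this zone.
 */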

/* Returns true if compaction should be skipped this time */
static inline bool compaction_deferred(struct zone *zone, int order)
{
	unsigned long defer_limit = 1UL << zone->compact_defer_shift;

	if (order < zone->compact_order_failed)
		return false;

	/* Avoid possible overflow */
	if (++zone->compact_considered > defer_limit)
		zone->compact_considered = defer_limit;

	return zone->compact_considered < defer_limit;
}
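/*
 * Illustrative caller pattern (a sketch, not a quote of the allocator code):
 * the direct-compaction path is assumed to gate itself on this helper and to
 * record the outcome with compaction_defer_reset()/defer_compaction().
 * "allocation_succeeded" and "cc" are placeholders, not real identifiers:
 *
 *	if (compaction_deferred(zone, order))
 *		return NULL;			// back off, try reclaim instead
 *	status = compact_zone(zone, &cc);	// cc: compaction control state
 *	if (allocation_succeeded)
 *		compaction_defer_reset(zone, order, true);
 *	else
 *		defer_compaction(zone, order);	// ran but still no page
 */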

/*
 * Update defer tracking counters after successful compaction of given order,
 * which means an allocation either succeeded (alloc_success == true) or is
 * expected to succeed.
 */
static inline void compaction_defer_reset(struct zone *zone, int order,
		bool alloc_success)
{
	if (alloc_success) {
		zone->compact_considered = 0;
		zone->compact_defer_shift = 0;
	}
	if (order >= zone->compact_order_failed)
		zone->compact_order_failed = order + 1;
}
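/*
 * Example (added for illustration): compaction_defer_reset(zone, 3, true)
 * clears the backoff counters entirely because an order-3 allocation
 * actually succeeded, while compaction_defer_reset(zone, 3, false) only
 * moves compact_order_failed above 3 (if it was not already higher),
 * recording that order-3 compaction is again expected to succeed without
 * resetting the defer shift.
 */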

/* Returns true if restarting compaction after many failures */
static inline bool compaction_restarting(struct zone *zone, int order)
{
	if (order < zone->compact_order_failed)
		return false;

	return zone->compact_defer_shift == COMPACT_MAX_DEFER_SHIFT &&
		zone->compact_considered >= 1UL << zone->compact_defer_shift;
}
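/*
 * Illustrative use (an assumption about the caller, not part of this header):
 * compaction code is expected to use this to detect that a zone has hit the
 * maximum backoff and is about to retry, e.g. to drop cached pageblock skip
 * information before the retry:
 *
 *	if (compaction_restarting(zone, order))
 *		__reset_isolation_suitable(zone);	// assumed helper in mm/compaction.c
 */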

#else
static inline unsigned long try_to_compact_pages(struct zonelist *zonelist,
			int order, gfp_t gfp_mask, nodemask_t *nodemask,
			enum migrate_mode mode, bool *contended,
			struct zone **candidate_zone)
{
	return COMPACT_CONTINUE;
}

static inline void compact_pgdat(pg_data_t *pgdat, int order)
{
}

static inline void reset_isolation_suitable(pg_data_t *pgdat)
{
}

static inline unsigned long compaction_suitable(struct zone *zone, int order)
{
	return COMPACT_SKIPPED;
}

static inline void defer_compaction(struct zone *zone, int order)
{
}

static inline bool compaction_deferred(struct zone *zone, int order)
{
	return true;
}

#endif /* CONFIG_COMPACTION */

#if defined(CONFIG_COMPACTION) && defined(CONFIG_SYSFS) && defined(CONFIG_NUMA)
extern int compaction_register_node(struct node *node);
extern void compaction_unregister_node(struct node *node);

#else

static inline int compaction_register_node(struct node *node)
{
	return 0;
}

static inline void compaction_unregister_node(struct node *node)
{
}
#endif /* CONFIG_COMPACTION && CONFIG_SYSFS && CONFIG_NUMA */

#endif /* _LINUX_COMPACTION_H */