#ifndef _LINUX_COMPACTION_H
#define _LINUX_COMPACTION_H

#include <linux/node.h>

/* Return values for compact_zone() and try_to_compact_pages() */
/* compaction didn't start as it was not possible or direct reclaim was more suitable */
#define COMPACT_SKIPPED		0
/* compaction should continue to another pageblock */
#define COMPACT_CONTINUE	1
/* direct compaction partially compacted a zone and there are suitable pages */
#define COMPACT_PARTIAL		2
/* The full zone was compacted */
#define COMPACT_COMPLETE	3

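/*
 * Illustrative only, not part of this interface: a direct-compaction
 * caller in the allocation slow path might react to these values roughly
 * as follows (the surrounding code is hypothetical):
 *
 *	ret = try_to_compact_pages(zonelist, order, gfp_mask, nodemask, sync);
 *	if (ret == COMPACT_SKIPPED)
 *		... compaction was not attempted, fall back to direct reclaim ...
 *	else if (ret == COMPACT_PARTIAL)
 *		... suitable free pages may now exist, retry the allocation ...
 */
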
/*
 * compaction supports three modes
 *
 * COMPACT_ASYNC_MOVABLE uses asynchronous migration and only scans
 *    MIGRATE_MOVABLE pageblocks as migration sources and targets.
 * COMPACT_ASYNC_UNMOVABLE uses asynchronous migration and only scans
 *    MIGRATE_MOVABLE pageblocks as migration sources.
 *    MIGRATE_UNMOVABLE pageblocks are scanned as potential migration
 *    targets and converted to MIGRATE_MOVABLE if possible.
 * COMPACT_SYNC uses synchronous migration and scans all pageblocks
 */
enum compact_mode {
	COMPACT_ASYNC_MOVABLE,
	COMPACT_ASYNC_UNMOVABLE,
	COMPACT_SYNC,
};
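
/*
 * Illustrative only, not part of this interface: the mode roughly tracks
 * how much a caller is prepared to wait for page migration. A minimal
 * sketch of the mapping (hypothetical code):
 *
 *	enum compact_mode mode = sync ? COMPACT_SYNC : COMPACT_ASYNC_MOVABLE;
 */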

#ifdef CONFIG_COMPACTION
extern int sysctl_compact_memory;
extern int sysctl_compaction_handler(struct ctl_table *table, int write,
			void __user *buffer, size_t *length, loff_t *ppos);
extern int sysctl_extfrag_threshold;
extern int sysctl_extfrag_handler(struct ctl_table *table, int write,
			void __user *buffer, size_t *length, loff_t *ppos);

extern int fragmentation_index(struct zone *zone, unsigned int order);
extern unsigned long try_to_compact_pages(struct zonelist *zonelist,
			int order, gfp_t gfp_mask, nodemask_t *mask,
			bool sync);
extern int compact_pgdat(pg_data_t *pgdat, int order);
extern unsigned long compaction_suitable(struct zone *zone, int order);

/* Do not skip compaction more than 64 times */
#define COMPACT_MAX_DEFER_SHIFT 6

/*
 * Compaction is deferred when compaction fails to result in a page
 * allocation success. 1 << compact_defer_shift compactions are skipped up
 * to a limit of 1 << COMPACT_MAX_DEFER_SHIFT.
 */
static inline void defer_compaction(struct zone *zone, int order)
{
	zone->compact_considered = 0;
	zone->compact_defer_shift++;

	if (order < zone->compact_order_failed)
		zone->compact_order_failed = order;

	if (zone->compact_defer_shift > COMPACT_MAX_DEFER_SHIFT)
		zone->compact_defer_shift = COMPACT_MAX_DEFER_SHIFT;
}

/* Returns true if compaction should be skipped this time */
static inline bool compaction_deferred(struct zone *zone, int order)
{
	unsigned long defer_limit = 1UL << zone->compact_defer_shift;

	if (order < zone->compact_order_failed)
		return false;

	/* Avoid possible overflow */
	if (++zone->compact_considered > defer_limit)
		zone->compact_considered = defer_limit;

	return zone->compact_considered < defer_limit;
}
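
/*
 * Illustrative sketch, not part of this header: a direct-compaction caller
 * would typically pair the two helpers above, checking compaction_deferred()
 * before attempting compaction and recording a failure with
 * defer_compaction() when the allocation still cannot be satisfied
 * afterwards. Roughly (hypothetical code; allocation_still_failed is a
 * placeholder):
 *
 *	if (compaction_deferred(zone, order))
 *		return COMPACT_SKIPPED;
 *
 *	ret = try_to_compact_pages(zonelist, order, gfp_mask, nodemask, sync);
 *	... retry the allocation ...
 *	if (allocation_still_failed)
 *		defer_compaction(zone, order);
 */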

#else
static inline unsigned long try_to_compact_pages(struct zonelist *zonelist,
			int order, gfp_t gfp_mask, nodemask_t *nodemask,
			bool sync)
{
	return COMPACT_CONTINUE;
}

static inline int compact_pgdat(pg_data_t *pgdat, int order)
{
	return COMPACT_CONTINUE;
}

static inline unsigned long compaction_suitable(struct zone *zone, int order)
{
	return COMPACT_SKIPPED;
}

static inline void defer_compaction(struct zone *zone, int order)
{
}

static inline bool compaction_deferred(struct zone *zone, int order)
{
	return true;
}

#endif /* CONFIG_COMPACTION */

#if defined(CONFIG_COMPACTION) && defined(CONFIG_SYSFS) && defined(CONFIG_NUMA)
extern int compaction_register_node(struct node *node);
extern void compaction_unregister_node(struct node *node);

#else

static inline int compaction_register_node(struct node *node)
{
	return 0;
}

static inline void compaction_unregister_node(struct node *node)
{
}
#endif /* CONFIG_COMPACTION && CONFIG_SYSFS && CONFIG_NUMA */

#endif /* _LINUX_COMPACTION_H */