mm, page_alloc: split alloc_pages_nodemask()
__alloc_pages_nodemask() does a number of preparation steps that
determine what zones can be used for the allocation, depending on a
variety of factors. This is fine, but a hypothetical caller that wants
multiple order-0 pages would have to repeat those preparation steps for
every page. This patch structures __alloc_pages_nodemask() such that
it's relatively easy to build a bulk order-0 page allocator. There is
no functional change.
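For illustration only (not part of this patch), a bulk order-0
allocator built on this split might do the preparation once and then
take pages from the freelists in a loop. The alloc_pages_bulk() name
and signature below are hypothetical sketch material, not proposed
code:

	/*
	 * Hypothetical sketch: allocate up to nr_pages order-0 pages,
	 * doing the preparation steps once for the whole batch.
	 * Returns the number of pages allocated.
	 */
	static unsigned long alloc_pages_bulk(gfp_t gfp_mask,
			struct zonelist *zonelist, nodemask_t *nodemask,
			unsigned long nr_pages, struct page **pages)
	{
		unsigned int alloc_flags = ALLOC_WMARK_LOW;
		gfp_t alloc_mask = gfp_mask;
		struct alloc_context ac = { };
		unsigned long i;

		gfp_mask &= gfp_allowed_mask;

		/* Preparation steps are done once, not per page */
		if (!prepare_alloc_pages(gfp_mask, 0, zonelist, nodemask,
					&ac, &alloc_mask, &alloc_flags))
			return 0;
		finalise_ac(gfp_mask, 0, &ac);

		for (i = 0; i < nr_pages; i++) {
			/* Order-0 fast path, reusing the prepared context */
			struct page *page = get_page_from_freelist(alloc_mask,
						0, alloc_flags, &ac);

			if (!page)
				break;
			pages[i] = page;
		}
		return i;
	}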
Link: http://lkml.kernel.org/r/20170123153906.3122-3-mgorman@techsingularity.net
Signed-off-by: Mel Gorman <mgorman@techsingularity.net>
Acked-by: Hillf Danton <hillf.zj@alibaba-inc.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Jesper Dangaard Brouer <brouer@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 284153d..678b288 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -3855,6 +3855,60 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 	return page;
 }
 
+static inline bool prepare_alloc_pages(gfp_t gfp_mask, unsigned int order,
+		struct zonelist *zonelist, nodemask_t *nodemask,
+		struct alloc_context *ac, gfp_t *alloc_mask,
+		unsigned int *alloc_flags)
+{
+	ac->high_zoneidx = gfp_zone(gfp_mask);
+	ac->zonelist = zonelist;
+	ac->nodemask = nodemask;
+	ac->migratetype = gfpflags_to_migratetype(gfp_mask);
+
+	if (cpusets_enabled()) {
+		*alloc_mask |= __GFP_HARDWALL;
+		*alloc_flags |= ALLOC_CPUSET;
+		if (!ac->nodemask)
+			ac->nodemask = &cpuset_current_mems_allowed;
+	}
+
+	lockdep_trace_alloc(gfp_mask);
+
+	might_sleep_if(gfp_mask & __GFP_DIRECT_RECLAIM);
+
+	if (should_fail_alloc_page(gfp_mask, order))
+		return false;
+
+	/*
+	 * Check the zones suitable for the gfp_mask contain at least one
+	 * valid zone. It's possible to have an empty zonelist as a result
+	 * of __GFP_THISNODE and a memoryless node
+	 */
+	if (unlikely(!ac->zonelist->_zonerefs->zone))
+		return false;
+
+	if (IS_ENABLED(CONFIG_CMA) && ac->migratetype == MIGRATE_MOVABLE)
+		*alloc_flags |= ALLOC_CMA;
+
+	return true;
+}
+
+/* Determine whether to spread dirty pages and what the first usable zone is */
+static inline void finalise_ac(gfp_t gfp_mask,
+		unsigned int order, struct alloc_context *ac)
+{
+	/* Dirty zone balancing only done in the fast path */
+	ac->spread_dirty_pages = (gfp_mask & __GFP_WRITE);
+
+	/*
+	 * The preferred zone is used for statistics but crucially it is
+	 * also used as the starting point for the zonelist iterator. It
+	 * may get reset for allocations that ignore memory policies.
+	 */
+	ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
+					ac->high_zoneidx, ac->nodemask);
+}
+
 /*
  * This is the 'heart' of the zoned buddy allocator.
  */
@@ -3865,50 +3919,13 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
 	struct page *page;
 	unsigned int alloc_flags = ALLOC_WMARK_LOW;
 	gfp_t alloc_mask = gfp_mask; /* The gfp_t that was actually used for allocation */
-	struct alloc_context ac = {
-		.high_zoneidx = gfp_zone(gfp_mask),
-		.zonelist = zonelist,
-		.nodemask = nodemask,
-		.migratetype = gfpflags_to_migratetype(gfp_mask),
-	};
-
-	if (cpusets_enabled()) {
-		alloc_mask |= __GFP_HARDWALL;
-		alloc_flags |= ALLOC_CPUSET;
-		if (!ac.nodemask)
-			ac.nodemask = &cpuset_current_mems_allowed;
-	}
+	struct alloc_context ac = { };
 
 	gfp_mask &= gfp_allowed_mask;
-
-	lockdep_trace_alloc(gfp_mask);
-
-	might_sleep_if(gfp_mask & __GFP_DIRECT_RECLAIM);
-
-	if (should_fail_alloc_page(gfp_mask, order))
+	if (!prepare_alloc_pages(gfp_mask, order, zonelist, nodemask, &ac, &alloc_mask, &alloc_flags))
 		return NULL;
 
-	/*
-	 * Check the zones suitable for the gfp_mask contain at least one
-	 * valid zone. It's possible to have an empty zonelist as a result
-	 * of __GFP_THISNODE and a memoryless node
-	 */
-	if (unlikely(!zonelist->_zonerefs->zone))
-		return NULL;
-
-	if (IS_ENABLED(CONFIG_CMA) && ac.migratetype == MIGRATE_MOVABLE)
-		alloc_flags |= ALLOC_CMA;
-
-	/* Dirty zone balancing only done in the fast path */
-	ac.spread_dirty_pages = (gfp_mask & __GFP_WRITE);
-
-	/*
-	 * The preferred zone is used for statistics but crucially it is
-	 * also used as the starting point for the zonelist iterator. It
-	 * may get reset for allocations that ignore memory policies.
-	 */
-	ac.preferred_zoneref = first_zones_zonelist(ac.zonelist,
-					ac.high_zoneidx, ac.nodemask);
+	finalise_ac(gfp_mask, order, &ac);
 	if (!ac.preferred_zoneref->zone) {
 		page = NULL;
 		/*