revert "memcg, vmscan: integrate soft reclaim tighter with zone shrinking code"
Revert commit 3b38722efd9f ("memcg, vmscan: integrate soft reclaim
tighter with zone shrinking code")
I merged this prematurely: Michal and Johannes still disagree about the
overall design direction, and the future remains unclear.
Cc: Michal Hocko <mhocko@suse.cz>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
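
[Editor's note: for readers following the thread, the design being reverted
folded soft limit reclaim into the memcg iteration inside shrink_zone(): a
soft-reclaim-only pass ran first, falling back to a full pass when that pass
scanned nothing. A condensed sketch of that (now removed) flow, reconstructed
from the removal hunks below -- the iterator setup, `root`/`reclaim`, and the
per-lruvec bookkeeping are elided, so this is illustrative, not buildable:]

	static void
	__shrink_zone(struct zone *zone, struct scan_control *sc, bool soft_reclaim)
	{
		/* ... iterator setup for root/reclaim elided ... */
		struct mem_cgroup *memcg = mem_cgroup_iter(root, NULL, &reclaim);

		do {
			/* first pass: skip groups not over their soft limit */
			if (soft_reclaim &&
			    !mem_cgroup_soft_reclaim_eligible(memcg)) {
				memcg = mem_cgroup_iter(root, memcg, &reclaim);
				continue;
			}
			/* ... mem_cgroup_zone_lruvec() + shrink_lruvec() ... */
			memcg = mem_cgroup_iter(root, memcg, &reclaim);
		} while (memcg);
	}

	static void shrink_zone(struct zone *zone, struct scan_control *sc)
	{
		bool do_soft_reclaim = mem_cgroup_should_soft_reclaim(sc);
		unsigned long nr_scanned = sc->nr_scanned;

		__shrink_zone(zone, sc, do_soft_reclaim);

		/*
		 * No group over its soft limit had pages in this zone,
		 * so fall back to reclaiming from everybody.
		 */
		if (do_soft_reclaim && sc->nr_scanned == nr_scanned)
			__shrink_zone(zone, sc, false);
	}
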
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 0e081ca..beb3577 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -139,21 +139,11 @@
{
return !sc->target_mem_cgroup;
}
-
-static bool mem_cgroup_should_soft_reclaim(struct scan_control *sc)
-{
- return !mem_cgroup_disabled() && global_reclaim(sc);
-}
#else
static bool global_reclaim(struct scan_control *sc)
{
return true;
}
-
-static bool mem_cgroup_should_soft_reclaim(struct scan_control *sc)
-{
- return false;
-}
#endif
unsigned long zone_reclaimable_pages(struct zone *zone)
@@ -2174,8 +2164,7 @@
}
}
-static void
-__shrink_zone(struct zone *zone, struct scan_control *sc, bool soft_reclaim)
+static void shrink_zone(struct zone *zone, struct scan_control *sc)
{
unsigned long nr_reclaimed, nr_scanned;
@@ -2194,12 +2183,6 @@
do {
struct lruvec *lruvec;
- if (soft_reclaim &&
- !mem_cgroup_soft_reclaim_eligible(memcg)) {
- memcg = mem_cgroup_iter(root, memcg, &reclaim);
- continue;
- }
-
lruvec = mem_cgroup_zone_lruvec(zone, memcg);
shrink_lruvec(lruvec, sc);
@@ -2230,24 +2213,6 @@
sc->nr_scanned - nr_scanned, sc));
}
-
-static void shrink_zone(struct zone *zone, struct scan_control *sc)
-{
- bool do_soft_reclaim = mem_cgroup_should_soft_reclaim(sc);
- unsigned long nr_scanned = sc->nr_scanned;
-
- __shrink_zone(zone, sc, do_soft_reclaim);
-
- /*
- * No group is over the soft limit or those that are do not have
- * pages in the zone we are reclaiming so we have to reclaim everybody
- */
- if (do_soft_reclaim && (sc->nr_scanned == nr_scanned)) {
- __shrink_zone(zone, sc, false);
- return;
- }
-}
-
/* Returns true if compaction should go ahead for a high-order request */
static inline bool compaction_ready(struct zone *zone, struct scan_control *sc)
{
@@ -2309,6 +2274,8 @@
{
struct zoneref *z;
struct zone *zone;
+ unsigned long nr_soft_reclaimed;
+ unsigned long nr_soft_scanned;
bool aborted_reclaim = false;
/*
@@ -2348,6 +2315,18 @@
continue;
}
}
+ /*
+ * This steals pages from memory cgroups over softlimit
+ * and returns the number of reclaimed pages and
+ * scanned pages. This works for global memory pressure
+ * and balancing, not for a memcg's limit.
+ */
+ nr_soft_scanned = 0;
+ nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(zone,
+ sc->order, sc->gfp_mask,
+ &nr_soft_scanned);
+ sc->nr_reclaimed += nr_soft_reclaimed;
+ sc->nr_scanned += nr_soft_scanned;
/* need some check to avoid more shrink_zone() */
}
@@ -2941,6 +2920,8 @@
{
int i;
int end_zone = 0; /* Inclusive. 0 = ZONE_DMA */
+ unsigned long nr_soft_reclaimed;
+ unsigned long nr_soft_scanned;
struct scan_control sc = {
.gfp_mask = GFP_KERNEL,
.priority = DEF_PRIORITY,
@@ -3055,6 +3036,15 @@
sc.nr_scanned = 0;
+ nr_soft_scanned = 0;
+ /*
+ * Call soft limit reclaim before calling shrink_zone.
+ */
+ nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(zone,
+ order, sc.gfp_mask,
+ &nr_soft_scanned);
+ sc.nr_reclaimed += nr_soft_reclaimed;
+
/*
* There should be no need to raise the scanning
* priority if enough pages are already being scanned
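
[Editor's note: with the revert applied, soft limit reclaim is again a
separate up-front pass driven from the two global reclaim entry points --
shrink_zones() for direct reclaim and kswapd's balance_pgdat() -- rather than
a property of shrink_zone() itself. A minimal sketch of the restored per-zone
pattern, assembled from the addition hunks above (the surrounding zonelist
loop and watermark checks are elided):]

	unsigned long nr_soft_reclaimed, nr_soft_scanned = 0;

	/* steal pages from memcgs over their soft limit first ... */
	nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(zone, sc->order,
							  sc->gfp_mask,
							  &nr_soft_scanned);
	sc->nr_reclaimed += nr_soft_reclaimed;
	sc->nr_scanned += nr_soft_scanned;

	/* ... then do the regular zone scan */
	shrink_zone(zone, sc);

[Note the asymmetry visible in the hunks: the shrink_zones() path adds
nr_soft_scanned to sc->nr_scanned, while the balance_pgdat() hunk adds only
nr_soft_reclaimed.]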