// SPDX-License-Identifier: GPL-2.0-only
/*
 * Linux VM pressure
 *
 * Copyright 2012 Linaro Ltd.
 * Anton Vorontsov <anton.vorontsov@linaro.org>
 *
 * Based on ideas from Andrew Morton, David Rientjes, KOSAKI Motohiro,
 * Leonid Moiseichuk, Mel Gorman, Minchan Kim and Pekka Enberg.
 */

#include <linux/cgroup.h>
#include <linux/fs.h>
#include <linux/log2.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/vmstat.h>
#include <linux/eventfd.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/printk.h>
#include <linux/vmpressure.h>

/*
 * The window size (vmpressure_win) is the number of scanned pages before
 * we try to analyze the scanned/reclaimed ratio. So the window is used as
 * a rate-limit tunable for the "low" level notification, and also for
 * averaging the ratio for the medium/critical levels. Using a small
 * window size can cause a lot of false positives, but too big a window
 * size will delay the notifications.
 *
 * As the vmscan reclaimer logic works with chunks which are multiples of
 * SWAP_CLUSTER_MAX, it makes sense to use it for the window size as well.
 *
 * TODO: Make the window size depend on machine size, as we do for vmstat
 * thresholds. Currently we set it to 512 pages (2MB for 4KB pages).
 */
static const unsigned long vmpressure_win = SWAP_CLUSTER_MAX * 16;
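
/*
 * Illustrative sketch of how the window gates notifications (numbers
 * assumed): with 4KB pages the window is 512 pages, i.e. 2MB worth of
 * scanning between reports. Reclaim rounds that scan fewer pages are
 * accumulated until the window is filled:
 *
 *      scanned += 300;  -> 300 < 512, keep accumulating
 *      scanned += 300;  -> 600 >= 512, compute the ratio, reset counters
 */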

/*
 * These thresholds are used when we account memory pressure through
 * scanned/reclaimed ratio. The current values were chosen empirically. In
 * essence, they are percents: the higher the value, the more
 * unsuccessful reclaims there were.
 */
static const unsigned int vmpressure_level_med = 60;
static const unsigned int vmpressure_level_critical = 95;
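
/*
 * Illustrative mapping (numbers assumed): reclaiming one of every two
 * scanned pages in a window yields a pressure index of about 50, i.e.
 * "low"; one of every four yields about 75, i.e. "medium"; reclaiming
 * almost nothing pushes the index towards 100, i.e. "critical".
 */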

/*
 * When there are too few pages left to scan, vmpressure() may miss the
 * critical pressure as the number of pages will be less than the "window
 * size". However, in that case the vmscan priority will rise quickly, as
 * the reclaimer will try to scan the LRUs more deeply.
 *
 * The vmscan logic considers these special priorities:
 *
 * prio == DEF_PRIORITY (12): reclaimer starts with that value
 * prio <= DEF_PRIORITY - 2 : kswapd becomes somewhat overwhelmed
 * prio == 0                : close to OOM, kernel scans every page in an lru
 *
 * Any value in this range is acceptable for this tunable (i.e. from 12 to
 * 0). The current value for vmpressure_level_critical_prio was chosen
 * empirically, but the number, in essence, means that we consider the
 * level critical when the scanning depth is ~10% of the lru size (vmscan
 * scans 'lru_size >> prio' pages, so it is actually 12.5%, or one
 * eighth).
 */
static const unsigned int vmpressure_level_critical_prio = ilog2(100 / 10);
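
/*
 * Worked arithmetic for the value above: ilog2(100 / 10) = ilog2(10) = 3,
 * and at prio == 3 vmscan examines lru_size >> 3 pages per pass, i.e.
 * one eighth (12.5%) of the LRU. vmpressure_prio() below fires once
 * reclaim has to dig at least that deep.
 */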

static struct vmpressure *work_to_vmpressure(struct work_struct *work)
{
        return container_of(work, struct vmpressure, work);
}

static struct vmpressure *vmpressure_parent(struct vmpressure *vmpr)
{
        struct mem_cgroup *memcg = vmpressure_to_memcg(vmpr);

        memcg = parent_mem_cgroup(memcg);
        if (!memcg)
                return NULL;
        return memcg_to_vmpressure(memcg);
}

enum vmpressure_levels {
        VMPRESSURE_LOW = 0,
        VMPRESSURE_MEDIUM,
        VMPRESSURE_CRITICAL,
        VMPRESSURE_NUM_LEVELS,
};

enum vmpressure_modes {
        VMPRESSURE_NO_PASSTHROUGH = 0,
        VMPRESSURE_HIERARCHY,
        VMPRESSURE_LOCAL,
        VMPRESSURE_NUM_MODES,
};

static const char * const vmpressure_str_levels[] = {
        [VMPRESSURE_LOW] = "low",
        [VMPRESSURE_MEDIUM] = "medium",
        [VMPRESSURE_CRITICAL] = "critical",
};

static const char * const vmpressure_str_modes[] = {
        [VMPRESSURE_NO_PASSTHROUGH] = "default",
        [VMPRESSURE_HIERARCHY] = "hierarchy",
        [VMPRESSURE_LOCAL] = "local",
};
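
/*
 * Together these tables define the tokens userspace may pass when
 * registering an event, e.g. "medium" or "critical,hierarchy"; see
 * vmpressure_register_event() below for the parsing.
 */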

static enum vmpressure_levels vmpressure_level(unsigned long pressure)
{
        if (pressure >= vmpressure_level_critical)
                return VMPRESSURE_CRITICAL;
        else if (pressure >= vmpressure_level_med)
                return VMPRESSURE_MEDIUM;
        return VMPRESSURE_LOW;
}

static enum vmpressure_levels vmpressure_calc_level(unsigned long scanned,
                                                    unsigned long reclaimed)
{
        unsigned long scale = scanned + reclaimed;
        unsigned long pressure = 0;

        /*
         * reclaimed can be greater than scanned for things such as reclaimed
         * slab pages. shrink_node() just adds reclaimed pages without a
         * related increment to scanned pages.
         */
        if (reclaimed >= scanned)
                goto out;
        /*
         * We calculate the ratio (in percent) of how many pages were
         * scanned vs. reclaimed in a given time frame (window). Note that
         * time is in VM reclaimer's "ticks", i.e. number of pages
         * scanned. This makes it possible to set a desired reaction time
         * and serves as a rate limit.
         */
        pressure = scale - (reclaimed * scale / scanned);
        pressure = pressure * 100 / scale;

out:
        pr_debug("%s: %3lu (s: %lu r: %lu)\n", __func__, pressure,
                 scanned, reclaimed);

        return vmpressure_level(pressure);
}
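
/*
 * Numerical sketch of the computation above (values assumed): with
 * scanned = 512 and reclaimed = 128, scale = 640, so
 *
 *      pressure = 640 - (128 * 640 / 512) = 480
 *      pressure = 480 * 100 / 640         = 75
 *
 * which is algebraically 100 * (1 - reclaimed/scanned); scaling by
 * 'scale' before dividing keeps precision in integer arithmetic.
 */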

struct vmpressure_event {
        struct eventfd_ctx *efd;
        enum vmpressure_levels level;
        enum vmpressure_modes mode;
        struct list_head node;
};

static bool vmpressure_event(struct vmpressure *vmpr,
                             const enum vmpressure_levels level,
                             bool ancestor, bool signalled)
{
        struct vmpressure_event *ev;
        bool ret = false;

        mutex_lock(&vmpr->events_lock);
        list_for_each_entry(ev, &vmpr->events, node) {
                if (ancestor && ev->mode == VMPRESSURE_LOCAL)
                        continue;
                if (signalled && ev->mode == VMPRESSURE_NO_PASSTHROUGH)
                        continue;
                if (level < ev->level)
                        continue;
                eventfd_signal(ev->efd, 1);
                ret = true;
        }
        mutex_unlock(&vmpr->events_lock);

        return ret;
}
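
/*
 * Summary of the mode semantics, as derived from the filters above: a
 * "local" listener fires only for pressure in its own memcg (ancestor
 * == false); a "default" listener additionally skips events that a
 * descendant has already been signalled for; a "hierarchy" listener
 * fires for any pressure in its subtree.
 */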

static void vmpressure_work_fn(struct work_struct *work)
{
        struct vmpressure *vmpr = work_to_vmpressure(work);
        unsigned long scanned;
        unsigned long reclaimed;
        enum vmpressure_levels level;
        bool ancestor = false;
        bool signalled = false;

        spin_lock(&vmpr->sr_lock);
        /*
         * Several contexts might be calling vmpressure(), so it is
         * possible that the work was rescheduled again before the old
         * work context cleared the counters. In that case we will run
         * just after the old work returns, but then scanned might be
         * zero here. The counters are read and cleared under sr_lock
         * so that they stay in sync with vmpressure().
         */
        scanned = vmpr->tree_scanned;
        if (!scanned) {
                spin_unlock(&vmpr->sr_lock);
                return;
        }

        reclaimed = vmpr->tree_reclaimed;
        vmpr->tree_scanned = 0;
        vmpr->tree_reclaimed = 0;
        spin_unlock(&vmpr->sr_lock);

        level = vmpressure_calc_level(scanned, reclaimed);

        do {
                if (vmpressure_event(vmpr, level, ancestor, signalled))
                        signalled = true;
                ancestor = true;
        } while ((vmpr = vmpressure_parent(vmpr)));
}

/**
 * vmpressure() - Account memory pressure through scanned/reclaimed ratio
 * @gfp: reclaimer's gfp mask
 * @memcg: cgroup memory controller handle
 * @tree: legacy subtree mode
 * @scanned: number of pages scanned
 * @reclaimed: number of pages reclaimed
 *
 * This function should be called from the vmscan reclaim path to account
 * "instantaneous" memory pressure (scanned/reclaimed ratio). The raw
 * pressure index is then further refined and averaged over time.
 *
 * If @tree is set, vmpressure is in traditional userspace reporting
 * mode: @memcg is considered the pressure root and userspace is
 * notified of the entire subtree's reclaim efficiency.
 *
 * If @tree is not set, reclaim efficiency is recorded for @memcg, and
 * only in-kernel users are notified.
 *
 * This function does not return any value.
 */
void vmpressure(gfp_t gfp, struct mem_cgroup *memcg, bool tree,
                unsigned long scanned, unsigned long reclaimed)
{
        struct vmpressure *vmpr;

        if (mem_cgroup_disabled())
                return;

        vmpr = memcg_to_vmpressure(memcg);

        /*
         * Here we only want to account pressure that userland is able to
         * help us with. For example, suppose that the DMA zone is under
         * pressure; if we notify userland about that kind of pressure,
         * then it will be mostly a waste as it will trigger unnecessary
         * freeing of memory by userland (since userland is more likely to
         * have HIGHMEM/MOVABLE pages instead of the DMA fallback). That
         * is why we include only movable, highmem and FS/IO pages.
         * Indirect reclaim (kswapd) sets sc->gfp_mask to GFP_KERNEL, so
         * we account it too.
         */
        if (!(gfp & (__GFP_HIGHMEM | __GFP_MOVABLE | __GFP_IO | __GFP_FS)))
                return;

        /*
         * If we got here with no pages scanned, then that is an indicator
         * that the reclaimer was unable to find any shrinkable LRUs at the
         * current scanning depth. But it does not mean that we should
         * report the critical pressure, yet. If the scanning priority
         * (scanning depth) goes too high (deep), we will be notified
         * through vmpressure_prio(). But so far, keep calm.
         */
        if (!scanned)
                return;

        if (tree) {
                spin_lock(&vmpr->sr_lock);
                scanned = vmpr->tree_scanned += scanned;
                vmpr->tree_reclaimed += reclaimed;
                spin_unlock(&vmpr->sr_lock);

                if (scanned < vmpressure_win)
                        return;
                schedule_work(&vmpr->work);
        } else {
                enum vmpressure_levels level;

                /* For now, no users for root-level efficiency */
                if (!memcg || mem_cgroup_is_root(memcg))
                        return;

                spin_lock(&vmpr->sr_lock);
                scanned = vmpr->scanned += scanned;
                reclaimed = vmpr->reclaimed += reclaimed;
                if (scanned < vmpressure_win) {
                        spin_unlock(&vmpr->sr_lock);
                        return;
                }
                vmpr->scanned = vmpr->reclaimed = 0;
                spin_unlock(&vmpr->sr_lock);

                level = vmpressure_calc_level(scanned, reclaimed);

                if (level > VMPRESSURE_LOW) {
                        /*
                         * Let the socket buffer allocator know that
                         * we are having trouble reclaiming LRU pages.
                         *
                         * For hysteresis, keep the pressure state
                         * asserted for a second, during which subsequent
                         * pressure events can occur.
                         */
                        WRITE_ONCE(memcg->socket_pressure, jiffies + HZ);
                }
        }
}
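
/*
 * Sketch of how the reclaim path is expected to call this function
 * (variable names assumed; see the call sites in mm/vmscan.c for the
 * authoritative usage): per-memcg deltas of the scan-control counters
 * are reported locally, and once more for the pressure root in subtree
 * mode:
 *
 *      vmpressure(sc->gfp_mask, memcg, false,
 *                 sc->nr_scanned - scanned,
 *                 sc->nr_reclaimed - reclaimed);
 *      ...
 *      vmpressure(sc->gfp_mask, sc->target_mem_cgroup, true,
 *                 sc->nr_scanned - nr_scanned,
 *                 sc->nr_reclaimed - nr_reclaimed);
 */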

/**
 * vmpressure_prio() - Account memory pressure through reclaimer priority level
 * @gfp: reclaimer's gfp mask
 * @memcg: cgroup memory controller handle
 * @prio: reclaimer's priority
 *
 * This function should be called from the reclaim path every time the
 * vmscan reclaim priority (scanning depth) changes.
 *
 * This function does not return any value.
 */
void vmpressure_prio(gfp_t gfp, struct mem_cgroup *memcg, int prio)
{
        /*
         * We only use prio for accounting the critical level. For more
         * info see the comment for the vmpressure_level_critical_prio
         * variable above.
         */
        if (prio > vmpressure_level_critical_prio)
                return;

        /*
         * OK, the prio is below the threshold, so update the vmpressure
         * information before the reclaimer dives into a long and deep
         * vmscan pass. Passing scanned = vmpressure_win, reclaimed = 0
         * to vmpressure() basically means that we signal the 'critical'
         * level.
         */
        vmpressure(gfp, memcg, true, vmpressure_win, 0);
}
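
/*
 * Why scanned = vmpressure_win, reclaimed = 0 signals "critical":
 * assuming no other reclaim activity is already accumulated in the
 * window, the window check in vmpressure() passes exactly, and the
 * calculated pressure is 100 * (1 - 0/512) = 100, which is >=
 * vmpressure_level_critical (95).
 */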

#define MAX_VMPRESSURE_ARGS_LEN (strlen("critical") + strlen("hierarchy") + 2)

/**
 * vmpressure_register_event() - Bind vmpressure notifications to an eventfd
 * @memcg: memcg that is interested in vmpressure notifications
 * @eventfd: eventfd context to link notifications with
 * @args: event arguments (pressure level threshold, optional mode)
 *
 * This function associates eventfd context with the vmpressure
 * infrastructure, so that the notifications will be delivered to the
 * @eventfd. The @args parameter is a comma-delimited string that denotes a
 * pressure level threshold (one of vmpressure_str_levels, i.e. "low",
 * "medium", or "critical") and an optional mode (one of
 * vmpressure_str_modes, i.e. "hierarchy" or "local").
 *
 * To be used as memcg event method.
 *
 * Return: 0 on success, -ENOMEM on memory failure or -EINVAL if @args could
 * not be parsed.
 */
int vmpressure_register_event(struct mem_cgroup *memcg,
                              struct eventfd_ctx *eventfd, const char *args)
{
        struct vmpressure *vmpr = memcg_to_vmpressure(memcg);
        struct vmpressure_event *ev;
        enum vmpressure_modes mode = VMPRESSURE_NO_PASSTHROUGH;
        enum vmpressure_levels level;
        char *spec, *spec_orig;
        char *token;
        int ret = 0;

        spec_orig = spec = kstrndup(args, MAX_VMPRESSURE_ARGS_LEN, GFP_KERNEL);
        if (!spec)
                return -ENOMEM;

        /* Find required level */
        token = strsep(&spec, ",");
        ret = match_string(vmpressure_str_levels, VMPRESSURE_NUM_LEVELS, token);
        if (ret < 0)
                goto out;
        level = ret;

        /* Find optional mode */
        token = strsep(&spec, ",");
        if (token) {
                ret = match_string(vmpressure_str_modes, VMPRESSURE_NUM_MODES, token);
                if (ret < 0)
                        goto out;
                mode = ret;
        }

        ev = kzalloc(sizeof(*ev), GFP_KERNEL);
        if (!ev) {
                ret = -ENOMEM;
                goto out;
        }

        ev->efd = eventfd;
        ev->level = level;
        ev->mode = mode;

        mutex_lock(&vmpr->events_lock);
        list_add(&ev->node, &vmpr->events);
        mutex_unlock(&vmpr->events_lock);
        ret = 0;
out:
        kfree(spec_orig);
        return ret;
}
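
/*
 * Userspace registration sketch (cgroup v1; paths and values assumed,
 * see Documentation/admin-guide/cgroup-v1/memory.rst for the canonical
 * description):
 *
 *      int efd = eventfd(0, 0);
 *      int cfd = open("/sys/fs/cgroup/memory/grp/memory.pressure_level",
 *                     O_RDONLY);
 *      int ecfd = open("/sys/fs/cgroup/memory/grp/cgroup.event_control",
 *                      O_WRONLY);
 *      char buf[64];
 *
 *      snprintf(buf, sizeof(buf), "%d %d medium,hierarchy", efd, cfd);
 *      write(ecfd, buf, strlen(buf));
 *
 * After this, a read() on efd blocks until a "medium" or worse event
 * fires anywhere in the group's subtree.
 */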

/**
 * vmpressure_unregister_event() - Unbind eventfd from vmpressure
 * @memcg: memcg handle
 * @eventfd: eventfd context that was used to link vmpressure with the @memcg
 *
 * This function does internal manipulations to detach the @eventfd from
 * the vmpressure notifications, and then frees internal resources
 * associated with the @eventfd (but the @eventfd itself is not freed).
 *
 * To be used as memcg event method.
 */
void vmpressure_unregister_event(struct mem_cgroup *memcg,
                                 struct eventfd_ctx *eventfd)
{
        struct vmpressure *vmpr = memcg_to_vmpressure(memcg);
        struct vmpressure_event *ev;

        mutex_lock(&vmpr->events_lock);
        list_for_each_entry(ev, &vmpr->events, node) {
                if (ev->efd != eventfd)
                        continue;
                list_del(&ev->node);
                kfree(ev);
                break;
        }
        mutex_unlock(&vmpr->events_lock);
}

/**
 * vmpressure_init() - Initialize vmpressure control structure
 * @vmpr: Structure to be initialized
 *
 * This function should be called on every allocated vmpressure structure
 * before any usage.
 */
void vmpressure_init(struct vmpressure *vmpr)
{
        spin_lock_init(&vmpr->sr_lock);
        mutex_init(&vmpr->events_lock);
        INIT_LIST_HEAD(&vmpr->events);
        INIT_WORK(&vmpr->work, vmpressure_work_fn);
}
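
/*
 * Embedding sketch (the field name is assumed to match what
 * memcg_to_vmpressure() expects): the structure is meant to live inside
 * its owner, e.g.
 *
 *      struct mem_cgroup {
 *              ...
 *              struct vmpressure vmpressure;
 *              ...
 *      };
 *
 * with vmpressure_init(&memcg->vmpressure) called once at allocation
 * time and vmpressure_cleanup(&memcg->vmpressure) before the owner is
 * freed.
 */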

/**
 * vmpressure_cleanup() - shuts down vmpressure control structure
 * @vmpr: Structure to be cleaned up
 *
 * This function should be called before the structure in which it is
 * embedded is cleaned up.
 */
void vmpressure_cleanup(struct vmpressure *vmpr)
{
        /*
         * Make sure there is no pending work before eventfd infrastructure
         * goes away.
         */
        flush_work(&vmpr->work);
}