// SPDX-License-Identifier: GPL-2.0
/*
 * Data Access Monitor
 *
 * Author: SeongJae Park <sjpark@amazon.de>
 */

#define pr_fmt(fmt) "damon: " fmt

#include <linux/damon.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/string.h>

#define CREATE_TRACE_POINTS
#include <trace/events/damon.h>

#ifdef CONFIG_DAMON_KUNIT_TEST
#undef DAMON_MIN_REGION
#define DAMON_MIN_REGION 1
#endif

static DEFINE_MUTEX(damon_lock);
static int nr_running_ctxs;

/*
 * Construct a damon_region struct
 *
 * Returns the pointer to the new struct on success, or NULL otherwise
 */
struct damon_region *damon_new_region(unsigned long start, unsigned long end)
{
	struct damon_region *region;

	region = kmalloc(sizeof(*region), GFP_KERNEL);
	if (!region)
		return NULL;

	region->ar.start = start;
	region->ar.end = end;
	region->nr_accesses = 0;
	INIT_LIST_HEAD(&region->list);

	region->age = 0;
	region->last_nr_accesses = 0;

	return region;
}

/*
 * Add a region between two other regions
 */
inline void damon_insert_region(struct damon_region *r,
		struct damon_region *prev, struct damon_region *next,
		struct damon_target *t)
{
	__list_add(&r->list, &prev->list, &next->list);
	t->nr_regions++;
}

void damon_add_region(struct damon_region *r, struct damon_target *t)
{
	list_add_tail(&r->list, &t->regions_list);
	t->nr_regions++;
}

static void damon_del_region(struct damon_region *r, struct damon_target *t)
{
	list_del(&r->list);
	t->nr_regions--;
}

static void damon_free_region(struct damon_region *r)
{
	kfree(r);
}

void damon_destroy_region(struct damon_region *r, struct damon_target *t)
{
	damon_del_region(r, t);
	damon_free_region(r);
}

struct damos *damon_new_scheme(
		unsigned long min_sz_region, unsigned long max_sz_region,
		unsigned int min_nr_accesses, unsigned int max_nr_accesses,
		unsigned int min_age_region, unsigned int max_age_region,
		enum damos_action action, struct damos_quota *quota,
		struct damos_watermarks *wmarks)
{
	struct damos *scheme;

	scheme = kmalloc(sizeof(*scheme), GFP_KERNEL);
	if (!scheme)
		return NULL;
	scheme->min_sz_region = min_sz_region;
	scheme->max_sz_region = max_sz_region;
	scheme->min_nr_accesses = min_nr_accesses;
	scheme->max_nr_accesses = max_nr_accesses;
	scheme->min_age_region = min_age_region;
	scheme->max_age_region = max_age_region;
	scheme->action = action;
	scheme->stat = (struct damos_stat){};
	INIT_LIST_HEAD(&scheme->list);

	scheme->quota.ms = quota->ms;
	scheme->quota.sz = quota->sz;
	scheme->quota.reset_interval = quota->reset_interval;
	scheme->quota.weight_sz = quota->weight_sz;
	scheme->quota.weight_nr_accesses = quota->weight_nr_accesses;
	scheme->quota.weight_age = quota->weight_age;
	scheme->quota.total_charged_sz = 0;
	scheme->quota.total_charged_ns = 0;
	scheme->quota.esz = 0;
	scheme->quota.charged_sz = 0;
	scheme->quota.charged_from = 0;
	scheme->quota.charge_target_from = NULL;
	scheme->quota.charge_addr_from = 0;

	scheme->wmarks.metric = wmarks->metric;
	scheme->wmarks.interval = wmarks->interval;
	scheme->wmarks.high = wmarks->high;
	scheme->wmarks.mid = wmarks->mid;
	scheme->wmarks.low = wmarks->low;
	scheme->wmarks.activated = true;

	return scheme;
}

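/*
 * Illustrative usage sketch, not taken from this file: a caller would
 * typically fill a damos_quota and a damos_watermarks struct and pass them
 * to damon_new_scheme().  All values below are hypothetical examples, not
 * recommendations:
 *
 *	struct damos_quota quota = {
 *		.ms = 10, .sz = SZ_256M, .reset_interval = 1000,
 *		.weight_sz = 0, .weight_nr_accesses = 1, .weight_age = 1,
 *	};
 *	struct damos_watermarks wmarks = {
 *		.metric = DAMOS_WMARK_FREE_MEM_RATE, .interval = 5000000,
 *		.high = 500, .mid = 400, .low = 200,
 *	};
 *	struct damos *s = damon_new_scheme(PAGE_SIZE, ULONG_MAX,
 *			0, UINT_MAX, 10, UINT_MAX,
 *			DAMOS_PAGEOUT, &quota, &wmarks);
 */
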
void damon_add_scheme(struct damon_ctx *ctx, struct damos *s)
{
	list_add_tail(&s->list, &ctx->schemes);
}

static void damon_del_scheme(struct damos *s)
{
	list_del(&s->list);
}

static void damon_free_scheme(struct damos *s)
{
	kfree(s);
}

void damon_destroy_scheme(struct damos *s)
{
	damon_del_scheme(s);
	damon_free_scheme(s);
}

/*
 * Construct a damon_target struct
 *
 * Returns the pointer to the new struct on success, or NULL otherwise
 */
struct damon_target *damon_new_target(unsigned long id)
{
	struct damon_target *t;

	t = kmalloc(sizeof(*t), GFP_KERNEL);
	if (!t)
		return NULL;

	t->id = id;
	t->nr_regions = 0;
	INIT_LIST_HEAD(&t->regions_list);

	return t;
}

void damon_add_target(struct damon_ctx *ctx, struct damon_target *t)
{
	list_add_tail(&t->list, &ctx->adaptive_targets);
}

bool damon_targets_empty(struct damon_ctx *ctx)
{
	return list_empty(&ctx->adaptive_targets);
}

static void damon_del_target(struct damon_target *t)
{
	list_del(&t->list);
}

void damon_free_target(struct damon_target *t)
{
	struct damon_region *r, *next;

	damon_for_each_region_safe(r, next, t)
		damon_free_region(r);
	kfree(t);
}

void damon_destroy_target(struct damon_target *t)
{
	damon_del_target(t);
	damon_free_target(t);
}

unsigned int damon_nr_regions(struct damon_target *t)
{
	return t->nr_regions;
}

struct damon_ctx *damon_new_ctx(void)
{
	struct damon_ctx *ctx;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return NULL;

	ctx->sample_interval = 5 * 1000;
	ctx->aggr_interval = 100 * 1000;
	ctx->primitive_update_interval = 60 * 1000 * 1000;

	ktime_get_coarse_ts64(&ctx->last_aggregation);
	ctx->last_primitive_update = ctx->last_aggregation;

	mutex_init(&ctx->kdamond_lock);

	ctx->min_nr_regions = 10;
	ctx->max_nr_regions = 1000;

	INIT_LIST_HEAD(&ctx->adaptive_targets);
	INIT_LIST_HEAD(&ctx->schemes);

	return ctx;
}

static void damon_destroy_targets(struct damon_ctx *ctx)
{
	struct damon_target *t, *next_t;

	if (ctx->primitive.cleanup) {
		ctx->primitive.cleanup(ctx);
		return;
	}

	damon_for_each_target_safe(t, next_t, ctx)
		damon_destroy_target(t);
}

void damon_destroy_ctx(struct damon_ctx *ctx)
{
	struct damos *s, *next_s;

	damon_destroy_targets(ctx);

	damon_for_each_scheme_safe(s, next_s, ctx)
		damon_destroy_scheme(s);

	kfree(ctx);
}

/**
 * damon_set_targets() - Set monitoring targets.
 * @ctx:	monitoring context
 * @ids:	array of target ids
 * @nr_ids:	number of entries in @ids
 *
 * This function should not be called while the kdamond is running.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int damon_set_targets(struct damon_ctx *ctx,
		      unsigned long *ids, ssize_t nr_ids)
{
	ssize_t i;
	struct damon_target *t, *next;

	damon_destroy_targets(ctx);

	for (i = 0; i < nr_ids; i++) {
		t = damon_new_target(ids[i]);
		if (!t) {
			/* The caller should do cleanup of the ids itself */
			damon_for_each_target_safe(t, next, ctx)
				damon_destroy_target(t);
			return -ENOMEM;
		}
		damon_add_target(ctx, t);
	}

	return 0;
}

/**
 * damon_set_attrs() - Set attributes for the monitoring.
 * @ctx:		monitoring context
 * @sample_int:		time interval between samplings
 * @aggr_int:		time interval between aggregations
 * @primitive_upd_int:	time interval between monitoring primitive updates
 * @min_nr_reg:		minimum number of regions
 * @max_nr_reg:		maximum number of regions
 *
 * This function should not be called while the kdamond is running.
 * Every time interval is in microseconds.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int damon_set_attrs(struct damon_ctx *ctx, unsigned long sample_int,
		unsigned long aggr_int, unsigned long primitive_upd_int,
		unsigned long min_nr_reg, unsigned long max_nr_reg)
{
	if (min_nr_reg < 3)
		return -EINVAL;
	if (min_nr_reg > max_nr_reg)
		return -EINVAL;

	ctx->sample_interval = sample_int;
	ctx->aggr_interval = aggr_int;
	ctx->primitive_update_interval = primitive_upd_int;
	ctx->min_nr_regions = min_nr_reg;
	ctx->max_nr_regions = max_nr_reg;

	return 0;
}

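/*
 * A minimal setup sketch with a hypothetical caller, for illustration only.
 * Since every interval is in microseconds, the call below asks for 5 ms
 * sampling, 100 ms aggregation, and 60 s primitive updates, i.e. the same
 * values damon_new_ctx() sets by default, with 10 to 1000 regions:
 *
 *	struct damon_ctx *ctx = damon_new_ctx();
 *
 *	if (ctx && damon_set_attrs(ctx, 5000, 100000, 60000000, 10, 1000))
 *		pr_err("failed setting monitoring attributes\n");
 */
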
/**
 * damon_set_schemes() - Set data access monitoring based operation schemes.
 * @ctx:	monitoring context
 * @schemes:	array of the schemes
 * @nr_schemes:	number of entries in @schemes
 *
 * This function should not be called while the kdamond of the context is
 * running.
 *
 * Return: 0 on success, or negative error code otherwise.
 */
int damon_set_schemes(struct damon_ctx *ctx, struct damos **schemes,
			ssize_t nr_schemes)
{
	struct damos *s, *next;
	ssize_t i;

	damon_for_each_scheme_safe(s, next, ctx)
		damon_destroy_scheme(s);
	for (i = 0; i < nr_schemes; i++)
		damon_add_scheme(ctx, schemes[i]);
	return 0;
}

/**
 * damon_nr_running_ctxs() - Return number of currently running contexts.
 */
int damon_nr_running_ctxs(void)
{
	int nr_ctxs;

	mutex_lock(&damon_lock);
	nr_ctxs = nr_running_ctxs;
	mutex_unlock(&damon_lock);

	return nr_ctxs;
}

/* Returns the size upper limit for each monitoring region */
static unsigned long damon_region_sz_limit(struct damon_ctx *ctx)
{
	struct damon_target *t;
	struct damon_region *r;
	unsigned long sz = 0;

	damon_for_each_target(t, ctx) {
		damon_for_each_region(r, t)
			sz += r->ar.end - r->ar.start;
	}

	if (ctx->min_nr_regions)
		sz /= ctx->min_nr_regions;
	if (sz < DAMON_MIN_REGION)
		sz = DAMON_MIN_REGION;

	return sz;
}

static int kdamond_fn(void *data);

/*
 * __damon_start() - Starts monitoring with the given context.
 * @ctx:	monitoring context
 *
 * This function should be called while damon_lock is held.
 *
 * Return: 0 on success, negative error code otherwise.
 */
static int __damon_start(struct damon_ctx *ctx)
{
	int err = -EBUSY;

	mutex_lock(&ctx->kdamond_lock);
	if (!ctx->kdamond) {
		err = 0;
		ctx->kdamond = kthread_run(kdamond_fn, ctx, "kdamond.%d",
				nr_running_ctxs);
		if (IS_ERR(ctx->kdamond)) {
			err = PTR_ERR(ctx->kdamond);
			ctx->kdamond = NULL;
		}
	}
	mutex_unlock(&ctx->kdamond_lock);

	return err;
}

/**
 * damon_start() - Starts monitoring for a given group of contexts.
 * @ctxs:	an array of the pointers for contexts to start monitoring
 * @nr_ctxs:	size of @ctxs
 *
 * This function starts a group of monitoring threads for a group of
 * monitoring contexts.  One thread per context is created and runs in
 * parallel.  The caller should handle synchronization between the threads
 * by itself.  If a group of threads created by another 'damon_start()' call
 * is currently running, this function does nothing but returns -EBUSY.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int damon_start(struct damon_ctx **ctxs, int nr_ctxs)
{
	int i;
	int err = 0;

	mutex_lock(&damon_lock);
	if (nr_running_ctxs) {
		mutex_unlock(&damon_lock);
		return -EBUSY;
	}

	for (i = 0; i < nr_ctxs; i++) {
		err = __damon_start(ctxs[i]);
		if (err)
			break;
		nr_running_ctxs++;
	}
	mutex_unlock(&damon_lock);

	return err;
}

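/*
 * Hypothetical caller sketch illustrating the group semantics above:
 * contexts started by one damon_start() call form one group, and a second
 * group cannot be started (-EBUSY) before the first one finishes.
 *
 *	struct damon_ctx *ctxs[1] = { ctx };
 *
 *	if (!damon_start(ctxs, 1)) {
 *		(monitoring now runs in a "kdamond.N" kthread)
 *		damon_stop(ctxs, 1);
 *	}
 */
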
/*
 * __damon_stop() - Stops monitoring of the given context.
 * @ctx:	monitoring context
 *
 * Return: 0 on success, negative error code otherwise.
 */
static int __damon_stop(struct damon_ctx *ctx)
{
	struct task_struct *tsk;

	mutex_lock(&ctx->kdamond_lock);
	tsk = ctx->kdamond;
	if (tsk) {
		get_task_struct(tsk);
		mutex_unlock(&ctx->kdamond_lock);
		kthread_stop(tsk);
		put_task_struct(tsk);
		return 0;
	}
	mutex_unlock(&ctx->kdamond_lock);

	return -EPERM;
}

/**
 * damon_stop() - Stops monitoring for a given group of contexts.
 * @ctxs:	an array of the pointers for contexts to stop monitoring
 * @nr_ctxs:	size of @ctxs
 *
 * Return: 0 on success, negative error code otherwise.
 */
int damon_stop(struct damon_ctx **ctxs, int nr_ctxs)
{
	int i, err = 0;

	for (i = 0; i < nr_ctxs; i++) {
		/* nr_running_ctxs is decremented in kdamond_fn */
		err = __damon_stop(ctxs[i]);
		if (err)
			return err;
	}

	return err;
}

/*
 * damon_check_reset_time_interval() - Check if a time interval is elapsed.
 * @baseline:	the time to check whether the interval has elapsed since
 * @interval:	the time interval (microseconds)
 *
 * See whether the given time interval has passed since the given baseline
 * time.  If so, it also updates the baseline to the current time for the
 * next check.
 *
 * Return:	true if the time interval has passed, or false otherwise.
 */
static bool damon_check_reset_time_interval(struct timespec64 *baseline,
		unsigned long interval)
{
	struct timespec64 now;

	ktime_get_coarse_ts64(&now);
	if ((timespec64_to_ns(&now) - timespec64_to_ns(baseline)) <
			interval * 1000)
		return false;
	*baseline = now;
	return true;
}

/*
 * Check whether it is time to flush the aggregated information
 */
static bool kdamond_aggregate_interval_passed(struct damon_ctx *ctx)
{
	return damon_check_reset_time_interval(&ctx->last_aggregation,
			ctx->aggr_interval);
}

/*
 * Reset the aggregated monitoring results ('nr_accesses' of each region).
 */
static void kdamond_reset_aggregated(struct damon_ctx *c)
{
	struct damon_target *t;

	damon_for_each_target(t, c) {
		struct damon_region *r;

		damon_for_each_region(r, t) {
			trace_damon_aggregated(t, r, damon_nr_regions(t));
			r->last_nr_accesses = r->nr_accesses;
			r->nr_accesses = 0;
		}
	}
}

static void damon_split_region_at(struct damon_ctx *ctx,
		struct damon_target *t, struct damon_region *r,
		unsigned long sz_r);

static bool __damos_valid_target(struct damon_region *r, struct damos *s)
{
	unsigned long sz;

	sz = r->ar.end - r->ar.start;
	return s->min_sz_region <= sz && sz <= s->max_sz_region &&
		s->min_nr_accesses <= r->nr_accesses &&
		r->nr_accesses <= s->max_nr_accesses &&
		s->min_age_region <= r->age && r->age <= s->max_age_region;
}

static bool damos_valid_target(struct damon_ctx *c, struct damon_target *t,
		struct damon_region *r, struct damos *s)
{
	bool ret = __damos_valid_target(r, s);

	if (!ret || !s->quota.esz || !c->primitive.get_scheme_score)
		return ret;

	return c->primitive.get_scheme_score(c, t, r, s) >= s->quota.min_score;
}

static void damon_do_apply_schemes(struct damon_ctx *c,
				   struct damon_target *t,
				   struct damon_region *r)
{
	struct damos *s;

	damon_for_each_scheme(s, c) {
		struct damos_quota *quota = &s->quota;
		unsigned long sz = r->ar.end - r->ar.start;
		struct timespec64 begin, end;
		unsigned long sz_applied = 0;

		if (!s->wmarks.activated)
			continue;

		/* Check the quota */
		if (quota->esz && quota->charged_sz >= quota->esz)
			continue;

		/* Skip previously charged regions */
		if (quota->charge_target_from) {
			if (t != quota->charge_target_from)
				continue;
			if (r == damon_last_region(t)) {
				quota->charge_target_from = NULL;
				quota->charge_addr_from = 0;
				continue;
			}
			if (quota->charge_addr_from &&
					r->ar.end <= quota->charge_addr_from)
				continue;

			if (quota->charge_addr_from && r->ar.start <
					quota->charge_addr_from) {
				sz = ALIGN_DOWN(quota->charge_addr_from -
						r->ar.start, DAMON_MIN_REGION);
				if (!sz) {
					if (r->ar.end - r->ar.start <=
							DAMON_MIN_REGION)
						continue;
					sz = DAMON_MIN_REGION;
				}
				damon_split_region_at(c, t, r, sz);
				r = damon_next_region(r);
				sz = r->ar.end - r->ar.start;
			}
			quota->charge_target_from = NULL;
			quota->charge_addr_from = 0;
		}

		if (!damos_valid_target(c, t, r, s))
			continue;

		/* Apply the scheme */
		if (c->primitive.apply_scheme) {
			if (quota->esz &&
					quota->charged_sz + sz > quota->esz) {
				sz = ALIGN_DOWN(quota->esz - quota->charged_sz,
						DAMON_MIN_REGION);
				if (!sz)
					goto update_stat;
				damon_split_region_at(c, t, r, sz);
			}
			ktime_get_coarse_ts64(&begin);
			sz_applied = c->primitive.apply_scheme(c, t, r, s);
			ktime_get_coarse_ts64(&end);
			quota->total_charged_ns += timespec64_to_ns(&end) -
				timespec64_to_ns(&begin);
			quota->charged_sz += sz;
			if (quota->esz && quota->charged_sz >= quota->esz) {
				quota->charge_target_from = t;
				quota->charge_addr_from = r->ar.end + 1;
			}
		}
		if (s->action != DAMOS_STAT)
			r->age = 0;

update_stat:
		s->stat.nr_tried++;
		s->stat.sz_tried += sz;
		if (sz_applied)
			s->stat.nr_applied++;
		s->stat.sz_applied += sz_applied;
	}
}

/* Shouldn't be called if quota->ms and quota->sz are zero */
static void damos_set_effective_quota(struct damos_quota *quota)
{
	unsigned long throughput;
	unsigned long esz;

	if (!quota->ms) {
		quota->esz = quota->sz;
		return;
	}

	if (quota->total_charged_ns)
		throughput = quota->total_charged_sz * 1000000 /
			quota->total_charged_ns;
	else
		throughput = PAGE_SIZE * 1024;
	esz = throughput * quota->ms;

	if (quota->sz && quota->sz < esz)
		esz = quota->sz;
	quota->esz = esz;
}

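/*
 * Worked example for the estimate above, with illustrative numbers: if
 * 4 MiB (4194304 bytes) were charged over 2 ms (2000000 ns) of cumulated
 * apply time, the measured throughput is 4194304 * 1000000 / 2000000 =
 * 2097152 bytes per millisecond, so a 10 ms time quota yields an effective
 * size quota (esz) of about 20 MiB, further capped by quota->sz if that is
 * smaller.  Before any charging has happened, PAGE_SIZE * 1024 per
 * millisecond is assumed.
 */
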
static void kdamond_apply_schemes(struct damon_ctx *c)
{
	struct damon_target *t;
	struct damon_region *r, *next_r;
	struct damos *s;

	damon_for_each_scheme(s, c) {
		struct damos_quota *quota = &s->quota;
		unsigned long cumulated_sz;
		unsigned int score, max_score = 0;

		if (!s->wmarks.activated)
			continue;

		if (!quota->ms && !quota->sz)
			continue;

		/* New charge window starts */
		if (time_after_eq(jiffies, quota->charged_from +
					msecs_to_jiffies(
						quota->reset_interval))) {
			if (quota->esz && quota->charged_sz >= quota->esz)
				s->stat.qt_exceeds++;
			quota->total_charged_sz += quota->charged_sz;
			quota->charged_from = jiffies;
			quota->charged_sz = 0;
			damos_set_effective_quota(quota);
		}

		if (!c->primitive.get_scheme_score)
			continue;

		/* Fill up the score histogram */
		memset(quota->histogram, 0, sizeof(quota->histogram));
		damon_for_each_target(t, c) {
			damon_for_each_region(r, t) {
				if (!__damos_valid_target(r, s))
					continue;
				score = c->primitive.get_scheme_score(
						c, t, r, s);
				quota->histogram[score] +=
					r->ar.end - r->ar.start;
				if (score > max_score)
					max_score = score;
			}
		}

		/* Set the min score limit */
		for (cumulated_sz = 0, score = max_score; ; score--) {
			cumulated_sz += quota->histogram[score];
			if (cumulated_sz >= quota->esz || !score)
				break;
		}
		quota->min_score = score;
	}

	damon_for_each_target(t, c) {
		damon_for_each_region_safe(r, next_r, t)
			damon_do_apply_schemes(c, t, r);
	}
}

static inline unsigned long sz_damon_region(struct damon_region *r)
{
	return r->ar.end - r->ar.start;
}

/*
 * Merge two adjacent regions into one region
 */
static void damon_merge_two_regions(struct damon_target *t,
		struct damon_region *l, struct damon_region *r)
{
	unsigned long sz_l = sz_damon_region(l), sz_r = sz_damon_region(r);

	l->nr_accesses = (l->nr_accesses * sz_l + r->nr_accesses * sz_r) /
			(sz_l + sz_r);
	l->age = (l->age * sz_l + r->age * sz_r) / (sz_l + sz_r);
	l->ar.end = r->ar.end;
	damon_destroy_region(r, t);
}

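/*
 * Worked example for the size-weighted average above (illustrative numbers):
 * merging an 8 KiB region with nr_accesses 10 and a 2 KiB region with
 * nr_accesses 0 yields (10 * 8192 + 0 * 2048) / 10240 = 8, so the larger
 * region dominates the merged access frequency.  Ages are averaged the same
 * way.
 */
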
/*
 * Merge adjacent regions having similar access frequencies
 *
 * t		target affected by this merge operation
 * thres	'->nr_accesses' diff threshold for the merge
 * sz_limit	size upper limit of each region
 */
static void damon_merge_regions_of(struct damon_target *t, unsigned int thres,
				   unsigned long sz_limit)
{
	struct damon_region *r, *prev = NULL, *next;

	damon_for_each_region_safe(r, next, t) {
		if (abs(r->nr_accesses - r->last_nr_accesses) > thres)
			r->age = 0;
		else
			r->age++;

		if (prev && prev->ar.end == r->ar.start &&
		    abs(prev->nr_accesses - r->nr_accesses) <= thres &&
		    sz_damon_region(prev) + sz_damon_region(r) <= sz_limit)
			damon_merge_two_regions(t, prev, r);
		else
			prev = r;
	}
}

/*
 * Merge adjacent regions having similar access frequencies
 *
 * threshold	'->nr_accesses' diff threshold for the merge
 * sz_limit	size upper limit of each region
 *
 * This function merges monitoring target regions which are adjacent and their
 * access frequencies are similar.  This is for minimizing the monitoring
 * overhead under the dynamically changeable access pattern.  If a merge was
 * unnecessarily made, later 'kdamond_split_regions()' will revert it.
 */
static void kdamond_merge_regions(struct damon_ctx *c, unsigned int threshold,
				  unsigned long sz_limit)
{
	struct damon_target *t;

	damon_for_each_target(t, c)
		damon_merge_regions_of(t, threshold, sz_limit);
}

/*
 * Split a region in two
 *
 * r		the region to be split
 * sz_r		size of the first sub-region that will be made
 */
static void damon_split_region_at(struct damon_ctx *ctx,
		struct damon_target *t, struct damon_region *r,
		unsigned long sz_r)
{
	struct damon_region *new;

	new = damon_new_region(r->ar.start + sz_r, r->ar.end);
	if (!new)
		return;

	r->ar.end = new->ar.start;

	new->age = r->age;
	new->last_nr_accesses = r->last_nr_accesses;

	damon_insert_region(new, r, damon_next_region(r), t);
}

/* Split every region in the given target into 'nr_subs' regions */
static void damon_split_regions_of(struct damon_ctx *ctx,
				   struct damon_target *t, int nr_subs)
{
	struct damon_region *r, *next;
	unsigned long sz_region, sz_sub = 0;
	int i;

	damon_for_each_region_safe(r, next, t) {
		sz_region = r->ar.end - r->ar.start;

		for (i = 0; i < nr_subs - 1 &&
				sz_region > 2 * DAMON_MIN_REGION; i++) {
			/*
			 * Randomly select size of left sub-region to be at
			 * least 10% and at most 90% of the original region
			 */
			sz_sub = ALIGN_DOWN(damon_rand(1, 10) *
					sz_region / 10, DAMON_MIN_REGION);
			/* Do not allow blank region */
			if (sz_sub == 0 || sz_sub >= sz_region)
				continue;

			damon_split_region_at(ctx, t, r, sz_sub);
			sz_region = sz_sub;
		}
	}
}

/*
 * Split every target region into randomly-sized small regions
 *
 * This function splits every target region into random-sized small regions
 * if the current total number of the regions is equal to or smaller than
 * half of the user-specified maximum number of regions.  This is for
 * maximizing the monitoring accuracy under the dynamically changeable access
 * patterns.  If a split was unnecessarily made, later
 * 'kdamond_merge_regions()' will revert it.
 */
static void kdamond_split_regions(struct damon_ctx *ctx)
{
	struct damon_target *t;
	unsigned int nr_regions = 0;
	static unsigned int last_nr_regions;
	int nr_subregions = 2;

	damon_for_each_target(t, ctx)
		nr_regions += damon_nr_regions(t);

	if (nr_regions > ctx->max_nr_regions / 2)
		return;

	/* Maybe the middle of the region has different access frequency */
	if (last_nr_regions == nr_regions &&
			nr_regions < ctx->max_nr_regions / 3)
		nr_subregions = 3;

	damon_for_each_target(t, ctx)
		damon_split_regions_of(ctx, t, nr_subregions);

	last_nr_regions = nr_regions;
}

/*
 * Check whether it is time to update the monitoring primitives
 *
 * Returns true if it is.
 */
static bool kdamond_need_update_primitive(struct damon_ctx *ctx)
{
	return damon_check_reset_time_interval(&ctx->last_primitive_update,
			ctx->primitive_update_interval);
}

/*
 * Check whether current monitoring should be stopped
 *
 * The monitoring is stopped when either the user has requested to stop, or
 * all monitoring targets are invalid.
 *
 * Returns true if the current monitoring should be stopped.
 */
static bool kdamond_need_stop(struct damon_ctx *ctx)
{
	struct damon_target *t;

	if (kthread_should_stop())
		return true;

	if (!ctx->primitive.target_valid)
		return false;

	damon_for_each_target(t, ctx) {
		if (ctx->primitive.target_valid(t))
			return false;
	}

	return true;
}

static unsigned long damos_wmark_metric_value(enum damos_wmark_metric metric)
{
	struct sysinfo i;

	switch (metric) {
	case DAMOS_WMARK_FREE_MEM_RATE:
		si_meminfo(&i);
		return i.freeram * 1000 / i.totalram;
	default:
		break;
	}
	return -EINVAL;
}

/*
 * Returns zero if the scheme is active.  Else, returns time to wait for the
 * next watermark check in microseconds.
 */
static unsigned long damos_wmark_wait_us(struct damos *scheme)
{
	unsigned long metric;

	if (scheme->wmarks.metric == DAMOS_WMARK_NONE)
		return 0;

	metric = damos_wmark_metric_value(scheme->wmarks.metric);
	/* higher than high watermark or lower than low watermark */
	if (metric > scheme->wmarks.high || scheme->wmarks.low > metric) {
		if (scheme->wmarks.activated)
			pr_debug("deactivate a scheme (%d) for %s wmark\n",
					scheme->action,
					metric > scheme->wmarks.high ?
					"high" : "low");
		scheme->wmarks.activated = false;
		return scheme->wmarks.interval;
	}

	/* inactive and higher than middle watermark */
	if ((scheme->wmarks.high >= metric && metric >= scheme->wmarks.mid) &&
			!scheme->wmarks.activated)
		return scheme->wmarks.interval;

	if (!scheme->wmarks.activated)
		pr_debug("activate a scheme (%d)\n", scheme->action);
	scheme->wmarks.activated = true;
	return 0;
}

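/*
 * Illustrative walk-through with hypothetical values: with
 * DAMOS_WMARK_FREE_MEM_RATE and high/mid/low watermarks of 500/400/200 (per
 * thousand), a scheme is deactivated while more than 50% or less than 20%
 * of memory is free.  Once deactivated, it is re-activated only after the
 * free memory rate falls to 40% (the middle watermark) or below, and it
 * then stays active anywhere between 20% and 50%, giving hysteresis around
 * the middle watermark.
 */
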
static void kdamond_usleep(unsigned long usecs)
{
	/* See Documentation/timers/timers-howto.rst for the thresholds */
	if (usecs > 20 * USEC_PER_MSEC)
		schedule_timeout_idle(usecs_to_jiffies(usecs));
	else
		usleep_idle_range(usecs, usecs + 1);
}

/* Returns negative error code if it's not activated but should return */
static int kdamond_wait_activation(struct damon_ctx *ctx)
{
	struct damos *s;
	unsigned long wait_time;
	unsigned long min_wait_time = 0;

	while (!kdamond_need_stop(ctx)) {
		damon_for_each_scheme(s, ctx) {
			wait_time = damos_wmark_wait_us(s);
			if (!min_wait_time || wait_time < min_wait_time)
				min_wait_time = wait_time;
		}
		if (!min_wait_time)
			return 0;

		kdamond_usleep(min_wait_time);
	}
	return -EBUSY;
}

/*
 * The monitoring daemon that runs as a kernel thread
 */
static int kdamond_fn(void *data)
{
	struct damon_ctx *ctx = (struct damon_ctx *)data;
	struct damon_target *t;
	struct damon_region *r, *next;
	unsigned int max_nr_accesses = 0;
	unsigned long sz_limit = 0;
	bool done = false;

	pr_debug("kdamond (%d) starts\n", current->pid);

	if (ctx->primitive.init)
		ctx->primitive.init(ctx);
	if (ctx->callback.before_start && ctx->callback.before_start(ctx))
		done = true;

	sz_limit = damon_region_sz_limit(ctx);

	while (!kdamond_need_stop(ctx) && !done) {
		if (kdamond_wait_activation(ctx))
			continue;

		if (ctx->primitive.prepare_access_checks)
			ctx->primitive.prepare_access_checks(ctx);
		if (ctx->callback.after_sampling &&
				ctx->callback.after_sampling(ctx))
			done = true;

		kdamond_usleep(ctx->sample_interval);

		if (ctx->primitive.check_accesses)
			max_nr_accesses = ctx->primitive.check_accesses(ctx);

		if (kdamond_aggregate_interval_passed(ctx)) {
			kdamond_merge_regions(ctx,
					max_nr_accesses / 10,
					sz_limit);
			if (ctx->callback.after_aggregation &&
					ctx->callback.after_aggregation(ctx))
				done = true;
			kdamond_apply_schemes(ctx);
			kdamond_reset_aggregated(ctx);
			kdamond_split_regions(ctx);
			if (ctx->primitive.reset_aggregated)
				ctx->primitive.reset_aggregated(ctx);
		}

		if (kdamond_need_update_primitive(ctx)) {
			if (ctx->primitive.update)
				ctx->primitive.update(ctx);
			sz_limit = damon_region_sz_limit(ctx);
		}
	}
	damon_for_each_target(t, ctx) {
		damon_for_each_region_safe(r, next, t)
			damon_destroy_region(r, t);
	}

	if (ctx->callback.before_terminate)
		ctx->callback.before_terminate(ctx);
	if (ctx->primitive.cleanup)
		ctx->primitive.cleanup(ctx);

	pr_debug("kdamond (%d) finishes\n", current->pid);
	mutex_lock(&ctx->kdamond_lock);
	ctx->kdamond = NULL;
	mutex_unlock(&ctx->kdamond_lock);

	mutex_lock(&damon_lock);
	nr_running_ctxs--;
	mutex_unlock(&damon_lock);

	return 0;
}

#include "core-test.h"