/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#include <linux/slab.h>
#include <linux/workqueue.h>

#include "i915_active.h"
#include "gem/i915_gem_context.h"
#include "gem/i915_gem_object.h"
#include "i915_globals.h"
#include "i915_request.h"
#include "i915_scheduler.h"
#include "i915_vma.h"

static LIST_HEAD(globals);

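/*
 * Bookkeeping for the deferred shrink: @active counts the current users
 * (see i915_globals_park/unpark), while @epoch is advanced on each state
 * change so that the delayed worker can detect any activity that happened
 * while it was waiting out an RCU grace period.
 */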
static atomic_t active;
static atomic_t epoch;
static struct park_work {
	struct delayed_work work;
	struct rcu_head rcu;
	unsigned long flags;
#define PENDING 0
	int epoch;
} park;

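/* Walk all registered globals and ask each to trim its slab caches. */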
static void i915_globals_shrink(void)
{
	struct i915_global *global;

	/*
	 * kmem_cache_shrink() discards empty slabs and reorders partially
	 * filled slabs to prioritise allocating from the mostly full slabs,
	 * with the aim of reducing fragmentation.
	 */
	list_for_each_entry(global, &globals, link)
		global->shrink();
}

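/*
 * RCU callback, fired once a grace period has elapsed after parking; it
 * defers the actual shrink through a delayed worker.
 */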
static void __i915_globals_grace(struct rcu_head *rcu)
{
	/* Ratelimit parking as shrinking is quite slow */
	schedule_delayed_work(&park.work, round_jiffies_up_relative(2 * HZ));
}

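/*
 * Stamp the current epoch on the pending park and, provided we are still
 * idle, arm the RCU callback to run the shrinker after a grace period.
 */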
static void __i915_globals_queue_rcu(void)
{
	park.epoch = atomic_inc_return(&epoch);
	if (!atomic_read(&active)) {
		init_rcu_head(&park.rcu);
		call_rcu(&park.rcu, __i915_globals_grace);
	}
}

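/*
 * Delayed-work body: if the epoch still matches, nothing woke up while we
 * slept and it is safe to shrink; otherwise, wait out another grace period.
 */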
static void __i915_globals_park(struct work_struct *work)
{
	destroy_rcu_head(&park.rcu);

	/* Confirm nothing woke up in the last grace period */
	if (park.epoch != atomic_read(&epoch)) {
		__i915_globals_queue_rcu();
		return;
	}

	clear_bit(PENDING, &park.flags);
	i915_globals_shrink();
}

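/*
 * Register a global with the shrink/exit lists above; both callbacks are
 * mandatory. A rough, hypothetical sketch of a user (the "foo" names are
 * invented for illustration; the real users are the i915_global_*_init()
 * constructors listed below, which typically wrap a kmem_cache):
 *
 *	static struct i915_global_foo {
 *		struct i915_global base;
 *		struct kmem_cache *slab_foos;
 *	} global;
 *
 *	static void i915_global_foo_shrink(void)
 *	{
 *		kmem_cache_shrink(global.slab_foos);
 *	}
 *
 *	static void i915_global_foo_exit(void)
 *	{
 *		kmem_cache_destroy(global.slab_foos);
 *	}
 *
 *	int __init i915_global_foo_init(void)
 *	{
 *		global.base.shrink = i915_global_foo_shrink;
 *		global.base.exit = i915_global_foo_exit;
 *		global.slab_foos = KMEM_CACHE(foo, 0);
 *		if (!global.slab_foos)
 *			return -ENOMEM;
 *
 *		i915_global_register(&global.base);
 *		return 0;
 *	}
 */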
void __init i915_global_register(struct i915_global *global)
{
	GEM_BUG_ON(!global->shrink);
	GEM_BUG_ON(!global->exit);

	list_add_tail(&global->link, &globals);
}

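/* Tear down the globals in the reverse order to their registration. */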
static void __i915_globals_cleanup(void)
{
	struct i915_global *global, *next;

	list_for_each_entry_safe_reverse(global, next, &globals, link)
		global->exit();
}

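/*
 * Table of constructors for each global, invoked in order by
 * i915_globals_init() and unwound in reverse on failure.
 */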
static __initconst int (* const initfn[])(void) = {
	i915_global_active_init,
	i915_global_buddy_init,
	i915_global_context_init,
	i915_global_gem_context_init,
	i915_global_objects_init,
	i915_global_request_init,
	i915_global_scheduler_init,
	i915_global_vma_init,
};

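/*
 * Run every constructor in initfn[]; on failure, exit any globals that had
 * already registered themselves before propagating the error.
 */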
int __init i915_globals_init(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(initfn); i++) {
		int err;

		err = initfn[i]();
		if (err) {
			__i915_globals_cleanup();
			return err;
		}
	}

	INIT_DELAYED_WORK(&park.work, __i915_globals_park);
	return 0;
}

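/*
 * Drop one user; when the last user parks, queue the deferred shrink
 * described below.
 */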
void i915_globals_park(void)
{
	/*
	 * Defer shrinking the global slab caches (and other work) until
	 * after an RCU grace period has completed with no activity. This
	 * is to try and reduce the latency impact on the consumers caused
	 * by us shrinking the caches at the same time as they are trying
	 * to allocate, with the assumption being that if we idle long
	 * enough for an RCU grace period to elapse since the last use, it
	 * is likely to be longer until we need the caches again.
	 */
	if (!atomic_dec_and_test(&active))
		return;

	/* Queue cleanup after the next RCU grace period has freed slabs */
	if (!test_and_set_bit(PENDING, &park.flags))
		__i915_globals_queue_rcu();
}

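/*
 * Mark the globals as busy again. Bumping @epoch first invalidates any
 * park still waiting out its grace period, see __i915_globals_park().
 */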
void i915_globals_unpark(void)
{
	atomic_inc(&epoch);
	atomic_inc(&active);
}

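/*
 * Ensure any park queued before module unload has run to completion: wait
 * for the RCU callback to fire, then flush the delayed worker.
 */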
static void __exit __i915_globals_flush(void)
{
	atomic_inc(&active); /* skip shrinking */

	rcu_barrier(); /* wait for the work to be queued */
	flush_delayed_work(&park.work);

	atomic_dec(&active);
}

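/* Module unload: flush the park worker, then tear down every global. */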
void __exit i915_globals_exit(void)
{
	GEM_BUG_ON(atomic_read(&active));

	__i915_globals_flush();
	__i915_globals_cleanup();

	/* And ensure that our DESTROY_BY_RCU slabs are truly destroyed */
	rcu_barrier();
}