/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#include <linux/slab.h>
#include <linux/workqueue.h>

#include "i915_active.h"
#include "gem/i915_gem_context.h"
#include "gem/i915_gem_object.h"
#include "i915_globals.h"
#include "i915_request.h"
#include "i915_scheduler.h"
#include "i915_vma.h"

static LIST_HEAD(globals);

static atomic_t active;
static atomic_t epoch;
static struct park_work {
	struct delayed_work work;
	struct rcu_head rcu;
	unsigned long flags;
#define PENDING 0
	int epoch;
} park;
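
/*
 * The parking state above works as follows: 'active' counts the current
 * users of the global caches; 'epoch' is a generation counter bumped on
 * every queue/unpark transition so that a deferred shrink can detect an
 * intervening wakeup (i915_globals_unpark() bumps it to invalidate a
 * pending shrink); and the PENDING bit ensures at most one shrink is
 * queued at a time.
 */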

static void i915_globals_shrink(void)
{
	struct i915_global *global;

	/*
	 * kmem_cache_shrink() discards empty slabs and reorders partially
	 * filled slabs to prioritise allocating from the mostly full slabs,
	 * with the aim of reducing fragmentation.
	 */
	list_for_each_entry(global, &globals, link)
		global->shrink();
}
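
/*
 * A representative user of this interface (an illustrative sketch only;
 * "foo" is a placeholder type, not one of the caches in this file). Each
 * user embeds an i915_global in a module-local struct, points the
 * shrink/exit hooks at its kmem_cache, and registers during init:
 *
 *	static struct i915_global_foo {
 *		struct i915_global base;
 *		struct kmem_cache *slab_foos;
 *	} global;
 *
 *	static void i915_global_foo_shrink(void)
 *	{
 *		kmem_cache_shrink(global.slab_foos);
 *	}
 *
 *	static void i915_global_foo_exit(void)
 *	{
 *		kmem_cache_destroy(global.slab_foos);
 *	}
 *
 *	int __init i915_global_foo_init(void)
 *	{
 *		global.base.shrink = i915_global_foo_shrink;
 *		global.base.exit = i915_global_foo_exit;
 *
 *		global.slab_foos = KMEM_CACHE(foo, SLAB_HWCACHE_ALIGN);
 *		if (!global.slab_foos)
 *			return -ENOMEM;
 *
 *		i915_global_register(&global.base);
 *		return 0;
 *	}
 */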

static void __i915_globals_grace(struct rcu_head *rcu)
{
	/* Ratelimit parking as shrinking is quite slow */
	schedule_delayed_work(&park.work, round_jiffies_up_relative(2 * HZ));
}

static void __i915_globals_queue_rcu(void)
{
	park.epoch = atomic_inc_return(&epoch);
	if (!atomic_read(&active)) {
		init_rcu_head(&park.rcu);
		call_rcu(&park.rcu, __i915_globals_grace);
	}
}

static void __i915_globals_park(struct work_struct *work)
{
	destroy_rcu_head(&park.rcu);

	/* Confirm nothing woke up in the last grace period */
	if (park.epoch != atomic_read(&epoch)) {
		__i915_globals_queue_rcu();
		return;
	}

	clear_bit(PENDING, &park.flags);
	i915_globals_shrink();
}

void __init i915_global_register(struct i915_global *global)
{
	GEM_BUG_ON(!global->shrink);
	GEM_BUG_ON(!global->exit);

	list_add_tail(&global->link, &globals);
}

static void __i915_globals_cleanup(void)
{
	struct i915_global *global, *next;

	list_for_each_entry_safe_reverse(global, next, &globals, link)
		global->exit();
}

static __initconst int (* const initfn[])(void) = {
	i915_global_active_init,
	i915_global_buddy_init,
	i915_global_context_init,
	i915_global_gem_context_init,
	i915_global_objects_init,
	i915_global_request_init,
	i915_global_scheduler_init,
	i915_global_vma_init,
};
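
/*
 * Registration order defines teardown order: i915_global_register()
 * appends to the list and __i915_globals_cleanup() walks it in reverse,
 * so a failure partway through initfn[] only unwinds the globals that
 * were successfully registered before it.
 */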

int __init i915_globals_init(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(initfn); i++) {
		int err;

		err = initfn[i]();
		if (err) {
			__i915_globals_cleanup();
			return err;
		}
	}

	INIT_DELAYED_WORK(&park.work, __i915_globals_park);
	return 0;
}

void i915_globals_park(void)
{
	/*
	 * Defer shrinking the global slab caches (and other work) until
	 * after an RCU grace period has completed with no activity. This
	 * is to try and reduce the latency impact on the consumers caused
	 * by us shrinking the caches at the same time as they are trying to
	 * allocate, with the assumption being that if we idle long enough
	 * for an RCU grace period to elapse since the last use, it is likely
	 * to be longer until we need the caches again.
	 */
	if (!atomic_dec_and_test(&active))
		return;

	/* Queue cleanup after the next RCU grace period has freed slabs */
	if (!test_and_set_bit(PENDING, &park.flags))
		__i915_globals_queue_rcu();
}

void i915_globals_unpark(void)
{
	atomic_inc(&epoch);
	atomic_inc(&active);
}
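
/*
 * Usage sketch (illustrative only; in the driver these calls are made
 * from the GT power management paths as the device first wakes and
 * finally idles):
 *
 *	i915_globals_unpark();
 *	... device busy: requests, contexts and vma allocated ...
 *	i915_globals_park(); // the last user queues the deferred shrink
 */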

static void __exit __i915_globals_flush(void)
{
	atomic_inc(&active); /* skip shrinking */

	rcu_barrier(); /* wait for the work to be queued */
	flush_delayed_work(&park.work);

	atomic_dec(&active);
}
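
/*
 * The ordering in __i915_globals_flush() matters: raising 'active' stops
 * __i915_globals_queue_rcu() from rearming the RCU callback, the
 * rcu_barrier() waits for any grace-period callback already in flight to
 * schedule the delayed work, and flush_delayed_work() then waits for that
 * work to finish before 'active' is dropped again.
 */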

void __exit i915_globals_exit(void)
{
	GEM_BUG_ON(atomic_read(&active));

	__i915_globals_flush();
	__i915_globals_cleanup();

	/* And ensure that our DESTROY_BY_RCU slabs are truly destroyed */
	rcu_barrier();
}