Thomas Gleixner | 457c899 | 2019-05-19 13:08:55 +0100 | [diff] [blame] | 1 | // SPDX-License-Identifier: GPL-2.0-only |
Mark Gross | d82b351 | 2008-02-04 22:30:08 -0800 | [diff] [blame] | 2 | /* |
Rafael J. Wysocki | fe52de3 | 2020-02-12 00:35:39 +0100 | [diff] [blame] | 3 | * Power Management Quality of Service (PM QoS) support base. |
Mark Gross | d82b351 | 2008-02-04 22:30:08 -0800 | [diff] [blame] | 4 | * |
Rafael J. Wysocki | fe52de3 | 2020-02-12 00:35:39 +0100 | [diff] [blame] | 5 | * Copyright (C) 2020 Intel Corporation |
Mark Gross | d82b351 | 2008-02-04 22:30:08 -0800 | [diff] [blame] | 6 | * |
Rafael J. Wysocki | fe52de3 | 2020-02-12 00:35:39 +0100 | [diff] [blame] | 7 | * Authors: |
| 8 | * Mark Gross <mgross@linux.intel.com> |
| 9 | * Rafael J. Wysocki <rafael.j.wysocki@intel.com> |
Mark Gross | d82b351 | 2008-02-04 22:30:08 -0800 | [diff] [blame] | 10 | * |
Rafael J. Wysocki | fe52de3 | 2020-02-12 00:35:39 +0100 | [diff] [blame] | 11 | * Provided here is an interface for specifying PM QoS dependencies. It allows |
| 12 | * entities depending on QoS constraints to register their requests which are |
| 13 | * aggregated as appropriate to produce effective constraints (target values) |
| 14 | * that can be monitored by entities needing to respect them, either by polling |
| 15 | * or through a built-in notification mechanism. |
Mark Gross | d82b351 | 2008-02-04 22:30:08 -0800 | [diff] [blame] | 16 | * |
Rafael J. Wysocki | fe52de3 | 2020-02-12 00:35:39 +0100 | [diff] [blame] | 17 | * In addition to the basic functionality, more specific interfaces for managing |
| 18 | * global CPU latency QoS requests and frequency QoS requests are provided. |
Mark Gross | d82b351 | 2008-02-04 22:30:08 -0800 | [diff] [blame] | 19 | */ |
| 20 | |
Mark Gross | ed77134 | 2010-05-06 01:59:26 +0200 | [diff] [blame] | 21 | /*#define DEBUG*/ |
| 22 | |
Jean Pihet | e8db0be | 2011-08-25 15:35:03 +0200 | [diff] [blame] | 23 | #include <linux/pm_qos.h> |
Mark Gross | d82b351 | 2008-02-04 22:30:08 -0800 | [diff] [blame] | 24 | #include <linux/sched.h> |
| 25 | #include <linux/spinlock.h> |
| 26 | #include <linux/slab.h> |
| 27 | #include <linux/time.h> |
| 28 | #include <linux/fs.h> |
| 29 | #include <linux/device.h> |
| 30 | #include <linux/miscdevice.h> |
| 31 | #include <linux/string.h> |
| 32 | #include <linux/platform_device.h> |
| 33 | #include <linux/init.h> |
Rafael J. Wysocki | 0775a60 | 2011-05-27 00:05:23 +0200 | [diff] [blame] | 34 | #include <linux/kernel.h> |
Nishanth Menon | f5f4eda | 2014-12-05 11:19:08 -0600 | [diff] [blame] | 35 | #include <linux/debugfs.h> |
| 36 | #include <linux/seq_file.h> |
Mark Gross | d82b351 | 2008-02-04 22:30:08 -0800 | [diff] [blame] | 37 | |
| 38 | #include <linux/uaccess.h> |
Paul Gortmaker | 6e5fdee | 2011-05-26 16:00:52 -0400 | [diff] [blame] | 39 | #include <linux/export.h> |
Sahara | 247e9ee | 2013-06-21 11:12:28 +0900 | [diff] [blame] | 40 | #include <trace/events/power.h> |
Mark Gross | d82b351 | 2008-02-04 22:30:08 -0800 | [diff] [blame] | 41 | |
/*
 * Locking rule: all changes to constraints or notifiers lists need to
 * happen with pm_qos_lock held, taken with _irqsave.  One lock to rule
 * them all.
 */
James Bottomley | 5f27984 | 2010-07-19 02:00:18 +0200 | [diff] [blame] | 47 | static DEFINE_SPINLOCK(pm_qos_lock); |
| 48 | |
/**
 * pm_qos_read_value - Return the current effective constraint value.
 * @c: List of PM QoS constraint requests.
 *
 * Lockless read of the cached aggregate; the READ_ONCE() pairs with the
 * WRITE_ONCE() in pm_qos_set_value(), so callers get a consistent value
 * without taking pm_qos_lock.
 */
s32 pm_qos_read_value(struct pm_qos_constraints *c)
{
	return READ_ONCE(c->target_value);
}
| 57 | |
| 58 | static int pm_qos_get_value(struct pm_qos_constraints *c) |
Mark Gross | d82b351 | 2008-02-04 22:30:08 -0800 | [diff] [blame] | 59 | { |
Jean Pihet | abe98ec | 2011-08-25 15:35:34 +0200 | [diff] [blame] | 60 | if (plist_head_empty(&c->list)) |
Rafael J. Wysocki | 327adae | 2014-02-11 00:35:29 +0100 | [diff] [blame] | 61 | return c->no_constraint_value; |
James Bottomley | 5f27984 | 2010-07-19 02:00:18 +0200 | [diff] [blame] | 62 | |
Jean Pihet | abe98ec | 2011-08-25 15:35:34 +0200 | [diff] [blame] | 63 | switch (c->type) { |
James Bottomley | 5f27984 | 2010-07-19 02:00:18 +0200 | [diff] [blame] | 64 | case PM_QOS_MIN: |
Jean Pihet | abe98ec | 2011-08-25 15:35:34 +0200 | [diff] [blame] | 65 | return plist_first(&c->list)->prio; |
James Bottomley | 5f27984 | 2010-07-19 02:00:18 +0200 | [diff] [blame] | 66 | |
| 67 | case PM_QOS_MAX: |
Jean Pihet | abe98ec | 2011-08-25 15:35:34 +0200 | [diff] [blame] | 68 | return plist_last(&c->list)->prio; |
James Bottomley | 5f27984 | 2010-07-19 02:00:18 +0200 | [diff] [blame] | 69 | |
| 70 | default: |
Rafael J. Wysocki | dcd70ca | 2020-02-11 23:58:39 +0100 | [diff] [blame] | 71 | WARN(1, "Unknown PM QoS type in %s\n", __func__); |
Luis Gonzalez Fernandez | c6a57bf | 2012-09-07 21:35:21 +0200 | [diff] [blame] | 72 | return PM_QOS_DEFAULT_VALUE; |
James Bottomley | 5f27984 | 2010-07-19 02:00:18 +0200 | [diff] [blame] | 73 | } |
Mark Gross | d82b351 | 2008-02-04 22:30:08 -0800 | [diff] [blame] | 74 | } |
| 75 | |
/*
 * pm_qos_set_value - Publish a new effective constraint value.
 * @c: List of PM QoS constraint requests.
 * @value: New aggregate value to publish.
 *
 * The WRITE_ONCE() pairs with the READ_ONCE() in pm_qos_read_value(),
 * allowing the target value to be read without locking.  Called with
 * pm_qos_lock held (see pm_qos_update_target()).
 */
static void pm_qos_set_value(struct pm_qos_constraints *c, s32 value)
{
	WRITE_ONCE(c->target_value, value);
}
| 80 | |
Jean Pihet | abe98ec | 2011-08-25 15:35:34 +0200 | [diff] [blame] | 81 | /** |
Rafael J. Wysocki | 7b35370 | 2020-02-11 23:58:33 +0100 | [diff] [blame] | 82 | * pm_qos_update_target - Update a list of PM QoS constraint requests. |
| 83 | * @c: List of PM QoS requests. |
| 84 | * @node: Target list entry. |
| 85 | * @action: Action to carry out (add, update or remove). |
| 86 | * @value: New request value for the target list entry. |
Jean Pihet | abe98ec | 2011-08-25 15:35:34 +0200 | [diff] [blame] | 87 | * |
Rafael J. Wysocki | 7b35370 | 2020-02-11 23:58:33 +0100 | [diff] [blame] | 88 | * Update the given list of PM QoS constraint requests, @c, by carrying an |
| 89 | * @action involving the @node list entry and @value on it. |
| 90 | * |
| 91 | * The recognized values of @action are PM_QOS_ADD_REQ (store @value in @node |
| 92 | * and add it to the list), PM_QOS_UPDATE_REQ (remove @node from the list, store |
| 93 | * @value in it and add it to the list again), and PM_QOS_REMOVE_REQ (remove |
| 94 | * @node from the list, ignore @value). |
| 95 | * |
| 96 | * Return: 1 if the aggregate constraint value has changed, 0 otherwise. |
Jean Pihet | abe98ec | 2011-08-25 15:35:34 +0200 | [diff] [blame] | 97 | */ |
| 98 | int pm_qos_update_target(struct pm_qos_constraints *c, struct plist_node *node, |
| 99 | enum pm_qos_req_action action, int value) |
Mark Gross | d82b351 | 2008-02-04 22:30:08 -0800 | [diff] [blame] | 100 | { |
Jean Pihet | abe98ec | 2011-08-25 15:35:34 +0200 | [diff] [blame] | 101 | int prev_value, curr_value, new_value; |
Rafael J. Wysocki | 7b35370 | 2020-02-11 23:58:33 +0100 | [diff] [blame] | 102 | unsigned long flags; |
Mark Gross | d82b351 | 2008-02-04 22:30:08 -0800 | [diff] [blame] | 103 | |
| 104 | spin_lock_irqsave(&pm_qos_lock, flags); |
Rafael J. Wysocki | 7b35370 | 2020-02-11 23:58:33 +0100 | [diff] [blame] | 105 | |
Jean Pihet | abe98ec | 2011-08-25 15:35:34 +0200 | [diff] [blame] | 106 | prev_value = pm_qos_get_value(c); |
| 107 | if (value == PM_QOS_DEFAULT_VALUE) |
| 108 | new_value = c->default_value; |
| 109 | else |
| 110 | new_value = value; |
| 111 | |
| 112 | switch (action) { |
| 113 | case PM_QOS_REMOVE_REQ: |
| 114 | plist_del(node, &c->list); |
| 115 | break; |
| 116 | case PM_QOS_UPDATE_REQ: |
James Bottomley | 5f27984 | 2010-07-19 02:00:18 +0200 | [diff] [blame] | 117 | /* |
Rafael J. Wysocki | 7b35370 | 2020-02-11 23:58:33 +0100 | [diff] [blame] | 118 | * To change the list, atomically remove, reinit with new value |
| 119 | * and add, then see if the aggregate has changed. |
James Bottomley | 5f27984 | 2010-07-19 02:00:18 +0200 | [diff] [blame] | 120 | */ |
Jean Pihet | abe98ec | 2011-08-25 15:35:34 +0200 | [diff] [blame] | 121 | plist_del(node, &c->list); |
Gustavo A. R. Silva | df561f66 | 2020-08-23 17:36:59 -0500 | [diff] [blame] | 122 | fallthrough; |
Jean Pihet | abe98ec | 2011-08-25 15:35:34 +0200 | [diff] [blame] | 123 | case PM_QOS_ADD_REQ: |
| 124 | plist_node_init(node, new_value); |
| 125 | plist_add(node, &c->list); |
| 126 | break; |
| 127 | default: |
| 128 | /* no action */ |
| 129 | ; |
Mark Gross | d82b351 | 2008-02-04 22:30:08 -0800 | [diff] [blame] | 130 | } |
Jean Pihet | abe98ec | 2011-08-25 15:35:34 +0200 | [diff] [blame] | 131 | |
| 132 | curr_value = pm_qos_get_value(c); |
| 133 | pm_qos_set_value(c, curr_value); |
| 134 | |
Mark Gross | d82b351 | 2008-02-04 22:30:08 -0800 | [diff] [blame] | 135 | spin_unlock_irqrestore(&pm_qos_lock, flags); |
| 136 | |
Sahara | 247e9ee | 2013-06-21 11:12:28 +0900 | [diff] [blame] | 137 | trace_pm_qos_update_target(action, prev_value, curr_value); |
Rafael J. Wysocki | 7b35370 | 2020-02-11 23:58:33 +0100 | [diff] [blame] | 138 | |
| 139 | if (prev_value == curr_value) |
| 140 | return 0; |
| 141 | |
| 142 | if (c->notifiers) |
| 143 | blocking_notifier_call_chain(c->notifiers, curr_value, NULL); |
| 144 | |
| 145 | return 1; |
Mark Gross | d82b351 | 2008-02-04 22:30:08 -0800 | [diff] [blame] | 146 | } |
| 147 | |
/**
 * pm_qos_flags_remove_req - Remove device PM QoS flags request.
 * @pqf: Device PM QoS flags set to remove the request from.
 * @req: Request to remove from the set.
 *
 * Delete @req from the list and recompute the effective flags as the
 * bitwise OR of all remaining requests' flags.  Note that @req is reused
 * as the list cursor after its removal, so its original value is clobbered
 * by the loop.  Called with pm_qos_lock held (see pm_qos_update_flags()).
 */
static void pm_qos_flags_remove_req(struct pm_qos_flags *pqf,
				    struct pm_qos_flags_request *req)
{
	s32 val = 0;

	list_del(&req->node);
	list_for_each_entry(req, &pqf->list, node)
		val |= req->flags;

	pqf->effective_flags = val;
}
| 164 | |
| 165 | /** |
| 166 | * pm_qos_update_flags - Update a set of PM QoS flags. |
Rafael J. Wysocki | 7b35370 | 2020-02-11 23:58:33 +0100 | [diff] [blame] | 167 | * @pqf: Set of PM QoS flags to update. |
Rafael J. Wysocki | 5efbe42 | 2012-10-23 01:07:46 +0200 | [diff] [blame] | 168 | * @req: Request to add to the set, to modify, or to remove from the set. |
| 169 | * @action: Action to take on the set. |
| 170 | * @val: Value of the request to add or modify. |
| 171 | * |
Rafael J. Wysocki | 7b35370 | 2020-02-11 23:58:33 +0100 | [diff] [blame] | 172 | * Return: 1 if the aggregate constraint value has changed, 0 otherwise. |
Rafael J. Wysocki | 5efbe42 | 2012-10-23 01:07:46 +0200 | [diff] [blame] | 173 | */ |
| 174 | bool pm_qos_update_flags(struct pm_qos_flags *pqf, |
| 175 | struct pm_qos_flags_request *req, |
| 176 | enum pm_qos_req_action action, s32 val) |
| 177 | { |
| 178 | unsigned long irqflags; |
| 179 | s32 prev_value, curr_value; |
| 180 | |
| 181 | spin_lock_irqsave(&pm_qos_lock, irqflags); |
| 182 | |
| 183 | prev_value = list_empty(&pqf->list) ? 0 : pqf->effective_flags; |
| 184 | |
| 185 | switch (action) { |
| 186 | case PM_QOS_REMOVE_REQ: |
| 187 | pm_qos_flags_remove_req(pqf, req); |
| 188 | break; |
| 189 | case PM_QOS_UPDATE_REQ: |
| 190 | pm_qos_flags_remove_req(pqf, req); |
Gustavo A. R. Silva | df561f66 | 2020-08-23 17:36:59 -0500 | [diff] [blame] | 191 | fallthrough; |
Rafael J. Wysocki | 5efbe42 | 2012-10-23 01:07:46 +0200 | [diff] [blame] | 192 | case PM_QOS_ADD_REQ: |
| 193 | req->flags = val; |
| 194 | INIT_LIST_HEAD(&req->node); |
| 195 | list_add_tail(&req->node, &pqf->list); |
| 196 | pqf->effective_flags |= val; |
| 197 | break; |
| 198 | default: |
| 199 | /* no action */ |
| 200 | ; |
| 201 | } |
| 202 | |
| 203 | curr_value = list_empty(&pqf->list) ? 0 : pqf->effective_flags; |
| 204 | |
| 205 | spin_unlock_irqrestore(&pm_qos_lock, irqflags); |
| 206 | |
Sahara | 247e9ee | 2013-06-21 11:12:28 +0900 | [diff] [blame] | 207 | trace_pm_qos_update_flags(action, prev_value, curr_value); |
Rafael J. Wysocki | 7b35370 | 2020-02-11 23:58:33 +0100 | [diff] [blame] | 208 | |
Rafael J. Wysocki | 5efbe42 | 2012-10-23 01:07:46 +0200 | [diff] [blame] | 209 | return prev_value != curr_value; |
| 210 | } |
| 211 | |
Rafael J. Wysocki | 814d51f | 2020-02-12 00:37:11 +0100 | [diff] [blame] | 212 | #ifdef CONFIG_CPU_IDLE |
/* Definitions related to the CPU latency QoS. */

static struct pm_qos_constraints cpu_latency_constraints = {
	.list = PLIST_HEAD_INIT(cpu_latency_constraints.list),
	.target_value = PM_QOS_CPU_LATENCY_DEFAULT_VALUE,
	.default_value = PM_QOS_CPU_LATENCY_DEFAULT_VALUE,
	.no_constraint_value = PM_QOS_CPU_LATENCY_DEFAULT_VALUE,
	/* PM_QOS_MIN: the smallest requested latency value wins. */
	.type = PM_QOS_MIN,
};
| 222 | |
/**
 * cpu_latency_qos_limit - Return current system-wide CPU latency QoS limit.
 *
 * Lockless read of the cached aggregate via pm_qos_read_value().
 */
s32 cpu_latency_qos_limit(void)
{
	return pm_qos_read_value(&cpu_latency_constraints);
}
Mark Gross | d82b351 | 2008-02-04 22:30:08 -0800 | [diff] [blame] | 230 | |
/**
 * cpu_latency_qos_request_active - Check the given PM QoS request.
 * @req: PM QoS request to check.
 *
 * Return: 'true' if @req has been added to the CPU latency QoS list, 'false'
 * otherwise.
 *
 * Implemented as a pointer comparison: adding a request points req->qos at
 * cpu_latency_constraints, and removal memset()s the request to zero.
 */
bool cpu_latency_qos_request_active(struct pm_qos_request *req)
{
	return req->qos == &cpu_latency_constraints;
}
EXPORT_SYMBOL_GPL(cpu_latency_qos_request_active);
Rafael J. Wysocki | 333eed7d | 2020-02-12 00:06:17 +0100 | [diff] [blame] | 244 | static void cpu_latency_qos_apply(struct pm_qos_request *req, |
| 245 | enum pm_qos_req_action action, s32 value) |
Rafael J. Wysocki | 3a4a004 | 2020-02-12 00:02:30 +0100 | [diff] [blame] | 246 | { |
| 247 | int ret = pm_qos_update_target(req->qos, &req->node, action, value); |
| 248 | if (ret > 0) |
| 249 | wake_up_all_idle_cpus(); |
| 250 | } |
| 251 | |
Mark Gross | d82b351 | 2008-02-04 22:30:08 -0800 | [diff] [blame] | 252 | /** |
Rafael J. Wysocki | 67b06ba | 2020-02-12 00:35:04 +0100 | [diff] [blame] | 253 | * cpu_latency_qos_add_request - Add new CPU latency QoS request. |
| 254 | * @req: Pointer to a preallocated handle. |
| 255 | * @value: Requested constraint value. |
Mark Gross | d82b351 | 2008-02-04 22:30:08 -0800 | [diff] [blame] | 256 | * |
Rafael J. Wysocki | 67b06ba | 2020-02-12 00:35:04 +0100 | [diff] [blame] | 257 | * Use @value to initialize the request handle pointed to by @req, insert it as |
| 258 | * a new entry to the CPU latency QoS list and recompute the effective QoS |
| 259 | * constraint for that list. |
| 260 | * |
| 261 | * Callers need to save the handle for later use in updates and removal of the |
| 262 | * QoS request represented by it. |
Mark Gross | d82b351 | 2008-02-04 22:30:08 -0800 | [diff] [blame] | 263 | */ |
Rafael J. Wysocki | 67b06ba | 2020-02-12 00:35:04 +0100 | [diff] [blame] | 264 | void cpu_latency_qos_add_request(struct pm_qos_request *req, s32 value) |
Mark Gross | d82b351 | 2008-02-04 22:30:08 -0800 | [diff] [blame] | 265 | { |
Rafael J. Wysocki | 67b06ba | 2020-02-12 00:35:04 +0100 | [diff] [blame] | 266 | if (!req) |
Jean Pihet | abe98ec | 2011-08-25 15:35:34 +0200 | [diff] [blame] | 267 | return; |
Mark Gross | d82b351 | 2008-02-04 22:30:08 -0800 | [diff] [blame] | 268 | |
Rafael J. Wysocki | 67b06ba | 2020-02-12 00:35:04 +0100 | [diff] [blame] | 269 | if (cpu_latency_qos_request_active(req)) { |
| 270 | WARN(1, KERN_ERR "%s called for already added request\n", __func__); |
James Bottomley | 82f6825 | 2010-07-05 22:53:06 +0200 | [diff] [blame] | 271 | return; |
Mark Gross | d82b351 | 2008-02-04 22:30:08 -0800 | [diff] [blame] | 272 | } |
Rafael J. Wysocki | 02c92a3 | 2020-02-12 00:01:18 +0100 | [diff] [blame] | 273 | |
Rafael J. Wysocki | 333eed7d | 2020-02-12 00:06:17 +0100 | [diff] [blame] | 274 | trace_pm_qos_add_request(value); |
Rafael J. Wysocki | 02c92a3 | 2020-02-12 00:01:18 +0100 | [diff] [blame] | 275 | |
Rafael J. Wysocki | 2552d35 | 2020-02-12 00:04:31 +0100 | [diff] [blame] | 276 | req->qos = &cpu_latency_constraints; |
Rafael J. Wysocki | 333eed7d | 2020-02-12 00:06:17 +0100 | [diff] [blame] | 277 | cpu_latency_qos_apply(req, PM_QOS_ADD_REQ, value); |
Mark Gross | d82b351 | 2008-02-04 22:30:08 -0800 | [diff] [blame] | 278 | } |
Rafael J. Wysocki | 67b06ba | 2020-02-12 00:35:04 +0100 | [diff] [blame] | 279 | EXPORT_SYMBOL_GPL(cpu_latency_qos_add_request); |
Mark Gross | d82b351 | 2008-02-04 22:30:08 -0800 | [diff] [blame] | 280 | |
| 281 | /** |
Rafael J. Wysocki | 67b06ba | 2020-02-12 00:35:04 +0100 | [diff] [blame] | 282 | * cpu_latency_qos_update_request - Modify existing CPU latency QoS request. |
| 283 | * @req : QoS request to update. |
| 284 | * @new_value: New requested constraint value. |
Mark Gross | d82b351 | 2008-02-04 22:30:08 -0800 | [diff] [blame] | 285 | * |
Rafael J. Wysocki | 67b06ba | 2020-02-12 00:35:04 +0100 | [diff] [blame] | 286 | * Use @new_value to update the QoS request represented by @req in the CPU |
| 287 | * latency QoS list along with updating the effective constraint value for that |
| 288 | * list. |
Mark Gross | d82b351 | 2008-02-04 22:30:08 -0800 | [diff] [blame] | 289 | */ |
Rafael J. Wysocki | 67b06ba | 2020-02-12 00:35:04 +0100 | [diff] [blame] | 290 | void cpu_latency_qos_update_request(struct pm_qos_request *req, s32 new_value) |
Mark Gross | d82b351 | 2008-02-04 22:30:08 -0800 | [diff] [blame] | 291 | { |
Rafael J. Wysocki | 67b06ba | 2020-02-12 00:35:04 +0100 | [diff] [blame] | 292 | if (!req) |
James Bottomley | 5f27984 | 2010-07-19 02:00:18 +0200 | [diff] [blame] | 293 | return; |
Mark Gross | ed77134 | 2010-05-06 01:59:26 +0200 | [diff] [blame] | 294 | |
Rafael J. Wysocki | 67b06ba | 2020-02-12 00:35:04 +0100 | [diff] [blame] | 295 | if (!cpu_latency_qos_request_active(req)) { |
| 296 | WARN(1, KERN_ERR "%s called for unknown object\n", __func__); |
James Bottomley | 82f6825 | 2010-07-05 22:53:06 +0200 | [diff] [blame] | 297 | return; |
| 298 | } |
| 299 | |
Rafael J. Wysocki | 333eed7d | 2020-02-12 00:06:17 +0100 | [diff] [blame] | 300 | trace_pm_qos_update_request(new_value); |
Rafael J. Wysocki | 02c92a3 | 2020-02-12 00:01:18 +0100 | [diff] [blame] | 301 | |
| 302 | if (new_value == req->node.prio) |
| 303 | return; |
| 304 | |
Rafael J. Wysocki | 333eed7d | 2020-02-12 00:06:17 +0100 | [diff] [blame] | 305 | cpu_latency_qos_apply(req, PM_QOS_UPDATE_REQ, new_value); |
Mark Gross | d82b351 | 2008-02-04 22:30:08 -0800 | [diff] [blame] | 306 | } |
Rafael J. Wysocki | 67b06ba | 2020-02-12 00:35:04 +0100 | [diff] [blame] | 307 | EXPORT_SYMBOL_GPL(cpu_latency_qos_update_request); |
Mark Gross | d82b351 | 2008-02-04 22:30:08 -0800 | [diff] [blame] | 308 | |
| 309 | /** |
Rafael J. Wysocki | 67b06ba | 2020-02-12 00:35:04 +0100 | [diff] [blame] | 310 | * cpu_latency_qos_remove_request - Remove existing CPU latency QoS request. |
| 311 | * @req: QoS request to remove. |
Mark Gross | d82b351 | 2008-02-04 22:30:08 -0800 | [diff] [blame] | 312 | * |
Rafael J. Wysocki | 67b06ba | 2020-02-12 00:35:04 +0100 | [diff] [blame] | 313 | * Remove the CPU latency QoS request represented by @req from the CPU latency |
| 314 | * QoS list along with updating the effective constraint value for that list. |
Mark Gross | d82b351 | 2008-02-04 22:30:08 -0800 | [diff] [blame] | 315 | */ |
Rafael J. Wysocki | 67b06ba | 2020-02-12 00:35:04 +0100 | [diff] [blame] | 316 | void cpu_latency_qos_remove_request(struct pm_qos_request *req) |
Mark Gross | d82b351 | 2008-02-04 22:30:08 -0800 | [diff] [blame] | 317 | { |
Rafael J. Wysocki | 67b06ba | 2020-02-12 00:35:04 +0100 | [diff] [blame] | 318 | if (!req) |
Mark Gross | ed77134 | 2010-05-06 01:59:26 +0200 | [diff] [blame] | 319 | return; |
Mark Gross | ed77134 | 2010-05-06 01:59:26 +0200 | [diff] [blame] | 320 | |
Rafael J. Wysocki | 67b06ba | 2020-02-12 00:35:04 +0100 | [diff] [blame] | 321 | if (!cpu_latency_qos_request_active(req)) { |
| 322 | WARN(1, KERN_ERR "%s called for unknown object\n", __func__); |
James Bottomley | 82f6825 | 2010-07-05 22:53:06 +0200 | [diff] [blame] | 323 | return; |
| 324 | } |
| 325 | |
Rafael J. Wysocki | 333eed7d | 2020-02-12 00:06:17 +0100 | [diff] [blame] | 326 | trace_pm_qos_remove_request(PM_QOS_DEFAULT_VALUE); |
Rafael J. Wysocki | 02c92a3 | 2020-02-12 00:01:18 +0100 | [diff] [blame] | 327 | |
Rafael J. Wysocki | 333eed7d | 2020-02-12 00:06:17 +0100 | [diff] [blame] | 328 | cpu_latency_qos_apply(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE); |
Jean Pihet | cc74998 | 2011-08-25 15:35:12 +0200 | [diff] [blame] | 329 | memset(req, 0, sizeof(*req)); |
Mark Gross | d82b351 | 2008-02-04 22:30:08 -0800 | [diff] [blame] | 330 | } |
Rafael J. Wysocki | 67b06ba | 2020-02-12 00:35:04 +0100 | [diff] [blame] | 331 | EXPORT_SYMBOL_GPL(cpu_latency_qos_remove_request); |
Mark Gross | d82b351 | 2008-02-04 22:30:08 -0800 | [diff] [blame] | 332 | |
Rafael J. Wysocki | 2552d35 | 2020-02-12 00:04:31 +0100 | [diff] [blame] | 333 | /* User space interface to the CPU latency QoS via misc device. */ |
Rafael J. Wysocki | 63cffc0 | 2020-02-11 23:59:22 +0100 | [diff] [blame] | 334 | |
Rafael J. Wysocki | 2552d35 | 2020-02-12 00:04:31 +0100 | [diff] [blame] | 335 | static int cpu_latency_qos_open(struct inode *inode, struct file *filp) |
Mark Gross | d82b351 | 2008-02-04 22:30:08 -0800 | [diff] [blame] | 336 | { |
Rafael J. Wysocki | 63cffc0 | 2020-02-11 23:59:22 +0100 | [diff] [blame] | 337 | struct pm_qos_request *req; |
Mark Gross | d82b351 | 2008-02-04 22:30:08 -0800 | [diff] [blame] | 338 | |
Rafael J. Wysocki | 63cffc0 | 2020-02-11 23:59:22 +0100 | [diff] [blame] | 339 | req = kzalloc(sizeof(*req), GFP_KERNEL); |
| 340 | if (!req) |
| 341 | return -ENOMEM; |
James Bottomley | 82f6825 | 2010-07-05 22:53:06 +0200 | [diff] [blame] | 342 | |
Rafael J. Wysocki | 67b06ba | 2020-02-12 00:35:04 +0100 | [diff] [blame] | 343 | cpu_latency_qos_add_request(req, PM_QOS_DEFAULT_VALUE); |
Rafael J. Wysocki | 63cffc0 | 2020-02-11 23:59:22 +0100 | [diff] [blame] | 344 | filp->private_data = req; |
Mark Gross | ed77134 | 2010-05-06 01:59:26 +0200 | [diff] [blame] | 345 | |
Rafael J. Wysocki | 63cffc0 | 2020-02-11 23:59:22 +0100 | [diff] [blame] | 346 | return 0; |
Mark Gross | d82b351 | 2008-02-04 22:30:08 -0800 | [diff] [blame] | 347 | } |
| 348 | |
Rafael J. Wysocki | 2552d35 | 2020-02-12 00:04:31 +0100 | [diff] [blame] | 349 | static int cpu_latency_qos_release(struct inode *inode, struct file *filp) |
Mark Gross | d82b351 | 2008-02-04 22:30:08 -0800 | [diff] [blame] | 350 | { |
Rafael J. Wysocki | 299a229 | 2020-02-12 00:00:12 +0100 | [diff] [blame] | 351 | struct pm_qos_request *req = filp->private_data; |
Mark Gross | d82b351 | 2008-02-04 22:30:08 -0800 | [diff] [blame] | 352 | |
Rafael J. Wysocki | 299a229 | 2020-02-12 00:00:12 +0100 | [diff] [blame] | 353 | filp->private_data = NULL; |
| 354 | |
Rafael J. Wysocki | 67b06ba | 2020-02-12 00:35:04 +0100 | [diff] [blame] | 355 | cpu_latency_qos_remove_request(req); |
James Bottomley | 82f6825 | 2010-07-05 22:53:06 +0200 | [diff] [blame] | 356 | kfree(req); |
Mark Gross | d82b351 | 2008-02-04 22:30:08 -0800 | [diff] [blame] | 357 | |
| 358 | return 0; |
| 359 | } |
| 360 | |
/*
 * Reading the file returns the current effective CPU latency constraint as
 * a raw s32 value.
 */
static ssize_t cpu_latency_qos_read(struct file *filp, char __user *buf,
				    size_t count, loff_t *f_pos)
{
	struct pm_qos_request *req = filp->private_data;
	unsigned long flags;
	s32 value;

	/* Reject descriptors that were not set up by cpu_latency_qos_open(). */
	if (!req || !cpu_latency_qos_request_active(req))
		return -EINVAL;

	/* Compute the aggregate from the list under the lock. */
	spin_lock_irqsave(&pm_qos_lock, flags);
	value = pm_qos_get_value(&cpu_latency_constraints);
	spin_unlock_irqrestore(&pm_qos_lock, flags);

	return simple_read_from_buffer(buf, count, f_pos, &value, sizeof(s32));
}
| 377 | |
Rafael J. Wysocki | 2552d35 | 2020-02-12 00:04:31 +0100 | [diff] [blame] | 378 | static ssize_t cpu_latency_qos_write(struct file *filp, const char __user *buf, |
| 379 | size_t count, loff_t *f_pos) |
Mark Gross | d82b351 | 2008-02-04 22:30:08 -0800 | [diff] [blame] | 380 | { |
| 381 | s32 value; |
Mark Gross | d82b351 | 2008-02-04 22:30:08 -0800 | [diff] [blame] | 382 | |
Mark Gross | ed77134 | 2010-05-06 01:59:26 +0200 | [diff] [blame] | 383 | if (count == sizeof(s32)) { |
| 384 | if (copy_from_user(&value, buf, sizeof(s32))) |
| 385 | return -EFAULT; |
Andy Shevchenko | d4f7ecf | 2013-09-11 17:02:38 +0300 | [diff] [blame] | 386 | } else { |
Rafael J. Wysocki | 0775a60 | 2011-05-27 00:05:23 +0200 | [diff] [blame] | 387 | int ret; |
| 388 | |
Andy Shevchenko | d4f7ecf | 2013-09-11 17:02:38 +0300 | [diff] [blame] | 389 | ret = kstrtos32_from_user(buf, count, 16, &value); |
| 390 | if (ret) |
| 391 | return ret; |
Rafael J. Wysocki | 0775a60 | 2011-05-27 00:05:23 +0200 | [diff] [blame] | 392 | } |
Mark Gross | d82b351 | 2008-02-04 22:30:08 -0800 | [diff] [blame] | 393 | |
Rafael J. Wysocki | 67b06ba | 2020-02-12 00:35:04 +0100 | [diff] [blame] | 394 | cpu_latency_qos_update_request(filp->private_data, value); |
Mark Gross | ed77134 | 2010-05-06 01:59:26 +0200 | [diff] [blame] | 395 | |
| 396 | return count; |
Mark Gross | d82b351 | 2008-02-04 22:30:08 -0800 | [diff] [blame] | 397 | } |
| 398 | |
/* File operations for the CPU latency QoS misc device. */
static const struct file_operations cpu_latency_qos_fops = {
	.write = cpu_latency_qos_write,
	.read = cpu_latency_qos_read,
	.open = cpu_latency_qos_open,
	.release = cpu_latency_qos_release,
	.llseek = noop_llseek,
};

/* Misc device exposing the CPU latency QoS as /dev/cpu_dma_latency. */
static struct miscdevice cpu_latency_qos_miscdev = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "cpu_dma_latency",
	.fops = &cpu_latency_qos_fops,
};
Rafael J. Wysocki | 299a229 | 2020-02-12 00:00:12 +0100 | [diff] [blame] | 412 | |
Rafael J. Wysocki | 2552d35 | 2020-02-12 00:04:31 +0100 | [diff] [blame] | 413 | static int __init cpu_latency_qos_init(void) |
Mark Gross | d82b351 | 2008-02-04 22:30:08 -0800 | [diff] [blame] | 414 | { |
Rafael J. Wysocki | 63cffc0 | 2020-02-11 23:59:22 +0100 | [diff] [blame] | 415 | int ret; |
Mark Gross | d82b351 | 2008-02-04 22:30:08 -0800 | [diff] [blame] | 416 | |
Rafael J. Wysocki | 02c92a3 | 2020-02-12 00:01:18 +0100 | [diff] [blame] | 417 | ret = misc_register(&cpu_latency_qos_miscdev); |
Rafael J. Wysocki | 63cffc0 | 2020-02-11 23:59:22 +0100 | [diff] [blame] | 418 | if (ret < 0) |
| 419 | pr_err("%s: %s setup failed\n", __func__, |
Rafael J. Wysocki | 02c92a3 | 2020-02-12 00:01:18 +0100 | [diff] [blame] | 420 | cpu_latency_qos_miscdev.name); |
Mark Gross | d82b351 | 2008-02-04 22:30:08 -0800 | [diff] [blame] | 421 | |
| 422 | return ret; |
| 423 | } |
Rafael J. Wysocki | 2552d35 | 2020-02-12 00:04:31 +0100 | [diff] [blame] | 424 | late_initcall(cpu_latency_qos_init); |
Rafael J. Wysocki | 814d51f | 2020-02-12 00:37:11 +0100 | [diff] [blame] | 425 | #endif /* CONFIG_CPU_IDLE */ |
Rafael J. Wysocki | 77751a4 | 2019-10-16 12:41:24 +0200 | [diff] [blame] | 426 | |
| 427 | /* Definitions related to the frequency QoS below. */ |
| 428 | |
| 429 | /** |
| 430 | * freq_constraints_init - Initialize frequency QoS constraints. |
| 431 | * @qos: Frequency QoS constraints to initialize. |
| 432 | */ |
| 433 | void freq_constraints_init(struct freq_constraints *qos) |
| 434 | { |
| 435 | struct pm_qos_constraints *c; |
| 436 | |
| 437 | c = &qos->min_freq; |
| 438 | plist_head_init(&c->list); |
| 439 | c->target_value = FREQ_QOS_MIN_DEFAULT_VALUE; |
| 440 | c->default_value = FREQ_QOS_MIN_DEFAULT_VALUE; |
| 441 | c->no_constraint_value = FREQ_QOS_MIN_DEFAULT_VALUE; |
| 442 | c->type = PM_QOS_MAX; |
| 443 | c->notifiers = &qos->min_freq_notifiers; |
| 444 | BLOCKING_INIT_NOTIFIER_HEAD(c->notifiers); |
| 445 | |
| 446 | c = &qos->max_freq; |
| 447 | plist_head_init(&c->list); |
| 448 | c->target_value = FREQ_QOS_MAX_DEFAULT_VALUE; |
| 449 | c->default_value = FREQ_QOS_MAX_DEFAULT_VALUE; |
| 450 | c->no_constraint_value = FREQ_QOS_MAX_DEFAULT_VALUE; |
| 451 | c->type = PM_QOS_MIN; |
| 452 | c->notifiers = &qos->max_freq_notifiers; |
| 453 | BLOCKING_INIT_NOTIFIER_HEAD(c->notifiers); |
| 454 | } |
| 455 | |
| 456 | /** |
| 457 | * freq_qos_read_value - Get frequency QoS constraint for a given list. |
| 458 | * @qos: Constraints to evaluate. |
| 459 | * @type: QoS request type. |
| 460 | */ |
| 461 | s32 freq_qos_read_value(struct freq_constraints *qos, |
| 462 | enum freq_qos_req_type type) |
| 463 | { |
| 464 | s32 ret; |
| 465 | |
| 466 | switch (type) { |
| 467 | case FREQ_QOS_MIN: |
| 468 | ret = IS_ERR_OR_NULL(qos) ? |
| 469 | FREQ_QOS_MIN_DEFAULT_VALUE : |
| 470 | pm_qos_read_value(&qos->min_freq); |
| 471 | break; |
| 472 | case FREQ_QOS_MAX: |
| 473 | ret = IS_ERR_OR_NULL(qos) ? |
| 474 | FREQ_QOS_MAX_DEFAULT_VALUE : |
| 475 | pm_qos_read_value(&qos->max_freq); |
| 476 | break; |
| 477 | default: |
| 478 | WARN_ON(1); |
| 479 | ret = 0; |
| 480 | } |
| 481 | |
| 482 | return ret; |
| 483 | } |
| 484 | |
| 485 | /** |
| 486 | * freq_qos_apply - Add/modify/remove frequency QoS request. |
| 487 | * @req: Constraint request to apply. |
| 488 | * @action: Action to perform (add/update/remove). |
| 489 | * @value: Value to assign to the QoS request. |
Leonard Crestez | 36a8015 | 2019-11-26 17:17:13 +0200 | [diff] [blame] | 490 | * |
| 491 | * This is only meant to be called from inside pm_qos, not drivers. |
Rafael J. Wysocki | 77751a4 | 2019-10-16 12:41:24 +0200 | [diff] [blame] | 492 | */ |
Leonard Crestez | 36a8015 | 2019-11-26 17:17:13 +0200 | [diff] [blame] | 493 | int freq_qos_apply(struct freq_qos_request *req, |
Rafael J. Wysocki | 77751a4 | 2019-10-16 12:41:24 +0200 | [diff] [blame] | 494 | enum pm_qos_req_action action, s32 value) |
| 495 | { |
| 496 | int ret; |
| 497 | |
| 498 | switch(req->type) { |
| 499 | case FREQ_QOS_MIN: |
| 500 | ret = pm_qos_update_target(&req->qos->min_freq, &req->pnode, |
| 501 | action, value); |
| 502 | break; |
| 503 | case FREQ_QOS_MAX: |
| 504 | ret = pm_qos_update_target(&req->qos->max_freq, &req->pnode, |
| 505 | action, value); |
| 506 | break; |
| 507 | default: |
| 508 | ret = -EINVAL; |
| 509 | } |
| 510 | |
| 511 | return ret; |
| 512 | } |
| 513 | |
| 514 | /** |
| 515 | * freq_qos_add_request - Insert new frequency QoS request into a given list. |
| 516 | * @qos: Constraints to update. |
| 517 | * @req: Preallocated request object. |
| 518 | * @type: Request type. |
| 519 | * @value: Request value. |
| 520 | * |
| 521 | * Insert a new entry into the @qos list of requests, recompute the effective |
| 522 | * QoS constraint value for that list and initialize the @req object. The |
| 523 | * caller needs to save that object for later use in updates and removal. |
| 524 | * |
| 525 | * Return 1 if the effective constraint value has changed, 0 if the effective |
| 526 | * constraint value has not changed, or a negative error code on failures. |
| 527 | */ |
| 528 | int freq_qos_add_request(struct freq_constraints *qos, |
| 529 | struct freq_qos_request *req, |
| 530 | enum freq_qos_req_type type, s32 value) |
| 531 | { |
| 532 | int ret; |
| 533 | |
| 534 | if (IS_ERR_OR_NULL(qos) || !req) |
| 535 | return -EINVAL; |
| 536 | |
| 537 | if (WARN(freq_qos_request_active(req), |
| 538 | "%s() called for active request\n", __func__)) |
| 539 | return -EINVAL; |
| 540 | |
| 541 | req->qos = qos; |
| 542 | req->type = type; |
| 543 | ret = freq_qos_apply(req, PM_QOS_ADD_REQ, value); |
| 544 | if (ret < 0) { |
| 545 | req->qos = NULL; |
| 546 | req->type = 0; |
| 547 | } |
| 548 | |
| 549 | return ret; |
| 550 | } |
| 551 | EXPORT_SYMBOL_GPL(freq_qos_add_request); |
| 552 | |
| 553 | /** |
| 554 | * freq_qos_update_request - Modify existing frequency QoS request. |
| 555 | * @req: Request to modify. |
| 556 | * @new_value: New request value. |
| 557 | * |
| 558 | * Update an existing frequency QoS request along with the effective constraint |
| 559 | * value for the list of requests it belongs to. |
| 560 | * |
| 561 | * Return 1 if the effective constraint value has changed, 0 if the effective |
| 562 | * constraint value has not changed, or a negative error code on failures. |
| 563 | */ |
| 564 | int freq_qos_update_request(struct freq_qos_request *req, s32 new_value) |
| 565 | { |
| 566 | if (!req) |
| 567 | return -EINVAL; |
| 568 | |
| 569 | if (WARN(!freq_qos_request_active(req), |
| 570 | "%s() called for unknown object\n", __func__)) |
| 571 | return -EINVAL; |
| 572 | |
| 573 | if (req->pnode.prio == new_value) |
| 574 | return 0; |
| 575 | |
| 576 | return freq_qos_apply(req, PM_QOS_UPDATE_REQ, new_value); |
| 577 | } |
| 578 | EXPORT_SYMBOL_GPL(freq_qos_update_request); |
| 579 | |
| 580 | /** |
| 581 | * freq_qos_remove_request - Remove frequency QoS request from its list. |
| 582 | * @req: Request to remove. |
| 583 | * |
| 584 | * Remove the given frequency QoS request from the list of constraints it |
| 585 | * belongs to and recompute the effective constraint value for that list. |
| 586 | * |
| 587 | * Return 1 if the effective constraint value has changed, 0 if the effective |
| 588 | * constraint value has not changed, or a negative error code on failures. |
| 589 | */ |
| 590 | int freq_qos_remove_request(struct freq_qos_request *req) |
| 591 | { |
Rafael J. Wysocki | 05ff1ba | 2019-11-20 10:33:34 +0100 | [diff] [blame] | 592 | int ret; |
| 593 | |
Rafael J. Wysocki | 77751a4 | 2019-10-16 12:41:24 +0200 | [diff] [blame] | 594 | if (!req) |
| 595 | return -EINVAL; |
| 596 | |
| 597 | if (WARN(!freq_qos_request_active(req), |
| 598 | "%s() called for unknown object\n", __func__)) |
| 599 | return -EINVAL; |
| 600 | |
Rafael J. Wysocki | 05ff1ba | 2019-11-20 10:33:34 +0100 | [diff] [blame] | 601 | ret = freq_qos_apply(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE); |
| 602 | req->qos = NULL; |
| 603 | req->type = 0; |
| 604 | |
| 605 | return ret; |
Rafael J. Wysocki | 77751a4 | 2019-10-16 12:41:24 +0200 | [diff] [blame] | 606 | } |
| 607 | EXPORT_SYMBOL_GPL(freq_qos_remove_request); |
| 608 | |
| 609 | /** |
| 610 | * freq_qos_add_notifier - Add frequency QoS change notifier. |
| 611 | * @qos: List of requests to add the notifier to. |
| 612 | * @type: Request type. |
| 613 | * @notifier: Notifier block to add. |
| 614 | */ |
| 615 | int freq_qos_add_notifier(struct freq_constraints *qos, |
| 616 | enum freq_qos_req_type type, |
| 617 | struct notifier_block *notifier) |
| 618 | { |
| 619 | int ret; |
| 620 | |
| 621 | if (IS_ERR_OR_NULL(qos) || !notifier) |
| 622 | return -EINVAL; |
| 623 | |
| 624 | switch (type) { |
| 625 | case FREQ_QOS_MIN: |
| 626 | ret = blocking_notifier_chain_register(qos->min_freq.notifiers, |
| 627 | notifier); |
| 628 | break; |
| 629 | case FREQ_QOS_MAX: |
| 630 | ret = blocking_notifier_chain_register(qos->max_freq.notifiers, |
| 631 | notifier); |
| 632 | break; |
| 633 | default: |
| 634 | WARN_ON(1); |
| 635 | ret = -EINVAL; |
| 636 | } |
| 637 | |
| 638 | return ret; |
| 639 | } |
| 640 | EXPORT_SYMBOL_GPL(freq_qos_add_notifier); |
| 641 | |
| 642 | /** |
| 643 | * freq_qos_remove_notifier - Remove frequency QoS change notifier. |
| 644 | * @qos: List of requests to remove the notifier from. |
| 645 | * @type: Request type. |
| 646 | * @notifier: Notifier block to remove. |
| 647 | */ |
| 648 | int freq_qos_remove_notifier(struct freq_constraints *qos, |
| 649 | enum freq_qos_req_type type, |
| 650 | struct notifier_block *notifier) |
| 651 | { |
| 652 | int ret; |
| 653 | |
| 654 | if (IS_ERR_OR_NULL(qos) || !notifier) |
| 655 | return -EINVAL; |
| 656 | |
| 657 | switch (type) { |
| 658 | case FREQ_QOS_MIN: |
| 659 | ret = blocking_notifier_chain_unregister(qos->min_freq.notifiers, |
| 660 | notifier); |
| 661 | break; |
| 662 | case FREQ_QOS_MAX: |
| 663 | ret = blocking_notifier_chain_unregister(qos->max_freq.notifiers, |
| 664 | notifier); |
| 665 | break; |
| 666 | default: |
| 667 | WARN_ON(1); |
| 668 | ret = -EINVAL; |
| 669 | } |
| 670 | |
| 671 | return ret; |
| 672 | } |
| 673 | EXPORT_SYMBOL_GPL(freq_qos_remove_notifier); |