/*
 * Copyright © 2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Ben Widawsky <ben@bwidawsk.net>
 *
 */

#include <linux/device.h>
#include <linux/module.h>
#include <linux/stat.h>
#include <linux/sysfs.h>
#include "intel_drv.h"
#include "i915_drv.h"

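/*
 * The sysfs files below hang off the DRM minor's device. Resolve that
 * device back to our drm_i915_private through the drm_minor stored as
 * the device's driver data.
 */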
static inline struct drm_i915_private *kdev_minor_to_i915(struct device *kdev)
{
	struct drm_minor *minor = dev_get_drvdata(kdev);
	return to_i915(minor->dev);
}

#ifdef CONFIG_PM
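/*
 * intel_rc6_residency_us() reports residency in microseconds; the
 * rc6*_residency_ms files below expose milliseconds, so round to the
 * nearest millisecond here.
 */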
static u32 calc_residency(struct drm_i915_private *dev_priv,
			  i915_reg_t reg)
{
	return DIV_ROUND_CLOSEST_ULL(intel_rc6_residency_us(dev_priv, reg),
				     1000);
}

static ssize_t
show_rc6_mask(struct device *kdev, struct device_attribute *attr, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%x\n", intel_enable_rc6());
}

static ssize_t
show_rc6_ms(struct device *kdev, struct device_attribute *attr, char *buf)
{
	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
	u32 rc6_residency = calc_residency(dev_priv, GEN6_GT_GFX_RC6);
	return snprintf(buf, PAGE_SIZE, "%u\n", rc6_residency);
}

static ssize_t
show_rc6p_ms(struct device *kdev, struct device_attribute *attr, char *buf)
{
	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
	u32 rc6p_residency = calc_residency(dev_priv, GEN6_GT_GFX_RC6p);
	return snprintf(buf, PAGE_SIZE, "%u\n", rc6p_residency);
}

static ssize_t
show_rc6pp_ms(struct device *kdev, struct device_attribute *attr, char *buf)
{
	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
	u32 rc6pp_residency = calc_residency(dev_priv, GEN6_GT_GFX_RC6pp);
	return snprintf(buf, PAGE_SIZE, "%u\n", rc6pp_residency);
}

static ssize_t
show_media_rc6_ms(struct device *kdev, struct device_attribute *attr, char *buf)
{
	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
	u32 rc6_residency = calc_residency(dev_priv, VLV_GT_MEDIA_RC6);
	return snprintf(buf, PAGE_SIZE, "%u\n", rc6_residency);
}

static DEVICE_ATTR(rc6_enable, S_IRUGO, show_rc6_mask, NULL);
static DEVICE_ATTR(rc6_residency_ms, S_IRUGO, show_rc6_ms, NULL);
static DEVICE_ATTR(rc6p_residency_ms, S_IRUGO, show_rc6p_ms, NULL);
static DEVICE_ATTR(rc6pp_residency_ms, S_IRUGO, show_rc6pp_ms, NULL);
static DEVICE_ATTR(media_rc6_residency_ms, S_IRUGO, show_media_rc6_ms, NULL);

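/*
 * These groups all use power_group_name ("power"), so sysfs_merge_group()
 * folds them into the device's existing power/ directory rather than
 * creating a new one. On a typical system the files then appear as, for
 * example (card index may differ):
 *
 *	/sys/class/drm/card0/power/rc6_residency_ms
 */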
static struct attribute *rc6_attrs[] = {
	&dev_attr_rc6_enable.attr,
	&dev_attr_rc6_residency_ms.attr,
	NULL
};

static struct attribute_group rc6_attr_group = {
	.name = power_group_name,
	.attrs = rc6_attrs
};

static struct attribute *rc6p_attrs[] = {
	&dev_attr_rc6p_residency_ms.attr,
	&dev_attr_rc6pp_residency_ms.attr,
	NULL
};

static struct attribute_group rc6p_attr_group = {
	.name = power_group_name,
	.attrs = rc6p_attrs
};

static struct attribute *media_rc6_attrs[] = {
	&dev_attr_media_rc6_residency_ms.attr,
	NULL
};

static struct attribute_group media_rc6_attr_group = {
	.name = power_group_name,
	.attrs = media_rc6_attrs
};
#endif

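/*
 * l3_parity / l3_parity_slice_1 are binary sysfs files exposing the
 * per-slice L3 remapping tables on hardware with L3 dynamic parity
 * (HAS_L3_DPF). Accesses must be 4-byte aligned and fall within
 * GEN7_L3LOG_SIZE.
 */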
static int l3_access_valid(struct drm_i915_private *dev_priv, loff_t offset)
{
	if (!HAS_L3_DPF(dev_priv))
		return -EPERM;

	if (offset % 4 != 0)
		return -EINVAL;

	if (offset >= GEN7_L3LOG_SIZE)
		return -ENXIO;

	return 0;
}

static ssize_t
i915_l3_read(struct file *filp, struct kobject *kobj,
	     struct bin_attribute *attr, char *buf,
	     loff_t offset, size_t count)
{
	struct device *kdev = kobj_to_dev(kobj);
	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
	struct drm_device *dev = &dev_priv->drm;
	int slice = (int)(uintptr_t)attr->private;
	int ret;

	count = round_down(count, 4);

	ret = l3_access_valid(dev_priv, offset);
	if (ret)
		return ret;

	count = min_t(size_t, GEN7_L3LOG_SIZE - offset, count);

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	if (dev_priv->l3_parity.remap_info[slice])
		memcpy(buf,
		       dev_priv->l3_parity.remap_info[slice] + (offset/4),
		       count);
	else
		memset(buf, 0, count);

	mutex_unlock(&dev->struct_mutex);

	return count;
}

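/*
 * Writes are staged in the per-slice remap_info table (allocated lazily
 * on first write); applying the remapping to the hardware is deferred
 * until each context is next scheduled, hence the remap_slice flag set
 * on every context below.
 */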
static ssize_t
i915_l3_write(struct file *filp, struct kobject *kobj,
	      struct bin_attribute *attr, char *buf,
	      loff_t offset, size_t count)
{
	struct device *kdev = kobj_to_dev(kobj);
	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
	struct drm_device *dev = &dev_priv->drm;
	struct i915_gem_context *ctx;
	u32 *temp = NULL; /* Just here to make handling failures easy */
	int slice = (int)(uintptr_t)attr->private;
	int ret;

	if (!HAS_HW_CONTEXTS(dev_priv))
		return -ENXIO;

	ret = l3_access_valid(dev_priv, offset);
	if (ret)
		return ret;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	if (!dev_priv->l3_parity.remap_info[slice]) {
		temp = kzalloc(GEN7_L3LOG_SIZE, GFP_KERNEL);
		if (!temp) {
			mutex_unlock(&dev->struct_mutex);
			return -ENOMEM;
		}
	}

	/* TODO: Ideally we really want a GPU reset here to make sure errors
	 * aren't propagated. Since I cannot find a stable way to reset the GPU
	 * at this point it is left as a TODO.
	 */
	if (temp)
		dev_priv->l3_parity.remap_info[slice] = temp;

	memcpy(dev_priv->l3_parity.remap_info[slice] + (offset/4), buf, count);

	/* NB: We defer the remapping until we switch to the context */
	list_for_each_entry(ctx, &dev_priv->context_list, link)
		ctx->remap_slice |= (1<<slice);

	mutex_unlock(&dev->struct_mutex);

	return count;
}

static struct bin_attribute dpf_attrs = {
	.attr = {.name = "l3_parity", .mode = (S_IRUSR | S_IWUSR)},
	.size = GEN7_L3LOG_SIZE,
	.read = i915_l3_read,
	.write = i915_l3_write,
	.mmap = NULL,
	.private = (void *)0
};

static struct bin_attribute dpf_attrs_1 = {
	.attr = {.name = "l3_parity_slice_1", .mode = (S_IRUSR | S_IWUSR)},
	.size = GEN7_L3LOG_SIZE,
	.read = i915_l3_read,
	.write = i915_l3_write,
	.mmap = NULL,
	.private = (void *)1
};

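/*
 * GT frequency (RPS) files. gt_act_freq_mhz reads the frequency the
 * hardware is actually running at (PUNIT status register on VLV/CHV,
 * RPSTAT1 elsewhere), while gt_cur_freq_mhz reports the last frequency
 * the driver requested.
 */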
static ssize_t gt_act_freq_mhz_show(struct device *kdev,
				    struct device_attribute *attr, char *buf)
{
	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
	int ret;

	intel_runtime_pm_get(dev_priv);

	mutex_lock(&dev_priv->rps.hw_lock);
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		u32 freq;
		freq = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
		ret = intel_gpu_freq(dev_priv, (freq >> 8) & 0xff);
	} else {
		u32 rpstat = I915_READ(GEN6_RPSTAT1);
		if (IS_GEN9(dev_priv))
			ret = (rpstat & GEN9_CAGF_MASK) >> GEN9_CAGF_SHIFT;
		else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
			ret = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT;
		else
			ret = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT;
		ret = intel_gpu_freq(dev_priv, ret);
	}
	mutex_unlock(&dev_priv->rps.hw_lock);

	intel_runtime_pm_put(dev_priv);

	return snprintf(buf, PAGE_SIZE, "%d\n", ret);
}

static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
				    struct device_attribute *attr, char *buf)
{
	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);

	return snprintf(buf, PAGE_SIZE, "%d\n",
			intel_gpu_freq(dev_priv,
				       dev_priv->rps.cur_freq));
}

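/*
 * gt_boost_freq_mhz exposes the frequency used when boosting on behalf
 * of a waiting client; the store only accepts values within the static
 * hardware min/max range.
 */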
static ssize_t gt_boost_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
{
	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);

	return snprintf(buf, PAGE_SIZE, "%d\n",
			intel_gpu_freq(dev_priv,
				       dev_priv->rps.boost_freq));
}

static ssize_t gt_boost_freq_mhz_store(struct device *kdev,
				       struct device_attribute *attr,
				       const char *buf, size_t count)
{
	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
	u32 val;
	ssize_t ret;

	ret = kstrtou32(buf, 0, &val);
	if (ret)
		return ret;

	/* Validate against (static) hardware limits */
	val = intel_freq_opcode(dev_priv, val);
	if (val < dev_priv->rps.min_freq || val > dev_priv->rps.max_freq)
		return -EINVAL;

	mutex_lock(&dev_priv->rps.hw_lock);
	dev_priv->rps.boost_freq = val;
	mutex_unlock(&dev_priv->rps.hw_lock);

	return count;
}

static ssize_t vlv_rpe_freq_mhz_show(struct device *kdev,
				     struct device_attribute *attr, char *buf)
{
	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);

	return snprintf(buf, PAGE_SIZE, "%d\n",
			intel_gpu_freq(dev_priv,
				       dev_priv->rps.efficient_freq));
}

static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
{
	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);

	return snprintf(buf, PAGE_SIZE, "%d\n",
			intel_gpu_freq(dev_priv,
				       dev_priv->rps.max_freq_softlimit));
}

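/*
 * The max/min soft-limit stores validate the requested frequency against
 * the hardware limits and the opposing soft limit, then re-clamp and
 * re-request the current frequency so the new limit takes effect
 * immediately.
 */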
static ssize_t gt_max_freq_mhz_store(struct device *kdev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
	u32 val;
	ssize_t ret;

	ret = kstrtou32(buf, 0, &val);
	if (ret)
		return ret;

	intel_runtime_pm_get(dev_priv);

	mutex_lock(&dev_priv->rps.hw_lock);

	val = intel_freq_opcode(dev_priv, val);

	if (val < dev_priv->rps.min_freq ||
	    val > dev_priv->rps.max_freq ||
	    val < dev_priv->rps.min_freq_softlimit) {
		mutex_unlock(&dev_priv->rps.hw_lock);
		intel_runtime_pm_put(dev_priv);
		return -EINVAL;
	}

	if (val > dev_priv->rps.rp0_freq)
		DRM_DEBUG("User requested overclocking to %d\n",
			  intel_gpu_freq(dev_priv, val));

	dev_priv->rps.max_freq_softlimit = val;

	val = clamp_t(int, dev_priv->rps.cur_freq,
		      dev_priv->rps.min_freq_softlimit,
		      dev_priv->rps.max_freq_softlimit);

	/* We still need *_set_rps to process the new max_delay and
	 * update the interrupt limits and PMINTRMSK even though
	 * frequency request may be unchanged. */
	ret = intel_set_rps(dev_priv, val);

	mutex_unlock(&dev_priv->rps.hw_lock);

	intel_runtime_pm_put(dev_priv);

	return ret ?: count;
}

static ssize_t gt_min_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
{
	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);

	return snprintf(buf, PAGE_SIZE, "%d\n",
			intel_gpu_freq(dev_priv,
				       dev_priv->rps.min_freq_softlimit));
}

static ssize_t gt_min_freq_mhz_store(struct device *kdev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
	u32 val;
	ssize_t ret;

	ret = kstrtou32(buf, 0, &val);
	if (ret)
		return ret;

	intel_runtime_pm_get(dev_priv);

	mutex_lock(&dev_priv->rps.hw_lock);

	val = intel_freq_opcode(dev_priv, val);

	if (val < dev_priv->rps.min_freq ||
	    val > dev_priv->rps.max_freq ||
	    val > dev_priv->rps.max_freq_softlimit) {
		mutex_unlock(&dev_priv->rps.hw_lock);
		intel_runtime_pm_put(dev_priv);
		return -EINVAL;
	}

	dev_priv->rps.min_freq_softlimit = val;

	val = clamp_t(int, dev_priv->rps.cur_freq,
		      dev_priv->rps.min_freq_softlimit,
		      dev_priv->rps.max_freq_softlimit);

	/* We still need *_set_rps to process the new min_delay and
	 * update the interrupt limits and PMINTRMSK even though
	 * frequency request may be unchanged. */
	ret = intel_set_rps(dev_priv, val);

	mutex_unlock(&dev_priv->rps.hw_lock);

	intel_runtime_pm_put(dev_priv);

	return ret ?: count;
}

static DEVICE_ATTR(gt_act_freq_mhz, S_IRUGO, gt_act_freq_mhz_show, NULL);
static DEVICE_ATTR(gt_cur_freq_mhz, S_IRUGO, gt_cur_freq_mhz_show, NULL);
static DEVICE_ATTR(gt_boost_freq_mhz, S_IRUGO | S_IWUSR, gt_boost_freq_mhz_show, gt_boost_freq_mhz_store);
static DEVICE_ATTR(gt_max_freq_mhz, S_IRUGO | S_IWUSR, gt_max_freq_mhz_show, gt_max_freq_mhz_store);
static DEVICE_ATTR(gt_min_freq_mhz, S_IRUGO | S_IWUSR, gt_min_freq_mhz_show, gt_min_freq_mhz_store);

static DEVICE_ATTR(vlv_rpe_freq_mhz, S_IRUGO, vlv_rpe_freq_mhz_show, NULL);

static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf);
static DEVICE_ATTR(gt_RP0_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);
static DEVICE_ATTR(gt_RP1_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);
static DEVICE_ATTR(gt_RPn_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);

/* For now we have a static number of RP states */
static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
{
	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
	u32 val;

	if (attr == &dev_attr_gt_RP0_freq_mhz)
		val = intel_gpu_freq(dev_priv, dev_priv->rps.rp0_freq);
	else if (attr == &dev_attr_gt_RP1_freq_mhz)
		val = intel_gpu_freq(dev_priv, dev_priv->rps.rp1_freq);
	else if (attr == &dev_attr_gt_RPn_freq_mhz)
		val = intel_gpu_freq(dev_priv, dev_priv->rps.min_freq);
	else
		BUG();

	return snprintf(buf, PAGE_SIZE, "%d\n", val);
}

static const struct attribute *gen6_attrs[] = {
	&dev_attr_gt_act_freq_mhz.attr,
	&dev_attr_gt_cur_freq_mhz.attr,
	&dev_attr_gt_boost_freq_mhz.attr,
	&dev_attr_gt_max_freq_mhz.attr,
	&dev_attr_gt_min_freq_mhz.attr,
	&dev_attr_gt_RP0_freq_mhz.attr,
	&dev_attr_gt_RP1_freq_mhz.attr,
	&dev_attr_gt_RPn_freq_mhz.attr,
	NULL,
};

static const struct attribute *vlv_attrs[] = {
	&dev_attr_gt_act_freq_mhz.attr,
	&dev_attr_gt_cur_freq_mhz.attr,
	&dev_attr_gt_boost_freq_mhz.attr,
	&dev_attr_gt_max_freq_mhz.attr,
	&dev_attr_gt_min_freq_mhz.attr,
	&dev_attr_gt_RP0_freq_mhz.attr,
	&dev_attr_gt_RP1_freq_mhz.attr,
	&dev_attr_gt_RPn_freq_mhz.attr,
	&dev_attr_vlv_rpe_freq_mhz.attr,
	NULL,
};

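/*
 * The "error" binary file only exists when error capture is compiled in
 * (CONFIG_DRM_I915_CAPTURE_ERROR). Reading it returns the first recorded
 * GPU error state formatted as text; writing anything to it discards the
 * recorded state.
 */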
#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)

static ssize_t error_state_read(struct file *filp, struct kobject *kobj,
				struct bin_attribute *attr, char *buf,
				loff_t off, size_t count)
{
	struct device *kdev = kobj_to_dev(kobj);
	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
	struct drm_i915_error_state_buf error_str;
	struct i915_gpu_state *gpu;
	ssize_t ret;

	ret = i915_error_state_buf_init(&error_str, dev_priv, count, off);
	if (ret)
		return ret;

	gpu = i915_first_error_state(dev_priv);
	ret = i915_error_state_to_str(&error_str, gpu);
	if (ret)
		goto out;

	ret = count < error_str.bytes ? count : error_str.bytes;
	memcpy(buf, error_str.buf, ret);

out:
	i915_gpu_state_put(gpu);
	i915_error_state_buf_release(&error_str);

	return ret;
}

static ssize_t error_state_write(struct file *file, struct kobject *kobj,
				 struct bin_attribute *attr, char *buf,
				 loff_t off, size_t count)
{
	struct device *kdev = kobj_to_dev(kobj);
	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);

	DRM_DEBUG_DRIVER("Resetting error state\n");
	i915_reset_error_state(dev_priv);

	return count;
}

static struct bin_attribute error_state_attr = {
	.attr.name = "error",
	.attr.mode = S_IRUSR | S_IWUSR,
	.size = 0,
	.read = error_state_read,
	.write = error_state_write,
};

static void i915_setup_error_capture(struct device *kdev)
{
	if (sysfs_create_bin_file(&kdev->kobj, &error_state_attr))
		DRM_ERROR("error_state sysfs setup failed\n");
}

static void i915_teardown_error_capture(struct device *kdev)
{
	sysfs_remove_bin_file(&kdev->kobj, &error_state_attr);
}
#else
static void i915_setup_error_capture(struct device *kdev) {}
static void i915_teardown_error_capture(struct device *kdev) {}
#endif

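/*
 * Called at driver load/unload. Each group of files is registered only
 * when the underlying hardware supports it (RC6, RC6p, media RC6, L3 DPF,
 * RPS on gen6+), so absent features simply have no sysfs entries.
 */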
void i915_setup_sysfs(struct drm_i915_private *dev_priv)
{
	struct device *kdev = dev_priv->drm.primary->kdev;
	int ret;

#ifdef CONFIG_PM
	if (HAS_RC6(dev_priv)) {
		ret = sysfs_merge_group(&kdev->kobj,
					&rc6_attr_group);
		if (ret)
			DRM_ERROR("RC6 residency sysfs setup failed\n");
	}
	if (HAS_RC6p(dev_priv)) {
		ret = sysfs_merge_group(&kdev->kobj,
					&rc6p_attr_group);
		if (ret)
			DRM_ERROR("RC6p residency sysfs setup failed\n");
	}
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		ret = sysfs_merge_group(&kdev->kobj,
					&media_rc6_attr_group);
		if (ret)
			DRM_ERROR("Media RC6 residency sysfs setup failed\n");
	}
#endif
	if (HAS_L3_DPF(dev_priv)) {
		ret = device_create_bin_file(kdev, &dpf_attrs);
		if (ret)
			DRM_ERROR("l3 parity sysfs setup failed\n");

		if (NUM_L3_SLICES(dev_priv) > 1) {
			ret = device_create_bin_file(kdev,
						     &dpf_attrs_1);
			if (ret)
				DRM_ERROR("l3 parity slice 1 setup failed\n");
		}
	}

	ret = 0;
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		ret = sysfs_create_files(&kdev->kobj, vlv_attrs);
	else if (INTEL_GEN(dev_priv) >= 6)
		ret = sysfs_create_files(&kdev->kobj, gen6_attrs);
	if (ret)
		DRM_ERROR("RPS sysfs setup failed\n");

	i915_setup_error_capture(kdev);
}

void i915_teardown_sysfs(struct drm_i915_private *dev_priv)
{
	struct device *kdev = dev_priv->drm.primary->kdev;

	i915_teardown_error_capture(kdev);

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		sysfs_remove_files(&kdev->kobj, vlv_attrs);
	else
		sysfs_remove_files(&kdev->kobj, gen6_attrs);
	device_remove_bin_file(kdev, &dpf_attrs_1);
	device_remove_bin_file(kdev, &dpf_attrs);
#ifdef CONFIG_PM
	sysfs_unmerge_group(&kdev->kobj, &rc6_attr_group);
	sysfs_unmerge_group(&kdev->kobj, &rc6p_attr_group);
#endif
}