/*
 * Copyright © 2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *	Ben Widawsky <ben@bwidawsk.net>
 *
 */

#include <linux/device.h>
#include <linux/module.h>
#include <linux/stat.h>
#include <linux/sysfs.h>
#include "intel_drv.h"
#include "i915_drv.h"

#define dev_to_drm_minor(d) dev_get_drvdata((d))
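
/*
 * All of the attributes registered here hang off the DRM card device, so
 * they typically end up under /sys/class/drm/card0/ (and its power/ group)
 * for the first GPU.
 */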

#ifdef CONFIG_PM
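/*
 * Read a GT residency counter and scale it to milliseconds.  On most
 * platforms the counter ticks once per 1.28us, hence the 128/100000
 * units/div pair below; on VLV/CHV the counter runs off the CZ clock
 * instead.
 */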
static u32 calc_residency(struct drm_device *dev, const u32 reg)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u64 raw_time; /* 32b value may overflow during fixed point math */
	u64 units = 128ULL, div = 100000ULL;
	u32 ret;

	if (!intel_enable_rc6(dev))
		return 0;

	intel_runtime_pm_get(dev_priv);

	/* On VLV and CHV, residency time is in CZ units rather than 1.28us */
	if (IS_VALLEYVIEW(dev)) {
		units = 1;
		div = dev_priv->czclk_freq;

		if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH)
			units <<= 8;
	}

	raw_time = I915_READ(reg) * units;
	ret = DIV_ROUND_UP_ULL(raw_time, div);

	intel_runtime_pm_put(dev_priv);
	return ret;
}

static ssize_t
show_rc6_mask(struct device *kdev, struct device_attribute *attr, char *buf)
{
	struct drm_minor *dminor = dev_to_drm_minor(kdev);
	return snprintf(buf, PAGE_SIZE, "%x\n", intel_enable_rc6(dminor->dev));
}

static ssize_t
show_rc6_ms(struct device *kdev, struct device_attribute *attr, char *buf)
{
	struct drm_minor *dminor = dev_get_drvdata(kdev);
	u32 rc6_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6);
	return snprintf(buf, PAGE_SIZE, "%u\n", rc6_residency);
}

static ssize_t
show_rc6p_ms(struct device *kdev, struct device_attribute *attr, char *buf)
{
	struct drm_minor *dminor = dev_to_drm_minor(kdev);
	u32 rc6p_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6p);
	return snprintf(buf, PAGE_SIZE, "%u\n", rc6p_residency);
}

static ssize_t
show_rc6pp_ms(struct device *kdev, struct device_attribute *attr, char *buf)
{
	struct drm_minor *dminor = dev_to_drm_minor(kdev);
	u32 rc6pp_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6pp);
	return snprintf(buf, PAGE_SIZE, "%u\n", rc6pp_residency);
}

static ssize_t
show_media_rc6_ms(struct device *kdev, struct device_attribute *attr, char *buf)
{
	struct drm_minor *dminor = dev_get_drvdata(kdev);
	u32 rc6_residency = calc_residency(dminor->dev, VLV_GT_MEDIA_RC6);
	return snprintf(buf, PAGE_SIZE, "%u\n", rc6_residency);
}

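/*
 * The RC6 counters are exposed read-only and merged into the device's
 * existing power/ sysfs group (power_group_name) by i915_setup_sysfs().
 */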
static DEVICE_ATTR(rc6_enable, S_IRUGO, show_rc6_mask, NULL);
static DEVICE_ATTR(rc6_residency_ms, S_IRUGO, show_rc6_ms, NULL);
static DEVICE_ATTR(rc6p_residency_ms, S_IRUGO, show_rc6p_ms, NULL);
static DEVICE_ATTR(rc6pp_residency_ms, S_IRUGO, show_rc6pp_ms, NULL);
static DEVICE_ATTR(media_rc6_residency_ms, S_IRUGO, show_media_rc6_ms, NULL);

static struct attribute *rc6_attrs[] = {
	&dev_attr_rc6_enable.attr,
	&dev_attr_rc6_residency_ms.attr,
	NULL
};

static struct attribute_group rc6_attr_group = {
	.name = power_group_name,
	.attrs = rc6_attrs
};

static struct attribute *rc6p_attrs[] = {
	&dev_attr_rc6p_residency_ms.attr,
	&dev_attr_rc6pp_residency_ms.attr,
	NULL
};

static struct attribute_group rc6p_attr_group = {
	.name = power_group_name,
	.attrs = rc6p_attrs
};

static struct attribute *media_rc6_attrs[] = {
	&dev_attr_media_rc6_residency_ms.attr,
	NULL
};

static struct attribute_group media_rc6_attr_group = {
	.name = power_group_name,
	.attrs = media_rc6_attrs
};
#endif

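/*
 * L3 parity (DPF) support: userspace reads and patches the per-slice L3
 * remap tables through the l3_parity binary attributes defined below.
 */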
static int l3_access_valid(struct drm_device *dev, loff_t offset)
{
	if (!HAS_L3_DPF(dev))
		return -EPERM;

	if (offset % 4 != 0)
		return -EINVAL;

	if (offset >= GEN7_L3LOG_SIZE)
		return -ENXIO;

	return 0;
}

static ssize_t
i915_l3_read(struct file *filp, struct kobject *kobj,
	     struct bin_attribute *attr, char *buf,
	     loff_t offset, size_t count)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct drm_minor *dminor = dev_to_drm_minor(dev);
	struct drm_device *drm_dev = dminor->dev;
	struct drm_i915_private *dev_priv = drm_dev->dev_private;
	int slice = (int)(uintptr_t)attr->private;
	int ret;

	count = round_down(count, 4);

	ret = l3_access_valid(drm_dev, offset);
	if (ret)
		return ret;

	count = min_t(size_t, GEN7_L3LOG_SIZE - offset, count);

	ret = i915_mutex_lock_interruptible(drm_dev);
	if (ret)
		return ret;

	if (dev_priv->l3_parity.remap_info[slice])
		memcpy(buf,
		       dev_priv->l3_parity.remap_info[slice] + (offset/4),
		       count);
	else
		memset(buf, 0, count);

	mutex_unlock(&drm_dev->struct_mutex);

	return count;
}

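/*
 * Writes install replacement rows in the driver's copy of the remap table;
 * the GPU is idled first and every context is flagged so the new table is
 * applied at its next context switch.
 */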
static ssize_t
i915_l3_write(struct file *filp, struct kobject *kobj,
	      struct bin_attribute *attr, char *buf,
	      loff_t offset, size_t count)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct drm_minor *dminor = dev_to_drm_minor(dev);
	struct drm_device *drm_dev = dminor->dev;
	struct drm_i915_private *dev_priv = drm_dev->dev_private;
	struct intel_context *ctx;
	u32 *temp = NULL; /* Just here to make handling failures easy */
	int slice = (int)(uintptr_t)attr->private;
	int ret;

	if (!HAS_HW_CONTEXTS(drm_dev))
		return -ENXIO;

	ret = l3_access_valid(drm_dev, offset);
	if (ret)
		return ret;

	ret = i915_mutex_lock_interruptible(drm_dev);
	if (ret)
		return ret;

	if (!dev_priv->l3_parity.remap_info[slice]) {
		temp = kzalloc(GEN7_L3LOG_SIZE, GFP_KERNEL);
		if (!temp) {
			mutex_unlock(&drm_dev->struct_mutex);
			return -ENOMEM;
		}
	}

	ret = i915_gpu_idle(drm_dev);
	if (ret) {
		kfree(temp);
		mutex_unlock(&drm_dev->struct_mutex);
		return ret;
	}

	/* TODO: Ideally we really want a GPU reset here to make sure errors
	 * aren't propagated. Since I cannot find a stable way to reset the GPU
	 * at this point it is left as a TODO.
	 */
	if (temp)
		dev_priv->l3_parity.remap_info[slice] = temp;

	memcpy(dev_priv->l3_parity.remap_info[slice] + (offset/4), buf, count);

	/* NB: We defer the remapping until we switch to the context */
	list_for_each_entry(ctx, &dev_priv->context_list, link)
		ctx->remap_slice |= (1<<slice);

	mutex_unlock(&drm_dev->struct_mutex);

	return count;
}

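/* The slice number is passed to the read/write handlers via ->private. */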
static struct bin_attribute dpf_attrs = {
	.attr = {.name = "l3_parity", .mode = (S_IRUSR | S_IWUSR)},
	.size = GEN7_L3LOG_SIZE,
	.read = i915_l3_read,
	.write = i915_l3_write,
	.mmap = NULL,
	.private = (void *)0
};

static struct bin_attribute dpf_attrs_1 = {
	.attr = {.name = "l3_parity_slice_1", .mode = (S_IRUSR | S_IWUSR)},
	.size = GEN7_L3LOG_SIZE,
	.read = i915_l3_read,
	.write = i915_l3_write,
	.mmap = NULL,
	.private = (void *)1
};

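/*
 * gt_act_freq_mhz reports the frequency the GT is actually running at,
 * read back from the punit (VLV/CHV) or RPSTAT1, whereas gt_cur_freq_mhz
 * below reports the last frequency that was requested.
 */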
static ssize_t gt_act_freq_mhz_show(struct device *kdev,
				    struct device_attribute *attr, char *buf)
{
	struct drm_minor *minor = dev_to_drm_minor(kdev);
	struct drm_device *dev = minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

	intel_runtime_pm_get(dev_priv);

	mutex_lock(&dev_priv->rps.hw_lock);
	if (IS_VALLEYVIEW(dev_priv->dev)) {
		u32 freq;
		freq = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
		ret = intel_gpu_freq(dev_priv, (freq >> 8) & 0xff);
	} else {
		u32 rpstat = I915_READ(GEN6_RPSTAT1);
		if (IS_GEN9(dev_priv))
			ret = (rpstat & GEN9_CAGF_MASK) >> GEN9_CAGF_SHIFT;
		else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
			ret = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT;
		else
			ret = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT;
		ret = intel_gpu_freq(dev_priv, ret);
	}
	mutex_unlock(&dev_priv->rps.hw_lock);

	intel_runtime_pm_put(dev_priv);

	return snprintf(buf, PAGE_SIZE, "%d\n", ret);
}

static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
				    struct device_attribute *attr, char *buf)
{
	struct drm_minor *minor = dev_to_drm_minor(kdev);
	struct drm_device *dev = minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

	intel_runtime_pm_get(dev_priv);

	mutex_lock(&dev_priv->rps.hw_lock);
	ret = intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq);
	mutex_unlock(&dev_priv->rps.hw_lock);

	intel_runtime_pm_put(dev_priv);

	return snprintf(buf, PAGE_SIZE, "%d\n", ret);
}

static ssize_t vlv_rpe_freq_mhz_show(struct device *kdev,
				     struct device_attribute *attr, char *buf)
{
	struct drm_minor *minor = dev_to_drm_minor(kdev);
	struct drm_device *dev = minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	return snprintf(buf, PAGE_SIZE,
			"%d\n",
			intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq));
}

static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
{
	struct drm_minor *minor = dev_to_drm_minor(kdev);
	struct drm_device *dev = minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

	mutex_lock(&dev_priv->rps.hw_lock);
	ret = intel_gpu_freq(dev_priv, dev_priv->rps.max_freq_softlimit);
	mutex_unlock(&dev_priv->rps.hw_lock);

	return snprintf(buf, PAGE_SIZE, "%d\n", ret);
}

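/*
 * Writing gt_max_freq_mhz adjusts the userspace softlimit: the value is
 * converted to a hardware opcode, validated against the absolute min/max
 * and the current min softlimit, and the current frequency is then
 * re-clamped and re-requested.  gt_min_freq_mhz_store below mirrors this
 * for the lower bound.
 */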
static ssize_t gt_max_freq_mhz_store(struct device *kdev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct drm_minor *minor = dev_to_drm_minor(kdev);
	struct drm_device *dev = minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 val;
	ssize_t ret;

	ret = kstrtou32(buf, 0, &val);
	if (ret)
		return ret;

	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

	mutex_lock(&dev_priv->rps.hw_lock);

	val = intel_freq_opcode(dev_priv, val);

	if (val < dev_priv->rps.min_freq ||
	    val > dev_priv->rps.max_freq ||
	    val < dev_priv->rps.min_freq_softlimit) {
		mutex_unlock(&dev_priv->rps.hw_lock);
		return -EINVAL;
	}

	if (val > dev_priv->rps.rp0_freq)
		DRM_DEBUG("User requested overclocking to %d\n",
			  intel_gpu_freq(dev_priv, val));

	dev_priv->rps.max_freq_softlimit = val;

	val = clamp_t(int, dev_priv->rps.cur_freq,
		      dev_priv->rps.min_freq_softlimit,
		      dev_priv->rps.max_freq_softlimit);

	/* We still need *_set_rps to process the new max_delay and
	 * update the interrupt limits and PMINTRMSK even though
	 * frequency request may be unchanged. */
	intel_set_rps(dev, val);

	mutex_unlock(&dev_priv->rps.hw_lock);

	return count;
}

static ssize_t gt_min_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
{
	struct drm_minor *minor = dev_to_drm_minor(kdev);
	struct drm_device *dev = minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

	mutex_lock(&dev_priv->rps.hw_lock);
	ret = intel_gpu_freq(dev_priv, dev_priv->rps.min_freq_softlimit);
	mutex_unlock(&dev_priv->rps.hw_lock);

	return snprintf(buf, PAGE_SIZE, "%d\n", ret);
}

static ssize_t gt_min_freq_mhz_store(struct device *kdev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct drm_minor *minor = dev_to_drm_minor(kdev);
	struct drm_device *dev = minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 val;
	ssize_t ret;

	ret = kstrtou32(buf, 0, &val);
	if (ret)
		return ret;

	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

	mutex_lock(&dev_priv->rps.hw_lock);

	val = intel_freq_opcode(dev_priv, val);

	if (val < dev_priv->rps.min_freq ||
	    val > dev_priv->rps.max_freq ||
	    val > dev_priv->rps.max_freq_softlimit) {
		mutex_unlock(&dev_priv->rps.hw_lock);
		return -EINVAL;
	}

	dev_priv->rps.min_freq_softlimit = val;

	val = clamp_t(int, dev_priv->rps.cur_freq,
		      dev_priv->rps.min_freq_softlimit,
		      dev_priv->rps.max_freq_softlimit);

	/* We still need *_set_rps to process the new min_delay and
	 * update the interrupt limits and PMINTRMSK even though
	 * frequency request may be unchanged. */
	intel_set_rps(dev, val);

	mutex_unlock(&dev_priv->rps.hw_lock);

	return count;
}

static DEVICE_ATTR(gt_act_freq_mhz, S_IRUGO, gt_act_freq_mhz_show, NULL);
static DEVICE_ATTR(gt_cur_freq_mhz, S_IRUGO, gt_cur_freq_mhz_show, NULL);
static DEVICE_ATTR(gt_max_freq_mhz, S_IRUGO | S_IWUSR, gt_max_freq_mhz_show, gt_max_freq_mhz_store);
static DEVICE_ATTR(gt_min_freq_mhz, S_IRUGO | S_IWUSR, gt_min_freq_mhz_show, gt_min_freq_mhz_store);

static DEVICE_ATTR(vlv_rpe_freq_mhz, S_IRUGO, vlv_rpe_freq_mhz_show, NULL);

static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf);
static DEVICE_ATTR(gt_RP0_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);
static DEVICE_ATTR(gt_RP1_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);
static DEVICE_ATTR(gt_RPn_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);

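/* RP0, RP1 and RPn are fixed hardware operating points: the non-overclocked
 * maximum, an intermediate point and the minimum frequency, respectively. */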
/* For now we have a static number of RP states */
static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
{
	struct drm_minor *minor = dev_to_drm_minor(kdev);
	struct drm_device *dev = minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 val;

	if (attr == &dev_attr_gt_RP0_freq_mhz)
		val = intel_gpu_freq(dev_priv, dev_priv->rps.rp0_freq);
	else if (attr == &dev_attr_gt_RP1_freq_mhz)
		val = intel_gpu_freq(dev_priv, dev_priv->rps.rp1_freq);
	else if (attr == &dev_attr_gt_RPn_freq_mhz)
		val = intel_gpu_freq(dev_priv, dev_priv->rps.min_freq);
	else
		BUG();

	return snprintf(buf, PAGE_SIZE, "%d\n", val);
}

static const struct attribute *gen6_attrs[] = {
	&dev_attr_gt_act_freq_mhz.attr,
	&dev_attr_gt_cur_freq_mhz.attr,
	&dev_attr_gt_max_freq_mhz.attr,
	&dev_attr_gt_min_freq_mhz.attr,
	&dev_attr_gt_RP0_freq_mhz.attr,
	&dev_attr_gt_RP1_freq_mhz.attr,
	&dev_attr_gt_RPn_freq_mhz.attr,
	NULL,
};

static const struct attribute *vlv_attrs[] = {
	&dev_attr_gt_act_freq_mhz.attr,
	&dev_attr_gt_cur_freq_mhz.attr,
	&dev_attr_gt_max_freq_mhz.attr,
	&dev_attr_gt_min_freq_mhz.attr,
	&dev_attr_gt_RP0_freq_mhz.attr,
	&dev_attr_gt_RP1_freq_mhz.attr,
	&dev_attr_gt_RPn_freq_mhz.attr,
	&dev_attr_vlv_rpe_freq_mhz.attr,
	NULL,
};

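/*
 * The error binary attribute exposes the last captured GPU error state;
 * writing anything to it discards that capture.
 */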
static ssize_t error_state_read(struct file *filp, struct kobject *kobj,
				struct bin_attribute *attr, char *buf,
				loff_t off, size_t count)
{
	struct device *kdev = container_of(kobj, struct device, kobj);
	struct drm_minor *minor = dev_to_drm_minor(kdev);
	struct drm_device *dev = minor->dev;
	struct i915_error_state_file_priv error_priv;
	struct drm_i915_error_state_buf error_str;
	ssize_t ret_count = 0;
	int ret;

	memset(&error_priv, 0, sizeof(error_priv));

	ret = i915_error_state_buf_init(&error_str, to_i915(dev), count, off);
	if (ret)
		return ret;

	error_priv.dev = dev;
	i915_error_state_get(dev, &error_priv);

	ret = i915_error_state_to_str(&error_str, &error_priv);
	if (ret)
		goto out;

	ret_count = count < error_str.bytes ? count : error_str.bytes;

	memcpy(buf, error_str.buf, ret_count);
out:
	i915_error_state_put(&error_priv);
	i915_error_state_buf_release(&error_str);

	return ret ?: ret_count;
}

static ssize_t error_state_write(struct file *file, struct kobject *kobj,
				 struct bin_attribute *attr, char *buf,
				 loff_t off, size_t count)
{
	struct device *kdev = container_of(kobj, struct device, kobj);
	struct drm_minor *minor = dev_to_drm_minor(kdev);
	struct drm_device *dev = minor->dev;
	int ret;

	DRM_DEBUG_DRIVER("Resetting error state\n");

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	i915_destroy_error_state(dev);
	mutex_unlock(&dev->struct_mutex);

	return count;
}

static struct bin_attribute error_state_attr = {
	.attr.name = "error",
	.attr.mode = S_IRUSR | S_IWUSR,
	.size = 0,
	.read = error_state_read,
	.write = error_state_write,
};

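/*
 * Register the RC6 residency groups, L3 parity files, RPS frequency
 * attributes and the error state file, as supported by the platform.
 */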
void i915_setup_sysfs(struct drm_device *dev)
{
	int ret;

#ifdef CONFIG_PM
	if (HAS_RC6(dev)) {
		ret = sysfs_merge_group(&dev->primary->kdev->kobj,
					&rc6_attr_group);
		if (ret)
			DRM_ERROR("RC6 residency sysfs setup failed\n");
	}
	if (HAS_RC6p(dev)) {
		ret = sysfs_merge_group(&dev->primary->kdev->kobj,
					&rc6p_attr_group);
		if (ret)
			DRM_ERROR("RC6p residency sysfs setup failed\n");
	}
	if (IS_VALLEYVIEW(dev)) {
		ret = sysfs_merge_group(&dev->primary->kdev->kobj,
					&media_rc6_attr_group);
		if (ret)
			DRM_ERROR("Media RC6 residency sysfs setup failed\n");
	}
#endif
	if (HAS_L3_DPF(dev)) {
		ret = device_create_bin_file(dev->primary->kdev, &dpf_attrs);
		if (ret)
			DRM_ERROR("l3 parity sysfs setup failed\n");

		if (NUM_L3_SLICES(dev) > 1) {
			ret = device_create_bin_file(dev->primary->kdev,
						     &dpf_attrs_1);
			if (ret)
				DRM_ERROR("l3 parity slice 1 setup failed\n");
		}
	}

	ret = 0;
	if (IS_VALLEYVIEW(dev))
		ret = sysfs_create_files(&dev->primary->kdev->kobj, vlv_attrs);
	else if (INTEL_INFO(dev)->gen >= 6)
		ret = sysfs_create_files(&dev->primary->kdev->kobj, gen6_attrs);
	if (ret)
		DRM_ERROR("RPS sysfs setup failed\n");

	ret = sysfs_create_bin_file(&dev->primary->kdev->kobj,
				    &error_state_attr);
	if (ret)
		DRM_ERROR("error_state sysfs setup failed\n");
}

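/* Remove everything i915_setup_sysfs() may have registered. */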
void i915_teardown_sysfs(struct drm_device *dev)
{
	sysfs_remove_bin_file(&dev->primary->kdev->kobj, &error_state_attr);
	if (IS_VALLEYVIEW(dev))
		sysfs_remove_files(&dev->primary->kdev->kobj, vlv_attrs);
	else
		sysfs_remove_files(&dev->primary->kdev->kobj, gen6_attrs);
	device_remove_bin_file(dev->primary->kdev, &dpf_attrs_1);
	device_remove_bin_file(dev->primary->kdev, &dpf_attrs);
#ifdef CONFIG_PM
	sysfs_unmerge_group(&dev->primary->kdev->kobj, &rc6_attr_group);
	sysfs_unmerge_group(&dev->primary->kdev->kobj, &rc6p_attr_group);
#endif
}