/*
 * Copyright © 2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *	Ben Widawsky <ben@bwidawsk.net>
 *
 */

#include <linux/device.h>
#include <linux/module.h>
#include <linux/stat.h>
#include <linux/sysfs.h>

#include "gt/intel_rc6.h"
#include "gt/intel_rps.h"
#include "gt/sysfs_engines.h"

#include "i915_drv.h"
#include "i915_sysfs.h"
#include "intel_pm.h"
#include "intel_sideband.h"

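/*
 * The sysfs callbacks below are invoked on the DRM minor's device; its
 * drvdata is the struct drm_minor, from which the i915 private data can
 * be recovered.
 */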
static inline struct drm_i915_private *kdev_minor_to_i915(struct device *kdev)
{
	struct drm_minor *minor = dev_get_drvdata(kdev);
	return to_i915(minor->dev);
}

#ifdef CONFIG_PM
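/*
 * RC6 residency counters are read from the hardware in microseconds under a
 * runtime PM wakeref and reported to userspace in milliseconds.
 */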
static u32 calc_residency(struct drm_i915_private *dev_priv,
			  i915_reg_t reg)
{
	intel_wakeref_t wakeref;
	u64 res = 0;

	with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref)
		res = intel_rc6_residency_us(&dev_priv->gt.rc6, reg);

	return DIV_ROUND_CLOSEST_ULL(res, 1000);
}

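/*
 * rc6_enable reports a bitmask of the RC6 states supported by the hardware:
 * bit 0 for RC6, bit 1 for RC6p and bit 2 for RC6pp.
 */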
static ssize_t
show_rc6_mask(struct device *kdev, struct device_attribute *attr, char *buf)
{
	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
	unsigned int mask;

	mask = 0;
	if (HAS_RC6(dev_priv))
		mask |= BIT(0);
	if (HAS_RC6p(dev_priv))
		mask |= BIT(1);
	if (HAS_RC6pp(dev_priv))
		mask |= BIT(2);

	return sysfs_emit(buf, "%x\n", mask);
}

static ssize_t
show_rc6_ms(struct device *kdev, struct device_attribute *attr, char *buf)
{
	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
	u32 rc6_residency = calc_residency(dev_priv, GEN6_GT_GFX_RC6);
	return sysfs_emit(buf, "%u\n", rc6_residency);
}

static ssize_t
show_rc6p_ms(struct device *kdev, struct device_attribute *attr, char *buf)
{
	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
	u32 rc6p_residency = calc_residency(dev_priv, GEN6_GT_GFX_RC6p);
	return sysfs_emit(buf, "%u\n", rc6p_residency);
}

static ssize_t
show_rc6pp_ms(struct device *kdev, struct device_attribute *attr, char *buf)
{
	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
	u32 rc6pp_residency = calc_residency(dev_priv, GEN6_GT_GFX_RC6pp);
	return sysfs_emit(buf, "%u\n", rc6pp_residency);
}

static ssize_t
show_media_rc6_ms(struct device *kdev, struct device_attribute *attr, char *buf)
{
	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
	u32 rc6_residency = calc_residency(dev_priv, VLV_GT_MEDIA_RC6);
	return sysfs_emit(buf, "%u\n", rc6_residency);
}

static DEVICE_ATTR(rc6_enable, S_IRUGO, show_rc6_mask, NULL);
static DEVICE_ATTR(rc6_residency_ms, S_IRUGO, show_rc6_ms, NULL);
static DEVICE_ATTR(rc6p_residency_ms, S_IRUGO, show_rc6p_ms, NULL);
static DEVICE_ATTR(rc6pp_residency_ms, S_IRUGO, show_rc6pp_ms, NULL);
static DEVICE_ATTR(media_rc6_residency_ms, S_IRUGO, show_media_rc6_ms, NULL);

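/*
 * These attribute groups reuse power_group_name, so they are merged into the
 * DRM device's existing "power" directory; on a typical system that means
 * paths such as /sys/class/drm/card0/power/rc6_residency_ms (card0 assumed
 * here for illustration).
 */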
static struct attribute *rc6_attrs[] = {
	&dev_attr_rc6_enable.attr,
	&dev_attr_rc6_residency_ms.attr,
	NULL
};

static const struct attribute_group rc6_attr_group = {
	.name = power_group_name,
	.attrs = rc6_attrs
};

static struct attribute *rc6p_attrs[] = {
	&dev_attr_rc6p_residency_ms.attr,
	&dev_attr_rc6pp_residency_ms.attr,
	NULL
};

static const struct attribute_group rc6p_attr_group = {
	.name = power_group_name,
	.attrs = rc6p_attrs
};

static struct attribute *media_rc6_attrs[] = {
	&dev_attr_media_rc6_residency_ms.attr,
	NULL
};

static const struct attribute_group media_rc6_attr_group = {
	.name = power_group_name,
	.attrs = media_rc6_attrs
};
#endif

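/*
 * The l3_parity bin files let userspace read back and update the per-slice
 * L3 remapping table used to avoid rows that have seen parity errors.
 * Accesses must be u32 aligned and fall within GEN7_L3LOG_SIZE.
 */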
static int l3_access_valid(struct drm_i915_private *i915, loff_t offset)
{
	if (!HAS_L3_DPF(i915))
		return -EPERM;

	if (!IS_ALIGNED(offset, sizeof(u32)))
		return -EINVAL;

	if (offset >= GEN7_L3LOG_SIZE)
		return -ENXIO;

	return 0;
}

static ssize_t
i915_l3_read(struct file *filp, struct kobject *kobj,
	     struct bin_attribute *attr, char *buf,
	     loff_t offset, size_t count)
{
	struct device *kdev = kobj_to_dev(kobj);
	struct drm_i915_private *i915 = kdev_minor_to_i915(kdev);
	int slice = (int)(uintptr_t)attr->private;
	int ret;

	ret = l3_access_valid(i915, offset);
	if (ret)
		return ret;

	count = round_down(count, sizeof(u32));
	count = min_t(size_t, GEN7_L3LOG_SIZE - offset, count);
	memset(buf, 0, count);

	spin_lock(&i915->gem.contexts.lock);
	if (i915->l3_parity.remap_info[slice])
		memcpy(buf,
		       i915->l3_parity.remap_info[slice] + offset / sizeof(u32),
		       count);
	spin_unlock(&i915->gem.contexts.lock);

	return count;
}

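/*
 * Writes allocate the slice's remap table on first use, copy in the new
 * entries and flag every context so the updated remapping is applied at its
 * next context switch.
 */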
static ssize_t
i915_l3_write(struct file *filp, struct kobject *kobj,
	      struct bin_attribute *attr, char *buf,
	      loff_t offset, size_t count)
{
	struct device *kdev = kobj_to_dev(kobj);
	struct drm_i915_private *i915 = kdev_minor_to_i915(kdev);
	int slice = (int)(uintptr_t)attr->private;
	u32 *remap_info, *freeme = NULL;
	struct i915_gem_context *ctx;
	int ret;

	ret = l3_access_valid(i915, offset);
	if (ret)
		return ret;

	if (count < sizeof(u32))
		return -EINVAL;

	remap_info = kzalloc(GEN7_L3LOG_SIZE, GFP_KERNEL);
	if (!remap_info)
		return -ENOMEM;

	spin_lock(&i915->gem.contexts.lock);

	if (i915->l3_parity.remap_info[slice]) {
		freeme = remap_info;
		remap_info = i915->l3_parity.remap_info[slice];
	} else {
		i915->l3_parity.remap_info[slice] = remap_info;
	}

	count = round_down(count, sizeof(u32));
	memcpy(remap_info + offset / sizeof(u32), buf, count);

	/* NB: We defer the remapping until we switch to the context */
	list_for_each_entry(ctx, &i915->gem.contexts.list, link)
		ctx->remap_slice |= BIT(slice);

	spin_unlock(&i915->gem.contexts.lock);
	kfree(freeme);

	/*
	 * TODO: Ideally we really want a GPU reset here to make sure errors
	 * aren't propagated. Since I cannot find a stable way to reset the GPU
	 * at this point it is left as a TODO.
	 */

	return count;
}

static const struct bin_attribute dpf_attrs = {
	.attr = {.name = "l3_parity", .mode = (S_IRUSR | S_IWUSR)},
	.size = GEN7_L3LOG_SIZE,
	.read = i915_l3_read,
	.write = i915_l3_write,
	.mmap = NULL,
	.private = (void *)0
};

static const struct bin_attribute dpf_attrs_1 = {
	.attr = {.name = "l3_parity_slice_1", .mode = (S_IRUSR | S_IWUSR)},
	.size = GEN7_L3LOG_SIZE,
	.read = i915_l3_read,
	.write = i915_l3_write,
	.mmap = NULL,
	.private = (void *)1
};

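/*
 * gt_act_freq_mhz reports the frequency the GT is actually running at, while
 * gt_cur_freq_mhz reports the frequency last requested by the driver; the two
 * can differ, for instance while the GPU is idle.
 */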
static ssize_t gt_act_freq_mhz_show(struct device *kdev,
				    struct device_attribute *attr, char *buf)
{
	struct drm_i915_private *i915 = kdev_minor_to_i915(kdev);
	struct intel_rps *rps = &i915->gt.rps;

	return sysfs_emit(buf, "%d\n", intel_rps_read_actual_frequency(rps));
}

static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
				    struct device_attribute *attr, char *buf)
{
	struct drm_i915_private *i915 = kdev_minor_to_i915(kdev);
	struct intel_rps *rps = &i915->gt.rps;

	return sysfs_emit(buf, "%d\n", intel_gpu_freq(rps, rps->cur_freq));
}

static ssize_t gt_boost_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
{
	struct drm_i915_private *i915 = kdev_minor_to_i915(kdev);
	struct intel_rps *rps = &i915->gt.rps;

	return sysfs_emit(buf, "%d\n", intel_gpu_freq(rps, rps->boost_freq));
}

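/*
 * Writing gt_boost_freq_mhz updates the boost target; if clients are already
 * waiting on the GPU, the RPS worker is kicked so the new value takes effect
 * immediately.
 */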
static ssize_t gt_boost_freq_mhz_store(struct device *kdev,
				       struct device_attribute *attr,
				       const char *buf, size_t count)
{
	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
	struct intel_rps *rps = &dev_priv->gt.rps;
	bool boost = false;
	ssize_t ret;
	u32 val;

	ret = kstrtou32(buf, 0, &val);
	if (ret)
		return ret;

	/* Validate against (static) hardware limits */
	val = intel_freq_opcode(rps, val);
	if (val < rps->min_freq || val > rps->max_freq)
		return -EINVAL;

	mutex_lock(&rps->lock);
	if (val != rps->boost_freq) {
		rps->boost_freq = val;
		boost = atomic_read(&rps->num_waiters);
	}
	mutex_unlock(&rps->lock);
	if (boost)
		schedule_work(&rps->work);

	return count;
}

static ssize_t vlv_rpe_freq_mhz_show(struct device *kdev,
				     struct device_attribute *attr, char *buf)
{
	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
	struct intel_rps *rps = &dev_priv->gt.rps;

	return sysfs_emit(buf, "%d\n", intel_gpu_freq(rps, rps->efficient_freq));
}

static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
{
	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
	struct intel_rps *rps = &dev_priv->gt.rps;

	return sysfs_emit(buf, "%d\n", intel_gpu_freq(rps, rps->max_freq_softlimit));
}

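/*
 * The max/min frequency stores adjust the software limits within the hardware
 * range and then re-clamp the current request so the new limit takes effect
 * right away.
 */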
static ssize_t gt_max_freq_mhz_store(struct device *kdev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
	struct intel_rps *rps = &dev_priv->gt.rps;
	ssize_t ret;
	u32 val;

	ret = kstrtou32(buf, 0, &val);
	if (ret)
		return ret;

	mutex_lock(&rps->lock);

	val = intel_freq_opcode(rps, val);
	if (val < rps->min_freq ||
	    val > rps->max_freq ||
	    val < rps->min_freq_softlimit) {
		ret = -EINVAL;
		goto unlock;
	}

	if (val > rps->rp0_freq)
		DRM_DEBUG("User requested overclocking to %d\n",
			  intel_gpu_freq(rps, val));

	rps->max_freq_softlimit = val;

	val = clamp_t(int, rps->cur_freq,
		      rps->min_freq_softlimit,
		      rps->max_freq_softlimit);

	/*
	 * We still need *_set_rps to process the new max_delay and
	 * update the interrupt limits and PMINTRMSK even though the
	 * frequency request may be unchanged.
	 */
	intel_rps_set(rps, val);

unlock:
	mutex_unlock(&rps->lock);

	return ret ?: count;
}

static ssize_t gt_min_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
{
	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
	struct intel_rps *rps = &dev_priv->gt.rps;

	return sysfs_emit(buf, "%d\n", intel_gpu_freq(rps, rps->min_freq_softlimit));
}

static ssize_t gt_min_freq_mhz_store(struct device *kdev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
	struct intel_rps *rps = &dev_priv->gt.rps;
	ssize_t ret;
	u32 val;

	ret = kstrtou32(buf, 0, &val);
	if (ret)
		return ret;

	mutex_lock(&rps->lock);

	val = intel_freq_opcode(rps, val);
	if (val < rps->min_freq ||
	    val > rps->max_freq ||
	    val > rps->max_freq_softlimit) {
		ret = -EINVAL;
		goto unlock;
	}

	rps->min_freq_softlimit = val;

	val = clamp_t(int, rps->cur_freq,
		      rps->min_freq_softlimit,
		      rps->max_freq_softlimit);

	/*
	 * We still need *_set_rps to process the new min_delay and
	 * update the interrupt limits and PMINTRMSK even though the
	 * frequency request may be unchanged.
	 */
	intel_rps_set(rps, val);

unlock:
	mutex_unlock(&rps->lock);

	return ret ?: count;
}

static DEVICE_ATTR_RO(gt_act_freq_mhz);
static DEVICE_ATTR_RO(gt_cur_freq_mhz);
static DEVICE_ATTR_RW(gt_boost_freq_mhz);
static DEVICE_ATTR_RW(gt_max_freq_mhz);
static DEVICE_ATTR_RW(gt_min_freq_mhz);

static DEVICE_ATTR_RO(vlv_rpe_freq_mhz);

static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf);
static DEVICE_ATTR(gt_RP0_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);
static DEVICE_ATTR(gt_RP1_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);
static DEVICE_ATTR(gt_RPn_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);

/* For now we have a static number of RP states */
static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
{
	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
	struct intel_rps *rps = &dev_priv->gt.rps;
	u32 val;

	if (attr == &dev_attr_gt_RP0_freq_mhz)
		val = intel_gpu_freq(rps, rps->rp0_freq);
	else if (attr == &dev_attr_gt_RP1_freq_mhz)
		val = intel_gpu_freq(rps, rps->rp1_freq);
	else if (attr == &dev_attr_gt_RPn_freq_mhz)
		val = intel_gpu_freq(rps, rps->min_freq);
	else
		BUG();

	return sysfs_emit(buf, "%d\n", val);
}

static const struct attribute * const gen6_attrs[] = {
	&dev_attr_gt_act_freq_mhz.attr,
	&dev_attr_gt_cur_freq_mhz.attr,
	&dev_attr_gt_boost_freq_mhz.attr,
	&dev_attr_gt_max_freq_mhz.attr,
	&dev_attr_gt_min_freq_mhz.attr,
	&dev_attr_gt_RP0_freq_mhz.attr,
	&dev_attr_gt_RP1_freq_mhz.attr,
	&dev_attr_gt_RPn_freq_mhz.attr,
	NULL,
};

static const struct attribute * const vlv_attrs[] = {
	&dev_attr_gt_act_freq_mhz.attr,
	&dev_attr_gt_cur_freq_mhz.attr,
	&dev_attr_gt_boost_freq_mhz.attr,
	&dev_attr_gt_max_freq_mhz.attr,
	&dev_attr_gt_min_freq_mhz.attr,
	&dev_attr_gt_RP0_freq_mhz.attr,
	&dev_attr_gt_RP1_freq_mhz.attr,
	&dev_attr_gt_RPn_freq_mhz.attr,
	&dev_attr_vlv_rpe_freq_mhz.attr,
	NULL,
};

#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)

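/*
 * The "error" bin file exposes the first GPU error state captured since the
 * last reset; writing anything to it discards the capture.
 */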
static ssize_t error_state_read(struct file *filp, struct kobject *kobj,
				struct bin_attribute *attr, char *buf,
				loff_t off, size_t count)
{
	struct device *kdev = kobj_to_dev(kobj);
	struct drm_i915_private *i915 = kdev_minor_to_i915(kdev);
	struct i915_gpu_coredump *gpu;
	ssize_t ret;

	gpu = i915_first_error_state(i915);
	if (IS_ERR(gpu)) {
		ret = PTR_ERR(gpu);
	} else if (gpu) {
		ret = i915_gpu_coredump_copy_to_buffer(gpu, buf, off, count);
		i915_gpu_coredump_put(gpu);
	} else {
		const char *str = "No error state collected\n";
		size_t len = strlen(str);

		ret = min_t(size_t, count, len - off);
		memcpy(buf, str + off, ret);
	}

	return ret;
}

static ssize_t error_state_write(struct file *file, struct kobject *kobj,
				 struct bin_attribute *attr, char *buf,
				 loff_t off, size_t count)
{
	struct device *kdev = kobj_to_dev(kobj);
	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);

	drm_dbg(&dev_priv->drm, "Resetting error state\n");
	i915_reset_error_state(dev_priv);

	return count;
}

static const struct bin_attribute error_state_attr = {
	.attr.name = "error",
	.attr.mode = S_IRUSR | S_IWUSR,
	.size = 0,
	.read = error_state_read,
	.write = error_state_write,
};

static void i915_setup_error_capture(struct device *kdev)
{
	if (sysfs_create_bin_file(&kdev->kobj, &error_state_attr))
		DRM_ERROR("error_state sysfs setup failed\n");
}

static void i915_teardown_error_capture(struct device *kdev)
{
	sysfs_remove_bin_file(&kdev->kobj, &error_state_attr);
}
#else
static void i915_setup_error_capture(struct device *kdev) {}
static void i915_teardown_error_capture(struct device *kdev) {}
#endif

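/*
 * i915_setup_sysfs() registers everything above on the primary DRM minor's
 * device, so userspace can, for example, query the current frequency request
 * with something like (assuming the GPU is card0):
 *
 *	cat /sys/class/drm/card0/gt_cur_freq_mhz
 */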
void i915_setup_sysfs(struct drm_i915_private *dev_priv)
{
	struct device *kdev = dev_priv->drm.primary->kdev;
	int ret;

#ifdef CONFIG_PM
	if (HAS_RC6(dev_priv)) {
		ret = sysfs_merge_group(&kdev->kobj,
					&rc6_attr_group);
		if (ret)
			drm_err(&dev_priv->drm,
				"RC6 residency sysfs setup failed\n");
	}
	if (HAS_RC6p(dev_priv)) {
		ret = sysfs_merge_group(&kdev->kobj,
					&rc6p_attr_group);
		if (ret)
			drm_err(&dev_priv->drm,
				"RC6p residency sysfs setup failed\n");
	}
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		ret = sysfs_merge_group(&kdev->kobj,
					&media_rc6_attr_group);
		if (ret)
			drm_err(&dev_priv->drm,
				"Media RC6 residency sysfs setup failed\n");
	}
#endif
	if (HAS_L3_DPF(dev_priv)) {
		ret = device_create_bin_file(kdev, &dpf_attrs);
		if (ret)
			drm_err(&dev_priv->drm,
				"l3 parity sysfs setup failed\n");

		if (NUM_L3_SLICES(dev_priv) > 1) {
			ret = device_create_bin_file(kdev,
						     &dpf_attrs_1);
			if (ret)
				drm_err(&dev_priv->drm,
					"l3 parity slice 1 setup failed\n");
		}
	}

	ret = 0;
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		ret = sysfs_create_files(&kdev->kobj, vlv_attrs);
	else if (GRAPHICS_VER(dev_priv) >= 6)
		ret = sysfs_create_files(&kdev->kobj, gen6_attrs);
	if (ret)
		drm_err(&dev_priv->drm, "RPS sysfs setup failed\n");

	i915_setup_error_capture(kdev);

	intel_engines_add_sysfs(dev_priv);
}

void i915_teardown_sysfs(struct drm_i915_private *dev_priv)
{
	struct device *kdev = dev_priv->drm.primary->kdev;

	i915_teardown_error_capture(kdev);

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		sysfs_remove_files(&kdev->kobj, vlv_attrs);
	else
		sysfs_remove_files(&kdev->kobj, gen6_attrs);
	device_remove_bin_file(kdev, &dpf_attrs_1);
	device_remove_bin_file(kdev, &dpf_attrs);
#ifdef CONFIG_PM
	sysfs_unmerge_group(&kdev->kobj, &rc6_attr_group);
	sysfs_unmerge_group(&kdev->kobj, &rc6p_attr_group);
#endif
}