Thomas Gleixner | caab277 | 2019-06-03 07:44:50 +0200 | [diff] [blame] | 1 | /* SPDX-License-Identifier: GPL-2.0-only */ |
Rob Clark | dd2da6e | 2013-11-30 16:12:10 -0500 | [diff] [blame] | 2 | /* |
Jeykumar Sankaran | 25fdd59 | 2018-06-27 15:26:09 -0400 | [diff] [blame] | 3 | * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved. |
Rob Clark | dd2da6e | 2013-11-30 16:12:10 -0500 | [diff] [blame] | 4 | * Copyright (C) 2013 Red Hat |
| 5 | * Author: Rob Clark <robdclark@gmail.com> |
Rob Clark | dd2da6e | 2013-11-30 16:12:10 -0500 | [diff] [blame] | 6 | */ |
| 7 | |
| 8 | #ifndef __MSM_KMS_H__ |
| 9 | #define __MSM_KMS_H__ |
| 10 | |
| 11 | #include <linux/clk.h> |
| 12 | #include <linux/regulator/consumer.h> |
| 13 | |
| 14 | #include "msm_drv.h" |
| 15 | |
Stephane Viau | 7ca1271 | 2014-12-08 10:48:57 -0500 | [diff] [blame] | 16 | #define MAX_PLANE 4 |
| 17 | |
/* As there are different display controller blocks depending on the
 * snapdragon version, the kms support is split out and the appropriate
 * implementation is loaded at runtime.  The kms module is responsible
 * for constructing the appropriate planes/crtcs/encoders/connectors.
 */
struct msm_kms_funcs {
	/* hw initialization: */
	int (*hw_init)(struct msm_kms *kms);
	/* irq handling: */
	void (*irq_preinstall)(struct msm_kms *kms);
	int (*irq_postinstall)(struct msm_kms *kms);
	void (*irq_uninstall)(struct msm_kms *kms);
	irqreturn_t (*irq)(struct msm_kms *kms);
	int (*enable_vblank)(struct msm_kms *kms, struct drm_crtc *crtc);
	void (*disable_vblank)(struct msm_kms *kms, struct drm_crtc *crtc);

	/*
	 * Atomic commit handling:
	 *
	 * Note that in the case of async commits, the funcs which take
	 * a crtc_mask (ie. ->flush_commit(), and ->complete_commit())
	 * might not be evenly balanced with ->prepare_commit(), however
	 * each crtc that is affected by a ->prepare_commit() (potentially
	 * multiple times) will eventually (at end of vsync period) be
	 * flushed and completed.
	 *
	 * This has some implications about tracking of cleanup state,
	 * for example SMP blocks to release after commit completes.  Ie.
	 * cleanup state should be also duplicated in the various
	 * duplicate_state() methods, as the current cleanup state at
	 * ->complete_commit() time may have accumulated cleanup work
	 * from multiple commits.
	 */

	/*
	 * Enable/disable power/clks needed for hw access done in other
	 * commit related methods.
	 *
	 * If mdp4 is migrated to runpm, we could probably drop these
	 * and use runpm directly.
	 */
	void (*enable_commit)(struct msm_kms *kms);
	void (*disable_commit)(struct msm_kms *kms);

	/*
	 * If the kms backend supports async commit, it should implement
	 * this method to return the time of the next vsync.  This is
	 * used to determine a time slightly before vsync, for the async
	 * commit timer to run and complete an async commit.
	 */
	ktime_t (*vsync_time)(struct msm_kms *kms, struct drm_crtc *crtc);

	/*
	 * Prepare for atomic commit.  This is called after any previous
	 * (async or otherwise) commit has completed.
	 */
	void (*prepare_commit)(struct msm_kms *kms, struct drm_atomic_state *state);

	/*
	 * Flush an atomic commit.  This is called after the hardware
	 * updates have already been pushed down to affected planes/
	 * crtcs/encoders/connectors.
	 */
	void (*flush_commit)(struct msm_kms *kms, unsigned crtc_mask);

	/*
	 * Wait for any in-progress flush to complete on the specified
	 * crtcs.  This should not block if there is no in-progress
	 * commit (ie. don't just wait for a vblank), as it will also
	 * be called before ->prepare_commit() to ensure any potential
	 * "async" commit has completed.
	 */
	void (*wait_flush)(struct msm_kms *kms, unsigned crtc_mask);

	/*
	 * Clean up after commit is completed.  This is called after
	 * ->wait_flush(), to give the backend a chance to do any
	 * post-commit cleanup.
	 */
	void (*complete_commit)(struct msm_kms *kms, unsigned crtc_mask);

	/*
	 * Format handling:
	 */

	/* get msm_format w/ optional format modifiers from drm_mode_fb_cmd2 */
	const struct msm_format *(*get_format)(struct msm_kms *kms,
					const uint32_t format,
					const uint64_t modifiers);
	/* do format checking on format modified through fb_cmd2 modifiers */
	int (*check_modified_format)(const struct msm_kms *kms,
			const struct msm_format *msm_fmt,
			const struct drm_mode_fb_cmd2 *cmd,
			struct drm_gem_object **bos);

	/* misc: */
	long (*round_pixclk)(struct msm_kms *kms, unsigned long rate,
			struct drm_encoder *encoder);
	int (*set_split_display)(struct msm_kms *kms,
			struct drm_encoder *encoder,
			struct drm_encoder *slave_encoder,
			bool is_cmd_mode);
	void (*set_encoder_mode)(struct msm_kms *kms,
				 struct drm_encoder *encoder,
				 bool cmd_mode);
	/* cleanup: */
	void (*destroy)(struct msm_kms *kms);
#ifdef CONFIG_DEBUG_FS
	/* debugfs: */
	int (*debugfs_init)(struct msm_kms *kms, struct drm_minor *minor);
#endif
};
| 130 | |
struct msm_kms;

/*
 * A per-crtc timer for pending async atomic flushes.  Scheduled to expire
 * shortly before vblank to flush pending async updates.  The hrtimer only
 * queues @work; the actual flush runs in @worker's kthread context.
 */
struct msm_pending_timer {
	struct hrtimer timer;
	struct kthread_work work;
	struct kthread_worker *worker;
	struct msm_kms *kms;		/* backpointer to owning kms */
	unsigned crtc_idx;		/* index into kms->pending_timers[] */
};
| 144 | |
/* Base kms state, embedded/subclassed by the per-generation backends. */
struct msm_kms {
	const struct msm_kms_funcs *funcs;
	struct drm_device *dev;

	/* irq number to be passed on to drm_irq_install */
	int irq;

	/* mapper-id used to request GEM buffer mapped for scanout: */
	struct msm_gem_address_space *aspace;

	/*
	 * For async commit, where ->flush_commit() and later happens
	 * from the crtc's pending_timer close to end of the frame:
	 */
	struct mutex commit_lock[MAX_CRTCS];
	/* per-lock lockdep keys so each crtc's commit_lock is its own class */
	struct lock_class_key commit_lock_keys[MAX_CRTCS];
	/* mask of crtcs with a commit flushed but not yet completed */
	unsigned pending_crtc_mask;
	struct msm_pending_timer pending_timers[MAX_CRTCS];
};
| 164 | |
Rob Clark | ffe7111 | 2020-10-19 14:10:52 -0700 | [diff] [blame] | 165 | static inline int msm_kms_init(struct msm_kms *kms, |
Rob Clark | 9e0efa6 | 2013-11-30 17:24:22 -0500 | [diff] [blame] | 166 | const struct msm_kms_funcs *funcs) |
| 167 | { |
Rob Clark | 363bcec | 2020-10-19 14:10:53 -0700 | [diff] [blame] | 168 | unsigned i, ret; |
Rob Clark | 2d99ced | 2019-08-29 09:45:16 -0700 | [diff] [blame] | 169 | |
Stephen Boyd | 6ec9351 | 2021-01-28 09:09:29 -0800 | [diff] [blame] | 170 | for (i = 0; i < ARRAY_SIZE(kms->commit_lock); i++) { |
| 171 | lockdep_register_key(&kms->commit_lock_keys[i]); |
| 172 | __mutex_init(&kms->commit_lock[i], "&kms->commit_lock[i]", |
| 173 | &kms->commit_lock_keys[i]); |
| 174 | } |
Krishna Manikandan | b3d9180 | 2020-10-16 19:40:43 +0530 | [diff] [blame] | 175 | |
Rob Clark | 9e0efa6 | 2013-11-30 17:24:22 -0500 | [diff] [blame] | 176 | kms->funcs = funcs; |
Rob Clark | 2d99ced | 2019-08-29 09:45:16 -0700 | [diff] [blame] | 177 | |
Rob Clark | 363bcec | 2020-10-19 14:10:53 -0700 | [diff] [blame] | 178 | for (i = 0; i < ARRAY_SIZE(kms->pending_timers); i++) { |
| 179 | ret = msm_atomic_init_pending_timer(&kms->pending_timers[i], kms, i); |
| 180 | if (ret) { |
| 181 | return ret; |
| 182 | } |
| 183 | } |
Rob Clark | ffe7111 | 2020-10-19 14:10:52 -0700 | [diff] [blame] | 184 | |
| 185 | return 0; |
| 186 | } |
| 187 | |
| 188 | static inline void msm_kms_destroy(struct msm_kms *kms) |
| 189 | { |
Rob Clark | 363bcec | 2020-10-19 14:10:53 -0700 | [diff] [blame] | 190 | unsigned i; |
| 191 | |
| 192 | for (i = 0; i < ARRAY_SIZE(kms->pending_timers); i++) |
| 193 | msm_atomic_destroy_pending_timer(&kms->pending_timers[i]); |
Rob Clark | 9e0efa6 | 2013-11-30 17:24:22 -0500 | [diff] [blame] | 194 | } |
| 195 | |
/* Per-generation kms constructors, selected at runtime in msm_drv: */
struct msm_kms *mdp4_kms_init(struct drm_device *dev);
struct msm_kms *mdp5_kms_init(struct drm_device *dev);
struct msm_kms *dpu_kms_init(struct drm_device *dev);

/* Ops for the top-level MDSS wrapper block (clocks/irq for the sub-blocks). */
struct msm_mdss_funcs {
	int (*enable)(struct msm_mdss *mdss);
	int (*disable)(struct msm_mdss *mdss);
	void (*destroy)(struct drm_device *dev);
};

struct msm_mdss {
	struct drm_device *dev;
	const struct msm_mdss_funcs *funcs;
};

/* Per-generation mdss constructors: */
int mdp5_mdss_init(struct drm_device *dev);
int dpu_mdss_init(struct drm_device *dev);
Rob Clark | dd2da6e | 2013-11-30 16:12:10 -0500 | [diff] [blame] | 213 | |
/* Iterate the crtcs of @dev whose drm_crtc_mask() bit is set in @crtc_mask. */
#define for_each_crtc_mask(dev, crtc, crtc_mask) \
	drm_for_each_crtc(crtc, dev) \
		for_each_if (drm_crtc_mask(crtc) & (crtc_mask))

/* Same as for_each_crtc_mask(), but walking the crtc list in reverse. */
#define for_each_crtc_mask_reverse(dev, crtc, crtc_mask) \
	drm_for_each_crtc_reverse(crtc, dev) \
		for_each_if (drm_crtc_mask(crtc) & (crtc_mask))
| 221 | |
Rob Clark | dd2da6e | 2013-11-30 16:12:10 -0500 | [diff] [blame] | 222 | #endif /* __MSM_KMS_H__ */ |