/*
 * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/kthread.h>
#include <uapi/linux/sched/types.h>
#include <drm/drm_of.h>

#include "msm_drv.h"
#include "msm_debugfs.h"
#include "msm_fence.h"
#include "msm_gem.h"
#include "msm_gpu.h"
#include "msm_kms.h"


/*
 * MSM driver version:
 * - 1.0.0 - initial interface
 * - 1.1.0 - adds madvise, and support for submits with > 4 cmd buffers
 * - 1.2.0 - adds explicit fence support for submit ioctl
 * - 1.3.0 - adds GMEM_BASE + NR_RINGS params, SUBMITQUEUE_NEW +
 *           SUBMITQUEUE_CLOSE ioctls, and MSM_INFO_IOVA flag for
 *           MSM_GEM_INFO ioctl.
 * - 1.4.0 - softpin, MSM_RELOC_BO_DUMP, and GEM_INFO support to set/get
 *           GEM object's debug name
 */
#define MSM_VERSION_MAJOR	1
#define MSM_VERSION_MINOR	4
#define MSM_VERSION_PATCHLEVEL	0

static const struct drm_mode_config_funcs mode_config_funcs = {
	.fb_create = msm_framebuffer_create,
	.output_poll_changed = drm_fb_helper_output_poll_changed,
	.atomic_check = drm_atomic_helper_check,
	.atomic_commit = drm_atomic_helper_commit,
};

static const struct drm_mode_config_helper_funcs mode_config_helper_funcs = {
	.atomic_commit_tail = msm_atomic_commit_tail,
};

#ifdef CONFIG_DRM_MSM_REGISTER_LOGGING
static bool reglog = false;
MODULE_PARM_DESC(reglog, "Enable register read/write logging");
module_param(reglog, bool, 0600);
#else
#define reglog 0
#endif

#ifdef CONFIG_DRM_FBDEV_EMULATION
static bool fbdev = true;
MODULE_PARM_DESC(fbdev, "Enable fbdev compat layer");
module_param(fbdev, bool, 0600);
#endif

static char *vram = "16m";
MODULE_PARM_DESC(vram, "Configure VRAM size (for devices without IOMMU/GPUMMU)");
module_param(vram, charp, 0);

bool dumpstate = false;
MODULE_PARM_DESC(dumpstate, "Dump KMS state on errors");
module_param(dumpstate, bool, 0600);

static bool modeset = true;
MODULE_PARM_DESC(modeset, "Use kernel modesetting [KMS] (1=on (default), 0=disable)");
module_param(modeset, bool, 0600);

/*
 * Util/helpers:
 */

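/*
 * Build a clk_bulk_data array from the "clock-names" DT property of @dev.
 * Returns the number of clocks found (0 if the property is absent) or a
 * negative errno on failure.
 */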
int msm_clk_bulk_get(struct device *dev, struct clk_bulk_data **bulk)
{
	struct property *prop;
	const char *name;
	struct clk_bulk_data *local;
	int i = 0, ret, count;

	count = of_property_count_strings(dev->of_node, "clock-names");
	if (count < 1)
		return 0;

	local = devm_kcalloc(dev, count, sizeof(struct clk_bulk_data),
		GFP_KERNEL);
	if (!local)
		return -ENOMEM;

	of_property_for_each_string(dev->of_node, "clock-names", prop, name) {
		local[i].id = devm_kstrdup(dev, name, GFP_KERNEL);
		if (!local[i].id) {
			devm_kfree(dev, local);
			return -ENOMEM;
		}

		i++;
	}

	ret = devm_clk_bulk_get(dev, count, local);

	if (ret) {
		for (i = 0; i < count; i++)
			devm_kfree(dev, (void *) local[i].id);
		devm_kfree(dev, local);

		return ret;
	}

	*bulk = local;
	return count;
}

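/*
 * Look up a single clock by name in a bulk array returned by
 * msm_clk_bulk_get(), accepting both "name" and the legacy "name_clk" form.
 */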
struct clk *msm_clk_bulk_get_clock(struct clk_bulk_data *bulk, int count,
		const char *name)
{
	int i;
	char n[32];

	snprintf(n, sizeof(n), "%s_clk", name);

	for (i = 0; bulk && i < count; i++) {
		if (!strcmp(bulk[i].id, name) || !strcmp(bulk[i].id, n))
			return bulk[i].clk;
	}

	return NULL;
}

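/*
 * Get a named clock, falling back to the legacy "<name>_clk" binding and
 * warning when only the legacy name matches.
 */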
struct clk *msm_clk_get(struct platform_device *pdev, const char *name)
{
	struct clk *clk;
	char name2[32];

	clk = devm_clk_get(&pdev->dev, name);
	if (!IS_ERR(clk) || PTR_ERR(clk) == -EPROBE_DEFER)
		return clk;

	snprintf(name2, sizeof(name2), "%s_clk", name);

	clk = devm_clk_get(&pdev->dev, name2);
	if (!IS_ERR(clk))
		dev_warn(&pdev->dev, "Using legacy clk name binding.  Use "
				"\"%s\" instead of \"%s\"\n", name, name2);

	return clk;
}

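/*
 * Map the named (or first) MEM resource of @pdev; the mapped region is
 * logged when the reglog module parameter is set.
 */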
void __iomem *msm_ioremap(struct platform_device *pdev, const char *name,
		const char *dbgname)
{
	struct resource *res;
	unsigned long size;
	void __iomem *ptr;

	if (name)
		res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
	else
		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);

	if (!res) {
		DRM_DEV_ERROR(&pdev->dev, "failed to get memory resource: %s\n", name);
		return ERR_PTR(-EINVAL);
	}

	size = resource_size(res);

	ptr = devm_ioremap_nocache(&pdev->dev, res->start, size);
	if (!ptr) {
		DRM_DEV_ERROR(&pdev->dev, "failed to ioremap: %s\n", name);
		return ERR_PTR(-ENOMEM);
	}

	if (reglog)
		printk(KERN_DEBUG "IO:region %s %p %08lx\n", dbgname, ptr, size);

	return ptr;
}

void msm_writel(u32 data, void __iomem *addr)
{
	if (reglog)
		printk(KERN_DEBUG "IO:W %p %08x\n", addr, data);
	writel(data, addr);
}

u32 msm_readl(const void __iomem *addr)
{
	u32 val = readl(addr);
	if (reglog)
		pr_err("IO:R %p %08x\n", addr, val);
	return val;
}

struct vblank_event {
	struct list_head node;
	int crtc_id;
	bool enable;
};

static void vblank_ctrl_worker(struct kthread_work *work)
{
	struct msm_vblank_ctrl *vbl_ctrl = container_of(work,
						struct msm_vblank_ctrl, work);
	struct msm_drm_private *priv = container_of(vbl_ctrl,
					struct msm_drm_private, vblank_ctrl);
	struct msm_kms *kms = priv->kms;
	struct vblank_event *vbl_ev, *tmp;
	unsigned long flags;

	spin_lock_irqsave(&vbl_ctrl->lock, flags);
	list_for_each_entry_safe(vbl_ev, tmp, &vbl_ctrl->event_list, node) {
		list_del(&vbl_ev->node);
		spin_unlock_irqrestore(&vbl_ctrl->lock, flags);

		if (vbl_ev->enable)
			kms->funcs->enable_vblank(kms,
						priv->crtcs[vbl_ev->crtc_id]);
		else
			kms->funcs->disable_vblank(kms,
						priv->crtcs[vbl_ev->crtc_id]);

		kfree(vbl_ev);

		spin_lock_irqsave(&vbl_ctrl->lock, flags);
	}

	spin_unlock_irqrestore(&vbl_ctrl->lock, flags);
}

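/*
 * Queue a vblank enable/disable request to the per-CRTC display worker so
 * the KMS enable_vblank/disable_vblank callback runs outside atomic context.
 */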
static int vblank_ctrl_queue_work(struct msm_drm_private *priv,
					int crtc_id, bool enable)
{
	struct msm_vblank_ctrl *vbl_ctrl = &priv->vblank_ctrl;
	struct vblank_event *vbl_ev;
	unsigned long flags;

	vbl_ev = kzalloc(sizeof(*vbl_ev), GFP_ATOMIC);
	if (!vbl_ev)
		return -ENOMEM;

	vbl_ev->crtc_id = crtc_id;
	vbl_ev->enable = enable;

	spin_lock_irqsave(&vbl_ctrl->lock, flags);
	list_add_tail(&vbl_ev->node, &vbl_ctrl->event_list);
	spin_unlock_irqrestore(&vbl_ctrl->lock, flags);

	kthread_queue_work(&priv->disp_thread[crtc_id].worker,
			&vbl_ctrl->work);

	return 0;
}

static int msm_drm_uninit(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct drm_device *ddev = platform_get_drvdata(pdev);
	struct msm_drm_private *priv = ddev->dev_private;
	struct msm_kms *kms = priv->kms;
	struct msm_mdss *mdss = priv->mdss;
	struct msm_vblank_ctrl *vbl_ctrl = &priv->vblank_ctrl;
	struct vblank_event *vbl_ev, *tmp;
	int i;

	/* We must cancel and cleanup any pending vblank enable/disable
	 * work before drm_irq_uninstall() to avoid work re-enabling an
	 * irq after uninstall has disabled it.
	 */
	kthread_flush_work(&vbl_ctrl->work);
	list_for_each_entry_safe(vbl_ev, tmp, &vbl_ctrl->event_list, node) {
		list_del(&vbl_ev->node);
		kfree(vbl_ev);
	}

	/* clean up display commit/event worker threads */
	for (i = 0; i < priv->num_crtcs; i++) {
		if (priv->disp_thread[i].thread) {
			kthread_flush_worker(&priv->disp_thread[i].worker);
			kthread_stop(priv->disp_thread[i].thread);
			priv->disp_thread[i].thread = NULL;
		}

		if (priv->event_thread[i].thread) {
			kthread_flush_worker(&priv->event_thread[i].worker);
			kthread_stop(priv->event_thread[i].thread);
			priv->event_thread[i].thread = NULL;
		}
	}

	msm_gem_shrinker_cleanup(ddev);

	drm_kms_helper_poll_fini(ddev);

	drm_dev_unregister(ddev);

	msm_perf_debugfs_cleanup(priv);
	msm_rd_debugfs_cleanup(priv);

#ifdef CONFIG_DRM_FBDEV_EMULATION
	if (fbdev && priv->fbdev)
		msm_fbdev_free(ddev);
#endif
	drm_atomic_helper_shutdown(ddev);
	drm_mode_config_cleanup(ddev);

	pm_runtime_get_sync(dev);
	drm_irq_uninstall(ddev);
	pm_runtime_put_sync(dev);

	flush_workqueue(priv->wq);
	destroy_workqueue(priv->wq);

	if (kms && kms->funcs)
		kms->funcs->destroy(kms);

	if (priv->vram.paddr) {
		unsigned long attrs = DMA_ATTR_NO_KERNEL_MAPPING;
		drm_mm_takedown(&priv->vram.mm);
		dma_free_attrs(dev, priv->vram.size, NULL,
			       priv->vram.paddr, attrs);
	}

	component_unbind_all(dev, ddev);

	if (mdss && mdss->funcs)
		mdss->funcs->destroy(ddev);

	ddev->dev_private = NULL;
	drm_dev_put(ddev);

	kfree(priv);

	return 0;
}

#define KMS_MDP4 4
#define KMS_MDP5 5
#define KMS_DPU  3

static int get_mdp_ver(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;

	return (int) (unsigned long) of_device_get_match_data(dev);
}

#include <linux/of_address.h>

static int msm_init_vram(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct device_node *node;
	unsigned long size = 0;
	int ret = 0;

	/* In the device-tree world, we could have a 'memory-region'
	 * phandle, which gives us a link to our "vram".  Allocating
	 * is all nicely abstracted behind the dma api, but we need
	 * to know the entire size to allocate it all in one go. There
	 * are two cases:
	 *  1) device with no IOMMU, in which case we need exclusive
	 *     access to a VRAM carveout big enough for all gpu
	 *     buffers
	 *  2) device with IOMMU, but where the bootloader puts up
	 *     a splash screen.  In this case, the VRAM carveout
	 *     need only be large enough for fbdev fb.  But we need
	 *     exclusive access to the buffer to avoid the kernel
	 *     using those pages for other purposes (which appears
	 *     as corruption on screen before we have a chance to
	 *     load and do initial modeset)
	 */

	node = of_parse_phandle(dev->dev->of_node, "memory-region", 0);
	if (node) {
		struct resource r;
		ret = of_address_to_resource(node, 0, &r);
		of_node_put(node);
		if (ret)
			return ret;
		size = r.end - r.start;
		DRM_INFO("using VRAM carveout: %lx@%pa\n", size, &r.start);

		/* if we have no IOMMU, then we need to use carveout allocator.
		 * Grab the entire CMA chunk carved out in early startup in
		 * mach-msm:
		 */
	} else if (!iommu_present(&platform_bus_type)) {
		DRM_INFO("using %s VRAM carveout\n", vram);
		size = memparse(vram, NULL);
	}

	if (size) {
		unsigned long attrs = 0;
		void *p;

		priv->vram.size = size;

		drm_mm_init(&priv->vram.mm, 0, (size >> PAGE_SHIFT) - 1);
		spin_lock_init(&priv->vram.lock);

		attrs |= DMA_ATTR_NO_KERNEL_MAPPING;
		attrs |= DMA_ATTR_WRITE_COMBINE;

		/* note that for no-kernel-mapping, the vaddr returned
		 * is bogus, but non-null if allocation succeeded:
		 */
		p = dma_alloc_attrs(dev->dev, size,
				&priv->vram.paddr, GFP_KERNEL, attrs);
		if (!p) {
			DRM_DEV_ERROR(dev->dev, "failed to allocate VRAM\n");
			priv->vram.paddr = 0;
			return -ENOMEM;
		}

		DRM_DEV_INFO(dev->dev, "VRAM: %08x->%08x\n",
				(uint32_t)priv->vram.paddr,
				(uint32_t)(priv->vram.paddr + size));
	}

	return ret;
}

static int msm_drm_init(struct device *dev, struct drm_driver *drv)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct drm_device *ddev;
	struct msm_drm_private *priv;
	struct msm_kms *kms;
	struct msm_mdss *mdss;
	int ret, i;
	struct sched_param param;

	ddev = drm_dev_alloc(drv, dev);
	if (IS_ERR(ddev)) {
		DRM_DEV_ERROR(dev, "failed to allocate drm_device\n");
		return PTR_ERR(ddev);
	}

	platform_set_drvdata(pdev, ddev);

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv) {
		ret = -ENOMEM;
		goto err_put_drm_dev;
	}

	ddev->dev_private = priv;
	priv->dev = ddev;

	switch (get_mdp_ver(pdev)) {
	case KMS_MDP5:
		ret = mdp5_mdss_init(ddev);
		break;
	case KMS_DPU:
		ret = dpu_mdss_init(ddev);
		break;
	default:
		ret = 0;
		break;
	}
	if (ret)
		goto err_free_priv;

	mdss = priv->mdss;

	priv->wq = alloc_ordered_workqueue("msm", 0);

	INIT_LIST_HEAD(&priv->inactive_list);
	INIT_LIST_HEAD(&priv->vblank_ctrl.event_list);
	kthread_init_work(&priv->vblank_ctrl.work, vblank_ctrl_worker);
	spin_lock_init(&priv->vblank_ctrl.lock);

	drm_mode_config_init(ddev);

	/* Bind all our sub-components: */
	ret = component_bind_all(dev, ddev);
	if (ret)
		goto err_destroy_mdss;

	ret = msm_init_vram(ddev);
	if (ret)
		goto err_msm_uninit;

	msm_gem_shrinker_init(ddev);

	switch (get_mdp_ver(pdev)) {
	case KMS_MDP4:
		kms = mdp4_kms_init(ddev);
		priv->kms = kms;
		break;
	case KMS_MDP5:
		kms = mdp5_kms_init(ddev);
		break;
	case KMS_DPU:
		kms = dpu_kms_init(ddev);
		priv->kms = kms;
		break;
	default:
		kms = ERR_PTR(-ENODEV);
		break;
	}

	if (IS_ERR(kms)) {
		/*
		 * NOTE: once we have GPU support, having no kms should not
		 * be considered fatal.. ideally we would still support gpu
		 * and (for example) use dmabuf/prime to share buffers with
		 * imx drm driver on iMX5
		 */
		DRM_DEV_ERROR(dev, "failed to load kms\n");
		ret = PTR_ERR(kms);
		goto err_msm_uninit;
	}

	/* Enable normalization of plane zpos */
	ddev->mode_config.normalize_zpos = true;

	if (kms) {
		ret = kms->funcs->hw_init(kms);
		if (ret) {
			DRM_DEV_ERROR(dev, "kms hw init failed: %d\n", ret);
			goto err_msm_uninit;
		}
	}

	ddev->mode_config.funcs = &mode_config_funcs;
	ddev->mode_config.helper_private = &mode_config_helper_funcs;

	/*
	 * This priority was found during empirical testing to give appropriate
	 * realtime scheduling to process display updates and interact with
	 * other real-time and normal priority tasks.
	 */
	param.sched_priority = 16;
	for (i = 0; i < priv->num_crtcs; i++) {

		/* initialize display thread */
		priv->disp_thread[i].crtc_id = priv->crtcs[i]->base.id;
		kthread_init_worker(&priv->disp_thread[i].worker);
		priv->disp_thread[i].dev = ddev;
		priv->disp_thread[i].thread =
			kthread_run(kthread_worker_fn,
				&priv->disp_thread[i].worker,
				"crtc_commit:%d", priv->disp_thread[i].crtc_id);
		ret = sched_setscheduler(priv->disp_thread[i].thread,
							SCHED_FIFO, &param);
		if (ret)
			pr_warn("display thread priority update failed: %d\n",
									ret);

		if (IS_ERR(priv->disp_thread[i].thread)) {
			DRM_DEV_ERROR(dev, "failed to create crtc_commit kthread\n");
			priv->disp_thread[i].thread = NULL;
		}

		/* initialize event thread */
		priv->event_thread[i].crtc_id = priv->crtcs[i]->base.id;
		kthread_init_worker(&priv->event_thread[i].worker);
		priv->event_thread[i].dev = ddev;
		priv->event_thread[i].thread =
			kthread_run(kthread_worker_fn,
				&priv->event_thread[i].worker,
				"crtc_event:%d", priv->event_thread[i].crtc_id);

		/*
		 * The event thread should also run at the same priority as the
		 * disp_thread because it is handling frame_done events. A lower
		 * priority event thread and a higher priority disp_thread can
		 * cause frame_pending counters to go beyond 2. This can lead to
		 * commit failure at crtc commit level.
		 */
		ret = sched_setscheduler(priv->event_thread[i].thread,
							SCHED_FIFO, &param);
		if (ret)
			pr_warn("display event thread priority update failed: %d\n",
									ret);

		if (IS_ERR(priv->event_thread[i].thread)) {
			dev_err(dev, "failed to create crtc_event kthread\n");
			priv->event_thread[i].thread = NULL;
		}

		if ((!priv->disp_thread[i].thread) ||
				!priv->event_thread[i].thread) {
			/* clean up previously created threads if any */
			for ( ; i >= 0; i--) {
				if (priv->disp_thread[i].thread) {
					kthread_stop(
						priv->disp_thread[i].thread);
					priv->disp_thread[i].thread = NULL;
				}

				if (priv->event_thread[i].thread) {
					kthread_stop(
						priv->event_thread[i].thread);
					priv->event_thread[i].thread = NULL;
				}
			}
			goto err_msm_uninit;
		}
	}

	ret = drm_vblank_init(ddev, priv->num_crtcs);
	if (ret < 0) {
		DRM_DEV_ERROR(dev, "failed to initialize vblank\n");
		goto err_msm_uninit;
	}

	if (kms) {
		pm_runtime_get_sync(dev);
		ret = drm_irq_install(ddev, kms->irq);
		pm_runtime_put_sync(dev);
		if (ret < 0) {
			DRM_DEV_ERROR(dev, "failed to install IRQ handler\n");
			goto err_msm_uninit;
		}
	}

	ret = drm_dev_register(ddev, 0);
	if (ret)
		goto err_msm_uninit;

	drm_mode_config_reset(ddev);

#ifdef CONFIG_DRM_FBDEV_EMULATION
	if (fbdev)
		priv->fbdev = msm_fbdev_init(ddev);
#endif

	ret = msm_debugfs_late_init(ddev);
	if (ret)
		goto err_msm_uninit;

	drm_kms_helper_poll_init(ddev);

	return 0;

err_msm_uninit:
	msm_drm_uninit(dev);
	return ret;
err_destroy_mdss:
	if (mdss && mdss->funcs)
		mdss->funcs->destroy(ddev);
err_free_priv:
	kfree(priv);
err_put_drm_dev:
	drm_dev_put(ddev);
	return ret;
}

/*
 * DRM operations:
 */

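/* Load the GPU lazily, at most once; serialized by a local init_lock. */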
static void load_gpu(struct drm_device *dev)
{
	static DEFINE_MUTEX(init_lock);
	struct msm_drm_private *priv = dev->dev_private;

	mutex_lock(&init_lock);

	if (!priv->gpu)
		priv->gpu = adreno_load_gpu(dev);

	mutex_unlock(&init_lock);
}

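/* Set up the per-file context (driver_priv) and its submitqueues. */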
static int context_init(struct drm_device *dev, struct drm_file *file)
{
	struct msm_file_private *ctx;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	msm_submitqueue_init(dev, ctx);

	file->driver_priv = ctx;

	return 0;
}

static int msm_open(struct drm_device *dev, struct drm_file *file)
{
	/* For now, load gpu on open.. to avoid the requirement of having
	 * firmware in the initrd.
	 */
	load_gpu(dev);

	return context_init(dev, file);
}

static void context_close(struct msm_file_private *ctx)
{
	msm_submitqueue_close(ctx);
	kfree(ctx);
}

static void msm_postclose(struct drm_device *dev, struct drm_file *file)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_file_private *ctx = file->driver_priv;

	mutex_lock(&dev->struct_mutex);
	if (ctx == priv->lastctx)
		priv->lastctx = NULL;
	mutex_unlock(&dev->struct_mutex);

	context_close(ctx);
}

static irqreturn_t msm_irq(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;
	BUG_ON(!kms);
	return kms->funcs->irq(kms);
}

static void msm_irq_preinstall(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;
	BUG_ON(!kms);
	kms->funcs->irq_preinstall(kms);
}

static int msm_irq_postinstall(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;
	BUG_ON(!kms);
	return kms->funcs->irq_postinstall(kms);
}

static void msm_irq_uninstall(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;
	BUG_ON(!kms);
	kms->funcs->irq_uninstall(kms);
}

static int msm_enable_vblank(struct drm_device *dev, unsigned int pipe)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;
	if (!kms)
		return -ENXIO;
	DBG("dev=%p, crtc=%u", dev, pipe);
	return vblank_ctrl_queue_work(priv, pipe, true);
}

static void msm_disable_vblank(struct drm_device *dev, unsigned int pipe)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;
	if (!kms)
		return;
	DBG("dev=%p, crtc=%u", dev, pipe);
	vblank_ctrl_queue_work(priv, pipe, false);
}

/*
 * DRM ioctls:
 */

static int msm_ioctl_get_param(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct drm_msm_param *args = data;
	struct msm_gpu *gpu;

	/* for now, we just have 3d pipe.. eventually this would need to
	 * be more clever to dispatch to appropriate gpu module:
	 */
	if (args->pipe != MSM_PIPE_3D0)
		return -EINVAL;

	gpu = priv->gpu;

	if (!gpu)
		return -ENXIO;

	return gpu->funcs->get_param(gpu, args->param, &args->value);
}

static int msm_ioctl_gem_new(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_msm_gem_new *args = data;

	if (args->flags & ~MSM_BO_FLAGS) {
		DRM_ERROR("invalid flags: %08x\n", args->flags);
		return -EINVAL;
	}

	return msm_gem_new_handle(dev, file, args->size,
			args->flags, &args->handle, NULL);
}

static inline ktime_t to_ktime(struct drm_msm_timespec timeout)
{
	return ktime_set(timeout.tv_sec, timeout.tv_nsec);
}

static int msm_ioctl_gem_cpu_prep(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_msm_gem_cpu_prep *args = data;
	struct drm_gem_object *obj;
	ktime_t timeout = to_ktime(args->timeout);
	int ret;

	if (args->op & ~MSM_PREP_FLAGS) {
		DRM_ERROR("invalid op: %08x\n", args->op);
		return -EINVAL;
	}

	obj = drm_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	ret = msm_gem_cpu_prep(obj, args->op, &timeout);

	drm_gem_object_put_unlocked(obj);

	return ret;
}

static int msm_ioctl_gem_cpu_fini(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_msm_gem_cpu_fini *args = data;
	struct drm_gem_object *obj;
	int ret;

	obj = drm_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	ret = msm_gem_cpu_fini(obj);

	drm_gem_object_put_unlocked(obj);

	return ret;
}

static int msm_ioctl_gem_info_iova(struct drm_device *dev,
		struct drm_gem_object *obj, uint64_t *iova)
{
	struct msm_drm_private *priv = dev->dev_private;

	if (!priv->gpu)
		return -EINVAL;

	/*
	 * Don't pin the memory here - just get an address so that userspace can
	 * be productive
	 */
	return msm_gem_get_iova(obj, priv->gpu->aspace, iova);
}

static int msm_ioctl_gem_info(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_msm_gem_info *args = data;
	struct drm_gem_object *obj;
	struct msm_gem_object *msm_obj;
	int i, ret = 0;

	if (args->pad)
		return -EINVAL;

	switch (args->info) {
	case MSM_INFO_GET_OFFSET:
	case MSM_INFO_GET_IOVA:
		/* value returned as immediate, not pointer, so len==0: */
		if (args->len)
			return -EINVAL;
		break;
	case MSM_INFO_SET_NAME:
	case MSM_INFO_GET_NAME:
		break;
	default:
		return -EINVAL;
	}

	obj = drm_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	msm_obj = to_msm_bo(obj);

	switch (args->info) {
	case MSM_INFO_GET_OFFSET:
		args->value = msm_gem_mmap_offset(obj);
		break;
	case MSM_INFO_GET_IOVA:
		ret = msm_ioctl_gem_info_iova(dev, obj, &args->value);
		break;
	case MSM_INFO_SET_NAME:
		/* length check should leave room for terminating null: */
		if (args->len >= sizeof(msm_obj->name)) {
			ret = -EINVAL;
			break;
		}
		ret = copy_from_user(msm_obj->name,
			u64_to_user_ptr(args->value), args->len);
		msm_obj->name[args->len] = '\0';
		for (i = 0; i < args->len; i++) {
			if (!isprint(msm_obj->name[i])) {
				msm_obj->name[i] = '\0';
				break;
			}
		}
		break;
	case MSM_INFO_GET_NAME:
		if (args->value && (args->len < strlen(msm_obj->name))) {
			ret = -EINVAL;
			break;
		}
		args->len = strlen(msm_obj->name);
		if (args->value) {
			ret = copy_to_user(u64_to_user_ptr(args->value),
					msm_obj->name, args->len);
		}
		break;
	}

	drm_gem_object_put_unlocked(obj);

	return ret;
}

static int msm_ioctl_wait_fence(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct drm_msm_wait_fence *args = data;
	ktime_t timeout = to_ktime(args->timeout);
	struct msm_gpu_submitqueue *queue;
	struct msm_gpu *gpu = priv->gpu;
	int ret;

	if (args->pad) {
		DRM_ERROR("invalid pad: %08x\n", args->pad);
		return -EINVAL;
	}

	if (!gpu)
		return 0;

	queue = msm_submitqueue_get(file->driver_priv, args->queueid);
	if (!queue)
		return -ENOENT;

	ret = msm_wait_fence(gpu->rb[queue->prio]->fctx, args->fence, &timeout,
		true);

	msm_submitqueue_put(queue);
	return ret;
}

static int msm_ioctl_gem_madvise(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_msm_gem_madvise *args = data;
	struct drm_gem_object *obj;
	int ret;

	switch (args->madv) {
	case MSM_MADV_DONTNEED:
	case MSM_MADV_WILLNEED:
		break;
	default:
		return -EINVAL;
	}

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	obj = drm_gem_object_lookup(file, args->handle);
	if (!obj) {
		ret = -ENOENT;
		goto unlock;
	}

	ret = msm_gem_madvise(obj, args->madv);
	if (ret >= 0) {
		args->retained = ret;
		ret = 0;
	}

	drm_gem_object_put(obj);

unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}


static int msm_ioctl_submitqueue_new(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_msm_submitqueue *args = data;

	if (args->flags & ~MSM_SUBMITQUEUE_FLAGS)
		return -EINVAL;

	return msm_submitqueue_create(dev, file->driver_priv, args->prio,
		args->flags, &args->id);
}


static int msm_ioctl_submitqueue_close(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	u32 id = *(u32 *) data;

	return msm_submitqueue_remove(file->driver_priv, id);
}

static const struct drm_ioctl_desc msm_ioctls[] = {
	DRM_IOCTL_DEF_DRV(MSM_GET_PARAM,    msm_ioctl_get_param,    DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_GEM_NEW,      msm_ioctl_gem_new,      DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_GEM_INFO,     msm_ioctl_gem_info,     DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_GEM_CPU_PREP, msm_ioctl_gem_cpu_prep, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_GEM_CPU_FINI, msm_ioctl_gem_cpu_fini, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_GEM_SUBMIT,   msm_ioctl_gem_submit,   DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_WAIT_FENCE,   msm_ioctl_wait_fence,   DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_GEM_MADVISE,  msm_ioctl_gem_madvise,  DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_SUBMITQUEUE_NEW,   msm_ioctl_submitqueue_new,   DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_SUBMITQUEUE_CLOSE, msm_ioctl_submitqueue_close, DRM_AUTH|DRM_RENDER_ALLOW),
};

static const struct vm_operations_struct vm_ops = {
	.fault = msm_gem_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

static const struct file_operations fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.release = drm_release,
	.unlocked_ioctl = drm_ioctl,
	.compat_ioctl = drm_compat_ioctl,
	.poll = drm_poll,
	.read = drm_read,
	.llseek = no_llseek,
	.mmap = msm_gem_mmap,
};

static struct drm_driver msm_driver = {
	.driver_features    = DRIVER_HAVE_IRQ |
				DRIVER_GEM |
				DRIVER_PRIME |
				DRIVER_RENDER |
				DRIVER_ATOMIC |
				DRIVER_MODESET,
	.open               = msm_open,
	.postclose          = msm_postclose,
	.lastclose          = drm_fb_helper_lastclose,
	.irq_handler        = msm_irq,
	.irq_preinstall     = msm_irq_preinstall,
	.irq_postinstall    = msm_irq_postinstall,
	.irq_uninstall      = msm_irq_uninstall,
	.enable_vblank      = msm_enable_vblank,
	.disable_vblank     = msm_disable_vblank,
	.gem_free_object    = msm_gem_free_object,
	.gem_vm_ops         = &vm_ops,
	.dumb_create        = msm_gem_dumb_create,
	.dumb_map_offset    = msm_gem_dumb_map_offset,
	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
	.gem_prime_export   = drm_gem_prime_export,
	.gem_prime_import   = drm_gem_prime_import,
	.gem_prime_res_obj  = msm_gem_prime_res_obj,
	.gem_prime_pin      = msm_gem_prime_pin,
	.gem_prime_unpin    = msm_gem_prime_unpin,
	.gem_prime_get_sg_table = msm_gem_prime_get_sg_table,
	.gem_prime_import_sg_table = msm_gem_prime_import_sg_table,
	.gem_prime_vmap     = msm_gem_prime_vmap,
	.gem_prime_vunmap   = msm_gem_prime_vunmap,
	.gem_prime_mmap     = msm_gem_prime_mmap,
#ifdef CONFIG_DEBUG_FS
	.debugfs_init       = msm_debugfs_init,
#endif
	.ioctls             = msm_ioctls,
	.num_ioctls         = ARRAY_SIZE(msm_ioctls),
	.fops               = &fops,
	.name               = "msm",
	.desc               = "MSM Snapdragon DRM",
	.date               = "20130625",
	.major              = MSM_VERSION_MAJOR,
	.minor              = MSM_VERSION_MINOR,
	.patchlevel         = MSM_VERSION_PATCHLEVEL,
};

#ifdef CONFIG_PM_SLEEP
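/* System sleep: save the atomic state on suspend and restore it on resume. */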
static int msm_pm_suspend(struct device *dev)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct msm_drm_private *priv = ddev->dev_private;

	if (WARN_ON(priv->pm_state))
		drm_atomic_state_put(priv->pm_state);

	priv->pm_state = drm_atomic_helper_suspend(ddev);
	if (IS_ERR(priv->pm_state)) {
		int ret = PTR_ERR(priv->pm_state);
		DRM_ERROR("Failed to suspend dpu, %d\n", ret);
		return ret;
	}

	return 0;
}

static int msm_pm_resume(struct device *dev)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct msm_drm_private *priv = ddev->dev_private;
	int ret;

	if (WARN_ON(!priv->pm_state))
		return -ENOENT;

	ret = drm_atomic_helper_resume(ddev, priv->pm_state);
	if (!ret)
		priv->pm_state = NULL;

	return ret;
}
#endif

#ifdef CONFIG_PM
static int msm_runtime_suspend(struct device *dev)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct msm_drm_private *priv = ddev->dev_private;
	struct msm_mdss *mdss = priv->mdss;

	DBG("");

	if (mdss && mdss->funcs)
		return mdss->funcs->disable(mdss);

	return 0;
}

static int msm_runtime_resume(struct device *dev)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct msm_drm_private *priv = ddev->dev_private;
	struct msm_mdss *mdss = priv->mdss;

	DBG("");

	if (mdss && mdss->funcs)
		return mdss->funcs->enable(mdss);

	return 0;
}
#endif

static const struct dev_pm_ops msm_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(msm_pm_suspend, msm_pm_resume)
	SET_RUNTIME_PM_OPS(msm_runtime_suspend, msm_runtime_resume, NULL)
};

/*
 * Componentized driver support:
 */

/*
 * NOTE: duplication of the same code as exynos or imx (or probably any other).
 * so probably some room for some helpers
 */
static int compare_of(struct device *dev, void *data)
{
	return dev->of_node == data;
}

/*
 * Identify what components need to be added by parsing what remote-endpoints
 * our MDP output ports are connected to. In the case of LVDS on MDP4, there
 * is no external component that we need to add since LVDS is within MDP4
 * itself.
 */
static int add_components_mdp(struct device *mdp_dev,
			      struct component_match **matchptr)
{
	struct device_node *np = mdp_dev->of_node;
	struct device_node *ep_node;
	struct device *master_dev;

	/*
	 * on MDP4 based platforms, the MDP platform device is the component
	 * master that adds other display interface components to itself.
	 *
	 * on MDP5 based platforms, the MDSS platform device is the component
	 * master that adds MDP5 and other display interface components to
	 * itself.
	 */
	if (of_device_is_compatible(np, "qcom,mdp4"))
		master_dev = mdp_dev;
	else
		master_dev = mdp_dev->parent;

	for_each_endpoint_of_node(np, ep_node) {
		struct device_node *intf;
		struct of_endpoint ep;
		int ret;

		ret = of_graph_parse_endpoint(ep_node, &ep);
		if (ret) {
			DRM_DEV_ERROR(mdp_dev, "unable to parse port endpoint\n");
			of_node_put(ep_node);
			return ret;
		}

		/*
		 * The LCDC/LVDS port on MDP4 is a special case where the
		 * remote-endpoint isn't a component that we need to add
		 */
		if (of_device_is_compatible(np, "qcom,mdp4") &&
		    ep.port == 0)
			continue;

		/*
		 * It's okay if some of the ports don't have a remote endpoint
		 * specified. It just means that the port isn't connected to
		 * any external interface.
		 */
		intf = of_graph_get_remote_port_parent(ep_node);
		if (!intf)
			continue;

		drm_of_component_match_add(master_dev, matchptr, compare_of,
					   intf);
		of_node_put(intf);
	}

	return 0;
}

static int compare_name_mdp(struct device *dev, void *data)
{
	return (strstr(dev_name(dev), "mdp") != NULL);
}

static int add_display_components(struct device *dev,
				  struct component_match **matchptr)
{
	struct device *mdp_dev;
	int ret;

	/*
	 * MDP5/DPU based devices don't have a flat hierarchy. There is a top
	 * level parent: MDSS, and children: MDP5/DPU, DSI, HDMI, eDP etc.
	 * Populate the children devices, find the MDP5/DPU node, and then add
	 * the interfaces to our components list.
	 */
	if (of_device_is_compatible(dev->of_node, "qcom,mdss") ||
	    of_device_is_compatible(dev->of_node, "qcom,sdm845-mdss")) {
		ret = of_platform_populate(dev->of_node, NULL, NULL, dev);
		if (ret) {
			DRM_DEV_ERROR(dev, "failed to populate children devices\n");
			return ret;
		}

		mdp_dev = device_find_child(dev, NULL, compare_name_mdp);
		if (!mdp_dev) {
			DRM_DEV_ERROR(dev, "failed to find MDSS MDP node\n");
			of_platform_depopulate(dev);
			return -ENODEV;
		}

		put_device(mdp_dev);

		/* add the MDP component itself */
		drm_of_component_match_add(dev, matchptr, compare_of,
					   mdp_dev->of_node);
	} else {
		/* MDP4 */
		mdp_dev = dev;
	}

	ret = add_components_mdp(mdp_dev, matchptr);
	if (ret)
		of_platform_depopulate(dev);

	return ret;
}

/*
 * We don't know what's the best binding to link the gpu with the drm device.
 * For now, we just hunt for all the possible gpus that we support, and add them
 * as components.
 */
static const struct of_device_id msm_gpu_match[] = {
	{ .compatible = "qcom,adreno" },
	{ .compatible = "qcom,adreno-3xx" },
	{ .compatible = "qcom,kgsl-3d0" },
	{ },
};

static int add_gpu_components(struct device *dev,
			      struct component_match **matchptr)
{
	struct device_node *np;

	np = of_find_matching_node(NULL, msm_gpu_match);
	if (!np)
		return 0;

	drm_of_component_match_add(dev, matchptr, compare_of, np);

	of_node_put(np);

	return 0;
}

static int msm_drm_bind(struct device *dev)
{
	return msm_drm_init(dev, &msm_driver);
}

static void msm_drm_unbind(struct device *dev)
{
	msm_drm_uninit(dev);
}

static const struct component_master_ops msm_drm_ops = {
	.bind = msm_drm_bind,
	.unbind = msm_drm_unbind,
};

/*
 * Platform driver:
 */

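/*
 * Build the component match list (display interfaces + GPU) and register as
 * the component master; the DRM device itself is brought up in msm_drm_bind().
 */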
static int msm_pdev_probe(struct platform_device *pdev)
{
	struct component_match *match = NULL;
	int ret;

	ret = add_display_components(&pdev->dev, &match);
	if (ret)
		return ret;

	ret = add_gpu_components(&pdev->dev, &match);
	if (ret)
		return ret;

	/* on all devices that I am aware of, iommu's which can map
	 * any address the cpu can see are used:
	 */
	ret = dma_set_mask_and_coherent(&pdev->dev, ~0);
	if (ret)
		return ret;

	return component_master_add_with_match(&pdev->dev, &msm_drm_ops, match);
}

static int msm_pdev_remove(struct platform_device *pdev)
{
	component_master_del(&pdev->dev, &msm_drm_ops);
	of_platform_depopulate(&pdev->dev);

	return 0;
}

static const struct of_device_id dt_match[] = {
	{ .compatible = "qcom,mdp4", .data = (void *)KMS_MDP4 },
	{ .compatible = "qcom,mdss", .data = (void *)KMS_MDP5 },
	{ .compatible = "qcom,sdm845-mdss", .data = (void *)KMS_DPU },
	{}
};
MODULE_DEVICE_TABLE(of, dt_match);

static struct platform_driver msm_platform_driver = {
	.probe      = msm_pdev_probe,
	.remove     = msm_pdev_remove,
	.driver     = {
		.name   = "msm",
		.of_match_table = dt_match,
		.pm     = &msm_pm_ops,
	},
};

static int __init msm_drm_register(void)
{
	if (!modeset)
		return -EINVAL;

	DBG("init");
	msm_mdp_register();
	msm_dpu_register();
	msm_dsi_register();
	msm_edp_register();
	msm_hdmi_register();
	adreno_register();
	return platform_driver_register(&msm_platform_driver);
}

static void __exit msm_drm_unregister(void)
{
	DBG("fini");
	platform_driver_unregister(&msm_platform_driver);
	msm_hdmi_unregister();
	adreno_unregister();
	msm_edp_unregister();
	msm_dsi_unregister();
	msm_mdp_unregister();
	msm_dpu_unregister();
}

module_init(msm_drm_register);
module_exit(msm_drm_unregister);

MODULE_AUTHOR("Rob Clark <robdclark@gmail.com>");
MODULE_DESCRIPTION("MSM DRM Driver");
MODULE_LICENSE("GPL");