blob: 8a589d505b6f5cfebe8942b03aaaac87b8edd7d3 [file] [log] [blame]
Thomas Gleixnercaab2772019-06-03 07:44:50 +02001// SPDX-License-Identifier: GPL-2.0-only
Rob Clarkc8afe682013-06-26 12:44:06 -04002/*
Jeykumar Sankaran25fdd592018-06-27 15:26:09 -04003 * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
Rob Clarkc8afe682013-06-26 12:44:06 -04004 * Copyright (C) 2013 Red Hat
5 * Author: Rob Clark <robdclark@gmail.com>
Rob Clarkc8afe682013-06-26 12:44:06 -04006 */
7
Sam Ravnborgfeea39a2019-08-04 08:55:51 +02008#include <linux/dma-mapping.h>
Jeykumar Sankaran25fdd592018-06-27 15:26:09 -04009#include <linux/kthread.h>
Sam Ravnborgfeea39a2019-08-04 08:55:51 +020010#include <linux/uaccess.h>
Jeykumar Sankaran25fdd592018-06-27 15:26:09 -040011#include <uapi/linux/sched/types.h>
Sam Ravnborgfeea39a2019-08-04 08:55:51 +020012
13#include <drm/drm_drv.h>
14#include <drm/drm_file.h>
15#include <drm/drm_ioctl.h>
16#include <drm/drm_irq.h>
17#include <drm/drm_prime.h>
Russell King97ac0e42016-10-19 11:28:27 +010018#include <drm/drm_of.h>
Sam Ravnborgfeea39a2019-08-04 08:55:51 +020019#include <drm/drm_vblank.h>
Russell King97ac0e42016-10-19 11:28:27 +010020
Rob Clarkc8afe682013-06-26 12:44:06 -040021#include "msm_drv.h"
Rob Clarkedcd60c2016-03-16 12:56:12 -040022#include "msm_debugfs.h"
Rob Clarkfde5de62016-03-15 15:35:08 -040023#include "msm_fence.h"
Rob Clarkf05c83e2018-11-29 10:27:22 -050024#include "msm_gem.h"
Rob Clark7198e6b2013-07-19 12:59:32 -040025#include "msm_gpu.h"
Rob Clarkdd2da6e2013-11-30 16:12:10 -050026#include "msm_kms.h"
Jonathan Marekc2052a42018-11-14 17:08:04 -050027#include "adreno/adreno_gpu.h"
Rob Clarkc8afe682013-06-26 12:44:06 -040028
Rob Clarka8d854c2016-06-01 14:02:02 -040029/*
30 * MSM driver version:
31 * - 1.0.0 - initial interface
32 * - 1.1.0 - adds madvise, and support for submits with > 4 cmd buffers
Rob Clark7a3bcc02016-09-16 18:37:44 -040033 * - 1.2.0 - adds explicit fence support for submit ioctl
Jordan Crousef7de1542017-10-20 11:06:55 -060034 * - 1.3.0 - adds GMEM_BASE + NR_RINGS params, SUBMITQUEUE_NEW +
35 * SUBMITQUEUE_CLOSE ioctls, and MSM_INFO_IOVA flag for
36 * MSM_GEM_INFO ioctl.
Rob Clark1fed8df2018-11-29 10:30:04 -050037 * - 1.4.0 - softpin, MSM_RELOC_BO_DUMP, and GEM_INFO support to set/get
38 * GEM object's debug name
 * - 1.5.0 - Add SUBMITQUEUE_QUERY ioctl
Rob Clarka8d854c2016-06-01 14:02:02 -040040 */
41#define MSM_VERSION_MAJOR 1
Jordan Crouseb0fb6602019-03-22 14:21:22 -060042#define MSM_VERSION_MINOR 5
Rob Clarka8d854c2016-06-01 14:02:02 -040043#define MSM_VERSION_PATCHLEVEL 0
44
/* KMS mode-config callbacks: fb creation is msm-specific, everything else
 * is handled by the generic fb-helper / atomic helpers.
 */
static const struct drm_mode_config_funcs mode_config_funcs = {
	.fb_create = msm_framebuffer_create,
	.output_poll_changed = drm_fb_helper_output_poll_changed,
	.atomic_check = drm_atomic_helper_check,
	.atomic_commit = drm_atomic_helper_commit,
};

/* msm supplies its own commit-tail to sequence kms hw programming */
static const struct drm_mode_config_helper_funcs mode_config_helper_funcs = {
	.atomic_commit_tail = msm_atomic_commit_tail,
};
55
/* reglog: trace every register read/write (see msm_readl()/msm_writel());
 * compiled out to a constant 0 when register logging support is disabled.
 */
#ifdef CONFIG_DRM_MSM_REGISTER_LOGGING
static bool reglog = false;
MODULE_PARM_DESC(reglog, "Enable register read/write logging");
module_param(reglog, bool, 0600);
#else
#define reglog 0
#endif

/* fbdev: allow disabling the legacy fbdev compatibility layer */
#ifdef CONFIG_DRM_FBDEV_EMULATION
static bool fbdev = true;
MODULE_PARM_DESC(fbdev, "Enable fbdev compat layer");
module_param(fbdev, bool, 0600);
#endif

/* vram: carveout size for devices without an IOMMU (parsed by memparse) */
static char *vram = "16m";
MODULE_PARM_DESC(vram, "Configure VRAM size (for devices without IOMMU/GPUMMU)");
module_param(vram, charp, 0);

/* dumpstate: dump KMS state to the log when errors are hit */
bool dumpstate = false;
MODULE_PARM_DESC(dumpstate, "Dump KMS state on errors");
module_param(dumpstate, bool, 0600);

/* modeset: escape hatch to disable KMS entirely */
static bool modeset = true;
MODULE_PARM_DESC(modeset, "Use kernel modesetting [KMS] (1=on (default), 0=disable)");
module_param(modeset, bool, 0600);
81
Rob Clark060530f2014-03-03 14:19:12 -050082/*
83 * Util/helpers:
84 */
85
Jordan Crouse8e54eea2018-08-06 11:33:21 -060086int msm_clk_bulk_get(struct device *dev, struct clk_bulk_data **bulk)
87{
88 struct property *prop;
89 const char *name;
90 struct clk_bulk_data *local;
91 int i = 0, ret, count;
92
93 count = of_property_count_strings(dev->of_node, "clock-names");
94 if (count < 1)
95 return 0;
96
97 local = devm_kcalloc(dev, sizeof(struct clk_bulk_data *),
98 count, GFP_KERNEL);
99 if (!local)
100 return -ENOMEM;
101
102 of_property_for_each_string(dev->of_node, "clock-names", prop, name) {
103 local[i].id = devm_kstrdup(dev, name, GFP_KERNEL);
104 if (!local[i].id) {
105 devm_kfree(dev, local);
106 return -ENOMEM;
107 }
108
109 i++;
110 }
111
112 ret = devm_clk_bulk_get(dev, count, local);
113
114 if (ret) {
115 for (i = 0; i < count; i++)
116 devm_kfree(dev, (void *) local[i].id);
117 devm_kfree(dev, local);
118
119 return ret;
120 }
121
122 *bulk = local;
123 return count;
124}
125
126struct clk *msm_clk_bulk_get_clock(struct clk_bulk_data *bulk, int count,
127 const char *name)
128{
129 int i;
130 char n[32];
131
132 snprintf(n, sizeof(n), "%s_clk", name);
133
134 for (i = 0; bulk && i < count; i++) {
135 if (!strcmp(bulk[i].id, name) || !strcmp(bulk[i].id, n))
136 return bulk[i].clk;
137 }
138
139
140 return NULL;
141}
142
Rob Clark720c3bb2017-01-30 11:30:58 -0500143struct clk *msm_clk_get(struct platform_device *pdev, const char *name)
144{
145 struct clk *clk;
146 char name2[32];
147
148 clk = devm_clk_get(&pdev->dev, name);
149 if (!IS_ERR(clk) || PTR_ERR(clk) == -EPROBE_DEFER)
150 return clk;
151
152 snprintf(name2, sizeof(name2), "%s_clk", name);
153
154 clk = devm_clk_get(&pdev->dev, name2);
155 if (!IS_ERR(clk))
156 dev_warn(&pdev->dev, "Using legacy clk name binding. Use "
157 "\"%s\" instead of \"%s\"\n", name, name2);
158
159 return clk;
160}
161
/*
 * msm_ioremap() - map a platform MEM resource with devres lifetime
 * @pdev: platform device owning the resource
 * @name: resource name to look up, or NULL for the first MEM resource
 * @dbgname: tag used only in the optional reglog trace output
 *
 * Returns the mapping, or ERR_PTR(-EINVAL/-ENOMEM) on failure.
 */
void __iomem *msm_ioremap(struct platform_device *pdev, const char *name,
		const char *dbgname)
{
	struct resource *res;
	unsigned long size;
	void __iomem *ptr;

	if (name)
		res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
	else
		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);

	if (!res) {
		DRM_DEV_ERROR(&pdev->dev, "failed to get memory resource: %s\n", name);
		return ERR_PTR(-EINVAL);
	}

	size = resource_size(res);

	ptr = devm_ioremap_nocache(&pdev->dev, res->start, size);
	if (!ptr) {
		DRM_DEV_ERROR(&pdev->dev, "failed to ioremap: %s\n", name);
		return ERR_PTR(-ENOMEM);
	}

	/* optional trace of every region we map, for register debugging */
	if (reglog)
		printk(KERN_DEBUG "IO:region %s %p %08lx\n", dbgname, ptr, size);

	return ptr;
}
192
/* Write a 32-bit device register, tracing the access when reglog is set. */
void msm_writel(u32 data, void __iomem *addr)
{
	if (reglog)
		printk(KERN_DEBUG "IO:W %p %08x\n", addr, data);
	writel(data, addr);
}
199
200u32 msm_readl(const void __iomem *addr)
201{
202 u32 val = readl(addr);
203 if (reglog)
Joe Perches8dfe1622017-02-28 04:55:54 -0800204 pr_err("IO:R %p %08x\n", addr, val);
Rob Clarkc8afe682013-06-26 12:44:06 -0400205 return val;
206}
207
/*
 * Deferred vblank enable/disable request. Queued on priv->wq so the KMS
 * vblank hooks run in process context; freed by the worker after use.
 */
struct msm_vblank_work {
	struct work_struct work;
	int crtc_id;	/* index into priv->crtcs[] */
	bool enable;	/* true = enable vblank, false = disable */
	struct msm_drm_private *priv;
};
214
Jeykumar Sankaran5aeb6652018-12-14 15:57:52 -0800215static void vblank_ctrl_worker(struct work_struct *work)
Hai Li78b1d472015-07-27 13:49:45 -0400216{
Jeykumar Sankaran48d1d282018-12-14 15:57:55 -0800217 struct msm_vblank_work *vbl_work = container_of(work,
218 struct msm_vblank_work, work);
219 struct msm_drm_private *priv = vbl_work->priv;
Hai Li78b1d472015-07-27 13:49:45 -0400220 struct msm_kms *kms = priv->kms;
Hai Li78b1d472015-07-27 13:49:45 -0400221
Jeykumar Sankaran48d1d282018-12-14 15:57:55 -0800222 if (vbl_work->enable)
223 kms->funcs->enable_vblank(kms, priv->crtcs[vbl_work->crtc_id]);
224 else
225 kms->funcs->disable_vblank(kms, priv->crtcs[vbl_work->crtc_id]);
Hai Li78b1d472015-07-27 13:49:45 -0400226
Jeykumar Sankaran48d1d282018-12-14 15:57:55 -0800227 kfree(vbl_work);
Hai Li78b1d472015-07-27 13:49:45 -0400228}
229
230static int vblank_ctrl_queue_work(struct msm_drm_private *priv,
231 int crtc_id, bool enable)
232{
Jeykumar Sankaran48d1d282018-12-14 15:57:55 -0800233 struct msm_vblank_work *vbl_work;
Hai Li78b1d472015-07-27 13:49:45 -0400234
Jeykumar Sankaran48d1d282018-12-14 15:57:55 -0800235 vbl_work = kzalloc(sizeof(*vbl_work), GFP_ATOMIC);
236 if (!vbl_work)
Hai Li78b1d472015-07-27 13:49:45 -0400237 return -ENOMEM;
238
Jeykumar Sankaran48d1d282018-12-14 15:57:55 -0800239 INIT_WORK(&vbl_work->work, vblank_ctrl_worker);
Hai Li78b1d472015-07-27 13:49:45 -0400240
Jeykumar Sankaran48d1d282018-12-14 15:57:55 -0800241 vbl_work->crtc_id = crtc_id;
242 vbl_work->enable = enable;
243 vbl_work->priv = priv;
Hai Li78b1d472015-07-27 13:49:45 -0400244
Jeykumar Sankaran48d1d282018-12-14 15:57:55 -0800245 queue_work(priv->wq, &vbl_work->work);
Hai Li78b1d472015-07-27 13:49:45 -0400246
247 return 0;
248}
249
/*
 * Full teardown of the drm device, in (roughly) reverse order of
 * msm_drm_init(). This is also invoked as the error path for a partially
 * completed init, so each step must tolerate state that was never set up
 * (NULL kms/mdss, no vram carveout, threads never started, ...).
 */
static int msm_drm_uninit(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct drm_device *ddev = platform_get_drvdata(pdev);
	struct msm_drm_private *priv = ddev->dev_private;
	struct msm_kms *kms = priv->kms;
	struct msm_mdss *mdss = priv->mdss;
	int i;

	/*
	 * Shutdown the hw if we're far enough along where things might be on.
	 * If we run this too early, we'll end up panicking in any variety of
	 * places. Since we don't register the drm device until late in
	 * msm_drm_init, drm_dev->registered is used as an indicator that the
	 * shutdown will be successful.
	 */
	if (ddev->registered) {
		drm_dev_unregister(ddev);
		drm_atomic_helper_shutdown(ddev);
	}

	/* We must cancel and cleanup any pending vblank enable/disable
	 * work before drm_irq_uninstall() to avoid work re-enabling an
	 * irq after uninstall has disabled it.
	 */

	flush_workqueue(priv->wq);

	/* clean up event worker threads */
	for (i = 0; i < priv->num_crtcs; i++) {
		if (priv->event_thread[i].thread) {
			kthread_destroy_worker(&priv->event_thread[i].worker);
			/* mark destroyed so a repeat teardown is a no-op */
			priv->event_thread[i].thread = NULL;
		}
	}

	msm_gem_shrinker_cleanup(ddev);

	drm_kms_helper_poll_fini(ddev);

	msm_perf_debugfs_cleanup(priv);
	msm_rd_debugfs_cleanup(priv);

#ifdef CONFIG_DRM_FBDEV_EMULATION
	if (fbdev && priv->fbdev)
		msm_fbdev_free(ddev);
#endif

	drm_mode_config_cleanup(ddev);

	/* keep the device powered while the irq is being removed */
	pm_runtime_get_sync(dev);
	drm_irq_uninstall(ddev);
	pm_runtime_put_sync(dev);

	if (kms && kms->funcs)
		kms->funcs->destroy(kms);

	/* release the VRAM carveout, if msm_init_vram() set one up */
	if (priv->vram.paddr) {
		unsigned long attrs = DMA_ATTR_NO_KERNEL_MAPPING;
		drm_mm_takedown(&priv->vram.mm);
		dma_free_attrs(dev, priv->vram.size, NULL,
			priv->vram.paddr, attrs);
	}

	component_unbind_all(dev, ddev);

	if (mdss && mdss->funcs)
		mdss->funcs->destroy(ddev);

	ddev->dev_private = NULL;
	drm_dev_put(ddev);

	/* priv is allocated separately from ddev, so this order is safe */
	destroy_workqueue(priv->wq);
	kfree(priv);

	return 0;
}
327
Jeykumar Sankaranaaded2e2018-06-27 14:26:24 -0400328#define KMS_MDP4 4
329#define KMS_MDP5 5
Jeykumar Sankaran25fdd592018-06-27 15:26:09 -0400330#define KMS_DPU 3
Jeykumar Sankaranaaded2e2018-06-27 14:26:24 -0400331
Rob Clark06c0dd92013-11-30 17:51:47 -0500332static int get_mdp_ver(struct platform_device *pdev)
333{
Rob Clark06c0dd92013-11-30 17:51:47 -0500334 struct device *dev = &pdev->dev;
Archit Tanejae9fbdaf2015-11-18 12:15:14 +0530335
336 return (int) (unsigned long) of_device_get_match_data(dev);
Rob Clark06c0dd92013-11-30 17:51:47 -0500337}
338
Rob Clark072f1f92015-03-03 15:04:25 -0500339#include <linux/of_address.h>
340
/*
 * True if GEM buffers can be mapped through an MMU: either the GPU brings
 * its own (a2xx), or the platform bus has an IOMMU. When false, the VRAM
 * carveout path in msm_init_vram() is used instead.
 */
bool msm_use_mmu(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;

	/* a2xx comes with its own MMU */
	return priv->is_a2xx || iommu_present(&platform_bus_type);
}
348
/*
 * Set up the (optional) VRAM carveout used when GEM buffers cannot go
 * through an IOMMU, or when a DT "memory-region" reserves splash-screen
 * memory. Returns 0 when no carveout is needed or on success.
 */
static int msm_init_vram(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct device_node *node;
	unsigned long size = 0;
	int ret = 0;

	/* In the device-tree world, we could have a 'memory-region'
	 * phandle, which gives us a link to our "vram". Allocating
	 * is all nicely abstracted behind the dma api, but we need
	 * to know the entire size to allocate it all in one go. There
	 * are two cases:
	 *  1) device with no IOMMU, in which case we need exclusive
	 *     access to a VRAM carveout big enough for all gpu
	 *     buffers
	 *  2) device with IOMMU, but where the bootloader puts up
	 *     a splash screen. In this case, the VRAM carveout
	 *     need only be large enough for fbdev fb. But we need
	 *     exclusive access to the buffer to avoid the kernel
	 *     using those pages for other purposes (which appears
	 *     as corruption on screen before we have a chance to
	 *     load and do initial modeset)
	 */

	node = of_parse_phandle(dev->dev->of_node, "memory-region", 0);
	if (node) {
		struct resource r;
		ret = of_address_to_resource(node, 0, &r);
		of_node_put(node);
		if (ret)
			return ret;
		/* NOTE(review): resource .end is inclusive, so this is one
		 * byte short of resource_size(&r) — confirm intent before
		 * changing.
		 */
		size = r.end - r.start;
		DRM_INFO("using VRAM carveout: %lx@%pa\n", size, &r.start);

		/* if we have no IOMMU, then we need to use carveout allocator.
		 * Grab the entire CMA chunk carved out in early startup in
		 * mach-msm:
		 */
	} else if (!msm_use_mmu(dev)) {
		DRM_INFO("using %s VRAM carveout\n", vram);
		size = memparse(vram, NULL);
	}

	if (size) {
		unsigned long attrs = 0;
		void *p;

		priv->vram.size = size;

		drm_mm_init(&priv->vram.mm, 0, (size >> PAGE_SHIFT) - 1);
		spin_lock_init(&priv->vram.lock);

		attrs |= DMA_ATTR_NO_KERNEL_MAPPING;
		attrs |= DMA_ATTR_WRITE_COMBINE;

		/* note that for no-kernel-mapping, the vaddr returned
		 * is bogus, but non-null if allocation succeeded:
		 */
		p = dma_alloc_attrs(dev->dev, size,
				&priv->vram.paddr, GFP_KERNEL, attrs);
		if (!p) {
			DRM_DEV_ERROR(dev->dev, "failed to allocate VRAM\n");
			priv->vram.paddr = 0;
			return -ENOMEM;
		}

		DRM_DEV_INFO(dev->dev, "VRAM: %08x->%08x\n",
				(uint32_t)priv->vram.paddr,
				(uint32_t)(priv->vram.paddr + size));
	}

	return ret;
}
422
Archit Taneja2b669872016-05-02 11:05:54 +0530423static int msm_drm_init(struct device *dev, struct drm_driver *drv)
Rob Clark5bf9c0b2015-03-03 15:04:24 -0500424{
Archit Taneja2b669872016-05-02 11:05:54 +0530425 struct platform_device *pdev = to_platform_device(dev);
426 struct drm_device *ddev;
Rob Clark5bf9c0b2015-03-03 15:04:24 -0500427 struct msm_drm_private *priv;
428 struct msm_kms *kms;
Rajesh Yadavbc3220b2018-06-21 16:06:10 -0400429 struct msm_mdss *mdss;
Jeykumar Sankaran25fdd592018-06-27 15:26:09 -0400430 int ret, i;
431 struct sched_param param;
Rob Clark5bf9c0b2015-03-03 15:04:24 -0500432
Archit Taneja2b669872016-05-02 11:05:54 +0530433 ddev = drm_dev_alloc(drv, dev);
Tom Gundersen0f288602016-09-21 16:59:19 +0200434 if (IS_ERR(ddev)) {
Mamta Shukla6a41da12018-10-20 23:19:26 +0530435 DRM_DEV_ERROR(dev, "failed to allocate drm_device\n");
Tom Gundersen0f288602016-09-21 16:59:19 +0200436 return PTR_ERR(ddev);
Rob Clark5bf9c0b2015-03-03 15:04:24 -0500437 }
438
Archit Taneja2b669872016-05-02 11:05:54 +0530439 platform_set_drvdata(pdev, ddev);
Archit Taneja2b669872016-05-02 11:05:54 +0530440
441 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
442 if (!priv) {
Jeykumar Sankaran77050c32018-06-27 14:35:28 -0400443 ret = -ENOMEM;
Thomas Zimmermann4d8dc2d2018-09-26 13:48:59 +0200444 goto err_put_drm_dev;
Archit Taneja2b669872016-05-02 11:05:54 +0530445 }
446
447 ddev->dev_private = priv;
Rob Clark68209392016-05-17 16:19:32 -0400448 priv->dev = ddev;
Rob Clark5bf9c0b2015-03-03 15:04:24 -0500449
Jeykumar Sankaran25fdd592018-06-27 15:26:09 -0400450 switch (get_mdp_ver(pdev)) {
451 case KMS_MDP5:
452 ret = mdp5_mdss_init(ddev);
453 break;
454 case KMS_DPU:
455 ret = dpu_mdss_init(ddev);
456 break;
457 default:
458 ret = 0;
459 break;
460 }
Jeykumar Sankaran77050c32018-06-27 14:35:28 -0400461 if (ret)
462 goto err_free_priv;
Archit Taneja0a6030d2016-05-08 21:36:28 +0530463
Rajesh Yadavbc3220b2018-06-21 16:06:10 -0400464 mdss = priv->mdss;
465
Rob Clark5bf9c0b2015-03-03 15:04:24 -0500466 priv->wq = alloc_ordered_workqueue("msm", 0);
Rob Clark5bf9c0b2015-03-03 15:04:24 -0500467
Kristian H. Kristensen48e7f182019-03-20 10:09:08 -0700468 INIT_WORK(&priv->free_work, msm_gem_free_work);
469 init_llist_head(&priv->free_list);
470
Rob Clark5bf9c0b2015-03-03 15:04:24 -0500471 INIT_LIST_HEAD(&priv->inactive_list);
Rob Clark5bf9c0b2015-03-03 15:04:24 -0500472
Archit Taneja2b669872016-05-02 11:05:54 +0530473 drm_mode_config_init(ddev);
Rob Clark060530f2014-03-03 14:19:12 -0500474
475 /* Bind all our sub-components: */
Archit Taneja2b669872016-05-02 11:05:54 +0530476 ret = component_bind_all(dev, ddev);
Jeykumar Sankaran77050c32018-06-27 14:35:28 -0400477 if (ret)
478 goto err_destroy_mdss;
Rob Clark060530f2014-03-03 14:19:12 -0500479
Archit Taneja2b669872016-05-02 11:05:54 +0530480 ret = msm_init_vram(ddev);
Rob Clark13f15562015-05-07 15:20:13 -0400481 if (ret)
Jeykumar Sankaran77050c32018-06-27 14:35:28 -0400482 goto err_msm_uninit;
Rob Clark13f15562015-05-07 15:20:13 -0400483
Rob Clark68209392016-05-17 16:19:32 -0400484 msm_gem_shrinker_init(ddev);
485
Rob Clark06c0dd92013-11-30 17:51:47 -0500486 switch (get_mdp_ver(pdev)) {
Jeykumar Sankaranaaded2e2018-06-27 14:26:24 -0400487 case KMS_MDP4:
Archit Taneja2b669872016-05-02 11:05:54 +0530488 kms = mdp4_kms_init(ddev);
Archit Taneja0a6030d2016-05-08 21:36:28 +0530489 priv->kms = kms;
Rob Clark06c0dd92013-11-30 17:51:47 -0500490 break;
Jeykumar Sankaranaaded2e2018-06-27 14:26:24 -0400491 case KMS_MDP5:
Archit Taneja392ae6e2016-06-14 18:24:54 +0530492 kms = mdp5_kms_init(ddev);
Rob Clark06c0dd92013-11-30 17:51:47 -0500493 break;
Jeykumar Sankaran25fdd592018-06-27 15:26:09 -0400494 case KMS_DPU:
495 kms = dpu_kms_init(ddev);
496 priv->kms = kms;
497 break;
Rob Clark06c0dd92013-11-30 17:51:47 -0500498 default:
Jonathan Mareke6f6d632018-12-04 10:16:58 -0500499 /* valid only for the dummy headless case, where of_node=NULL */
500 WARN_ON(dev->of_node);
501 kms = NULL;
Rob Clark06c0dd92013-11-30 17:51:47 -0500502 break;
503 }
504
Rob Clarkc8afe682013-06-26 12:44:06 -0400505 if (IS_ERR(kms)) {
Mamta Shukla6a41da12018-10-20 23:19:26 +0530506 DRM_DEV_ERROR(dev, "failed to load kms\n");
Thomas Meyere4826a92013-09-16 23:19:54 +0200507 ret = PTR_ERR(kms);
Jonathan Marekb2ccfdf2018-11-21 20:52:35 -0500508 priv->kms = NULL;
Jeykumar Sankaran77050c32018-06-27 14:35:28 -0400509 goto err_msm_uninit;
Rob Clarkc8afe682013-06-26 12:44:06 -0400510 }
511
Jeykumar Sankaranbb676df2018-06-11 14:13:20 -0700512 /* Enable normalization of plane zpos */
513 ddev->mode_config.normalize_zpos = true;
514
Rob Clarkc8afe682013-06-26 12:44:06 -0400515 if (kms) {
Rob Clarkc8afe682013-06-26 12:44:06 -0400516 ret = kms->funcs->hw_init(kms);
517 if (ret) {
Mamta Shukla6a41da12018-10-20 23:19:26 +0530518 DRM_DEV_ERROR(dev, "kms hw init failed: %d\n", ret);
Jeykumar Sankaran77050c32018-06-27 14:35:28 -0400519 goto err_msm_uninit;
Rob Clarkc8afe682013-06-26 12:44:06 -0400520 }
521 }
522
Archit Taneja2b669872016-05-02 11:05:54 +0530523 ddev->mode_config.funcs = &mode_config_funcs;
Sean Pauld14659f2018-02-28 14:19:05 -0500524 ddev->mode_config.helper_private = &mode_config_helper_funcs;
Rob Clarkc8afe682013-06-26 12:44:06 -0400525
Jeykumar Sankaran25fdd592018-06-27 15:26:09 -0400526 /**
527 * this priority was found during empiric testing to have appropriate
528 * realtime scheduling to process display updates and interact with
529 * other real time and normal priority task
530 */
531 param.sched_priority = 16;
532 for (i = 0; i < priv->num_crtcs; i++) {
Jeykumar Sankaran25fdd592018-06-27 15:26:09 -0400533 /* initialize event thread */
534 priv->event_thread[i].crtc_id = priv->crtcs[i]->base.id;
535 kthread_init_worker(&priv->event_thread[i].worker);
536 priv->event_thread[i].dev = ddev;
537 priv->event_thread[i].thread =
538 kthread_run(kthread_worker_fn,
539 &priv->event_thread[i].worker,
540 "crtc_event:%d", priv->event_thread[i].crtc_id);
Jeykumar Sankaran7f9743a2018-10-10 14:11:16 -0700541 if (IS_ERR(priv->event_thread[i].thread)) {
Linus Torvalds4971f092018-12-25 11:48:26 -0800542 DRM_DEV_ERROR(dev, "failed to create crtc_event kthread\n");
Jeykumar Sankaran7f9743a2018-10-10 14:11:16 -0700543 priv->event_thread[i].thread = NULL;
544 goto err_msm_uninit;
545 }
546
Jeykumar Sankaran25fdd592018-06-27 15:26:09 -0400547 ret = sched_setscheduler(priv->event_thread[i].thread,
Jeykumar Sankaran7f9743a2018-10-10 14:11:16 -0700548 SCHED_FIFO, &param);
Jeykumar Sankaran25fdd592018-06-27 15:26:09 -0400549 if (ret)
Jeykumar Sankaran7f9743a2018-10-10 14:11:16 -0700550 dev_warn(dev, "event_thread set priority failed:%d\n",
551 ret);
Jeykumar Sankaran25fdd592018-06-27 15:26:09 -0400552 }
553
Archit Taneja2b669872016-05-02 11:05:54 +0530554 ret = drm_vblank_init(ddev, priv->num_crtcs);
Rob Clarkc8afe682013-06-26 12:44:06 -0400555 if (ret < 0) {
Mamta Shukla6a41da12018-10-20 23:19:26 +0530556 DRM_DEV_ERROR(dev, "failed to initialize vblank\n");
Jeykumar Sankaran77050c32018-06-27 14:35:28 -0400557 goto err_msm_uninit;
Rob Clarkc8afe682013-06-26 12:44:06 -0400558 }
559
Archit Tanejaa2b3a552016-05-18 15:06:03 +0530560 if (kms) {
561 pm_runtime_get_sync(dev);
562 ret = drm_irq_install(ddev, kms->irq);
563 pm_runtime_put_sync(dev);
564 if (ret < 0) {
Mamta Shukla6a41da12018-10-20 23:19:26 +0530565 DRM_DEV_ERROR(dev, "failed to install IRQ handler\n");
Jeykumar Sankaran77050c32018-06-27 14:35:28 -0400566 goto err_msm_uninit;
Archit Tanejaa2b3a552016-05-18 15:06:03 +0530567 }
Rob Clarkc8afe682013-06-26 12:44:06 -0400568 }
569
Archit Taneja2b669872016-05-02 11:05:54 +0530570 ret = drm_dev_register(ddev, 0);
Rob Clarka7d3c952014-05-30 14:47:38 -0400571 if (ret)
Jeykumar Sankaran77050c32018-06-27 14:35:28 -0400572 goto err_msm_uninit;
Rob Clarka7d3c952014-05-30 14:47:38 -0400573
Archit Taneja2b669872016-05-02 11:05:54 +0530574 drm_mode_config_reset(ddev);
575
576#ifdef CONFIG_DRM_FBDEV_EMULATION
Jonathan Mareke6f6d632018-12-04 10:16:58 -0500577 if (kms && fbdev)
Archit Taneja2b669872016-05-02 11:05:54 +0530578 priv->fbdev = msm_fbdev_init(ddev);
579#endif
580
581 ret = msm_debugfs_late_init(ddev);
582 if (ret)
Jeykumar Sankaran77050c32018-06-27 14:35:28 -0400583 goto err_msm_uninit;
Archit Taneja2b669872016-05-02 11:05:54 +0530584
585 drm_kms_helper_poll_init(ddev);
Rob Clarkc8afe682013-06-26 12:44:06 -0400586
587 return 0;
588
Jeykumar Sankaran77050c32018-06-27 14:35:28 -0400589err_msm_uninit:
Archit Taneja2b669872016-05-02 11:05:54 +0530590 msm_drm_uninit(dev);
Rob Clarkc8afe682013-06-26 12:44:06 -0400591 return ret;
Jeykumar Sankaran77050c32018-06-27 14:35:28 -0400592err_destroy_mdss:
593 if (mdss && mdss->funcs)
594 mdss->funcs->destroy(ddev);
595err_free_priv:
596 kfree(priv);
Thomas Zimmermann4d8dc2d2018-09-26 13:48:59 +0200597err_put_drm_dev:
598 drm_dev_put(ddev);
Jeykumar Sankaran77050c32018-06-27 14:35:28 -0400599 return ret;
Rob Clarkc8afe682013-06-26 12:44:06 -0400600}
601
Archit Taneja2b669872016-05-02 11:05:54 +0530602/*
603 * DRM operations:
604 */
605
/*
 * Lazily create the adreno GPU instance on first use (first open()).
 * init_lock serializes concurrent opens so the GPU is loaded only once.
 */
static void load_gpu(struct drm_device *dev)
{
	static DEFINE_MUTEX(init_lock);
	struct msm_drm_private *priv = dev->dev_private;

	mutex_lock(&init_lock);

	if (!priv->gpu)
		priv->gpu = adreno_load_gpu(dev);

	mutex_unlock(&init_lock);
}
618
Jordan Crousef97deca2017-10-20 11:06:57 -0600619static int context_init(struct drm_device *dev, struct drm_file *file)
Rob Clark7198e6b2013-07-19 12:59:32 -0400620{
Jordan Crouse295b22a2019-05-07 12:02:07 -0600621 struct msm_drm_private *priv = dev->dev_private;
Rob Clark7198e6b2013-07-19 12:59:32 -0400622 struct msm_file_private *ctx;
623
Rob Clark7198e6b2013-07-19 12:59:32 -0400624 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
625 if (!ctx)
626 return -ENOMEM;
627
Jordan Crousef97deca2017-10-20 11:06:57 -0600628 msm_submitqueue_init(dev, ctx);
Jordan Crousef7de1542017-10-20 11:06:55 -0600629
Brian Masney7af5cdb2019-06-26 22:05:15 -0400630 ctx->aspace = priv->gpu ? priv->gpu->aspace : NULL;
Rob Clark7198e6b2013-07-19 12:59:32 -0400631 file->driver_priv = ctx;
632
633 return 0;
634}
635
/* drm_driver.open: load the GPU (deferred to first open) and set up the
 * per-file context.
 */
static int msm_open(struct drm_device *dev, struct drm_file *file)
{
	/* For now, load gpu on open.. to avoid the requirement of having
	 * firmware in the initrd.
	 */
	load_gpu(dev);

	return context_init(dev, file);
}
645
/* Tear down a per-file context: close its submitqueues, then free it. */
static void context_close(struct msm_file_private *ctx)
{
	msm_submitqueue_close(ctx);
	kfree(ctx);
}
651
Daniel Vetter94df1452017-03-08 15:12:46 +0100652static void msm_postclose(struct drm_device *dev, struct drm_file *file)
Rob Clarkc8afe682013-06-26 12:44:06 -0400653{
654 struct msm_drm_private *priv = dev->dev_private;
Rob Clark7198e6b2013-07-19 12:59:32 -0400655 struct msm_file_private *ctx = file->driver_priv;
Rob Clark7198e6b2013-07-19 12:59:32 -0400656
Rob Clark7198e6b2013-07-19 12:59:32 -0400657 mutex_lock(&dev->struct_mutex);
658 if (ctx == priv->lastctx)
659 priv->lastctx = NULL;
660 mutex_unlock(&dev->struct_mutex);
661
Jordan Crousef7de1542017-10-20 11:06:55 -0600662 context_close(ctx);
Rob Clarkc8afe682013-06-26 12:44:06 -0400663}
664
/* Top-level interrupt handler: delegate to the active KMS backend. */
static irqreturn_t msm_irq(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;
	/* irq is only installed when a kms backend exists (see msm_drm_init) */
	BUG_ON(!kms);
	return kms->funcs->irq(kms);
}
673
/* drm_driver.irq_preinstall: let the KMS backend mask/clear its irqs. */
static void msm_irq_preinstall(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;
	BUG_ON(!kms);
	kms->funcs->irq_preinstall(kms);
}
681
682static int msm_irq_postinstall(struct drm_device *dev)
683{
684 struct msm_drm_private *priv = dev->dev_private;
685 struct msm_kms *kms = priv->kms;
686 BUG_ON(!kms);
Jordan Crouseab07e0c2018-12-03 15:47:19 -0700687
688 if (kms->funcs->irq_postinstall)
689 return kms->funcs->irq_postinstall(kms);
690
691 return 0;
Rob Clarkc8afe682013-06-26 12:44:06 -0400692}
693
/* drm_driver.irq_uninstall: let the KMS backend disable its irqs. */
static void msm_irq_uninstall(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;
	BUG_ON(!kms);
	kms->funcs->irq_uninstall(kms);
}
701
Thierry Reding88e72712015-09-24 18:35:31 +0200702static int msm_enable_vblank(struct drm_device *dev, unsigned int pipe)
Rob Clarkc8afe682013-06-26 12:44:06 -0400703{
704 struct msm_drm_private *priv = dev->dev_private;
705 struct msm_kms *kms = priv->kms;
706 if (!kms)
707 return -ENXIO;
Thierry Reding88e72712015-09-24 18:35:31 +0200708 DBG("dev=%p, crtc=%u", dev, pipe);
709 return vblank_ctrl_queue_work(priv, pipe, true);
Rob Clarkc8afe682013-06-26 12:44:06 -0400710}
711
Thierry Reding88e72712015-09-24 18:35:31 +0200712static void msm_disable_vblank(struct drm_device *dev, unsigned int pipe)
Rob Clarkc8afe682013-06-26 12:44:06 -0400713{
714 struct msm_drm_private *priv = dev->dev_private;
715 struct msm_kms *kms = priv->kms;
716 if (!kms)
717 return;
Thierry Reding88e72712015-09-24 18:35:31 +0200718 DBG("dev=%p, crtc=%u", dev, pipe);
719 vblank_ctrl_queue_work(priv, pipe, false);
Rob Clarkc8afe682013-06-26 12:44:06 -0400720}
721
722/*
Rob Clark7198e6b2013-07-19 12:59:32 -0400723 * DRM ioctls:
724 */
725
726static int msm_ioctl_get_param(struct drm_device *dev, void *data,
727 struct drm_file *file)
728{
729 struct msm_drm_private *priv = dev->dev_private;
730 struct drm_msm_param *args = data;
731 struct msm_gpu *gpu;
732
733 /* for now, we just have 3d pipe.. eventually this would need to
734 * be more clever to dispatch to appropriate gpu module:
735 */
736 if (args->pipe != MSM_PIPE_3D0)
737 return -EINVAL;
738
739 gpu = priv->gpu;
740
741 if (!gpu)
742 return -ENXIO;
743
744 return gpu->funcs->get_param(gpu, args->param, &args->value);
745}
746
747static int msm_ioctl_gem_new(struct drm_device *dev, void *data,
748 struct drm_file *file)
749{
750 struct drm_msm_gem_new *args = data;
Rob Clark93ddb0d2014-03-03 09:42:33 -0500751
752 if (args->flags & ~MSM_BO_FLAGS) {
753 DRM_ERROR("invalid flags: %08x\n", args->flags);
754 return -EINVAL;
755 }
756
Rob Clark7198e6b2013-07-19 12:59:32 -0400757 return msm_gem_new_handle(dev, file, args->size,
Jordan Crouse0815d772018-11-07 15:35:52 -0700758 args->flags, &args->handle, NULL);
Rob Clark7198e6b2013-07-19 12:59:32 -0400759}
760
/* Convert a userspace drm_msm_timespec (sec/nsec pair) into a ktime_t. */
static inline ktime_t to_ktime(struct drm_msm_timespec timeout)
{
	return ktime_set(timeout.tv_sec, timeout.tv_nsec);
}
Rob Clark7198e6b2013-07-19 12:59:32 -0400765
766static int msm_ioctl_gem_cpu_prep(struct drm_device *dev, void *data,
767 struct drm_file *file)
768{
769 struct drm_msm_gem_cpu_prep *args = data;
770 struct drm_gem_object *obj;
Rob Clark56c2da82015-05-11 11:50:03 -0400771 ktime_t timeout = to_ktime(args->timeout);
Rob Clark7198e6b2013-07-19 12:59:32 -0400772 int ret;
773
Rob Clark93ddb0d2014-03-03 09:42:33 -0500774 if (args->op & ~MSM_PREP_FLAGS) {
775 DRM_ERROR("invalid op: %08x\n", args->op);
776 return -EINVAL;
777 }
778
Chris Wilsona8ad0bd2016-05-09 11:04:54 +0100779 obj = drm_gem_object_lookup(file, args->handle);
Rob Clark7198e6b2013-07-19 12:59:32 -0400780 if (!obj)
781 return -ENOENT;
782
Rob Clark56c2da82015-05-11 11:50:03 -0400783 ret = msm_gem_cpu_prep(obj, args->op, &timeout);
Rob Clark7198e6b2013-07-19 12:59:32 -0400784
Steve Kowalikdc9a9b32018-01-26 14:55:54 +1100785 drm_gem_object_put_unlocked(obj);
Rob Clark7198e6b2013-07-19 12:59:32 -0400786
787 return ret;
788}
789
790static int msm_ioctl_gem_cpu_fini(struct drm_device *dev, void *data,
791 struct drm_file *file)
792{
793 struct drm_msm_gem_cpu_fini *args = data;
794 struct drm_gem_object *obj;
795 int ret;
796
Chris Wilsona8ad0bd2016-05-09 11:04:54 +0100797 obj = drm_gem_object_lookup(file, args->handle);
Rob Clark7198e6b2013-07-19 12:59:32 -0400798 if (!obj)
799 return -ENOENT;
800
801 ret = msm_gem_cpu_fini(obj);
802
Steve Kowalikdc9a9b32018-01-26 14:55:54 +1100803 drm_gem_object_put_unlocked(obj);
Rob Clark7198e6b2013-07-19 12:59:32 -0400804
805 return ret;
806}
807
Jordan Crouse49fd08b2017-05-08 14:35:01 -0600808static int msm_ioctl_gem_info_iova(struct drm_device *dev,
809 struct drm_gem_object *obj, uint64_t *iova)
810{
811 struct msm_drm_private *priv = dev->dev_private;
812
813 if (!priv->gpu)
814 return -EINVAL;
815
Jordan Crouse9fe041f2018-11-07 15:35:50 -0700816 /*
817 * Don't pin the memory here - just get an address so that userspace can
818 * be productive
819 */
Rob Clark8bdcd942017-06-13 11:07:08 -0400820 return msm_gem_get_iova(obj, priv->gpu->aspace, iova);
Jordan Crouse49fd08b2017-05-08 14:35:01 -0600821}
822
/*
 * MSM_GEM_INFO ioctl: multiplexed query/set on a GEM object, selected by
 * args->info:
 *   MSM_INFO_GET_OFFSET - mmap fake-offset, returned in args->value
 *   MSM_INFO_GET_IOVA   - GPU address, returned in args->value
 *   MSM_INFO_SET_NAME   - copy a debug name from userspace into the BO
 *   MSM_INFO_GET_NAME   - copy the BO's debug name back to userspace
 */
static int msm_ioctl_gem_info(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_msm_gem_info *args = data;
	struct drm_gem_object *obj;
	struct msm_gem_object *msm_obj;
	int i, ret = 0;

	if (args->pad)
		return -EINVAL;

	/* Validate args->info/args->len before looking up the object: */
	switch (args->info) {
	case MSM_INFO_GET_OFFSET:
	case MSM_INFO_GET_IOVA:
		/* value returned as immediate, not pointer, so len==0: */
		if (args->len)
			return -EINVAL;
		break;
	case MSM_INFO_SET_NAME:
	case MSM_INFO_GET_NAME:
		break;
	default:
		return -EINVAL;
	}

	obj = drm_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	msm_obj = to_msm_bo(obj);

	switch (args->info) {
	case MSM_INFO_GET_OFFSET:
		args->value = msm_gem_mmap_offset(obj);
		break;
	case MSM_INFO_GET_IOVA:
		ret = msm_ioctl_gem_info_iova(dev, obj, &args->value);
		break;
	case MSM_INFO_SET_NAME:
		/* length check should leave room for terminating null: */
		if (args->len >= sizeof(msm_obj->name)) {
			ret = -EINVAL;
			break;
		}
		/* On a partial copy the name would be garbage, so clear it: */
		if (copy_from_user(msm_obj->name, u64_to_user_ptr(args->value),
				   args->len)) {
			msm_obj->name[0] = '\0';
			ret = -EFAULT;
			break;
		}
		msm_obj->name[args->len] = '\0';
		/* Truncate at the first non-printable character: */
		for (i = 0; i < args->len; i++) {
			if (!isprint(msm_obj->name[i])) {
				msm_obj->name[i] = '\0';
				break;
			}
		}
		break;
	case MSM_INFO_GET_NAME:
		/*
		 * args->value == 0 is a length-only query; otherwise the
		 * user buffer must be large enough for the whole name.
		 */
		if (args->value && (args->len < strlen(msm_obj->name))) {
			ret = -EINVAL;
			break;
		}
		args->len = strlen(msm_obj->name);
		if (args->value) {
			if (copy_to_user(u64_to_user_ptr(args->value),
					 msm_obj->name, args->len))
				ret = -EFAULT;
		}
		break;
	}

	drm_gem_object_put_unlocked(obj);

	return ret;
}
899
/*
 * MSM_WAIT_FENCE ioctl: block (up to args->timeout) until the fence seqno
 * args->fence on the submitqueue args->queueid has signalled.
 * Returns 0 immediately when no GPU is present (nothing to wait for).
 */
static int msm_ioctl_wait_fence(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct drm_msm_wait_fence *args = data;
	ktime_t timeout = to_ktime(args->timeout);
	struct msm_gpu_submitqueue *queue;
	struct msm_gpu *gpu = priv->gpu;
	int ret;

	if (args->pad) {
		DRM_ERROR("invalid pad: %08x\n", args->pad);
		return -EINVAL;
	}

	if (!gpu)
		return 0;

	/* Takes a reference on the queue; dropped below after the wait. */
	queue = msm_submitqueue_get(file->driver_priv, args->queueid);
	if (!queue)
		return -ENOENT;

	/* The queue's priority selects which ringbuffer's fence context: */
	ret = msm_wait_fence(gpu->rb[queue->prio]->fctx, args->fence, &timeout,
		true);

	msm_submitqueue_put(queue);
	return ret;
}
928
/*
 * MSM_GEM_MADVISE ioctl: hint whether a BO's backing pages are needed
 * (WILLNEED) or may be reclaimed under memory pressure (DONTNEED).
 * args->retained reports whether the backing store is still present.
 */
static int msm_ioctl_gem_madvise(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_msm_gem_madvise *args = data;
	struct drm_gem_object *obj;
	int ret;

	switch (args->madv) {
	case MSM_MADV_DONTNEED:
	case MSM_MADV_WILLNEED:
		break;
	default:
		return -EINVAL;
	}

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	obj = drm_gem_object_lookup(file, args->handle);
	if (!obj) {
		ret = -ENOENT;
		goto unlock;
	}

	/* Positive return encodes "retained"; fold it into args and succeed: */
	ret = msm_gem_madvise(obj, args->madv);
	if (ret >= 0) {
		args->retained = ret;
		ret = 0;
	}

	/* Locked variant of put, since dev->struct_mutex is held here: */
	drm_gem_object_put(obj);

unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}
966
Jordan Crousef7de1542017-10-20 11:06:55 -0600967
968static int msm_ioctl_submitqueue_new(struct drm_device *dev, void *data,
969 struct drm_file *file)
970{
971 struct drm_msm_submitqueue *args = data;
972
973 if (args->flags & ~MSM_SUBMITQUEUE_FLAGS)
974 return -EINVAL;
975
Jordan Crousef97deca2017-10-20 11:06:57 -0600976 return msm_submitqueue_create(dev, file->driver_priv, args->prio,
Jordan Crousef7de1542017-10-20 11:06:55 -0600977 args->flags, &args->id);
978}
979
/* MSM_SUBMITQUEUE_QUERY ioctl: thin passthrough to msm_submitqueue_query(). */
static int msm_ioctl_submitqueue_query(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	return msm_submitqueue_query(dev, file->driver_priv, data);
}
Jordan Crousef7de1542017-10-20 11:06:55 -0600985
986static int msm_ioctl_submitqueue_close(struct drm_device *dev, void *data,
987 struct drm_file *file)
988{
989 u32 id = *(u32 *) data;
990
991 return msm_submitqueue_remove(file->driver_priv, id);
992}
993
/* Driver-private ioctl table; all entries are allowed on render nodes. */
static const struct drm_ioctl_desc msm_ioctls[] = {
	DRM_IOCTL_DEF_DRV(MSM_GET_PARAM,    msm_ioctl_get_param,    DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_GEM_NEW,      msm_ioctl_gem_new,      DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_GEM_INFO,     msm_ioctl_gem_info,     DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_GEM_CPU_PREP, msm_ioctl_gem_cpu_prep, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_GEM_CPU_FINI, msm_ioctl_gem_cpu_fini, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_GEM_SUBMIT,   msm_ioctl_gem_submit,   DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_WAIT_FENCE,   msm_ioctl_wait_fence,   DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_GEM_MADVISE,  msm_ioctl_gem_madvise,  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_SUBMITQUEUE_NEW,   msm_ioctl_submitqueue_new,   DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_SUBMITQUEUE_CLOSE, msm_ioctl_submitqueue_close, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_SUBMITQUEUE_QUERY, msm_ioctl_submitqueue_query, DRM_RENDER_ALLOW),
};
1007
/* VM ops for mmap'd GEM objects; fault handler populates pages lazily. */
static const struct vm_operations_struct vm_ops = {
	.fault = msm_gem_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};
1013
/* File operations for /dev/dri/* nodes; mostly stock DRM helpers. */
static const struct file_operations fops = {
	.owner              = THIS_MODULE,
	.open               = drm_open,
	.release            = drm_release,
	.unlocked_ioctl     = drm_ioctl,
	.compat_ioctl       = drm_compat_ioctl,
	.poll               = drm_poll,
	.read               = drm_read,
	.llseek             = no_llseek,
	.mmap               = msm_gem_mmap,	/* driver-specific GEM mmap */
};
1025
/*
 * Main DRM driver description: modesetting + render + atomic, with GEM
 * object management, PRIME import/export and the msm ioctl table above.
 */
static struct drm_driver msm_driver = {
	.driver_features    = DRIVER_GEM |
				DRIVER_RENDER |
				DRIVER_ATOMIC |
				DRIVER_MODESET,
	.open               = msm_open,
	.postclose           = msm_postclose,
	.lastclose          = drm_fb_helper_lastclose,
	.irq_handler        = msm_irq,
	.irq_preinstall     = msm_irq_preinstall,
	.irq_postinstall    = msm_irq_postinstall,
	.irq_uninstall      = msm_irq_uninstall,
	.enable_vblank      = msm_enable_vblank,
	.disable_vblank     = msm_disable_vblank,
	.gem_free_object_unlocked = msm_gem_free_object,
	.gem_vm_ops         = &vm_ops,
	.dumb_create        = msm_gem_dumb_create,
	.dumb_map_offset    = msm_gem_dumb_map_offset,
	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
	.gem_prime_pin      = msm_gem_prime_pin,
	.gem_prime_unpin    = msm_gem_prime_unpin,
	.gem_prime_get_sg_table = msm_gem_prime_get_sg_table,
	.gem_prime_import_sg_table = msm_gem_prime_import_sg_table,
	.gem_prime_vmap     = msm_gem_prime_vmap,
	.gem_prime_vunmap   = msm_gem_prime_vunmap,
	.gem_prime_mmap     = msm_gem_prime_mmap,
#ifdef CONFIG_DEBUG_FS
	.debugfs_init       = msm_debugfs_init,
#endif
	.ioctls             = msm_ioctls,
	.num_ioctls         = ARRAY_SIZE(msm_ioctls),
	.fops               = &fops,
	.name               = "msm",
	.desc               = "MSM Snapdragon DRM",
	.date               = "20130625",
	.major              = MSM_VERSION_MAJOR,
	.minor              = MSM_VERSION_MINOR,
	.patchlevel         = MSM_VERSION_PATCHLEVEL,
};
1066
1067#ifdef CONFIG_PM_SLEEP
/*
 * System-sleep suspend: save the atomic display state so it can be
 * restored on resume. The saved state is parked in priv->pm_state.
 */
static int msm_pm_suspend(struct device *dev)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct msm_drm_private *priv = ddev->dev_private;

	/* A leftover state means suspend without a matching resume: */
	if (WARN_ON(priv->pm_state))
		drm_atomic_state_put(priv->pm_state);

	priv->pm_state = drm_atomic_helper_suspend(ddev);
	if (IS_ERR(priv->pm_state)) {
		int ret = PTR_ERR(priv->pm_state);
		DRM_ERROR("Failed to suspend dpu, %d\n", ret);
		return ret;
	}

	return 0;
}
1085
/*
 * System-sleep resume: restore the display state captured by
 * msm_pm_suspend(). pm_state is cleared only on successful restore.
 */
static int msm_pm_resume(struct device *dev)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct msm_drm_private *priv = ddev->dev_private;
	int ret;

	/* Resume without a prior successful suspend has nothing to restore: */
	if (WARN_ON(!priv->pm_state))
		return -ENOENT;

	ret = drm_atomic_helper_resume(ddev, priv->pm_state);
	if (!ret)
		priv->pm_state = NULL;

	return ret;
}
1101#endif
1102
Archit Taneja774e39e2017-07-28 16:17:07 +05301103#ifdef CONFIG_PM
1104static int msm_runtime_suspend(struct device *dev)
1105{
1106 struct drm_device *ddev = dev_get_drvdata(dev);
1107 struct msm_drm_private *priv = ddev->dev_private;
Rajesh Yadavbc3220b2018-06-21 16:06:10 -04001108 struct msm_mdss *mdss = priv->mdss;
Archit Taneja774e39e2017-07-28 16:17:07 +05301109
1110 DBG("");
1111
Rajesh Yadavbc3220b2018-06-21 16:06:10 -04001112 if (mdss && mdss->funcs)
1113 return mdss->funcs->disable(mdss);
Archit Taneja774e39e2017-07-28 16:17:07 +05301114
1115 return 0;
1116}
1117
1118static int msm_runtime_resume(struct device *dev)
1119{
1120 struct drm_device *ddev = dev_get_drvdata(dev);
1121 struct msm_drm_private *priv = ddev->dev_private;
Rajesh Yadavbc3220b2018-06-21 16:06:10 -04001122 struct msm_mdss *mdss = priv->mdss;
Archit Taneja774e39e2017-07-28 16:17:07 +05301123
1124 DBG("");
1125
Rajesh Yadavbc3220b2018-06-21 16:06:10 -04001126 if (mdss && mdss->funcs)
1127 return mdss->funcs->enable(mdss);
Archit Taneja774e39e2017-07-28 16:17:07 +05301128
1129 return 0;
1130}
1131#endif
1132
/* Combined system-sleep and runtime PM ops for the platform device. */
static const struct dev_pm_ops msm_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(msm_pm_suspend, msm_pm_resume)
	SET_RUNTIME_PM_OPS(msm_runtime_suspend, msm_runtime_resume, NULL)
};
1137
1138/*
Rob Clark060530f2014-03-03 14:19:12 -05001139 * Componentized driver support:
1140 */
1141
Archit Tanejae9fbdaf2015-11-18 12:15:14 +05301142/*
1143 * NOTE: duplication of the same code as exynos or imx (or probably any other).
1144 * so probably some room for some helpers
Rob Clark060530f2014-03-03 14:19:12 -05001145 */
1146static int compare_of(struct device *dev, void *data)
1147{
1148 return dev->of_node == data;
1149}
Rob Clark41e69772013-12-15 16:23:05 -05001150
Archit Taneja812070e2016-05-19 10:38:39 +05301151/*
1152 * Identify what components need to be added by parsing what remote-endpoints
1153 * our MDP output ports are connected to. In the case of LVDS on MDP4, there
1154 * is no external component that we need to add since LVDS is within MDP4
1155 * itself.
1156 */
/*
 * Walk the MDP device's OF graph endpoints and register each connected
 * remote interface (DSI, HDMI, eDP, ...) as a component of the master.
 */
static int add_components_mdp(struct device *mdp_dev,
			      struct component_match **matchptr)
{
	struct device_node *np = mdp_dev->of_node;
	struct device_node *ep_node;
	struct device *master_dev;

	/*
	 * on MDP4 based platforms, the MDP platform device is the component
	 * master that adds other display interface components to itself.
	 *
	 * on MDP5 based platforms, the MDSS platform device is the component
	 * master that adds MDP5 and other display interface components to
	 * itself.
	 */
	if (of_device_is_compatible(np, "qcom,mdp4"))
		master_dev = mdp_dev;
	else
		master_dev = mdp_dev->parent;

	for_each_endpoint_of_node(np, ep_node) {
		struct device_node *intf;
		struct of_endpoint ep;
		int ret;

		ret = of_graph_parse_endpoint(ep_node, &ep);
		if (ret) {
			DRM_DEV_ERROR(mdp_dev, "unable to parse port endpoint\n");
			/* Early exit from the iterator: drop its node ref. */
			of_node_put(ep_node);
			return ret;
		}

		/*
		 * The LCDC/LVDS port on MDP4 is a special case where the
		 * remote-endpoint isn't a component that we need to add
		 */
		if (of_device_is_compatible(np, "qcom,mdp4") &&
		    ep.port == 0)
			continue;

		/*
		 * It's okay if some of the ports don't have a remote endpoint
		 * specified. It just means that the port isn't connected to
		 * any external interface.
		 */
		intf = of_graph_get_remote_port_parent(ep_node);
		if (!intf)
			continue;

		/* Skip interfaces whose DT node is disabled: */
		if (of_device_is_available(intf))
			drm_of_component_match_add(master_dev, matchptr,
						   compare_of, intf);

		of_node_put(intf);
	}

	return 0;
}
1215
Archit Taneja54011e22016-06-06 13:45:34 +05301216static int compare_name_mdp(struct device *dev, void *data)
1217{
1218 return (strstr(dev_name(dev), "mdp") != NULL);
1219}
1220
/*
 * Build the component-match list for the display pipeline: find the MDP/DPU
 * device (directly on MDP4, via the MDSS parent on MDP5/DPU) and add it plus
 * all its connected interfaces.
 */
static int add_display_components(struct device *dev,
				  struct component_match **matchptr)
{
	struct device *mdp_dev;
	int ret;

	/*
	 * MDP5/DPU based devices don't have a flat hierarchy. There is a top
	 * level parent: MDSS, and children: MDP5/DPU, DSI, HDMI, eDP etc.
	 * Populate the children devices, find the MDP5/DPU node, and then add
	 * the interfaces to our components list.
	 */
	if (of_device_is_compatible(dev->of_node, "qcom,mdss") ||
	    of_device_is_compatible(dev->of_node, "qcom,sdm845-mdss")) {
		ret = of_platform_populate(dev->of_node, NULL, NULL, dev);
		if (ret) {
			DRM_DEV_ERROR(dev, "failed to populate children devices\n");
			return ret;
		}

		mdp_dev = device_find_child(dev, NULL, compare_name_mdp);
		if (!mdp_dev) {
			DRM_DEV_ERROR(dev, "failed to find MDSS MDP node\n");
			of_platform_depopulate(dev);
			return -ENODEV;
		}

		/*
		 * Drop the reference taken by device_find_child(); only the
		 * of_node is used below, not the device itself.
		 */
		put_device(mdp_dev);

		/* add the MDP component itself */
		drm_of_component_match_add(dev, matchptr, compare_of,
					   mdp_dev->of_node);
	} else {
		/* MDP4 */
		mdp_dev = dev;
	}

	ret = add_components_mdp(mdp_dev, matchptr);
	if (ret)
		of_platform_depopulate(dev);

	return ret;
}
1264
Archit Tanejadc3ea262016-05-19 13:33:52 +05301265/*
1266 * We don't know what's the best binding to link the gpu with the drm device.
1267 * Fow now, we just hunt for all the possible gpus that we support, and add them
1268 * as components.
1269 */
/* DT compatibles of every GPU flavor this driver can bind as a component. */
static const struct of_device_id msm_gpu_match[] = {
	{ .compatible = "qcom,adreno" },
	{ .compatible = "qcom,adreno-3xx" },
	{ .compatible = "amd,imageon" },	/* pre-Adreno i.MX-era GPU */
	{ .compatible = "qcom,kgsl-3d0" },	/* downstream/legacy naming */
	{ },
};
1277
Archit Taneja7d526fcf2016-05-19 10:33:57 +05301278static int add_gpu_components(struct device *dev,
1279 struct component_match **matchptr)
1280{
Archit Tanejadc3ea262016-05-19 13:33:52 +05301281 struct device_node *np;
1282
1283 np = of_find_matching_node(NULL, msm_gpu_match);
1284 if (!np)
1285 return 0;
1286
Jeffrey Hugo9ca7ad62019-06-26 11:00:15 -07001287 if (of_device_is_available(np))
1288 drm_of_component_match_add(dev, matchptr, compare_of, np);
Archit Tanejadc3ea262016-05-19 13:33:52 +05301289
1290 of_node_put(np);
1291
1292 return 0;
Archit Taneja7d526fcf2016-05-19 10:33:57 +05301293}
1294
/* Component-master bind: all components are ready, bring up the DRM device. */
static int msm_drm_bind(struct device *dev)
{
	return msm_drm_init(dev, &msm_driver);
}
1299
/* Component-master unbind: tear the DRM device back down. */
static void msm_drm_unbind(struct device *dev)
{
	msm_drm_uninit(dev);
}
1304
/* Component-framework master ops tying bind/unbind to DRM init/uninit. */
static const struct component_master_ops msm_drm_ops = {
	.bind = msm_drm_bind,
	.unbind = msm_drm_unbind,
};
1309
1310/*
1311 * Platform driver:
1312 */
1313
/*
 * Platform probe: collect display components (when a KMS variant is
 * selected by the DT match data) and GPU components, then register as a
 * component master. All failure paths after children may have been
 * populated go through the depopulating "fail" label.
 */
static int msm_pdev_probe(struct platform_device *pdev)
{
	struct component_match *match = NULL;
	int ret;

	/* get_mdp_ver() == 0 means GPU-only (no display hardware bound): */
	if (get_mdp_ver(pdev)) {
		ret = add_display_components(&pdev->dev, &match);
		if (ret)
			return ret;
	}

	ret = add_gpu_components(&pdev->dev, &match);
	if (ret)
		goto fail;

	/* on all devices that I am aware of, iommu's which can map
	 * any address the cpu can see are used:
	 */
	ret = dma_set_mask_and_coherent(&pdev->dev, ~0);
	if (ret)
		goto fail;

	ret = component_master_add_with_match(&pdev->dev, &msm_drm_ops, match);
	if (ret)
		goto fail;

	return 0;

fail:
	of_platform_depopulate(&pdev->dev);
	return ret;
}
1346
/* Platform remove: unregister the component master and remove children. */
static int msm_pdev_remove(struct platform_device *pdev)
{
	component_master_del(&pdev->dev, &msm_drm_ops);
	of_platform_depopulate(&pdev->dev);

	return 0;
}
1354
/* DT match table; .data selects the KMS flavor (see get_mdp_ver()). */
static const struct of_device_id dt_match[] = {
	{ .compatible = "qcom,mdp4", .data = (void *)KMS_MDP4 },
	{ .compatible = "qcom,mdss", .data = (void *)KMS_MDP5 },
	{ .compatible = "qcom,sdm845-mdss", .data = (void *)KMS_DPU },
	{}
};
MODULE_DEVICE_TABLE(of, dt_match);
1362
/* Platform driver glue: probe/remove plus PM ops and the DT match table. */
static struct platform_driver msm_platform_driver = {
	.probe      = msm_pdev_probe,
	.remove     = msm_pdev_remove,
	.driver     = {
		.name   = "msm",
		.of_match_table = dt_match,
		.pm     = &msm_pm_ops,
	},
};
1372
/*
 * Module init: register all sub-drivers (KMS backends, DSI, eDP, HDMI,
 * adreno) before the platform driver, so they exist by the time probe
 * runs. Refuses to load when modesetting is disabled via the parameter.
 */
static int __init msm_drm_register(void)
{
	if (!modeset)
		return -EINVAL;

	DBG("init");
	msm_mdp_register();
	msm_dpu_register();
	msm_dsi_register();
	msm_edp_register();
	msm_hdmi_register();
	adreno_register();
	return platform_driver_register(&msm_platform_driver);
}
1387
/* Module exit: unregister the platform driver first, then the sub-drivers. */
static void __exit msm_drm_unregister(void)
{
	DBG("fini");
	platform_driver_unregister(&msm_platform_driver);
	msm_hdmi_unregister();
	adreno_unregister();
	msm_edp_unregister();
	msm_dsi_unregister();
	msm_mdp_unregister();
	msm_dpu_unregister();
}
1399
1400module_init(msm_drm_register);
1401module_exit(msm_drm_unregister);
1402
1403MODULE_AUTHOR("Rob Clark <robdclark@gmail.com");
1404MODULE_DESCRIPTION("MSM DRM Driver");
1405MODULE_LICENSE("GPL");