blob: 4ba3c67a6932ab14d515bd6223f21950e565ba0f [file] [log] [blame]
Thomas Gleixnercaab2772019-06-03 07:44:50 +02001/* SPDX-License-Identifier: GPL-2.0-only */
Rob Clarkc8afe682013-06-26 12:44:06 -04002/*
Jeykumar Sankaran25fdd592018-06-27 15:26:09 -04003 * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
Rob Clarkc8afe682013-06-26 12:44:06 -04004 * Copyright (C) 2013 Red Hat
5 * Author: Rob Clark <robdclark@gmail.com>
Rob Clarkc8afe682013-06-26 12:44:06 -04006 */
7
8#ifndef __MSM_DRV_H__
9#define __MSM_DRV_H__
10
11#include <linux/kernel.h>
12#include <linux/clk.h>
13#include <linux/cpufreq.h>
14#include <linux/module.h>
Rob Clark060530f2014-03-03 14:19:12 -050015#include <linux/component.h>
Rob Clarkc8afe682013-06-26 12:44:06 -040016#include <linux/platform_device.h>
17#include <linux/pm.h>
18#include <linux/pm_runtime.h>
19#include <linux/slab.h>
20#include <linux/list.h>
21#include <linux/iommu.h>
22#include <linux/types.h>
Archit Taneja3d6df062015-06-09 14:17:22 +053023#include <linux/of_graph.h>
Archit Tanejae9fbdaf2015-11-18 12:15:14 +053024#include <linux/of_device.h>
Masahiro Yamada87dfb312019-05-14 15:46:51 -070025#include <linux/sizes.h>
Jeykumar Sankaran25fdd592018-06-27 15:26:09 -040026#include <linux/kthread.h>
Rob Clarkc8afe682013-06-26 12:44:06 -040027
Rob Clarkc8afe682013-06-26 12:44:06 -040028#include <drm/drmP.h>
Rob Clarkcf3a7e42014-11-08 13:21:06 -050029#include <drm/drm_atomic.h>
30#include <drm/drm_atomic_helper.h>
Rob Clarkcf3a7e42014-11-08 13:21:06 -050031#include <drm/drm_plane_helper.h>
Daniel Vetterfcd70cd2019-01-17 22:03:34 +010032#include <drm/drm_probe_helper.h>
Rob Clarkc8afe682013-06-26 12:44:06 -040033#include <drm/drm_fb_helper.h>
Rob Clark7198e6b2013-07-19 12:59:32 -040034#include <drm/msm_drm.h>
Daniel Vetterd9fc9412014-09-23 15:46:53 +020035#include <drm/drm_gem.h>
Rob Clarkc8afe682013-06-26 12:44:06 -040036
37struct msm_kms;
Rob Clark7198e6b2013-07-19 12:59:32 -040038struct msm_gpu;
Rob Clark871d8122013-11-16 12:56:06 -050039struct msm_mmu;
Archit Taneja990a4002016-05-07 23:11:25 +053040struct msm_mdss;
Rob Clarka7d3c952014-05-30 14:47:38 -040041struct msm_rd_state;
Rob Clark70c70f02014-05-30 14:49:43 -040042struct msm_perf_state;
Rob Clarka7d3c952014-05-30 14:47:38 -040043struct msm_gem_submit;
Rob Clarkca762a82016-03-15 17:22:13 -040044struct msm_fence_context;
Rob Clark667ce332016-09-28 19:58:32 -040045struct msm_gem_address_space;
46struct msm_gem_vma;
Rob Clarkc8afe682013-06-26 12:44:06 -040047
Jeykumar Sankaran7305a0c2018-06-27 14:55:25 -040048#define MAX_CRTCS 8
Jeykumar Sankaran25fdd592018-06-27 15:26:09 -040049#define MAX_PLANES 20
Jeykumar Sankaran7305a0c2018-06-27 14:55:25 -040050#define MAX_ENCODERS 8
51#define MAX_BRIDGES 8
52#define MAX_CONNECTORS 8
53
Sean Paul96fc56a2018-08-29 13:49:47 -040054#define FRAC_16_16(mult, div) (((mult) << 16) / (div))
55
/*
 * Per-drm_file driver private state: tracks the GPU submitqueues owned by
 * one open file descriptor (see msm_submitqueue_*() below).
 */
struct msm_file_private {
	rwlock_t queuelock;		/* protects @submitqueues and @queueid */
	struct list_head submitqueues;	/* list of this file's msm_gpu_submitqueue */
	int queueid;			/* counter used to assign the next queue id */
};
Rob Clarkc8afe682013-06-26 12:44:06 -040061
/*
 * Custom KMS plane properties exposed by the MDP driver; values index
 * msm_drm_private::plane_property[].
 */
enum msm_mdp_plane_property {
	PLANE_PROP_ZPOS,		/* plane stacking order */
	PLANE_PROP_ALPHA,		/* global plane alpha */
	PLANE_PROP_PREMULTIPLIED,	/* alpha is premultiplied into color */
	PLANE_PROP_MAX_NUM		/* number of properties, keep last */
};
68
Jordan Crouseb1fc2832017-10-20 11:07:01 -060069#define MSM_GPU_MAX_RINGS 4
Jeykumar Sankaran25fdd592018-06-27 15:26:09 -040070#define MAX_H_TILES_PER_DISPLAY 2
71
/**
 * enum msm_display_caps - features/capabilities supported by displays
 * @MSM_DISPLAY_CAP_VID_MODE: Video or "active" mode supported
 * @MSM_DISPLAY_CAP_CMD_MODE: Command mode supported
 * @MSM_DISPLAY_CAP_HOT_PLUG: Hot plug detection supported
 * @MSM_DISPLAY_CAP_EDID: EDID supported
 */
enum msm_display_caps {
	MSM_DISPLAY_CAP_VID_MODE	= BIT(0),
	MSM_DISPLAY_CAP_CMD_MODE	= BIT(1),
	MSM_DISPLAY_CAP_HOT_PLUG	= BIT(2),
	MSM_DISPLAY_CAP_EDID		= BIT(3),
};
85
/**
 * enum msm_event_wait - type of HW events to wait for
 * @MSM_ENC_COMMIT_DONE: wait for the driver to flush the registers to HW
 * @MSM_ENC_TX_COMPLETE: wait for the HW to transfer the frame to panel
 * @MSM_ENC_VBLANK: wait for the HW VBLANK event (for driver-internal waiters)
 */
enum msm_event_wait {
	MSM_ENC_COMMIT_DONE = 0,
	MSM_ENC_TX_COMPLETE,
	MSM_ENC_VBLANK,
};
97
/**
 * struct msm_display_topology - defines a display topology pipeline
 * @num_lm:   number of layer mixers used
 * @num_enc:  number of compression encoder blocks used
 * @num_intf: number of interfaces the panel is mounted on
 */
struct msm_display_topology {
	u32 num_lm;
	u32 num_enc;
	u32 num_intf;
};
109
/**
 * struct msm_display_info - defines display properties
 * @intf_type: DRM_MODE_ENCODER_ type
 * @capabilities: Bitmask of display flags (see enum msm_display_caps)
 * @num_of_h_tiles: Number of horizontal tiles in case of split interface
 * @h_tile_instance: Controller instance used per tile. Number of elements is
 *                   based on num_of_h_tiles
 * @is_te_using_watchdog_timer: Boolean to indicate watchdog TE is
 *				 used instead of panel TE in cmd mode panels
 */
struct msm_display_info {
	int intf_type;
	uint32_t capabilities;
	uint32_t num_of_h_tiles;
	uint32_t h_tile_instance[MAX_H_TILES_PER_DISPLAY];
	bool is_te_using_watchdog_timer;
};
127
/* Commit/Event thread specific structure: one per CRTC, used to push
 * crtc events (e.g. vblank/frame-done) off to a dedicated kthread.
 */
struct msm_drm_thread {
	struct drm_device *dev;
	struct task_struct *thread;	/* kthread backing @worker */
	unsigned int crtc_id;		/* id of the CRTC this thread services */
	struct kthread_worker worker;	/* work items are queued here */
};
Jordan Crousef97deca2017-10-20 11:06:57 -0600135
/*
 * Top-level driver-private state, hung off drm_device::dev_private.
 * Aggregates the kms/gpu sub-devices, the display object arrays that
 * modeset init fills in, and GEM bookkeeping shared by the whole driver.
 */
struct msm_drm_private {

	struct drm_device *dev;

	struct msm_kms *kms;

	/* subordinate devices, if present: */
	struct platform_device *gpu_pdev;

	/* top level MDSS wrapper device (for MDP5/DPU only) */
	struct msm_mdss *mdss;

	/* possibly this should be in the kms component, but it is
	 * shared by both mdp4 and mdp5..
	 */
	struct hdmi *hdmi;

	/* eDP is for mdp5 only, but kms has not been created
	 * when edp_bind() and edp_init() are called. Here is the only
	 * place to keep the edp instance.
	 */
	struct msm_edp *edp;

	/* DSI is shared by mdp4 and mdp5 */
	struct msm_dsi *dsi[2];

	/* when we have more than one 'msm_gpu' these need to be an array: */
	struct msm_gpu *gpu;
	struct msm_file_private *lastctx;	/* last context to submit, for ctx-switch tracking */
	/* gpu is only set on open(), but we need this info earlier */
	bool is_a2xx;

	struct drm_fb_helper *fbdev;

	struct msm_rd_state *rd;       /* debugfs to dump all submits */
	struct msm_rd_state *hangrd;   /* debugfs to dump hanging submits */
	struct msm_perf_state *perf;   /* debugfs perf counters */

	/* list of GEM objects: */
	struct list_head inactive_list;

	/* worker for delayed free of objects: */
	struct work_struct free_work;
	struct llist_head free_list;

	struct workqueue_struct *wq;

	/* arrays below are populated during modeset init and sized by
	 * the matching num_* counter:
	 */
	unsigned int num_planes;
	struct drm_plane *planes[MAX_PLANES];

	unsigned int num_crtcs;
	struct drm_crtc *crtcs[MAX_CRTCS];

	/* per-CRTC event kthreads (see struct msm_drm_thread) */
	struct msm_drm_thread event_thread[MAX_CRTCS];

	unsigned int num_encoders;
	struct drm_encoder *encoders[MAX_ENCODERS];

	unsigned int num_bridges;
	struct drm_bridge *bridges[MAX_BRIDGES];

	unsigned int num_connectors;
	struct drm_connector *connectors[MAX_CONNECTORS];

	/* Properties */
	struct drm_property *plane_property[PLANE_PROP_MAX_NUM];

	/* VRAM carveout, used when no IOMMU: */
	struct {
		unsigned long size;
		dma_addr_t paddr;
		/* NOTE: mm managed at the page level, size is in # of pages
		 * and position mm_node->start is in # of pages:
		 */
		struct drm_mm mm;
		spinlock_t lock; /* Protects drm_mm node allocation/removal */
	} vram;

	/* shrinker hooks for reclaiming GEM memory under pressure: */
	struct notifier_block vmap_notifier;
	struct shrinker shrinker;

	/* atomic state saved across system suspend/resume */
	struct drm_atomic_state *pm_state;
};
219
/* Driver-private framebuffer format wrapper around a DRM fourcc code */
struct msm_format {
	uint32_t pixel_format;	/* DRM_FORMAT_* fourcc */
};
223
Sean Pauldb8f4d52018-04-03 10:42:23 -0400224int msm_atomic_prepare_fb(struct drm_plane *plane,
225 struct drm_plane_state *new_state);
Sean Pauld14659f2018-02-28 14:19:05 -0500226void msm_atomic_commit_tail(struct drm_atomic_state *state);
Rob Clark870d7382016-11-04 13:51:42 -0400227struct drm_atomic_state *msm_atomic_state_alloc(struct drm_device *dev);
228void msm_atomic_state_clear(struct drm_atomic_state *state);
229void msm_atomic_state_free(struct drm_atomic_state *state);
Rob Clarkcf3a7e42014-11-08 13:21:06 -0500230
Jordan Crousec0ee9792018-11-07 15:35:48 -0700231int msm_gem_init_vma(struct msm_gem_address_space *aspace,
232 struct msm_gem_vma *vma, int npages);
Jordan Crouse7ad0e8c2018-11-07 15:35:51 -0700233void msm_gem_purge_vma(struct msm_gem_address_space *aspace,
234 struct msm_gem_vma *vma);
Rob Clark667ce332016-09-28 19:58:32 -0400235void msm_gem_unmap_vma(struct msm_gem_address_space *aspace,
Jordan Crouse70dc51b2018-11-07 15:35:47 -0700236 struct msm_gem_vma *vma);
Rob Clark667ce332016-09-28 19:58:32 -0400237int msm_gem_map_vma(struct msm_gem_address_space *aspace,
Rob Clarkbbc2cd02019-01-09 14:25:05 -0500238 struct msm_gem_vma *vma, int prot,
239 struct sg_table *sgt, int npages);
Jordan Crouse7ad0e8c2018-11-07 15:35:51 -0700240void msm_gem_close_vma(struct msm_gem_address_space *aspace,
241 struct msm_gem_vma *vma);
Rob Clark667ce332016-09-28 19:58:32 -0400242
Jordan Crouseee546cd2017-03-07 10:02:52 -0700243void msm_gem_address_space_put(struct msm_gem_address_space *aspace);
244
Rob Clark667ce332016-09-28 19:58:32 -0400245struct msm_gem_address_space *
246msm_gem_address_space_create(struct device *dev, struct iommu_domain *domain,
247 const char *name);
Rob Clarkc8afe682013-06-26 12:44:06 -0400248
Jonathan Marekc2052a42018-11-14 17:08:04 -0500249struct msm_gem_address_space *
250msm_gem_address_space_create_a2xx(struct device *dev, struct msm_gpu *gpu,
251 const char *name, uint64_t va_start, uint64_t va_end);
252
Jeykumar Sankaran25fdd592018-06-27 15:26:09 -0400253int msm_register_mmu(struct drm_device *dev, struct msm_mmu *mmu);
254void msm_unregister_mmu(struct drm_device *dev, struct msm_mmu *mmu);
255
Jonathan Marekc2052a42018-11-14 17:08:04 -0500256bool msm_use_mmu(struct drm_device *dev);
257
Rob Clark40e68152016-05-03 09:50:26 -0400258void msm_gem_submit_free(struct msm_gem_submit *submit);
Rob Clark7198e6b2013-07-19 12:59:32 -0400259int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
260 struct drm_file *file);
261
Rob Clark68209392016-05-17 16:19:32 -0400262void msm_gem_shrinker_init(struct drm_device *dev);
263void msm_gem_shrinker_cleanup(struct drm_device *dev);
264
Daniel Thompson77a147e2014-11-12 11:38:14 +0000265int msm_gem_mmap_obj(struct drm_gem_object *obj,
266 struct vm_area_struct *vma);
Rob Clarkc8afe682013-06-26 12:44:06 -0400267int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
Souptick Joardera5f74ec2018-05-21 22:59:48 +0530268vm_fault_t msm_gem_fault(struct vm_fault *vmf);
Rob Clarkc8afe682013-06-26 12:44:06 -0400269uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj);
Rob Clark8bdcd942017-06-13 11:07:08 -0400270int msm_gem_get_iova(struct drm_gem_object *obj,
271 struct msm_gem_address_space *aspace, uint64_t *iova);
Jordan Crouse9fe041f2018-11-07 15:35:50 -0700272int msm_gem_get_and_pin_iova(struct drm_gem_object *obj,
273 struct msm_gem_address_space *aspace, uint64_t *iova);
Rob Clark8bdcd942017-06-13 11:07:08 -0400274uint64_t msm_gem_iova(struct drm_gem_object *obj,
275 struct msm_gem_address_space *aspace);
Jordan Crouse7ad0e8c2018-11-07 15:35:51 -0700276void msm_gem_unpin_iova(struct drm_gem_object *obj,
277 struct msm_gem_address_space *aspace);
Rob Clark05b84912013-09-28 11:28:35 -0400278struct page **msm_gem_get_pages(struct drm_gem_object *obj);
279void msm_gem_put_pages(struct drm_gem_object *obj);
Rob Clarkc8afe682013-06-26 12:44:06 -0400280int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
281 struct drm_mode_create_dumb *args);
Rob Clarkc8afe682013-06-26 12:44:06 -0400282int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
283 uint32_t handle, uint64_t *offset);
Rob Clark05b84912013-09-28 11:28:35 -0400284struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj);
285void *msm_gem_prime_vmap(struct drm_gem_object *obj);
286void msm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
Daniel Thompson77a147e2014-11-12 11:38:14 +0000287int msm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
Rob Clark05b84912013-09-28 11:28:35 -0400288struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev,
Maarten Lankhorstb5e9c1a2014-01-09 11:03:14 +0100289 struct dma_buf_attachment *attach, struct sg_table *sg);
Rob Clark05b84912013-09-28 11:28:35 -0400290int msm_gem_prime_pin(struct drm_gem_object *obj);
291void msm_gem_prime_unpin(struct drm_gem_object *obj);
Rob Clark18f23042016-05-26 16:24:35 -0400292void *msm_gem_get_vaddr(struct drm_gem_object *obj);
Rob Clarkfad33f42017-09-15 08:38:20 -0400293void *msm_gem_get_vaddr_active(struct drm_gem_object *obj);
Rob Clark18f23042016-05-26 16:24:35 -0400294void msm_gem_put_vaddr(struct drm_gem_object *obj);
Rob Clark4cd33c42016-05-17 15:44:49 -0400295int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv);
Rob Clarkb6295f92016-03-15 18:26:28 -0400296int msm_gem_sync_object(struct drm_gem_object *obj,
297 struct msm_fence_context *fctx, bool exclusive);
Rob Clark7198e6b2013-07-19 12:59:32 -0400298void msm_gem_move_to_active(struct drm_gem_object *obj,
Chris Wilsonf54d1862016-10-25 13:00:45 +0100299 struct msm_gpu *gpu, bool exclusive, struct dma_fence *fence);
Rob Clark7198e6b2013-07-19 12:59:32 -0400300void msm_gem_move_to_inactive(struct drm_gem_object *obj);
Rob Clarkba00c3f2016-03-16 18:18:17 -0400301int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout);
Rob Clark7198e6b2013-07-19 12:59:32 -0400302int msm_gem_cpu_fini(struct drm_gem_object *obj);
Rob Clarkc8afe682013-06-26 12:44:06 -0400303void msm_gem_free_object(struct drm_gem_object *obj);
304int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
Jordan Crouse0815d772018-11-07 15:35:52 -0700305 uint32_t size, uint32_t flags, uint32_t *handle, char *name);
Rob Clarkc8afe682013-06-26 12:44:06 -0400306struct drm_gem_object *msm_gem_new(struct drm_device *dev,
307 uint32_t size, uint32_t flags);
Sushmita Susheelendra0e082702017-06-13 16:52:54 -0600308struct drm_gem_object *msm_gem_new_locked(struct drm_device *dev,
309 uint32_t size, uint32_t flags);
Jordan Crouse82232862017-07-27 10:42:40 -0600310void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
311 uint32_t flags, struct msm_gem_address_space *aspace,
312 struct drm_gem_object **bo, uint64_t *iova);
313void *msm_gem_kernel_new_locked(struct drm_device *dev, uint32_t size,
314 uint32_t flags, struct msm_gem_address_space *aspace,
315 struct drm_gem_object **bo, uint64_t *iova);
Jordan Crouse1e29dff2018-11-07 15:35:46 -0700316void msm_gem_kernel_put(struct drm_gem_object *bo,
317 struct msm_gem_address_space *aspace, bool locked);
Rob Clark05b84912013-09-28 11:28:35 -0400318struct drm_gem_object *msm_gem_import(struct drm_device *dev,
Rob Clark79f0e202016-03-16 12:40:35 -0400319 struct dma_buf *dmabuf, struct sg_table *sgt);
Kristian H. Kristensen48e7f182019-03-20 10:09:08 -0700320void msm_gem_free_work(struct work_struct *work);
Rob Clarkc8afe682013-06-26 12:44:06 -0400321
Joe Perches023014e2019-01-17 14:17:36 -0800322__printf(2, 3)
Jordan Crouse0815d772018-11-07 15:35:52 -0700323void msm_gem_object_set_name(struct drm_gem_object *bo, const char *fmt, ...);
324
Rob Clark8bdcd942017-06-13 11:07:08 -0400325int msm_framebuffer_prepare(struct drm_framebuffer *fb,
326 struct msm_gem_address_space *aspace);
327void msm_framebuffer_cleanup(struct drm_framebuffer *fb,
328 struct msm_gem_address_space *aspace);
329uint32_t msm_framebuffer_iova(struct drm_framebuffer *fb,
330 struct msm_gem_address_space *aspace, int plane);
Rob Clarkc8afe682013-06-26 12:44:06 -0400331struct drm_gem_object *msm_framebuffer_bo(struct drm_framebuffer *fb, int plane);
332const struct msm_format *msm_framebuffer_format(struct drm_framebuffer *fb);
Rob Clarkc8afe682013-06-26 12:44:06 -0400333struct drm_framebuffer *msm_framebuffer_create(struct drm_device *dev,
Ville Syrjälä1eb83452015-11-11 19:11:29 +0200334 struct drm_file *file, const struct drm_mode_fb_cmd2 *mode_cmd);
Rob Clark466e5602017-07-11 10:40:13 -0400335struct drm_framebuffer * msm_alloc_stolen_fb(struct drm_device *dev,
336 int w, int h, int p, uint32_t format);
Rob Clarkc8afe682013-06-26 12:44:06 -0400337
338struct drm_fb_helper *msm_fbdev_init(struct drm_device *dev);
Archit Taneja1aaa57f2016-02-25 11:19:45 +0530339void msm_fbdev_free(struct drm_device *dev);
Rob Clarkc8afe682013-06-26 12:44:06 -0400340
Rob Clarkdada25b2013-12-01 12:12:54 -0500341struct hdmi;
Arnd Bergmannfcda50c2016-02-22 22:08:35 +0100342int msm_hdmi_modeset_init(struct hdmi *hdmi, struct drm_device *dev,
Rob Clark067fef32014-11-04 13:33:14 -0500343 struct drm_encoder *encoder);
Arnd Bergmannfcda50c2016-02-22 22:08:35 +0100344void __init msm_hdmi_register(void);
345void __exit msm_hdmi_unregister(void);
Rob Clarkc8afe682013-06-26 12:44:06 -0400346
Hai Li00453982014-12-12 14:41:17 -0500347struct msm_edp;
348void __init msm_edp_register(void);
349void __exit msm_edp_unregister(void);
350int msm_edp_modeset_init(struct msm_edp *edp, struct drm_device *dev,
351 struct drm_encoder *encoder);
352
Hai Lia6895542015-03-31 14:36:33 -0400353struct msm_dsi;
Hai Lia6895542015-03-31 14:36:33 -0400354#ifdef CONFIG_DRM_MSM_DSI
355void __init msm_dsi_register(void);
356void __exit msm_dsi_unregister(void);
357int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, struct drm_device *dev,
Archit Taneja97e001192017-01-16 09:42:03 +0530358 struct drm_encoder *encoder);
Hai Lia6895542015-03-31 14:36:33 -0400359#else
360static inline void __init msm_dsi_register(void)
361{
362}
363static inline void __exit msm_dsi_unregister(void)
364{
365}
366static inline int msm_dsi_modeset_init(struct msm_dsi *msm_dsi,
Archit Taneja97e001192017-01-16 09:42:03 +0530367 struct drm_device *dev,
368 struct drm_encoder *encoder)
Hai Lia6895542015-03-31 14:36:33 -0400369{
370 return -EINVAL;
371}
372#endif
373
Archit Taneja1dd0a0b2016-05-30 16:36:50 +0530374void __init msm_mdp_register(void);
375void __exit msm_mdp_unregister(void);
Jeykumar Sankaran25fdd592018-06-27 15:26:09 -0400376void __init msm_dpu_register(void);
377void __exit msm_dpu_unregister(void);
Archit Taneja1dd0a0b2016-05-30 16:36:50 +0530378
Rob Clarkc8afe682013-06-26 12:44:06 -0400379#ifdef CONFIG_DEBUG_FS
380void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m);
381void msm_gem_describe_objects(struct list_head *list, struct seq_file *m);
382void msm_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m);
Rob Clarka7d3c952014-05-30 14:47:38 -0400383int msm_debugfs_late_init(struct drm_device *dev);
384int msm_rd_debugfs_init(struct drm_minor *minor);
Noralf Trønnes85eac472017-03-07 21:49:22 +0100385void msm_rd_debugfs_cleanup(struct msm_drm_private *priv);
Joe Perches023014e2019-01-17 14:17:36 -0800386__printf(3, 4)
Rob Clark998b9a52017-09-15 10:46:45 -0400387void msm_rd_dump_submit(struct msm_rd_state *rd, struct msm_gem_submit *submit,
388 const char *fmt, ...);
Rob Clark70c70f02014-05-30 14:49:43 -0400389int msm_perf_debugfs_init(struct drm_minor *minor);
Noralf Trønnes85eac472017-03-07 21:49:22 +0100390void msm_perf_debugfs_cleanup(struct msm_drm_private *priv);
Rob Clarka7d3c952014-05-30 14:47:38 -0400391#else
392static inline int msm_debugfs_late_init(struct drm_device *dev) { return 0; }
Joe Perches023014e2019-01-17 14:17:36 -0800393__printf(3, 4)
Arnd Bergmanne6756d72017-11-02 12:21:32 +0100394static inline void msm_rd_dump_submit(struct msm_rd_state *rd, struct msm_gem_submit *submit,
395 const char *fmt, ...) {}
Arnd Bergmann3a270e42017-03-20 10:39:25 +0100396static inline void msm_rd_debugfs_cleanup(struct msm_drm_private *priv) {}
397static inline void msm_perf_debugfs_cleanup(struct msm_drm_private *priv) {}
Rob Clarkc8afe682013-06-26 12:44:06 -0400398#endif
399
Rob Clark720c3bb2017-01-30 11:30:58 -0500400struct clk *msm_clk_get(struct platform_device *pdev, const char *name);
Jordan Crouse8e54eea2018-08-06 11:33:21 -0600401int msm_clk_bulk_get(struct device *dev, struct clk_bulk_data **bulk);
402
403struct clk *msm_clk_bulk_get_clock(struct clk_bulk_data *bulk, int count,
404 const char *name);
Rob Clarkc8afe682013-06-26 12:44:06 -0400405void __iomem *msm_ioremap(struct platform_device *pdev, const char *name,
406 const char *dbgname);
407void msm_writel(u32 data, void __iomem *addr);
408u32 msm_readl(const void __iomem *addr);
409
Jordan Crousef7de1542017-10-20 11:06:55 -0600410struct msm_gpu_submitqueue;
Jordan Crousef97deca2017-10-20 11:06:57 -0600411int msm_submitqueue_init(struct drm_device *drm, struct msm_file_private *ctx);
Jordan Crousef7de1542017-10-20 11:06:55 -0600412struct msm_gpu_submitqueue *msm_submitqueue_get(struct msm_file_private *ctx,
413 u32 id);
Jordan Crousef97deca2017-10-20 11:06:57 -0600414int msm_submitqueue_create(struct drm_device *drm, struct msm_file_private *ctx,
415 u32 prio, u32 flags, u32 *id);
Jordan Crouseb0fb6602019-03-22 14:21:22 -0600416int msm_submitqueue_query(struct drm_device *drm, struct msm_file_private *ctx,
417 struct drm_msm_submitqueue_query *args);
Jordan Crousef7de1542017-10-20 11:06:55 -0600418int msm_submitqueue_remove(struct msm_file_private *ctx, u32 id);
419void msm_submitqueue_close(struct msm_file_private *ctx);
420
421void msm_submitqueue_destroy(struct kref *kref);
422
423
Rob Clark7ed216e2016-11-01 17:42:33 -0400424#define DBG(fmt, ...) DRM_DEBUG_DRIVER(fmt"\n", ##__VA_ARGS__)
425#define VERB(fmt, ...) if (0) DRM_DEBUG_DRIVER(fmt"\n", ##__VA_ARGS__)
Rob Clarkc8afe682013-06-26 12:44:06 -0400426
/*
 * Compute the framebuffer pitch, in bytes, for a row of @width pixels
 * at @bpp bits per pixel.  The width is rounded up to a multiple of
 * 32 pixels because adreno requires pitch aligned to 32 pixels.
 */
static inline int align_pitch(int width, int bpp)
{
	/* round bits up to whole bytes per pixel */
	int bytes_per_pixel = (bpp + 7) / 8;

	return ALIGN(width, 32) * bytes_per_pixel;
}
433
434/* for the generated headers: */
435#define INVALID_IDX(idx) ({BUG(); 0;})
Rob Clark7198e6b2013-07-19 12:59:32 -0400436#define fui(x) ({BUG(); 0;})
437#define util_float_to_half(x) ({BUG(); 0;})
438
Rob Clarkc8afe682013-06-26 12:44:06 -0400439
440#define FIELD(val, name) (((val) & name ## __MASK) >> name ## __SHIFT)
441
442/* for conditionally setting boolean flag(s): */
443#define COND(bool, val) ((bool) ? (val) : 0)
444
Rob Clark340ff412016-03-16 14:57:22 -0400445static inline unsigned long timeout_to_jiffies(const ktime_t *timeout)
446{
447 ktime_t now = ktime_get();
448 unsigned long remaining_jiffies;
449
450 if (ktime_compare(*timeout, now) < 0) {
451 remaining_jiffies = 0;
452 } else {
453 ktime_t rem = ktime_sub(*timeout, now);
454 struct timespec ts = ktime_to_timespec(rem);
455 remaining_jiffies = timespec_to_jiffies(&ts);
456 }
457
458 return remaining_jiffies;
459}
Rob Clarkc8afe682013-06-26 12:44:06 -0400460
461#endif /* __MSM_DRV_H__ */