Thomas Gleixner | 74ba920 | 2019-05-20 09:19:02 +0200 | [diff] [blame] | 1 | // SPDX-License-Identifier: GPL-2.0-or-later |
Daniel Mack | 364dbdf | 2010-11-04 14:44:00 -0400 | [diff] [blame] | 2 | /* |
axel lin | 0b7f1cc | 2011-01-14 09:39:11 +0000 | [diff] [blame] | 3 | * pxa3xx-gcu.c - Linux kernel module for PXA3xx graphics controllers |
Daniel Mack | 364dbdf | 2010-11-04 14:44:00 -0400 | [diff] [blame] | 4 | * |
| 5 | * This driver needs a DirectFB counterpart in user space, communication |
| 6 | * is handled via mmap()ed memory areas and an ioctl. |
| 7 | * |
| 8 | * Copyright (c) 2009 Daniel Mack <daniel@caiaq.de> |
| 9 | * Copyright (c) 2009 Janine Kropp <nin@directfb.org> |
| 10 | * Copyright (c) 2009 Denis Oliver Kropp <dok@directfb.org> |
Daniel Mack | 364dbdf | 2010-11-04 14:44:00 -0400 | [diff] [blame] | 11 | */ |
| 12 | |
| 13 | /* |
| 14 | * WARNING: This controller is attached to System Bus 2 of the PXA which |
Lucas De Marchi | 25985ed | 2011-03-30 22:57:33 -0300 | [diff] [blame] | 15 | * needs its arbiter to be enabled explicitly (CKENB & 1<<9). |
Daniel Mack | 364dbdf | 2010-11-04 14:44:00 -0400 | [diff] [blame] | 16 | * There is currently no way to do this from Linux, so you need to teach |
| 17 | * your bootloader for now. |
| 18 | */ |
| 19 | |
| 20 | #include <linux/module.h> |
Daniel Mack | 364dbdf | 2010-11-04 14:44:00 -0400 | [diff] [blame] | 21 | #include <linux/platform_device.h> |
| 22 | #include <linux/dma-mapping.h> |
| 23 | #include <linux/miscdevice.h> |
| 24 | #include <linux/interrupt.h> |
| 25 | #include <linux/spinlock.h> |
| 26 | #include <linux/uaccess.h> |
| 27 | #include <linux/ioctl.h> |
| 28 | #include <linux/delay.h> |
| 29 | #include <linux/sched.h> |
| 30 | #include <linux/slab.h> |
| 31 | #include <linux/clk.h> |
| 32 | #include <linux/fs.h> |
| 33 | #include <linux/io.h> |
Daniel Mack | aa45ee8e | 2018-07-24 19:11:25 +0200 | [diff] [blame] | 34 | #include <linux/of.h> |
Daniel Mack | 364dbdf | 2010-11-04 14:44:00 -0400 | [diff] [blame] | 35 | |
| 36 | #include "pxa3xx-gcu.h" |
| 37 | |
#define DRV_NAME	"pxa3xx-gcu"
#define MISCDEV_MINOR	197	/* fixed misc-device minor number */

/* Graphics Controller Control Register and its bits */
#define REG_GCCR	0x00
#define GCCR_SYNC_CLR	(1 << 9)
#define GCCR_BP_RST	(1 << 8)
#define GCCR_ABORT	(1 << 6)
#define GCCR_STOP	(1 << 4)

#define REG_GCISCR	0x04	/* interrupt status register */
#define REG_GCIECR	0x08	/* interrupt enable register */
#define REG_GCRBBR	0x20	/* ring buffer base address */
#define REG_GCRBLR	0x24	/* ring buffer length */
#define REG_GCRBHR	0x28	/* ring buffer head */
#define REG_GCRBTR	0x2C	/* ring buffer tail */
#define REG_GCRBEXHR	0x30	/* ring buffer execution head */

/* interrupt bits: end-of-buffer and end-of-execution-of-buffer */
#define IE_EOB		(1 << 0)
#define IE_EEOB		(1 << 5)
#define IE_ALL		0xff

/* size of the mmap()able shared area handed to user space */
#define SHARED_SIZE	PAGE_ALIGN(sizeof(struct pxa3xx_gcu_shared))

/* #define PXA3XX_GCU_DEBUG */
/* #define PXA3XX_GCU_DEBUG_TIMER */

/*
 * QDUMP() prints a status line only when debugging is compiled in;
 * QERROR() always prints at KERN_ERR level. Both expand to QPRINT(),
 * defined below after the register accessors.
 */
#ifdef PXA3XX_GCU_DEBUG
#define QDUMP(msg)					\
	do {						\
		QPRINT(priv, KERN_DEBUG, msg);		\
	} while (0)
#else
#define QDUMP(msg)	do {} while (0)
#endif

#define QERROR(msg)					\
	do {						\
		QPRINT(priv, KERN_ERR, msg);		\
	} while (0)
/*
 * One DMA-coherent command batch buffer. Batches live on exactly one of
 * the driver's singly-linked lists at a time: free, ready or running.
 */
struct pxa3xx_gcu_batch {
	struct pxa3xx_gcu_batch *next;	/* next batch on the same list */
	u32 *ptr;			/* CPU virtual address of the buffer */
	dma_addr_t phys;		/* DMA address handed to the GCU */
	unsigned long length;		/* payload length in 32-bit words */
};
| 84 | |
/* Per-device driver state, allocated once in probe(). */
struct pxa3xx_gcu_priv {
	struct device		 *dev;		/* platform device, for DMA API */
	void __iomem		 *mmio_base;	/* mapped GCU registers */
	struct clk		 *clk;
	struct pxa3xx_gcu_shared *shared;	/* area mmap()ed to user space */
	dma_addr_t		  shared_phys;
	struct resource		 *resource_mem;	/* MMIO resource, re-exported via mmap */
	struct miscdevice	  misc_dev;	/* embedded so container_of() works */
	wait_queue_head_t	  wait_idle;	/* woken when the GCU goes idle */
	wait_queue_head_t	  wait_free;	/* woken when a batch is freed */
	spinlock_t		  spinlock;	/* protects the three lists below */
	struct timespec64	  base_time;	/* reference time for debug prints */

	struct pxa3xx_gcu_batch *free;		/* batches available to writers */
	struct pxa3xx_gcu_batch *ready;		/* queued, not yet submitted */
	struct pxa3xx_gcu_batch *ready_last;	/* tail of the ready list */
	struct pxa3xx_gcu_batch *running;	/* currently executed by the HW */
};
| 103 | |
/* Read a 32-bit GCU register at byte offset @off. */
static inline unsigned long
gc_readl(struct pxa3xx_gcu_priv *priv, unsigned int off)
{
	return __raw_readl(priv->mmio_base + off);
}
| 109 | |
/* Write @val to the 32-bit GCU register at byte offset @off. */
static inline void
gc_writel(struct pxa3xx_gcu_priv *priv, unsigned int off, unsigned long val)
{
	__raw_writel(val, priv->mmio_base + off);
}
| 115 | |
/*
 * QPRINT(priv, level, msg) - print a timestamped one-line state dump.
 *
 * Prints the time elapsed since the last reset (priv->base_time), the
 * calling function, the caller-supplied message and a snapshot of the
 * hardware state: run flag, interrupt status, ring-buffer base/length,
 * and the execution-head, head and tail offsets in words relative to
 * the ring base. Used via the QDUMP()/QERROR() wrappers above.
 */
#define QPRINT(priv, level, msg)					\
	do {								\
		struct timespec64 ts;					\
		struct pxa3xx_gcu_shared *shared = priv->shared;	\
		u32 base = gc_readl(priv, REG_GCRBBR);			\
									\
		ktime_get_ts64(&ts);					\
		ts = timespec64_sub(ts, priv->base_time);		\
									\
		printk(level "%lld.%03ld.%03ld - %-17s: %-21s (%s, "	\
			"STATUS "					\
			"0x%02lx, B 0x%08lx [%ld], E %5ld, H %5ld, "	\
			"T %5ld)\n",					\
			(s64)(ts.tv_sec),				\
			ts.tv_nsec / NSEC_PER_MSEC,			\
			(ts.tv_nsec % NSEC_PER_MSEC) / USEC_PER_MSEC,	\
			__func__, msg,					\
			shared->hw_running ? "running" : "    idle",	\
			gc_readl(priv, REG_GCISCR),			\
			gc_readl(priv, REG_GCRBBR),			\
			gc_readl(priv, REG_GCRBLR),			\
			(gc_readl(priv, REG_GCRBEXHR) - base) / 4,	\
			(gc_readl(priv, REG_GCRBHR) - base) / 4,	\
			(gc_readl(priv, REG_GCRBTR) - base) / 4);	\
	} while (0)
| 141 | |
/*
 * Reset the controller and reinitialize the shared area.
 *
 * Masks all interrupts, aborts any running operation, clears the shared
 * page (resetting all statistics counters), rewinds the ring buffer and
 * finally re-enables all interrupt sources except plain end-of-buffer.
 * Caller must hold priv->spinlock (see the RESET ioctl) or be in probe.
 */
static void
pxa3xx_gcu_reset(struct pxa3xx_gcu_priv *priv)
{
	QDUMP("RESET");

	/* disable interrupts */
	gc_writel(priv, REG_GCIECR, 0);

	/* reset hardware */
	gc_writel(priv, REG_GCCR, GCCR_ABORT);
	gc_writel(priv, REG_GCCR, 0);

	memset(priv->shared, 0, SHARED_SIZE);
	priv->shared->buffer_phys = priv->shared_phys;
	priv->shared->magic = PXA3XX_GCU_SHARED_MAGIC;

	/* new reference point for the debug timestamps */
	ktime_get_ts64(&priv->base_time);

	/* set up the ring buffer pointers */
	gc_writel(priv, REG_GCRBLR, 0);
	gc_writel(priv, REG_GCRBBR, priv->shared_phys);
	gc_writel(priv, REG_GCRBTR, priv->shared_phys);

	/* enable all IRQs except EOB */
	gc_writel(priv, REG_GCIECR, IE_ALL & ~IE_EOB);
}
| 168 | |
/*
 * Print a full hardware state snapshot at KERN_DEBUG level.
 * Called from the IRQ handler when an unexpected interrupt is seen.
 */
static void
dump_whole_state(struct pxa3xx_gcu_priv *priv)
{
	struct pxa3xx_gcu_shared *sh = priv->shared;
	u32 base = gc_readl(priv, REG_GCRBBR);

	QDUMP("DUMP");

	printk(KERN_DEBUG "== PXA3XX-GCU DUMP ==\n"
		"%s, STATUS 0x%02lx, B 0x%08lx [%ld], E %5ld, H %5ld, T %5ld\n",
		sh->hw_running ? "running" : "idle   ",
		gc_readl(priv, REG_GCISCR),
		gc_readl(priv, REG_GCRBBR),
		gc_readl(priv, REG_GCRBLR),
		(gc_readl(priv, REG_GCRBEXHR) - base) / 4,
		(gc_readl(priv, REG_GCRBHR) - base) / 4,
		(gc_readl(priv, REG_GCRBTR) - base) / 4);
}
| 187 | |
| 188 | static void |
| 189 | flush_running(struct pxa3xx_gcu_priv *priv) |
| 190 | { |
| 191 | struct pxa3xx_gcu_batch *running = priv->running; |
| 192 | struct pxa3xx_gcu_batch *next; |
| 193 | |
| 194 | while (running) { |
| 195 | next = running->next; |
| 196 | running->next = priv->free; |
| 197 | priv->free = running; |
| 198 | running = next; |
| 199 | } |
| 200 | |
| 201 | priv->running = NULL; |
| 202 | } |
| 203 | |
/*
 * Submit all batches on the "ready" list to the hardware.
 *
 * Builds a master ring buffer in the shared area that chains to each
 * queued batch buffer, moves the ready list to "running", and programs
 * the ring base/tail/length registers to kick off execution.
 * Caller must hold priv->spinlock.
 */
static void
run_ready(struct pxa3xx_gcu_priv *priv)
{
	unsigned int num = 0;
	struct pxa3xx_gcu_shared *shared = priv->shared;
	struct pxa3xx_gcu_batch *ready = priv->ready;

	QDUMP("Start");

	BUG_ON(!ready);

	/* 0x05000000: presumably a no-op/marker command — TODO confirm
	 * against the PXA3xx GCU instruction set documentation */
	shared->buffer[num++] = 0x05000000;

	while (ready) {
		/* 0x00000001 + address: presumably a "branch to buffer"
		 * command pair — TODO confirm against HW docs */
		shared->buffer[num++] = 0x00000001;
		shared->buffer[num++] = ready->phys;
		ready = ready->next;
	}

	shared->buffer[num++] = 0x05000000;
	priv->running = priv->ready;
	priv->ready = priv->ready_last = NULL;
	/* stop fetching while the ring registers are reprogrammed */
	gc_writel(priv, REG_GCRBLR, 0);
	shared->hw_running = 1;

	/* ring base address */
	gc_writel(priv, REG_GCRBBR, shared->buffer_phys);

	/* ring tail address */
	gc_writel(priv, REG_GCRBTR, shared->buffer_phys + num * 4);

	/* ring length, rounded up to a multiple of 64 words */
	gc_writel(priv, REG_GCRBLR, ((num + 63) & ~63) * 4);
}
| 238 | |
/*
 * GCU interrupt handler.
 *
 * On "end of execution of buffer" (EEOB): recycle the finished batches,
 * wake writers waiting for a free batch, and either submit the next set
 * of ready batches or mark the hardware idle and wake idle-waiters.
 * Any other (unexpected) status triggers a full state dump.
 */
static irqreturn_t
pxa3xx_gcu_handle_irq(int irq, void *ctx)
{
	struct pxa3xx_gcu_priv *priv = ctx;
	struct pxa3xx_gcu_shared *shared = priv->shared;
	u32 status = gc_readl(priv, REG_GCISCR) & IE_ALL;

	QDUMP("-Interrupt");

	if (!status)
		return IRQ_NONE;

	spin_lock(&priv->spinlock);
	shared->num_interrupts++;

	if (status & IE_EEOB) {
		QDUMP(" [EEOB]");

		flush_running(priv);
		wake_up_all(&priv->wait_free);

		if (priv->ready) {
			run_ready(priv);
		} else {
			/* There is no more data prepared by the userspace.
			 * Set hw_running = 0 and wait for the next userspace
			 * kick-off */
			shared->num_idle++;
			shared->hw_running = 0;

			QDUMP("  '-> Idle.");

			/* set ring buffer length to zero */
			gc_writel(priv, REG_GCRBLR, 0);

			wake_up_all(&priv->wait_idle);
		}

		shared->num_done++;
	} else {
		QERROR(" [???]");
		dump_whole_state(priv);
	}

	/* Clear the interrupt */
	gc_writel(priv, REG_GCISCR, status);
	spin_unlock(&priv->spinlock);

	return IRQ_HANDLED;
}
| 289 | |
/*
 * Block until the hardware has drained all submitted batches.
 *
 * Waits in 4-second slices; if a slice times out with no forward
 * progress (execution head and interrupt count unchanged), gives up
 * with -ETIMEDOUT. Interrupted sleeps return -ERESTARTSYS.
 *
 * NOTE(review): when the wait completes normally, ret is the positive
 * number of remaining jiffies and is returned as-is — the WAIT_IDLE
 * ioctl therefore returns a positive value rather than 0; verify
 * callers treat any non-negative value as success.
 */
static int
pxa3xx_gcu_wait_idle(struct pxa3xx_gcu_priv *priv)
{
	int ret = 0;

	QDUMP("Waiting for idle...");

	/* Does not need to be atomic. There's a lock in user space,
	 * but anyhow, this is just for statistics. */
	priv->shared->num_wait_idle++;

	while (priv->shared->hw_running) {
		int num = priv->shared->num_interrupts;
		u32 rbexhr = gc_readl(priv, REG_GCRBEXHR);

		ret = wait_event_interruptible_timeout(priv->wait_idle,
					!priv->shared->hw_running, HZ*4);

		/* non-zero: either idle now (> 0) or a signal (< 0) */
		if (ret != 0)
			break;

		/* timeout: only an error if the HW made no progress */
		if (gc_readl(priv, REG_GCRBEXHR) == rbexhr &&
		    priv->shared->num_interrupts == num) {
			QERROR("TIMEOUT");
			ret = -ETIMEDOUT;
			break;
		}
	}

	QDUMP("done");

	return ret;
}
| 323 | |
/*
 * Block until at least one batch buffer is back on the free list.
 *
 * Waits in 4-second slices; a slice that times out while the hardware's
 * execution head has not moved is treated as a hang (-ETIMEDOUT).
 * Interrupted sleeps return -ERESTARTSYS. Callers only test for ret < 0,
 * so a leftover positive jiffies count from the last wakeup is harmless.
 */
static int
pxa3xx_gcu_wait_free(struct pxa3xx_gcu_priv *priv)
{
	int ret = 0;

	QDUMP("Waiting for free...");

	/* Does not need to be atomic. There's a lock in user space,
	 * but anyhow, this is just for statistics. */
	priv->shared->num_wait_free++;

	while (!priv->free) {
		u32 rbexhr = gc_readl(priv, REG_GCRBEXHR);

		ret = wait_event_interruptible_timeout(priv->wait_free,
						       priv->free, HZ*4);

		if (ret < 0)
			break;

		/* woken with a free buffer: re-check the loop condition */
		if (ret > 0)
			continue;

		/* timeout: only an error if the HW made no progress */
		if (gc_readl(priv, REG_GCRBEXHR) == rbexhr) {
			QERROR("TIMEOUT");
			ret = -ETIMEDOUT;
			break;
		}
	}

	QDUMP("done");

	return ret;
}
| 358 | |
| 359 | /* Misc device layer */ |
| 360 | |
Daniel Mack | 109393a | 2014-03-05 17:12:46 +0100 | [diff] [blame] | 361 | static inline struct pxa3xx_gcu_priv *to_pxa3xx_gcu_priv(struct file *file) |
Al Viro | 996142e | 2013-04-05 20:39:36 -0400 | [diff] [blame] | 362 | { |
| 363 | struct miscdevice *dev = file->private_data; |
| 364 | return container_of(dev, struct pxa3xx_gcu_priv, misc_dev); |
| 365 | } |
| 366 | |
Daniel Mack | 3437b2b | 2014-03-05 17:12:48 +0100 | [diff] [blame] | 367 | /* |
| 368 | * provide an empty .open callback, so the core sets file->private_data |
| 369 | * for us. |
| 370 | */ |
| 371 | static int pxa3xx_gcu_open(struct inode *inode, struct file *file) |
| 372 | { |
| 373 | return 0; |
| 374 | } |
| 375 | |
Daniel Mack | 364dbdf | 2010-11-04 14:44:00 -0400 | [diff] [blame] | 376 | static ssize_t |
Daniel Mack | 109393a | 2014-03-05 17:12:46 +0100 | [diff] [blame] | 377 | pxa3xx_gcu_write(struct file *file, const char *buff, |
| 378 | size_t count, loff_t *offp) |
Daniel Mack | 364dbdf | 2010-11-04 14:44:00 -0400 | [diff] [blame] | 379 | { |
| 380 | int ret; |
| 381 | unsigned long flags; |
| 382 | struct pxa3xx_gcu_batch *buffer; |
Daniel Mack | 109393a | 2014-03-05 17:12:46 +0100 | [diff] [blame] | 383 | struct pxa3xx_gcu_priv *priv = to_pxa3xx_gcu_priv(file); |
Daniel Mack | 364dbdf | 2010-11-04 14:44:00 -0400 | [diff] [blame] | 384 | |
| 385 | int words = count / 4; |
| 386 | |
| 387 | /* Does not need to be atomic. There's a lock in user space, |
| 388 | * but anyhow, this is just for statistics. */ |
| 389 | priv->shared->num_writes++; |
Daniel Mack | 364dbdf | 2010-11-04 14:44:00 -0400 | [diff] [blame] | 390 | priv->shared->num_words += words; |
| 391 | |
| 392 | /* Last word reserved for batch buffer end command */ |
| 393 | if (words >= PXA3XX_GCU_BATCH_WORDS) |
| 394 | return -E2BIG; |
| 395 | |
| 396 | /* Wait for a free buffer */ |
| 397 | if (!priv->free) { |
| 398 | ret = pxa3xx_gcu_wait_free(priv); |
| 399 | if (ret < 0) |
| 400 | return ret; |
| 401 | } |
| 402 | |
| 403 | /* |
| 404 | * Get buffer from free list |
| 405 | */ |
| 406 | spin_lock_irqsave(&priv->spinlock, flags); |
Daniel Mack | 364dbdf | 2010-11-04 14:44:00 -0400 | [diff] [blame] | 407 | buffer = priv->free; |
| 408 | priv->free = buffer->next; |
Daniel Mack | 364dbdf | 2010-11-04 14:44:00 -0400 | [diff] [blame] | 409 | spin_unlock_irqrestore(&priv->spinlock, flags); |
| 410 | |
| 411 | |
| 412 | /* Copy data from user into buffer */ |
| 413 | ret = copy_from_user(buffer->ptr, buff, words * 4); |
| 414 | if (ret) { |
| 415 | spin_lock_irqsave(&priv->spinlock, flags); |
| 416 | buffer->next = priv->free; |
| 417 | priv->free = buffer; |
| 418 | spin_unlock_irqrestore(&priv->spinlock, flags); |
axel lin | 0b7f1cc | 2011-01-14 09:39:11 +0000 | [diff] [blame] | 419 | return -EFAULT; |
Daniel Mack | 364dbdf | 2010-11-04 14:44:00 -0400 | [diff] [blame] | 420 | } |
| 421 | |
| 422 | buffer->length = words; |
| 423 | |
| 424 | /* Append batch buffer end command */ |
| 425 | buffer->ptr[words] = 0x01000000; |
| 426 | |
| 427 | /* |
| 428 | * Add buffer to ready list |
| 429 | */ |
| 430 | spin_lock_irqsave(&priv->spinlock, flags); |
| 431 | |
| 432 | buffer->next = NULL; |
| 433 | |
| 434 | if (priv->ready) { |
| 435 | BUG_ON(priv->ready_last == NULL); |
| 436 | |
| 437 | priv->ready_last->next = buffer; |
| 438 | } else |
| 439 | priv->ready = buffer; |
| 440 | |
| 441 | priv->ready_last = buffer; |
| 442 | |
| 443 | if (!priv->shared->hw_running) |
| 444 | run_ready(priv); |
| 445 | |
| 446 | spin_unlock_irqrestore(&priv->spinlock, flags); |
| 447 | |
| 448 | return words * 4; |
| 449 | } |
| 450 | |
| 451 | |
| 452 | static long |
Daniel Mack | 109393a | 2014-03-05 17:12:46 +0100 | [diff] [blame] | 453 | pxa3xx_gcu_ioctl(struct file *file, unsigned int cmd, unsigned long arg) |
Daniel Mack | 364dbdf | 2010-11-04 14:44:00 -0400 | [diff] [blame] | 454 | { |
| 455 | unsigned long flags; |
Daniel Mack | 109393a | 2014-03-05 17:12:46 +0100 | [diff] [blame] | 456 | struct pxa3xx_gcu_priv *priv = to_pxa3xx_gcu_priv(file); |
Daniel Mack | 364dbdf | 2010-11-04 14:44:00 -0400 | [diff] [blame] | 457 | |
| 458 | switch (cmd) { |
| 459 | case PXA3XX_GCU_IOCTL_RESET: |
| 460 | spin_lock_irqsave(&priv->spinlock, flags); |
| 461 | pxa3xx_gcu_reset(priv); |
| 462 | spin_unlock_irqrestore(&priv->spinlock, flags); |
| 463 | return 0; |
| 464 | |
| 465 | case PXA3XX_GCU_IOCTL_WAIT_IDLE: |
| 466 | return pxa3xx_gcu_wait_idle(priv); |
| 467 | } |
| 468 | |
| 469 | return -ENOSYS; |
| 470 | } |
| 471 | |
/*
 * mmap() handler exposing two windows, selected by the page offset:
 *   offset 0                      -> the DMA-coherent shared data area
 *   offset SHARED_SIZE (in pages) -> the raw MMIO register range, for
 *                                    direct register access by user space
 * The requested size must match the window size exactly.
 */
static int
pxa3xx_gcu_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned int size = vma->vm_end - vma->vm_start;
	struct pxa3xx_gcu_priv *priv = to_pxa3xx_gcu_priv(file);

	switch (vma->vm_pgoff) {
	case 0:
		/* hand out the shared data area */
		if (size != SHARED_SIZE)
			return -EINVAL;

		return dma_mmap_coherent(priv->dev, vma,
			priv->shared, priv->shared_phys, size);

	case SHARED_SIZE >> PAGE_SHIFT:
		/* hand out the MMIO base for direct register access
		 * from userspace */
		if (size != resource_size(priv->resource_mem))
			return -EINVAL;

		/* registers must not be cached */
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

		return io_remap_pfn_range(vma, vma->vm_start,
				priv->resource_mem->start >> PAGE_SHIFT,
				size, vma->vm_page_prot);
	}

	return -EINVAL;
}
| 502 | |
| 503 | |
#ifdef PXA3XX_GCU_DEBUG_TIMER
/*
 * Optional debug aid: dump the hardware state every five seconds.
 * The timer callback takes no context argument, so the priv pointer is
 * stashed in a file-local static at init time.
 */
static struct timer_list pxa3xx_gcu_debug_timer;
static struct pxa3xx_gcu_priv *debug_timer_priv;

static void pxa3xx_gcu_debug_timedout(struct timer_list *unused)
{
	struct pxa3xx_gcu_priv *priv = debug_timer_priv;

	QERROR("Timer DUMP");

	/* re-arm for the next periodic dump */
	mod_timer(&pxa3xx_gcu_debug_timer, jiffies + 5 * HZ);
}

static void pxa3xx_gcu_init_debug_timer(struct pxa3xx_gcu_priv *priv)
{
	/* init the timer structure */
	debug_timer_priv = priv;
	timer_setup(&pxa3xx_gcu_debug_timer, pxa3xx_gcu_debug_timedout, 0);
	/* first dump immediately; the callback re-arms itself */
	pxa3xx_gcu_debug_timedout(NULL);
}
#else
/* no-op stub when the debug timer is compiled out */
static inline void pxa3xx_gcu_init_debug_timer(struct pxa3xx_gcu_priv *priv) {}
#endif
| 527 | |
| 528 | static int |
Daniel Mack | 9e4f967 | 2014-03-05 17:12:47 +0100 | [diff] [blame] | 529 | pxa3xx_gcu_add_buffer(struct device *dev, |
Daniel Mack | 109393a | 2014-03-05 17:12:46 +0100 | [diff] [blame] | 530 | struct pxa3xx_gcu_priv *priv) |
Daniel Mack | 364dbdf | 2010-11-04 14:44:00 -0400 | [diff] [blame] | 531 | { |
| 532 | struct pxa3xx_gcu_batch *buffer; |
| 533 | |
| 534 | buffer = kzalloc(sizeof(struct pxa3xx_gcu_batch), GFP_KERNEL); |
| 535 | if (!buffer) |
| 536 | return -ENOMEM; |
| 537 | |
Daniel Mack | 9e4f967 | 2014-03-05 17:12:47 +0100 | [diff] [blame] | 538 | buffer->ptr = dma_alloc_coherent(dev, PXA3XX_GCU_BATCH_WORDS * 4, |
Daniel Mack | 364dbdf | 2010-11-04 14:44:00 -0400 | [diff] [blame] | 539 | &buffer->phys, GFP_KERNEL); |
| 540 | if (!buffer->ptr) { |
| 541 | kfree(buffer); |
| 542 | return -ENOMEM; |
| 543 | } |
| 544 | |
| 545 | buffer->next = priv->free; |
Daniel Mack | 364dbdf | 2010-11-04 14:44:00 -0400 | [diff] [blame] | 546 | priv->free = buffer; |
| 547 | |
| 548 | return 0; |
| 549 | } |
| 550 | |
| 551 | static void |
Daniel Mack | 9e4f967 | 2014-03-05 17:12:47 +0100 | [diff] [blame] | 552 | pxa3xx_gcu_free_buffers(struct device *dev, |
Daniel Mack | 109393a | 2014-03-05 17:12:46 +0100 | [diff] [blame] | 553 | struct pxa3xx_gcu_priv *priv) |
Daniel Mack | 364dbdf | 2010-11-04 14:44:00 -0400 | [diff] [blame] | 554 | { |
| 555 | struct pxa3xx_gcu_batch *next, *buffer = priv->free; |
| 556 | |
| 557 | while (buffer) { |
| 558 | next = buffer->next; |
| 559 | |
Daniel Mack | 9e4f967 | 2014-03-05 17:12:47 +0100 | [diff] [blame] | 560 | dma_free_coherent(dev, PXA3XX_GCU_BATCH_WORDS * 4, |
Daniel Mack | 364dbdf | 2010-11-04 14:44:00 -0400 | [diff] [blame] | 561 | buffer->ptr, buffer->phys); |
| 562 | |
| 563 | kfree(buffer); |
Daniel Mack | 364dbdf | 2010-11-04 14:44:00 -0400 | [diff] [blame] | 564 | buffer = next; |
| 565 | } |
| 566 | |
| 567 | priv->free = NULL; |
| 568 | } |
| 569 | |
Daniel Mack | 109393a | 2014-03-05 17:12:46 +0100 | [diff] [blame] | 570 | static const struct file_operations pxa3xx_gcu_miscdev_fops = { |
| 571 | .owner = THIS_MODULE, |
Daniel Mack | 3437b2b | 2014-03-05 17:12:48 +0100 | [diff] [blame] | 572 | .open = pxa3xx_gcu_open, |
Daniel Mack | 109393a | 2014-03-05 17:12:46 +0100 | [diff] [blame] | 573 | .write = pxa3xx_gcu_write, |
| 574 | .unlocked_ioctl = pxa3xx_gcu_ioctl, |
| 575 | .mmap = pxa3xx_gcu_mmap, |
Al Viro | 264bd66 | 2013-04-05 20:44:08 -0400 | [diff] [blame] | 576 | }; |
| 577 | |
Daniel Mack | 9e4f967 | 2014-03-05 17:12:47 +0100 | [diff] [blame] | 578 | static int pxa3xx_gcu_probe(struct platform_device *pdev) |
Daniel Mack | 364dbdf | 2010-11-04 14:44:00 -0400 | [diff] [blame] | 579 | { |
| 580 | int i, ret, irq; |
| 581 | struct resource *r; |
| 582 | struct pxa3xx_gcu_priv *priv; |
Daniel Mack | 9e4f967 | 2014-03-05 17:12:47 +0100 | [diff] [blame] | 583 | struct device *dev = &pdev->dev; |
Daniel Mack | 364dbdf | 2010-11-04 14:44:00 -0400 | [diff] [blame] | 584 | |
Daniel Mack | a9b47c7 | 2014-03-05 17:12:49 +0100 | [diff] [blame] | 585 | priv = devm_kzalloc(dev, sizeof(struct pxa3xx_gcu_priv), GFP_KERNEL); |
Daniel Mack | 364dbdf | 2010-11-04 14:44:00 -0400 | [diff] [blame] | 586 | if (!priv) |
| 587 | return -ENOMEM; |
| 588 | |
Daniel Mack | 364dbdf | 2010-11-04 14:44:00 -0400 | [diff] [blame] | 589 | init_waitqueue_head(&priv->wait_idle); |
| 590 | init_waitqueue_head(&priv->wait_free); |
| 591 | spin_lock_init(&priv->spinlock); |
| 592 | |
| 593 | /* we allocate the misc device structure as part of our own allocation, |
| 594 | * so we can get a pointer to our priv structure later on with |
| 595 | * container_of(). This isn't really necessary as we have a fixed minor |
| 596 | * number anyway, but this is to avoid statics. */ |
| 597 | |
Daniel Mack | 364dbdf | 2010-11-04 14:44:00 -0400 | [diff] [blame] | 598 | priv->misc_dev.minor = MISCDEV_MINOR, |
| 599 | priv->misc_dev.name = DRV_NAME, |
Daniel Mack | 109393a | 2014-03-05 17:12:46 +0100 | [diff] [blame] | 600 | priv->misc_dev.fops = &pxa3xx_gcu_miscdev_fops; |
Daniel Mack | 364dbdf | 2010-11-04 14:44:00 -0400 | [diff] [blame] | 601 | |
Daniel Mack | 364dbdf | 2010-11-04 14:44:00 -0400 | [diff] [blame] | 602 | /* handle IO resources */ |
Daniel Mack | 9e4f967 | 2014-03-05 17:12:47 +0100 | [diff] [blame] | 603 | r = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
Jingoo Han | 9b22b8c5 | 2014-04-08 13:53:10 +0900 | [diff] [blame] | 604 | priv->mmio_base = devm_ioremap_resource(dev, r); |
| 605 | if (IS_ERR(priv->mmio_base)) |
Daniel Mack | a9b47c7 | 2014-03-05 17:12:49 +0100 | [diff] [blame] | 606 | return PTR_ERR(priv->mmio_base); |
Daniel Mack | 364dbdf | 2010-11-04 14:44:00 -0400 | [diff] [blame] | 607 | |
| 608 | /* enable the clock */ |
Daniel Mack | a9b47c7 | 2014-03-05 17:12:49 +0100 | [diff] [blame] | 609 | priv->clk = devm_clk_get(dev, NULL); |
Daniel Mack | 364dbdf | 2010-11-04 14:44:00 -0400 | [diff] [blame] | 610 | if (IS_ERR(priv->clk)) { |
Daniel Mack | 9e4f967 | 2014-03-05 17:12:47 +0100 | [diff] [blame] | 611 | dev_err(dev, "failed to get clock\n"); |
Daniel Mack | a9b47c7 | 2014-03-05 17:12:49 +0100 | [diff] [blame] | 612 | return PTR_ERR(priv->clk); |
Daniel Mack | 364dbdf | 2010-11-04 14:44:00 -0400 | [diff] [blame] | 613 | } |
| 614 | |
| 615 | /* request the IRQ */ |
Daniel Mack | 9e4f967 | 2014-03-05 17:12:47 +0100 | [diff] [blame] | 616 | irq = platform_get_irq(pdev, 0); |
Daniel Mack | 364dbdf | 2010-11-04 14:44:00 -0400 | [diff] [blame] | 617 | if (irq < 0) { |
Gustavo A. R. Silva | 7588f1e | 2017-08-21 16:49:58 +0200 | [diff] [blame] | 618 | dev_err(dev, "no IRQ defined: %d\n", irq); |
| 619 | return irq; |
Daniel Mack | 364dbdf | 2010-11-04 14:44:00 -0400 | [diff] [blame] | 620 | } |
| 621 | |
Daniel Mack | a9b47c7 | 2014-03-05 17:12:49 +0100 | [diff] [blame] | 622 | ret = devm_request_irq(dev, irq, pxa3xx_gcu_handle_irq, |
| 623 | 0, DRV_NAME, priv); |
| 624 | if (ret < 0) { |
Daniel Mack | 9e4f967 | 2014-03-05 17:12:47 +0100 | [diff] [blame] | 625 | dev_err(dev, "request_irq failed\n"); |
Daniel Mack | a9b47c7 | 2014-03-05 17:12:49 +0100 | [diff] [blame] | 626 | return ret; |
| 627 | } |
| 628 | |
| 629 | /* allocate dma memory */ |
| 630 | priv->shared = dma_alloc_coherent(dev, SHARED_SIZE, |
| 631 | &priv->shared_phys, GFP_KERNEL); |
| 632 | if (!priv->shared) { |
| 633 | dev_err(dev, "failed to allocate DMA memory\n"); |
| 634 | return -ENOMEM; |
| 635 | } |
| 636 | |
| 637 | /* register misc device */ |
| 638 | ret = misc_register(&priv->misc_dev); |
| 639 | if (ret < 0) { |
| 640 | dev_err(dev, "misc_register() for minor %d failed\n", |
| 641 | MISCDEV_MINOR); |
| 642 | goto err_free_dma; |
| 643 | } |
| 644 | |
Robert Jarzmik | 9e6e35e | 2015-08-03 22:15:34 +0200 | [diff] [blame] | 645 | ret = clk_prepare_enable(priv->clk); |
Daniel Mack | a9b47c7 | 2014-03-05 17:12:49 +0100 | [diff] [blame] | 646 | if (ret < 0) { |
| 647 | dev_err(dev, "failed to enable clock\n"); |
| 648 | goto err_misc_deregister; |
| 649 | } |
| 650 | |
| 651 | for (i = 0; i < 8; i++) { |
| 652 | ret = pxa3xx_gcu_add_buffer(dev, priv); |
| 653 | if (ret) { |
| 654 | dev_err(dev, "failed to allocate DMA memory\n"); |
| 655 | goto err_disable_clk; |
| 656 | } |
Daniel Mack | 364dbdf | 2010-11-04 14:44:00 -0400 | [diff] [blame] | 657 | } |
| 658 | |
Daniel Mack | 9e4f967 | 2014-03-05 17:12:47 +0100 | [diff] [blame] | 659 | platform_set_drvdata(pdev, priv); |
Daniel Mack | 364dbdf | 2010-11-04 14:44:00 -0400 | [diff] [blame] | 660 | priv->resource_mem = r; |
Christoph Hellwig | 02c486f | 2018-12-21 14:44:02 +0100 | [diff] [blame] | 661 | priv->dev = dev; |
Daniel Mack | 364dbdf | 2010-11-04 14:44:00 -0400 | [diff] [blame] | 662 | pxa3xx_gcu_reset(priv); |
Kees Cook | e4a67df | 2017-11-10 16:34:51 +0100 | [diff] [blame] | 663 | pxa3xx_gcu_init_debug_timer(priv); |
Daniel Mack | 364dbdf | 2010-11-04 14:44:00 -0400 | [diff] [blame] | 664 | |
Daniel Mack | 9e4f967 | 2014-03-05 17:12:47 +0100 | [diff] [blame] | 665 | dev_info(dev, "registered @0x%p, DMA 0x%p (%d bytes), IRQ %d\n", |
Daniel Mack | 364dbdf | 2010-11-04 14:44:00 -0400 | [diff] [blame] | 666 | (void *) r->start, (void *) priv->shared_phys, |
| 667 | SHARED_SIZE, irq); |
| 668 | return 0; |
| 669 | |
Daniel Mack | 364dbdf | 2010-11-04 14:44:00 -0400 | [diff] [blame] | 670 | err_free_dma: |
Daniel Mack | 9e4f967 | 2014-03-05 17:12:47 +0100 | [diff] [blame] | 671 | dma_free_coherent(dev, SHARED_SIZE, |
Daniel Mack | 364dbdf | 2010-11-04 14:44:00 -0400 | [diff] [blame] | 672 | priv->shared, priv->shared_phys); |
| 673 | |
Daniel Mack | 364dbdf | 2010-11-04 14:44:00 -0400 | [diff] [blame] | 674 | err_misc_deregister: |
| 675 | misc_deregister(&priv->misc_dev); |
| 676 | |
Daniel Mack | a9b47c7 | 2014-03-05 17:12:49 +0100 | [diff] [blame] | 677 | err_disable_clk: |
Robert Jarzmik | 9e6e35e | 2015-08-03 22:15:34 +0200 | [diff] [blame] | 678 | clk_disable_unprepare(priv->clk); |
Daniel Mack | a9b47c7 | 2014-03-05 17:12:49 +0100 | [diff] [blame] | 679 | |
Daniel Mack | 364dbdf | 2010-11-04 14:44:00 -0400 | [diff] [blame] | 680 | return ret; |
| 681 | } |
| 682 | |
Daniel Mack | 9e4f967 | 2014-03-05 17:12:47 +0100 | [diff] [blame] | 683 | static int pxa3xx_gcu_remove(struct platform_device *pdev) |
Daniel Mack | 364dbdf | 2010-11-04 14:44:00 -0400 | [diff] [blame] | 684 | { |
Daniel Mack | 9e4f967 | 2014-03-05 17:12:47 +0100 | [diff] [blame] | 685 | struct pxa3xx_gcu_priv *priv = platform_get_drvdata(pdev); |
Daniel Mack | 9e4f967 | 2014-03-05 17:12:47 +0100 | [diff] [blame] | 686 | struct device *dev = &pdev->dev; |
Daniel Mack | 364dbdf | 2010-11-04 14:44:00 -0400 | [diff] [blame] | 687 | |
| 688 | pxa3xx_gcu_wait_idle(priv); |
Daniel Mack | 364dbdf | 2010-11-04 14:44:00 -0400 | [diff] [blame] | 689 | misc_deregister(&priv->misc_dev); |
Daniel Mack | a9b47c7 | 2014-03-05 17:12:49 +0100 | [diff] [blame] | 690 | dma_free_coherent(dev, SHARED_SIZE, priv->shared, priv->shared_phys); |
Daniel Mack | 109393a | 2014-03-05 17:12:46 +0100 | [diff] [blame] | 691 | pxa3xx_gcu_free_buffers(dev, priv); |
Daniel Mack | 364dbdf | 2010-11-04 14:44:00 -0400 | [diff] [blame] | 692 | |
| 693 | return 0; |
| 694 | } |
| 695 | |
Daniel Mack | aa45ee8e | 2018-07-24 19:11:25 +0200 | [diff] [blame] | 696 | #ifdef CONFIG_OF |
| 697 | static const struct of_device_id pxa3xx_gcu_of_match[] = { |
| 698 | { .compatible = "marvell,pxa300-gcu", }, |
| 699 | { } |
| 700 | }; |
| 701 | MODULE_DEVICE_TABLE(of, pxa3xx_gcu_of_match); |
| 702 | #endif |
| 703 | |
Daniel Mack | 364dbdf | 2010-11-04 14:44:00 -0400 | [diff] [blame] | 704 | static struct platform_driver pxa3xx_gcu_driver = { |
| 705 | .probe = pxa3xx_gcu_probe, |
Greg Kroah-Hartman | 48c68c4 | 2012-12-21 13:07:39 -0800 | [diff] [blame] | 706 | .remove = pxa3xx_gcu_remove, |
Daniel Mack | 364dbdf | 2010-11-04 14:44:00 -0400 | [diff] [blame] | 707 | .driver = { |
Daniel Mack | 364dbdf | 2010-11-04 14:44:00 -0400 | [diff] [blame] | 708 | .name = DRV_NAME, |
Daniel Mack | aa45ee8e | 2018-07-24 19:11:25 +0200 | [diff] [blame] | 709 | .of_match_table = of_match_ptr(pxa3xx_gcu_of_match), |
Daniel Mack | 364dbdf | 2010-11-04 14:44:00 -0400 | [diff] [blame] | 710 | }, |
| 711 | }; |
| 712 | |
Axel Lin | 4277f2c | 2011-11-26 10:25:54 +0800 | [diff] [blame] | 713 | module_platform_driver(pxa3xx_gcu_driver); |
Daniel Mack | 364dbdf | 2010-11-04 14:44:00 -0400 | [diff] [blame] | 714 | |
| 715 | MODULE_DESCRIPTION("PXA3xx graphics controller unit driver"); |
| 716 | MODULE_LICENSE("GPL"); |
| 717 | MODULE_ALIAS_MISCDEV(MISCDEV_MINOR); |
| 718 | MODULE_AUTHOR("Janine Kropp <nin@directfb.org>, " |
| 719 | "Denis Oliver Kropp <dok@directfb.org>, " |
| 720 | "Daniel Mack <daniel@caiaq.de>"); |