/* via_dmablit.c -- PCI DMA BitBlt support for the VIA Unichrome/Pro
 *
 * Copyright (C) 2005 Thomas Hellstrom, All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sub license,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Thomas Hellstrom.
 *    Partially based on code obtained from Digeo Inc.
 */


/*
 * Unmaps the DMA mappings.
 * FIXME: Is this a NoOp on x86? Also
 * FIXME: What happens if this one is called and a pending blit has previously done
 * the same DMA mappings?
 */

#include <linux/pagemap.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <drm/drm_device.h>
#include <drm/via_drm.h>

#include "via_dmablit.h"
#include "via_drv.h"

#define VIA_PGDN(x)	(((unsigned long)(x)) & PAGE_MASK)
#define VIA_PGOFF(x)	(((unsigned long)(x)) & ~PAGE_MASK)
#define VIA_PFN(x)	((unsigned long)(x) >> PAGE_SHIFT)

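/*
 * One hardware DMA descriptor: 'mem_addr' is the DMA address of a chunk of
 * system memory within a single page, 'dev_addr' the corresponding
 * frame-buffer address, 'size' the length of the chunk, and 'next' the bus
 * address of the next descriptor in the chain.
 */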
typedef struct _drm_via_descriptor {
	uint32_t mem_addr;
	uint32_t dev_addr;
	uint32_t size;
	uint32_t next;
} drm_via_descriptor_t;


/*
 * Unmap a DMA mapping.
 */



static void
via_unmap_blit_from_device(struct pci_dev *pdev, drm_via_sg_info_t *vsg)
{
	int num_desc = vsg->num_desc;
	unsigned cur_descriptor_page = num_desc / vsg->descriptors_per_page;
	unsigned descriptor_this_page = num_desc % vsg->descriptors_per_page;
	drm_via_descriptor_t *desc_ptr = vsg->desc_pages[cur_descriptor_page] +
		descriptor_this_page;
	dma_addr_t next = vsg->chain_start;

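	/*
	 * Walk the chain in reverse build order, unmapping each descriptor's
	 * own DMA mapping as well as the page segment it points at.
	 */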
	while (num_desc--) {
		if (descriptor_this_page-- == 0) {
			cur_descriptor_page--;
			descriptor_this_page = vsg->descriptors_per_page - 1;
			desc_ptr = vsg->desc_pages[cur_descriptor_page] +
				descriptor_this_page;
		}
		dma_unmap_single(&pdev->dev, next, sizeof(*desc_ptr), DMA_TO_DEVICE);
		dma_unmap_page(&pdev->dev, desc_ptr->mem_addr, desc_ptr->size, vsg->direction);
		next = (dma_addr_t) desc_ptr->next;
		desc_ptr--;
	}
}

/*
 * If mode == 0, only count how many descriptors are needed.
 * If mode == 1, map the DMA pages for the device and also build and map the
 * descriptor chain itself.
 * The descriptors are run in reverse order by the hardware, because we are not
 * allowed to update the 'next' field without a sync call once the descriptor
 * is already mapped.
 */

static void
via_map_blit_for_device(struct pci_dev *pdev,
			const drm_via_dmablit_t *xfer,
			drm_via_sg_info_t *vsg,
			int mode)
{
	unsigned cur_descriptor_page = 0;
	unsigned num_descriptors_this_page = 0;
	unsigned char *mem_addr = xfer->mem_addr;
	unsigned char *cur_mem;
	unsigned char *first_addr = (unsigned char *)VIA_PGDN(mem_addr);
	uint32_t fb_addr = xfer->fb_addr;
	uint32_t cur_fb;
	unsigned long line_len;
	unsigned remaining_len;
	int num_desc = 0;
	int cur_line;
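	/* VIA_DMA_DPR_EC seeded here ends up in the 'next' field of the first
	 * descriptor built, i.e. the last one the engine will process. */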
	dma_addr_t next = 0 | VIA_DMA_DPR_EC;
	drm_via_descriptor_t *desc_ptr = NULL;

	if (mode == 1)
		desc_ptr = vsg->desc_pages[cur_descriptor_page];

	for (cur_line = 0; cur_line < xfer->num_lines; ++cur_line) {

		line_len = xfer->line_length;
		cur_fb = fb_addr;
		cur_mem = mem_addr;

		while (line_len > 0) {

			remaining_len = min(PAGE_SIZE-VIA_PGOFF(cur_mem), line_len);
			line_len -= remaining_len;

			if (mode == 1) {
				desc_ptr->mem_addr =
					dma_map_page(&pdev->dev,
						     vsg->pages[VIA_PFN(cur_mem) -
								VIA_PFN(first_addr)],
						     VIA_PGOFF(cur_mem), remaining_len,
						     vsg->direction);
				desc_ptr->dev_addr = cur_fb;

				desc_ptr->size = remaining_len;
				desc_ptr->next = (uint32_t) next;
				next = dma_map_single(&pdev->dev, desc_ptr, sizeof(*desc_ptr),
						      DMA_TO_DEVICE);
				desc_ptr++;
				if (++num_descriptors_this_page >= vsg->descriptors_per_page) {
					num_descriptors_this_page = 0;
					desc_ptr = vsg->desc_pages[++cur_descriptor_page];
				}
			}

			num_desc++;
			cur_mem += remaining_len;
			cur_fb += remaining_len;
		}

		mem_addr += xfer->mem_stride;
		fb_addr += xfer->fb_stride;
	}

	if (mode == 1) {
		vsg->chain_start = next;
		vsg->state = dr_via_device_mapped;
	}
	vsg->num_desc = num_desc;
}

/*
 * Function that frees up all resources for a blit. It is usable even if the
 * blit info has only been partially built as long as the status enum is consistent
 * with the actual status of the used resources.
 */


static void
via_free_sg_info(struct pci_dev *pdev, drm_via_sg_info_t *vsg)
{
	int i;

	switch (vsg->state) {
	case dr_via_device_mapped:
		via_unmap_blit_from_device(pdev, vsg);
		fallthrough;
	case dr_via_desc_pages_alloc:
		for (i = 0; i < vsg->num_desc_pages; ++i) {
			if (vsg->desc_pages[i] != NULL)
				free_page((unsigned long)vsg->desc_pages[i]);
		}
		kfree(vsg->desc_pages);
		fallthrough;
	case dr_via_pages_locked:
		unpin_user_pages_dirty_lock(vsg->pages, vsg->num_pages,
					    (vsg->direction == DMA_FROM_DEVICE));
		fallthrough;
	case dr_via_pages_alloc:
		vfree(vsg->pages);
		fallthrough;
	default:
		vsg->state = dr_via_sg_init;
	}
	vfree(vsg->bounce_buffer);
	vsg->bounce_buffer = NULL;
	vsg->free_on_sequence = 0;
}

/*
 * Fire a blit engine.
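 * Loads the start of the descriptor chain (vsg->chain_start) into the engine
 * and kicks off the transfer.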
 */

static void
via_fire_dmablit(struct drm_device *dev, drm_via_sg_info_t *vsg, int engine)
{
	drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;

	via_write(dev_priv, VIA_PCI_DMA_MAR0 + engine*0x10, 0);
	via_write(dev_priv, VIA_PCI_DMA_DAR0 + engine*0x10, 0);
	via_write(dev_priv, VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_DD | VIA_DMA_CSR_TD |
		  VIA_DMA_CSR_DE);
	via_write(dev_priv, VIA_PCI_DMA_MR0 + engine*0x04, VIA_DMA_MR_CM | VIA_DMA_MR_TDIE);
	via_write(dev_priv, VIA_PCI_DMA_BCR0 + engine*0x10, 0);
	via_write(dev_priv, VIA_PCI_DMA_DPR0 + engine*0x10, vsg->chain_start);
	wmb();
	via_write(dev_priv, VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_DE | VIA_DMA_CSR_TS);
	via_read(dev_priv, VIA_PCI_DMA_CSR0 + engine*0x04);
}

/*
 * Obtain a page pointer array and lock all pages into system memory. A segmentation violation will
 * occur here if the calling user does not have access to the submitted address.
 */

static int
via_lock_all_dma_pages(drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer)
{
	int ret;
	unsigned long first_pfn = VIA_PFN(xfer->mem_addr);
	vsg->num_pages = VIA_PFN(xfer->mem_addr + (xfer->num_lines * xfer->mem_stride - 1)) -
		first_pfn + 1;

	vsg->pages = vzalloc(array_size(sizeof(struct page *), vsg->num_pages));
	if (NULL == vsg->pages)
		return -ENOMEM;
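	/* Pin the user pages so they stay resident for the duration of the DMA. */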
	ret = pin_user_pages_fast((unsigned long)xfer->mem_addr,
				  vsg->num_pages,
				  vsg->direction == DMA_FROM_DEVICE ? FOLL_WRITE : 0,
				  vsg->pages);
	if (ret != vsg->num_pages) {
		if (ret < 0)
			return ret;
		vsg->state = dr_via_pages_locked;
		return -EINVAL;
	}
	vsg->state = dr_via_pages_locked;
	DRM_DEBUG("DMA pages locked\n");
	return 0;
}

/*
 * Allocate DMA capable memory for the blit descriptor chain, and an array that keeps track of the
 * pages we allocate. We don't want to use kmalloc for the descriptor chain because it may be
 * quite large for some blits, and pages don't need to be contiguous.
 */

static int
via_alloc_desc_pages(drm_via_sg_info_t *vsg)
{
	int i;

	vsg->descriptors_per_page = PAGE_SIZE / sizeof(drm_via_descriptor_t);
	vsg->num_desc_pages = (vsg->num_desc + vsg->descriptors_per_page - 1) /
		vsg->descriptors_per_page;

	if (NULL == (vsg->desc_pages = kcalloc(vsg->num_desc_pages, sizeof(void *), GFP_KERNEL)))
		return -ENOMEM;

	vsg->state = dr_via_desc_pages_alloc;
	for (i = 0; i < vsg->num_desc_pages; ++i) {
		if (NULL == (vsg->desc_pages[i] =
			     (drm_via_descriptor_t *) __get_free_page(GFP_KERNEL)))
			return -ENOMEM;
	}
	DRM_DEBUG("Allocated %d pages for %d descriptors.\n", vsg->num_desc_pages,
		  vsg->num_desc);
	return 0;
}

static void
via_abort_dmablit(struct drm_device *dev, int engine)
{
	drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;

	via_write(dev_priv, VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_TA);
}

static void
via_dmablit_engine_off(struct drm_device *dev, int engine)
{
	drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;

	via_write(dev_priv, VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_TD | VIA_DMA_CSR_DD);
}



/*
 * The dmablit part of the IRQ handler. Trying to do only reasonably fast things here.
 * The rest, like unmapping and freeing memory for done blits is done in a separate workqueue
 * task. Basically the task of the interrupt handler is to submit a new blit to the engine, while
 * the workqueue task takes care of processing associated with the old blit.
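 * It also aborts transfers that have not completed within a second, and it
 * arms the polling timer whenever a new blit is fired.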
 */

void
via_dmablit_handler(struct drm_device *dev, int engine, int from_irq)
{
	drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
	drm_via_blitq_t *blitq = dev_priv->blit_queues + engine;
	int cur;
	int done_transfer;
	unsigned long irqsave = 0;
	uint32_t status = 0;

	DRM_DEBUG("DMA blit handler called. engine = %d, from_irq = %d, blitq = 0x%lx\n",
		  engine, from_irq, (unsigned long) blitq);

	if (from_irq)
		spin_lock(&blitq->blit_lock);
	else
		spin_lock_irqsave(&blitq->blit_lock, irqsave);

	done_transfer = blitq->is_active &&
		((status = via_read(dev_priv, VIA_PCI_DMA_CSR0 + engine*0x04)) & VIA_DMA_CSR_TD);
	done_transfer = done_transfer || (blitq->aborting && !(status & VIA_DMA_CSR_DE));

	cur = blitq->cur;
	if (done_transfer) {

		blitq->blits[cur]->aborted = blitq->aborting;
		blitq->done_blit_handle++;
		wake_up(blitq->blit_queue + cur);

		cur++;
		if (cur >= VIA_NUM_BLIT_SLOTS)
			cur = 0;
		blitq->cur = cur;

		/*
		 * Clear transfer done flag.
		 */

		via_write(dev_priv, VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_TD);

		blitq->is_active = 0;
		blitq->aborting = 0;
		schedule_work(&blitq->wq);

	} else if (blitq->is_active && time_after_eq(jiffies, blitq->end)) {

		/*
		 * Abort transfer after one second.
		 */

		via_abort_dmablit(dev, engine);
		blitq->aborting = 1;
		blitq->end = jiffies + HZ;
	}

	if (!blitq->is_active) {
		if (blitq->num_outstanding) {
			via_fire_dmablit(dev, blitq->blits[cur], engine);
			blitq->is_active = 1;
			blitq->cur = cur;
			blitq->num_outstanding--;
			blitq->end = jiffies + HZ;
			if (!timer_pending(&blitq->poll_timer))
				mod_timer(&blitq->poll_timer, jiffies + 1);
		} else {
			if (timer_pending(&blitq->poll_timer))
				del_timer(&blitq->poll_timer);
			via_dmablit_engine_off(dev, engine);
		}
	}

	if (from_irq)
		spin_unlock(&blitq->blit_lock);
	else
		spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
}



/*
 * Check whether this blit is still active, performing necessary locking.
 */

static int
via_dmablit_active(drm_via_blitq_t *blitq, int engine, uint32_t handle, wait_queue_head_t **queue)
{
	unsigned long irqsave;
	uint32_t slot;
	int active;

	spin_lock_irqsave(&blitq->blit_lock, irqsave);

	/*
	 * Allow for handle wraparounds.
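	 * A blit is considered still pending if its handle lies after the last
	 * completed handle but not after the most recently issued one; the
	 * unsigned 32-bit window comparison below stays correct when the
	 * handle counter wraps.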
	 */

	active = ((blitq->done_blit_handle - handle) > (1 << 23)) &&
		((blitq->cur_blit_handle - handle) <= (1 << 23));

	if (queue && active) {
		slot = handle - blitq->done_blit_handle + blitq->cur - 1;
		if (slot >= VIA_NUM_BLIT_SLOTS)
			slot -= VIA_NUM_BLIT_SLOTS;
		*queue = blitq->blit_queue + slot;
	}

	spin_unlock_irqrestore(&blitq->blit_lock, irqsave);

	return active;
}

/*
 * Sync. Wait up to three seconds for the blit to complete.
 */

static int
via_dmablit_sync(struct drm_device *dev, uint32_t handle, int engine)
{

	drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
	drm_via_blitq_t *blitq = dev_priv->blit_queues + engine;
	wait_queue_head_t *queue;
	int ret = 0;

	if (via_dmablit_active(blitq, engine, handle, &queue)) {
		VIA_WAIT_ON(ret, *queue, 3 * HZ,
			    !via_dmablit_active(blitq, engine, handle, NULL));
	}
	DRM_DEBUG("DMA blit sync handle 0x%x engine %d returned %d\n",
		  handle, engine, ret);

	return ret;
}

/*
 * A timer that regularly polls the blit engine in cases where we don't have interrupts:
 * a) Broken hardware (typically those that don't have any video capture facility).
 * b) Blit abort. The hardware doesn't send an interrupt when a blit is aborted.
 * The timer and hardware IRQs can and do work in parallel. If the hardware has
 * IRQs, they shorten the latency somewhat.
 */



static void
via_dmablit_timer(struct timer_list *t)
{
	drm_via_blitq_t *blitq = from_timer(blitq, t, poll_timer);
	struct drm_device *dev = blitq->dev;
	int engine = (int)
		(blitq - ((drm_via_private_t *)dev->dev_private)->blit_queues);

	DRM_DEBUG("Polling timer called for engine %d, jiffies %lu\n", engine,
		  (unsigned long) jiffies);

	via_dmablit_handler(dev, engine, 0);

	if (!timer_pending(&blitq->poll_timer)) {
		mod_timer(&blitq->poll_timer, jiffies + 1);

		/*
		 * Rerun handler to delete timer if engines are off, and
		 * to shorten abort latency. This is a little nasty.
		 */

		via_dmablit_handler(dev, engine, 0);

	}
}




/*
 * Workqueue task that frees data and mappings associated with a blit.
 * Also wakes up waiting processes. Each of these tasks handles one
 * blit engine only and may not be called on each interrupt.
 */


static void
via_dmablit_workqueue(struct work_struct *work)
{
	drm_via_blitq_t *blitq = container_of(work, drm_via_blitq_t, wq);
	struct drm_device *dev = blitq->dev;
	unsigned long irqsave;
	drm_via_sg_info_t *cur_sg;
	int cur_released;


	DRM_DEBUG("Workqueue task called for blit engine %ld\n", (unsigned long)
		  (blitq - ((drm_via_private_t *)dev->dev_private)->blit_queues));

	spin_lock_irqsave(&blitq->blit_lock, irqsave);

	while (blitq->serviced != blitq->cur) {

		cur_released = blitq->serviced++;

		DRM_DEBUG("Releasing blit slot %d\n", cur_released);

		if (blitq->serviced >= VIA_NUM_BLIT_SLOTS)
			blitq->serviced = 0;

		cur_sg = blitq->blits[cur_released];
		blitq->num_free++;

		spin_unlock_irqrestore(&blitq->blit_lock, irqsave);

		wake_up(&blitq->busy_queue);

		via_free_sg_info(dev->pdev, cur_sg);
		kfree(cur_sg);

		spin_lock_irqsave(&blitq->blit_lock, irqsave);
	}

	spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
}


/*
 * Init all blit engines. Currently we use two, but some hardware has four.
 */


void
via_init_dmablit(struct drm_device *dev)
{
	int i, j;
	drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
	drm_via_blitq_t *blitq;

	pci_set_master(dev->pdev);

	for (i = 0; i < VIA_NUM_BLIT_ENGINES; ++i) {
		blitq = dev_priv->blit_queues + i;
		blitq->dev = dev;
		blitq->cur_blit_handle = 0;
		blitq->done_blit_handle = 0;
		blitq->head = 0;
		blitq->cur = 0;
		blitq->serviced = 0;
		blitq->num_free = VIA_NUM_BLIT_SLOTS - 1;
		blitq->num_outstanding = 0;
		blitq->is_active = 0;
		blitq->aborting = 0;
		spin_lock_init(&blitq->blit_lock);
		for (j = 0; j < VIA_NUM_BLIT_SLOTS; ++j)
			init_waitqueue_head(blitq->blit_queue + j);
		init_waitqueue_head(&blitq->busy_queue);
		INIT_WORK(&blitq->wq, via_dmablit_workqueue);
		timer_setup(&blitq->poll_timer, via_dmablit_timer, 0);
	}
}

/*
 * Build all info and do all mappings required for a blit.
 */


static int
via_build_sg_info(struct drm_device *dev, drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer)
{
	int draw = xfer->to_fb;
	int ret = 0;

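	/* A blit to the frame buffer reads from system memory (DMA_TO_DEVICE);
	 * a blit from it writes back to system memory (DMA_FROM_DEVICE). */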
	vsg->direction = (draw) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
	vsg->bounce_buffer = NULL;

	vsg->state = dr_via_sg_init;

	if (xfer->num_lines <= 0 || xfer->line_length <= 0) {
		DRM_ERROR("Zero size bitblt.\n");
		return -EINVAL;
	}

	/*
	 * The check below is a driver limitation, not a hardware one. We
	 * don't want to lock unused pages, and don't want to incorporate the
	 * extra logic of avoiding them. Make sure there are none.
	 * (Not a big limitation anyway.)
	 */

	if ((xfer->mem_stride - xfer->line_length) > 2*PAGE_SIZE) {
		DRM_ERROR("Too large system memory stride. Stride: %d, "
			  "Length: %d\n", xfer->mem_stride, xfer->line_length);
		return -EINVAL;
	}

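	/* If the lines are contiguous in both system memory and the frame
	 * buffer, collapse them into a single long line. */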
	if ((xfer->mem_stride == xfer->line_length) &&
	    (xfer->fb_stride == xfer->line_length)) {
		xfer->mem_stride *= xfer->num_lines;
		xfer->line_length = xfer->mem_stride;
		xfer->fb_stride = xfer->mem_stride;
		xfer->num_lines = 1;
	}

	/*
	 * Don't lock an arbitrarily large number of pages, since that causes a
	 * DoS security hole.
	 */

	if (xfer->num_lines > 2048 || (xfer->num_lines*xfer->mem_stride > (2048*2048*4))) {
		DRM_ERROR("Too large PCI DMA bitblt.\n");
		return -EINVAL;
	}

	/*
	 * We allow a negative fb stride to allow flipping of images in
	 * transfer.
	 */

	if (xfer->mem_stride < xfer->line_length ||
	    abs(xfer->fb_stride) < xfer->line_length) {
		DRM_ERROR("Invalid frame-buffer / memory stride.\n");
		return -EINVAL;
	}

	/*
	 * A hardware bug seems to be worked around if system memory addresses start on
	 * 16 byte boundaries. This seems a bit restrictive however. VIA has been contacted
	 * about this. Meanwhile, impose the following restrictions:
	 */

#ifdef VIA_BUGFREE
	if ((((unsigned long)xfer->mem_addr & 3) != ((unsigned long)xfer->fb_addr & 3)) ||
	    ((xfer->num_lines > 1) && ((xfer->mem_stride & 3) != (xfer->fb_stride & 3)))) {
		DRM_ERROR("Invalid DRM bitblt alignment.\n");
		return -EINVAL;
	}
#else
	if ((((unsigned long)xfer->mem_addr & 15) ||
	     ((unsigned long)xfer->fb_addr & 3)) ||
	    ((xfer->num_lines > 1) &&
	     ((xfer->mem_stride & 15) || (xfer->fb_stride & 3)))) {
		DRM_ERROR("Invalid DRM bitblt alignment.\n");
		return -EINVAL;
	}
#endif

	if (0 != (ret = via_lock_all_dma_pages(vsg, xfer))) {
		DRM_ERROR("Could not lock DMA pages.\n");
		via_free_sg_info(dev->pdev, vsg);
		return ret;
	}

	via_map_blit_for_device(dev->pdev, xfer, vsg, 0);
	if (0 != (ret = via_alloc_desc_pages(vsg))) {
		DRM_ERROR("Could not allocate DMA descriptor pages.\n");
		via_free_sg_info(dev->pdev, vsg);
		return ret;
	}
	via_map_blit_for_device(dev->pdev, xfer, vsg, 1);

	return 0;
}


/*
 * Reserve one free slot in the blit queue. Will wait for one second for one
 * to become available. Otherwise -EBUSY is returned.
 */

static int
via_dmablit_grab_slot(drm_via_blitq_t *blitq, int engine)
{
	int ret = 0;
	unsigned long irqsave;

	DRM_DEBUG("Num free is %d\n", blitq->num_free);
	spin_lock_irqsave(&blitq->blit_lock, irqsave);
	while (blitq->num_free == 0) {
		spin_unlock_irqrestore(&blitq->blit_lock, irqsave);

		VIA_WAIT_ON(ret, blitq->busy_queue, HZ, blitq->num_free > 0);
		if (ret)
			return (-EINTR == ret) ? -EAGAIN : ret;

		spin_lock_irqsave(&blitq->blit_lock, irqsave);
	}

	blitq->num_free--;
	spin_unlock_irqrestore(&blitq->blit_lock, irqsave);

	return 0;
}

/*
 * Hand back a free slot if we changed our mind.
 */

static void
via_dmablit_release_slot(drm_via_blitq_t *blitq)
{
	unsigned long irqsave;

	spin_lock_irqsave(&blitq->blit_lock, irqsave);
	blitq->num_free++;
	spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
	wake_up(&blitq->busy_queue);
}

/*
 * Grab a free slot. Build blit info and queue a blit.
 */


static int
via_dmablit(struct drm_device *dev, drm_via_dmablit_t *xfer)
{
	drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
	drm_via_sg_info_t *vsg;
	drm_via_blitq_t *blitq;
	int ret;
	int engine;
	unsigned long irqsave;

	if (dev_priv == NULL) {
		DRM_ERROR("Called without initialization.\n");
		return -EINVAL;
	}

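	/* Engine 0 handles blits to the frame buffer, engine 1 blits from it. */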
	engine = (xfer->to_fb) ? 0 : 1;
	blitq = dev_priv->blit_queues + engine;
	if (0 != (ret = via_dmablit_grab_slot(blitq, engine)))
		return ret;
	if (NULL == (vsg = kmalloc(sizeof(*vsg), GFP_KERNEL))) {
		via_dmablit_release_slot(blitq);
		return -ENOMEM;
	}
	if (0 != (ret = via_build_sg_info(dev, vsg, xfer))) {
		via_dmablit_release_slot(blitq);
		kfree(vsg);
		return ret;
	}
	spin_lock_irqsave(&blitq->blit_lock, irqsave);

	blitq->blits[blitq->head++] = vsg;
	if (blitq->head >= VIA_NUM_BLIT_SLOTS)
		blitq->head = 0;
	blitq->num_outstanding++;
	xfer->sync.sync_handle = ++blitq->cur_blit_handle;

	spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
	xfer->sync.engine = engine;

	via_dmablit_handler(dev, engine, 0);

	return 0;
}

/*
 * Sync on a previously submitted blit. Note that the X server uses signals extensively, and
 * that there is a very high probability that this IOCTL will be interrupted by a signal. In that
 * case it returns -EAGAIN so that the signal can be delivered.
 * The caller should then reissue the IOCTL. This is similar to what is being done for drmGetLock().
 */

int
via_dma_blit_sync(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_via_blitsync_t *sync = data;
	int err;

	if (sync->engine >= VIA_NUM_BLIT_ENGINES)
		return -EINVAL;

	err = via_dmablit_sync(dev, sync->sync_handle, sync->engine);

	if (-EINTR == err)
		err = -EAGAIN;

	return err;
}


/*
 * Queue a blit and hand back a handle to be used for sync. This IOCTL may be interrupted by a signal
 * while waiting for a free slot in the blit queue. In that case it returns with -EAGAIN and should
 * be reissued. See the above IOCTL code.
 */

int
via_dma_blit(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_via_dmablit_t *xfer = data;
	int err;

	err = via_dmablit(dev, xfer);

	return err;
}