// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Scatter-Gather buffer
 *
 * Copyright (c) by Takashi Iwai <tiwai@suse.de>
 */

#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/export.h>
#include <sound/memalloc.h>
#include "memalloc_local.h"

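/*
 * Each buffer page is described by a snd_sg_page entry.  A page's DMA
 * address is page-aligned, so the low bits of the address field are free;
 * the head entry of each physically continuous chunk reuses them to store
 * the chunk's page count, while trailing entries keep those bits zero.
 */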
struct snd_sg_page {
	void *buf;
	dma_addr_t addr;
};

struct snd_sg_buf {
	int size;	/* allocated byte size */
	int pages;	/* allocated pages */
	int tblsize;	/* allocated table size */
	struct snd_sg_page *table;	/* address table */
	struct page **page_table;	/* page table (for vmap/vunmap) */
	struct device *dev;
};

/* table entries are aligned to 32 */
#define SGBUF_TBL_ALIGN		32
#define sgbuf_align_table(tbl)	ALIGN((tbl), SGBUF_TBL_ALIGN)

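/*
 * Unmap the kernel mapping and release the chunks.  Only chunk head
 * entries (non-zero low address bits) are handed back to
 * snd_dma_free_pages(); the byte size of each chunk is recovered from
 * the encoded page count.
 */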
static void snd_dma_sg_free(struct snd_dma_buffer *dmab)
{
	struct snd_sg_buf *sgbuf = dmab->private_data;
	struct snd_dma_buffer tmpb;
	int i;

	if (!sgbuf)
		return;

	vunmap(dmab->area);
	dmab->area = NULL;

	tmpb.dev.type = SNDRV_DMA_TYPE_DEV;
	if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG)
		tmpb.dev.type = SNDRV_DMA_TYPE_DEV_WC;
	tmpb.dev.dev = sgbuf->dev;
	for (i = 0; i < sgbuf->pages; i++) {
		if (!(sgbuf->table[i].addr & ~PAGE_MASK))
			continue; /* continuation page, freed with its chunk head */
		tmpb.area = sgbuf->table[i].buf;
		tmpb.addr = sgbuf->table[i].addr & PAGE_MASK;
		tmpb.bytes = (sgbuf->table[i].addr & ~PAGE_MASK) << PAGE_SHIFT;
		snd_dma_free_pages(&tmpb);
	}

	kfree(sgbuf->table);
	kfree(sgbuf->page_table);
	kfree(sgbuf);
	dmab->private_data = NULL;
}

#define MAX_ALLOC_PAGES		32

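/*
 * Allocate the buffer in chunks of at most MAX_ALLOC_PAGES pages each via
 * the fallback allocator.  When a smaller chunk is returned, the maximum
 * chunk size is lowered accordingly.  If allocation stops early, the buffer
 * is truncated to the pages obtained so far.  Finally all pages are mapped
 * into one virtually continuous area with vmap().
 */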
static void *snd_dma_sg_alloc(struct snd_dma_buffer *dmab, size_t size)
{
	struct snd_sg_buf *sgbuf;
	unsigned int i, pages, chunk, maxpages;
	struct snd_dma_buffer tmpb;
	struct snd_sg_page *table;
	struct page **pgtable;
	int type = SNDRV_DMA_TYPE_DEV;
	pgprot_t prot = PAGE_KERNEL;
	void *area;

	dmab->private_data = sgbuf = kzalloc(sizeof(*sgbuf), GFP_KERNEL);
	if (!sgbuf)
		return NULL;
	if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG) {
		type = SNDRV_DMA_TYPE_DEV_WC;
#ifdef pgprot_noncached
		prot = pgprot_noncached(PAGE_KERNEL);
#endif
	}
	sgbuf->dev = dmab->dev.dev;
	pages = snd_sgbuf_aligned_pages(size);
	sgbuf->tblsize = sgbuf_align_table(pages);
	table = kcalloc(sgbuf->tblsize, sizeof(*table), GFP_KERNEL);
	if (!table)
		goto _failed;
	sgbuf->table = table;
	pgtable = kcalloc(sgbuf->tblsize, sizeof(*pgtable), GFP_KERNEL);
	if (!pgtable)
		goto _failed;
	sgbuf->page_table = pgtable;

	/* allocate pages */
	maxpages = MAX_ALLOC_PAGES;
	while (pages > 0) {
		chunk = pages;
		/* don't be too eager to take a huge chunk */
		if (chunk > maxpages)
			chunk = maxpages;
		chunk <<= PAGE_SHIFT;
		if (snd_dma_alloc_pages_fallback(type, dmab->dev.dev,
						 chunk, &tmpb) < 0) {
			if (!sgbuf->pages)
				goto _failed;
			size = sgbuf->pages * PAGE_SIZE;
			break;
		}
		chunk = tmpb.bytes >> PAGE_SHIFT;
		for (i = 0; i < chunk; i++) {
			table->buf = tmpb.area;
			table->addr = tmpb.addr;
			if (!i)
				table->addr |= chunk; /* mark the chunk head with its page count */
			table++;
			*pgtable++ = virt_to_page(tmpb.area);
			tmpb.area += PAGE_SIZE;
			tmpb.addr += PAGE_SIZE;
		}
		sgbuf->pages += chunk;
		pages -= chunk;
		if (chunk < maxpages)
			maxpages = chunk;
	}

	sgbuf->size = size;
	area = vmap(sgbuf->page_table, sgbuf->pages, VM_MAP, prot);
	if (!area)
		goto _failed;
	return area;

 _failed:
	snd_dma_sg_free(dmab); /* free the table */
	return NULL;
}

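/*
 * Return the DMA address at the given byte offset; the chunk page count
 * encoded in the low bits of the table entry is masked off first.
 */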
static dma_addr_t snd_dma_sg_get_addr(struct snd_dma_buffer *dmab,
				      size_t offset)
{
	struct snd_sg_buf *sgbuf = dmab->private_data;
	dma_addr_t addr;

	addr = sgbuf->table[offset >> PAGE_SHIFT].addr;
	addr &= ~((dma_addr_t)PAGE_SIZE - 1);
	return addr + offset % PAGE_SIZE;
}

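/* return the page covering the given byte offset, or NULL when out of range */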
static struct page *snd_dma_sg_get_page(struct snd_dma_buffer *dmab,
					size_t offset)
{
	struct snd_sg_buf *sgbuf = dmab->private_data;
	unsigned int idx = offset >> PAGE_SHIFT;

	if (idx >= (unsigned int)sgbuf->pages)
		return NULL;
	return sgbuf->page_table[idx];
}

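/*
 * Return how many bytes starting at @ofs (at most @size) lie on physically
 * continuous pages.
 */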
static unsigned int snd_dma_sg_get_chunk_size(struct snd_dma_buffer *dmab,
					      unsigned int ofs,
					      unsigned int size)
{
	struct snd_sg_buf *sg = dmab->private_data;
	unsigned int start, end, pg;

	start = ofs >> PAGE_SHIFT;
	end = (ofs + size - 1) >> PAGE_SHIFT;
	/* check page continuity */
	pg = sg->table[start].addr >> PAGE_SHIFT;
	for (;;) {
		start++;
		if (start > end)
			break;
		pg++;
		if ((sg->table[start].addr >> PAGE_SHIFT) != pg)
			return (start << PAGE_SHIFT) - ofs;
	}
	/* ok, all on continuous pages */
	return size;
}

static int snd_dma_sg_mmap(struct snd_dma_buffer *dmab,
			   struct vm_area_struct *area)
{
	if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG)
		area->vm_page_prot = pgprot_writecombine(area->vm_page_prot);
	return -ENOENT; /* continue with the default mmap handler */
}

const struct snd_malloc_ops snd_dma_sg_ops = {
	.alloc = snd_dma_sg_alloc,
	.free = snd_dma_sg_free,
	.get_addr = snd_dma_sg_get_addr,
	.get_page = snd_dma_sg_get_page,
	.get_chunk_size = snd_dma_sg_get_chunk_size,
	.mmap = snd_dma_sg_mmap,
};
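
/*
 * Usage sketch (illustrative only, not part of the original file): a driver
 * normally reaches these callbacks through the generic allocator, e.g.
 *
 *	struct snd_dma_buffer dmab;
 *	int err;
 *
 *	err = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV_SG, card->dev,
 *				  buffer_bytes, &dmab);
 *	if (err < 0)
 *		return err;
 *	// dmab.area is the vmap'ed CPU address; the DMA address for a given
 *	// offset comes from snd_sgbuf_get_addr(&dmab, offset)
 *	snd_dma_free_pages(&dmab);
 *
 * The core memalloc code selects snd_dma_sg_ops when an SG buffer type is
 * requested.
 */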