// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Scatter-Gather buffer
 *
 * Copyright (c) by Takashi Iwai <tiwai@suse.de>
 */

#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/export.h>
#include <asm/pgtable.h>
#include <sound/memalloc.h>


/* the page table size is rounded up to a multiple of 32 entries */
#define SGBUF_TBL_ALIGN		32
#define sgbuf_align_table(tbl)	ALIGN((tbl), SGBUF_TBL_ALIGN)

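/*
 * snd_free_sgbuf_pages - release the pages and tables of an SG buffer
 *
 * Walks the page table built by snd_malloc_sgbuf_pages() and frees the
 * buffer chunk by chunk.  Only the first entry of each chunk carries a
 * non-zero value in the low bits of its addr field (the chunk length in
 * pages); entries with cleared low bits belong to the preceding chunk
 * and are skipped.
 */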
int snd_free_sgbuf_pages(struct snd_dma_buffer *dmab)
{
	struct snd_sg_buf *sgbuf = dmab->private_data;
	struct snd_dma_buffer tmpb;
	int i;

	if (!sgbuf)
		return -EINVAL;

	vunmap(dmab->area);
	dmab->area = NULL;

	tmpb.dev.type = SNDRV_DMA_TYPE_DEV;
	if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_UC_SG)
		tmpb.dev.type = SNDRV_DMA_TYPE_DEV_UC;
	tmpb.dev.dev = sgbuf->dev;
	for (i = 0; i < sgbuf->pages; i++) {
		if (!(sgbuf->table[i].addr & ~PAGE_MASK))
			continue; /* not a chunk head: freed with its head entry */
		tmpb.area = sgbuf->table[i].buf;
		tmpb.addr = sgbuf->table[i].addr & PAGE_MASK;
		tmpb.bytes = (sgbuf->table[i].addr & ~PAGE_MASK) << PAGE_SHIFT;
		snd_dma_free_pages(&tmpb);
	}

	kfree(sgbuf->table);
	kfree(sgbuf->page_table);
	kfree(sgbuf);
	dmab->private_data = NULL;

	return 0;
}

#define MAX_ALLOC_PAGES		32

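/*
 * snd_malloc_sgbuf_pages - allocate a scatter-gather buffer
 *
 * Allocates the buffer as a series of DMA page chunks of at most
 * MAX_ALLOC_PAGES pages each and maps them into one virtually contiguous
 * area via vmap().  The chunk length in pages is stored in the low bits
 * of the addr field of each chunk's first table entry, which is what
 * snd_free_sgbuf_pages() relies on when releasing the chunks.  If
 * res_size is given, a partial allocation is accepted and the actually
 * allocated size is returned there.
 */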
void *snd_malloc_sgbuf_pages(struct device *device,
			     size_t size, struct snd_dma_buffer *dmab,
			     size_t *res_size)
{
	struct snd_sg_buf *sgbuf;
	unsigned int i, pages, chunk, maxpages;
	struct snd_dma_buffer tmpb;
	struct snd_sg_page *table;
	struct page **pgtable;
	int type = SNDRV_DMA_TYPE_DEV;
	pgprot_t prot = PAGE_KERNEL;

	dmab->area = NULL;
	dmab->addr = 0;
	dmab->private_data = sgbuf = kzalloc(sizeof(*sgbuf), GFP_KERNEL);
	if (!sgbuf)
		return NULL;
	if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_UC_SG) {
		type = SNDRV_DMA_TYPE_DEV_UC;
#ifdef pgprot_noncached
		prot = pgprot_noncached(PAGE_KERNEL);
#endif
	}
	sgbuf->dev = device;
	pages = snd_sgbuf_aligned_pages(size);
	sgbuf->tblsize = sgbuf_align_table(pages);
	table = kcalloc(sgbuf->tblsize, sizeof(*table), GFP_KERNEL);
	if (!table)
		goto _failed;
	sgbuf->table = table;
	pgtable = kcalloc(sgbuf->tblsize, sizeof(*pgtable), GFP_KERNEL);
	if (!pgtable)
		goto _failed;
	sgbuf->page_table = pgtable;

	/* allocate pages */
	maxpages = MAX_ALLOC_PAGES;
	while (pages > 0) {
		chunk = pages;
		/* don't be too eager to take a huge chunk */
		if (chunk > maxpages)
			chunk = maxpages;
		chunk <<= PAGE_SHIFT;
		if (snd_dma_alloc_pages_fallback(type, device,
						 chunk, &tmpb) < 0) {
			if (!sgbuf->pages)
				goto _failed;
			if (!res_size)
				goto _failed;
			size = sgbuf->pages * PAGE_SIZE;
			break;
		}
		chunk = tmpb.bytes >> PAGE_SHIFT;
		for (i = 0; i < chunk; i++) {
			table->buf = tmpb.area;
			table->addr = tmpb.addr;
			if (!i)
				table->addr |= chunk; /* mark the chunk head with its length */
			table++;
			*pgtable++ = virt_to_page(tmpb.area);
			tmpb.area += PAGE_SIZE;
			tmpb.addr += PAGE_SIZE;
		}
		sgbuf->pages += chunk;
		pages -= chunk;
		if (chunk < maxpages)
			maxpages = chunk;
	}

	sgbuf->size = size;
	dmab->area = vmap(sgbuf->page_table, sgbuf->pages, VM_MAP, prot);
	if (!dmab->area)
		goto _failed;
	if (res_size)
		*res_size = sgbuf->size;
	return dmab->area;

 _failed:
	snd_free_sgbuf_pages(dmab); /* free the table */
	return NULL;
}
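
/*
 * Rough usage sketch: drivers normally reach this allocator indirectly,
 * through snd_dma_alloc_pages() with SNDRV_DMA_TYPE_DEV_SG (or
 * SNDRV_DMA_TYPE_DEV_UC_SG), rather than by calling it directly.  The
 * function name, device pointer and the 512 KB size below are made-up
 * placeholders.
 */
#if 0	/* illustration only */
static int example_alloc_sg_buffer(struct device *card_dev,
				   struct snd_dma_buffer *dmab)
{
	int err;

	/* 512 KB SG-backed buffer; dmab->area is the vmap()ed CPU view */
	err = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV_SG, card_dev,
				  512 * 1024, dmab);
	if (err < 0)
		return err;

	memset(dmab->area, 0, 512 * 1024);

	/* releasing the buffer ends up in snd_free_sgbuf_pages() above */
	snd_dma_free_pages(dmab);
	return 0;
}
#endif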

/*
 * compute the max chunk size with physically contiguous pages on the SG buffer
 */
unsigned int snd_sgbuf_get_chunk_size(struct snd_dma_buffer *dmab,
				      unsigned int ofs, unsigned int size)
{
	struct snd_sg_buf *sg = dmab->private_data;
	unsigned int start, end, pg;

	start = ofs >> PAGE_SHIFT;
	end = (ofs + size - 1) >> PAGE_SHIFT;
	/* check page continuity */
	pg = sg->table[start].addr >> PAGE_SHIFT;
	for (;;) {
		start++;
		if (start > end)
			break;
		pg++;
		if ((sg->table[start].addr >> PAGE_SHIFT) != pg)
			return (start << PAGE_SHIFT) - ofs;
	}
	/* ok, all pages are contiguous */
	return size;
}
EXPORT_SYMBOL(snd_sgbuf_get_chunk_size);
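
/*
 * Rough usage sketch: a driver that programs its own hardware SG list can
 * walk the buffer in physically contiguous runs by combining
 * snd_sgbuf_get_addr() with snd_sgbuf_get_chunk_size().  The function name
 * and the my_hw_append_entry() callback below are made-up placeholders.
 */
#if 0	/* illustration only */
static void example_build_hw_sg_list(struct snd_dma_buffer *dmab,
				     unsigned int bytes)
{
	unsigned int ofs = 0;

	while (ofs < bytes) {
		unsigned int chunk =
			snd_sgbuf_get_chunk_size(dmab, ofs, bytes - ofs);
		dma_addr_t addr = snd_sgbuf_get_addr(dmab, ofs);

		my_hw_append_entry(addr, chunk);	/* hypothetical helper */
		ofs += chunk;
	}
}
#endif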