// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Scatter-Gather buffer
 *
 * Copyright (c) by Takashi Iwai <tiwai@suse.de>
 */

#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/export.h>
#include <sound/memalloc.h>
#include "memalloc_local.h"

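/*
 * Each snd_sg_page entry describes one page of the buffer.  The low
 * bits of @addr (below PAGE_SIZE) are reused on the first page of each
 * allocated chunk to record the chunk's page count; see the "mark head"
 * step in snd_dma_sg_alloc() and the decoding in snd_dma_sg_free().
 */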
struct snd_sg_page {
	void *buf;
	dma_addr_t addr;
};

struct snd_sg_buf {
	int size;	/* allocated byte size */
	int pages;	/* allocated pages */
	int tblsize;	/* allocated table size */
	struct snd_sg_page *table;	/* address table */
	struct page **page_table;	/* page table (for vmap/vunmap) */
	struct device *dev;
};

/* table entries are aligned to 32 */
#define SGBUF_TBL_ALIGN		32
#define sgbuf_align_table(tbl)	ALIGN((tbl), SGBUF_TBL_ALIGN)

static void snd_dma_sg_free(struct snd_dma_buffer *dmab)
{
	struct snd_sg_buf *sgbuf = dmab->private_data;
	struct snd_dma_buffer tmpb;
	int i;

	if (!sgbuf)
		return;

	vunmap(dmab->area);
	dmab->area = NULL;

	tmpb.dev.type = SNDRV_DMA_TYPE_DEV;
	if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG)
		tmpb.dev.type = SNDRV_DMA_TYPE_DEV_WC;
	tmpb.dev.dev = sgbuf->dev;
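	/*
	 * Walk the table and free chunk by chunk: only head entries
	 * carry a nonzero page count in the low address bits, and each
	 * one is handed back to snd_dma_free_pages() as a whole chunk.
	 */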
	for (i = 0; i < sgbuf->pages; i++) {
		if (!(sgbuf->table[i].addr & ~PAGE_MASK))
			continue; /* non-head page, freed with its chunk */
		tmpb.area = sgbuf->table[i].buf;
		tmpb.addr = sgbuf->table[i].addr & PAGE_MASK;
		tmpb.bytes = (sgbuf->table[i].addr & ~PAGE_MASK) << PAGE_SHIFT;
		snd_dma_free_pages(&tmpb);
	}

	kfree(sgbuf->table);
	kfree(sgbuf->page_table);
	kfree(sgbuf);
	dmab->private_data = NULL;
}

#define MAX_ALLOC_PAGES		32

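/*
 * Allocate the buffer as a series of chunks of at most MAX_ALLOC_PAGES
 * pages each, then vmap() all pages into one virtually contiguous area.
 */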
static void *snd_dma_sg_alloc(struct snd_dma_buffer *dmab, size_t size)
{
	struct snd_sg_buf *sgbuf;
	unsigned int i, pages, chunk, maxpages;
	struct snd_dma_buffer tmpb;
	struct snd_sg_page *table;
	struct page **pgtable;
	int type = SNDRV_DMA_TYPE_DEV;
	pgprot_t prot = PAGE_KERNEL;
	void *area;

	dmab->private_data = sgbuf = kzalloc(sizeof(*sgbuf), GFP_KERNEL);
	if (!sgbuf)
		return NULL;
	if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG) {
		type = SNDRV_DMA_TYPE_DEV_WC;
#ifdef pgprot_noncached
		prot = pgprot_noncached(PAGE_KERNEL);
#endif
	}
	sgbuf->dev = dmab->dev.dev;
	pages = snd_sgbuf_aligned_pages(size);
	sgbuf->tblsize = sgbuf_align_table(pages);
	table = kcalloc(sgbuf->tblsize, sizeof(*table), GFP_KERNEL);
	if (!table)
		goto _failed;
	sgbuf->table = table;
	pgtable = kcalloc(sgbuf->tblsize, sizeof(*pgtable), GFP_KERNEL);
	if (!pgtable)
		goto _failed;
	sgbuf->page_table = pgtable;

	/* allocate pages */
	maxpages = MAX_ALLOC_PAGES;
	while (pages > 0) {
		chunk = pages;
		/* don't be too eager to take a huge chunk */
		if (chunk > maxpages)
			chunk = maxpages;
		chunk <<= PAGE_SHIFT;
		if (snd_dma_alloc_pages_fallback(type, dmab->dev.dev,
						 chunk, &tmpb) < 0) {
			if (!sgbuf->pages)
				goto _failed;
			size = sgbuf->pages * PAGE_SIZE;
			break;
		}
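		/* fill one table entry per page of the chunk just allocated */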
		chunk = tmpb.bytes >> PAGE_SHIFT;
		for (i = 0; i < chunk; i++) {
			table->buf = tmpb.area;
			table->addr = tmpb.addr;
			if (!i)
				table->addr |= chunk; /* mark head */
			table++;
			*pgtable++ = virt_to_page(tmpb.area);
			tmpb.area += PAGE_SIZE;
			tmpb.addr += PAGE_SIZE;
		}
		sgbuf->pages += chunk;
		pages -= chunk;
		if (chunk < maxpages)
			maxpages = chunk;
	}

	sgbuf->size = size;
	area = vmap(sgbuf->page_table, sgbuf->pages, VM_MAP, prot);
	if (!area)
		goto _failed;
	return area;

 _failed:
	snd_dma_sg_free(dmab); /* free the table */
	return NULL;
}

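/*
 * Return the DMA address at @offset; the low bits of a head entry may
 * hold the chunk-size marker, so mask them off before adding the
 * in-page offset.
 */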
static dma_addr_t snd_dma_sg_get_addr(struct snd_dma_buffer *dmab,
				      size_t offset)
{
	struct snd_sg_buf *sgbuf = dmab->private_data;
	dma_addr_t addr;

	addr = sgbuf->table[offset >> PAGE_SHIFT].addr;
	addr &= ~((dma_addr_t)PAGE_SIZE - 1);
	return addr + offset % PAGE_SIZE;
}

static struct page *snd_dma_sg_get_page(struct snd_dma_buffer *dmab,
					size_t offset)
{
	struct snd_sg_buf *sgbuf = dmab->private_data;
	unsigned int idx = offset >> PAGE_SHIFT;

	if (idx >= (unsigned int)sgbuf->pages)
		return NULL;
	return sgbuf->page_table[idx];
}

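/*
 * Return how many bytes starting at @ofs lie on physically contiguous
 * pages, clamped to @size.
 */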
static unsigned int snd_dma_sg_get_chunk_size(struct snd_dma_buffer *dmab,
					      unsigned int ofs,
					      unsigned int size)
{
	struct snd_sg_buf *sg = dmab->private_data;
	unsigned int start, end, pg;

	start = ofs >> PAGE_SHIFT;
	end = (ofs + size - 1) >> PAGE_SHIFT;
	/* check page continuity */
	pg = sg->table[start].addr >> PAGE_SHIFT;
	for (;;) {
		start++;
		if (start > end)
			break;
		pg++;
		if ((sg->table[start].addr >> PAGE_SHIFT) != pg)
			return (start << PAGE_SHIFT) - ofs;
	}
	/* ok, all on contiguous pages */
	return size;
}

static int snd_dma_sg_mmap(struct snd_dma_buffer *dmab,
			   struct vm_area_struct *area)
{
	if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG)
		area->vm_page_prot = pgprot_writecombine(area->vm_page_prot);
	return -ENOENT; /* continue with the default mmap handler */
}

const struct snd_malloc_ops snd_dma_sg_ops = {
	.alloc = snd_dma_sg_alloc,
	.free = snd_dma_sg_free,
	.get_addr = snd_dma_sg_get_addr,
	.get_page = snd_dma_sg_get_page,
	.get_chunk_size = snd_dma_sg_get_chunk_size,
	.mmap = snd_dma_sg_mmap,
};