/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 *  Copyright (c) by Jaroslav Kysela <perex@perex.cz>
 *                   Takashi Iwai <tiwai@suse.de>
 *
 *  Generic memory allocators
 */

#ifndef __SOUND_MEMALLOC_H
#define __SOUND_MEMALLOC_H

#include <linux/dma-direction.h>
#include <asm/page.h>

struct device;
struct vm_area_struct;
struct sg_table;

/*
 * buffer device info
 */
struct snd_dma_device {
	int type;			/* SNDRV_DMA_TYPE_XXX */
	enum dma_data_direction dir;	/* DMA direction */
	bool need_sync;			/* explicit sync needed? */
	struct device *dev;		/* generic device */
};

#define snd_dma_continuous_data(x)	((struct device *)(__force unsigned long)(x))

/*
 * buffer types
 */
#define SNDRV_DMA_TYPE_UNKNOWN		0	/* not defined */
#define SNDRV_DMA_TYPE_CONTINUOUS	1	/* continuous no-DMA memory */
#define SNDRV_DMA_TYPE_DEV		2	/* generic device continuous */
#define SNDRV_DMA_TYPE_DEV_WC		5	/* continuous write-combined */
#ifdef CONFIG_GENERIC_ALLOCATOR
#define SNDRV_DMA_TYPE_DEV_IRAM		4	/* generic device iram-buffer */
#else
#define SNDRV_DMA_TYPE_DEV_IRAM		SNDRV_DMA_TYPE_DEV
#endif
#define SNDRV_DMA_TYPE_VMALLOC		7	/* vmalloc'ed buffer */
#define SNDRV_DMA_TYPE_NONCONTIG	8	/* non-coherent SG buffer */
#define SNDRV_DMA_TYPE_NONCOHERENT	9	/* non-coherent buffer */
#ifdef CONFIG_SND_DMA_SGBUF
#define SNDRV_DMA_TYPE_DEV_SG		SNDRV_DMA_TYPE_NONCONTIG
#define SNDRV_DMA_TYPE_DEV_WC_SG	6	/* SG write-combined */
#else
#define SNDRV_DMA_TYPE_DEV_SG		SNDRV_DMA_TYPE_DEV	/* no SG-buf support */
#define SNDRV_DMA_TYPE_DEV_WC_SG	SNDRV_DMA_TYPE_DEV_WC
#endif

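/*
 * Note (summary of the definitions above, not new behaviour): drivers with
 * a real DMA-capable device typically pick SNDRV_DMA_TYPE_DEV, _DEV_WC or
 * _DEV_SG, while _CONTINUOUS and _VMALLOC are for buffers that need no DMA
 * mapping.  The #ifdef fallbacks quietly map the IRAM and SG types onto
 * plain device allocations when the corresponding kernel support is not
 * configured.
 */
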
/*
 * info for buffer allocation
 */
struct snd_dma_buffer {
	struct snd_dma_device dev;	/* device type */
	unsigned char *area;	/* virtual pointer */
	dma_addr_t addr;	/* physical address */
	size_t bytes;		/* buffer size in bytes */
	void *private_data;	/* private for allocator; don't touch */
};

/*
 * return the number of pages needed to hold the given byte size
 */
static inline unsigned int snd_sgbuf_aligned_pages(size_t size)
{
	return (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
}

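/*
 * For example, with 4 KiB pages snd_sgbuf_aligned_pages(4096) == 1 and
 * snd_sgbuf_aligned_pages(4097) == 2: sizes are rounded up to whole pages.
 */
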
/* allocate/release a buffer */
int snd_dma_alloc_dir_pages(int type, struct device *dev,
			    enum dma_data_direction dir, size_t size,
			    struct snd_dma_buffer *dmab);

static inline int snd_dma_alloc_pages(int type, struct device *dev,
				      size_t size, struct snd_dma_buffer *dmab)
{
	return snd_dma_alloc_dir_pages(type, dev, DMA_BIDIRECTIONAL, size, dmab);
}

int snd_dma_alloc_pages_fallback(int type, struct device *dev, size_t size,
				 struct snd_dma_buffer *dmab);
void snd_dma_free_pages(struct snd_dma_buffer *dmab);
int snd_dma_buffer_mmap(struct snd_dma_buffer *dmab,
			struct vm_area_struct *area);

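/*
 * Minimal usage sketch (illustration only, assuming a driver that owns a
 * DMA-capable "struct device *dev"); the return value is 0 or a negative
 * errno, and the filled snd_dma_buffer carries both the CPU and DMA views:
 *
 *	struct snd_dma_buffer buf;
 *	int err;
 *
 *	err = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, dev, 64 * 1024, &buf);
 *	if (err < 0)
 *		return err;
 *	// ... use buf.area (virtual pointer) and buf.addr (DMA address) ...
 *	snd_dma_free_pages(&buf);
 */
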
enum snd_dma_sync_mode { SNDRV_DMA_SYNC_CPU, SNDRV_DMA_SYNC_DEVICE };
#ifdef CONFIG_HAS_DMA
void snd_dma_buffer_sync(struct snd_dma_buffer *dmab,
			 enum snd_dma_sync_mode mode);
#else
static inline void snd_dma_buffer_sync(struct snd_dma_buffer *dmab,
				       enum snd_dma_sync_mode mode) {}
#endif

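/*
 * Sketch of the expected sync pattern (only meaningful for buffer types
 * that set need_sync, e.g. the non-coherent ones; a no-op otherwise):
 *
 *	snd_dma_buffer_sync(dmab, SNDRV_DMA_SYNC_CPU);		// before the CPU touches the buffer
 *	// ... read/modify dmab->area ...
 *	snd_dma_buffer_sync(dmab, SNDRV_DMA_SYNC_DEVICE);	// before handing it back to the device
 */
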
dma_addr_t snd_sgbuf_get_addr(struct snd_dma_buffer *dmab, size_t offset);
struct page *snd_sgbuf_get_page(struct snd_dma_buffer *dmab, size_t offset);
unsigned int snd_sgbuf_get_chunk_size(struct snd_dma_buffer *dmab,
				      unsigned int ofs, unsigned int size);

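/*
 * Sketch: walking a possibly non-contiguous buffer in DMA-contiguous
 * chunks (illustration only; "ofs" and "rest" are the caller's byte range):
 *
 *	while (rest > 0) {
 *		dma_addr_t addr = snd_sgbuf_get_addr(dmab, ofs);
 *		unsigned int len = snd_sgbuf_get_chunk_size(dmab, ofs, rest);
 *
 *		// ... program one descriptor for (addr, len) ...
 *		ofs += len;
 *		rest -= len;
 *	}
 */
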
/* device-managed memory allocator */
struct snd_dma_buffer *snd_devm_alloc_dir_pages(struct device *dev, int type,
						enum dma_data_direction dir,
						size_t size);

static inline struct snd_dma_buffer *
snd_devm_alloc_pages(struct device *dev, int type, size_t size)
{
	return snd_devm_alloc_dir_pages(dev, type, DMA_BIDIRECTIONAL, size);
}

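/*
 * Sketch (illustration only): the devres-managed variant returns NULL on
 * failure and ties the buffer's lifetime to "dev", so no explicit free is
 * needed:
 *
 *	struct snd_dma_buffer *buf;
 *
 *	buf = snd_devm_alloc_pages(dev, SNDRV_DMA_TYPE_DEV, 64 * 1024);
 *	if (!buf)
 *		return -ENOMEM;
 *	// ... buf is released automatically when the device is unbound ...
 */
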
static inline struct sg_table *
snd_dma_noncontig_sg_table(struct snd_dma_buffer *dmab)
{
	return dmab->private_data;
}

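/*
 * For SNDRV_DMA_TYPE_NONCONTIG buffers the allocator stores the backing
 * struct sg_table in private_data; snd_dma_noncontig_sg_table() is the
 * intended accessor, e.g.:
 *
 *	struct sg_table *sgt = snd_dma_noncontig_sg_table(dmab);
 */
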
#endif /* __SOUND_MEMALLOC_H */