// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2014 The Linux Foundation
 */
#include <linux/dma-map-ops.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

Christoph Hellwig5cf45372019-06-03 09:14:31 +02009struct page **dma_common_find_pages(void *cpu_addr)
10{
11 struct vm_struct *area = find_vm_area(cpu_addr);
12
13 if (!area || area->flags != VM_DMA_COHERENT)
14 return NULL;
15 return area->pages;
16}
17
Christoph Hellwigf0edfea2018-08-24 10:31:08 +020018/*
19 * Remaps an array of PAGE_SIZE pages into another vm_area.
20 * Cannot be used in non-sleeping contexts
21 */
22void *dma_common_pages_remap(struct page **pages, size_t size,
Christoph Hellwig51231742019-08-30 08:51:01 +020023 pgprot_t prot, const void *caller)
Christoph Hellwigf0edfea2018-08-24 10:31:08 +020024{
Christoph Hellwig515e5b62020-06-01 21:50:32 -070025 void *vaddr;
Christoph Hellwigf0edfea2018-08-24 10:31:08 +020026
Eric Auger8e36baf2020-06-23 14:07:55 +020027 vaddr = vmap(pages, PAGE_ALIGN(size) >> PAGE_SHIFT,
28 VM_DMA_COHERENT, prot);
Christoph Hellwig515e5b62020-06-01 21:50:32 -070029 if (vaddr)
30 find_vm_area(vaddr)->pages = pages;
31 return vaddr;
Christoph Hellwigf0edfea2018-08-24 10:31:08 +020032}
33
34/*
35 * Remaps an allocated contiguous region into another vm_area.
36 * Cannot be used in non-sleeping contexts
37 */
38void *dma_common_contiguous_remap(struct page *page, size_t size,
Christoph Hellwigf0edfea2018-08-24 10:31:08 +020039 pgprot_t prot, const void *caller)
40{
Eric Auger8e36baf2020-06-23 14:07:55 +020041 int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
Christoph Hellwigf0edfea2018-08-24 10:31:08 +020042 struct page **pages;
Christoph Hellwig515e5b62020-06-01 21:50:32 -070043 void *vaddr;
44 int i;
Christoph Hellwigf0edfea2018-08-24 10:31:08 +020045
Christoph Hellwig515e5b62020-06-01 21:50:32 -070046 pages = kmalloc_array(count, sizeof(struct page *), GFP_KERNEL);
Christoph Hellwigf0edfea2018-08-24 10:31:08 +020047 if (!pages)
48 return NULL;
Christoph Hellwig515e5b62020-06-01 21:50:32 -070049 for (i = 0; i < count; i++)
Christoph Hellwigf0edfea2018-08-24 10:31:08 +020050 pages[i] = nth_page(page, i);
Christoph Hellwig515e5b62020-06-01 21:50:32 -070051 vaddr = vmap(pages, count, VM_DMA_COHERENT, prot);
Christoph Hellwigf0edfea2018-08-24 10:31:08 +020052 kfree(pages);
53
Christoph Hellwig515e5b62020-06-01 21:50:32 -070054 return vaddr;
Christoph Hellwigf0edfea2018-08-24 10:31:08 +020055}
56
57/*
58 * Unmaps a range previously mapped by dma_common_*_remap
59 */
Christoph Hellwig51231742019-08-30 08:51:01 +020060void dma_common_free_remap(void *cpu_addr, size_t size)
Christoph Hellwigf0edfea2018-08-24 10:31:08 +020061{
Andrey Smirnov2cf2aa62019-10-05 10:23:30 +020062 struct vm_struct *area = find_vm_area(cpu_addr);
Christoph Hellwigf0edfea2018-08-24 10:31:08 +020063
Andrey Smirnov2cf2aa62019-10-05 10:23:30 +020064 if (!area || area->flags != VM_DMA_COHERENT) {
Christoph Hellwigf0edfea2018-08-24 10:31:08 +020065 WARN(1, "trying to free invalid coherent area: %p\n", cpu_addr);
66 return;
67 }
68
Christoph Hellwigf0edfea2018-08-24 10:31:08 +020069 vunmap(cpu_addr);
70}