// SPDX-License-Identifier: GPL-2.0
/*
 * Generic PCI resource mmap helper
 *
 * Copyright © 2017 Amazon.com, Inc. or its affiliates.
 *
 * Author: David Woodhouse <dwmw2@infradead.org>
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/pci.h>

#ifdef ARCH_GENERIC_PCI_MMAP_RESOURCE

/*
 * Modern setup: generic pci_mmap_resource_range(), and implement the legacy
 * pci_mmap_page_range() (if needed) as a wrapper around it.
 */

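/*
 * Sketch (illustrative only; the exact defines vary by architecture):
 * an arch opts in to this generic path from its <asm/pci.h> with
 *
 *	#define ARCH_GENERIC_PCI_MMAP_RESOURCE	1
 *
 * and additionally defines HAVE_PCI_MMAP if it also wants the legacy
 * /proc/bus/pci mmap interface, served by the wrapper below.
 */
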
#ifdef HAVE_PCI_MMAP
/*
 * The legacy /proc/bus/pci mmap path passes in a "user visible" offset
 * (see pci_resource_to_user()); convert it back into an offset within
 * the BAR before handing off to the generic helper.
 */
int pci_mmap_page_range(struct pci_dev *pdev, int bar,
			struct vm_area_struct *vma,
			enum pci_mmap_state mmap_state, int write_combine)
{
	resource_size_t start, end;

	pci_resource_to_user(pdev, bar, &pdev->resource[bar], &start, &end);

	/* Adjust vm_pgoff to be the offset within the resource */
	vma->vm_pgoff -= start >> PAGE_SHIFT;
	return pci_mmap_resource_range(pdev, bar, vma, mmap_state,
				       write_combine);
}
#endif

/*
 * With CONFIG_HAVE_IOREMAP_PROT, generic_access_phys() allows the
 * mapping to be accessed via ptrace() (and hence gdb), which needs
 * to read/write the MMIO pages on the task's behalf.
 */
static const struct vm_operations_struct pci_phys_vm_ops = {
#ifdef CONFIG_HAVE_IOREMAP_PROT
	.access = generic_access_phys,
#endif
};

int pci_mmap_resource_range(struct pci_dev *pdev, int bar,
			    struct vm_area_struct *vma,
			    enum pci_mmap_state mmap_state, int write_combine)
{
	unsigned long size;
	int ret;

	/* BAR size in pages, rounded up; refuse to map beyond the BAR */
	size = ((pci_resource_len(pdev, bar) - 1) >> PAGE_SHIFT) + 1;
	if (vma->vm_pgoff + vma_pages(vma) > size)
		return -EINVAL;

	if (write_combine)
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
	else
		vma->vm_page_prot = pgprot_device(vma->vm_page_prot);

	if (mmap_state == pci_mmap_io) {
		/* I/O port BARs: let the arch supply a mappable PFN */
		ret = pci_iobar_pfn(pdev, bar, vma);
		if (ret)
			return ret;
	} else
		vma->vm_pgoff += (pci_resource_start(pdev, bar) >> PAGE_SHIFT);

	vma->vm_ops = &pci_phys_vm_ops;

	return io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
				  vma->vm_end - vma->vm_start,
				  vma->vm_page_prot);
}
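
/*
 * Illustrative sketch (not part of this file): the sysfs "resource<N>"
 * mmap path is the typical caller of the helper above, roughly:
 *
 *	if (!pci_mmap_fits(pdev, bar, vma, PCI_MMAP_SYSFS))
 *		return -EINVAL;
 *	return pci_mmap_resource_range(pdev, bar, vma, mmap_state,
 *				       write_combine);
 *
 * where pci_mmap_fits() checks the request against the BAR size.
 */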

#elif defined(HAVE_PCI_MMAP) /* && !ARCH_GENERIC_PCI_MMAP_RESOURCE */

/*
 * Legacy setup: Implement pci_mmap_resource_range() as a wrapper around
 * the architecture's pci_mmap_page_range(), converting to "user visible"
 * addresses as necessary.
 */

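/*
 * On most architectures pci_resource_to_user() is an identity transform;
 * a few (sparc, for example) translate the resource into the "user
 * visible" address space used by /proc/bus/pci/.
 */
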
int pci_mmap_resource_range(struct pci_dev *pdev, int bar,
			    struct vm_area_struct *vma,
			    enum pci_mmap_state mmap_state, int write_combine)
{
	resource_size_t start, end;

	/*
	 * pci_mmap_page_range() expects the same kind of entry as coming
	 * from /proc/bus/pci/, which is a "user visible" value. If this
	 * is different from the resource itself, the arch will do the
	 * necessary fixup.
	 */
	pci_resource_to_user(pdev, bar, &pdev->resource[bar], &start, &end);
	vma->vm_pgoff += start >> PAGE_SHIFT;
	return pci_mmap_page_range(pdev, bar, vma, mmap_state, write_combine);
}
#endif
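
/*
 * Userspace view (illustrative; the device path and length are made up):
 * both entry points above ultimately serve an mmap() of a PCI BAR, e.g.
 * through sysfs:
 *
 *	int fd = open("/sys/bus/pci/devices/0000:00:1f.0/resource0", O_RDWR);
 *	void *bar = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED,
 *			 fd, 0);
 */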