/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
 * Copyright (c) 2014 Mellanox Technologies. All rights reserved.
 */

#ifndef IB_UMEM_ODP_H
#define IB_UMEM_ODP_H

#include <rdma/ib_umem.h>
#include <rdma/ib_verbs.h>

struct ib_umem_odp {
	struct ib_umem umem;
	struct mmu_interval_notifier notifier;
	struct pid *tgid;

	/* An array of the pfns included in the on-demand paging umem. */
	unsigned long *pfn_list;

	/*
	 * An array with DMA addresses mapped for pfns in pfn_list.
	 * The lower two bits designate access permissions.
	 * See ODP_READ_ALLOWED_BIT and ODP_WRITE_ALLOWED_BIT.
	 */
	dma_addr_t *dma_list;
	/*
	 * The umem_mutex protects the pfn_list and dma_list fields of an ODP
	 * umem, allowing only a single thread to map/unmap pages. The mutex
	 * also protects access to the mmu notifier counters.
	 */
	struct mutex umem_mutex;
	void *private; /* for the HW driver to use. */

	int npages;

	/*
	 * An implicit odp umem cannot be DMA mapped, has 0 length, and serves
	 * only as an anchor for the driver to hold onto the per_mm. FIXME:
	 * This should be removed and drivers should work with the per_mm
	 * directly.
	 */
	bool is_implicit_odp;

	unsigned int page_shift;
};

static inline struct ib_umem_odp *to_ib_umem_odp(struct ib_umem *umem)
{
	return container_of(umem, struct ib_umem_odp, umem);
}

/* Returns the first page of an ODP umem. */
static inline unsigned long ib_umem_start(struct ib_umem_odp *umem_odp)
{
	return umem_odp->notifier.interval_tree.start;
}

/* Returns the address of the page after the last one of an ODP umem. */
static inline unsigned long ib_umem_end(struct ib_umem_odp *umem_odp)
{
	return umem_odp->notifier.interval_tree.last + 1;
}

static inline size_t ib_umem_odp_num_pages(struct ib_umem_odp *umem_odp)
{
	return (ib_umem_end(umem_odp) - ib_umem_start(umem_odp)) >>
	       umem_odp->page_shift;
}
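
/*
 * Illustrative worked example (not part of the API): for a umem whose
 * interval tree spans [0x10000, 0x13fff] with page_shift == 12,
 * ib_umem_start() returns 0x10000, ib_umem_end() returns 0x14000, and
 * ib_umem_odp_num_pages() returns (0x14000 - 0x10000) >> 12 == 4.
 */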

/*
 * The lower 2 bits of each DMA address encode the R/W permissions for
 * the entry. To upgrade the permissions, pass the appropriate bitmask
 * to ib_umem_odp_map_dma_and_lock().
 *
 * Be aware that upgrading a mapped address may change the DMA address
 * of the page.
 */
#define ODP_READ_ALLOWED_BIT  (1ULL << 0)
#define ODP_WRITE_ALLOWED_BIT (1ULL << 1)

#define ODP_DMA_ADDR_MASK (~(ODP_READ_ALLOWED_BIT | ODP_WRITE_ALLOWED_BIT))
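
/*
 * Illustrative sketch (not part of the API): splitting a dma_list entry
 * into its DMA address and permission bits. The variable names are
 * hypothetical.
 *
 *	dma_addr_t entry = umem_odp->dma_list[idx];
 *	dma_addr_t addr  = entry & ODP_DMA_ADDR_MASK;
 *	bool writable    = entry & ODP_WRITE_ALLOWED_BIT;
 *
 * Callers must hold umem_odp->umem_mutex while reading dma_list.
 */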

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING

struct ib_umem_odp *
ib_umem_odp_get(struct ib_device *device, unsigned long addr, size_t size,
		int access, const struct mmu_interval_notifier_ops *ops);
struct ib_umem_odp *ib_umem_odp_alloc_implicit(struct ib_device *device,
					       int access);
struct ib_umem_odp *
ib_umem_odp_alloc_child(struct ib_umem_odp *root_umem, unsigned long addr,
			size_t size,
			const struct mmu_interval_notifier_ops *ops);
void ib_umem_odp_release(struct ib_umem_odp *umem_odp);
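
/*
 * Illustrative lifecycle sketch (an assumption based on the declarations
 * above, not a definitive driver implementation): a driver registering an
 * ODP MR typically creates the umem with ib_umem_odp_get() and tears it
 * down with ib_umem_odp_release(). The notifier ops name is hypothetical.
 *
 *	struct ib_umem_odp *odp;
 *
 *	odp = ib_umem_odp_get(device, addr, size, access, &my_notifier_ops);
 *	if (IS_ERR(odp))
 *		return PTR_ERR(odp);
 *	...
 *	ib_umem_odp_release(odp);
 */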

int ib_umem_odp_map_dma_and_lock(struct ib_umem_odp *umem_odp, u64 start_offset,
				 u64 bcnt, u64 access_mask, bool fault);

void ib_umem_odp_unmap_dma_pages(struct ib_umem_odp *umem_odp, u64 start_offset,
				 u64 bound);
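
/*
 * Illustrative fault-path sketch (assumptions inferred from the
 * declarations above, not a definitive implementation): map a range for
 * write access, program the HW, then drop the umem_mutex that the
 * "_and_lock" call is assumed to hold on success.
 *
 *	int npages;
 *
 *	npages = ib_umem_odp_map_dma_and_lock(odp, offset, length,
 *					      ODP_WRITE_ALLOWED_BIT, true);
 *	if (npages < 0)
 *		return npages;
 *	// ... program the HW from odp->dma_list ...
 *	mutex_unlock(&odp->umem_mutex);
 */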

#else /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */

static inline struct ib_umem_odp *
ib_umem_odp_get(struct ib_device *device, unsigned long addr, size_t size,
		int access, const struct mmu_interval_notifier_ops *ops)
{
	return ERR_PTR(-EINVAL);
}

static inline void ib_umem_odp_release(struct ib_umem_odp *umem_odp) {}

#endif /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */

#endif /* IB_UMEM_ODP_H */