/*
 * Copyright (c) 2014 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef IB_UMEM_ODP_H
#define IB_UMEM_ODP_H

#include <rdma/ib_umem.h>
#include <rdma/ib_verbs.h>

struct ib_umem_odp {
	struct ib_umem umem;
	struct mmu_interval_notifier notifier;
	struct pid *tgid;

	/*
	 * An array of the pages included in the on-demand paging umem.
	 * Indices of pages that are currently not mapped into the device will
	 * contain NULL.
	 */
	struct page **page_list;
	/*
	 * An array of the same size as page_list, with DMA addresses mapped
	 * for the pages in page_list. The lower two bits designate
	 * access permissions. See ODP_READ_ALLOWED_BIT and
	 * ODP_WRITE_ALLOWED_BIT.
	 */
	dma_addr_t *dma_list;
	/*
	 * The umem_mutex protects the page_list and dma_list fields of an ODP
	 * umem, allowing only a single thread to map/unmap pages. The mutex
	 * also protects access to the mmu notifier counters.
	 */
	struct mutex umem_mutex;
	void *private; /* for the HW driver to use. */

	int npages;

	/*
	 * An implicit odp umem cannot be DMA mapped, has 0 length, and serves
	 * only as an anchor for the driver to hold onto the per_mm. FIXME:
	 * This should be removed and drivers should work with the per_mm
	 * directly.
	 */
	bool is_implicit_odp;

	unsigned int page_shift;
};

static inline struct ib_umem_odp *to_ib_umem_odp(struct ib_umem *umem)
{
	return container_of(umem, struct ib_umem_odp, umem);
}
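
/*
 * Example (illustrative only, not part of this header's API): a driver
 * holding a struct ib_umem that was created with ODP enabled can recover
 * the ODP-specific state and inspect it under the umem_mutex. Here "idx"
 * is a hypothetical page index:
 *
 *	struct ib_umem_odp *odp = to_ib_umem_odp(umem);
 *
 *	mutex_lock(&odp->umem_mutex);
 *	if (odp->page_list[idx])
 *		... the page at idx is currently mapped into the device ...
 *	mutex_unlock(&odp->umem_mutex);
 */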

/* Returns the address of the first page of an ODP umem. */
static inline unsigned long ib_umem_start(struct ib_umem_odp *umem_odp)
{
	return umem_odp->notifier.interval_tree.start;
}

/* Returns the address of the page after the last one of an ODP umem. */
static inline unsigned long ib_umem_end(struct ib_umem_odp *umem_odp)
{
	return umem_odp->notifier.interval_tree.last + 1;
}

static inline size_t ib_umem_odp_num_pages(struct ib_umem_odp *umem_odp)
{
	return (ib_umem_end(umem_odp) - ib_umem_start(umem_odp)) >>
	       umem_odp->page_shift;
}
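
/*
 * Worked example (values are illustrative): for a umem with
 * ib_umem_start() == 0x10000, ib_umem_end() == 0x15000 and a 4K page
 * size (page_shift == 12), ib_umem_odp_num_pages() returns
 * (0x15000 - 0x10000) >> 12 == 5 pages.
 */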

/*
 * The lower 2 bits of the DMA address signal the R/W permissions for
 * the entry. To upgrade the permissions, provide the appropriate
 * bitmask to the map_dma_pages function.
 *
 * Be aware that upgrading a mapped address might result in a change of
 * the DMA address for the page.
 */
#define ODP_READ_ALLOWED_BIT  (1<<0ULL)
#define ODP_WRITE_ALLOWED_BIT (1<<1ULL)

#define ODP_DMA_ADDR_MASK (~(ODP_READ_ALLOWED_BIT | ODP_WRITE_ALLOWED_BIT))

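/*
 * Example (a sketch; "idx" is a hypothetical page index): splitting a
 * dma_list entry into the bus address handed to the HW and the
 * permission bits encoded in its low bits:
 *
 *	dma_addr_t entry = umem_odp->dma_list[idx];
 *	dma_addr_t addr = entry & ODP_DMA_ADDR_MASK;
 *	bool writable = entry & ODP_WRITE_ALLOWED_BIT;
 */
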
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING

struct ib_umem_odp *
ib_umem_odp_get(struct ib_device *device, unsigned long addr, size_t size,
		int access, const struct mmu_interval_notifier_ops *ops);
struct ib_umem_odp *ib_umem_odp_alloc_implicit(struct ib_device *device,
					       int access);
struct ib_umem_odp *
ib_umem_odp_alloc_child(struct ib_umem_odp *root_umem, unsigned long addr,
			size_t size,
			const struct mmu_interval_notifier_ops *ops);
void ib_umem_odp_release(struct ib_umem_odp *umem_odp);

int ib_umem_odp_map_dma_pages(struct ib_umem_odp *umem_odp, u64 start_offset,
			      u64 bcnt, u64 access_mask,
			      unsigned long current_seq);

void ib_umem_odp_unmap_dma_pages(struct ib_umem_odp *umem_odp, u64 start_offset,
				 u64 bound);

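/*
 * Typical call flow (a hedged sketch; error handling is elided and the
 * driver-side names device, my_ops, fault_addr, len and current_seq are
 * illustrative, not defined by this header):
 *
 *	struct ib_umem_odp *odp;
 *
 *	odp = ib_umem_odp_get(device, addr, size, IB_ACCESS_LOCAL_WRITE,
 *			      &my_ops);
 *
 *	On a HW page fault covering [fault_addr, fault_addr + len),
 *	populate and DMA map that range:
 *	ib_umem_odp_map_dma_pages(odp, fault_addr, len,
 *				  ODP_READ_ALLOWED_BIT |
 *				  ODP_WRITE_ALLOWED_BIT,
 *				  current_seq);
 *
 *	From the mmu_interval_notifier invalidation callback, tear down
 *	the affected range:
 *	ib_umem_odp_unmap_dma_pages(odp, start, end);
 *
 *	When the MR is destroyed:
 *	ib_umem_odp_release(odp);
 */
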
#else /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */

static inline struct ib_umem_odp *
ib_umem_odp_get(struct ib_device *device, unsigned long addr, size_t size,
		int access, const struct mmu_interval_notifier_ops *ops)
{
	return ERR_PTR(-EINVAL);
}

static inline void ib_umem_odp_release(struct ib_umem_odp *umem_odp) {}

#endif /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */

#endif /* IB_UMEM_ODP_H */