/*
 * Copyright (c) 2015 Oracle. All rights reserved.
 * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
 */

/* No-op chunk preparation. All client memory is pre-registered.
 * Sometimes referred to as ALLPHYSICAL mode.
 *
 * Physical registration is simple because all client memory is
 * pre-registered and never deregistered. This mode is good for
 * adapter bring up, but is considered not safe: the server is
 * trusted not to abuse its access to client memory not involved
 * in RDMA I/O.
 */

#include "xprt_rdma.h"

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_TRANS
#endif

Chuck Lever3968cb52015-03-30 14:35:26 -040022static int
23physical_op_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep,
24 struct rpcrdma_create_data_internal *cdata)
25{
Chuck Leverd1ed8572015-08-03 13:03:30 -040026 struct ib_device_attr *devattr = &ia->ri_devattr;
27 struct ib_mr *mr;
28
29 /* Obtain an rkey to use for RPC data payloads.
30 */
31 mr = ib_get_dma_mr(ia->ri_pd,
32 IB_ACCESS_LOCAL_WRITE |
33 IB_ACCESS_REMOTE_WRITE |
34 IB_ACCESS_REMOTE_READ);
35 if (IS_ERR(mr)) {
36 pr_err("%s: ib_get_dma_mr for failed with %lX\n",
37 __func__, PTR_ERR(mr));
38 return -ENOMEM;
39 }
40 ia->ri_dma_mr = mr;
41
42 /* Obtain an lkey to use for regbufs.
43 */
44 if (devattr->device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY)
45 ia->ri_dma_lkey = ia->ri_device->local_dma_lkey;
46 else
47 ia->ri_dma_lkey = ia->ri_dma_mr->lkey;
48
Chuck Lever3968cb52015-03-30 14:35:26 -040049 return 0;
50}
51
Chuck Lever1c9351e2015-03-30 14:34:30 -040052/* PHYSICAL memory registration conveys one page per chunk segment.
53 */
54static size_t
55physical_op_maxpages(struct rpcrdma_xprt *r_xprt)
56{
57 return min_t(unsigned int, RPCRDMA_MAX_DATA_SEGS,
58 rpcrdma_max_segments(r_xprt));
59}
/* PHYSICAL mode needs no per-transport registration state;
 * this ->ro_init method is a no-op that always succeeds.
 */
static int
physical_op_init(struct rpcrdma_xprt *r_xprt)
{
	return 0;
}
66
Chuck Lever9c1b4d72015-03-30 14:34:39 -040067/* The client's physical memory is already exposed for
68 * remote access via RDMA READ or RDMA WRITE.
69 */
70static int
71physical_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
72 int nsegs, bool writing)
73{
74 struct rpcrdma_ia *ia = &r_xprt->rx_ia;
75
Chuck Lever89e0d1122015-05-26 11:51:56 -040076 rpcrdma_map_one(ia->ri_device, seg, rpcrdma_data_dir(writing));
Chuck Leverd1ed8572015-08-03 13:03:30 -040077 seg->mr_rkey = ia->ri_dma_mr->rkey;
Chuck Lever9c1b4d72015-03-30 14:34:39 -040078 seg->mr_base = seg->mr_dma;
79 seg->mr_nsegs = 1;
80 return 1;
81}
82
Chuck Lever6814bae2015-03-30 14:34:48 -040083/* Unmap a memory region, but leave it registered.
84 */
85static int
86physical_op_unmap(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg)
87{
Chuck Leverd6547882015-03-30 14:35:44 -040088 struct rpcrdma_ia *ia = &r_xprt->rx_ia;
89
Chuck Lever89e0d1122015-05-26 11:51:56 -040090 rpcrdma_unmap_one(ia->ri_device, seg);
Chuck Lever6814bae2015-03-30 14:34:48 -040091 return 1;
92}
/* Nothing to tear down: PHYSICAL mode allocates no per-buffer
 * registration resources.
 */
static void
physical_op_destroy(struct rpcrdma_buffer *buf)
{
}
98
Chuck Levera0ce85f2015-03-30 14:34:21 -040099const struct rpcrdma_memreg_ops rpcrdma_physical_memreg_ops = {
Chuck Lever9c1b4d72015-03-30 14:34:39 -0400100 .ro_map = physical_op_map,
Chuck Lever6814bae2015-03-30 14:34:48 -0400101 .ro_unmap = physical_op_unmap,
Chuck Lever3968cb52015-03-30 14:35:26 -0400102 .ro_open = physical_op_open,
Chuck Lever1c9351e2015-03-30 14:34:30 -0400103 .ro_maxpages = physical_op_maxpages,
Chuck Lever91e70e72015-03-30 14:34:58 -0400104 .ro_init = physical_op_init,
Chuck Lever4561f342015-03-30 14:35:17 -0400105 .ro_destroy = physical_op_destroy,
Chuck Levera0ce85f2015-03-30 14:34:21 -0400106 .ro_displayname = "physical",
107};