Thomas Gleixner | 457c899 | 2019-05-19 13:08:55 +0100 | [diff] [blame] | 1 | // SPDX-License-Identifier: GPL-2.0-only |
Chuck Lever | 094bb20 | 2005-08-11 16:25:20 -0400 | [diff] [blame] | 2 | /* |
| 3 | * linux/net/sunrpc/socklib.c |
| 4 | * |
| 5 | * Common socket helper routines for RPC client and server |
| 6 | * |
| 7 | * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de> |
| 8 | */ |
| 9 | |
Herbert Xu | fb286bb | 2005-11-10 13:01:24 -0800 | [diff] [blame] | 10 | #include <linux/compiler.h> |
| 11 | #include <linux/netdevice.h> |
Tejun Heo | 5a0e3ad | 2010-03-24 17:04:11 +0900 | [diff] [blame] | 12 | #include <linux/gfp.h> |
Herbert Xu | fb286bb | 2005-11-10 13:01:24 -0800 | [diff] [blame] | 13 | #include <linux/skbuff.h> |
Chuck Lever | 094bb20 | 2005-08-11 16:25:20 -0400 | [diff] [blame] | 14 | #include <linux/types.h> |
| 15 | #include <linux/pagemap.h> |
| 16 | #include <linux/udp.h> |
Chuck Lever | 9e55eef4 | 2020-03-02 15:19:54 -0500 | [diff] [blame] | 17 | #include <linux/sunrpc/msg_prot.h> |
Chuck Lever | 094bb20 | 2005-08-11 16:25:20 -0400 | [diff] [blame] | 18 | #include <linux/sunrpc/xdr.h> |
Paul Gortmaker | bc3b2d7 | 2011-07-15 11:47:34 -0400 | [diff] [blame] | 19 | #include <linux/export.h> |
Chuck Lever | 094bb20 | 2005-08-11 16:25:20 -0400 | [diff] [blame] | 20 | |
Chuck Lever | 9e55eef4 | 2020-03-02 15:19:54 -0500 | [diff] [blame] | 21 | #include "socklib.h" |
| 22 | |
/*
 * Helper structure for copying from an sk_buff.
 */
struct xdr_skb_reader {
	struct sk_buff	*skb;		/* source socket buffer */
	unsigned int	offset;		/* current read offset within skb */
	size_t		count;		/* bytes remaining to be copied */
	__wsum		csum;		/* running checksum of copied data */
};

/* Copy-step callback: moves up to @len bytes from @desc into @to,
 * returning the number of bytes actually copied. */
typedef size_t (*xdr_skb_read_actor)(struct xdr_skb_reader *desc, void *to,
				     size_t len);
Chuck Lever | 094bb20 | 2005-08-11 16:25:20 -0400 | [diff] [blame] | 35 | |
| 36 | /** |
Chuck Lever | 9d29231 | 2006-12-05 16:35:41 -0500 | [diff] [blame] | 37 | * xdr_skb_read_bits - copy some data bits from skb to internal buffer |
Chuck Lever | 094bb20 | 2005-08-11 16:25:20 -0400 | [diff] [blame] | 38 | * @desc: sk_buff copy helper |
| 39 | * @to: copy destination |
| 40 | * @len: number of bytes to copy |
| 41 | * |
| 42 | * Possibly called several times to iterate over an sk_buff and copy |
| 43 | * data out of it. |
| 44 | */ |
Trond Myklebust | 550aebf | 2018-09-14 14:32:45 -0400 | [diff] [blame] | 45 | static size_t |
| 46 | xdr_skb_read_bits(struct xdr_skb_reader *desc, void *to, size_t len) |
Chuck Lever | 094bb20 | 2005-08-11 16:25:20 -0400 | [diff] [blame] | 47 | { |
| 48 | if (len > desc->count) |
| 49 | len = desc->count; |
Chuck Lever | 9d29231 | 2006-12-05 16:35:41 -0500 | [diff] [blame] | 50 | if (unlikely(skb_copy_bits(desc->skb, desc->offset, to, len))) |
Chuck Lever | 094bb20 | 2005-08-11 16:25:20 -0400 | [diff] [blame] | 51 | return 0; |
| 52 | desc->count -= len; |
| 53 | desc->offset += len; |
| 54 | return len; |
| 55 | } |
| 56 | |
| 57 | /** |
Chuck Lever | 9d29231 | 2006-12-05 16:35:41 -0500 | [diff] [blame] | 58 | * xdr_skb_read_and_csum_bits - copy and checksum from skb to buffer |
Chuck Lever | 094bb20 | 2005-08-11 16:25:20 -0400 | [diff] [blame] | 59 | * @desc: sk_buff copy helper |
| 60 | * @to: copy destination |
| 61 | * @len: number of bytes to copy |
| 62 | * |
| 63 | * Same as skb_read_bits, but calculate a checksum at the same time. |
| 64 | */ |
Chuck Lever | dd45647 | 2006-12-05 16:35:44 -0500 | [diff] [blame] | 65 | static size_t xdr_skb_read_and_csum_bits(struct xdr_skb_reader *desc, void *to, size_t len) |
Chuck Lever | 094bb20 | 2005-08-11 16:25:20 -0400 | [diff] [blame] | 66 | { |
Al Viro | 5f92a73 | 2006-11-14 21:36:54 -0800 | [diff] [blame] | 67 | unsigned int pos; |
| 68 | __wsum csum2; |
Chuck Lever | 094bb20 | 2005-08-11 16:25:20 -0400 | [diff] [blame] | 69 | |
| 70 | if (len > desc->count) |
| 71 | len = desc->count; |
| 72 | pos = desc->offset; |
Al Viro | 8d5930d | 2020-07-10 20:07:10 -0400 | [diff] [blame] | 73 | csum2 = skb_copy_and_csum_bits(desc->skb, pos, to, len); |
Chuck Lever | 094bb20 | 2005-08-11 16:25:20 -0400 | [diff] [blame] | 74 | desc->csum = csum_block_add(desc->csum, csum2, pos); |
| 75 | desc->count -= len; |
| 76 | desc->offset += len; |
| 77 | return len; |
| 78 | } |
| 79 | |
/**
 * xdr_partial_copy_from_skb - copy data out of an skb
 * @xdr: target XDR buffer
 * @base: starting offset
 * @desc: sk_buff copy helper
 * @copy_actor: virtual method for copying data
 *
 * Walks @xdr's head iovec, page array and tail iovec in turn, skipping
 * the first @base bytes, and fills each region via @copy_actor until
 * either the buffer or desc->count is exhausted.  Returns the number of
 * bytes copied, or -ENOMEM if a sparse page could not be allocated
 * before any data was copied.
 */
static ssize_t
xdr_partial_copy_from_skb(struct xdr_buf *xdr, unsigned int base, struct xdr_skb_reader *desc, xdr_skb_read_actor copy_actor)
{
	struct page **ppage = xdr->pages;
	unsigned int len, pglen = xdr->page_len;
	ssize_t copied = 0;
	size_t ret;

	/* Fill the head iovec first, or skip past it entirely. */
	len = xdr->head[0].iov_len;
	if (base < len) {
		len -= base;
		ret = copy_actor(desc, (char *)xdr->head[0].iov_base + base, len);
		copied += ret;
		/* Stop on a short copy or when the skb data is exhausted. */
		if (ret != len || !desc->count)
			goto out;
		base = 0;
	} else
		base -= len;

	if (unlikely(pglen == 0))
		goto copy_tail;
	if (unlikely(base >= pglen)) {
		/* @base skips the whole page area; carry it into the tail. */
		base -= pglen;
		goto copy_tail;
	}
	if (base || xdr->page_base) {
		/* Locate the first page and the in-page offset to start at. */
		pglen -= base;
		base += xdr->page_base;
		ppage += base >> PAGE_SHIFT;
		base &= ~PAGE_MASK;
	}
	do {
		char *kaddr;

		/* ACL likes to be lazy in allocating pages - ACLs
		 * are small by default but can get huge. */
		if ((xdr->flags & XDRBUF_SPARSE_PAGES) && *ppage == NULL) {
			*ppage = alloc_page(GFP_NOWAIT | __GFP_NOWARN);
			if (unlikely(*ppage == NULL)) {
				/* Report -ENOMEM only if nothing was copied. */
				if (copied == 0)
					copied = -ENOMEM;
				goto out;
			}
		}

		len = PAGE_SIZE;
		kaddr = kmap_atomic(*ppage);
		if (base) {
			/* First page only: honour the in-page offset. */
			len -= base;
			if (pglen < len)
				len = pglen;
			ret = copy_actor(desc, kaddr + base, len);
			base = 0;
		} else {
			if (pglen < len)
				len = pglen;
			ret = copy_actor(desc, kaddr, len);
		}
		flush_dcache_page(*ppage);
		kunmap_atomic(kaddr);
		copied += ret;
		if (ret != len || !desc->count)
			goto out;
		ppage++;
	} while ((pglen -= len) != 0);
copy_tail:
	/* Whatever remains goes into the tail iovec. */
	len = xdr->tail[0].iov_len;
	if (base < len)
		copied += copy_actor(desc, (char *)xdr->tail[0].iov_base + base, len - base);
out:
	return copied;
}
| 160 | |
/**
 * csum_partial_copy_to_xdr - checksum and copy data
 * @xdr: target XDR buffer
 * @skb: source skb
 *
 * We have set things up such that we perform the checksum of the UDP
 * packet in parallel with the copies into the RPC client iovec. -DaveM
 *
 * Returns 0 on success, -1 if the copy came up short or the checksum
 * did not verify.
 */
int csum_partial_copy_to_xdr(struct xdr_buf *xdr, struct sk_buff *skb)
{
	struct xdr_skb_reader	desc;

	desc.skb = skb;
	desc.offset = 0;
	desc.count = skb->len - desc.offset;

	/* Checksum already verified (e.g. by hardware): plain copy. */
	if (skb_csum_unnecessary(skb))
		goto no_checksum;

	/* desc.offset is 0, so this seeds desc.csum with skb->csum. */
	desc.csum = csum_partial(skb->data, desc.offset, skb->csum);
	if (xdr_partial_copy_from_skb(xdr, 0, &desc, xdr_skb_read_and_csum_bits) < 0)
		return -1;
	if (desc.offset != skb->len) {
		/* Fold in any trailing skb data that was not copied. */
		__wsum csum2;
		csum2 = skb_checksum(skb, desc.offset, skb->len - desc.offset, 0);
		desc.csum = csum_block_add(desc.csum, csum2, desc.offset);
	}
	/* desc.count != 0 means the copy stopped before consuming the skb. */
	if (desc.count)
		return -1;
	/* Non-zero folded checksum: the packet data is corrupt. */
	if (csum_fold(desc.csum))
		return -1;
	/* Software disagreed with a hardware CHECKSUM_COMPLETE value. */
	if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
	    !skb->csum_complete_sw)
		netdev_rx_csum_fault(skb->dev, skb);
	return 0;
no_checksum:
	if (xdr_partial_copy_from_skb(xdr, 0, &desc, xdr_skb_read_bits) < 0)
		return -1;
	if (desc.count)
		return -1;
	return 0;
}
EXPORT_SYMBOL_GPL(csum_partial_copy_to_xdr);
Chuck Lever | 9e55eef4 | 2020-03-02 15:19:54 -0500 | [diff] [blame] | 204 | |
| 205 | static inline int xprt_sendmsg(struct socket *sock, struct msghdr *msg, |
| 206 | size_t seek) |
| 207 | { |
| 208 | if (seek) |
| 209 | iov_iter_advance(&msg->msg_iter, seek); |
| 210 | return sock_sendmsg(sock, msg); |
| 211 | } |
| 212 | |
| 213 | static int xprt_send_kvec(struct socket *sock, struct msghdr *msg, |
| 214 | struct kvec *vec, size_t seek) |
| 215 | { |
| 216 | iov_iter_kvec(&msg->msg_iter, WRITE, vec, 1, vec->iov_len); |
| 217 | return xprt_sendmsg(sock, msg, seek); |
| 218 | } |
| 219 | |
| 220 | static int xprt_send_pagedata(struct socket *sock, struct msghdr *msg, |
| 221 | struct xdr_buf *xdr, size_t base) |
| 222 | { |
| 223 | int err; |
| 224 | |
| 225 | err = xdr_alloc_bvec(xdr, GFP_KERNEL); |
| 226 | if (err < 0) |
| 227 | return err; |
| 228 | |
| 229 | iov_iter_bvec(&msg->msg_iter, WRITE, xdr->bvec, xdr_buf_pagecount(xdr), |
| 230 | xdr->page_len + xdr->page_base); |
| 231 | return xprt_sendmsg(sock, msg, base + xdr->page_base); |
| 232 | } |
| 233 | |
| 234 | /* Common case: |
| 235 | * - stream transport |
| 236 | * - sending from byte 0 of the message |
| 237 | * - the message is wholly contained in @xdr's head iovec |
| 238 | */ |
| 239 | static int xprt_send_rm_and_kvec(struct socket *sock, struct msghdr *msg, |
| 240 | rpc_fraghdr marker, struct kvec *vec, |
| 241 | size_t base) |
| 242 | { |
| 243 | struct kvec iov[2] = { |
| 244 | [0] = { |
| 245 | .iov_base = &marker, |
| 246 | .iov_len = sizeof(marker) |
| 247 | }, |
| 248 | [1] = *vec, |
| 249 | }; |
| 250 | size_t len = iov[0].iov_len + iov[1].iov_len; |
| 251 | |
| 252 | iov_iter_kvec(&msg->msg_iter, WRITE, iov, 2, len); |
| 253 | return xprt_sendmsg(sock, msg, base); |
| 254 | } |
| 255 | |
/**
 * xprt_sock_sendmsg - write an xdr_buf directly to a socket
 * @sock: open socket to send on
 * @msg: socket message metadata
 * @xdr: xdr_buf containing this request
 * @base: starting position in the buffer
 * @marker: stream record marker field
 * @sent_p: return the total number of bytes successfully queued for sending
 *
 * Return values:
 *   On success, returns zero and fills in @sent_p.
 *   %-ENOTSOCK if @sock is not a struct socket.
 */
int xprt_sock_sendmsg(struct socket *sock, struct msghdr *msg,
		      struct xdr_buf *xdr, unsigned int base,
		      rpc_fraghdr marker, unsigned int *sent_p)
{
	/* A zero @marker means "no record marker" (datagram transport). */
	unsigned int rmsize = marker ? sizeof(marker) : 0;
	unsigned int remainder = rmsize + xdr->len - base;
	unsigned int want;
	int err = 0;

	*sent_p = 0;

	if (unlikely(!sock))
		return -ENOTSOCK;

	/* MSG_MORE stays set until the final chunk is being queued. */
	msg->msg_flags |= MSG_MORE;
	/* Segment 1: record marker (if any) plus the head iovec. */
	want = xdr->head[0].iov_len + rmsize;
	if (base < want) {
		unsigned int len = want - base;

		remainder -= len;
		if (remainder == 0)
			msg->msg_flags &= ~MSG_MORE;
		if (rmsize)
			err = xprt_send_rm_and_kvec(sock, msg, marker,
						    &xdr->head[0], base);
		else
			err = xprt_send_kvec(sock, msg, &xdr->head[0], base);
		/* Negative err (error) or short send also takes this exit. */
		if (remainder == 0 || err != len)
			goto out;
		*sent_p += err;
		base = 0;
	} else {
		base -= want;
	}

	/* Segment 2: the page data. */
	if (base < xdr->page_len) {
		unsigned int len = xdr->page_len - base;

		remainder -= len;
		if (remainder == 0)
			msg->msg_flags &= ~MSG_MORE;
		err = xprt_send_pagedata(sock, msg, xdr, base);
		if (remainder == 0 || err != len)
			goto out;
		*sent_p += err;
		base = 0;
	} else {
		base -= xdr->page_len;
	}

	/* Segment 3: the tail iovec; nothing left to send past it. */
	if (base >= xdr->tail[0].iov_len)
		return 0;
	msg->msg_flags &= ~MSG_MORE;
	err = xprt_send_kvec(sock, msg, &xdr->tail[0], base);
out:
	/* A positive err is a (possibly partial) byte count, not an error. */
	if (err > 0) {
		*sent_p += err;
		err = 0;
	}
	return err;
}