/*
 *
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 *   K. Y. Srinivasan <kys@microsoft.com>
 *
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/hyperv.h>
#include <linux/uio.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>

#include "hyperv_vmbus.h"
void hv_begin_read(struct hv_ring_buffer_info *rbi)
{
	rbi->ring_buffer->interrupt_mask = 1;
	virt_mb();
}

u32 hv_end_read(struct hv_ring_buffer_info *rbi)
{
	rbi->ring_buffer->interrupt_mask = 0;
	virt_mb();

	/*
	 * Now check to see if the ring buffer is still empty.
	 * If it is not, we raced and we need to process new
	 * incoming messages.
	 */
	return hv_get_bytes_to_read(rbi);
}
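
/*
 * Illustrative sketch (not part of this file's API): a hypothetical
 * channel-drain loop could pair the two helpers above to close the race
 * between masking interrupts and the arrival of a final packet. The
 * drain loop shape is an assumption for illustration only.
 *
 *	hv_begin_read(rbi);
 *	do {
 *		while (a full packet is available)
 *			process_packet();
 *	} while (hv_end_read(rbi));	// non-zero: we raced, drain again
 */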

/*
 * When we write to the ring buffer, check if the host needs to
 * be signaled. Here are the details of this protocol:
 *
 *	1. The host guarantees that while it is draining the
 *	   ring buffer, it will set the interrupt_mask to
 *	   indicate it does not need to be interrupted when
 *	   new data is placed.
 *
 *	2. The host guarantees that it will completely drain
 *	   the ring buffer before exiting the read loop. Further,
 *	   once the ring buffer is empty, it will clear the
 *	   interrupt_mask and re-check to see if new data has
 *	   arrived.
 *
 * KYS: Oct. 30, 2016:
 * It looks like Windows hosts have logic to deal with DOS attacks that
 * can be triggered if it receives interrupts when it is not expecting
 * the interrupt. The host expects interrupts only when the ring
 * transitions from empty to non-empty (or full to non-full on the guest
 * to host ring).
 * So, base the signaling decision solely on the ring state until the
 * host logic is fixed.
 */

static void hv_signal_on_write(u32 old_write, struct vmbus_channel *channel)
{
	struct hv_ring_buffer_info *rbi = &channel->outbound;

	virt_mb();
	if (READ_ONCE(rbi->ring_buffer->interrupt_mask))
		return;

	/* check interrupt_mask before read_index */
	virt_rmb();
	/*
	 * This is the only case we need to signal: when the
	 * ring transitions from being empty to non-empty.
	 */
	if (old_write == READ_ONCE(rbi->ring_buffer->read_index))
		vmbus_setevent(channel);
}
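
/*
 * A worked example of the check above (index values are assumed for
 * illustration): if read_index == write_index == 100 before a write,
 * the ring is empty and old_write == 100. After the copy, the writer
 * advances write_index past 100, while old_write still equals the
 * host's read_index (100): this write made the ring go from empty to
 * non-empty, so the host must be signaled. If instead the host had not
 * yet caught up (read_index == 40), old_write (100) != read_index and
 * no signal is needed.
 */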

/* Get the next write location for the specified ring buffer. */
static inline u32
hv_get_next_write_location(struct hv_ring_buffer_info *ring_info)
{
	u32 next = ring_info->ring_buffer->write_index;

	return next;
}

/* Set the next write location for the specified ring buffer. */
static inline void
hv_set_next_write_location(struct hv_ring_buffer_info *ring_info,
			   u32 next_write_location)
{
	ring_info->ring_buffer->write_index = next_write_location;
}

/* Get the next read location for the specified ring buffer. */
static inline u32
hv_get_next_read_location(struct hv_ring_buffer_info *ring_info)
{
	u32 next = ring_info->ring_buffer->read_index;

	return next;
}

/*
 * Get the next read location + offset for the specified ring buffer.
 * This allows the caller to skip over already-consumed data, wrapping
 * around the end of the ring's data area if necessary.
 */
static inline u32
hv_get_next_readlocation_withoffset(struct hv_ring_buffer_info *ring_info,
				    u32 offset)
{
	u32 next = ring_info->ring_buffer->read_index;

	next += offset;
	next %= ring_info->ring_datasize;

	return next;
}
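
/*
 * Worked example of the wrap-around arithmetic above (values are
 * assumptions for illustration): with ring_datasize = 16384,
 * read_index = 16000 and offset = 1024, next = 17024 % 16384 = 640,
 * i.e. the read location wraps past the end of the data area and
 * continues 640 bytes from its start.
 */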

/* Set the next read location for the specified ring buffer. */
static inline void
hv_set_next_read_location(struct hv_ring_buffer_info *ring_info,
			  u32 next_read_location)
{
	ring_info->ring_buffer->read_index = next_read_location;
	ring_info->priv_read_index = next_read_location;
}

/* Get the size of the ring buffer's data area. */
static inline u32
hv_get_ring_buffersize(struct hv_ring_buffer_info *ring_info)
{
	return ring_info->ring_datasize;
}

/*
 * Get the read and write indices of the specified ring buffer, packed
 * as a single u64: the write index occupies the upper 32 bits; the
 * read index (reported as 0 here) would occupy the lower 32 bits.
 */
static inline u64
hv_get_ring_bufferindices(struct hv_ring_buffer_info *ring_info)
{
	return (u64)ring_info->ring_buffer->write_index << 32;
}
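
/*
 * Layout sketch for the packed value above (write_index = 0x1a00 is an
 * assumed value): the helper returns 0x1a00ULL << 32 =
 * 0x00001a0000000000. hv_ringbuffer_write() appends this u64 after
 * each packet ("previous packet start"), so a reader can tell where
 * the preceding write ended.
 */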

/*
 * Helper routine to copy data from the ring buffer into a destination
 * buffer. Assumes there is enough room in the ring. Wrap-around of the
 * source is handled implicitly by the double virtual mapping of the
 * ring's data pages (see hv_ringbuffer_init()), so a single memcpy
 * suffices; only the returned offset is reduced modulo the ring size.
 */
static u32 hv_copyfrom_ringbuffer(
	struct hv_ring_buffer_info *ring_info,
	void				*dest,
	u32				destlen,
	u32				start_read_offset)
{
	void *ring_buffer = hv_get_ring_buffer(ring_info);
	u32 ring_buffer_size = hv_get_ring_buffersize(ring_info);

	memcpy(dest, ring_buffer + start_read_offset, destlen);

	start_read_offset += destlen;
	start_read_offset %= ring_buffer_size;

	return start_read_offset;
}
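
/*
 * Illustrative sketch of why the single memcpy above is safe near the
 * end of the ring (a 4-page ring is an assumed size): the data pages
 * P1..P3 are mapped twice, virtually contiguous:
 *
 *	[hdr][P1][P2][P3][P1][P2][P3]
 *
 * A copy that starts near the end of the first P3 simply runs on into
 * the second mapping of P1, which aliases the same physical memory,
 * so no split copy is needed.
 */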

/*
 * Helper routine to copy data from a source buffer into the ring
 * buffer. Assumes there is enough room in the ring; wrap-around of the
 * destination is likewise handled by the double mapping.
 */
static u32 hv_copyto_ringbuffer(
	struct hv_ring_buffer_info	*ring_info,
	u32				start_write_offset,
	void				*src,
	u32				srclen)
{
	void *ring_buffer = hv_get_ring_buffer(ring_info);
	u32 ring_buffer_size = hv_get_ring_buffersize(ring_info);

	memcpy(ring_buffer + start_write_offset, src, srclen);

	start_write_offset += srclen;
	start_write_offset %= ring_buffer_size;

	return start_write_offset;
}

/* Get various debug metrics for the specified ring buffer. */
void hv_ringbuffer_get_debuginfo(struct hv_ring_buffer_info *ring_info,
				 struct hv_ring_buffer_debug_info *debug_info)
{
	u32 bytes_avail_towrite;
	u32 bytes_avail_toread;

	if (ring_info->ring_buffer) {
		hv_get_ringbuffer_availbytes(ring_info,
					     &bytes_avail_toread,
					     &bytes_avail_towrite);

		debug_info->bytes_avail_toread = bytes_avail_toread;
		debug_info->bytes_avail_towrite = bytes_avail_towrite;
		debug_info->current_read_index =
			ring_info->ring_buffer->read_index;
		debug_info->current_write_index =
			ring_info->ring_buffer->write_index;
		debug_info->current_interrupt_mask =
			ring_info->ring_buffer->interrupt_mask;
	}
}

/* Initialize the ring buffer. */
int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info,
		       struct page *pages, u32 page_cnt)
{
	int i;
	struct page **pages_wraparound;

	BUILD_BUG_ON((sizeof(struct hv_ring_buffer) != PAGE_SIZE));

	memset(ring_info, 0, sizeof(struct hv_ring_buffer_info));

	/*
	 * First page holds struct hv_ring_buffer, do wraparound mapping for
	 * the rest.
	 */
	pages_wraparound = kzalloc(sizeof(struct page *) * (page_cnt * 2 - 1),
				   GFP_KERNEL);
	if (!pages_wraparound)
		return -ENOMEM;

	pages_wraparound[0] = pages;
	for (i = 0; i < 2 * (page_cnt - 1); i++)
		pages_wraparound[i + 1] = &pages[i % (page_cnt - 1) + 1];

	ring_info->ring_buffer = (struct hv_ring_buffer *)
		vmap(pages_wraparound, page_cnt * 2 - 1, VM_MAP, PAGE_KERNEL);

	kfree(pages_wraparound);

	if (!ring_info->ring_buffer)
		return -ENOMEM;

	ring_info->ring_buffer->read_index =
		ring_info->ring_buffer->write_index = 0;

	/* Set the feature bit for enabling flow control. */
	ring_info->ring_buffer->feature_bits.value = 1;

	ring_info->ring_size = page_cnt << PAGE_SHIFT;
	ring_info->ring_datasize = ring_info->ring_size -
		sizeof(struct hv_ring_buffer);

	spin_lock_init(&ring_info->ring_lock);

	return 0;
}
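
/*
 * Worked example of the wraparound mapping above (page_cnt = 4 is an
 * assumed value): the loop runs for i = 0..5, with i % 3 + 1 yielding
 * 1, 2, 3, 1, 2, 3, so the 7-entry vmap array becomes
 *
 *	{ P0, P1, P2, P3, P1, P2, P3 }
 *
 * P0 holds the struct hv_ring_buffer header; the data pages P1..P3
 * appear twice, giving the virtually contiguous double mapping that
 * hv_copyto_ringbuffer()/hv_copyfrom_ringbuffer() rely on.
 */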

/* Cleanup the ring buffer. */
void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info)
{
	vunmap(ring_info->ring_buffer);
}

/* Write to the ring buffer. */
int hv_ringbuffer_write(struct vmbus_channel *channel,
			struct kvec *kv_list, u32 kv_count)
{
	int i = 0;
	u32 bytes_avail_towrite;
	u32 totalbytes_towrite = 0;
	u32 next_write_location;
	u32 old_write;
	u64 prev_indices = 0;
	unsigned long flags = 0;
	struct hv_ring_buffer_info *outring_info = &channel->outbound;

	if (channel->rescind)
		return -ENODEV;

	for (i = 0; i < kv_count; i++)
		totalbytes_towrite += kv_list[i].iov_len;

	totalbytes_towrite += sizeof(u64);

	spin_lock_irqsave(&outring_info->ring_lock, flags);

	bytes_avail_towrite = hv_get_bytes_to_write(outring_info);

	/*
	 * If there is only room for exactly the packet, treat the ring
	 * as full: writing it would make read index == write index,
	 * which the next time around would be mistaken for an empty
	 * ring buffer.
	 */
	if (bytes_avail_towrite <= totalbytes_towrite) {
		spin_unlock_irqrestore(&outring_info->ring_lock, flags);
		return -EAGAIN;
	}

	/* Write to the ring buffer */
	next_write_location = hv_get_next_write_location(outring_info);

	old_write = next_write_location;

	for (i = 0; i < kv_count; i++) {
		next_write_location = hv_copyto_ringbuffer(outring_info,
							   next_write_location,
							   kv_list[i].iov_base,
							   kv_list[i].iov_len);
	}

	/* Set previous packet start */
	prev_indices = hv_get_ring_bufferindices(outring_info);

	next_write_location = hv_copyto_ringbuffer(outring_info,
						   next_write_location,
						   &prev_indices,
						   sizeof(u64));

	/* Issue a full memory barrier before updating the write index */
	virt_mb();

	/* Now, update the write location */
	hv_set_next_write_location(outring_info, next_write_location);

	spin_unlock_irqrestore(&outring_info->ring_lock, flags);

	hv_signal_on_write(old_write, channel);

	if (channel->rescind)
		return -ENODEV;

	return 0;
}
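
/*
 * Illustrative usage sketch (an assumption, not this file's API): a
 * send path could describe a packet as a kvec array, header first,
 * and submit it in one call; padding to 8-byte alignment is omitted
 * here for brevity. The trailing u64 of previous indices is appended
 * internally, so callers supply only the packet itself.
 *
 *	struct kvec bufferlist[2];
 *
 *	bufferlist[0].iov_base = &desc;		// vmpacket_descriptor
 *	bufferlist[0].iov_len  = sizeof(desc);
 *	bufferlist[1].iov_base = payload;
 *	bufferlist[1].iov_len  = payload_len;
 *
 *	ret = hv_ringbuffer_write(channel, bufferlist, 2);
 */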

int hv_ringbuffer_read(struct vmbus_channel *channel,
		       void *buffer, u32 buflen, u32 *buffer_actual_len,
		       u64 *requestid, bool raw)
{
	u32 bytes_avail_toread;
	u32 next_read_location = 0;
	u64 prev_indices = 0;
	struct vmpacket_descriptor desc;
	u32 offset;
	u32 packetlen;
	int ret = 0;
	struct hv_ring_buffer_info *inring_info = &channel->inbound;

	if (buflen == 0)
		return -EINVAL;

	*buffer_actual_len = 0;
	*requestid = 0;

	bytes_avail_toread = hv_get_bytes_to_read(inring_info);
	/* Make sure there is something to read */
	if (bytes_avail_toread < sizeof(desc)) {
		/*
		 * No error is set when there is not even a header;
		 * drivers are supposed to analyze buffer_actual_len.
		 */
		return ret;
	}

	init_cached_read_index(channel);
	next_read_location = hv_get_next_read_location(inring_info);
	next_read_location = hv_copyfrom_ringbuffer(inring_info, &desc,
						    sizeof(desc),
						    next_read_location);

	offset = raw ? 0 : (desc.offset8 << 3);
	packetlen = (desc.len8 << 3) - offset;
	*buffer_actual_len = packetlen;
	*requestid = desc.trans_id;

	if (bytes_avail_toread < packetlen + offset)
		return -EAGAIN;

	if (packetlen > buflen)
		return -ENOBUFS;

	next_read_location =
		hv_get_next_readlocation_withoffset(inring_info, offset);

	next_read_location = hv_copyfrom_ringbuffer(inring_info,
						    buffer,
						    packetlen,
						    next_read_location);

	next_read_location = hv_copyfrom_ringbuffer(inring_info,
						    &prev_indices,
						    sizeof(u64),
						    next_read_location);

	/*
	 * Make sure all reads are done before we update the read index since
	 * the writer may start writing to the read area once the read index
	 * is updated.
	 */
	virt_mb();

	/* Update the read index */
	hv_set_next_read_location(inring_info, next_read_location);

	hv_signal_on_read(channel);

	return ret;
}
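
/*
 * Worked example of the packet parsing above (field values are assumed
 * for illustration): for a descriptor with offset8 = 2 and len8 = 10,
 * the payload starts 2 * 8 = 16 bytes into the packet and the whole
 * packet spans 10 * 8 = 80 bytes, so packetlen = 80 - 16 = 64 payload
 * bytes are copied out; in raw mode (offset = 0) the descriptor and
 * payload are returned together. The trailing u64 of prev_indices is
 * then read past so the updated read index lands on the next packet.
 */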