/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * net busy poll support
 * Copyright(c) 2013 Intel Corporation.
 *
 * Author: Eliezer Tamir
 *
 * Contact Information:
 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 */

#ifndef _LINUX_NET_BUSY_POLL_H
#define _LINUX_NET_BUSY_POLL_H

#include <linux/netdevice.h>
#include <linux/sched/clock.h>
#include <linux/sched/signal.h>
#include <net/ip.h>

/* 0 - Reserved to indicate value not set
 * 1..NR_CPUS - Reserved for sender_cpu
 * NR_CPUS+1..~0 - Region available for NAPI IDs
 */
#define MIN_NAPI_ID ((unsigned int)(NR_CPUS + 1))

#ifdef CONFIG_NET_RX_BUSY_POLL

struct napi_struct;
extern unsigned int sysctl_net_busy_read __read_mostly;
extern unsigned int sysctl_net_busy_poll __read_mostly;

static inline bool net_busy_loop_on(void)
{
	return sysctl_net_busy_poll;
}

static inline bool sk_can_busy_loop(const struct sock *sk)
{
	return sk->sk_ll_usec && !signal_pending(current);
}

bool sk_busy_loop_end(void *p, unsigned long start_time);

void napi_busy_loop(unsigned int napi_id,
		    bool (*loop_end)(void *, unsigned long),
		    void *loop_end_arg, bool prefer_busy_poll);

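/*
 * Illustrative sketch only (my_poll_done() and struct my_ctx are
 * hypothetical names, not part of this header): napi_busy_loop()
 * repeatedly polls the NAPI context identified by napi_id until
 * loop_end() returns true. A custom loop_end callback typically
 * combines a readiness check with busy_loop_timeout():
 *
 *	static bool my_poll_done(void *p, unsigned long start_time)
 *	{
 *		struct my_ctx *ctx = p;
 *
 *		return ctx->data_ready || busy_loop_timeout(start_time);
 *	}
 *
 *	napi_busy_loop(napi_id, my_poll_done, ctx, false);
 */
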
#else /* CONFIG_NET_RX_BUSY_POLL */
static inline bool net_busy_loop_on(void)
{
	return false;
}

static inline bool sk_can_busy_loop(struct sock *sk)
{
	return false;
}

#endif /* CONFIG_NET_RX_BUSY_POLL */

static inline unsigned long busy_loop_current_time(void)
{
#ifdef CONFIG_NET_RX_BUSY_POLL
	/* local_clock() is in ns; >> 10 divides by 1024, i.e. ~usec units */
	return (unsigned long)(local_clock() >> 10);
#else
	return 0;
#endif
}

/* in poll/select we use the global sysctl_net_busy_poll value */
static inline bool busy_loop_timeout(unsigned long start_time)
{
#ifdef CONFIG_NET_RX_BUSY_POLL
	unsigned long bp_usec = READ_ONCE(sysctl_net_busy_poll);

	if (bp_usec) {
		unsigned long end_time = start_time + bp_usec;
		unsigned long now = busy_loop_current_time();

		return time_after(now, end_time);
	}
#endif
	return true;
}

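/*
 * Worked example (values illustrative): with sysctl_net_busy_poll set
 * to 50 usecs and a loop started at start_time == 1000, polling
 * continues until busy_loop_current_time() passes 1050. time_after()
 * keeps the comparison correct across unsigned wraparound.
 */
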
static inline bool sk_busy_loop_timeout(struct sock *sk,
					unsigned long start_time)
{
#ifdef CONFIG_NET_RX_BUSY_POLL
	unsigned long bp_usec = READ_ONCE(sk->sk_ll_usec);

	if (bp_usec) {
		unsigned long end_time = start_time + bp_usec;
		unsigned long now = busy_loop_current_time();

		return time_after(now, end_time);
	}
#endif
	return true;
}

static inline void sk_busy_loop(struct sock *sk, int nonblock)
{
#ifdef CONFIG_NET_RX_BUSY_POLL
	unsigned int napi_id = READ_ONCE(sk->sk_napi_id);

	if (napi_id >= MIN_NAPI_ID)
		napi_busy_loop(napi_id, nonblock ? NULL : sk_busy_loop_end, sk,
			       READ_ONCE(sk->sk_prefer_busy_poll));
#endif
}

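/*
 * Illustrative sketch (not part of this header): a protocol's recvmsg
 * path typically busy-polls only while the receive queue is still
 * empty, e.g.:
 *
 *	if (sk_can_busy_loop(sk) &&
 *	    skb_queue_empty_lockless(&sk->sk_receive_queue))
 *		sk_busy_loop(sk, flags & MSG_DONTWAIT);
 */
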
/* used in the NIC receive handler to mark the skb */
static inline void skb_mark_napi_id(struct sk_buff *skb,
				    struct napi_struct *napi)
{
#ifdef CONFIG_NET_RX_BUSY_POLL
	/* If the skb was already marked with a valid NAPI ID, avoid overwriting
	 * it.
	 */
	if (skb->napi_id < MIN_NAPI_ID)
		skb->napi_id = napi->napi_id;
#endif
}

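/*
 * Sketch of a typical caller ("priv" is a hypothetical driver private
 * struct): a NAPI poll routine marks each received skb before handing
 * it up the stack:
 *
 *	skb_mark_napi_id(skb, &priv->napi);
 *	napi_gro_receive(&priv->napi, skb);
 */
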
/* used in the protocol handler to propagate the napi_id to the socket */
static inline void sk_mark_napi_id(struct sock *sk, const struct sk_buff *skb)
{
#ifdef CONFIG_NET_RX_BUSY_POLL
	WRITE_ONCE(sk->sk_napi_id, skb->napi_id);
#endif
	sk_rx_queue_set(sk, skb);
}

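/*
 * Illustrative only: a connected socket's receive path calls this for
 * every incoming skb, so sk->sk_napi_id always tracks the NAPI context
 * that delivered the most recent packet:
 *
 *	sk_mark_napi_id(sk, skb);
 */
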
/* variant used for unconnected sockets */
static inline void sk_mark_napi_id_once(struct sock *sk,
					const struct sk_buff *skb)
{
#ifdef CONFIG_NET_RX_BUSY_POLL
	if (!READ_ONCE(sk->sk_napi_id))
		WRITE_ONCE(sk->sk_napi_id, skb->napi_id);
#endif
}

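/*
 * Illustrative only: an unconnected (e.g. datagram) socket can receive
 * from many NAPI contexts, so its receive path records only the first
 * id seen rather than churning on every packet:
 *
 *	sk_mark_napi_id_once(sk, skb);
 */
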
#endif /* _LINUX_NET_BUSY_POLL_H */