/* SPDX-License-Identifier: GPL-2.0 */

#ifndef BTRFS_MISC_H
#define BTRFS_MISC_H

#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/math64.h>
#include <linux/rbtree.h>

#define in_range(b, first, len) ((b) >= (first) && (b) < (first) + (len))

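/*
 * Illustrative sketch (not part of the original header): in_range() checks
 * whether a value lies in a half-open range, e.g. whether a logical address
 * falls inside an extent.  The helper and parameter names below are
 * hypothetical, for demonstration only.
 */
static inline bool demo_addr_in_extent(u64 logical, u64 extent_start,
					u64 extent_len)
{
	/* True when extent_start <= logical < extent_start + extent_len. */
	return in_range(logical, extent_start, extent_len);
}
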
static inline void cond_wake_up(struct wait_queue_head *wq)
{
	/*
	 * This implies a full smp_mb barrier, see the comments of
	 * waitqueue_active() for why.
	 */
	if (wq_has_sleeper(wq))
		wake_up(wq);
}

static inline void cond_wake_up_nomb(struct wait_queue_head *wq)
{
	/*
	 * Special case for conditional wakeup where the barrier required for
	 * waitqueue_active() is implied by some of the preceding code, e.g.
	 * one of the atomic operations (atomic_dec_return(), ...) or an
	 * unlock/lock sequence.
	 */
	if (waitqueue_active(wq))
		wake_up(wq);
}
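
/*
 * Illustrative sketch (not part of the original header): a typical caller
 * updates shared state first and then issues the conditional wakeup, so the
 * wake_up() is skipped when nobody is waiting.  The names below
 * (demo_writers, demo_wait) are hypothetical.
 */
static inline void demo_end_write(atomic_t *demo_writers,
				  struct wait_queue_head *demo_wait)
{
	/*
	 * atomic_dec_return() implies a full barrier, so the cheaper
	 * no-barrier variant is sufficient here.
	 */
	if (atomic_dec_return(demo_writers) == 0)
		cond_wake_up_nomb(demo_wait);
}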

/* Scale @num by @factor tenths (factor 10 means 100%). */
static inline u64 div_factor(u64 num, int factor)
{
	if (factor == 10)
		return num;
	num *= factor;
	return div_u64(num, 10);
}

/* Scale @num by @factor percent (factor 100 means 100%). */
static inline u64 div_factor_fine(u64 num, int factor)
{
	if (factor == 100)
		return num;
	num *= factor;
	return div_u64(num, 100);
}

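/*
 * Illustrative sketch (not part of the original header): div_factor() scales
 * by tenths and div_factor_fine() by percent.  The threshold check below is
 * hypothetical.
 */
static inline bool demo_mostly_full(u64 used, u64 total)
{
	/* True when more than 95% of @total is used. */
	return used > div_factor_fine(total, 95);
}
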
/* Copy of is_power_of_2() that is 64-bit safe. */
static inline bool is_power_of_two_u64(u64 n)
{
	return n != 0 && (n & (n - 1)) == 0;
}

static inline bool has_single_bit_set(u64 n)
{
	return is_power_of_two_u64(n);
}

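/*
 * Illustrative sketch (not part of the original header): sizes such as
 * stripe lengths are expected to be powers of two, which the 64-bit safe
 * helper can verify.  The helper name is hypothetical.
 */
static inline bool demo_valid_stripe_len(u64 stripe_len)
{
	/* Reject zero and any value with more than one bit set. */
	return has_single_bit_set(stripe_len);
}
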
/*
 * Simple bytenr-based rb_tree helper structure.
 *
 * Any structure that wants to use bytenr as its single search index should
 * have these members at the start of the structure.
 */
struct rb_simple_node {
	struct rb_node rb_node;
	u64 bytenr;
};

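/*
 * Illustrative sketch (not part of the original header): a structure that
 * wants to be indexed by bytenr starts with the same two members, so the
 * rb_simple_* helpers and rb_entry() can be used on it directly.  The
 * struct below is hypothetical.
 */
struct demo_extent_entry {
	/* Must come first and match struct rb_simple_node's layout. */
	struct rb_node rb_node;
	u64 bytenr;

	/* Arbitrary per-entry payload. */
	u64 num_bytes;
};
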
/* Return the node matching @bytenr, or NULL if it is not in the tree. */
static inline struct rb_node *rb_simple_search(struct rb_root *root, u64 bytenr)
{
	struct rb_node *node = root->rb_node;
	struct rb_simple_node *entry;

	while (node) {
		entry = rb_entry(node, struct rb_simple_node, rb_node);

		if (bytenr < entry->bytenr)
			node = node->rb_left;
		else if (bytenr > entry->bytenr)
			node = node->rb_right;
		else
			return node;
	}
	return NULL;
}

/*
 * Insert @node keyed by @bytenr.  Return NULL on success, or the existing
 * node if @bytenr is already present (in which case nothing is inserted).
 */
static inline struct rb_node *rb_simple_insert(struct rb_root *root, u64 bytenr,
					       struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct rb_simple_node *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct rb_simple_node, rb_node);

		if (bytenr < entry->bytenr)
			p = &(*p)->rb_left;
		else if (bytenr > entry->bytenr)
			p = &(*p)->rb_right;
		else
			return parent;
	}

	rb_link_node(node, parent, p);
	rb_insert_color(node, root);
	return NULL;
}
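
/*
 * Illustrative sketch (not part of the original header): inserting and
 * looking up the hypothetical struct demo_extent_entry defined above.
 */
static inline struct demo_extent_entry *demo_lookup(struct rb_root *root,
						    u64 bytenr)
{
	struct rb_node *node = rb_simple_search(root, bytenr);

	/* rb_simple_search() returns NULL when @bytenr is not present. */
	return node ? rb_entry(node, struct demo_extent_entry, rb_node) : NULL;
}

static inline bool demo_insert(struct rb_root *root,
			       struct demo_extent_entry *entry)
{
	/* A non-NULL return means @entry->bytenr already exists. */
	return rb_simple_insert(root, entry->bytenr, &entry->rb_node) == NULL;
}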

#endif