/* SPDX-License-Identifier: GPL-2.0 */

#ifndef BTRFS_MISC_H
#define BTRFS_MISC_H

#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/math64.h>
#include <asm/div64.h>
#include <linux/rbtree.h>

#define in_range(b, first, len) ((b) >= (first) && (b) < (first) + (len))
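
/*
 * The check is half-open: 'first + len' itself is excluded. A minimal
 * sketch of the semantics (the values below are illustrative only):
 *
 *	in_range(4096, 4096, 4096)	-> true  (start of the range)
 *	in_range(8191, 4096, 4096)	-> true  (last byte of the range)
 *	in_range(8192, 4096, 4096)	-> false (one past the end)
 */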

static inline void cond_wake_up(struct wait_queue_head *wq)
{
	/*
	 * This implies a full smp_mb() barrier, see the comments for
	 * waitqueue_active() for why.
	 */
	if (wq_has_sleeper(wq))
		wake_up(wq);
}
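
/*
 * Typical use (a minimal sketch; 'pending' and 'wq' are hypothetical):
 * the waker updates the condition first and then calls cond_wake_up(),
 * relying on the barrier in wq_has_sleeper() to order the store against
 * the sleeper check.
 *
 *	waker:				waiter:
 *	pending = 0;			wait_event(wq, pending == 0);
 *	cond_wake_up(&wq);
 */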

static inline void cond_wake_up_nomb(struct wait_queue_head *wq)
{
	/*
	 * Special case for conditional wakeup where the barrier required
	 * for waitqueue_active() is implied by some of the preceding code,
	 * e.g. one of the value-returning atomic operations
	 * (atomic_dec_return(), ...), or an unlock/lock sequence, etc.
	 */
	if (waitqueue_active(wq))
		wake_up(wq);
}
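
/*
 * A minimal sketch of when the _nomb variant is safe (the refcount and
 * waitqueue below are hypothetical): a value-returning atomic RMW is
 * fully ordered and already provides the barrier that
 * waitqueue_active() needs.
 *
 *	if (atomic_dec_return(&ref->count) == 0)
 *		cond_wake_up_nomb(&ref->wait);
 */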

static inline u64 div_factor(u64 num, int factor)
{
	if (factor == 10)
		return num;
	num *= factor;
	return div_u64(num, 10);
}
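
/*
 * div_factor() scales by factor/10, i.e. 'factor' is in tenths; factor
 * 10 is the identity and skips the division. E.g. (values are
 * illustrative): div_factor(1000, 8) == 800, i.e. 80% of 1000.
 */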

static inline u64 div_factor_fine(u64 num, int factor)
{
	if (factor == 100)
		return num;
	num *= factor;
	return div_u64(num, 100);
}
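
/*
 * Same idea with 'factor' as a percentage. E.g. (illustrative):
 * div_factor_fine(1000, 95) == 950, and factor 100 returns the input
 * unchanged.
 */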

/* Copy of is_power_of_2() that is 64bit safe */
static inline bool is_power_of_two_u64(u64 n)
{
	return n != 0 && (n & (n - 1)) == 0;
}

static inline bool has_single_bit_set(u64 n)
{
	return is_power_of_two_u64(n);
}
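
/*
 * The n & (n - 1) trick clears the lowest set bit, so the result is
 * zero iff at most one bit was set; the n != 0 test rules out zero.
 * E.g. (illustrative): is_power_of_two_u64(1ULL << 40) is true even
 * above 32 bits, while is_power_of_two_u64(6) and
 * is_power_of_two_u64(0) are false.
 */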

/*
 * Simple bytenr-based rb_tree related structures
 *
 * Any structure that wants to use bytenr as a single search index should
 * start with these members.
 */
struct rb_simple_node {
	struct rb_node rb_node;
	u64 bytenr;
};

static inline struct rb_node *rb_simple_search(struct rb_root *root, u64 bytenr)
{
	struct rb_node *node = root->rb_node;
	struct rb_simple_node *entry;

	while (node) {
		entry = rb_entry(node, struct rb_simple_node, rb_node);

		if (bytenr < entry->bytenr)
			node = node->rb_left;
		else if (bytenr > entry->bytenr)
			node = node->rb_right;
		else
			return node;
	}
	return NULL;
}
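
/*
 * Usage sketch (the containing struct and tree below are hypothetical):
 * a user embeds struct rb_simple_node as its first member, so the
 * rb_node returned by rb_simple_search() can be converted back to the
 * containing structure with rb_entry().
 *
 *	struct my_block {
 *		struct rb_simple_node simple_node;
 *		...
 *	};
 *
 *	struct rb_node *node = rb_simple_search(&tree, bytenr);
 *
 *	if (node)
 *		block = rb_entry(node, struct my_block,
 *				 simple_node.rb_node);
 */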

static inline struct rb_node *rb_simple_insert(struct rb_root *root, u64 bytenr,
					       struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct rb_simple_node *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct rb_simple_node, rb_node);

		if (bytenr < entry->bytenr)
			p = &(*p)->rb_left;
		else if (bytenr > entry->bytenr)
			p = &(*p)->rb_right;
		else
			return parent;
	}

	rb_link_node(node, parent, p);
	rb_insert_color(node, root);
	return NULL;
}
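
/*
 * Insert sketch (names are hypothetical): the caller sets the bytenr
 * key before inserting; a non-NULL return means an entry with the same
 * bytenr already exists and nothing was inserted.
 *
 *	block->simple_node.bytenr = bytenr;
 *	exist = rb_simple_insert(&tree, bytenr,
 *				 &block->simple_node.rb_node);
 *	if (exist)
 *		... handle the duplicate ...
 */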

#endif