linux-stable/fs/btrfs/misc.h
David Sterba 428c8e0310 btrfs: simplify percent calculation helpers, rename div_factor
The div_factor* helpers calculate a fraction or a percentage fraction. The
name is a bit confusing: we use them only for percentage calculations, and
there are two helpers.

There's a helper mult_frac for general fractions that tries to be accurate,
but here we multiply and divide by small numbers, so we can use the div_u64
helper.

Rename the div_factor* helpers and use a 1..100 percentage range; also drop
the case checking for percentage == 100, as it's never hit.

The conversions:

* div_factor calculates tenths, so the numbers need to be adjusted
* div_factor_fine is a direct replacement
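
A minimal sketch of a converted call site (variable names are illustrative,
not taken from the patch):

	/* before: two helpers, one in tenths, one in percent */
	thresh = div_factor(total, 9);		/* 9/10 ~ 90% */
	thresh = div_factor_fine(total, 90);	/* 90% */

	/* after: one helper, percentage in the 1..100 range */
	thresh = mult_perc(total, 90);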

Signed-off-by: David Sterba <dsterba@suse.com>
2022-12-05 18:00:48 +01:00


/* SPDX-License-Identifier: GPL-2.0 */

#ifndef BTRFS_MISC_H
#define BTRFS_MISC_H

#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/math64.h>
#include <linux/rbtree.h>

#define in_range(b, first, len) ((b) >= (first) && (b) < (first) + (len))
/*
 * Enumerate bits using enum autoincrement. Define the @name as the n-th bit.
 */
#define ENUM_BIT(name)						\
	__ ## name ## _BIT,					\
	name = (1U << __ ## name ## _BIT),			\
	__ ## name ## _SEQ = __ ## name ## _BIT
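
/*
 * Usage sketch (hypothetical flags, not part of this header): each ENUM_BIT
 * entry defines a power-of-two flag while the hidden _BIT/_SEQ enumerators
 * keep the auto-increment counting bit positions:
 *
 *	enum {
 *		ENUM_BIT(MY_FLAG_FOO),	MY_FLAG_FOO == 1U << 0
 *		ENUM_BIT(MY_FLAG_BAR),	MY_FLAG_BAR == 1U << 1
 *		ENUM_BIT(MY_FLAG_BAZ),	MY_FLAG_BAZ == 1U << 2
 *	};
 */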
static inline void cond_wake_up(struct wait_queue_head *wq)
{
	/*
	 * This implies a full smp_mb barrier, see the comments for
	 * waitqueue_active for why.
	 */
	if (wq_has_sleeper(wq))
		wake_up(wq);
}
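
/*
 * Waker-side pattern (illustrative only, not from this header): the condition
 * store comes before the wakeup, and the full barrier implied by
 * wq_has_sleeper() orders that store against the check of the wait queue:
 *
 *	done = 1;		condition the waiter tests
 *	cond_wake_up(&wq);	barrier + wake only if someone is waiting
 */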
static inline void cond_wake_up_nomb(struct wait_queue_head *wq)
{
	/*
	 * Special case for conditional wakeup where the barrier required for
	 * waitqueue_active is implied by some of the preceding code, e.g. one
	 * of the atomic operations (atomic_dec_and_return, ...), or an
	 * unlock/lock sequence, etc.
	 */
	if (waitqueue_active(wq))
		wake_up(wq);
}
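
/*
 * Caller-side sketch (illustrative, not from this header): a preceding fully
 * ordered atomic already provides the barrier, so the cheaper _nomb variant
 * is enough:
 *
 *	if (atomic_dec_and_test(&pending))	full barrier implied
 *		cond_wake_up_nomb(&wq);
 */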
static inline u64 mult_perc(u64 num, u32 percent)
{
	return div_u64(num * percent, 100);
}
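
/*
 * Example (illustrative values): mult_perc(num, 90) returns 90% of num,
 * e.g. mult_perc(1000, 90) == 900. Callers are expected to pass a percentage
 * in the 1..100 range.
 */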
/* Copy of is_power_of_two that is 64bit safe */
static inline bool is_power_of_two_u64(u64 n)
{
	return n != 0 && (n & (n - 1)) == 0;
}
static inline bool has_single_bit_set(u64 n)
{
	return is_power_of_two_u64(n);
}
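
/*
 * Quick check of the bit trick (illustrative): n & (n - 1) clears the lowest
 * set bit, so the result is zero exactly when n has a single bit set, e.g.
 * is_power_of_two_u64(64) is true while is_power_of_two_u64(96) is false.
 */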
/*
 * Simple bytenr-based rb_tree related structures
 *
 * Any structure that wants to use bytenr as a single search index should have
 * these members at the start of the structure.
 */
struct rb_simple_node {
	struct rb_node rb_node;
	u64 bytenr;
};
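
/*
 * Embedding sketch (hypothetical structure, not part of btrfs): the
 * rb_simple_node must be the first member so the rb_entry() casts below stay
 * valid:
 *
 *	struct my_extent_record {
 *		struct rb_simple_node node;	must be first
 *		u64 length;
 *	};
 */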
static inline struct rb_node *rb_simple_search(struct rb_root *root, u64 bytenr)
{
	struct rb_node *node = root->rb_node;
	struct rb_simple_node *entry;

	while (node) {
		entry = rb_entry(node, struct rb_simple_node, rb_node);

		if (bytenr < entry->bytenr)
			node = node->rb_left;
		else if (bytenr > entry->bytenr)
			node = node->rb_right;
		else
			return node;
	}
	return NULL;
}

/*
 * Search @root for an entry that starts at or comes after @bytenr.
 *
 * @root:	the root to search.
 * @bytenr:	bytenr to search from.
 *
 * Return the rb_node that starts at or after @bytenr. If there is no entry at
 * or after @bytenr, return NULL.
 */
static inline struct rb_node *rb_simple_search_first(struct rb_root *root,
						     u64 bytenr)
{
	struct rb_node *node = root->rb_node, *ret = NULL;
	struct rb_simple_node *entry, *ret_entry = NULL;

	while (node) {
		entry = rb_entry(node, struct rb_simple_node, rb_node);

		if (bytenr < entry->bytenr) {
			if (!ret || entry->bytenr < ret_entry->bytenr) {
				ret = node;
				ret_entry = entry;
			}
			node = node->rb_left;
		} else if (bytenr > entry->bytenr) {
			node = node->rb_right;
		} else {
			return node;
		}
	}

	return ret;
}
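
/*
 * Behaviour sketch (illustrative values): with entries at bytenr 100, 200 and
 * 300, rb_simple_search_first(root, 150) returns the node at 200, a query at
 * 200 returns the exact match, and a query at 350 returns NULL.
 */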
static inline struct rb_node *rb_simple_insert(struct rb_root *root, u64 bytenr,
					       struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct rb_simple_node *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct rb_simple_node, rb_node);

		if (bytenr < entry->bytenr)
			p = &(*p)->rb_left;
		else if (bytenr > entry->bytenr)
			p = &(*p)->rb_right;
		else
			return parent;
	}

	rb_link_node(node, parent, p);
	rb_insert_color(node, root);
	return NULL;
}
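
/*
 * Usage sketch (hypothetical caller, assuming the my_extent_record embedding
 * shown above): rb_simple_insert() returns NULL on success and the existing
 * node on a bytenr collision; lookups go through rb_simple_search():
 *
 *	rec->node.bytenr = bytenr;
 *	exist = rb_simple_insert(&tree, bytenr, &rec->node.rb_node);
 *	if (exist)
 *		handle the duplicate at the same bytenr
 *
 *	exist = rb_simple_search(&tree, bytenr);
 *	if (exist)
 *		rec = rb_entry(exist, struct my_extent_record, node.rb_node);
 */
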
#endif