bcache: move macro btree() and btree_root() into btree.h
In order to accelerate bcache registration, the macros btree() and btree_root() will need to be referenced outside of btree.c. This patch moves them from btree.c into btree.h, together with the related function declarations, in preparation for the following changes.

Signed-off-by: Coly Li <colyli@suse.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
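For context, here is a minimal, hypothetical sketch (not part of this patch) of what a caller outside btree.c can do once the macros live in btree.h. struct btree_op, bch_btree_op_init(), the cache_set root and the locking helpers are existing bcache APIs; example_walk_btree() and bch_btree_example_recurse() are assumed names used only for illustration, since btree_root() token-pastes the bch_btree_ prefix onto its first argument. A matching sketch of the recurse callback itself follows the diff below.

#include "btree.h"

/*
 * Hypothetical callback: btree_root(example_recurse, c, &op) expands to
 * bch_btree_example_recurse(root, &op) with the root node locked.
 */
int bch_btree_example_recurse(struct btree *b, struct btree_op *op);

static int example_walk_btree(struct cache_set *c)
{
	struct btree_op op;

	/* write_lock_level of -1: take read locks at every level. */
	bch_btree_op_init(&op, -1);

	/*
	 * btree_root() locks the root, retries on -EINTR and releases the
	 * cannibalize lock, exactly as the existing callers in btree.c do.
	 */
	return btree_root(example_recurse, c, &op);
}

Note that op->lock, set via bch_btree_op_init(), is what btree() and btree_root() consult to decide between a read and a write lock at each level of the tree.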
Parent: 431d6e3eec
Commit: 253a99d95d
drivers/md/bcache/btree.c
@@ -101,64 +101,6 @@
 #define insert_lock(s, b)	((b)->level <= (s)->lock)
 
-/*
- * These macros are for recursing down the btree - they handle the details of
- * locking and looking up nodes in the cache for you. They're best treated as
- * mere syntax when reading code that uses them.
- *
- * op->lock determines whether we take a read or a write lock at a given depth.
- * If you've got a read lock and find that you need a write lock (i.e. you're
- * going to have to split), set op->lock and return -EINTR; btree_root() will
- * call you again and you'll have the correct lock.
- */
-
-/**
- * btree - recurse down the btree on a specified key
- * @fn: function to call, which will be passed the child node
- * @key: key to recurse on
- * @b: parent btree node
- * @op: pointer to struct btree_op
- */
-#define btree(fn, key, b, op, ...)					\
-({									\
-	int _r, l = (b)->level - 1;					\
-	bool _w = l <= (op)->lock;					\
-	struct btree *_child = bch_btree_node_get((b)->c, op, key, l,	\
-						  _w, b);		\
-	if (!IS_ERR(_child)) {						\
-		_r = bch_btree_ ## fn(_child, op, ##__VA_ARGS__);	\
-		rw_unlock(_w, _child);					\
-	} else								\
-		_r = PTR_ERR(_child);					\
-	_r;								\
-})
-
-/**
- * btree_root - call a function on the root of the btree
- * @fn: function to call, which will be passed the child node
- * @c: cache set
- * @op: pointer to struct btree_op
- */
-#define btree_root(fn, c, op, ...)					\
-({									\
-	int _r = -EINTR;						\
-	do {								\
-		struct btree *_b = (c)->root;				\
-		bool _w = insert_lock(op, _b);				\
-		rw_lock(_w, _b, _b->level);				\
-		if (_b == (c)->root &&					\
-		    _w == insert_lock(op, _b)) {			\
-			_r = bch_btree_ ## fn(_b, op, ##__VA_ARGS__);	\
-		}							\
-		rw_unlock(_w, _b);					\
-		bch_cannibalize_unlock(c);				\
-		if (_r == -EINTR)					\
-			schedule();					\
-	} while (_r == -EINTR);						\
-									\
-	finish_wait(&(c)->btree_cache_wait, &(op)->wait);		\
-	_r;								\
-})
-
 static inline struct bset *write_block(struct btree *b)
 {
@@ -2422,7 +2364,7 @@ int __bch_btree_map_nodes(struct btree_op *op, struct cache_set *c,
 	return btree_root(map_nodes_recurse, c, op, from, fn, flags);
 }
 
-static int bch_btree_map_keys_recurse(struct btree *b, struct btree_op *op,
+int bch_btree_map_keys_recurse(struct btree *b, struct btree_op *op,
 			       struct bkey *from, btree_map_keys_fn *fn,
 			       int flags)
 {
drivers/md/bcache/btree.h
@@ -260,6 +260,13 @@ void bch_initial_gc_finish(struct cache_set *c);
 void bch_moving_gc(struct cache_set *c);
 int bch_btree_check(struct cache_set *c);
 void bch_initial_mark_key(struct cache_set *c, int level, struct bkey *k);
+typedef int (btree_map_keys_fn)(struct btree_op *op, struct btree *b,
+				struct bkey *k);
+int bch_btree_map_keys_recurse(struct btree *b, struct btree_op *op,
+			       struct bkey *from, btree_map_keys_fn *fn,
+			       int flags);
+int bch_btree_map_keys(struct btree_op *op, struct cache_set *c,
+		       struct bkey *from, btree_map_keys_fn *fn, int flags);
 
 static inline void wake_up_gc(struct cache_set *c)
 {
@@ -284,6 +291,65 @@ static inline void force_wake_up_gc(struct cache_set *c)
 	wake_up_gc(c);
 }
 
+/*
+ * These macros are for recursing down the btree - they handle the details of
+ * locking and looking up nodes in the cache for you. They're best treated as
+ * mere syntax when reading code that uses them.
+ *
+ * op->lock determines whether we take a read or a write lock at a given depth.
+ * If you've got a read lock and find that you need a write lock (i.e. you're
+ * going to have to split), set op->lock and return -EINTR; btree_root() will
+ * call you again and you'll have the correct lock.
+ */
+
+/**
+ * btree - recurse down the btree on a specified key
+ * @fn: function to call, which will be passed the child node
+ * @key: key to recurse on
+ * @b: parent btree node
+ * @op: pointer to struct btree_op
+ */
+#define btree(fn, key, b, op, ...)					\
+({									\
+	int _r, l = (b)->level - 1;					\
+	bool _w = l <= (op)->lock;					\
+	struct btree *_child = bch_btree_node_get((b)->c, op, key, l,	\
+						  _w, b);		\
+	if (!IS_ERR(_child)) {						\
+		_r = bch_btree_ ## fn(_child, op, ##__VA_ARGS__);	\
+		rw_unlock(_w, _child);					\
+	} else								\
+		_r = PTR_ERR(_child);					\
+	_r;								\
+})
+
+/**
+ * btree_root - call a function on the root of the btree
+ * @fn: function to call, which will be passed the child node
+ * @c: cache set
+ * @op: pointer to struct btree_op
+ */
+#define btree_root(fn, c, op, ...)					\
+({									\
+	int _r = -EINTR;						\
+	do {								\
+		struct btree *_b = (c)->root;				\
+		bool _w = insert_lock(op, _b);				\
+		rw_lock(_w, _b, _b->level);				\
+		if (_b == (c)->root &&					\
+		    _w == insert_lock(op, _b)) {			\
+			_r = bch_btree_ ## fn(_b, op, ##__VA_ARGS__);	\
+		}							\
+		rw_unlock(_w, _b);					\
+		bch_cannibalize_unlock(c);				\
+		if (_r == -EINTR)					\
+			schedule();					\
+	} while (_r == -EINTR);						\
+									\
+	finish_wait(&(c)->btree_cache_wait, &(op)->wait);		\
+	_r;								\
+})
+
 #define MAP_DONE	0
 #define MAP_CONTINUE	1
 
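As a usage note (not part of the patch): a recurse callback written against these macros typically follows the pattern of bch_btree_map_nodes_recurse() in btree.c — iterate the keys of an internal node, call btree() on each child, and propagate -EINTR so btree_root() can retry with the correct lock, per the op->lock convention documented in the comment above. A minimal hypothetical sketch, matching the bch_btree_example_recurse() declaration used in the earlier sketch:

int bch_btree_example_recurse(struct btree *b, struct btree_op *op)
{
	int ret = MAP_CONTINUE;

	if (b->level) {
		struct bkey *k;
		struct btree_iter iter;

		bch_btree_iter_init(&b->keys, &iter, NULL);

		/* Walk every good pointer and recurse into its child node. */
		while ((k = bch_btree_iter_next_filter(&iter, &b->keys,
						       bch_ptr_bad))) {
			/*
			 * btree() looks up and locks the child, calls
			 * bch_btree_example_recurse() on it and drops the
			 * lock again; -EINTR propagates up to btree_root(),
			 * which retries from the root.
			 */
			ret = btree(example_recurse, k, b, op);
			if (ret != MAP_CONTINUE)
				break;
		}
	}

	return ret;
}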