btrfs: switch extent buffer tree lock to rw_semaphore
Historically we've implemented our own locking because we wanted to be able to
selectively spin or sleep based on what we were doing in the tree. For
instance, if all of our nodes were in cache then there's rarely a reason to
need to sleep waiting for node locks, as they'll likely become available soon.
At the time this code was written the rw_semaphore didn't do adaptive
spinning, and thus was orders of magnitude slower than our home grown locking.

However now the opposite is the case. There are a few problems with how we
implement blocking locks, namely that we use a normal waitqueue and simply
wake everybody up in reverse sleep order. This leads to some suboptimal
performance behavior, and a lot of context switches in highly contended cases.
The rw_semaphores actually do this properly, and also have adaptive spinning
that works relatively well.

The locking code is also a bit of a bear to understand, and we lose the
benefit of lockdep for the most part because the blocking states of the lock
are simply ad-hoc and not mapped into lockdep.

So rework the locking code to drop all of this custom locking stuff, and
simply use a rw_semaphore for everything. This makes the locking much simpler
for everything, as we can now drop a lot of cruft and blocking transitions.
The performance numbers vary depending on the workload, because generally
speaking there doesn't tend to be a lot of contention on the btree. However,
on my test system, which is an 80 core single socket system with 256GiB of RAM
and a 2TiB NVMe drive, I get the following results (with all debug options
off):

dbench 200 baseline
Throughput 216.056 MB/sec  200 clients  200 procs  max_latency=1471.197 ms

dbench 200 with patch
Throughput 737.188 MB/sec  200 clients  200 procs  max_latency=714.346 ms

Previously we also used fs_mark to test this sort of contention, and those
results are far less impressive, mostly because there aren't enough tasks to
really stress the locking:

fs_mark -d /d[0-15] -S 0 -L 20 -n 100000 -s 0 -t 16

baseline
  Average Files/sec:  160166.7
  p50 Files/sec:      165832
  p90 Files/sec:      123886
  p99 Files/sec:      123495

  real    3m26.527s
  user    2m19.223s
  sys     48m21.856s

patched
  Average Files/sec:  164135.7
  p50 Files/sec:      171095
  p90 Files/sec:      122889
  p99 Files/sec:      113819

  real    3m29.660s
  user    2m19.990s
  sys     44m12.259s

Signed-off-by: Josef Bacik <josef@toxicpanda.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
Parent: ecdcf3c259
Commit: 196d59ab9c
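For orientation before the diff: the sketch below, written against a hypothetical stand-in struct rather than the real struct extent_buffer, shows the pattern the patch adopts. The rwsem calls (init_rwsem, down_write_nested, down_read_trylock, up_write) are stock kernel primitives that the new code relies on; everything else here is illustrative only.

/*
 * Minimal sketch (not the patch itself) of the locking model this commit
 * moves to: one rw_semaphore replaces the custom rwlock, the blocking
 * counters and both wait queues.  "struct eb_lock_sketch" is a stand-in
 * for struct extent_buffer, which embeds the rwsem as "lock".
 */
#include <linux/rwsem.h>
#include <linux/sched.h>

struct eb_lock_sketch {
	struct rw_semaphore lock;	/* replaces rwlock_t + two waitqueues */
	pid_t lock_owner;		/* kept for the one-level read recursion */
};

static void eb_lock_init(struct eb_lock_sketch *eb)
{
	init_rwsem(&eb->lock);		/* was rwlock_init() + init_waitqueue_head() x2 */
}

static void eb_write_lock(struct eb_lock_sketch *eb, int subclass)
{
	/* Sleeps or spins adaptively; the subclass feeds lockdep nesting. */
	down_write_nested(&eb->lock, subclass);
	eb->lock_owner = current->pid;
}

static void eb_write_unlock(struct eb_lock_sketch *eb)
{
	eb->lock_owner = 0;
	up_write(&eb->lock);		/* no spinning/blocking transition to undo */
}

static int eb_read_trylock(struct eb_lock_sketch *eb)
{
	return down_read_trylock(&eb->lock);	/* 1 if taken, 0 otherwise */
}

The hunks below show how the real helpers collapse onto exactly these calls.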
@@ -4946,12 +4946,8 @@ __alloc_extent_buffer(struct btrfs_fs_info *fs_info, u64 start,
 	eb->len = len;
 	eb->fs_info = fs_info;
 	eb->bflags = 0;
-	rwlock_init(&eb->lock);
-	atomic_set(&eb->blocking_readers, 0);
-	eb->blocking_writers = 0;
+	init_rwsem(&eb->lock);
 	eb->lock_recursed = false;
-	init_waitqueue_head(&eb->write_lock_wq);
-	init_waitqueue_head(&eb->read_lock_wq);
 
 	btrfs_leak_debug_add(&fs_info->eb_leak_lock, &eb->leak_list,
 			     &fs_info->allocated_ebs);
@@ -4967,13 +4963,6 @@ __alloc_extent_buffer(struct btrfs_fs_info *fs_info, u64 start,
 		     > MAX_INLINE_EXTENT_BUFFER_SIZE);
 	BUG_ON(len > MAX_INLINE_EXTENT_BUFFER_SIZE);
 
-#ifdef CONFIG_BTRFS_DEBUG
-	eb->spinning_writers = 0;
-	atomic_set(&eb->spinning_readers, 0);
-	atomic_set(&eb->read_locks, 0);
-	eb->write_locks = 0;
-#endif
-
 	return eb;
 }
 
@@ -87,31 +87,14 @@ struct extent_buffer {
 	int read_mirror;
 	struct rcu_head rcu_head;
 	pid_t lock_owner;
-
-	int blocking_writers;
-	atomic_t blocking_readers;
 	bool lock_recursed;
+	struct rw_semaphore lock;
+
 	/* >= 0 if eb belongs to a log tree, -1 otherwise */
 	short log_index;
 
-	/* protects write locks */
-	rwlock_t lock;
-
-	/* readers use lock_wq while they wait for the write
-	 * lock holders to unlock
-	 */
-	wait_queue_head_t write_lock_wq;
-
-	/* writers use read_lock_wq while they wait for readers
-	 * to unlock
-	 */
-	wait_queue_head_t read_lock_wq;
 	struct page *pages[INLINE_EXTENT_BUFFER_PAGES];
 #ifdef CONFIG_BTRFS_DEBUG
-	int spinning_writers;
-	atomic_t spinning_readers;
-	atomic_t read_locks;
-	int write_locks;
 	struct list_head leak_list;
 #endif
 };
@@ -17,44 +17,17 @@
  * Extent buffer locking
  * =====================
  *
- * The locks use a custom scheme that allows to do more operations than are
- * available fromt current locking primitives. The building blocks are still
- * rwlock and wait queues.
- *
- * Required semantics:
+ * We use a rw_semaphore for tree locking, and the semantics are exactly the
+ * same:
  *
  * - reader/writer exclusion
  * - writer/writer exclusion
 * - reader/reader sharing
- * - spinning lock semantics
- * - blocking lock semantics
 * - try-lock semantics for readers and writers
- * - one level nesting, allowing read lock to be taken by the same thread that
- *   already has write lock
 *
- * The extent buffer locks (also called tree locks) manage access to eb data
- * related to the storage in the b-tree (keys, items, but not the individual
- * members of eb).
- * We want concurrency of many readers and safe updates. The underlying locking
- * is done by read-write spinlock and the blocking part is implemented using
- * counters and wait queues.
- *
- * spinning semantics - the low-level rwlock is held so all other threads that
- *                      want to take it are spinning on it.
- *
- * blocking semantics - the low-level rwlock is not held but the counter
- *                      denotes how many times the blocking lock was held;
- *                      sleeping is possible
- *
- * Write lock always allows only one thread to access the data.
- *
- *
- * Debugging
- * ---------
- *
- * There are additional state counters that are asserted in various contexts,
- * removed from non-debug build to reduce extent_buffer size and for
- * performance reasons.
+ * Additionally we need one level nesting recursion, see below. The rwsem
+ * implementation does opportunistic spinning which reduces number of times the
+ * locking task needs to sleep.
 *
 *
 * Lock recursion
@@ -75,115 +48,8 @@
  *   btrfs_lookup_file_extent
  *     btrfs_search_slot
  *
- *
- * Locking pattern - spinning
- * --------------------------
- *
- * The simple locking scenario, the +--+ denotes the spinning section.
- *
- * +- btrfs_tree_lock
- * | - extent_buffer::rwlock is held
- * | - no heavy operations should happen, eg. IO, memory allocations, large
- * |   structure traversals
- * +- btrfs_tree_unock
- *
- *
- * Locking pattern - blocking
- * --------------------------
- *
- * The blocking write uses the following scheme. The +--+ denotes the spinning
- * section.
- *
- * +- btrfs_tree_lock
- * |
- * +- btrfs_set_lock_blocking_write
- *
- *   - allowed: IO, memory allocations, etc.
- *
- * -- btrfs_tree_unlock - note, no explicit unblocking necessary
- *
- *
- * Blocking read is similar.
- *
- * +- btrfs_tree_read_lock
- * |
- * +- btrfs_set_lock_blocking_read
- *
- *  - heavy operations allowed
- *
- * +- btrfs_tree_read_unlock_blocking
- * |
- * +- btrfs_tree_read_unlock
- *
  */
 
-#ifdef CONFIG_BTRFS_DEBUG
-static inline void btrfs_assert_spinning_writers_get(struct extent_buffer *eb)
-{
-	WARN_ON(eb->spinning_writers);
-	eb->spinning_writers++;
-}
-
-static inline void btrfs_assert_spinning_writers_put(struct extent_buffer *eb)
-{
-	WARN_ON(eb->spinning_writers != 1);
-	eb->spinning_writers--;
-}
-
-static inline void btrfs_assert_no_spinning_writers(struct extent_buffer *eb)
-{
-	WARN_ON(eb->spinning_writers);
-}
-
-static inline void btrfs_assert_spinning_readers_get(struct extent_buffer *eb)
-{
-	atomic_inc(&eb->spinning_readers);
-}
-
-static inline void btrfs_assert_spinning_readers_put(struct extent_buffer *eb)
-{
-	WARN_ON(atomic_read(&eb->spinning_readers) == 0);
-	atomic_dec(&eb->spinning_readers);
-}
-
-static inline void btrfs_assert_tree_read_locks_get(struct extent_buffer *eb)
-{
-	atomic_inc(&eb->read_locks);
-}
-
-static inline void btrfs_assert_tree_read_locks_put(struct extent_buffer *eb)
-{
-	atomic_dec(&eb->read_locks);
-}
-
-static inline void btrfs_assert_tree_read_locked(struct extent_buffer *eb)
-{
-	BUG_ON(!atomic_read(&eb->read_locks));
-}
-
-static inline void btrfs_assert_tree_write_locks_get(struct extent_buffer *eb)
-{
-	eb->write_locks++;
-}
-
-static inline void btrfs_assert_tree_write_locks_put(struct extent_buffer *eb)
-{
-	eb->write_locks--;
-}
-
-#else
-static void btrfs_assert_spinning_writers_get(struct extent_buffer *eb) { }
-static void btrfs_assert_spinning_writers_put(struct extent_buffer *eb) { }
-static void btrfs_assert_no_spinning_writers(struct extent_buffer *eb) { }
-static void btrfs_assert_spinning_readers_put(struct extent_buffer *eb) { }
-static void btrfs_assert_spinning_readers_get(struct extent_buffer *eb) { }
-static void btrfs_assert_tree_read_locked(struct extent_buffer *eb) { }
-static void btrfs_assert_tree_read_locks_get(struct extent_buffer *eb) { }
-static void btrfs_assert_tree_read_locks_put(struct extent_buffer *eb) { }
-static void btrfs_assert_tree_write_locks_get(struct extent_buffer *eb) { }
-static void btrfs_assert_tree_write_locks_put(struct extent_buffer *eb) { }
-#endif
-
 /*
  * Mark already held read lock as blocking. Can be nested in write lock by the
  * same thread.
@@ -195,18 +61,6 @@ static void btrfs_assert_tree_write_locks_put(struct extent_buffer *eb) { }
  */
 void btrfs_set_lock_blocking_read(struct extent_buffer *eb)
 {
-	trace_btrfs_set_lock_blocking_read(eb);
-	/*
-	 * No lock is required. The lock owner may change if we have a read
-	 * lock, but it won't change to or away from us. If we have the write
-	 * lock, we are the owner and it'll never change.
-	 */
-	if (eb->lock_recursed && current->pid == eb->lock_owner)
-		return;
-	btrfs_assert_tree_read_locked(eb);
-	atomic_inc(&eb->blocking_readers);
-	btrfs_assert_spinning_readers_put(eb);
-	read_unlock(&eb->lock);
 }
 
 /*
@@ -219,30 +73,20 @@ void btrfs_set_lock_blocking_read(struct extent_buffer *eb)
  */
 void btrfs_set_lock_blocking_write(struct extent_buffer *eb)
 {
-	trace_btrfs_set_lock_blocking_write(eb);
-	/*
-	 * No lock is required. The lock owner may change if we have a read
-	 * lock, but it won't change to or away from us. If we have the write
-	 * lock, we are the owner and it'll never change.
-	 */
-	if (eb->lock_recursed && current->pid == eb->lock_owner)
-		return;
-	if (eb->blocking_writers == 0) {
-		btrfs_assert_spinning_writers_put(eb);
-		btrfs_assert_tree_locked(eb);
-		WRITE_ONCE(eb->blocking_writers, 1);
-		write_unlock(&eb->lock);
-	}
 }
 
 /*
- * Lock the extent buffer for read. Wait for any writers (spinning or blocking).
- * Can be nested in write lock by the same thread.
+ * __btrfs_tree_read_lock - lock extent buffer for read
+ * @eb:		the eb to be locked
+ * @nest:	the nesting level to be used for lockdep
+ * @recurse:	if this lock is able to be recursed
  *
- * Use when the locked section does only lightweight actions and busy waiting
- * would be cheaper than making other threads do the wait/wake loop.
+ * This takes the read lock on the extent buffer, using the specified nesting
+ * level for lockdep purposes.
  *
- * The rwlock is held upon exit.
+ * If you specify recurse = true, then we will allow this to be taken if we
+ * currently own the lock already. This should only be used in specific
+ * usecases, and the subsequent unlock will not change the state of the lock.
  */
 void __btrfs_tree_read_lock(struct extent_buffer *eb, enum btrfs_lock_nesting nest,
 			    bool recurse)
@@ -251,33 +95,33 @@ void __btrfs_tree_read_lock(struct extent_buffer *eb, enum btrfs_lock_nesting nest,
 
 	if (trace_btrfs_tree_read_lock_enabled())
 		start_ns = ktime_get_ns();
-again:
-	read_lock(&eb->lock);
-	BUG_ON(eb->blocking_writers == 0 &&
-	       current->pid == eb->lock_owner);
-	if (eb->blocking_writers) {
-		if (current->pid == eb->lock_owner) {
-			/*
-			 * This extent is already write-locked by our thread.
-			 * We allow an additional read lock to be added because
-			 * it's for the same thread. btrfs_find_all_roots()
-			 * depends on this as it may be called on a partly
-			 * (write-)locked tree.
-			 */
-			WARN_ON(!recurse);
-			BUG_ON(eb->lock_recursed);
-			eb->lock_recursed = true;
-			read_unlock(&eb->lock);
-			trace_btrfs_tree_read_lock(eb, start_ns);
-			return;
+
+	if (unlikely(recurse)) {
+		/* First see if we can grab the lock outright */
+		if (down_read_trylock(&eb->lock))
+			goto out;
+
+		/*
+		 * Ok still doesn't necessarily mean we are already holding the
+		 * lock, check the owner.
+		 */
+		if (eb->lock_owner != current->pid) {
+			down_read_nested(&eb->lock, nest);
+			goto out;
 		}
-		read_unlock(&eb->lock);
-		wait_event(eb->write_lock_wq,
-			   READ_ONCE(eb->blocking_writers) == 0);
-		goto again;
+
+		/*
+		 * Ok we have actually recursed, but we should only be recursing
+		 * once, so blow up if we're already recursed, otherwise set
+		 * ->lock_recursed and carry on.
+		 */
+		BUG_ON(eb->lock_recursed);
+		eb->lock_recursed = true;
+		goto out;
 	}
-	btrfs_assert_tree_read_locks_get(eb);
-	btrfs_assert_spinning_readers_get(eb);
+	down_read_nested(&eb->lock, nest);
+out:
+	eb->lock_owner = current->pid;
 	trace_btrfs_tree_read_lock(eb, start_ns);
 }
@@ -294,74 +138,42 @@ void btrfs_tree_read_lock(struct extent_buffer *eb)
  */
 int btrfs_tree_read_lock_atomic(struct extent_buffer *eb)
 {
-	if (READ_ONCE(eb->blocking_writers))
-		return 0;
-
-	read_lock(&eb->lock);
-	/* Refetch value after lock */
-	if (READ_ONCE(eb->blocking_writers)) {
-		read_unlock(&eb->lock);
-		return 0;
-	}
-	btrfs_assert_tree_read_locks_get(eb);
-	btrfs_assert_spinning_readers_get(eb);
-	trace_btrfs_tree_read_lock_atomic(eb);
-	return 1;
+	return btrfs_try_tree_read_lock(eb);
 }
 
 /*
- * Try-lock for read. Don't block or wait for contending writers.
+ * Try-lock for read.
  *
  * Retrun 1 if the rwlock has been taken, 0 otherwise
  */
 int btrfs_try_tree_read_lock(struct extent_buffer *eb)
 {
-	if (READ_ONCE(eb->blocking_writers))
-		return 0;
-
-	if (!read_trylock(&eb->lock))
-		return 0;
-
-	/* Refetch value after lock */
-	if (READ_ONCE(eb->blocking_writers)) {
-		read_unlock(&eb->lock);
-		return 0;
+	if (down_read_trylock(&eb->lock)) {
+		eb->lock_owner = current->pid;
+		trace_btrfs_try_tree_read_lock(eb);
+		return 1;
 	}
-	btrfs_assert_tree_read_locks_get(eb);
-	btrfs_assert_spinning_readers_get(eb);
-	trace_btrfs_try_tree_read_lock(eb);
-	return 1;
+	return 0;
 }
 
 /*
- * Try-lock for write. May block until the lock is uncontended, but does not
- * wait until it is free.
+ * Try-lock for write.
  *
  * Retrun 1 if the rwlock has been taken, 0 otherwise
  */
 int btrfs_try_tree_write_lock(struct extent_buffer *eb)
 {
-	if (READ_ONCE(eb->blocking_writers) || atomic_read(&eb->blocking_readers))
-		return 0;
-
-	write_lock(&eb->lock);
-	/* Refetch value after lock */
-	if (READ_ONCE(eb->blocking_writers) || atomic_read(&eb->blocking_readers)) {
-		write_unlock(&eb->lock);
-		return 0;
+	if (down_write_trylock(&eb->lock)) {
+		eb->lock_owner = current->pid;
+		trace_btrfs_try_tree_write_lock(eb);
+		return 1;
 	}
-	btrfs_assert_tree_write_locks_get(eb);
-	btrfs_assert_spinning_writers_get(eb);
-	eb->lock_owner = current->pid;
-	trace_btrfs_try_tree_write_lock(eb);
-	return 1;
+	return 0;
 }
 
 /*
- * Release read lock. Must be used only if the lock is in spinning mode. If
- * the read lock is nested, must pair with read lock before the write unlock.
- *
- * The rwlock is not held upon exit.
+ * Release read lock. If the read lock was recursed then the lock stays in the
+ * original state that it was before it was recursively locked.
  */
 void btrfs_tree_read_unlock(struct extent_buffer *eb)
 {
@@ -376,10 +188,8 @@ void btrfs_tree_read_unlock(struct extent_buffer *eb)
 		eb->lock_recursed = false;
 		return;
 	}
-	btrfs_assert_tree_read_locked(eb);
-	btrfs_assert_spinning_readers_put(eb);
-	btrfs_assert_tree_read_locks_put(eb);
-	read_unlock(&eb->lock);
+	eb->lock_owner = 0;
+	up_read(&eb->lock);
 }
 
 /*
@@ -391,30 +201,15 @@ void btrfs_tree_read_unlock(struct extent_buffer *eb)
  */
 void btrfs_tree_read_unlock_blocking(struct extent_buffer *eb)
 {
-	trace_btrfs_tree_read_unlock_blocking(eb);
-	/*
-	 * if we're nested, we have the write lock. No new locking
-	 * is needed as long as we are the lock owner.
-	 * The write unlock will do a barrier for us, and the lock_recursed
-	 * field only matters to the lock owner.
-	 */
-	if (eb->lock_recursed && current->pid == eb->lock_owner) {
-		eb->lock_recursed = false;
-		return;
-	}
-	btrfs_assert_tree_read_locked(eb);
-	WARN_ON(atomic_read(&eb->blocking_readers) == 0);
-	/* atomic_dec_and_test implies a barrier */
-	if (atomic_dec_and_test(&eb->blocking_readers))
-		cond_wake_up_nomb(&eb->read_lock_wq);
-	btrfs_assert_tree_read_locks_put(eb);
+	btrfs_tree_read_unlock(eb);
 }
 
 /*
- * Lock for write. Wait for all blocking and spinning readers and writers. This
- * starts context where reader lock could be nested by the same thread.
+ * __btrfs_tree_lock - lock eb for write
+ * @eb:		the eb to lock
+ * @nest:	the nesting to use for the lock
  *
- * The rwlock is held for write upon exit.
+ * Returns with the eb->lock write locked.
  */
 void __btrfs_tree_lock(struct extent_buffer *eb, enum btrfs_lock_nesting nest)
 	__acquires(&eb->lock)
@@ -424,19 +219,7 @@ void __btrfs_tree_lock(struct extent_buffer *eb, enum btrfs_lock_nesting nest)
 	if (trace_btrfs_tree_lock_enabled())
 		start_ns = ktime_get_ns();
 
-	WARN_ON(eb->lock_owner == current->pid);
-again:
-	wait_event(eb->read_lock_wq, atomic_read(&eb->blocking_readers) == 0);
-	wait_event(eb->write_lock_wq, READ_ONCE(eb->blocking_writers) == 0);
-	write_lock(&eb->lock);
-	/* Refetch value after lock */
-	if (atomic_read(&eb->blocking_readers) ||
-	    READ_ONCE(eb->blocking_writers)) {
-		write_unlock(&eb->lock);
-		goto again;
-	}
-	btrfs_assert_spinning_writers_get(eb);
-	btrfs_assert_tree_write_locks_get(eb);
+	down_write_nested(&eb->lock, nest);
 	eb->lock_owner = current->pid;
 	trace_btrfs_tree_lock(eb, start_ns);
 }
@@ -447,42 +230,13 @@ void btrfs_tree_lock(struct extent_buffer *eb)
 }
 
 /*
- * Release the write lock, either blocking or spinning (ie. there's no need
- * for an explicit blocking unlock, like btrfs_tree_read_unlock_blocking).
- * This also ends the context for nesting, the read lock must have been
- * released already.
- *
- * Tasks blocked and waiting are woken, rwlock is not held upon exit.
+ * Release the write lock.
  */
 void btrfs_tree_unlock(struct extent_buffer *eb)
 {
-	/*
-	 * This is read both locked and unlocked but always by the same thread
-	 * that already owns the lock so we don't need to use READ_ONCE
-	 */
-	int blockers = eb->blocking_writers;
-
-	BUG_ON(blockers > 1);
-
-	btrfs_assert_tree_locked(eb);
 	trace_btrfs_tree_unlock(eb);
 	eb->lock_owner = 0;
-	btrfs_assert_tree_write_locks_put(eb);
-
-	if (blockers) {
-		btrfs_assert_no_spinning_writers(eb);
-		/* Unlocked write */
-		WRITE_ONCE(eb->blocking_writers, 0);
-		/*
-		 * We need to order modifying blocking_writers above with
-		 * actually waking up the sleepers to ensure they see the
-		 * updated value of blocking_writers
-		 */
-		cond_wake_up(&eb->write_lock_wq);
-	} else {
-		btrfs_assert_spinning_writers_put(eb);
-		write_unlock(&eb->lock);
-	}
+	up_write(&eb->lock);
 }
 
 /*
@@ -110,7 +110,7 @@ static inline struct extent_buffer *btrfs_read_lock_root_node(struct btrfs_root
 
 #ifdef CONFIG_BTRFS_DEBUG
 static inline void btrfs_assert_tree_locked(struct extent_buffer *eb) {
-	BUG_ON(!eb->write_locks);
+	lockdep_assert_held(&eb->lock);
 }
 #else
 static inline void btrfs_assert_tree_locked(struct extent_buffer *eb) { }
@@ -191,15 +191,8 @@ static void print_uuid_item(struct extent_buffer *l, unsigned long offset,
 static void print_eb_refs_lock(struct extent_buffer *eb)
 {
 #ifdef CONFIG_BTRFS_DEBUG
-	btrfs_info(eb->fs_info,
-"refs %u lock (w:%d r:%d bw:%d br:%d sw:%d sr:%d) lock_owner %u current %u",
-		   atomic_read(&eb->refs), eb->write_locks,
-		   atomic_read(&eb->read_locks),
-		   eb->blocking_writers,
-		   atomic_read(&eb->blocking_readers),
-		   eb->spinning_writers,
-		   atomic_read(&eb->spinning_readers),
-		   eb->lock_owner, current->pid);
+	btrfs_info(eb->fs_info, "refs %u lock_owner %u current %u",
+		   atomic_read(&eb->refs), eb->lock_owner, current->pid);
 #endif
 }
 
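To close, a sketch of what a call site looks like once blocking transitions are gone. btrfs_tree_lock() and btrfs_tree_unlock() are the real entry points kept by this patch; the surrounding function is hypothetical and only illustrates that heavy work may now happen while the lock is held, with no btrfs_set_lock_blocking_write() step in between.

/*
 * Hypothetical caller, for illustration only.  With the rwsem in place a
 * writer simply takes the tree lock, may sleep, allocate or do IO while
 * holding it, and releases it; the old spinning -> blocking transition
 * (btrfs_set_lock_blocking_write()) is reduced to an empty stub above.
 */
static void example_update_node(struct extent_buffer *eb)
{
	btrfs_tree_lock(eb);		/* down_write_nested() underneath */

	/* ... modify the node; sleeping here is now fine ... */

	btrfs_tree_unlock(eb);		/* up_write() underneath */
}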