for-5.5-tag

-----BEGIN PGP SIGNATURE-----

iQIzBAABCgAdFiEE8rQSAMVO+zA4DBdWxWXV+ddtWDsFAl3YCRAACgkQxWXV+ddt
WDuTuQ/7BOibDKqInm2SsL8xMuZqXjxGXUcHDPio5MbzNJ3wpV0j1KqWWsuK8hi0
HAhSI3fu3NG7RQYh3nuRO0CaZy3ENiqKoffrSpg7k5DJG0B7Lm/G/970fmOYUp6a
j6PMNcrKaw+1J3yuljSd20+n6j/hdmfn847ZsSY+7JmZ4zGMJ5GMv3IRipdLFUzR
tmjWmmCI05sF4/8cI6jzUVq588uSFTO1bGXFugmoO0ztpameudCnYniJI0tDBFSV
pqk6lqoOPNcaC9nATuA5KKOpUJ9nSscP/St3DV4D6LaZKkT/M5zs12lXPMJx/pKn
oCHt/A/wBdbDOoy1uHVMWQ9cz9PyVFtU7eSKizcFjoqnHO6fzlnRr9fsmIZKtTw9
H6nXVmP1S+xJg/zTBxCXHVfZR2dqADUsHWztN1LM8Pen/l9+UMwBeMhq9f9Jz68I
kF7zWlfLEtNh8naEYf34LkGVMtCNY4PHFsSztPg/jbfsH34xMvetKvPR2s8lejhp
42YqPHgEh2+8mmVcq65+jl+bPOp/5bdToRtuPiszWiKZSXt/5xplP+5lkSEet0J6
4aNZ8NRAiZ98br45jdTMUVSo6YtI27SS+GdVOUHPQtPI/kWi9XHx+l3E9MVOUtrd
lQ1Z9tPinEnJH4kntiCz2sKdzNKE01IagV4wFylz1Ct+ZqF9jNs=
=JzIp
-----END PGP SIGNATURE-----

Merge tag 'for-5.5-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux

Pull btrfs updates from David Sterba:
 "User visible changes:

   - new block group profiles: RAID1 with 3 and 4 copies

       - RAID1 in btrfs has always had 2 copies; this adds support for 3
         and 4 copies
       - this is an incompat feature (named RAID1C34)
       - the recommended use of RAID1C3 is as a replacement for the RAID6
         profile on metadata, which gives more reliable resiliency
         against the loss or damage of 2 devices

   - support for new checksums

       - per-filesystem, set at mkfs time
       - fast hash (crc32c successor): xxhash, 64bit digest
       - strong hashes (both 256bit): sha256 (slower, FIPS), blake2b
         (faster)
       - the blake2b module goes via the crypto tree; btrfs.ko has a soft
         dependency on it

   - speed up lseek: don't take inode locks unnecessarily; this can
     speed up parallel SEEK_CUR/SEEK_SET/SEEK_END by 80%

   - send:

       - allow clone operations within the same file
       - limit the maximum number of sent clone references to avoid slow
         backref walking (see the sketch of the new checksum handling
         below for the feature described above)

   - error message improvements: device scan prints process name and PID

  Core changes:

   - cleanups

       - remove unique workqueue helpers, which used to provide a way to
         avoid deadlocks in the workqueue code; this is now done in a
         simpler way
       - remove lots of indirect function calls in the compression code
       - extent IO tree code moved out of extent_io.c
       - clean up backup superblock handling at mount time
       - transaction life cycle documentation and cleanups
       - locking code cleanups, annotations and documentation
       - add more cold, const, pure function attributes
       - removal of unused or redundant struct members or variables

   - new tree-checker sanity tests

       - try to detect a missing INODE_ITEM; cross-reference checks of
         DIR_ITEM, DIR_INDEX, INODE_REF, and XATTR_* items

   - remove own bio scheduling code (used to avoid checksum submissions
     being stuck behind other IO), replaced by cgroup controller-based
     code to allow better control and avoid priority inversions in cases
     where the custom and cgroup scheduling disagreed

  Fixes:

   - avoid getting stuck during cyclic writebacks

   - fix trimming of ranges crossing block group boundaries

   - fix rename exchange on subvolumes; all involved subvolumes need to
     be recorded in the transaction"

* tag 'for-5.5-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux: (137 commits)
  btrfs: drop bdev argument from submit_extent_page
  btrfs: remove extent_map::bdev
  btrfs: drop bio_set_dev where not needed
  btrfs: get bdev directly from fs_devices in submit_extent_page
  btrfs: record all roots for rename exchange on a subvol
  Btrfs: fix block group remaining RO forever after error during device replace
  btrfs: scrub: Don't check free space before marking a block group RO
  btrfs: change btrfs_fs_devices::rotating to bool
  btrfs: change btrfs_fs_devices::seeding to bool
  btrfs: rename btrfs_block_group_cache
  btrfs: block-group: Reuse the item key from caller of read_one_block_group()
  btrfs: block-group: Refactor btrfs_read_block_groups()
  btrfs: document extent buffer locking
  btrfs: access eb::blocking_writers according to ACCESS_ONCE policies
  btrfs: set blocking_writers directly, no increment or decrement
  btrfs: merge blocking_writers branches in btrfs_tree_read_lock
  btrfs: drop incompat bit for raid1c34 after last block group is gone
  btrfs: add incompat for raid1 with 3, 4 copies
  btrfs: add support for 4-copy replication (raid1c4)
  btrfs: add support for 3-copy replication (raid1c3)
  ...
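As a quick illustration of the new checksum support described above, here is a minimal, hypothetical sketch (not the kernel's exact code) of how a per-filesystem checksum type maps to a digest size and a crypto driver name. The table mirrors the btrfs_csums[] array added in this series; the allocation call is the standard kernel crypto shash API, and the helper name is made up for illustration.

#include <linux/types.h>
#include <crypto/hash.h>

/* Mirrors the btrfs_csums[] table added in this series (ctree.c). */
static const struct {
	u16 size;		/* digest size in bytes */
	const char *name;	/* name as stored in the superblock */
	const char *driver;	/* crypto driver, NULL if same as name */
} csums[] = {
	{ .size = 4,  .name = "crc32c" },
	{ .size = 8,  .name = "xxhash64" },
	{ .size = 32, .name = "sha256" },
	{ .size = 32, .name = "blake2b", .driver = "blake2b-256" },
};

/* Hypothetical helper: allocate the shash for an already validated type. */
static struct crypto_shash *csum_alloc_shash(unsigned int type)
{
	const char *driver = csums[type].driver ?: csums[type].name;

	/* Returns an ERR_PTR() on failure, e.g. if the module is missing. */
	return crypto_alloc_shash(driver, 0, 0);
}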
@@ -5,6 +5,8 @@ config BTRFS_FS
	select CRYPTO
	select CRYPTO_CRC32C
	select LIBCRC32C
	select CRYPTO_XXHASH
	select CRYPTO_SHA256
	select ZLIB_INFLATE
	select ZLIB_DEFLATE
	select LZO_COMPRESS
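The Kconfig hunk above only selects the crypto modules btrfs needs unconditionally; per the pull message, blake2b is instead pulled in through a soft module dependency. A sketch of how such a soft dependency is typically declared is below — the exact module name strings are assumptions for illustration, not copied from this series.

/*
 * Illustrative only: soft dependencies let modprobe load the checksum
 * providers alongside btrfs.ko without hard-wiring them via Kconfig
 * "select".  The module name strings here are assumptions.
 */
#include <linux/module.h>

MODULE_SOFTDEP("pre: crc32c");
MODULE_SOFTDEP("pre: xxhash64");
MODULE_SOFTDEP("pre: sha256");
MODULE_SOFTDEP("pre: blake2b");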
@@ -53,24 +53,12 @@ struct btrfs_workqueue {
	struct __btrfs_workqueue *high;
};

static void normal_work_helper(struct btrfs_work *work);

#define BTRFS_WORK_HELPER(name) \
noinline_for_stack void btrfs_##name(struct work_struct *arg) \
{ \
	struct btrfs_work *work = container_of(arg, struct btrfs_work, \
					       normal_work); \
	normal_work_helper(work); \
}

struct btrfs_fs_info *
btrfs_workqueue_owner(const struct __btrfs_workqueue *wq)
struct btrfs_fs_info * __pure btrfs_workqueue_owner(const struct __btrfs_workqueue *wq)
{
	return wq->fs_info;
}

struct btrfs_fs_info *
btrfs_work_owner(const struct btrfs_work *work)
struct btrfs_fs_info * __pure btrfs_work_owner(const struct btrfs_work *work)
{
	return work->wq->fs_info;
}
@@ -89,29 +77,6 @@ bool btrfs_workqueue_normal_congested(const struct btrfs_workqueue *wq)
	return atomic_read(&wq->normal->pending) > wq->normal->thresh * 2;
}

BTRFS_WORK_HELPER(worker_helper);
BTRFS_WORK_HELPER(delalloc_helper);
BTRFS_WORK_HELPER(flush_delalloc_helper);
BTRFS_WORK_HELPER(cache_helper);
BTRFS_WORK_HELPER(submit_helper);
BTRFS_WORK_HELPER(fixup_helper);
BTRFS_WORK_HELPER(endio_helper);
BTRFS_WORK_HELPER(endio_meta_helper);
BTRFS_WORK_HELPER(endio_meta_write_helper);
BTRFS_WORK_HELPER(endio_raid56_helper);
BTRFS_WORK_HELPER(endio_repair_helper);
BTRFS_WORK_HELPER(rmw_helper);
BTRFS_WORK_HELPER(endio_write_helper);
BTRFS_WORK_HELPER(freespace_write_helper);
BTRFS_WORK_HELPER(delayed_meta_helper);
BTRFS_WORK_HELPER(readahead_helper);
BTRFS_WORK_HELPER(qgroup_rescan_helper);
BTRFS_WORK_HELPER(extent_refs_helper);
BTRFS_WORK_HELPER(scrub_helper);
BTRFS_WORK_HELPER(scrubwrc_helper);
BTRFS_WORK_HELPER(scrubnc_helper);
BTRFS_WORK_HELPER(scrubparity_helper);

static struct __btrfs_workqueue *
__btrfs_alloc_workqueue(struct btrfs_fs_info *fs_info, const char *name,
			unsigned int flags, int limit_active, int thresh)
@@ -252,16 +217,16 @@ out:
	}
}

static void run_ordered_work(struct __btrfs_workqueue *wq)
static void run_ordered_work(struct __btrfs_workqueue *wq,
			     struct btrfs_work *self)
{
	struct list_head *list = &wq->ordered_list;
	struct btrfs_work *work;
	spinlock_t *lock = &wq->list_lock;
	unsigned long flags;
	bool free_self = false;

	while (1) {
		void *wtag;

		spin_lock_irqsave(lock, flags);
		if (list_empty(list))
			break;
@@ -287,22 +252,53 @@ static void run_ordered_work(struct __btrfs_workqueue *wq)
		list_del(&work->ordered_list);
		spin_unlock_irqrestore(lock, flags);

		/*
		 * We don't want to call the ordered free functions with the
		 * lock held though. Save the work as tag for the trace event,
		 * because the callback could free the structure.
		 */
		wtag = work;
		work->ordered_free(work);
		trace_btrfs_all_work_done(wq->fs_info, wtag);
		if (work == self) {
			/*
			 * This is the work item that the worker is currently
			 * executing.
			 *
			 * The kernel workqueue code guarantees non-reentrancy
			 * of work items. I.e., if a work item with the same
			 * address and work function is queued twice, the second
			 * execution is blocked until the first one finishes. A
			 * work item may be freed and recycled with the same
			 * work function; the workqueue code assumes that the
			 * original work item cannot depend on the recycled work
			 * item in that case (see find_worker_executing_work()).
			 *
			 * Note that different types of Btrfs work can depend on
			 * each other, and one type of work on one Btrfs
			 * filesystem may even depend on the same type of work
			 * on another Btrfs filesystem via, e.g., a loop device.
			 * Therefore, we must not allow the current work item to
			 * be recycled until we are really done, otherwise we
			 * break the above assumption and can deadlock.
			 */
			free_self = true;
		} else {
			/*
			 * We don't want to call the ordered free functions with
			 * the lock held.
			 */
			work->ordered_free(work);
			/* NB: work must not be dereferenced past this point. */
			trace_btrfs_all_work_done(wq->fs_info, work);
		}
	}
	spin_unlock_irqrestore(lock, flags);

	if (free_self) {
		self->ordered_free(self);
		/* NB: self must not be dereferenced past this point. */
		trace_btrfs_all_work_done(wq->fs_info, self);
	}
}

static void normal_work_helper(struct btrfs_work *work)
static void btrfs_work_helper(struct work_struct *normal_work)
{
	struct btrfs_work *work = container_of(normal_work, struct btrfs_work,
					       normal_work);
	struct __btrfs_workqueue *wq;
	void *wtag;
	int need_order = 0;

	/*
@@ -316,29 +312,26 @@ static void normal_work_helper(struct btrfs_work *work)
	if (work->ordered_func)
		need_order = 1;
	wq = work->wq;
	/* Safe for tracepoints in case work gets freed by the callback */
	wtag = work;

	trace_btrfs_work_sched(work);
	thresh_exec_hook(wq);
	work->func(work);
	if (need_order) {
		set_bit(WORK_DONE_BIT, &work->flags);
		run_ordered_work(wq);
		run_ordered_work(wq, work);
	} else {
		/* NB: work must not be dereferenced past this point. */
		trace_btrfs_all_work_done(wq->fs_info, work);
	}
	if (!need_order)
		trace_btrfs_all_work_done(wq->fs_info, wtag);
}

void btrfs_init_work(struct btrfs_work *work, btrfs_work_func_t uniq_func,
		     btrfs_func_t func,
		     btrfs_func_t ordered_func,
		     btrfs_func_t ordered_free)
void btrfs_init_work(struct btrfs_work *work, btrfs_func_t func,
		     btrfs_func_t ordered_func, btrfs_func_t ordered_free)
{
	work->func = func;
	work->ordered_func = ordered_func;
	work->ordered_free = ordered_free;
	INIT_WORK(&work->normal_work, uniq_func);
	INIT_WORK(&work->normal_work, btrfs_work_helper);
	INIT_LIST_HEAD(&work->ordered_list);
	work->flags = 0;
}
@@ -29,49 +29,20 @@ struct btrfs_work {
	unsigned long flags;
};

#define BTRFS_WORK_HELPER_PROTO(name) \
void btrfs_##name(struct work_struct *arg)

BTRFS_WORK_HELPER_PROTO(worker_helper);
BTRFS_WORK_HELPER_PROTO(delalloc_helper);
BTRFS_WORK_HELPER_PROTO(flush_delalloc_helper);
BTRFS_WORK_HELPER_PROTO(cache_helper);
BTRFS_WORK_HELPER_PROTO(submit_helper);
BTRFS_WORK_HELPER_PROTO(fixup_helper);
BTRFS_WORK_HELPER_PROTO(endio_helper);
BTRFS_WORK_HELPER_PROTO(endio_meta_helper);
BTRFS_WORK_HELPER_PROTO(endio_meta_write_helper);
BTRFS_WORK_HELPER_PROTO(endio_raid56_helper);
BTRFS_WORK_HELPER_PROTO(endio_repair_helper);
BTRFS_WORK_HELPER_PROTO(rmw_helper);
BTRFS_WORK_HELPER_PROTO(endio_write_helper);
BTRFS_WORK_HELPER_PROTO(freespace_write_helper);
BTRFS_WORK_HELPER_PROTO(delayed_meta_helper);
BTRFS_WORK_HELPER_PROTO(readahead_helper);
BTRFS_WORK_HELPER_PROTO(qgroup_rescan_helper);
BTRFS_WORK_HELPER_PROTO(extent_refs_helper);
BTRFS_WORK_HELPER_PROTO(scrub_helper);
BTRFS_WORK_HELPER_PROTO(scrubwrc_helper);
BTRFS_WORK_HELPER_PROTO(scrubnc_helper);
BTRFS_WORK_HELPER_PROTO(scrubparity_helper);


struct btrfs_workqueue *btrfs_alloc_workqueue(struct btrfs_fs_info *fs_info,
					      const char *name,
					      unsigned int flags,
					      int limit_active,
					      int thresh);
void btrfs_init_work(struct btrfs_work *work, btrfs_work_func_t helper,
		     btrfs_func_t func,
		     btrfs_func_t ordered_func,
		     btrfs_func_t ordered_free);
void btrfs_init_work(struct btrfs_work *work, btrfs_func_t func,
		     btrfs_func_t ordered_func, btrfs_func_t ordered_free);
void btrfs_queue_work(struct btrfs_workqueue *wq,
		      struct btrfs_work *work);
void btrfs_destroy_workqueue(struct btrfs_workqueue *wq);
void btrfs_workqueue_set_max(struct btrfs_workqueue *wq, int max);
void btrfs_set_work_high_priority(struct btrfs_work *work);
struct btrfs_fs_info *btrfs_work_owner(const struct btrfs_work *work);
struct btrfs_fs_info *btrfs_workqueue_owner(const struct __btrfs_workqueue *wq);
struct btrfs_fs_info * __pure btrfs_work_owner(const struct btrfs_work *work);
struct btrfs_fs_info * __pure btrfs_workqueue_owner(const struct __btrfs_workqueue *wq);
bool btrfs_workqueue_normal_congested(const struct btrfs_workqueue *wq);

#endif
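To make the interface change above concrete, here is a hypothetical caller before and after this series; my_cache_func and the use of the caching_workers queue are illustrative assumptions, the point is only that the per-caller helper argument to btrfs_init_work() is gone and every item now runs through the single btrfs_work_helper().

/* Hypothetical work function; btrfs_func_t is void (*)(struct btrfs_work *). */
static void my_cache_func(struct btrfs_work *work)
{
	/* ... the actual work ... */
}

static void queue_cache_work(struct btrfs_fs_info *fs_info,
			     struct btrfs_work *work)
{
	/* Before: btrfs_init_work(work, btrfs_cache_helper, my_cache_func, NULL, NULL); */
	btrfs_init_work(work, my_cache_func, NULL, NULL);
	btrfs_queue_work(fs_info->caching_workers, work);
}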
(The diff for one file is not shown here because of its size.)
@ -34,7 +34,7 @@ struct btrfs_caching_control {
|
|||
struct mutex mutex;
|
||||
wait_queue_head_t wait;
|
||||
struct btrfs_work work;
|
||||
struct btrfs_block_group_cache *block_group;
|
||||
struct btrfs_block_group *block_group;
|
||||
u64 progress;
|
||||
refcount_t count;
|
||||
};
|
||||
|
@ -42,14 +42,15 @@ struct btrfs_caching_control {
|
|||
/* Once caching_thread() finds this much free space, it will wake up waiters. */
|
||||
#define CACHING_CTL_WAKE_UP SZ_2M
|
||||
|
||||
struct btrfs_block_group_cache {
|
||||
struct btrfs_key key;
|
||||
struct btrfs_block_group_item item;
|
||||
struct btrfs_block_group {
|
||||
struct btrfs_fs_info *fs_info;
|
||||
struct inode *inode;
|
||||
spinlock_t lock;
|
||||
u64 start;
|
||||
u64 length;
|
||||
u64 pinned;
|
||||
u64 reserved;
|
||||
u64 used;
|
||||
u64 delalloc_bytes;
|
||||
u64 bytes_super;
|
||||
u64 flags;
|
||||
|
@ -159,7 +160,7 @@ struct btrfs_block_group_cache {
|
|||
|
||||
#ifdef CONFIG_BTRFS_DEBUG
|
||||
static inline int btrfs_should_fragment_free_space(
|
||||
struct btrfs_block_group_cache *block_group)
|
||||
struct btrfs_block_group *block_group)
|
||||
{
|
||||
struct btrfs_fs_info *fs_info = block_group->fs_info;
|
||||
|
||||
|
@ -170,29 +171,29 @@ static inline int btrfs_should_fragment_free_space(
|
|||
}
|
||||
#endif
|
||||
|
||||
struct btrfs_block_group_cache *btrfs_lookup_first_block_group(
|
||||
struct btrfs_block_group *btrfs_lookup_first_block_group(
|
||||
struct btrfs_fs_info *info, u64 bytenr);
|
||||
struct btrfs_block_group_cache *btrfs_lookup_block_group(
|
||||
struct btrfs_block_group *btrfs_lookup_block_group(
|
||||
struct btrfs_fs_info *info, u64 bytenr);
|
||||
struct btrfs_block_group_cache *btrfs_next_block_group(
|
||||
struct btrfs_block_group_cache *cache);
|
||||
void btrfs_get_block_group(struct btrfs_block_group_cache *cache);
|
||||
void btrfs_put_block_group(struct btrfs_block_group_cache *cache);
|
||||
struct btrfs_block_group *btrfs_next_block_group(
|
||||
struct btrfs_block_group *cache);
|
||||
void btrfs_get_block_group(struct btrfs_block_group *cache);
|
||||
void btrfs_put_block_group(struct btrfs_block_group *cache);
|
||||
void btrfs_dec_block_group_reservations(struct btrfs_fs_info *fs_info,
|
||||
const u64 start);
|
||||
void btrfs_wait_block_group_reservations(struct btrfs_block_group_cache *bg);
|
||||
void btrfs_wait_block_group_reservations(struct btrfs_block_group *bg);
|
||||
bool btrfs_inc_nocow_writers(struct btrfs_fs_info *fs_info, u64 bytenr);
|
||||
void btrfs_dec_nocow_writers(struct btrfs_fs_info *fs_info, u64 bytenr);
|
||||
void btrfs_wait_nocow_writers(struct btrfs_block_group_cache *bg);
|
||||
void btrfs_wait_block_group_cache_progress(struct btrfs_block_group_cache *cache,
|
||||
void btrfs_wait_nocow_writers(struct btrfs_block_group *bg);
|
||||
void btrfs_wait_block_group_cache_progress(struct btrfs_block_group *cache,
|
||||
u64 num_bytes);
|
||||
int btrfs_wait_block_group_cache_done(struct btrfs_block_group_cache *cache);
|
||||
int btrfs_cache_block_group(struct btrfs_block_group_cache *cache,
|
||||
int btrfs_wait_block_group_cache_done(struct btrfs_block_group *cache);
|
||||
int btrfs_cache_block_group(struct btrfs_block_group *cache,
|
||||
int load_cache_only);
|
||||
void btrfs_put_caching_control(struct btrfs_caching_control *ctl);
|
||||
struct btrfs_caching_control *btrfs_get_caching_control(
|
||||
struct btrfs_block_group_cache *cache);
|
||||
u64 add_new_free_space(struct btrfs_block_group_cache *block_group,
|
||||
struct btrfs_block_group *cache);
|
||||
u64 add_new_free_space(struct btrfs_block_group *block_group,
|
||||
u64 start, u64 end);
|
||||
struct btrfs_trans_handle *btrfs_start_trans_remove_block_group(
|
||||
struct btrfs_fs_info *fs_info,
|
||||
|
@ -200,21 +201,22 @@ struct btrfs_trans_handle *btrfs_start_trans_remove_block_group(
|
|||
int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
|
||||
u64 group_start, struct extent_map *em);
|
||||
void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info);
|
||||
void btrfs_mark_bg_unused(struct btrfs_block_group_cache *bg);
|
||||
void btrfs_mark_bg_unused(struct btrfs_block_group *bg);
|
||||
int btrfs_read_block_groups(struct btrfs_fs_info *info);
|
||||
int btrfs_make_block_group(struct btrfs_trans_handle *trans, u64 bytes_used,
|
||||
u64 type, u64 chunk_offset, u64 size);
|
||||
void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans);
|
||||
int btrfs_inc_block_group_ro(struct btrfs_block_group_cache *cache);
|
||||
void btrfs_dec_block_group_ro(struct btrfs_block_group_cache *cache);
|
||||
int btrfs_inc_block_group_ro(struct btrfs_block_group *cache,
|
||||
bool do_chunk_alloc);
|
||||
void btrfs_dec_block_group_ro(struct btrfs_block_group *cache);
|
||||
int btrfs_start_dirty_block_groups(struct btrfs_trans_handle *trans);
|
||||
int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans);
|
||||
int btrfs_setup_space_cache(struct btrfs_trans_handle *trans);
|
||||
int btrfs_update_block_group(struct btrfs_trans_handle *trans,
|
||||
u64 bytenr, u64 num_bytes, int alloc);
|
||||
int btrfs_add_reserved_bytes(struct btrfs_block_group_cache *cache,
|
||||
int btrfs_add_reserved_bytes(struct btrfs_block_group *cache,
|
||||
u64 ram_bytes, u64 num_bytes, int delalloc);
|
||||
void btrfs_free_reserved_bytes(struct btrfs_block_group_cache *cache,
|
||||
void btrfs_free_reserved_bytes(struct btrfs_block_group *cache,
|
||||
u64 num_bytes, int delalloc);
|
||||
int btrfs_chunk_alloc(struct btrfs_trans_handle *trans, u64 flags,
|
||||
enum btrfs_chunk_alloc_enum force);
|
||||
|
@ -239,8 +241,7 @@ static inline u64 btrfs_system_alloc_profile(struct btrfs_fs_info *fs_info)
|
|||
return btrfs_get_alloc_profile(fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
|
||||
}
|
||||
|
||||
static inline int btrfs_block_group_cache_done(
|
||||
struct btrfs_block_group_cache *cache)
|
||||
static inline int btrfs_block_group_done(struct btrfs_block_group *cache)
|
||||
{
|
||||
smp_mb();
|
||||
return cache->cached == BTRFS_CACHE_FINISHED ||
|
||||
|
|
|
@@ -63,9 +63,6 @@ struct btrfs_inode {
	/* held while logging the inode in tree-log.c */
	struct mutex log_mutex;

	/* held while doing delalloc reservations */
	struct mutex delalloc_mutex;

	/* used to order data wrt metadata */
	struct btrfs_ordered_inode_tree ordered_tree;
@ -29,6 +29,41 @@
|
|||
#include "extent_io.h"
|
||||
#include "extent_map.h"
|
||||
|
||||
int zlib_compress_pages(struct list_head *ws, struct address_space *mapping,
|
||||
u64 start, struct page **pages, unsigned long *out_pages,
|
||||
unsigned long *total_in, unsigned long *total_out);
|
||||
int zlib_decompress_bio(struct list_head *ws, struct compressed_bio *cb);
|
||||
int zlib_decompress(struct list_head *ws, unsigned char *data_in,
|
||||
struct page *dest_page, unsigned long start_byte, size_t srclen,
|
||||
size_t destlen);
|
||||
struct list_head *zlib_alloc_workspace(unsigned int level);
|
||||
void zlib_free_workspace(struct list_head *ws);
|
||||
struct list_head *zlib_get_workspace(unsigned int level);
|
||||
|
||||
int lzo_compress_pages(struct list_head *ws, struct address_space *mapping,
|
||||
u64 start, struct page **pages, unsigned long *out_pages,
|
||||
unsigned long *total_in, unsigned long *total_out);
|
||||
int lzo_decompress_bio(struct list_head *ws, struct compressed_bio *cb);
|
||||
int lzo_decompress(struct list_head *ws, unsigned char *data_in,
|
||||
struct page *dest_page, unsigned long start_byte, size_t srclen,
|
||||
size_t destlen);
|
||||
struct list_head *lzo_alloc_workspace(unsigned int level);
|
||||
void lzo_free_workspace(struct list_head *ws);
|
||||
|
||||
int zstd_compress_pages(struct list_head *ws, struct address_space *mapping,
|
||||
u64 start, struct page **pages, unsigned long *out_pages,
|
||||
unsigned long *total_in, unsigned long *total_out);
|
||||
int zstd_decompress_bio(struct list_head *ws, struct compressed_bio *cb);
|
||||
int zstd_decompress(struct list_head *ws, unsigned char *data_in,
|
||||
struct page *dest_page, unsigned long start_byte, size_t srclen,
|
||||
size_t destlen);
|
||||
void zstd_init_workspace_manager(void);
|
||||
void zstd_cleanup_workspace_manager(void);
|
||||
struct list_head *zstd_alloc_workspace(unsigned int level);
|
||||
void zstd_free_workspace(struct list_head *ws);
|
||||
struct list_head *zstd_get_workspace(unsigned int level);
|
||||
void zstd_put_workspace(struct list_head *ws);
|
||||
|
||||
static const char* const btrfs_compress_types[] = { "", "zlib", "lzo", "zstd" };
|
||||
|
||||
const char* btrfs_compress_type2str(enum btrfs_compression_type type)
|
||||
|
@ -39,6 +74,8 @@ const char* btrfs_compress_type2str(enum btrfs_compression_type type)
|
|||
case BTRFS_COMPRESS_ZSTD:
|
||||
case BTRFS_COMPRESS_NONE:
|
||||
return btrfs_compress_types[type];
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
||||
return NULL;
|
||||
|
@ -60,6 +97,70 @@ bool btrfs_compress_is_valid_type(const char *str, size_t len)
|
|||
return false;
|
||||
}
|
||||
|
||||
static int compression_compress_pages(int type, struct list_head *ws,
|
||||
struct address_space *mapping, u64 start, struct page **pages,
|
||||
unsigned long *out_pages, unsigned long *total_in,
|
||||
unsigned long *total_out)
|
||||
{
|
||||
switch (type) {
|
||||
case BTRFS_COMPRESS_ZLIB:
|
||||
return zlib_compress_pages(ws, mapping, start, pages,
|
||||
out_pages, total_in, total_out);
|
||||
case BTRFS_COMPRESS_LZO:
|
||||
return lzo_compress_pages(ws, mapping, start, pages,
|
||||
out_pages, total_in, total_out);
|
||||
case BTRFS_COMPRESS_ZSTD:
|
||||
return zstd_compress_pages(ws, mapping, start, pages,
|
||||
out_pages, total_in, total_out);
|
||||
case BTRFS_COMPRESS_NONE:
|
||||
default:
|
||||
/*
|
||||
* This can't happen, the type is validated several times
|
||||
* before we get here. As a sane fallback, return what the
|
||||
* callers will understand as 'no compression happened'.
|
||||
*/
|
||||
return -E2BIG;
|
||||
}
|
||||
}
|
||||
|
||||
static int compression_decompress_bio(int type, struct list_head *ws,
|
||||
struct compressed_bio *cb)
|
||||
{
|
||||
switch (type) {
|
||||
case BTRFS_COMPRESS_ZLIB: return zlib_decompress_bio(ws, cb);
|
||||
case BTRFS_COMPRESS_LZO: return lzo_decompress_bio(ws, cb);
|
||||
case BTRFS_COMPRESS_ZSTD: return zstd_decompress_bio(ws, cb);
|
||||
case BTRFS_COMPRESS_NONE:
|
||||
default:
|
||||
/*
|
||||
* This can't happen, the type is validated several times
|
||||
* before we get here.
|
||||
*/
|
||||
BUG();
|
||||
}
|
||||
}
|
||||
|
||||
static int compression_decompress(int type, struct list_head *ws,
|
||||
unsigned char *data_in, struct page *dest_page,
|
||||
unsigned long start_byte, size_t srclen, size_t destlen)
|
||||
{
|
||||
switch (type) {
|
||||
case BTRFS_COMPRESS_ZLIB: return zlib_decompress(ws, data_in, dest_page,
|
||||
start_byte, srclen, destlen);
|
||||
case BTRFS_COMPRESS_LZO: return lzo_decompress(ws, data_in, dest_page,
|
||||
start_byte, srclen, destlen);
|
||||
case BTRFS_COMPRESS_ZSTD: return zstd_decompress(ws, data_in, dest_page,
|
||||
start_byte, srclen, destlen);
|
||||
case BTRFS_COMPRESS_NONE:
|
||||
default:
|
||||
/*
|
||||
* This can't happen, the type is validated several times
|
||||
* before we get here.
|
||||
*/
|
||||
BUG();
|
||||
}
|
||||
}
|
||||
|
||||
static int btrfs_decompress_bio(struct compressed_bio *cb);
|
||||
|
||||
static inline int compressed_bio_size(struct btrfs_fs_info *fs_info,
|
||||
|
@ -311,7 +412,8 @@ blk_status_t btrfs_submit_compressed_write(struct inode *inode, u64 start,
|
|||
unsigned long compressed_len,
|
||||
struct page **compressed_pages,
|
||||
unsigned long nr_pages,
|
||||
unsigned int write_flags)
|
||||
unsigned int write_flags,
|
||||
struct cgroup_subsys_state *blkcg_css)
|
||||
{
|
||||
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
|
||||
struct bio *bio = NULL;
|
||||
|
@ -320,7 +422,6 @@ blk_status_t btrfs_submit_compressed_write(struct inode *inode, u64 start,
|
|||
int pg_index = 0;
|
||||
struct page *page;
|
||||
u64 first_byte = disk_start;
|
||||
struct block_device *bdev;
|
||||
blk_status_t ret;
|
||||
int skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
|
||||
|
||||
|
@ -339,13 +440,15 @@ blk_status_t btrfs_submit_compressed_write(struct inode *inode, u64 start,
|
|||
cb->orig_bio = NULL;
|
||||
cb->nr_pages = nr_pages;
|
||||
|
||||
bdev = fs_info->fs_devices->latest_bdev;
|
||||
|
||||
bio = btrfs_bio_alloc(first_byte);
|
||||
bio_set_dev(bio, bdev);
|
||||
bio->bi_opf = REQ_OP_WRITE | write_flags;
|
||||
bio->bi_private = cb;
|
||||
bio->bi_end_io = end_compressed_bio_write;
|
||||
|
||||
if (blkcg_css) {
|
||||
bio->bi_opf |= REQ_CGROUP_PUNT;
|
||||
bio_associate_blkg_from_css(bio, blkcg_css);
|
||||
}
|
||||
refcount_set(&cb->pending_bios, 1);
|
||||
|
||||
/* create and submit bios for the compressed pages */
|
||||
|
@ -378,14 +481,13 @@ blk_status_t btrfs_submit_compressed_write(struct inode *inode, u64 start,
|
|||
BUG_ON(ret); /* -ENOMEM */
|
||||
}
|
||||
|
||||
ret = btrfs_map_bio(fs_info, bio, 0, 1);
|
||||
ret = btrfs_map_bio(fs_info, bio, 0);
|
||||
if (ret) {
|
||||
bio->bi_status = ret;
|
||||
bio_endio(bio);
|
||||
}
|
||||
|
||||
bio = btrfs_bio_alloc(first_byte);
|
||||
bio_set_dev(bio, bdev);
|
||||
bio->bi_opf = REQ_OP_WRITE | write_flags;
|
||||
bio->bi_private = cb;
|
||||
bio->bi_end_io = end_compressed_bio_write;
|
||||
|
@ -409,7 +511,7 @@ blk_status_t btrfs_submit_compressed_write(struct inode *inode, u64 start,
|
|||
BUG_ON(ret); /* -ENOMEM */
|
||||
}
|
||||
|
||||
ret = btrfs_map_bio(fs_info, bio, 0, 1);
|
||||
ret = btrfs_map_bio(fs_info, bio, 0);
|
||||
if (ret) {
|
||||
bio->bi_status = ret;
|
||||
bio_endio(bio);
|
||||
|
@ -553,7 +655,6 @@ blk_status_t btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
|
|||
unsigned long nr_pages;
|
||||
unsigned long pg_index;
|
||||
struct page *page;
|
||||
struct block_device *bdev;
|
||||
struct bio *comp_bio;
|
||||
u64 cur_disk_byte = (u64)bio->bi_iter.bi_sector << 9;
|
||||
u64 em_len;
|
||||
|
@ -604,8 +705,6 @@ blk_status_t btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
|
|||
if (!cb->compressed_pages)
|
||||
goto fail1;
|
||||
|
||||
bdev = fs_info->fs_devices->latest_bdev;
|
||||
|
||||
for (pg_index = 0; pg_index < nr_pages; pg_index++) {
|
||||
cb->compressed_pages[pg_index] = alloc_page(GFP_NOFS |
|
||||
__GFP_HIGHMEM);
|
||||
|
@ -624,7 +723,6 @@ blk_status_t btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
|
|||
cb->len = bio->bi_iter.bi_size;
|
||||
|
||||
comp_bio = btrfs_bio_alloc(cur_disk_byte);
|
||||
bio_set_dev(comp_bio, bdev);
|
||||
comp_bio->bi_opf = REQ_OP_READ;
|
||||
comp_bio->bi_private = cb;
|
||||
comp_bio->bi_end_io = end_compressed_bio_read;
|
||||
|
@ -668,14 +766,13 @@ blk_status_t btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
|
|||
fs_info->sectorsize);
|
||||
sums += csum_size * nr_sectors;
|
||||
|
||||
ret = btrfs_map_bio(fs_info, comp_bio, mirror_num, 0);
|
||||
ret = btrfs_map_bio(fs_info, comp_bio, mirror_num);
|
||||
if (ret) {
|
||||
comp_bio->bi_status = ret;
|
||||
bio_endio(comp_bio);
|
||||
}
|
||||
|
||||
comp_bio = btrfs_bio_alloc(cur_disk_byte);
|
||||
bio_set_dev(comp_bio, bdev);
|
||||
comp_bio->bi_opf = REQ_OP_READ;
|
||||
comp_bio->bi_private = cb;
|
||||
comp_bio->bi_end_io = end_compressed_bio_read;
|
||||
|
@ -693,7 +790,7 @@ blk_status_t btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
|
|||
BUG_ON(ret); /* -ENOMEM */
|
||||
}
|
||||
|
||||
ret = btrfs_map_bio(fs_info, comp_bio, mirror_num, 0);
|
||||
ret = btrfs_map_bio(fs_info, comp_bio, mirror_num);
|
||||
if (ret) {
|
||||
comp_bio->bi_status = ret;
|
||||
bio_endio(comp_bio);
|
||||
|
@ -764,26 +861,6 @@ struct heuristic_ws {
|
|||
|
||||
static struct workspace_manager heuristic_wsm;
|
||||
|
||||
static void heuristic_init_workspace_manager(void)
|
||||
{
|
||||
btrfs_init_workspace_manager(&heuristic_wsm, &btrfs_heuristic_compress);
|
||||
}
|
||||
|
||||
static void heuristic_cleanup_workspace_manager(void)
|
||||
{
|
||||
btrfs_cleanup_workspace_manager(&heuristic_wsm);
|
||||
}
|
||||
|
||||
static struct list_head *heuristic_get_workspace(unsigned int level)
|
||||
{
|
||||
return btrfs_get_workspace(&heuristic_wsm, level);
|
||||
}
|
||||
|
||||
static void heuristic_put_workspace(struct list_head *ws)
|
||||
{
|
||||
btrfs_put_workspace(&heuristic_wsm, ws);
|
||||
}
|
||||
|
||||
static void free_heuristic_ws(struct list_head *ws)
|
||||
{
|
||||
struct heuristic_ws *workspace;
|
||||
|
@ -824,12 +901,7 @@ fail:
|
|||
}
|
||||
|
||||
const struct btrfs_compress_op btrfs_heuristic_compress = {
|
||||
.init_workspace_manager = heuristic_init_workspace_manager,
|
||||
.cleanup_workspace_manager = heuristic_cleanup_workspace_manager,
|
||||
.get_workspace = heuristic_get_workspace,
|
||||
.put_workspace = heuristic_put_workspace,
|
||||
.alloc_workspace = alloc_heuristic_ws,
|
||||
.free_workspace = free_heuristic_ws,
|
||||
.workspace_manager = &heuristic_wsm,
|
||||
};
|
||||
|
||||
static const struct btrfs_compress_op * const btrfs_compress_op[] = {
|
||||
|
@ -840,13 +912,44 @@ static const struct btrfs_compress_op * const btrfs_compress_op[] = {
|
|||
&btrfs_zstd_compress,
|
||||
};
|
||||
|
||||
void btrfs_init_workspace_manager(struct workspace_manager *wsm,
|
||||
const struct btrfs_compress_op *ops)
|
||||
static struct list_head *alloc_workspace(int type, unsigned int level)
|
||||
{
|
||||
switch (type) {
|
||||
case BTRFS_COMPRESS_NONE: return alloc_heuristic_ws(level);
|
||||
case BTRFS_COMPRESS_ZLIB: return zlib_alloc_workspace(level);
|
||||
case BTRFS_COMPRESS_LZO: return lzo_alloc_workspace(level);
|
||||
case BTRFS_COMPRESS_ZSTD: return zstd_alloc_workspace(level);
|
||||
default:
|
||||
/*
|
||||
* This can't happen, the type is validated several times
|
||||
* before we get here.
|
||||
*/
|
||||
BUG();
|
||||
}
|
||||
}
|
||||
|
||||
static void free_workspace(int type, struct list_head *ws)
|
||||
{
|
||||
switch (type) {
|
||||
case BTRFS_COMPRESS_NONE: return free_heuristic_ws(ws);
|
||||
case BTRFS_COMPRESS_ZLIB: return zlib_free_workspace(ws);
|
||||
case BTRFS_COMPRESS_LZO: return lzo_free_workspace(ws);
|
||||
case BTRFS_COMPRESS_ZSTD: return zstd_free_workspace(ws);
|
||||
default:
|
||||
/*
|
||||
* This can't happen, the type is validated several times
|
||||
* before we get here.
|
||||
*/
|
||||
BUG();
|
||||
}
|
||||
}
|
||||
|
||||
static void btrfs_init_workspace_manager(int type)
|
||||
{
|
||||
struct workspace_manager *wsm;
|
||||
struct list_head *workspace;
|
||||
|
||||
wsm->ops = ops;
|
||||
|
||||
wsm = btrfs_compress_op[type]->workspace_manager;
|
||||
INIT_LIST_HEAD(&wsm->idle_ws);
|
||||
spin_lock_init(&wsm->ws_lock);
|
||||
atomic_set(&wsm->total_ws, 0);
|
||||
|
@ -856,7 +959,7 @@ void btrfs_init_workspace_manager(struct workspace_manager *wsm,
|
|||
* Preallocate one workspace for each compression type so we can
|
||||
* guarantee forward progress in the worst case
|
||||
*/
|
||||
workspace = wsm->ops->alloc_workspace(0);
|
||||
workspace = alloc_workspace(type, 0);
|
||||
if (IS_ERR(workspace)) {
|
||||
pr_warn(
|
||||
"BTRFS: cannot preallocate compression workspace, will try later\n");
|
||||
|
@ -867,14 +970,16 @@ void btrfs_init_workspace_manager(struct workspace_manager *wsm,
|
|||
}
|
||||
}
|
||||
|
||||
void btrfs_cleanup_workspace_manager(struct workspace_manager *wsman)
|
||||
static void btrfs_cleanup_workspace_manager(int type)
|
||||
{
|
||||
struct workspace_manager *wsman;
|
||||
struct list_head *ws;
|
||||
|
||||
wsman = btrfs_compress_op[type]->workspace_manager;
|
||||
while (!list_empty(&wsman->idle_ws)) {
|
||||
ws = wsman->idle_ws.next;
|
||||
list_del(ws);
|
||||
wsman->ops->free_workspace(ws);
|
||||
free_workspace(type, ws);
|
||||
atomic_dec(&wsman->total_ws);
|
||||
}
|
||||
}
|
||||
|
@ -885,9 +990,9 @@ void btrfs_cleanup_workspace_manager(struct workspace_manager *wsman)
|
|||
* Preallocation makes a forward progress guarantees and we do not return
|
||||
* errors.
|
||||
*/
|
||||
struct list_head *btrfs_get_workspace(struct workspace_manager *wsm,
|
||||
unsigned int level)
|
||||
struct list_head *btrfs_get_workspace(int type, unsigned int level)
|
||||
{
|
||||
struct workspace_manager *wsm;
|
||||
struct list_head *workspace;
|
||||
int cpus = num_online_cpus();
|
||||
unsigned nofs_flag;
|
||||
|
@ -897,6 +1002,7 @@ struct list_head *btrfs_get_workspace(struct workspace_manager *wsm,
|
|||
wait_queue_head_t *ws_wait;
|
||||
int *free_ws;
|
||||
|
||||
wsm = btrfs_compress_op[type]->workspace_manager;
|
||||
idle_ws = &wsm->idle_ws;
|
||||
ws_lock = &wsm->ws_lock;
|
||||
total_ws = &wsm->total_ws;
|
||||
|
@ -932,7 +1038,7 @@ again:
|
|||
* context of btrfs_compress_bio/btrfs_compress_pages
|
||||
*/
|
||||
nofs_flag = memalloc_nofs_save();
|
||||
workspace = wsm->ops->alloc_workspace(level);
|
||||
workspace = alloc_workspace(type, level);
|
||||
memalloc_nofs_restore(nofs_flag);
|
||||
|
||||
if (IS_ERR(workspace)) {
|
||||
|
@ -965,21 +1071,34 @@ again:
|
|||
|
||||
static struct list_head *get_workspace(int type, int level)
|
||||
{
|
||||
return btrfs_compress_op[type]->get_workspace(level);
|
||||
switch (type) {
|
||||
case BTRFS_COMPRESS_NONE: return btrfs_get_workspace(type, level);
|
||||
case BTRFS_COMPRESS_ZLIB: return zlib_get_workspace(level);
|
||||
case BTRFS_COMPRESS_LZO: return btrfs_get_workspace(type, level);
|
||||
case BTRFS_COMPRESS_ZSTD: return zstd_get_workspace(level);
|
||||
default:
|
||||
/*
|
||||
* This can't happen, the type is validated several times
|
||||
* before we get here.
|
||||
*/
|
||||
BUG();
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* put a workspace struct back on the list or free it if we have enough
|
||||
* idle ones sitting around
|
||||
*/
|
||||
void btrfs_put_workspace(struct workspace_manager *wsm, struct list_head *ws)
|
||||
void btrfs_put_workspace(int type, struct list_head *ws)
|
||||
{
|
||||
struct workspace_manager *wsm;
|
||||
struct list_head *idle_ws;
|
||||
spinlock_t *ws_lock;
|
||||
atomic_t *total_ws;
|
||||
wait_queue_head_t *ws_wait;
|
||||
int *free_ws;
|
||||
|
||||
wsm = btrfs_compress_op[type]->workspace_manager;
|
||||
idle_ws = &wsm->idle_ws;
|
||||
ws_lock = &wsm->ws_lock;
|
||||
total_ws = &wsm->total_ws;
|
||||
|
@ -995,7 +1114,7 @@ void btrfs_put_workspace(struct workspace_manager *wsm, struct list_head *ws)
|
|||
}
|
||||
spin_unlock(ws_lock);
|
||||
|
||||
wsm->ops->free_workspace(ws);
|
||||
free_workspace(type, ws);
|
||||
atomic_dec(total_ws);
|
||||
wake:
|
||||
cond_wake_up(ws_wait);
|
||||
|
@ -1003,7 +1122,18 @@ wake:
|
|||
|
||||
static void put_workspace(int type, struct list_head *ws)
|
||||
{
|
||||
return btrfs_compress_op[type]->put_workspace(ws);
|
||||
switch (type) {
|
||||
case BTRFS_COMPRESS_NONE: return btrfs_put_workspace(type, ws);
|
||||
case BTRFS_COMPRESS_ZLIB: return btrfs_put_workspace(type, ws);
|
||||
case BTRFS_COMPRESS_LZO: return btrfs_put_workspace(type, ws);
|
||||
case BTRFS_COMPRESS_ZSTD: return zstd_put_workspace(ws);
|
||||
default:
|
||||
/*
|
||||
* This can't happen, the type is validated several times
|
||||
* before we get here.
|
||||
*/
|
||||
BUG();
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -1042,10 +1172,8 @@ int btrfs_compress_pages(unsigned int type_level, struct address_space *mapping,
|
|||
|
||||
level = btrfs_compress_set_level(type, level);
|
||||
workspace = get_workspace(type, level);
|
||||
ret = btrfs_compress_op[type]->compress_pages(workspace, mapping,
|
||||
start, pages,
|
||||
out_pages,
|
||||
total_in, total_out);
|
||||
ret = compression_compress_pages(type, workspace, mapping, start, pages,
|
||||
out_pages, total_in, total_out);
|
||||
put_workspace(type, workspace);
|
||||
return ret;
|
||||
}
|
||||
|
@ -1071,7 +1199,7 @@ static int btrfs_decompress_bio(struct compressed_bio *cb)
|
|||
int type = cb->compress_type;
|
||||
|
||||
workspace = get_workspace(type, 0);
|
||||
ret = btrfs_compress_op[type]->decompress_bio(workspace, cb);
|
||||
ret = compression_decompress_bio(type, workspace, cb);
|
||||
put_workspace(type, workspace);
|
||||
|
||||
return ret;
|
||||
|
@ -1089,9 +1217,8 @@ int btrfs_decompress(int type, unsigned char *data_in, struct page *dest_page,
|
|||
int ret;
|
||||
|
||||
workspace = get_workspace(type, 0);
|
||||
ret = btrfs_compress_op[type]->decompress(workspace, data_in,
|
||||
dest_page, start_byte,
|
||||
srclen, destlen);
|
||||
ret = compression_decompress(type, workspace, data_in, dest_page,
|
||||
start_byte, srclen, destlen);
|
||||
put_workspace(type, workspace);
|
||||
|
||||
return ret;
|
||||
|
@ -1099,18 +1226,18 @@ int btrfs_decompress(int type, unsigned char *data_in, struct page *dest_page,
|
|||
|
||||
void __init btrfs_init_compress(void)
|
||||
{
|
||||
int i;
|
||||
|
||||
for (i = 0; i < BTRFS_NR_WORKSPACE_MANAGERS; i++)
|
||||
btrfs_compress_op[i]->init_workspace_manager();
|
||||
btrfs_init_workspace_manager(BTRFS_COMPRESS_NONE);
|
||||
btrfs_init_workspace_manager(BTRFS_COMPRESS_ZLIB);
|
||||
btrfs_init_workspace_manager(BTRFS_COMPRESS_LZO);
|
||||
zstd_init_workspace_manager();
|
||||
}
|
||||
|
||||
void __cold btrfs_exit_compress(void)
|
||||
{
|
||||
int i;
|
||||
|
||||
for (i = 0; i < BTRFS_NR_WORKSPACE_MANAGERS; i++)
|
||||
btrfs_compress_op[i]->cleanup_workspace_manager();
|
||||
btrfs_cleanup_workspace_manager(BTRFS_COMPRESS_NONE);
|
||||
btrfs_cleanup_workspace_manager(BTRFS_COMPRESS_ZLIB);
|
||||
btrfs_cleanup_workspace_manager(BTRFS_COMPRESS_LZO);
|
||||
zstd_cleanup_workspace_manager();
|
||||
}
|
||||
|
||||
/*
|
||||
|
|
|
@ -93,7 +93,8 @@ blk_status_t btrfs_submit_compressed_write(struct inode *inode, u64 start,
|
|||
unsigned long compressed_len,
|
||||
struct page **compressed_pages,
|
||||
unsigned long nr_pages,
|
||||
unsigned int write_flags);
|
||||
unsigned int write_flags,
|
||||
struct cgroup_subsys_state *blkcg_css);
|
||||
blk_status_t btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
|
||||
int mirror_num, unsigned long bio_flags);
|
||||
|
||||
|
@ -104,11 +105,10 @@ enum btrfs_compression_type {
|
|||
BTRFS_COMPRESS_ZLIB = 1,
|
||||
BTRFS_COMPRESS_LZO = 2,
|
||||
BTRFS_COMPRESS_ZSTD = 3,
|
||||
BTRFS_COMPRESS_TYPES = 3,
|
||||
BTRFS_NR_COMPRESS_TYPES = 4,
|
||||
};
|
||||
|
||||
struct workspace_manager {
|
||||
const struct btrfs_compress_op *ops;
|
||||
struct list_head idle_ws;
|
||||
spinlock_t ws_lock;
|
||||
/* Number of free workspaces */
|
||||
|
@ -119,50 +119,18 @@ struct workspace_manager {
|
|||
wait_queue_head_t ws_wait;
|
||||
};
|
||||
|
||||
void btrfs_init_workspace_manager(struct workspace_manager *wsm,
|
||||
const struct btrfs_compress_op *ops);
|
||||
struct list_head *btrfs_get_workspace(struct workspace_manager *wsm,
|
||||
unsigned int level);
|
||||
void btrfs_put_workspace(struct workspace_manager *wsm, struct list_head *ws);
|
||||
void btrfs_cleanup_workspace_manager(struct workspace_manager *wsm);
|
||||
struct list_head *btrfs_get_workspace(int type, unsigned int level);
|
||||
void btrfs_put_workspace(int type, struct list_head *ws);
|
||||
|
||||
struct btrfs_compress_op {
|
||||
void (*init_workspace_manager)(void);
|
||||
|
||||
void (*cleanup_workspace_manager)(void);
|
||||
|
||||
struct list_head *(*get_workspace)(unsigned int level);
|
||||
|
||||
void (*put_workspace)(struct list_head *ws);
|
||||
|
||||
struct list_head *(*alloc_workspace)(unsigned int level);
|
||||
|
||||
void (*free_workspace)(struct list_head *workspace);
|
||||
|
||||
int (*compress_pages)(struct list_head *workspace,
|
||||
struct address_space *mapping,
|
||||
u64 start,
|
||||
struct page **pages,
|
||||
unsigned long *out_pages,
|
||||
unsigned long *total_in,
|
||||
unsigned long *total_out);
|
||||
|
||||
int (*decompress_bio)(struct list_head *workspace,
|
||||
struct compressed_bio *cb);
|
||||
|
||||
int (*decompress)(struct list_head *workspace,
|
||||
unsigned char *data_in,
|
||||
struct page *dest_page,
|
||||
unsigned long start_byte,
|
||||
size_t srclen, size_t destlen);
|
||||
|
||||
struct workspace_manager *workspace_manager;
|
||||
/* Maximum level supported by the compression algorithm */
|
||||
unsigned int max_level;
|
||||
unsigned int default_level;
|
||||
};
|
||||
|
||||
/* The heuristic workspaces are managed via the 0th workspace manager */
|
||||
#define BTRFS_NR_WORKSPACE_MANAGERS (BTRFS_COMPRESS_TYPES + 1)
|
||||
#define BTRFS_NR_WORKSPACE_MANAGERS BTRFS_NR_COMPRESS_TYPES
|
||||
|
||||
extern const struct btrfs_compress_op btrfs_heuristic_compress;
|
||||
extern const struct btrfs_compress_op btrfs_zlib_compress;
|
||||
|
|
fs/btrfs/ctree.c
@ -32,8 +32,13 @@ static void del_ptr(struct btrfs_root *root, struct btrfs_path *path,
|
|||
static const struct btrfs_csums {
|
||||
u16 size;
|
||||
const char *name;
|
||||
const char *driver;
|
||||
} btrfs_csums[] = {
|
||||
[BTRFS_CSUM_TYPE_CRC32] = { .size = 4, .name = "crc32c" },
|
||||
[BTRFS_CSUM_TYPE_XXHASH] = { .size = 8, .name = "xxhash64" },
|
||||
[BTRFS_CSUM_TYPE_SHA256] = { .size = 32, .name = "sha256" },
|
||||
[BTRFS_CSUM_TYPE_BLAKE2] = { .size = 32, .name = "blake2b",
|
||||
.driver = "blake2b-256" },
|
||||
};
|
||||
|
||||
int btrfs_super_csum_size(const struct btrfs_super_block *s)
|
||||
|
@ -51,36 +56,27 @@ const char *btrfs_super_csum_name(u16 csum_type)
|
|||
return btrfs_csums[csum_type].name;
|
||||
}
|
||||
|
||||
/*
|
||||
* Return driver name if defined, otherwise the name that's also a valid driver
|
||||
* name
|
||||
*/
|
||||
const char *btrfs_super_csum_driver(u16 csum_type)
|
||||
{
|
||||
/* csum type is validated at mount time */
|
||||
return btrfs_csums[csum_type].driver ?:
|
||||
btrfs_csums[csum_type].name;
|
||||
}
|
||||
|
||||
size_t __const btrfs_get_num_csums(void)
|
||||
{
|
||||
return ARRAY_SIZE(btrfs_csums);
|
||||
}
|
||||
|
||||
struct btrfs_path *btrfs_alloc_path(void)
|
||||
{
|
||||
return kmem_cache_zalloc(btrfs_path_cachep, GFP_NOFS);
|
||||
}
|
||||
|
||||
/*
|
||||
* set all locked nodes in the path to blocking locks. This should
|
||||
* be done before scheduling
|
||||
*/
|
||||
noinline void btrfs_set_path_blocking(struct btrfs_path *p)
|
||||
{
|
||||
int i;
|
||||
for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
|
||||
if (!p->nodes[i] || !p->locks[i])
|
||||
continue;
|
||||
/*
|
||||
* If we currently have a spinning reader or writer lock this
|
||||
* will bump the count of blocking holders and drop the
|
||||
* spinlock.
|
||||
*/
|
||||
if (p->locks[i] == BTRFS_READ_LOCK) {
|
||||
btrfs_set_lock_blocking_read(p->nodes[i]);
|
||||
p->locks[i] = BTRFS_READ_LOCK_BLOCKING;
|
||||
} else if (p->locks[i] == BTRFS_WRITE_LOCK) {
|
||||
btrfs_set_lock_blocking_write(p->nodes[i]);
|
||||
p->locks[i] = BTRFS_WRITE_LOCK_BLOCKING;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/* this also releases the path */
|
||||
void btrfs_free_path(struct btrfs_path *p)
|
||||
{
|
||||
|
@ -1125,7 +1121,7 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
|
|||
btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
|
||||
parent_start = buf->start;
|
||||
|
||||
extent_buffer_get(cow);
|
||||
atomic_inc(&cow->refs);
|
||||
ret = tree_mod_log_insert_root(root->node, cow, 1);
|
||||
BUG_ON(ret < 0);
|
||||
rcu_assign_pointer(root->node, cow);
|
||||
|
@ -1563,7 +1559,7 @@ static int comp_keys(const struct btrfs_disk_key *disk,
|
|||
/*
|
||||
* same as comp_keys only with two btrfs_key's
|
||||
*/
|
||||
int btrfs_comp_cpu_keys(const struct btrfs_key *k1, const struct btrfs_key *k2)
|
||||
int __pure btrfs_comp_cpu_keys(const struct btrfs_key *k1, const struct btrfs_key *k2)
|
||||
{
|
||||
if (k1->objectid > k2->objectid)
|
||||
return 1;
|
||||
|
@ -2036,7 +2032,7 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
|
|||
/* update the path */
|
||||
if (left) {
|
||||
if (btrfs_header_nritems(left) > orig_slot) {
|
||||
extent_buffer_get(left);
|
||||
atomic_inc(&left->refs);
|
||||
/* left was locked after cow */
|
||||
path->nodes[level] = left;
|
||||
path->slots[level + 1] -= 1;
|
||||
|
@ -2378,32 +2374,6 @@ static noinline void unlock_up(struct btrfs_path *path, int level,
|
|||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* This releases any locks held in the path starting at level and
|
||||
* going all the way up to the root.
|
||||
*
|
||||
* btrfs_search_slot will keep the lock held on higher nodes in a few
|
||||
* corner cases, such as COW of the block at slot zero in the node. This
|
||||
* ignores those rules, and it should only be called when there are no
|
||||
* more updates to be done higher up in the tree.
|
||||
*/
|
||||
noinline void btrfs_unlock_up_safe(struct btrfs_path *path, int level)
|
||||
{
|
||||
int i;
|
||||
|
||||
if (path->keep_locks)
|
||||
return;
|
||||
|
||||
for (i = level; i < BTRFS_MAX_LEVEL; i++) {
|
||||
if (!path->nodes[i])
|
||||
continue;
|
||||
if (!path->locks[i])
|
||||
continue;
|
||||
btrfs_tree_unlock_rw(path->nodes[i], path->locks[i]);
|
||||
path->locks[i] = 0;
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* helper function for btrfs_search_slot. The goal is to find a block
|
||||
* in cache without setting the path to blocking. If we find the block
|
||||
|
@ -2652,7 +2622,7 @@ static struct extent_buffer *btrfs_search_slot_get_root(struct btrfs_root *root,
|
|||
|
||||
} else {
|
||||
b = root->commit_root;
|
||||
extent_buffer_get(b);
|
||||
atomic_inc(&b->refs);
|
||||
}
|
||||
level = btrfs_header_level(b);
|
||||
/*
|
||||
|
@ -2785,12 +2755,10 @@ again:
|
|||
}
|
||||
|
||||
while (b) {
|
||||
int dec = 0;
|
||||
|
||||
level = btrfs_header_level(b);
|
||||
|
||||
/*
|
||||
* setup the path here so we can release it under lock
|
||||
* contention with the cow code
|
||||
*/
|
||||
if (cow) {
|
||||
bool last_level = (level == (BTRFS_MAX_LEVEL - 1));
|
||||
|
||||
|
@ -2861,73 +2829,7 @@ cow_done:
|
|||
if (ret < 0)
|
||||
goto done;
|
||||
|
||||
if (level != 0) {
|
||||
int dec = 0;
|
||||
if (ret && slot > 0) {
|
||||
dec = 1;
|
||||
slot -= 1;
|
||||
}
|
||||
p->slots[level] = slot;
|
||||
err = setup_nodes_for_search(trans, root, p, b, level,
|
||||
ins_len, &write_lock_level);
|
||||
if (err == -EAGAIN)
|
||||
goto again;
|
||||
if (err) {
|
||||
ret = err;
|
||||
goto done;
|
||||
}
|
||||
b = p->nodes[level];
|
||||
slot = p->slots[level];
|
||||
|
||||
/*
|
||||
* slot 0 is special, if we change the key
|
||||
* we have to update the parent pointer
|
||||
* which means we must have a write lock
|
||||
* on the parent
|
||||
*/
|
||||
if (slot == 0 && ins_len &&
|
||||
write_lock_level < level + 1) {
|
||||
write_lock_level = level + 1;
|
||||
btrfs_release_path(p);
|
||||
goto again;
|
||||
}
|
||||
|
||||
unlock_up(p, level, lowest_unlock,
|
||||
min_write_lock_level, &write_lock_level);
|
||||
|
||||
if (level == lowest_level) {
|
||||
if (dec)
|
||||
p->slots[level]++;
|
||||
goto done;
|
||||
}
|
||||
|
||||
err = read_block_for_search(root, p, &b, level,
|
||||
slot, key);
|
||||
if (err == -EAGAIN)
|
||||
goto again;
|
||||
if (err) {
|
||||
ret = err;
|
||||
goto done;
|
||||
}
|
||||
|
||||
if (!p->skip_locking) {
|
||||
level = btrfs_header_level(b);
|
||||
if (level <= write_lock_level) {
|
||||
if (!btrfs_try_tree_write_lock(b)) {
|
||||
btrfs_set_path_blocking(p);
|
||||
btrfs_tree_lock(b);
|
||||
}
|
||||
p->locks[level] = BTRFS_WRITE_LOCK;
|
||||
} else {
|
||||
if (!btrfs_tree_read_lock_atomic(b)) {
|
||||
btrfs_set_path_blocking(p);
|
||||
btrfs_tree_read_lock(b);
|
||||
}
|
||||
p->locks[level] = BTRFS_READ_LOCK;
|
||||
}
|
||||
p->nodes[level] = b;
|
||||
}
|
||||
} else {
|
||||
if (level == 0) {
|
||||
p->slots[level] = slot;
|
||||
if (ins_len > 0 &&
|
||||
btrfs_leaf_free_space(b) < ins_len) {
|
||||
|
@ -2952,6 +2854,67 @@ cow_done:
|
|||
min_write_lock_level, NULL);
|
||||
goto done;
|
||||
}
|
||||
if (ret && slot > 0) {
|
||||
dec = 1;
|
||||
slot--;
|
||||
}
|
||||
p->slots[level] = slot;
|
||||
err = setup_nodes_for_search(trans, root, p, b, level, ins_len,
|
||||
&write_lock_level);
|
||||
if (err == -EAGAIN)
|
||||
goto again;
|
||||
if (err) {
|
||||
ret = err;
|
||||
goto done;
|
||||
}
|
||||
b = p->nodes[level];
|
||||
slot = p->slots[level];
|
||||
|
||||
/*
|
||||
* Slot 0 is special, if we change the key we have to update
|
||||
* the parent pointer which means we must have a write lock on
|
||||
* the parent
|
||||
*/
|
||||
if (slot == 0 && ins_len && write_lock_level < level + 1) {
|
||||
write_lock_level = level + 1;
|
||||
btrfs_release_path(p);
|
||||
goto again;
|
||||
}
|
||||
|
||||
unlock_up(p, level, lowest_unlock, min_write_lock_level,
|
||||
&write_lock_level);
|
||||
|
||||
if (level == lowest_level) {
|
||||
if (dec)
|
||||
p->slots[level]++;
|
||||
goto done;
|
||||
}
|
||||
|
||||
err = read_block_for_search(root, p, &b, level, slot, key);
|
||||
if (err == -EAGAIN)
|
||||
goto again;
|
||||
if (err) {
|
||||
ret = err;
|
||||
goto done;
|
||||
}
|
||||
|
||||
if (!p->skip_locking) {
|
||||
level = btrfs_header_level(b);
|
||||
if (level <= write_lock_level) {
|
||||
if (!btrfs_try_tree_write_lock(b)) {
|
||||
btrfs_set_path_blocking(p);
|
||||
btrfs_tree_lock(b);
|
||||
}
|
||||
p->locks[level] = BTRFS_WRITE_LOCK;
|
||||
} else {
|
||||
if (!btrfs_tree_read_lock_atomic(b)) {
|
||||
btrfs_set_path_blocking(p);
|
||||
btrfs_tree_read_lock(b);
|
||||
}
|
||||
p->locks[level] = BTRFS_READ_LOCK;
|
||||
}
|
||||
p->nodes[level] = b;
|
||||
}
|
||||
}
|
||||
ret = 1;
|
||||
done:
|
||||
|
@ -3008,6 +2971,8 @@ again:
|
|||
p->locks[level] = BTRFS_READ_LOCK;
|
||||
|
||||
while (b) {
|
||||
int dec = 0;
|
||||
|
||||
level = btrfs_header_level(b);
|
||||
p->nodes[level] = b;
|
||||
|
||||
|
@ -3028,47 +2993,45 @@ again:
|
|||
if (ret < 0)
|
||||
goto done;
|
||||
|
||||
if (level != 0) {
|
||||
int dec = 0;
|
||||
if (ret && slot > 0) {
|
||||
dec = 1;
|
||||
slot -= 1;
|
||||
}
|
||||
p->slots[level] = slot;
|
||||
unlock_up(p, level, lowest_unlock, 0, NULL);
|
||||
|
||||
if (level == lowest_level) {
|
||||
if (dec)
|
||||
p->slots[level]++;
|
||||
goto done;
|
||||
}
|
||||
|
||||
err = read_block_for_search(root, p, &b, level,
|
||||
slot, key);
|
||||
if (err == -EAGAIN)
|
||||
goto again;
|
||||
if (err) {
|
||||
ret = err;
|
||||
goto done;
|
||||
}
|
||||
|
||||
level = btrfs_header_level(b);
|
||||
if (!btrfs_tree_read_lock_atomic(b)) {
|
||||
btrfs_set_path_blocking(p);
|
||||
btrfs_tree_read_lock(b);
|
||||
}
|
||||
b = tree_mod_log_rewind(fs_info, p, b, time_seq);
|
||||
if (!b) {
|
||||
ret = -ENOMEM;
|
||||
goto done;
|
||||
}
|
||||
p->locks[level] = BTRFS_READ_LOCK;
|
||||
p->nodes[level] = b;
|
||||
} else {
|
||||
if (level == 0) {
|
||||
p->slots[level] = slot;
|
||||
unlock_up(p, level, lowest_unlock, 0, NULL);
|
||||
goto done;
|
||||
}
|
||||
|
||||
if (ret && slot > 0) {
|
||||
dec = 1;
|
||||
slot--;
|
||||
}
|
||||
p->slots[level] = slot;
|
||||
unlock_up(p, level, lowest_unlock, 0, NULL);
|
||||
|
||||
if (level == lowest_level) {
|
||||
if (dec)
|
||||
p->slots[level]++;
|
||||
goto done;
|
||||
}
|
||||
|
||||
err = read_block_for_search(root, p, &b, level, slot, key);
|
||||
if (err == -EAGAIN)
|
||||
goto again;
|
||||
if (err) {
|
||||
ret = err;
|
||||
goto done;
|
||||
}
|
||||
|
||||
level = btrfs_header_level(b);
|
||||
if (!btrfs_tree_read_lock_atomic(b)) {
|
||||
btrfs_set_path_blocking(p);
|
||||
btrfs_tree_read_lock(b);
|
||||
}
|
||||
b = tree_mod_log_rewind(fs_info, p, b, time_seq);
|
||||
if (!b) {
|
||||
ret = -ENOMEM;
|
||||
goto done;
|
||||
}
|
||||
p->locks[level] = BTRFS_READ_LOCK;
|
||||
p->nodes[level] = b;
|
||||
}
|
||||
ret = 1;
|
||||
done:
|
||||
|
@ -3433,7 +3396,7 @@ static noinline int insert_new_root(struct btrfs_trans_handle *trans,
|
|||
free_extent_buffer(old);
|
||||
|
||||
add_root_to_dirty_list(root);
|
||||
extent_buffer_get(c);
|
||||
atomic_inc(&c->refs);
|
||||
path->nodes[level] = c;
|
||||
path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
|
||||
path->slots[level] = 0;
|
||||
|
@ -4966,7 +4929,7 @@ static noinline void btrfs_del_leaf(struct btrfs_trans_handle *trans,
|
|||
|
||||
root_sub_used(root, leaf->len);
|
||||
|
||||
extent_buffer_get(leaf);
|
||||
atomic_inc(&leaf->refs);
|
||||
btrfs_free_tree_block(trans, root, leaf, 0, 1);
|
||||
free_extent_buffer_stale(leaf);
|
||||
}
|
||||
|
@ -5047,7 +5010,7 @@ int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
|
|||
* for possible call to del_ptr below
|
||||
*/
|
||||
slot = path->slots[1];
|
||||
extent_buffer_get(leaf);
|
||||
atomic_inc(&leaf->refs);
|
||||
|
||||
btrfs_set_path_blocking(path);
|
||||
wret = push_leaf_left(trans, root, path, 1, 1,
|
||||
|
|
|
@ -28,6 +28,7 @@
|
|||
#include <linux/dynamic_debug.h>
|
||||
#include <linux/refcount.h>
|
||||
#include <linux/crc32c.h>
|
||||
#include "extent-io-tree.h"
|
||||
#include "extent_io.h"
|
||||
#include "extent_map.h"
|
||||
#include "async-thread.h"
|
||||
|
@ -38,7 +39,7 @@ struct btrfs_transaction;
|
|||
struct btrfs_pending_snapshot;
|
||||
struct btrfs_delayed_ref_root;
|
||||
struct btrfs_space_info;
|
||||
struct btrfs_block_group_cache;
|
||||
struct btrfs_block_group;
|
||||
extern struct kmem_cache *btrfs_trans_handle_cachep;
|
||||
extern struct kmem_cache *btrfs_bit_radix_cachep;
|
||||
extern struct kmem_cache *btrfs_path_cachep;
|
||||
|
@ -56,9 +57,9 @@ struct btrfs_ref;
|
|||
* filesystem data as well that can be used to read data in order to repair
|
||||
* read errors on other disks.
|
||||
*
|
||||
* Current value is derived from RAID1 with 2 copies.
|
||||
* Current value is derived from RAID1C4 with 4 copies.
|
||||
*/
|
||||
#define BTRFS_MAX_MIRRORS (2 + 1)
|
||||
#define BTRFS_MAX_MIRRORS (4 + 1)
|
||||
|
||||
#define BTRFS_MAX_LEVEL 8
|
||||
|
||||
|
@ -291,7 +292,8 @@ struct btrfs_super_block {
|
|||
BTRFS_FEATURE_INCOMPAT_EXTENDED_IREF | \
|
||||
BTRFS_FEATURE_INCOMPAT_SKINNY_METADATA | \
|
||||
BTRFS_FEATURE_INCOMPAT_NO_HOLES | \
|
||||
BTRFS_FEATURE_INCOMPAT_METADATA_UUID)
|
||||
BTRFS_FEATURE_INCOMPAT_METADATA_UUID | \
|
||||
BTRFS_FEATURE_INCOMPAT_RAID1C34)
|
||||
|
||||
#define BTRFS_FEATURE_INCOMPAT_SAFE_SET \
|
||||
(BTRFS_FEATURE_INCOMPAT_EXTENDED_IREF)
|
||||
|
@ -413,7 +415,7 @@ struct btrfs_free_cluster {
|
|||
/* We did a full search and couldn't create a cluster */
|
||||
bool fragmented;
|
||||
|
||||
struct btrfs_block_group_cache *block_group;
|
||||
struct btrfs_block_group *block_group;
|
||||
/*
|
||||
* when a cluster is allocated from a block group, we put the
|
||||
* cluster onto a list in the block group so that it can
|
||||
|
@@ -476,8 +478,8 @@ struct btrfs_swapfile_pin {
 	void *ptr;
 	struct inode *inode;
 	/*
-	 * If true, ptr points to a struct btrfs_block_group_cache. Otherwise,
-	 * ptr points to a struct btrfs_device.
+	 * If true, ptr points to a struct btrfs_block_group. Otherwise, ptr
+	 * points to a struct btrfs_device.
 	 */
 	bool is_block_group;
 };
@@ -722,7 +724,6 @@ struct btrfs_fs_info {
 	struct btrfs_workqueue *endio_meta_write_workers;
 	struct btrfs_workqueue *endio_write_workers;
 	struct btrfs_workqueue *endio_freespace_worker;
-	struct btrfs_workqueue *submit_workers;
 	struct btrfs_workqueue *caching_workers;
 	struct btrfs_workqueue *readahead_workers;
 
@@ -1519,18 +1520,18 @@ static inline u64 btrfs_stripe_devid_nr(struct extent_buffer *eb,
 }
 
 /* struct btrfs_block_group_item */
-BTRFS_SETGET_STACK_FUNCS(block_group_used, struct btrfs_block_group_item,
+BTRFS_SETGET_STACK_FUNCS(stack_block_group_used, struct btrfs_block_group_item,
 			 used, 64);
-BTRFS_SETGET_FUNCS(disk_block_group_used, struct btrfs_block_group_item,
+BTRFS_SETGET_FUNCS(block_group_used, struct btrfs_block_group_item,
 			 used, 64);
-BTRFS_SETGET_STACK_FUNCS(block_group_chunk_objectid,
+BTRFS_SETGET_STACK_FUNCS(stack_block_group_chunk_objectid,
 			 struct btrfs_block_group_item, chunk_objectid, 64);
 
-BTRFS_SETGET_FUNCS(disk_block_group_chunk_objectid,
+BTRFS_SETGET_FUNCS(block_group_chunk_objectid,
 			 struct btrfs_block_group_item, chunk_objectid, 64);
-BTRFS_SETGET_FUNCS(disk_block_group_flags,
+BTRFS_SETGET_FUNCS(block_group_flags,
 			 struct btrfs_block_group_item, flags, 64);
-BTRFS_SETGET_STACK_FUNCS(block_group_flags,
+BTRFS_SETGET_STACK_FUNCS(stack_block_group_flags,
 			 struct btrfs_block_group_item, flags, 64);
 
 /* struct btrfs_free_space_info */
@@ -2163,6 +2164,9 @@ BTRFS_SETGET_STACK_FUNCS(super_uuid_tree_generation, struct btrfs_super_block,
 
 int btrfs_super_csum_size(const struct btrfs_super_block *s);
 const char *btrfs_super_csum_name(u16 csum_type);
+const char *btrfs_super_csum_driver(u16 csum_type);
+size_t __const btrfs_get_num_csums(void);
+
 
 /*
  * The leaf data grows from end-to-front in the node.
@@ -2397,7 +2401,7 @@ static inline u64 btrfs_calc_metadata_size(struct btrfs_fs_info *fs_info,
 
 int btrfs_add_excluded_extent(struct btrfs_fs_info *fs_info,
 			      u64 start, u64 num_bytes);
-void btrfs_free_excluded_extents(struct btrfs_block_group_cache *cache);
+void btrfs_free_excluded_extents(struct btrfs_block_group *cache);
 int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
 			   unsigned long count);
 void btrfs_cleanup_ref_head_accounting(struct btrfs_fs_info *fs_info,
@@ -2453,8 +2457,8 @@ int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
 			 struct btrfs_ref *generic_ref);
 
 int btrfs_extent_readonly(struct btrfs_fs_info *fs_info, u64 bytenr);
-void btrfs_get_block_group_trimming(struct btrfs_block_group_cache *cache);
-void btrfs_put_block_group_trimming(struct btrfs_block_group_cache *cache);
+void btrfs_get_block_group_trimming(struct btrfs_block_group *cache);
+void btrfs_put_block_group_trimming(struct btrfs_block_group *cache);
 void btrfs_clear_space_info_full(struct btrfs_fs_info *info);
 
 enum btrfs_reserve_flush_enum {
@@ -2507,7 +2511,7 @@ void btrfs_wait_for_snapshot_creation(struct btrfs_root *root);
 /* ctree.c */
 int btrfs_bin_search(struct extent_buffer *eb, const struct btrfs_key *key,
 		     int level, int *slot);
-int btrfs_comp_cpu_keys(const struct btrfs_key *k1, const struct btrfs_key *k2);
+int __pure btrfs_comp_cpu_keys(const struct btrfs_key *k1, const struct btrfs_key *k2);
 int btrfs_previous_item(struct btrfs_root *root,
 			struct btrfs_path *path, u64 min_objectid,
 			int type);
@@ -2567,8 +2571,6 @@ int btrfs_realloc_node(struct btrfs_trans_handle *trans,
 void btrfs_release_path(struct btrfs_path *p);
 struct btrfs_path *btrfs_alloc_path(void);
 void btrfs_free_path(struct btrfs_path *p);
-void btrfs_set_path_blocking(struct btrfs_path *p);
-void btrfs_unlock_up_safe(struct btrfs_path *p, int level);
 
 int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
 		    struct btrfs_path *path, int slot, int nr);
@@ -2870,10 +2872,9 @@ int btrfs_drop_inode(struct inode *inode);
 int __init btrfs_init_cachep(void);
 void __cold btrfs_destroy_cachep(void);
 struct inode *btrfs_iget_path(struct super_block *s, struct btrfs_key *location,
-			      struct btrfs_root *root, int *new,
-			      struct btrfs_path *path);
+			      struct btrfs_root *root, struct btrfs_path *path);
 struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location,
-			 struct btrfs_root *root, int *was_new);
+			 struct btrfs_root *root);
 struct extent_map *btrfs_get_extent(struct btrfs_inode *inode,
 				    struct page *page, size_t pg_offset,
 				    u64 start, u64 end, int create);
@@ -2909,7 +2910,7 @@ long btrfs_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
 long btrfs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
 int btrfs_ioctl_get_supported_features(void __user *arg);
 void btrfs_sync_inode_flags_to_i_flags(struct inode *inode);
-int btrfs_is_empty_uuid(u8 *uuid);
+int __pure btrfs_is_empty_uuid(u8 *uuid);
 int btrfs_defrag_file(struct inode *inode, struct file *file,
 		      struct btrfs_ioctl_defrag_range_args *range,
 		      u64 newer_than, unsigned long max_pages);
@@ -3143,7 +3144,7 @@ __cold
 void __btrfs_handle_fs_error(struct btrfs_fs_info *fs_info, const char *function,
 		     unsigned int line, int errno, const char *fmt, ...);
 
-const char *btrfs_decode_error(int errno);
+const char * __attribute_const__ btrfs_decode_error(int errno);
 
 __cold
 void __btrfs_abort_transaction(struct btrfs_trans_handle *trans,

@@ -307,7 +307,6 @@ int btrfs_delalloc_reserve_metadata(struct btrfs_inode *inode, u64 num_bytes)
 	unsigned nr_extents;
 	enum btrfs_reserve_flush_enum flush = BTRFS_RESERVE_FLUSH_ALL;
 	int ret = 0;
 	bool delalloc_lock = true;
 
 	/*
 	 * If we are a free space inode we need to not flush since we will be in
@@ -320,7 +319,6 @@ int btrfs_delalloc_reserve_metadata(struct btrfs_inode *inode, u64 num_bytes)
 	 */
 	if (btrfs_is_free_space_inode(inode)) {
 		flush = BTRFS_RESERVE_NO_FLUSH;
 		delalloc_lock = false;
 	} else {
 		if (current->journal_info)
 			flush = BTRFS_RESERVE_FLUSH_LIMIT;
@@ -329,9 +327,6 @@ int btrfs_delalloc_reserve_metadata(struct btrfs_inode *inode, u64 num_bytes)
 			schedule_timeout(1);
 	}
 
 	if (delalloc_lock)
 		mutex_lock(&inode->delalloc_mutex);
 
 	num_bytes = ALIGN(num_bytes, fs_info->sectorsize);
 
 	/*
@@ -348,10 +343,12 @@ int btrfs_delalloc_reserve_metadata(struct btrfs_inode *inode, u64 num_bytes)
 				     &qgroup_reserve);
 	ret = btrfs_qgroup_reserve_meta_prealloc(root, qgroup_reserve, true);
 	if (ret)
-		goto out_fail;
+		return ret;
 	ret = btrfs_reserve_metadata_bytes(root, block_rsv, meta_reserve, flush);
-	if (ret)
-		goto out_qgroup;
+	if (ret) {
+		btrfs_qgroup_free_meta_prealloc(root, qgroup_reserve);
+		return ret;
+	}
 
 	/*
 	 * Now we need to update our outstanding extents and csum bytes _first_
@@ -375,15 +372,7 @@ int btrfs_delalloc_reserve_metadata(struct btrfs_inode *inode, u64 num_bytes)
 	block_rsv->qgroup_rsv_reserved += qgroup_reserve;
 	spin_unlock(&block_rsv->lock);
 
 	if (delalloc_lock)
 		mutex_unlock(&inode->delalloc_mutex);
 	return 0;
-out_qgroup:
-	btrfs_qgroup_free_meta_prealloc(root, qgroup_reserve);
-out_fail:
-	if (delalloc_lock)
-		mutex_unlock(&inode->delalloc_mutex);
-	return ret;
 }
 
 /**

@@ -12,6 +12,7 @@
 #include "transaction.h"
 #include "ctree.h"
 #include "qgroup.h"
+#include "locking.h"
 
 #define BTRFS_DELAYED_WRITEBACK 512
 #define BTRFS_DELAYED_BACKGROUND 128
@@ -1367,8 +1368,8 @@ static int btrfs_wq_run_delayed_node(struct btrfs_delayed_root *delayed_root,
 		return -ENOMEM;
 
 	async_work->delayed_root = delayed_root;
-	btrfs_init_work(&async_work->work, btrfs_delayed_meta_helper,
-			btrfs_async_run_delayed_root, NULL, NULL);
+	btrfs_init_work(&async_work->work, btrfs_async_run_delayed_root, NULL,
+			NULL);
 	async_work->nr = nr;
 
 	btrfs_queue_work(fs_info->delayed_workers, &async_work->work);
@@ -1949,12 +1950,19 @@ void btrfs_kill_all_delayed_nodes(struct btrfs_root *root)
 		}
 
 		inode_id = delayed_nodes[n - 1]->inode_id + 1;
 
-		for (i = 0; i < n; i++)
-			refcount_inc(&delayed_nodes[i]->refs);
+		for (i = 0; i < n; i++) {
+			/*
+			 * Don't increase refs in case the node is dead and
+			 * about to be removed from the tree in the loop below
+			 */
+			if (!refcount_inc_not_zero(&delayed_nodes[i]->refs))
+				delayed_nodes[i] = NULL;
+		}
 		spin_unlock(&root->inode_lock);
 
 		for (i = 0; i < n; i++) {
+			if (!delayed_nodes[i])
+				continue;
 			__btrfs_kill_delayed_node(delayed_nodes[i]);
 			btrfs_release_delayed_node(delayed_nodes[i]);
 		}

@@ -986,7 +986,7 @@ static int btrfs_dev_replace_kthread(void *data)
 	return 0;
 }
 
-int btrfs_dev_replace_is_ongoing(struct btrfs_dev_replace *dev_replace)
+int __pure btrfs_dev_replace_is_ongoing(struct btrfs_dev_replace *dev_replace)
 {
 	if (!dev_replace->is_valid)
 		return 0;

@@ -17,6 +17,6 @@ void btrfs_dev_replace_status(struct btrfs_fs_info *fs_info,
 int btrfs_dev_replace_cancel(struct btrfs_fs_info *fs_info);
 void btrfs_dev_replace_suspend_for_unmount(struct btrfs_fs_info *fs_info);
 int btrfs_resume_dev_replace_async(struct btrfs_fs_info *fs_info);
-int btrfs_dev_replace_is_ongoing(struct btrfs_dev_replace *dev_replace);
+int __pure btrfs_dev_replace_is_ongoing(struct btrfs_dev_replace *dev_replace);
 
 #endif

@@ -205,7 +205,6 @@ struct extent_map *btree_get_extent(struct btrfs_inode *inode,
 		struct page *page, size_t pg_offset, u64 start, u64 len,
 		int create)
 {
-	struct btrfs_fs_info *fs_info = inode->root->fs_info;
 	struct extent_map_tree *em_tree = &inode->extent_tree;
 	struct extent_map *em;
 	int ret;
@@ -213,7 +212,6 @@ struct extent_map *btree_get_extent(struct btrfs_inode *inode,
 	read_lock(&em_tree->lock);
 	em = lookup_extent_mapping(em_tree, start, len);
 	if (em) {
-		em->bdev = fs_info->fs_devices->latest_bdev;
 		read_unlock(&em_tree->lock);
 		goto out;
 	}
@@ -228,7 +226,6 @@ struct extent_map *btree_get_extent(struct btrfs_inode *inode,
 	em->len = (u64)-1;
 	em->block_len = (u64)-1;
 	em->block_start = 0;
-	em->bdev = fs_info->fs_devices->latest_bdev;
 
 	write_lock(&em_tree->lock);
 	ret = add_extent_mapping(em_tree, em, 0);
@@ -352,6 +349,9 @@ static bool btrfs_supported_super_csum(u16 csum_type)
 {
 	switch (csum_type) {
 	case BTRFS_CSUM_TYPE_CRC32:
+	case BTRFS_CSUM_TYPE_XXHASH:
+	case BTRFS_CSUM_TYPE_SHA256:
+	case BTRFS_CSUM_TYPE_BLAKE2:
 		return true;
 	default:
 		return false;
@@ -545,9 +545,11 @@ static int csum_dirty_buffer(struct btrfs_fs_info *fs_info, struct page *page)
 	ret = btrfs_check_leaf_full(eb);
 
 	if (ret < 0) {
+		btrfs_print_tree(eb, 0);
 		btrfs_err(fs_info,
 			"block=%llu write time tree block corruption detected",
 			eb->start);
+		WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG));
 		return ret;
 	}
 	write_extent_buffer(eb, result, 0, csum_size);
@@ -608,7 +610,7 @@ static int btree_readpage_end_io_hook(struct btrfs_io_bio *io_bio,
 	/* the pending IO might have been the only thing that kept this buffer
 	 * in memory. Make sure we have a ref for all this other checks
 	 */
-	extent_buffer_get(eb);
+	atomic_inc(&eb->refs);
 
 	reads_done = atomic_dec_and_test(&eb->io_pages);
 	if (!reads_done)
@@ -706,43 +708,31 @@ static void end_workqueue_bio(struct bio *bio)
 	struct btrfs_end_io_wq *end_io_wq = bio->bi_private;
 	struct btrfs_fs_info *fs_info;
 	struct btrfs_workqueue *wq;
-	btrfs_work_func_t func;
 
 	fs_info = end_io_wq->info;
 	end_io_wq->status = bio->bi_status;
 
 	if (bio_op(bio) == REQ_OP_WRITE) {
-		if (end_io_wq->metadata == BTRFS_WQ_ENDIO_METADATA) {
+		if (end_io_wq->metadata == BTRFS_WQ_ENDIO_METADATA)
 			wq = fs_info->endio_meta_write_workers;
-			func = btrfs_endio_meta_write_helper;
-		} else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_FREE_SPACE) {
+		else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_FREE_SPACE)
 			wq = fs_info->endio_freespace_worker;
-			func = btrfs_freespace_write_helper;
-		} else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56) {
+		else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56)
 			wq = fs_info->endio_raid56_workers;
-			func = btrfs_endio_raid56_helper;
-		} else {
+		else
 			wq = fs_info->endio_write_workers;
-			func = btrfs_endio_write_helper;
-		}
 	} else {
-		if (unlikely(end_io_wq->metadata ==
-			     BTRFS_WQ_ENDIO_DIO_REPAIR)) {
+		if (unlikely(end_io_wq->metadata == BTRFS_WQ_ENDIO_DIO_REPAIR))
 			wq = fs_info->endio_repair_workers;
-			func = btrfs_endio_repair_helper;
-		} else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56) {
+		else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56)
 			wq = fs_info->endio_raid56_workers;
-			func = btrfs_endio_raid56_helper;
-		} else if (end_io_wq->metadata) {
+		else if (end_io_wq->metadata)
 			wq = fs_info->endio_meta_workers;
-			func = btrfs_endio_meta_helper;
-		} else {
+		else
 			wq = fs_info->endio_workers;
-			func = btrfs_endio_helper;
-		}
 	}
 
-	btrfs_init_work(&end_io_wq->work, func, end_workqueue_fn, NULL, NULL);
+	btrfs_init_work(&end_io_wq->work, end_workqueue_fn, NULL, NULL);
 	btrfs_queue_work(wq, &end_io_wq->work);
 }
 
@@ -803,8 +793,13 @@ static void run_one_async_done(struct btrfs_work *work)
 		return;
 	}
 
-	ret = btrfs_map_bio(btrfs_sb(inode->i_sb), async->bio,
-			    async->mirror_num, 1);
+	/*
+	 * All of the bios that pass through here are from async helpers.
+	 * Use REQ_CGROUP_PUNT to issue them from the owning cgroup's context.
+	 * This changes nothing when cgroups aren't in use.
+	 */
+	async->bio->bi_opf |= REQ_CGROUP_PUNT;
+	ret = btrfs_map_bio(btrfs_sb(inode->i_sb), async->bio, async->mirror_num);
 	if (ret) {
 		async->bio->bi_status = ret;
 		bio_endio(async->bio);
@@ -835,8 +830,8 @@ blk_status_t btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
 	async->mirror_num = mirror_num;
 	async->submit_bio_start = submit_bio_start;
 
-	btrfs_init_work(&async->work, btrfs_worker_helper, run_one_async_start,
-			run_one_async_done, run_one_async_free);
+	btrfs_init_work(&async->work, run_one_async_start, run_one_async_done,
+			run_one_async_free);
 
 	async->bio_offset = bio_offset;
 
@@ -904,12 +899,12 @@ static blk_status_t btree_submit_bio_hook(struct inode *inode, struct bio *bio,
 					  BTRFS_WQ_ENDIO_METADATA);
 		if (ret)
 			goto out_w_error;
-		ret = btrfs_map_bio(fs_info, bio, mirror_num, 0);
+		ret = btrfs_map_bio(fs_info, bio, mirror_num);
 	} else if (!async) {
 		ret = btree_csum_one_bio(bio);
 		if (ret)
 			goto out_w_error;
-		ret = btrfs_map_bio(fs_info, bio, mirror_num, 0);
+		ret = btrfs_map_bio(fs_info, bio, mirror_num);
 	} else {
 		/*
 		 * kthread helpers are used to submit writes so that
@@ -1657,8 +1652,8 @@ static void end_workqueue_fn(struct btrfs_work *work)
 	bio->bi_status = end_io_wq->status;
 	bio->bi_private = end_io_wq->private;
 	bio->bi_end_io = end_io_wq->end_io;
-	kmem_cache_free(btrfs_end_io_wq_cache, end_io_wq);
 	bio_endio(bio);
+	kmem_cache_free(btrfs_end_io_wq_cache, end_io_wq);
 }
 
 static int cleaner_kthread(void *arg)
@@ -1753,7 +1748,7 @@ static int transaction_kthread(void *arg)
 		}
 
 		now = ktime_get_seconds();
-		if (cur->state < TRANS_STATE_BLOCKED &&
+		if (cur->state < TRANS_STATE_COMMIT_START &&
 		    !test_bit(BTRFS_FS_NEED_ASYNC_COMMIT, &fs_info->flags) &&
 		    (now < cur->start_time ||
 		     now - cur->start_time < fs_info->commit_interval)) {
@@ -1792,18 +1787,18 @@ sleep:
|
|||
}
|
||||
|
||||
/*
|
||||
* this will find the highest generation in the array of
|
||||
* root backups. The index of the highest array is returned,
|
||||
* or -1 if we can't find anything.
|
||||
* This will find the highest generation in the array of root backups. The
|
||||
* index of the highest array is returned, or -EINVAL if we can't find
|
||||
* anything.
|
||||
*
|
||||
* We check to make sure the array is valid by comparing the
|
||||
* generation of the latest root in the array with the generation
|
||||
* in the super block. If they don't match we pitch it.
|
||||
*/
|
||||
static int find_newest_super_backup(struct btrfs_fs_info *info, u64 newest_gen)
|
||||
static int find_newest_super_backup(struct btrfs_fs_info *info)
|
||||
{
|
||||
const u64 newest_gen = btrfs_super_generation(info->super_copy);
|
||||
u64 cur;
|
||||
int newest_index = -1;
|
||||
struct btrfs_root_backup *root_backup;
|
||||
int i;
|
||||
|
||||
|
@@ -1811,37 +1806,10 @@ static int find_newest_super_backup(struct btrfs_fs_info *info, u64 newest_gen)
|
|||
root_backup = info->super_copy->super_roots + i;
|
||||
cur = btrfs_backup_tree_root_gen(root_backup);
|
||||
if (cur == newest_gen)
|
||||
newest_index = i;
|
||||
return i;
|
||||
}
|
||||
|
||||
/* check to see if we actually wrapped around */
|
||||
if (newest_index == BTRFS_NUM_BACKUP_ROOTS - 1) {
|
||||
root_backup = info->super_copy->super_roots;
|
||||
cur = btrfs_backup_tree_root_gen(root_backup);
|
||||
if (cur == newest_gen)
|
||||
newest_index = 0;
|
||||
}
|
||||
return newest_index;
|
||||
}
|
||||
|
||||
|
||||
/*
|
||||
* find the oldest backup so we know where to store new entries
|
||||
* in the backup array. This will set the backup_root_index
|
||||
* field in the fs_info struct
|
||||
*/
|
||||
static void find_oldest_super_backup(struct btrfs_fs_info *info,
|
||||
u64 newest_gen)
|
||||
{
|
||||
int newest_index = -1;
|
||||
|
||||
newest_index = find_newest_super_backup(info, newest_gen);
|
||||
/* if there was garbage in there, just move along */
|
||||
if (newest_index == -1) {
|
||||
info->backup_root_index = 0;
|
||||
} else {
|
||||
info->backup_root_index = (newest_index + 1) % BTRFS_NUM_BACKUP_ROOTS;
|
||||
}
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/*
|
||||
|
@@ -1851,22 +1819,8 @@ static void find_oldest_super_backup(struct btrfs_fs_info *info,
|
|||
*/
|
||||
static void backup_super_roots(struct btrfs_fs_info *info)
|
||||
{
|
||||
int next_backup;
|
||||
const int next_backup = info->backup_root_index;
|
||||
struct btrfs_root_backup *root_backup;
|
||||
int last_backup;
|
||||
|
||||
next_backup = info->backup_root_index;
|
||||
last_backup = (next_backup + BTRFS_NUM_BACKUP_ROOTS - 1) %
|
||||
BTRFS_NUM_BACKUP_ROOTS;
|
||||
|
||||
/*
|
||||
* just overwrite the last backup if we're at the same generation
|
||||
* this happens only at umount
|
||||
*/
|
||||
root_backup = info->super_for_commit->super_roots + last_backup;
|
||||
if (btrfs_backup_tree_root_gen(root_backup) ==
|
||||
btrfs_header_generation(info->tree_root->node))
|
||||
next_backup = last_backup;
|
||||
|
||||
root_backup = info->super_for_commit->super_roots + next_backup;
|
||||
|
||||
|
@@ -1939,40 +1893,31 @@ static void backup_super_roots(struct btrfs_fs_info *info)
|
|||
}
|
||||
|
||||
/*
|
||||
* this copies info out of the root backup array and back into
|
||||
* the in-memory super block. It is meant to help iterate through
|
||||
* the array, so you send it the number of backups you've already
|
||||
* tried and the last backup index you used.
|
||||
* read_backup_root - Reads a backup root based on the passed priority. Prio 0
|
||||
* is the newest, prio 1/2/3 are 2nd newest/3rd newest/4th (oldest) backup roots
|
||||
*
|
||||
* this returns -1 when it has tried all the backups
|
||||
* fs_info - filesystem whose backup roots need to be read
|
||||
* priority - priority of backup root required
|
||||
*
|
||||
* Returns backup root index on success and -EINVAL otherwise.
|
||||
*/
|
||||
static noinline int next_root_backup(struct btrfs_fs_info *info,
|
||||
struct btrfs_super_block *super,
|
||||
int *num_backups_tried, int *backup_index)
|
||||
static int read_backup_root(struct btrfs_fs_info *fs_info, u8 priority)
|
||||
{
|
||||
int backup_index = find_newest_super_backup(fs_info);
|
||||
struct btrfs_super_block *super = fs_info->super_copy;
|
||||
struct btrfs_root_backup *root_backup;
|
||||
int newest = *backup_index;
|
||||
|
||||
if (*num_backups_tried == 0) {
|
||||
u64 gen = btrfs_super_generation(super);
|
||||
if (priority < BTRFS_NUM_BACKUP_ROOTS && backup_index >= 0) {
|
||||
if (priority == 0)
|
||||
return backup_index;
|
||||
|
||||
newest = find_newest_super_backup(info, gen);
|
||||
if (newest == -1)
|
||||
return -1;
|
||||
|
||||
*backup_index = newest;
|
||||
*num_backups_tried = 1;
|
||||
} else if (*num_backups_tried == BTRFS_NUM_BACKUP_ROOTS) {
|
||||
/* we've tried all the backups, all done */
|
||||
return -1;
|
||||
backup_index = backup_index + BTRFS_NUM_BACKUP_ROOTS - priority;
|
||||
backup_index %= BTRFS_NUM_BACKUP_ROOTS;
|
||||
} else {
|
||||
/* jump to the next oldest backup */
|
||||
newest = (*backup_index + BTRFS_NUM_BACKUP_ROOTS - 1) %
|
||||
BTRFS_NUM_BACKUP_ROOTS;
|
||||
*backup_index = newest;
|
||||
*num_backups_tried += 1;
|
||||
return -EINVAL;
|
||||
}
|
||||
root_backup = super->super_roots + newest;
|
||||
|
||||
root_backup = super->super_roots + backup_index;
|
||||
|
||||
btrfs_set_super_generation(super,
|
||||
btrfs_backup_tree_root_gen(root_backup));
|
||||
|
@@ -1982,12 +1927,13 @@ static noinline int next_root_backup(struct btrfs_fs_info *info,
|
|||
btrfs_set_super_bytes_used(super, btrfs_backup_bytes_used(root_backup));
|
||||
|
||||
/*
|
||||
* fixme: the total bytes and num_devices need to match or we should
|
||||
* Fixme: the total bytes and num_devices need to match or we should
|
||||
* need a fsck
|
||||
*/
|
||||
btrfs_set_super_total_bytes(super, btrfs_backup_total_bytes(root_backup));
|
||||
btrfs_set_super_num_devices(super, btrfs_backup_num_devices(root_backup));
|
||||
return 0;
|
||||
|
||||
return backup_index;
|
||||
}
|
||||
|
||||
/* helper to cleanup workers */
|
||||
|
@@ -2002,7 +1948,6 @@ static void btrfs_stop_all_workers(struct btrfs_fs_info *fs_info)
|
|||
btrfs_destroy_workqueue(fs_info->rmw_workers);
|
||||
btrfs_destroy_workqueue(fs_info->endio_write_workers);
|
||||
btrfs_destroy_workqueue(fs_info->endio_freespace_worker);
|
||||
btrfs_destroy_workqueue(fs_info->submit_workers);
|
||||
btrfs_destroy_workqueue(fs_info->delayed_workers);
|
||||
btrfs_destroy_workqueue(fs_info->caching_workers);
|
||||
btrfs_destroy_workqueue(fs_info->readahead_workers);
|
||||
|
@@ -2028,7 +1973,7 @@ static void free_root_extent_buffers(struct btrfs_root *root)
|
|||
}
|
||||
|
||||
/* helper to cleanup tree roots */
|
||||
static void free_root_pointers(struct btrfs_fs_info *info, int chunk_root)
|
||||
static void free_root_pointers(struct btrfs_fs_info *info, bool free_chunk_root)
|
||||
{
|
||||
free_root_extent_buffers(info->tree_root);
|
||||
|
||||
|
@@ -2037,7 +1982,7 @@ static void free_root_pointers(struct btrfs_fs_info *info, int chunk_root)
|
|||
free_root_extent_buffers(info->csum_root);
|
||||
free_root_extent_buffers(info->quota_root);
|
||||
free_root_extent_buffers(info->uuid_root);
|
||||
if (chunk_root)
|
||||
if (free_chunk_root)
|
||||
free_root_extent_buffers(info->chunk_root);
|
||||
free_root_extent_buffers(info->free_space_root);
|
||||
}
|
||||
|
@@ -2167,16 +2112,6 @@ static int btrfs_init_workqueues(struct btrfs_fs_info *fs_info,
|
|||
fs_info->caching_workers =
|
||||
btrfs_alloc_workqueue(fs_info, "cache", flags, max_active, 0);
|
||||
|
||||
/*
|
||||
* a higher idle thresh on the submit workers makes it much more
|
||||
* likely that bios will be send down in a sane order to the
|
||||
* devices
|
||||
*/
|
||||
fs_info->submit_workers =
|
||||
btrfs_alloc_workqueue(fs_info, "submit", flags,
|
||||
min_t(u64, fs_devices->num_devices,
|
||||
max_active), 64);
|
||||
|
||||
fs_info->fixup_workers =
|
||||
btrfs_alloc_workqueue(fs_info, "fixup", flags, 1, 0);
|
||||
|
||||
|
@@ -2215,7 +2150,7 @@ static int btrfs_init_workqueues(struct btrfs_fs_info *fs_info,
|
|||
btrfs_alloc_workqueue(fs_info, "qgroup-rescan", flags, 1, 0);
|
||||
|
||||
if (!(fs_info->workers && fs_info->delalloc_workers &&
|
||||
fs_info->submit_workers && fs_info->flush_workers &&
|
||||
fs_info->flush_workers &&
|
||||
fs_info->endio_workers && fs_info->endio_meta_workers &&
|
||||
fs_info->endio_meta_write_workers &&
|
||||
fs_info->endio_repair_workers &&
|
||||
|
@@ -2233,13 +2168,13 @@ static int btrfs_init_workqueues(struct btrfs_fs_info *fs_info,
|
|||
static int btrfs_init_csum_hash(struct btrfs_fs_info *fs_info, u16 csum_type)
|
||||
{
|
||||
struct crypto_shash *csum_shash;
|
||||
const char *csum_name = btrfs_super_csum_name(csum_type);
|
||||
const char *csum_driver = btrfs_super_csum_driver(csum_type);
|
||||
|
||||
csum_shash = crypto_alloc_shash(csum_name, 0, 0);
|
||||
csum_shash = crypto_alloc_shash(csum_driver, 0, 0);
|
||||
|
||||
if (IS_ERR(csum_shash)) {
|
||||
btrfs_err(fs_info, "error allocating %s hash for checksum",
|
||||
csum_name);
|
||||
csum_driver);
|
||||
return PTR_ERR(csum_shash);
|
||||
}
|
||||
|
||||
|
@@ -2589,7 +2524,101 @@ out:
|
|||
return ret;
|
||||
}
|
||||
|
||||
int open_ctree(struct super_block *sb,
|
||||
static int __cold init_tree_roots(struct btrfs_fs_info *fs_info)
|
||||
{
|
||||
int backup_index = find_newest_super_backup(fs_info);
|
||||
struct btrfs_super_block *sb = fs_info->super_copy;
|
||||
struct btrfs_root *tree_root = fs_info->tree_root;
|
||||
bool handle_error = false;
|
||||
int ret = 0;
|
||||
int i;
|
||||
|
||||
for (i = 0; i < BTRFS_NUM_BACKUP_ROOTS; i++) {
|
||||
u64 generation;
|
||||
int level;
|
||||
|
||||
if (handle_error) {
|
||||
if (!IS_ERR(tree_root->node))
|
||||
free_extent_buffer(tree_root->node);
|
||||
tree_root->node = NULL;
|
||||
|
||||
if (!btrfs_test_opt(fs_info, USEBACKUPROOT))
|
||||
break;
|
||||
|
||||
free_root_pointers(fs_info, 0);
|
||||
|
||||
/*
|
||||
* Don't use the log in recovery mode, it won't be
|
||||
* valid
|
||||
*/
|
||||
btrfs_set_super_log_root(sb, 0);
|
||||
|
||||
/* We can't trust the free space cache either */
|
||||
btrfs_set_opt(fs_info->mount_opt, CLEAR_CACHE);
|
||||
|
||||
ret = read_backup_root(fs_info, i);
|
||||
backup_index = ret;
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
}
|
||||
generation = btrfs_super_generation(sb);
|
||||
level = btrfs_super_root_level(sb);
|
||||
tree_root->node = read_tree_block(fs_info, btrfs_super_root(sb),
|
||||
generation, level, NULL);
|
||||
if (IS_ERR(tree_root->node) ||
|
||||
!extent_buffer_uptodate(tree_root->node)) {
|
||||
handle_error = true;
|
||||
|
||||
if (IS_ERR(tree_root->node))
|
||||
ret = PTR_ERR(tree_root->node);
|
||||
else if (!extent_buffer_uptodate(tree_root->node))
|
||||
ret = -EUCLEAN;
|
||||
|
||||
btrfs_warn(fs_info, "failed to read tree root");
|
||||
continue;
|
||||
}
|
||||
|
||||
btrfs_set_root_node(&tree_root->root_item, tree_root->node);
|
||||
tree_root->commit_root = btrfs_root_node(tree_root);
|
||||
btrfs_set_root_refs(&tree_root->root_item, 1);
|
||||
|
||||
/*
|
||||
* No need to hold btrfs_root::objectid_mutex since the fs
|
||||
* hasn't been fully initialised and we are the only user
|
||||
*/
|
||||
ret = btrfs_find_highest_objectid(tree_root,
|
||||
&tree_root->highest_objectid);
|
||||
if (ret < 0) {
|
||||
handle_error = true;
|
||||
continue;
|
||||
}
|
||||
|
||||
ASSERT(tree_root->highest_objectid <= BTRFS_LAST_FREE_OBJECTID);
|
||||
|
||||
ret = btrfs_read_roots(fs_info);
|
||||
if (ret < 0) {
|
||||
handle_error = true;
|
||||
continue;
|
||||
}
|
||||
|
||||
/* All successful */
|
||||
fs_info->generation = generation;
|
||||
fs_info->last_trans_committed = generation;
|
||||
|
||||
/* Always begin writing backup roots after the one being used */
|
||||
if (backup_index < 0) {
|
||||
fs_info->backup_root_index = 0;
|
||||
} else {
|
||||
fs_info->backup_root_index = backup_index + 1;
|
||||
fs_info->backup_root_index %= BTRFS_NUM_BACKUP_ROOTS;
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
int __cold open_ctree(struct super_block *sb,
|
||||
struct btrfs_fs_devices *fs_devices,
|
||||
char *options)
|
||||
{
|
||||
|
@@ -2607,8 +2636,6 @@ int open_ctree(struct super_block *sb,
|
|||
struct btrfs_root *chunk_root;
|
||||
int ret;
|
||||
int err = -EINVAL;
|
||||
int num_backups_tried = 0;
|
||||
int backup_index = 0;
|
||||
int clear_free_space_tree = 0;
|
||||
int level;
|
||||
|
||||
|
@@ -2878,13 +2905,6 @@ int open_ctree(struct super_block *sb,
|
|||
if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_ERROR)
|
||||
set_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state);
|
||||
|
||||
/*
|
||||
* run through our array of backup supers and setup
|
||||
* our ring pointer to the oldest one
|
||||
*/
|
||||
generation = btrfs_super_generation(disk_super);
|
||||
find_oldest_super_backup(fs_info, generation);
|
||||
|
||||
/*
|
||||
* In the long term, we'll store the compression type in the super
|
||||
* block, and it'll be used for per file compression control.
|
||||
|
@@ -3031,44 +3051,9 @@ int open_ctree(struct super_block *sb,
|
|||
goto fail_tree_roots;
|
||||
}
|
||||
|
||||
retry_root_backup:
|
||||
generation = btrfs_super_generation(disk_super);
|
||||
level = btrfs_super_root_level(disk_super);
|
||||
|
||||
tree_root->node = read_tree_block(fs_info,
|
||||
btrfs_super_root(disk_super),
|
||||
generation, level, NULL);
|
||||
if (IS_ERR(tree_root->node) ||
|
||||
!extent_buffer_uptodate(tree_root->node)) {
|
||||
btrfs_warn(fs_info, "failed to read tree root");
|
||||
if (!IS_ERR(tree_root->node))
|
||||
free_extent_buffer(tree_root->node);
|
||||
tree_root->node = NULL;
|
||||
goto recovery_tree_root;
|
||||
}
|
||||
|
||||
btrfs_set_root_node(&tree_root->root_item, tree_root->node);
|
||||
tree_root->commit_root = btrfs_root_node(tree_root);
|
||||
btrfs_set_root_refs(&tree_root->root_item, 1);
|
||||
|
||||
mutex_lock(&tree_root->objectid_mutex);
|
||||
ret = btrfs_find_highest_objectid(tree_root,
|
||||
&tree_root->highest_objectid);
|
||||
if (ret) {
|
||||
mutex_unlock(&tree_root->objectid_mutex);
|
||||
goto recovery_tree_root;
|
||||
}
|
||||
|
||||
ASSERT(tree_root->highest_objectid <= BTRFS_LAST_FREE_OBJECTID);
|
||||
|
||||
mutex_unlock(&tree_root->objectid_mutex);
|
||||
|
||||
ret = btrfs_read_roots(fs_info);
|
||||
ret = init_tree_roots(fs_info);
|
||||
if (ret)
|
||||
goto recovery_tree_root;
|
||||
|
||||
fs_info->generation = generation;
|
||||
fs_info->last_trans_committed = generation;
|
||||
goto fail_tree_roots;
|
||||
|
||||
ret = btrfs_verify_dev_extents(fs_info);
|
||||
if (ret) {
|
||||
|
@@ -3336,7 +3321,7 @@ fail_block_groups:
|
|||
btrfs_put_block_group_cache(fs_info);
|
||||
|
||||
fail_tree_roots:
|
||||
free_root_pointers(fs_info, 1);
|
||||
free_root_pointers(fs_info, true);
|
||||
invalidate_inode_pages2(fs_info->btree_inode->i_mapping);
|
||||
|
||||
fail_sb_buffer:
|
||||
|
@@ -3363,24 +3348,6 @@ fail:
|
|||
btrfs_free_stripe_hash_table(fs_info);
|
||||
btrfs_close_devices(fs_info->fs_devices);
|
||||
return err;
|
||||
|
||||
recovery_tree_root:
|
||||
if (!btrfs_test_opt(fs_info, USEBACKUPROOT))
|
||||
goto fail_tree_roots;
|
||||
|
||||
free_root_pointers(fs_info, 0);
|
||||
|
||||
/* don't use the log in recovery mode, it won't be valid */
|
||||
btrfs_set_super_log_root(disk_super, 0);
|
||||
|
||||
/* we can't trust the free space cache either */
|
||||
btrfs_set_opt(fs_info->mount_opt, CLEAR_CACHE);
|
||||
|
||||
ret = next_root_backup(fs_info, fs_info->super_copy,
|
||||
&num_backups_tried, &backup_index);
|
||||
if (ret == -1)
|
||||
goto fail_block_groups;
|
||||
goto retry_root_backup;
|
||||
}
|
||||
ALLOW_ERROR_INJECTION(open_ctree, ERRNO);
|
||||
|
||||
|
@@ -3974,7 +3941,7 @@ int btrfs_commit_super(struct btrfs_fs_info *fs_info)
|
|||
return btrfs_commit_transaction(trans);
|
||||
}
|
||||
|
||||
void close_ctree(struct btrfs_fs_info *fs_info)
|
||||
void __cold close_ctree(struct btrfs_fs_info *fs_info)
|
||||
{
|
||||
int ret;
|
||||
|
||||
|
@@ -4062,7 +4029,7 @@ void close_ctree(struct btrfs_fs_info *fs_info)
|
|||
btrfs_free_block_groups(fs_info);
|
||||
|
||||
clear_bit(BTRFS_FS_OPEN, &fs_info->flags);
|
||||
free_root_pointers(fs_info, 1);
|
||||
free_root_pointers(fs_info, true);
|
||||
|
||||
iput(fs_info->btree_inode);
|
||||
|
||||
|
@@ -4439,7 +4406,7 @@ again:
|
|||
return 0;
|
||||
}
|
||||
|
||||
static void btrfs_cleanup_bg_io(struct btrfs_block_group_cache *cache)
|
||||
static void btrfs_cleanup_bg_io(struct btrfs_block_group *cache)
|
||||
{
|
||||
struct inode *inode;
|
||||
|
||||
|
@@ -4456,12 +4423,12 @@ static void btrfs_cleanup_bg_io(struct btrfs_block_group_cache *cache)
|
|||
void btrfs_cleanup_dirty_bgs(struct btrfs_transaction *cur_trans,
|
||||
struct btrfs_fs_info *fs_info)
|
||||
{
|
||||
struct btrfs_block_group_cache *cache;
|
||||
struct btrfs_block_group *cache;
|
||||
|
||||
spin_lock(&cur_trans->dirty_bgs_lock);
|
||||
while (!list_empty(&cur_trans->dirty_bgs)) {
|
||||
cache = list_first_entry(&cur_trans->dirty_bgs,
|
||||
struct btrfs_block_group_cache,
|
||||
struct btrfs_block_group,
|
||||
dirty_list);
|
||||
|
||||
if (!list_empty(&cache->io_list)) {
|
||||
|
@@ -4489,7 +4456,7 @@ void btrfs_cleanup_dirty_bgs(struct btrfs_transaction *cur_trans,
|
|||
*/
|
||||
while (!list_empty(&cur_trans->io_bgs)) {
|
||||
cache = list_first_entry(&cur_trans->io_bgs,
|
||||
struct btrfs_block_group_cache,
|
||||
struct btrfs_block_group,
|
||||
io_list);
|
||||
|
||||
list_del_init(&cache->io_list);
|
||||
|
|
|
@@ -49,10 +49,10 @@ struct extent_buffer *btrfs_find_create_tree_block(
 						 struct btrfs_fs_info *fs_info,
 						 u64 bytenr);
 void btrfs_clean_tree_block(struct extent_buffer *buf);
-int open_ctree(struct super_block *sb,
+int __cold open_ctree(struct super_block *sb,
 	       struct btrfs_fs_devices *fs_devices,
 	       char *options);
-void close_ctree(struct btrfs_fs_info *fs_info);
+void __cold close_ctree(struct btrfs_fs_info *fs_info);
 int write_all_supers(struct btrfs_fs_info *fs_info, int max_mirrors);
 struct buffer_head *btrfs_read_dev_super(struct block_device *bdev);
 int btrfs_read_dev_one_super(struct block_device *bdev, int copy_num,

@@ -87,7 +87,7 @@ static struct dentry *btrfs_get_dentry(struct super_block *sb, u64 objectid,
 	key.type = BTRFS_INODE_ITEM_KEY;
 	key.offset = 0;
 
-	inode = btrfs_iget(sb, &key, root, NULL);
+	inode = btrfs_iget(sb, &key, root);
 	if (IS_ERR(inode)) {
 		err = PTR_ERR(inode);
 		goto fail;
@@ -214,7 +214,7 @@ static struct dentry *btrfs_get_parent(struct dentry *child)
 
 	key.type = BTRFS_INODE_ITEM_KEY;
 	key.offset = 0;
-	return d_obtain_alias(btrfs_iget(fs_info->sb, &key, root, NULL));
+	return d_obtain_alias(btrfs_iget(fs_info->sb, &key, root));
 fail:
 	btrfs_free_path(path);
 	return ERR_PTR(ret);

@@ -0,0 +1,248 @@
|
|||
/* SPDX-License-Identifier: GPL-2.0 */
|
||||
|
||||
#ifndef BTRFS_EXTENT_IO_TREE_H
|
||||
#define BTRFS_EXTENT_IO_TREE_H
|
||||
|
||||
struct extent_changeset;
|
||||
struct io_failure_record;
|
||||
|
||||
/* Bits for the extent state */
|
||||
#define EXTENT_DIRTY (1U << 0)
|
||||
#define EXTENT_UPTODATE (1U << 1)
|
||||
#define EXTENT_LOCKED (1U << 2)
|
||||
#define EXTENT_NEW (1U << 3)
|
||||
#define EXTENT_DELALLOC (1U << 4)
|
||||
#define EXTENT_DEFRAG (1U << 5)
|
||||
#define EXTENT_BOUNDARY (1U << 6)
|
||||
#define EXTENT_NODATASUM (1U << 7)
|
||||
#define EXTENT_CLEAR_META_RESV (1U << 8)
|
||||
#define EXTENT_NEED_WAIT (1U << 9)
|
||||
#define EXTENT_DAMAGED (1U << 10)
|
||||
#define EXTENT_NORESERVE (1U << 11)
|
||||
#define EXTENT_QGROUP_RESERVED (1U << 12)
|
||||
#define EXTENT_CLEAR_DATA_RESV (1U << 13)
|
||||
#define EXTENT_DELALLOC_NEW (1U << 14)
|
||||
#define EXTENT_DO_ACCOUNTING (EXTENT_CLEAR_META_RESV | \
|
||||
EXTENT_CLEAR_DATA_RESV)
|
||||
#define EXTENT_CTLBITS (EXTENT_DO_ACCOUNTING)
|
||||
|
||||
/*
|
||||
* Redefined bits above which are used only in the device allocation tree,
|
||||
* shouldn't be using EXTENT_LOCKED / EXTENT_BOUNDARY / EXTENT_CLEAR_META_RESV
|
||||
* / EXTENT_CLEAR_DATA_RESV because they have special meaning to the bit
|
||||
* manipulation functions
|
||||
*/
|
||||
#define CHUNK_ALLOCATED EXTENT_DIRTY
|
||||
#define CHUNK_TRIMMED EXTENT_DEFRAG
|
||||
|
||||
enum {
|
||||
IO_TREE_FS_INFO_FREED_EXTENTS0,
|
||||
IO_TREE_FS_INFO_FREED_EXTENTS1,
|
||||
IO_TREE_INODE_IO,
|
||||
IO_TREE_INODE_IO_FAILURE,
|
||||
IO_TREE_RELOC_BLOCKS,
|
||||
IO_TREE_TRANS_DIRTY_PAGES,
|
||||
IO_TREE_ROOT_DIRTY_LOG_PAGES,
|
||||
IO_TREE_SELFTEST,
|
||||
};
|
||||
|
||||
struct extent_io_tree {
|
||||
struct rb_root state;
|
||||
struct btrfs_fs_info *fs_info;
|
||||
void *private_data;
|
||||
u64 dirty_bytes;
|
||||
bool track_uptodate;
|
||||
|
||||
/* Who owns this io tree, should be one of IO_TREE_* */
|
||||
u8 owner;
|
||||
|
||||
spinlock_t lock;
|
||||
const struct extent_io_ops *ops;
|
||||
};
|
||||
|
||||
struct extent_state {
|
||||
u64 start;
|
||||
u64 end; /* inclusive */
|
||||
struct rb_node rb_node;
|
||||
|
||||
/* ADD NEW ELEMENTS AFTER THIS */
|
||||
wait_queue_head_t wq;
|
||||
refcount_t refs;
|
||||
unsigned state;
|
||||
|
||||
struct io_failure_record *failrec;
|
||||
|
||||
#ifdef CONFIG_BTRFS_DEBUG
|
||||
struct list_head leak_list;
|
||||
#endif
|
||||
};
|
||||
|
||||
int __init extent_state_cache_init(void);
|
||||
void __cold extent_state_cache_exit(void);
|
||||
|
||||
void extent_io_tree_init(struct btrfs_fs_info *fs_info,
|
||||
struct extent_io_tree *tree, unsigned int owner,
|
||||
void *private_data);
|
||||
void extent_io_tree_release(struct extent_io_tree *tree);
|
||||
|
||||
int lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
|
||||
struct extent_state **cached);
|
||||
|
||||
static inline int lock_extent(struct extent_io_tree *tree, u64 start, u64 end)
|
||||
{
|
||||
return lock_extent_bits(tree, start, end, NULL);
|
||||
}
|
||||
|
||||
int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end);
|
||||
|
||||
int __init extent_io_init(void);
|
||||
void __cold extent_io_exit(void);
|
||||
|
||||
u64 count_range_bits(struct extent_io_tree *tree,
|
||||
u64 *start, u64 search_end,
|
||||
u64 max_bytes, unsigned bits, int contig);
|
||||
|
||||
void free_extent_state(struct extent_state *state);
|
||||
int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
|
||||
unsigned bits, int filled,
|
||||
struct extent_state *cached_state);
|
||||
int clear_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
|
||||
unsigned bits, struct extent_changeset *changeset);
|
||||
int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
|
||||
unsigned bits, int wake, int delete,
|
||||
struct extent_state **cached);
|
||||
int __clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
|
||||
unsigned bits, int wake, int delete,
|
||||
struct extent_state **cached, gfp_t mask,
|
||||
struct extent_changeset *changeset);
|
||||
|
||||
static inline int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end)
|
||||
{
|
||||
return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, NULL);
|
||||
}
|
||||
|
||||
static inline int unlock_extent_cached(struct extent_io_tree *tree, u64 start,
|
||||
u64 end, struct extent_state **cached)
|
||||
{
|
||||
return __clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, cached,
|
||||
GFP_NOFS, NULL);
|
||||
}
|
||||
|
||||
static inline int unlock_extent_cached_atomic(struct extent_io_tree *tree,
|
||||
u64 start, u64 end, struct extent_state **cached)
|
||||
{
|
||||
return __clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, cached,
|
||||
GFP_ATOMIC, NULL);
|
||||
}
|
||||
|
||||
static inline int clear_extent_bits(struct extent_io_tree *tree, u64 start,
|
||||
u64 end, unsigned bits)
|
||||
{
|
||||
int wake = 0;
|
||||
|
||||
if (bits & EXTENT_LOCKED)
|
||||
wake = 1;
|
||||
|
||||
return clear_extent_bit(tree, start, end, bits, wake, 0, NULL);
|
||||
}
|
||||
|
||||
int set_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
|
||||
unsigned bits, struct extent_changeset *changeset);
|
||||
int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
|
||||
unsigned bits, u64 *failed_start,
|
||||
struct extent_state **cached_state, gfp_t mask);
|
||||
int set_extent_bits_nowait(struct extent_io_tree *tree, u64 start, u64 end,
|
||||
unsigned bits);
|
||||
|
||||
static inline int set_extent_bits(struct extent_io_tree *tree, u64 start,
|
||||
u64 end, unsigned bits)
|
||||
{
|
||||
return set_extent_bit(tree, start, end, bits, NULL, NULL, GFP_NOFS);
|
||||
}
|
||||
|
||||
static inline int clear_extent_uptodate(struct extent_io_tree *tree, u64 start,
|
||||
u64 end, struct extent_state **cached_state)
|
||||
{
|
||||
return __clear_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, 0,
|
||||
cached_state, GFP_NOFS, NULL);
|
||||
}
|
||||
|
||||
static inline int set_extent_dirty(struct extent_io_tree *tree, u64 start,
|
||||
u64 end, gfp_t mask)
|
||||
{
|
||||
return set_extent_bit(tree, start, end, EXTENT_DIRTY, NULL,
|
||||
NULL, mask);
|
||||
}
|
||||
|
||||
static inline int clear_extent_dirty(struct extent_io_tree *tree, u64 start,
|
||||
u64 end, struct extent_state **cached)
|
||||
{
|
||||
return clear_extent_bit(tree, start, end,
|
||||
EXTENT_DIRTY | EXTENT_DELALLOC |
|
||||
EXTENT_DO_ACCOUNTING, 0, 0, cached);
|
||||
}
|
||||
|
||||
int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
|
||||
unsigned bits, unsigned clear_bits,
|
||||
struct extent_state **cached_state);
|
||||
|
||||
static inline int set_extent_delalloc(struct extent_io_tree *tree, u64 start,
|
||||
u64 end, unsigned int extra_bits,
|
||||
struct extent_state **cached_state)
|
||||
{
|
||||
return set_extent_bit(tree, start, end,
|
||||
EXTENT_DELALLOC | EXTENT_UPTODATE | extra_bits,
|
||||
NULL, cached_state, GFP_NOFS);
|
||||
}
|
||||
|
||||
static inline int set_extent_defrag(struct extent_io_tree *tree, u64 start,
|
||||
u64 end, struct extent_state **cached_state)
|
||||
{
|
||||
return set_extent_bit(tree, start, end,
|
||||
EXTENT_DELALLOC | EXTENT_UPTODATE | EXTENT_DEFRAG,
|
||||
NULL, cached_state, GFP_NOFS);
|
||||
}
|
||||
|
||||
static inline int set_extent_new(struct extent_io_tree *tree, u64 start,
|
||||
u64 end)
|
||||
{
|
||||
return set_extent_bit(tree, start, end, EXTENT_NEW, NULL, NULL,
|
||||
GFP_NOFS);
|
||||
}
|
||||
|
||||
static inline int set_extent_uptodate(struct extent_io_tree *tree, u64 start,
|
||||
u64 end, struct extent_state **cached_state, gfp_t mask)
|
||||
{
|
||||
return set_extent_bit(tree, start, end, EXTENT_UPTODATE, NULL,
|
||||
cached_state, mask);
|
||||
}
|
||||
|
||||
int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
|
||||
u64 *start_ret, u64 *end_ret, unsigned bits,
|
||||
struct extent_state **cached_state);
|
||||
void find_first_clear_extent_bit(struct extent_io_tree *tree, u64 start,
|
||||
u64 *start_ret, u64 *end_ret, unsigned bits);
|
||||
int extent_invalidatepage(struct extent_io_tree *tree,
|
||||
struct page *page, unsigned long offset);
|
||||
bool btrfs_find_delalloc_range(struct extent_io_tree *tree, u64 *start,
|
||||
u64 *end, u64 max_bytes,
|
||||
struct extent_state **cached_state);
|
||||
|
||||
/* This should be reworked in the future and put elsewhere. */
|
||||
int get_state_failrec(struct extent_io_tree *tree, u64 start,
|
||||
struct io_failure_record **failrec);
|
||||
int set_state_failrec(struct extent_io_tree *tree, u64 start,
|
||||
struct io_failure_record *failrec);
|
||||
void btrfs_free_io_failure_record(struct btrfs_inode *inode, u64 start,
|
||||
u64 end);
|
||||
int btrfs_get_io_failure_record(struct inode *inode, u64 start, u64 end,
|
||||
struct io_failure_record **failrec_ret);
|
||||
int free_io_failure(struct extent_io_tree *failure_tree,
|
||||
struct extent_io_tree *io_tree,
|
||||
struct io_failure_record *rec);
|
||||
int clean_io_failure(struct btrfs_fs_info *fs_info,
|
||||
struct extent_io_tree *failure_tree,
|
||||
struct extent_io_tree *io_tree, u64 start,
|
||||
struct page *page, u64 ino, unsigned int pg_offset);
|
||||
|
||||
#endif /* BTRFS_EXTENT_IO_TREE_H */
|
|
@@ -54,7 +54,7 @@ static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
|
|||
static int find_next_key(struct btrfs_path *path, int level,
|
||||
struct btrfs_key *key);
|
||||
|
||||
static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits)
|
||||
static int block_group_bits(struct btrfs_block_group *cache, u64 bits)
|
||||
{
|
||||
return (cache->flags & bits) == bits;
|
||||
}
|
||||
|
@@ -70,13 +70,13 @@ int btrfs_add_excluded_extent(struct btrfs_fs_info *fs_info,
|
|||
return 0;
|
||||
}
|
||||
|
||||
void btrfs_free_excluded_extents(struct btrfs_block_group_cache *cache)
|
||||
void btrfs_free_excluded_extents(struct btrfs_block_group *cache)
|
||||
{
|
||||
struct btrfs_fs_info *fs_info = cache->fs_info;
|
||||
u64 start, end;
|
||||
|
||||
start = cache->key.objectid;
|
||||
end = start + cache->key.offset - 1;
|
||||
start = cache->start;
|
||||
end = start + cache->length - 1;
|
||||
|
||||
clear_extent_bits(&fs_info->freed_extents[0],
|
||||
start, end, EXTENT_UPTODATE);
|
||||
|
@@ -1306,8 +1306,10 @@ static int btrfs_issue_discard(struct block_device *bdev, u64 start, u64 len,
|
|||
int btrfs_discard_extent(struct btrfs_fs_info *fs_info, u64 bytenr,
|
||||
u64 num_bytes, u64 *actual_bytes)
|
||||
{
|
||||
int ret;
|
||||
int ret = 0;
|
||||
u64 discarded_bytes = 0;
|
||||
u64 end = bytenr + num_bytes;
|
||||
u64 cur = bytenr;
|
||||
struct btrfs_bio *bbio = NULL;
|
||||
|
||||
|
||||
|
@@ -1316,15 +1318,23 @@ int btrfs_discard_extent(struct btrfs_fs_info *fs_info, u64 bytenr,
|
|||
* associated to its stripes that don't go away while we are discarding.
|
||||
*/
|
||||
btrfs_bio_counter_inc_blocked(fs_info);
|
||||
/* Tell the block device(s) that the sectors can be discarded */
|
||||
ret = btrfs_map_block(fs_info, BTRFS_MAP_DISCARD, bytenr, &num_bytes,
|
||||
&bbio, 0);
|
||||
/* Error condition is -ENOMEM */
|
||||
if (!ret) {
|
||||
struct btrfs_bio_stripe *stripe = bbio->stripes;
|
||||
while (cur < end) {
|
||||
struct btrfs_bio_stripe *stripe;
|
||||
int i;
|
||||
|
||||
num_bytes = end - cur;
|
||||
/* Tell the block device(s) that the sectors can be discarded */
|
||||
ret = btrfs_map_block(fs_info, BTRFS_MAP_DISCARD, cur,
|
||||
&num_bytes, &bbio, 0);
|
||||
/*
|
||||
* Error can be -ENOMEM, -ENOENT (no such chunk mapping) or
|
||||
* -EOPNOTSUPP. For any such error, @num_bytes is not updated,
|
||||
* thus we can't continue anyway.
|
||||
*/
|
||||
if (ret < 0)
|
||||
goto out;
|
||||
|
||||
stripe = bbio->stripes;
|
||||
for (i = 0; i < bbio->num_stripes; i++, stripe++) {
|
||||
u64 bytes;
|
||||
struct request_queue *req_q;
|
||||
|
@@ -1341,10 +1351,19 @@ int btrfs_discard_extent(struct btrfs_fs_info *fs_info, u64 bytenr,
|
|||
stripe->physical,
|
||||
stripe->length,
|
||||
&bytes);
|
||||
if (!ret)
|
||||
if (!ret) {
|
||||
discarded_bytes += bytes;
|
||||
else if (ret != -EOPNOTSUPP)
|
||||
break; /* Logic errors or -ENOMEM, or -EIO but I don't know how that could happen JDM */
|
||||
} else if (ret != -EOPNOTSUPP) {
|
||||
/*
|
||||
* Logic errors or -ENOMEM, or -EIO, but
|
||||
* unlikely to happen.
|
||||
*
|
||||
* And since there are two loops, explicitly
|
||||
* go to out to avoid confusion.
|
||||
*/
|
||||
btrfs_put_bbio(bbio);
|
||||
goto out;
|
||||
}
|
||||
|
||||
/*
|
||||
* Just in case we get back EOPNOTSUPP for some reason,
|
||||
|
@@ -1354,7 +1373,9 @@ int btrfs_discard_extent(struct btrfs_fs_info *fs_info, u64 bytenr,
|
|||
ret = 0;
|
||||
}
|
||||
btrfs_put_bbio(bbio);
|
||||
cur += num_bytes;
|
||||
}
|
||||
out:
|
||||
btrfs_bio_counter_dec(fs_info);
|
||||
|
||||
if (actual_bytes)
|
||||
|
@@ -2516,7 +2537,7 @@ int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
|
|||
|
||||
int btrfs_extent_readonly(struct btrfs_fs_info *fs_info, u64 bytenr)
|
||||
{
|
||||
struct btrfs_block_group_cache *block_group;
|
||||
struct btrfs_block_group *block_group;
|
||||
int readonly = 0;
|
||||
|
||||
block_group = btrfs_lookup_block_group(fs_info, bytenr);
|
||||
|
@@ -2546,7 +2567,7 @@ static u64 get_alloc_profile_by_root(struct btrfs_root *root, int data)
|
|||
|
||||
static u64 first_logical_byte(struct btrfs_fs_info *fs_info, u64 search_start)
|
||||
{
|
||||
struct btrfs_block_group_cache *cache;
|
||||
struct btrfs_block_group *cache;
|
||||
u64 bytenr;
|
||||
|
||||
spin_lock(&fs_info->block_group_cache_lock);
|
||||
|
@@ -2560,13 +2581,13 @@ static u64 first_logical_byte(struct btrfs_fs_info *fs_info, u64 search_start)
|
|||
if (!cache)
|
||||
return 0;
|
||||
|
||||
bytenr = cache->key.objectid;
|
||||
bytenr = cache->start;
|
||||
btrfs_put_block_group(cache);
|
||||
|
||||
return bytenr;
|
||||
}
|
||||
|
||||
static int pin_down_extent(struct btrfs_block_group_cache *cache,
|
||||
static int pin_down_extent(struct btrfs_block_group *cache,
|
||||
u64 bytenr, u64 num_bytes, int reserved)
|
||||
{
|
||||
struct btrfs_fs_info *fs_info = cache->fs_info;
|
||||
|
@ -2590,13 +2611,12 @@ static int pin_down_extent(struct btrfs_block_group_cache *cache,
|
|||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* this function must be called within transaction
|
||||
*/
|
||||
int btrfs_pin_extent(struct btrfs_fs_info *fs_info,
|
||||
u64 bytenr, u64 num_bytes, int reserved)
|
||||
{
|
||||
struct btrfs_block_group_cache *cache;
|
||||
struct btrfs_block_group *cache;
|
||||
|
||||
ASSERT(fs_info->running_transaction);
|
||||
|
||||
cache = btrfs_lookup_block_group(fs_info, bytenr);
|
||||
BUG_ON(!cache); /* Logic error */
|
||||
|
@ -2613,7 +2633,7 @@ int btrfs_pin_extent(struct btrfs_fs_info *fs_info,
|
|||
int btrfs_pin_extent_for_log_replay(struct btrfs_fs_info *fs_info,
|
||||
u64 bytenr, u64 num_bytes)
|
||||
{
|
||||
struct btrfs_block_group_cache *cache;
|
||||
struct btrfs_block_group *cache;
|
||||
int ret;
|
||||
|
||||
cache = btrfs_lookup_block_group(fs_info, bytenr);
|
||||
|
@ -2640,7 +2660,7 @@ static int __exclude_logged_extent(struct btrfs_fs_info *fs_info,
|
|||
u64 start, u64 num_bytes)
|
||||
{
|
||||
int ret;
|
||||
struct btrfs_block_group_cache *block_group;
|
||||
struct btrfs_block_group *block_group;
|
||||
struct btrfs_caching_control *caching_ctl;
|
||||
|
||||
block_group = btrfs_lookup_block_group(fs_info, start);
|
||||
|
@ -2652,7 +2672,7 @@ static int __exclude_logged_extent(struct btrfs_fs_info *fs_info,
|
|||
|
||||
if (!caching_ctl) {
|
||||
/* Logic error */
|
||||
BUG_ON(!btrfs_block_group_cache_done(block_group));
|
||||
BUG_ON(!btrfs_block_group_done(block_group));
|
||||
ret = btrfs_remove_free_space(block_group, start, num_bytes);
|
||||
} else {
|
||||
mutex_lock(&caching_ctl->mutex);
|
||||
|
@ -2717,7 +2737,7 @@ int btrfs_exclude_logged_extents(struct extent_buffer *eb)
|
|||
}
|
||||
|
||||
static void
|
||||
btrfs_inc_block_group_reservations(struct btrfs_block_group_cache *bg)
|
||||
btrfs_inc_block_group_reservations(struct btrfs_block_group *bg)
|
||||
{
|
||||
atomic_inc(&bg->reservations);
|
||||
}
|
||||
|
@ -2726,14 +2746,14 @@ void btrfs_prepare_extent_commit(struct btrfs_fs_info *fs_info)
|
|||
{
|
||||
struct btrfs_caching_control *next;
|
||||
struct btrfs_caching_control *caching_ctl;
|
||||
struct btrfs_block_group_cache *cache;
|
||||
struct btrfs_block_group *cache;
|
||||
|
||||
down_write(&fs_info->commit_root_sem);
|
||||
|
||||
list_for_each_entry_safe(caching_ctl, next,
|
||||
&fs_info->caching_block_groups, list) {
|
||||
cache = caching_ctl->block_group;
|
||||
if (btrfs_block_group_cache_done(cache)) {
if (btrfs_block_group_done(cache)) {
cache->last_byte_to_unpin = (u64)-1;
list_del_init(&caching_ctl->list);
btrfs_put_caching_control(caching_ctl);
@ -2785,7 +2805,7 @@ static int unpin_extent_range(struct btrfs_fs_info *fs_info,
u64 start, u64 end,
const bool return_free_space)
{
struct btrfs_block_group_cache *cache = NULL;
struct btrfs_block_group *cache = NULL;
struct btrfs_space_info *space_info;
struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
struct btrfs_free_cluster *cluster = NULL;
@ -2797,7 +2817,7 @@ static int unpin_extent_range(struct btrfs_fs_info *fs_info,
while (start <= end) {
readonly = false;
if (!cache ||
start >= cache->key.objectid + cache->key.offset) {
start >= cache->start + cache->length) {
if (cache)
btrfs_put_block_group(cache);
total_unpinned = 0;
@ -2810,7 +2830,7 @@ static int unpin_extent_range(struct btrfs_fs_info *fs_info,
empty_cluster <<= 1;
}

len = cache->key.objectid + cache->key.offset - start;
len = cache->start + cache->length - start;
len = min(len, end + 1 - start);

if (start < cache->last_byte_to_unpin) {
@ -2880,7 +2900,7 @@ static int unpin_extent_range(struct btrfs_fs_info *fs_info,
int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans)
{
struct btrfs_fs_info *fs_info = trans->fs_info;
struct btrfs_block_group_cache *block_group, *tmp;
struct btrfs_block_group *block_group, *tmp;
struct list_head *deleted_bgs;
struct extent_io_tree *unpin;
u64 start;
@ -2926,8 +2946,8 @@ int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans)
ret = -EROFS;
if (!trans->aborted)
ret = btrfs_discard_extent(fs_info,
block_group->key.objectid,
block_group->key.offset,
block_group->start,
block_group->length,
&trimmed);

list_del_init(&block_group->bg_list);
@ -3262,7 +3282,7 @@ void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
}

if (last_ref && btrfs_header_generation(buf) == trans->transid) {
struct btrfs_block_group_cache *cache;
struct btrfs_block_group *cache;

if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
ret = check_ref_cleanup(trans, buf->start);
@ -3349,15 +3369,14 @@ enum btrfs_loop_type {
};

static inline void
btrfs_lock_block_group(struct btrfs_block_group_cache *cache,
btrfs_lock_block_group(struct btrfs_block_group *cache,
int delalloc)
{
if (delalloc)
down_read(&cache->data_rwsem);
}

static inline void
btrfs_grab_block_group(struct btrfs_block_group_cache *cache,
static inline void btrfs_grab_block_group(struct btrfs_block_group *cache,
int delalloc)
{
btrfs_get_block_group(cache);
@ -3365,12 +3384,12 @@ btrfs_grab_block_group(struct btrfs_block_group_cache *cache,
down_read(&cache->data_rwsem);
}

static struct btrfs_block_group_cache *
btrfs_lock_cluster(struct btrfs_block_group_cache *block_group,
static struct btrfs_block_group *btrfs_lock_cluster(
struct btrfs_block_group *block_group,
struct btrfs_free_cluster *cluster,
int delalloc)
{
struct btrfs_block_group_cache *used_bg = NULL;
struct btrfs_block_group *used_bg = NULL;

spin_lock(&cluster->refill_lock);
while (1) {
@ -3404,7 +3423,7 @@ btrfs_lock_cluster(struct btrfs_block_group_cache *block_group,
}

static inline void
btrfs_release_block_group(struct btrfs_block_group_cache *cache,
btrfs_release_block_group(struct btrfs_block_group *cache,
int delalloc)
{
if (delalloc)
@ -3475,12 +3494,12 @@ struct find_free_extent_ctl {
* Return >0 to inform caller that we find nothing
* Return 0 means we have found a location and set ffe_ctl->found_offset.
*/
static int find_free_extent_clustered(struct btrfs_block_group_cache *bg,
static int find_free_extent_clustered(struct btrfs_block_group *bg,
struct btrfs_free_cluster *last_ptr,
struct find_free_extent_ctl *ffe_ctl,
struct btrfs_block_group_cache **cluster_bg_ret)
struct btrfs_block_group **cluster_bg_ret)
{
struct btrfs_block_group_cache *cluster_bg;
struct btrfs_block_group *cluster_bg;
u64 aligned_cluster;
u64 offset;
int ret;
@ -3493,7 +3512,7 @@ static int find_free_extent_clustered(struct btrfs_block_group_cache *bg,
goto release_cluster;

offset = btrfs_alloc_from_cluster(cluster_bg, last_ptr,
ffe_ctl->num_bytes, cluster_bg->key.objectid,
ffe_ctl->num_bytes, cluster_bg->start,
&ffe_ctl->max_extent_size);
if (offset) {
/* We have a block, we're done */
@ -3579,7 +3598,7 @@ refill_cluster:
* Return 0 when we found an free extent and set ffe_ctrl->found_offset
* Return -EAGAIN to inform caller that we need to re-search this block group
*/
static int find_free_extent_unclustered(struct btrfs_block_group_cache *bg,
static int find_free_extent_unclustered(struct btrfs_block_group *bg,
struct btrfs_free_cluster *last_ptr,
struct find_free_extent_ctl *ffe_ctl)
{
@ -3781,7 +3800,7 @@ static noinline int find_free_extent(struct btrfs_fs_info *fs_info,
{
int ret = 0;
struct btrfs_free_cluster *last_ptr = NULL;
struct btrfs_block_group_cache *block_group = NULL;
struct btrfs_block_group *block_group = NULL;
struct find_free_extent_ctl ffe_ctl = {0};
struct btrfs_space_info *space_info;
bool use_cluster = true;
@ -3904,7 +3923,7 @@ search:
continue;

btrfs_grab_block_group(block_group, delalloc);
ffe_ctl.search_start = block_group->key.objectid;
ffe_ctl.search_start = block_group->start;

/*
* this can happen if we end up cycling through all the
@ -3935,7 +3954,7 @@ search:
}

have_block_group:
ffe_ctl.cached = btrfs_block_group_cache_done(block_group);
ffe_ctl.cached = btrfs_block_group_done(block_group);
if (unlikely(!ffe_ctl.cached)) {
ffe_ctl.have_caching_bg = true;
ret = btrfs_cache_block_group(block_group, 0);
@ -3951,7 +3970,7 @@ have_block_group:
* lets look there
*/
if (last_ptr && use_cluster) {
struct btrfs_block_group_cache *cluster_bg = NULL;
struct btrfs_block_group *cluster_bg = NULL;

ret = find_free_extent_clustered(block_group, last_ptr,
&ffe_ctl, &cluster_bg);
@ -3984,7 +4003,7 @@ checks:

/* move on to the next group */
if (ffe_ctl.search_start + num_bytes >
block_group->key.objectid + block_group->key.offset) {
block_group->start + block_group->length) {
btrfs_add_free_space(block_group, ffe_ctl.found_offset,
num_bytes);
goto loop;
@ -4133,7 +4152,7 @@ static int __btrfs_free_reserved_extent(struct btrfs_fs_info *fs_info,
u64 start, u64 len,
int pin, int delalloc)
{
struct btrfs_block_group_cache *cache;
struct btrfs_block_group *cache;
int ret = 0;

cache = btrfs_lookup_block_group(fs_info, start);
@ -4366,7 +4385,7 @@ int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
{
struct btrfs_fs_info *fs_info = trans->fs_info;
int ret;
struct btrfs_block_group_cache *block_group;
struct btrfs_block_group *block_group;
struct btrfs_space_info *space_info;

/*
@ -5436,7 +5455,7 @@ int btrfs_drop_subtree(struct btrfs_trans_handle *trans,

btrfs_assert_tree_locked(parent);
parent_level = btrfs_header_level(parent);
extent_buffer_get(parent);
atomic_inc(&parent->refs);
path->nodes[parent_level] = parent;
path->slots[parent_level] = btrfs_header_nritems(parent);

@ -5480,7 +5499,7 @@ int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
*/
u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo)
{
struct btrfs_block_group_cache *block_group;
struct btrfs_block_group *block_group;
u64 free_bytes = 0;
int factor;

@ -5498,9 +5517,8 @@ u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo)
}

factor = btrfs_bg_type_to_factor(block_group->flags);
free_bytes += (block_group->key.offset -
btrfs_block_group_used(&block_group->item)) *
factor;
free_bytes += (block_group->length -
block_group->used) * factor;

spin_unlock(&block_group->lock);
}
@ -5623,7 +5641,7 @@ static int btrfs_trim_free_extents(struct btrfs_device *device, u64 *trimmed)
*/
int btrfs_trim_fs(struct btrfs_fs_info *fs_info, struct fstrim_range *range)
{
struct btrfs_block_group_cache *cache = NULL;
struct btrfs_block_group *cache = NULL;
struct btrfs_device *device;
struct list_head *devices;
u64 group_trimmed;
@ -5647,16 +5665,16 @@ int btrfs_trim_fs(struct btrfs_fs_info *fs_info, struct fstrim_range *range)

cache = btrfs_lookup_first_block_group(fs_info, range->start);
for (; cache; cache = btrfs_next_block_group(cache)) {
if (cache->key.objectid >= range_end) {
if (cache->start >= range_end) {
btrfs_put_block_group(cache);
break;
}

start = max(range->start, cache->key.objectid);
end = min(range_end, cache->key.objectid + cache->key.offset);
start = max(range->start, cache->start);
end = min(range_end, cache->start + cache->length);

if (end - start >= range->minlen) {
if (!btrfs_block_group_cache_done(cache)) {
if (!btrfs_block_group_done(cache)) {
ret = btrfs_cache_block_group(cache, 0);
if (ret) {
bg_failed++;
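
The hunks above all follow the same mechanical pattern from this series: struct btrfs_block_group_cache becomes struct btrfs_block_group, and the byte range that used to be read out of the embedded key (key.objectid for the start, key.offset for the length) now comes from dedicated start/length members, with the used byte count kept as a plain field as well. A minimal sketch of what a caller looks like after the rename; the helper name is made up here for illustration and is not part of the patch:

	/* Hypothetical debug helper, not in the patch: dump a block group's range. */
	static void dump_block_group_range(const struct btrfs_block_group *bg)
	{
		/*
		 * Previously this read bg->key.objectid, bg->key.offset and
		 * btrfs_block_group_used(&bg->item).
		 */
		pr_info("block group start %llu length %llu used %llu\n",
			bg->start, bg->length, bg->used);
	}
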
@ -14,6 +14,7 @@
#include <linux/prefetch.h>
#include <linux/cleancache.h>
#include "extent_io.h"
#include "extent-io-tree.h"
#include "extent_map.h"
#include "ctree.h"
#include "btrfs_inode.h"
@ -59,11 +60,22 @@ void btrfs_leak_debug_del(struct list_head *entry)
spin_unlock_irqrestore(&leak_lock, flags);
}

static inline
void btrfs_leak_debug_check(void)
static inline void btrfs_extent_buffer_leak_debug_check(void)
{
struct extent_buffer *eb;

while (!list_empty(&buffers)) {
eb = list_entry(buffers.next, struct extent_buffer, leak_list);
pr_err("BTRFS: buffer leak start %llu len %lu refs %d bflags %lu\n",
eb->start, eb->len, atomic_read(&eb->refs), eb->bflags);
list_del(&eb->leak_list);
kmem_cache_free(extent_buffer_cache, eb);
}
}

static inline void btrfs_extent_state_leak_debug_check(void)
{
struct extent_state *state;
struct extent_buffer *eb;

while (!list_empty(&states)) {
state = list_entry(states.next, struct extent_state, leak_list);
@ -74,14 +86,6 @@ void btrfs_leak_debug_check(void)
list_del(&state->leak_list);
kmem_cache_free(extent_state_cache, state);
}

while (!list_empty(&buffers)) {
eb = list_entry(buffers.next, struct extent_buffer, leak_list);
pr_err("BTRFS: buffer leak start %llu len %lu refs %d bflags %lu\n",
eb->start, eb->len, atomic_read(&eb->refs), eb->bflags);
list_del(&eb->leak_list);
kmem_cache_free(extent_buffer_cache, eb);
}
}

#define btrfs_debug_check_extent_io_range(tree, start, end) \
@ -105,7 +109,8 @@ static inline void __btrfs_debug_check_extent_io_range(const char *caller,
#else
#define btrfs_leak_debug_add(new, head) do {} while (0)
#define btrfs_leak_debug_del(entry) do {} while (0)
#define btrfs_leak_debug_check() do {} while (0)
#define btrfs_extent_buffer_leak_debug_check() do {} while (0)
#define btrfs_extent_state_leak_debug_check() do {} while (0)
#define btrfs_debug_check_extent_io_range(c, s, e) do {} while (0)
#endif

@ -196,19 +201,23 @@ static int __must_check flush_write_bio(struct extent_page_data *epd)
return ret;
}

int __init extent_io_init(void)
int __init extent_state_cache_init(void)
{
extent_state_cache = kmem_cache_create("btrfs_extent_state",
sizeof(struct extent_state), 0,
SLAB_MEM_SPREAD, NULL);
if (!extent_state_cache)
return -ENOMEM;
return 0;
}

int __init extent_io_init(void)
{
extent_buffer_cache = kmem_cache_create("btrfs_extent_buffer",
sizeof(struct extent_buffer), 0,
SLAB_MEM_SPREAD, NULL);
if (!extent_buffer_cache)
goto free_state_cache;
return -ENOMEM;

if (bioset_init(&btrfs_bioset, BIO_POOL_SIZE,
offsetof(struct btrfs_io_bio, bio),
@ -226,23 +235,24 @@ free_bioset:
free_buffer_cache:
kmem_cache_destroy(extent_buffer_cache);
extent_buffer_cache = NULL;

free_state_cache:
kmem_cache_destroy(extent_state_cache);
extent_state_cache = NULL;
return -ENOMEM;
}

void __cold extent_state_cache_exit(void)
{
btrfs_extent_state_leak_debug_check();
kmem_cache_destroy(extent_state_cache);
}

void __cold extent_io_exit(void)
{
btrfs_leak_debug_check();
btrfs_extent_buffer_leak_debug_check();

/*
* Make sure all delayed rcu free are flushed before we
* destroy caches.
*/
rcu_barrier();
kmem_cache_destroy(extent_state_cache);
kmem_cache_destroy(extent_buffer_cache);
bioset_exit(&btrfs_bioset);
}
@ -1676,9 +1686,9 @@ out:
|
|||
*
|
||||
* true is returned if we find something, false if nothing was in the tree
|
||||
*/
|
||||
static noinline bool find_delalloc_range(struct extent_io_tree *tree,
|
||||
u64 *start, u64 *end, u64 max_bytes,
|
||||
struct extent_state **cached_state)
|
||||
bool btrfs_find_delalloc_range(struct extent_io_tree *tree, u64 *start,
|
||||
u64 *end, u64 max_bytes,
|
||||
struct extent_state **cached_state)
|
||||
{
|
||||
struct rb_node *node;
|
||||
struct extent_state *state;
|
||||
|
@ -1796,8 +1806,8 @@ again:
|
|||
/* step one, find a bunch of delalloc bytes starting at start */
|
||||
delalloc_start = *start;
|
||||
delalloc_end = 0;
|
||||
found = find_delalloc_range(tree, &delalloc_start, &delalloc_end,
|
||||
max_bytes, &cached_state);
|
||||
found = btrfs_find_delalloc_range(tree, &delalloc_start, &delalloc_end,
|
||||
max_bytes, &cached_state);
|
||||
if (!found || delalloc_end <= *start) {
|
||||
*start = delalloc_start;
|
||||
*end = delalloc_end;
|
||||
|
@ -1899,7 +1909,7 @@ static int __process_pages_contig(struct address_space *mapping,
|
|||
if (page_ops & PAGE_SET_PRIVATE2)
|
||||
SetPagePrivate2(pages[i]);
|
||||
|
||||
if (pages[i] == locked_page) {
|
||||
if (locked_page && pages[i] == locked_page) {
|
||||
put_page(pages[i]);
|
||||
pages_locked++;
|
||||
continue;
|
||||
|
@ -2014,8 +2024,8 @@ out:
|
|||
* set the private field for a given byte offset in the tree. If there isn't
|
||||
* an extent_state there already, this does nothing.
|
||||
*/
|
||||
static noinline int set_state_failrec(struct extent_io_tree *tree, u64 start,
|
||||
struct io_failure_record *failrec)
|
||||
int set_state_failrec(struct extent_io_tree *tree, u64 start,
|
||||
struct io_failure_record *failrec)
|
||||
{
|
||||
struct rb_node *node;
|
||||
struct extent_state *state;
|
||||
|
@ -2042,8 +2052,8 @@ out:
|
|||
return ret;
|
||||
}
|
||||
|
||||
static noinline int get_state_failrec(struct extent_io_tree *tree, u64 start,
|
||||
struct io_failure_record **failrec)
|
||||
int get_state_failrec(struct extent_io_tree *tree, u64 start,
|
||||
struct io_failure_record **failrec)
|
||||
{
|
||||
struct rb_node *node;
|
||||
struct extent_state *state;
|
||||
|
@ -2534,7 +2544,6 @@ struct bio *btrfs_create_repair_bio(struct inode *inode, struct bio *failed_bio,
|
|||
bio = btrfs_io_bio_alloc(1);
|
||||
bio->bi_end_io = endio_func;
|
||||
bio->bi_iter.bi_sector = failrec->logical >> 9;
|
||||
bio_set_dev(bio, fs_info->fs_devices->latest_bdev);
|
||||
bio->bi_iter.bi_size = 0;
|
||||
bio->bi_private = data;
|
||||
|
||||
|
@ -2920,7 +2929,6 @@ struct bio *btrfs_bio_clone_partial(struct bio *orig, int offset, int size)
|
|||
* a contiguous page to the previous one
|
||||
* @size: portion of page that we want to write
|
||||
* @offset: starting offset in the page
|
||||
* @bdev: attach newly created bios to this bdev
|
||||
* @bio_ret: must be valid pointer, newly allocated bio will be stored there
|
||||
* @end_io_func: end_io callback for new bio
|
||||
* @mirror_num: desired mirror to read/write
|
||||
|
@ -2931,7 +2939,6 @@ static int submit_extent_page(unsigned int opf, struct extent_io_tree *tree,
|
|||
struct writeback_control *wbc,
|
||||
struct page *page, u64 offset,
|
||||
size_t size, unsigned long pg_offset,
|
||||
struct block_device *bdev,
|
||||
struct bio **bio_ret,
|
||||
bio_end_io_t end_io_func,
|
||||
int mirror_num,
|
||||
|
@ -2977,13 +2984,16 @@ static int submit_extent_page(unsigned int opf, struct extent_io_tree *tree,
|
|||
}
|
||||
|
||||
bio = btrfs_bio_alloc(offset);
|
||||
bio_set_dev(bio, bdev);
|
||||
bio_add_page(bio, page, page_size, pg_offset);
|
||||
bio->bi_end_io = end_io_func;
|
||||
bio->bi_private = tree;
|
||||
bio->bi_write_hint = page->mapping->host->i_write_hint;
|
||||
bio->bi_opf = opf;
|
||||
if (wbc) {
|
||||
struct block_device *bdev;
|
||||
|
||||
bdev = BTRFS_I(page->mapping->host)->root->fs_info->fs_devices->latest_bdev;
|
||||
bio_set_dev(bio, bdev);
|
||||
wbc_init_bio(wbc, bio);
|
||||
wbc_account_cgroup_owner(wbc, page, page_size);
|
||||
}
|
||||
|
@ -3065,7 +3075,6 @@ static int __do_readpage(struct extent_io_tree *tree,
|
|||
u64 block_start;
|
||||
u64 cur_end;
|
||||
struct extent_map *em;
|
||||
struct block_device *bdev;
|
||||
int ret = 0;
|
||||
int nr = 0;
|
||||
size_t pg_offset = 0;
|
||||
|
@ -3142,7 +3151,6 @@ static int __do_readpage(struct extent_io_tree *tree,
|
|||
offset = em->block_start + extent_offset;
|
||||
disk_io_size = iosize;
|
||||
}
|
||||
bdev = em->bdev;
|
||||
block_start = em->block_start;
|
||||
if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
|
||||
block_start = EXTENT_MAP_HOLE;
|
||||
|
@ -3232,7 +3240,7 @@ static int __do_readpage(struct extent_io_tree *tree,
|
|||
|
||||
ret = submit_extent_page(REQ_OP_READ | read_flags, tree, NULL,
|
||||
page, offset, disk_io_size,
|
||||
pg_offset, bdev, bio,
|
||||
pg_offset, bio,
|
||||
end_bio_extent_readpage, mirror_num,
|
||||
*bio_flags,
|
||||
this_bio_flag,
|
||||
|
@ -3409,7 +3417,7 @@ static noinline_for_stack int __extent_writepage_io(struct inode *inode,
|
|||
struct extent_page_data *epd,
|
||||
loff_t i_size,
|
||||
unsigned long nr_written,
|
||||
unsigned int write_flags, int *nr_ret)
|
||||
int *nr_ret)
|
||||
{
|
||||
struct extent_io_tree *tree = epd->tree;
|
||||
u64 start = page_offset(page);
|
||||
|
@ -3420,11 +3428,11 @@ static noinline_for_stack int __extent_writepage_io(struct inode *inode,
|
|||
u64 block_start;
|
||||
u64 iosize;
|
||||
struct extent_map *em;
|
||||
struct block_device *bdev;
|
||||
size_t pg_offset = 0;
|
||||
size_t blocksize;
|
||||
int ret = 0;
|
||||
int nr = 0;
|
||||
const unsigned int write_flags = wbc_to_write_flags(wbc);
|
||||
bool compressed;
|
||||
|
||||
ret = btrfs_writepage_cow_fixup(page, start, page_end);
|
||||
|
@ -3478,7 +3486,6 @@ static noinline_for_stack int __extent_writepage_io(struct inode *inode,
|
|||
iosize = min(em_end - cur, end - cur + 1);
|
||||
iosize = ALIGN(iosize, blocksize);
|
||||
offset = em->block_start + extent_offset;
|
||||
bdev = em->bdev;
|
||||
block_start = em->block_start;
|
||||
compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
|
||||
free_extent_map(em);
|
||||
|
@ -3520,7 +3527,7 @@ static noinline_for_stack int __extent_writepage_io(struct inode *inode,
|
|||
|
||||
ret = submit_extent_page(REQ_OP_WRITE | write_flags, tree, wbc,
|
||||
page, offset, iosize, pg_offset,
|
||||
bdev, &epd->bio,
|
||||
&epd->bio,
|
||||
end_bio_extent_writepage,
|
||||
0, 0, 0, false);
|
||||
if (ret) {
|
||||
|
@ -3558,11 +3565,8 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
|
|||
size_t pg_offset = 0;
|
||||
loff_t i_size = i_size_read(inode);
|
||||
unsigned long end_index = i_size >> PAGE_SHIFT;
|
||||
unsigned int write_flags = 0;
|
||||
unsigned long nr_written = 0;
|
||||
|
||||
write_flags = wbc_to_write_flags(wbc);
|
||||
|
||||
trace___extent_writepage(page, inode, wbc);
|
||||
|
||||
WARN_ON(!PageLocked(page));
|
||||
|
@ -3600,7 +3604,7 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
|
|||
}
|
||||
|
||||
ret = __extent_writepage_io(inode, page, wbc, epd,
|
||||
i_size, nr_written, write_flags, &nr);
|
||||
i_size, nr_written, &nr);
|
||||
if (ret == 1)
|
||||
goto done_unlocked;
|
||||
|
||||
|
@ -3849,7 +3853,6 @@ static noinline_for_stack int write_one_eb(struct extent_buffer *eb,
|
|||
struct extent_page_data *epd)
|
||||
{
|
||||
struct btrfs_fs_info *fs_info = eb->fs_info;
|
||||
struct block_device *bdev = fs_info->fs_devices->latest_bdev;
|
||||
struct extent_io_tree *tree = &BTRFS_I(fs_info->btree_inode)->io_tree;
|
||||
u64 offset = eb->start;
|
||||
u32 nritems;
|
||||
|
@ -3884,7 +3887,7 @@ static noinline_for_stack int write_one_eb(struct extent_buffer *eb,
|
|||
clear_page_dirty_for_io(p);
|
||||
set_page_writeback(p);
|
||||
ret = submit_extent_page(REQ_OP_WRITE | write_flags, tree, wbc,
|
||||
p, offset, PAGE_SIZE, 0, bdev,
|
||||
p, offset, PAGE_SIZE, 0,
|
||||
&epd->bio,
|
||||
end_bio_extent_buffer_writepage,
|
||||
0, 0, 0, false);
|
||||
|
@ -4121,7 +4124,7 @@ retry:
|
|||
for (i = 0; i < nr_pages; i++) {
|
||||
struct page *page = pvec.pages[i];
|
||||
|
||||
done_index = page->index;
|
||||
done_index = page->index + 1;
|
||||
/*
|
||||
* At this point we hold neither the i_pages lock nor
|
||||
* the page lock: the page may be truncated or
|
||||
|
@ -4156,16 +4159,6 @@ retry:
|
|||
|
||||
ret = __extent_writepage(page, wbc, epd);
|
||||
if (ret < 0) {
|
||||
/*
|
||||
* done_index is set past this page,
|
||||
* so media errors will not choke
|
||||
* background writeout for the entire
|
||||
* file. This has consequences for
|
||||
* range_cyclic semantics (ie. it may
|
||||
* not be suitable for data integrity
|
||||
* writeout).
|
||||
*/
|
||||
done_index = page->index + 1;
|
||||
done = 1;
|
||||
break;
|
||||
}
|
||||
|
@ -4240,8 +4233,12 @@ int extent_write_locked_range(struct inode *inode, u64 start, u64 end,
|
|||
.nr_to_write = nr_pages * 2,
|
||||
.range_start = start,
|
||||
.range_end = end + 1,
|
||||
/* We're called from an async helper function */
|
||||
.punt_to_cgroup = 1,
|
||||
.no_cgroup_owner = 1,
|
||||
};
|
||||
|
||||
wbc_attach_fdatawrite_inode(&wbc_writepages, inode);
|
||||
while (start <= end) {
|
||||
page = find_get_page(mapping, start >> PAGE_SHIFT);
|
||||
if (clear_page_dirty_for_io(page))
|
||||
|
@ -4256,11 +4253,12 @@ int extent_write_locked_range(struct inode *inode, u64 start, u64 end,
|
|||
}
|
||||
|
||||
ASSERT(ret <= 0);
|
||||
if (ret < 0) {
|
||||
if (ret == 0)
|
||||
ret = flush_write_bio(&epd);
|
||||
else
|
||||
end_write_bio(&epd, ret);
|
||||
return ret;
|
||||
}
|
||||
ret = flush_write_bio(&epd);
|
||||
|
||||
wbc_detach_inode(&wbc_writepages);
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
|
|
@ -7,35 +7,6 @@
|
|||
#include <linux/refcount.h>
|
||||
#include "ulist.h"
|
||||
|
||||
/* bits for the extent state */
|
||||
#define EXTENT_DIRTY (1U << 0)
|
||||
#define EXTENT_UPTODATE (1U << 1)
|
||||
#define EXTENT_LOCKED (1U << 2)
|
||||
#define EXTENT_NEW (1U << 3)
|
||||
#define EXTENT_DELALLOC (1U << 4)
|
||||
#define EXTENT_DEFRAG (1U << 5)
|
||||
#define EXTENT_BOUNDARY (1U << 6)
|
||||
#define EXTENT_NODATASUM (1U << 7)
|
||||
#define EXTENT_CLEAR_META_RESV (1U << 8)
|
||||
#define EXTENT_NEED_WAIT (1U << 9)
|
||||
#define EXTENT_DAMAGED (1U << 10)
|
||||
#define EXTENT_NORESERVE (1U << 11)
|
||||
#define EXTENT_QGROUP_RESERVED (1U << 12)
|
||||
#define EXTENT_CLEAR_DATA_RESV (1U << 13)
|
||||
#define EXTENT_DELALLOC_NEW (1U << 14)
|
||||
#define EXTENT_DO_ACCOUNTING (EXTENT_CLEAR_META_RESV | \
|
||||
EXTENT_CLEAR_DATA_RESV)
|
||||
#define EXTENT_CTLBITS (EXTENT_DO_ACCOUNTING)
|
||||
|
||||
/*
|
||||
* Redefined bits above which are used only in the device allocation tree,
|
||||
* shouldn't be using EXTENT_LOCKED / EXTENT_BOUNDARY / EXTENT_CLEAR_META_RESV
|
||||
* / EXTENT_CLEAR_DATA_RESV because they have special meaning to the bit
|
||||
* manipulation functions
|
||||
*/
|
||||
#define CHUNK_ALLOCATED EXTENT_DIRTY
|
||||
#define CHUNK_TRIMMED EXTENT_DEFRAG
|
||||
|
||||
/*
|
||||
* flags for bio submission. The high bits indicate the compression
|
||||
* type for this bio
|
||||
|
@ -89,12 +60,11 @@ enum {
|
|||
#define BITMAP_LAST_BYTE_MASK(nbits) \
|
||||
(BYTE_MASK >> (-(nbits) & (BITS_PER_BYTE - 1)))
|
||||
|
||||
struct extent_state;
|
||||
struct btrfs_root;
|
||||
struct btrfs_inode;
|
||||
struct btrfs_io_bio;
|
||||
struct io_failure_record;
|
||||
|
||||
struct extent_io_tree;
|
||||
|
||||
typedef blk_status_t (extent_submit_bio_start_t)(void *private_data,
|
||||
struct bio *bio, u64 bio_offset);
|
||||
|
@ -111,47 +81,6 @@ struct extent_io_ops {
|
|||
int mirror);
|
||||
};
|
||||
|
||||
enum {
|
||||
IO_TREE_FS_INFO_FREED_EXTENTS0,
|
||||
IO_TREE_FS_INFO_FREED_EXTENTS1,
|
||||
IO_TREE_INODE_IO,
|
||||
IO_TREE_INODE_IO_FAILURE,
|
||||
IO_TREE_RELOC_BLOCKS,
|
||||
IO_TREE_TRANS_DIRTY_PAGES,
|
||||
IO_TREE_ROOT_DIRTY_LOG_PAGES,
|
||||
IO_TREE_SELFTEST,
|
||||
};
|
||||
|
||||
struct extent_io_tree {
|
||||
struct rb_root state;
|
||||
struct btrfs_fs_info *fs_info;
|
||||
void *private_data;
|
||||
u64 dirty_bytes;
|
||||
bool track_uptodate;
|
||||
|
||||
/* Who owns this io tree, should be one of IO_TREE_* */
|
||||
u8 owner;
|
||||
|
||||
spinlock_t lock;
|
||||
const struct extent_io_ops *ops;
|
||||
};
|
||||
|
||||
struct extent_state {
|
||||
u64 start;
|
||||
u64 end; /* inclusive */
|
||||
struct rb_node rb_node;
|
||||
|
||||
/* ADD NEW ELEMENTS AFTER THIS */
|
||||
wait_queue_head_t wq;
|
||||
refcount_t refs;
|
||||
unsigned state;
|
||||
|
||||
struct io_failure_record *failrec;
|
||||
|
||||
#ifdef CONFIG_BTRFS_DEBUG
|
||||
struct list_head leak_list;
|
||||
#endif
|
||||
};
|
||||
|
||||
#define INLINE_EXTENT_BUFFER_PAGES 16
|
||||
#define MAX_INLINE_EXTENT_BUFFER_SIZE (INLINE_EXTENT_BUFFER_PAGES * PAGE_SIZE)
|
||||
|
@ -259,152 +188,11 @@ typedef struct extent_map *(get_extent_t)(struct btrfs_inode *inode,
|
|||
u64 start, u64 len,
|
||||
int create);
|
||||
|
||||
void extent_io_tree_init(struct btrfs_fs_info *fs_info,
|
||||
struct extent_io_tree *tree, unsigned int owner,
|
||||
void *private_data);
|
||||
void extent_io_tree_release(struct extent_io_tree *tree);
|
||||
int try_release_extent_mapping(struct page *page, gfp_t mask);
|
||||
int try_release_extent_buffer(struct page *page);
|
||||
int lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
|
||||
struct extent_state **cached);
|
||||
|
||||
static inline int lock_extent(struct extent_io_tree *tree, u64 start, u64 end)
|
||||
{
|
||||
return lock_extent_bits(tree, start, end, NULL);
|
||||
}
|
||||
|
||||
int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end);
|
||||
int extent_read_full_page(struct extent_io_tree *tree, struct page *page,
|
||||
get_extent_t *get_extent, int mirror_num);
|
||||
int __init extent_io_init(void);
|
||||
void __cold extent_io_exit(void);
|
||||
|
||||
u64 count_range_bits(struct extent_io_tree *tree,
|
||||
u64 *start, u64 search_end,
|
||||
u64 max_bytes, unsigned bits, int contig);
|
||||
|
||||
void free_extent_state(struct extent_state *state);
|
||||
int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
|
||||
unsigned bits, int filled,
|
||||
struct extent_state *cached_state);
|
||||
int clear_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
|
||||
unsigned bits, struct extent_changeset *changeset);
|
||||
int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
|
||||
unsigned bits, int wake, int delete,
|
||||
struct extent_state **cached);
|
||||
int __clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
|
||||
unsigned bits, int wake, int delete,
|
||||
struct extent_state **cached, gfp_t mask,
|
||||
struct extent_changeset *changeset);
|
||||
|
||||
static inline int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end)
|
||||
{
|
||||
return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, NULL);
|
||||
}
|
||||
|
||||
static inline int unlock_extent_cached(struct extent_io_tree *tree, u64 start,
|
||||
u64 end, struct extent_state **cached)
|
||||
{
|
||||
return __clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, cached,
|
||||
GFP_NOFS, NULL);
|
||||
}
|
||||
|
||||
static inline int unlock_extent_cached_atomic(struct extent_io_tree *tree,
|
||||
u64 start, u64 end, struct extent_state **cached)
|
||||
{
|
||||
return __clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, cached,
|
||||
GFP_ATOMIC, NULL);
|
||||
}
|
||||
|
||||
static inline int clear_extent_bits(struct extent_io_tree *tree, u64 start,
|
||||
u64 end, unsigned bits)
|
||||
{
|
||||
int wake = 0;
|
||||
|
||||
if (bits & EXTENT_LOCKED)
|
||||
wake = 1;
|
||||
|
||||
return clear_extent_bit(tree, start, end, bits, wake, 0, NULL);
|
||||
}
|
||||
|
||||
int set_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
|
||||
unsigned bits, struct extent_changeset *changeset);
|
||||
int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
|
||||
unsigned bits, u64 *failed_start,
|
||||
struct extent_state **cached_state, gfp_t mask);
|
||||
int set_extent_bits_nowait(struct extent_io_tree *tree, u64 start, u64 end,
|
||||
unsigned bits);
|
||||
|
||||
static inline int set_extent_bits(struct extent_io_tree *tree, u64 start,
|
||||
u64 end, unsigned bits)
|
||||
{
|
||||
return set_extent_bit(tree, start, end, bits, NULL, NULL, GFP_NOFS);
|
||||
}
|
||||
|
||||
static inline int clear_extent_uptodate(struct extent_io_tree *tree, u64 start,
|
||||
u64 end, struct extent_state **cached_state)
|
||||
{
|
||||
return __clear_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, 0,
|
||||
cached_state, GFP_NOFS, NULL);
|
||||
}
|
||||
|
||||
static inline int set_extent_dirty(struct extent_io_tree *tree, u64 start,
|
||||
u64 end, gfp_t mask)
|
||||
{
|
||||
return set_extent_bit(tree, start, end, EXTENT_DIRTY, NULL,
|
||||
NULL, mask);
|
||||
}
|
||||
|
||||
static inline int clear_extent_dirty(struct extent_io_tree *tree, u64 start,
|
||||
u64 end, struct extent_state **cached)
|
||||
{
|
||||
return clear_extent_bit(tree, start, end,
|
||||
EXTENT_DIRTY | EXTENT_DELALLOC |
|
||||
EXTENT_DO_ACCOUNTING, 0, 0, cached);
|
||||
}
|
||||
|
||||
int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
|
||||
unsigned bits, unsigned clear_bits,
|
||||
struct extent_state **cached_state);
|
||||
|
||||
static inline int set_extent_delalloc(struct extent_io_tree *tree, u64 start,
|
||||
u64 end, unsigned int extra_bits,
|
||||
struct extent_state **cached_state)
|
||||
{
|
||||
return set_extent_bit(tree, start, end,
|
||||
EXTENT_DELALLOC | EXTENT_UPTODATE | extra_bits,
|
||||
NULL, cached_state, GFP_NOFS);
|
||||
}
|
||||
|
||||
static inline int set_extent_defrag(struct extent_io_tree *tree, u64 start,
|
||||
u64 end, struct extent_state **cached_state)
|
||||
{
|
||||
return set_extent_bit(tree, start, end,
|
||||
EXTENT_DELALLOC | EXTENT_UPTODATE | EXTENT_DEFRAG,
|
||||
NULL, cached_state, GFP_NOFS);
|
||||
}
|
||||
|
||||
static inline int set_extent_new(struct extent_io_tree *tree, u64 start,
|
||||
u64 end)
|
||||
{
|
||||
return set_extent_bit(tree, start, end, EXTENT_NEW, NULL, NULL,
|
||||
GFP_NOFS);
|
||||
}
|
||||
|
||||
static inline int set_extent_uptodate(struct extent_io_tree *tree, u64 start,
|
||||
u64 end, struct extent_state **cached_state, gfp_t mask)
|
||||
{
|
||||
return set_extent_bit(tree, start, end, EXTENT_UPTODATE, NULL,
|
||||
cached_state, mask);
|
||||
}
|
||||
|
||||
int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
|
||||
u64 *start_ret, u64 *end_ret, unsigned bits,
|
||||
struct extent_state **cached_state);
|
||||
void find_first_clear_extent_bit(struct extent_io_tree *tree, u64 start,
|
||||
u64 *start_ret, u64 *end_ret, unsigned bits);
|
||||
int extent_invalidatepage(struct extent_io_tree *tree,
|
||||
struct page *page, unsigned long offset);
|
||||
int extent_write_full_page(struct page *page, struct writeback_control *wbc);
|
||||
int extent_write_locked_range(struct inode *inode, u64 start, u64 end,
|
||||
int mode);
|
||||
|
@ -442,11 +230,6 @@ static inline int num_extent_pages(const struct extent_buffer *eb)
|
|||
(eb->start >> PAGE_SHIFT);
|
||||
}
|
||||
|
||||
static inline void extent_buffer_get(struct extent_buffer *eb)
|
||||
{
|
||||
atomic_inc(&eb->refs);
|
||||
}
|
||||
|
||||
static inline int extent_buffer_uptodate(struct extent_buffer *eb)
|
||||
{
|
||||
return test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
|
||||
|
@ -508,10 +291,6 @@ struct btrfs_inode;
|
|||
int repair_io_failure(struct btrfs_fs_info *fs_info, u64 ino, u64 start,
|
||||
u64 length, u64 logical, struct page *page,
|
||||
unsigned int pg_offset, int mirror_num);
|
||||
int clean_io_failure(struct btrfs_fs_info *fs_info,
|
||||
struct extent_io_tree *failure_tree,
|
||||
struct extent_io_tree *io_tree, u64 start,
|
||||
struct page *page, u64 ino, unsigned int pg_offset);
|
||||
void end_extent_writepage(struct page *page, int err, u64 start, u64 end);
|
||||
int btrfs_repair_eb_io_failure(struct extent_buffer *eb, int mirror_num);
|
||||
|
||||
|
@ -535,19 +314,12 @@ struct io_failure_record {
|
|||
};
|
||||
|
||||
|
||||
void btrfs_free_io_failure_record(struct btrfs_inode *inode, u64 start,
|
||||
u64 end);
|
||||
int btrfs_get_io_failure_record(struct inode *inode, u64 start, u64 end,
|
||||
struct io_failure_record **failrec_ret);
|
||||
bool btrfs_check_repairable(struct inode *inode, unsigned failed_bio_pages,
|
||||
struct io_failure_record *failrec, int fail_mirror);
|
||||
struct bio *btrfs_create_repair_bio(struct inode *inode, struct bio *failed_bio,
|
||||
struct io_failure_record *failrec,
|
||||
struct page *page, int pg_offset, int icsum,
|
||||
bio_end_io_t *endio_func, void *data);
|
||||
int free_io_failure(struct extent_io_tree *failure_tree,
|
||||
struct extent_io_tree *io_tree,
|
||||
struct io_failure_record *rec);
|
||||
#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
|
||||
bool find_lock_delalloc_range(struct inode *inode,
|
||||
struct page *locked_page, u64 *start,
|
||||
|
@ -555,5 +327,4 @@ bool find_lock_delalloc_range(struct inode *inode,
|
|||
#endif
|
||||
struct extent_buffer *alloc_test_extent_buffer(struct btrfs_fs_info *fs_info,
|
||||
u64 start);
|
||||
|
||||
#endif
|
||||
|
|
|
@ -214,9 +214,13 @@ static int mergable_maps(struct extent_map *prev, struct extent_map *next)
|
|||
ASSERT(next->block_start != EXTENT_MAP_DELALLOC &&
|
||||
prev->block_start != EXTENT_MAP_DELALLOC);
|
||||
|
||||
if (prev->map_lookup || next->map_lookup)
|
||||
ASSERT(test_bit(EXTENT_FLAG_FS_MAPPING, &prev->flags) &&
|
||||
test_bit(EXTENT_FLAG_FS_MAPPING, &next->flags));
|
||||
|
||||
if (extent_map_end(prev) == next->start &&
|
||||
prev->flags == next->flags &&
|
||||
prev->bdev == next->bdev &&
|
||||
prev->map_lookup == next->map_lookup &&
|
||||
((next->block_start == EXTENT_MAP_HOLE &&
|
||||
prev->block_start == EXTENT_MAP_HOLE) ||
|
||||
(next->block_start == EXTENT_MAP_INLINE &&
|
||||
|
|
|
@ -42,15 +42,8 @@ struct extent_map {
u64 block_len;
u64 generation;
unsigned long flags;
union {
struct block_device *bdev;

/*
* used for chunk mappings
* flags & EXTENT_FLAG_FS_MAPPING must be set
*/
struct map_lookup *map_lookup;
};
/* Used for chunk mappings, flag EXTENT_FLAG_FS_MAPPING must be set */
struct map_lookup *map_lookup;
refcount_t refs;
unsigned int compress_type;
struct list_head list;
@ -945,7 +945,6 @@ void btrfs_extent_item_to_extent_map(struct btrfs_inode *inode,
|
|||
u8 type = btrfs_file_extent_type(leaf, fi);
|
||||
int compress_type = btrfs_file_extent_compression(leaf, fi);
|
||||
|
||||
em->bdev = fs_info->fs_devices->latest_bdev;
|
||||
btrfs_item_key_to_cpu(leaf, &key, slot);
|
||||
extent_start = key.offset;
|
||||
|
||||
|
|
|
@ -296,7 +296,7 @@ static int __btrfs_run_defrag_inode(struct btrfs_fs_info *fs_info,
|
|||
key.objectid = defrag->ino;
|
||||
key.type = BTRFS_INODE_ITEM_KEY;
|
||||
key.offset = 0;
|
||||
inode = btrfs_iget(fs_info->sb, &key, inode_root, NULL);
|
||||
inode = btrfs_iget(fs_info->sb, &key, inode_root);
|
||||
if (IS_ERR(inode)) {
|
||||
ret = PTR_ERR(inode);
|
||||
goto cleanup;
|
||||
|
@ -667,7 +667,6 @@ void btrfs_drop_extent_cache(struct btrfs_inode *inode, u64 start, u64 end,
|
|||
}
|
||||
|
||||
split->generation = gen;
|
||||
split->bdev = em->bdev;
|
||||
split->flags = flags;
|
||||
split->compress_type = em->compress_type;
|
||||
replace_extent_mapping(em_tree, em, split, modified);
|
||||
|
@ -680,7 +679,6 @@ void btrfs_drop_extent_cache(struct btrfs_inode *inode, u64 start, u64 end,
|
|||
|
||||
split->start = start + len;
|
||||
split->len = em->start + em->len - (start + len);
|
||||
split->bdev = em->bdev;
|
||||
split->flags = flags;
|
||||
split->compress_type = em->compress_type;
|
||||
split->generation = gen;
|
||||
|
@ -1636,6 +1634,7 @@ static noinline ssize_t btrfs_buffered_write(struct kiocb *iocb,
break;
}

only_release_metadata = false;
sector_offset = pos & (fs_info->sectorsize - 1);
reserve_bytes = round_up(write_bytes + sector_offset,
fs_info->sectorsize);
@ -1791,7 +1790,6 @@ again:
set_extent_bit(&BTRFS_I(inode)->io_tree, lockstart,
lockend, EXTENT_NORESERVE, NULL,
NULL, GFP_NOFS);
only_release_metadata = false;
}

btrfs_drop_pages(pages, num_pages);
@ -1903,9 +1901,10 @@ static ssize_t btrfs_file_write_iter(struct kiocb *iocb,
(iocb->ki_flags & IOCB_NOWAIT))
return -EOPNOTSUPP;

if (!inode_trylock(inode)) {
if (iocb->ki_flags & IOCB_NOWAIT)
if (iocb->ki_flags & IOCB_NOWAIT) {
if (!inode_trylock(inode))
return -EAGAIN;
} else {
inode_lock(inode);
}

@ -2359,7 +2358,6 @@ out:
hole_em->block_start = EXTENT_MAP_HOLE;
hole_em->block_len = 0;
hole_em->orig_block_len = 0;
hole_em->bdev = fs_info->fs_devices->latest_bdev;
hole_em->compress_type = BTRFS_COMPRESS_NONE;
hole_em->generation = trans->transid;

@ -3350,29 +3348,30 @@ out:
return ret;
}

static int find_desired_extent(struct inode *inode, loff_t *offset, int whence)
static loff_t find_desired_extent(struct inode *inode, loff_t offset,
int whence)
{
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct extent_map *em = NULL;
struct extent_state *cached_state = NULL;
loff_t i_size = inode->i_size;
u64 lockstart;
u64 lockend;
u64 start;
u64 len;
int ret = 0;

if (inode->i_size == 0)
if (i_size == 0 || offset >= i_size)
return -ENXIO;

/*
* *offset can be negative, in this case we start finding DATA/HOLE from
* offset can be negative, in this case we start finding DATA/HOLE from
* the very start of the file.
*/
start = max_t(loff_t, 0, *offset);
start = max_t(loff_t, 0, offset);

lockstart = round_down(start, fs_info->sectorsize);
lockend = round_up(i_size_read(inode),
fs_info->sectorsize);
lockend = round_up(i_size, fs_info->sectorsize);
if (lockend <= lockstart)
lockend = lockstart + fs_info->sectorsize;
lockend--;
@ -3381,7 +3380,7 @@ static int find_desired_extent(struct inode *inode, loff_t *offset, int whence)
lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend,
&cached_state);

while (start < inode->i_size) {
while (start < i_size) {
em = btrfs_get_extent_fiemap(BTRFS_I(inode), start, len);
if (IS_ERR(em)) {
ret = PTR_ERR(em);
@ -3404,46 +3403,39 @@ static int find_desired_extent(struct inode *inode, loff_t *offset, int whence)
cond_resched();
}
free_extent_map(em);
if (!ret) {
if (whence == SEEK_DATA && start >= inode->i_size)
ret = -ENXIO;
else
*offset = min_t(loff_t, start, inode->i_size);
}
unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
&cached_state);
return ret;
if (ret) {
offset = ret;
} else {
if (whence == SEEK_DATA && start >= i_size)
offset = -ENXIO;
else
offset = min_t(loff_t, start, i_size);
}

return offset;
}

static loff_t btrfs_file_llseek(struct file *file, loff_t offset, int whence)
{
struct inode *inode = file->f_mapping->host;
int ret;

inode_lock(inode);
switch (whence) {
case SEEK_END:
case SEEK_CUR:
offset = generic_file_llseek(file, offset, whence);
goto out;
default:
return generic_file_llseek(file, offset, whence);
case SEEK_DATA:
case SEEK_HOLE:
if (offset >= i_size_read(inode)) {
inode_unlock(inode);
return -ENXIO;
}

ret = find_desired_extent(inode, &offset, whence);
if (ret) {
inode_unlock(inode);
return ret;
}
inode_lock_shared(inode);
offset = find_desired_extent(inode, offset, whence);
inode_unlock_shared(inode);
break;
}

offset = vfs_setpos(file, offset, inode->i_sb->s_maxbytes);
out:
inode_unlock(inode);
return offset;
if (offset < 0)
return offset;

return vfs_setpos(file, offset, inode->i_sb->s_maxbytes);
}

static int btrfs_file_open(struct inode *inode, struct file *filp)
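
The llseek rework above is easier to follow in isolation: find_desired_extent() now returns the resulting offset (or a negative errno such as -ENXIO) instead of filling an out-parameter, and SEEK_DATA/SEEK_HOLE take only the shared inode lock, so concurrent lseek callers no longer serialize on the exclusive lock. A condensed sketch of the new SEEK_DATA/SEEK_HOLE path, simplified from the hunks above; the wrapper name is invented for illustration:

	static loff_t btrfs_seek_data_or_hole(struct file *file, loff_t offset,
					      int whence)
	{
		struct inode *inode = file->f_mapping->host;

		/* Shared lock is enough here: the helper only reads extent state. */
		inode_lock_shared(inode);
		offset = find_desired_extent(inode, offset, whence);
		inode_unlock_shared(inode);

		if (offset < 0)
			return offset;
		return vfs_setpos(file, offset, inode->i_sb->s_maxbytes);
	}
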
@ -78,7 +78,7 @@ static struct inode *__lookup_free_space_inode(struct btrfs_root *root,
|
|||
* sure NOFS is set to keep us from deadlocking.
|
||||
*/
|
||||
nofs_flag = memalloc_nofs_save();
|
||||
inode = btrfs_iget_path(fs_info->sb, &location, root, NULL, path);
|
||||
inode = btrfs_iget_path(fs_info->sb, &location, root, path);
|
||||
btrfs_release_path(path);
|
||||
memalloc_nofs_restore(nofs_flag);
|
||||
if (IS_ERR(inode))
|
||||
|
@ -91,8 +91,7 @@ static struct inode *__lookup_free_space_inode(struct btrfs_root *root,
|
|||
return inode;
|
||||
}
|
||||
|
||||
struct inode *lookup_free_space_inode(
|
||||
struct btrfs_block_group_cache *block_group,
|
||||
struct inode *lookup_free_space_inode(struct btrfs_block_group *block_group,
|
||||
struct btrfs_path *path)
|
||||
{
|
||||
struct btrfs_fs_info *fs_info = block_group->fs_info;
|
||||
|
@ -107,7 +106,7 @@ struct inode *lookup_free_space_inode(
|
|||
return inode;
|
||||
|
||||
inode = __lookup_free_space_inode(fs_info->tree_root, path,
|
||||
block_group->key.objectid);
|
||||
block_group->start);
|
||||
if (IS_ERR(inode))
|
||||
return inode;
|
||||
|
||||
|
@ -190,7 +189,7 @@ static int __create_free_space_inode(struct btrfs_root *root,
|
|||
}
|
||||
|
||||
int create_free_space_inode(struct btrfs_trans_handle *trans,
|
||||
struct btrfs_block_group_cache *block_group,
|
||||
struct btrfs_block_group *block_group,
|
||||
struct btrfs_path *path)
|
||||
{
|
||||
int ret;
|
||||
|
@ -201,7 +200,7 @@ int create_free_space_inode(struct btrfs_trans_handle *trans,
|
|||
return ret;
|
||||
|
||||
return __create_free_space_inode(trans->fs_info->tree_root, trans, path,
|
||||
ino, block_group->key.objectid);
|
||||
ino, block_group->start);
|
||||
}
|
||||
|
||||
int btrfs_check_trunc_cache_free_space(struct btrfs_fs_info *fs_info,
|
||||
|
@ -224,7 +223,7 @@ int btrfs_check_trunc_cache_free_space(struct btrfs_fs_info *fs_info,
|
|||
}
|
||||
|
||||
int btrfs_truncate_free_space_cache(struct btrfs_trans_handle *trans,
|
||||
struct btrfs_block_group_cache *block_group,
|
||||
struct btrfs_block_group *block_group,
|
||||
struct inode *inode)
|
||||
{
|
||||
struct btrfs_root *root = BTRFS_I(inode)->root;
|
||||
|
@ -385,6 +384,12 @@ static int io_ctl_prepare_pages(struct btrfs_io_ctl *io_ctl, struct inode *inode
|
|||
if (uptodate && !PageUptodate(page)) {
|
||||
btrfs_readpage(NULL, page);
|
||||
lock_page(page);
|
||||
if (page->mapping != inode->i_mapping) {
|
||||
btrfs_err(BTRFS_I(inode)->root->fs_info,
|
||||
"free space cache page truncated");
|
||||
io_ctl_drop_pages(io_ctl);
|
||||
return -EIO;
|
||||
}
|
||||
if (!PageUptodate(page)) {
|
||||
btrfs_err(BTRFS_I(inode)->root->fs_info,
|
||||
"error reading free space cache");
|
||||
|
@ -814,7 +819,7 @@ free_cache:
|
|||
goto out;
|
||||
}
|
||||
|
||||
int load_free_space_cache(struct btrfs_block_group_cache *block_group)
|
||||
int load_free_space_cache(struct btrfs_block_group *block_group)
|
||||
{
|
||||
struct btrfs_fs_info *fs_info = block_group->fs_info;
|
||||
struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
|
||||
|
@ -822,7 +827,7 @@ int load_free_space_cache(struct btrfs_block_group_cache *block_group)
|
|||
struct btrfs_path *path;
|
||||
int ret = 0;
|
||||
bool matched;
|
||||
u64 used = btrfs_block_group_used(&block_group->item);
|
||||
u64 used = block_group->used;
|
||||
|
||||
/*
|
||||
* If this block group has been marked to be cleared for one reason or
|
||||
|
@ -876,13 +881,13 @@ int load_free_space_cache(struct btrfs_block_group_cache *block_group)
|
|||
spin_unlock(&block_group->lock);
|
||||
|
||||
ret = __load_free_space_cache(fs_info->tree_root, inode, ctl,
|
||||
path, block_group->key.objectid);
|
||||
path, block_group->start);
|
||||
btrfs_free_path(path);
|
||||
if (ret <= 0)
|
||||
goto out;
|
||||
|
||||
spin_lock(&ctl->tree_lock);
|
||||
matched = (ctl->free_space == (block_group->key.offset - used -
|
||||
matched = (ctl->free_space == (block_group->length - used -
|
||||
block_group->bytes_super));
|
||||
spin_unlock(&ctl->tree_lock);
|
||||
|
||||
|
@ -890,7 +895,7 @@ int load_free_space_cache(struct btrfs_block_group_cache *block_group)
|
|||
__btrfs_remove_free_space_cache(ctl);
|
||||
btrfs_warn(fs_info,
|
||||
"block group %llu has wrong amount of free space",
|
||||
block_group->key.objectid);
|
||||
block_group->start);
|
||||
ret = -1;
|
||||
}
|
||||
out:
|
||||
|
@ -903,7 +908,7 @@ out:
|
|||
|
||||
btrfs_warn(fs_info,
|
||||
"failed to load free space cache for block group %llu, rebuilding it now",
|
||||
block_group->key.objectid);
|
||||
block_group->start);
|
||||
}
|
||||
|
||||
iput(inode);
|
||||
|
@ -913,7 +918,7 @@ out:
|
|||
static noinline_for_stack
|
||||
int write_cache_extent_entries(struct btrfs_io_ctl *io_ctl,
|
||||
struct btrfs_free_space_ctl *ctl,
|
||||
struct btrfs_block_group_cache *block_group,
|
||||
struct btrfs_block_group *block_group,
|
||||
int *entries, int *bitmaps,
|
||||
struct list_head *bitmap_list)
|
||||
{
|
||||
|
@ -1041,7 +1046,7 @@ fail:
|
|||
}
|
||||
|
||||
static noinline_for_stack int write_pinned_extent_entries(
|
||||
struct btrfs_block_group_cache *block_group,
|
||||
struct btrfs_block_group *block_group,
|
||||
struct btrfs_io_ctl *io_ctl,
|
||||
int *entries)
|
||||
{
|
||||
|
@ -1061,9 +1066,9 @@ static noinline_for_stack int write_pinned_extent_entries(
|
|||
*/
|
||||
unpin = block_group->fs_info->pinned_extents;
|
||||
|
||||
start = block_group->key.objectid;
|
||||
start = block_group->start;
|
||||
|
||||
while (start < block_group->key.objectid + block_group->key.offset) {
|
||||
while (start < block_group->start + block_group->length) {
|
||||
ret = find_first_extent_bit(unpin, start,
|
||||
&extent_start, &extent_end,
|
||||
EXTENT_DIRTY, NULL);
|
||||
|
@ -1071,13 +1076,12 @@ static noinline_for_stack int write_pinned_extent_entries(
|
|||
return 0;
|
||||
|
||||
/* This pinned extent is out of our range */
|
||||
if (extent_start >= block_group->key.objectid +
|
||||
block_group->key.offset)
|
||||
if (extent_start >= block_group->start + block_group->length)
|
||||
return 0;
|
||||
|
||||
extent_start = max(extent_start, start);
|
||||
extent_end = min(block_group->key.objectid +
|
||||
block_group->key.offset, extent_end + 1);
|
||||
extent_end = min(block_group->start + block_group->length,
|
||||
extent_end + 1);
|
||||
len = extent_end - extent_start;
|
||||
|
||||
*entries += 1;
|
||||
|
@ -1141,7 +1145,7 @@ cleanup_write_cache_enospc(struct inode *inode,
|
|||
|
||||
static int __btrfs_wait_cache_io(struct btrfs_root *root,
|
||||
struct btrfs_trans_handle *trans,
|
||||
struct btrfs_block_group_cache *block_group,
|
||||
struct btrfs_block_group *block_group,
|
||||
struct btrfs_io_ctl *io_ctl,
|
||||
struct btrfs_path *path, u64 offset)
|
||||
{
|
||||
|
@ -1168,7 +1172,7 @@ out:
|
|||
#ifdef DEBUG
|
||||
btrfs_err(root->fs_info,
|
||||
"failed to write free space cache for block group %llu",
|
||||
block_group->key.objectid);
|
||||
block_group->start);
|
||||
#endif
|
||||
}
|
||||
}
|
||||
|
@ -1210,12 +1214,12 @@ static int btrfs_wait_cache_io_root(struct btrfs_root *root,
|
|||
}
|
||||
|
||||
int btrfs_wait_cache_io(struct btrfs_trans_handle *trans,
|
||||
struct btrfs_block_group_cache *block_group,
|
||||
struct btrfs_block_group *block_group,
|
||||
struct btrfs_path *path)
|
||||
{
|
||||
return __btrfs_wait_cache_io(block_group->fs_info->tree_root, trans,
|
||||
block_group, &block_group->io_ctl,
|
||||
path, block_group->key.objectid);
|
||||
path, block_group->start);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -1231,7 +1235,7 @@ int btrfs_wait_cache_io(struct btrfs_trans_handle *trans,
|
|||
*/
|
||||
static int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
|
||||
struct btrfs_free_space_ctl *ctl,
|
||||
struct btrfs_block_group_cache *block_group,
|
||||
struct btrfs_block_group *block_group,
|
||||
struct btrfs_io_ctl *io_ctl,
|
||||
struct btrfs_trans_handle *trans)
|
||||
{
|
||||
|
@ -1369,7 +1373,7 @@ out_unlock:
|
|||
}
|
||||
|
||||
int btrfs_write_out_cache(struct btrfs_trans_handle *trans,
|
||||
struct btrfs_block_group_cache *block_group,
|
||||
struct btrfs_block_group *block_group,
|
||||
struct btrfs_path *path)
|
||||
{
|
||||
struct btrfs_fs_info *fs_info = trans->fs_info;
|
||||
|
@ -1394,7 +1398,7 @@ int btrfs_write_out_cache(struct btrfs_trans_handle *trans,
|
|||
#ifdef DEBUG
|
||||
btrfs_err(fs_info,
|
||||
"failed to write free space cache for block group %llu",
|
||||
block_group->key.objectid);
|
||||
block_group->start);
|
||||
#endif
|
||||
spin_lock(&block_group->lock);
|
||||
block_group->disk_cache_state = BTRFS_DC_ERROR;
|
||||
|
@ -1647,11 +1651,11 @@ static int link_free_space(struct btrfs_free_space_ctl *ctl,
|
|||
|
||||
static void recalculate_thresholds(struct btrfs_free_space_ctl *ctl)
|
||||
{
|
||||
struct btrfs_block_group_cache *block_group = ctl->private;
|
||||
struct btrfs_block_group *block_group = ctl->private;
|
||||
u64 max_bytes;
|
||||
u64 bitmap_bytes;
|
||||
u64 extent_bytes;
|
||||
u64 size = block_group->key.offset;
|
||||
u64 size = block_group->length;
|
||||
u64 bytes_per_bg = BITS_PER_BITMAP * ctl->unit;
|
||||
u64 max_bitmaps = div64_u64(size + bytes_per_bg - 1, bytes_per_bg);
|
||||
|
||||
|
@ -1991,7 +1995,7 @@ static u64 add_bytes_to_bitmap(struct btrfs_free_space_ctl *ctl,
|
|||
static bool use_bitmap(struct btrfs_free_space_ctl *ctl,
|
||||
struct btrfs_free_space *info)
|
||||
{
|
||||
struct btrfs_block_group_cache *block_group = ctl->private;
|
||||
struct btrfs_block_group *block_group = ctl->private;
|
||||
struct btrfs_fs_info *fs_info = block_group->fs_info;
|
||||
bool forced = false;
|
||||
|
||||
|
@ -2028,7 +2032,7 @@ static bool use_bitmap(struct btrfs_free_space_ctl *ctl,
|
|||
* so allow those block groups to still be allowed to have a bitmap
|
||||
* entry.
|
||||
*/
|
||||
if (((BITS_PER_BITMAP * ctl->unit) >> 1) > block_group->key.offset)
|
||||
if (((BITS_PER_BITMAP * ctl->unit) >> 1) > block_group->length)
|
||||
return false;
|
||||
|
||||
return true;
|
||||
|
@ -2043,7 +2047,7 @@ static int insert_into_bitmap(struct btrfs_free_space_ctl *ctl,
|
|||
struct btrfs_free_space *info)
|
||||
{
|
||||
struct btrfs_free_space *bitmap_info;
|
||||
struct btrfs_block_group_cache *block_group = NULL;
|
||||
struct btrfs_block_group *block_group = NULL;
|
||||
int added = 0;
|
||||
u64 bytes, offset, bytes_added;
|
||||
int ret;
|
||||
|
@ -2380,7 +2384,7 @@ out:
|
|||
return ret;
|
||||
}
|
||||
|
||||
int btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
|
||||
int btrfs_add_free_space(struct btrfs_block_group *block_group,
|
||||
u64 bytenr, u64 size)
|
||||
{
|
||||
return __btrfs_add_free_space(block_group->fs_info,
|
||||
|
@ -2388,7 +2392,7 @@ int btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
|
|||
bytenr, size);
|
||||
}
|
||||
|
||||
int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group,
|
||||
int btrfs_remove_free_space(struct btrfs_block_group *block_group,
|
||||
u64 offset, u64 bytes)
|
||||
{
|
||||
struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
|
||||
|
@ -2478,7 +2482,7 @@ out:
|
|||
return ret;
|
||||
}
|
||||
|
||||
void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group,
|
||||
void btrfs_dump_free_space(struct btrfs_block_group *block_group,
|
||||
u64 bytes)
|
||||
{
|
||||
struct btrfs_fs_info *fs_info = block_group->fs_info;
|
||||
|
@ -2503,14 +2507,14 @@ void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group,
|
|||
"%d blocks of free space at or bigger than bytes is", count);
|
||||
}
|
||||
|
||||
void btrfs_init_free_space_ctl(struct btrfs_block_group_cache *block_group)
|
||||
void btrfs_init_free_space_ctl(struct btrfs_block_group *block_group)
|
||||
{
|
||||
struct btrfs_fs_info *fs_info = block_group->fs_info;
|
||||
struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
|
||||
|
||||
spin_lock_init(&ctl->tree_lock);
|
||||
ctl->unit = fs_info->sectorsize;
|
||||
ctl->start = block_group->key.objectid;
|
||||
ctl->start = block_group->start;
|
||||
ctl->private = block_group;
|
||||
ctl->op = &free_space_op;
|
||||
INIT_LIST_HEAD(&ctl->trimming_ranges);
|
||||
|
@ -2532,7 +2536,7 @@ void btrfs_init_free_space_ctl(struct btrfs_block_group_cache *block_group)
|
|||
*/
|
||||
static int
|
||||
__btrfs_return_cluster_to_free_space(
|
||||
struct btrfs_block_group_cache *block_group,
|
||||
struct btrfs_block_group *block_group,
|
||||
struct btrfs_free_cluster *cluster)
|
||||
{
|
||||
struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
|
||||
|
@ -2598,7 +2602,7 @@ void __btrfs_remove_free_space_cache(struct btrfs_free_space_ctl *ctl)
|
|||
spin_unlock(&ctl->tree_lock);
|
||||
}
|
||||
|
||||
void btrfs_remove_free_space_cache(struct btrfs_block_group_cache *block_group)
|
||||
void btrfs_remove_free_space_cache(struct btrfs_block_group *block_group)
|
||||
{
|
||||
struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
|
||||
struct btrfs_free_cluster *cluster;
|
||||
|
@ -2620,7 +2624,7 @@ void btrfs_remove_free_space_cache(struct btrfs_block_group_cache *block_group)
|
|||
|
||||
}
|
||||
|
||||
u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group,
|
||||
u64 btrfs_find_space_for_alloc(struct btrfs_block_group *block_group,
u64 offset, u64 bytes, u64 empty_size,
u64 *max_extent_size)
{

@@ -2674,7 +2678,7 @@ out:
 * cluster and remove the cluster from it.
 */
int btrfs_return_cluster_to_free_space(
-struct btrfs_block_group_cache *block_group,
+struct btrfs_block_group *block_group,
struct btrfs_free_cluster *cluster)
{
struct btrfs_free_space_ctl *ctl;

@@ -2708,7 +2712,7 @@ int btrfs_return_cluster_to_free_space(
return ret;
}

-static u64 btrfs_alloc_from_bitmap(struct btrfs_block_group_cache *block_group,
+static u64 btrfs_alloc_from_bitmap(struct btrfs_block_group *block_group,
struct btrfs_free_cluster *cluster,
struct btrfs_free_space *entry,
u64 bytes, u64 min_start,

@@ -2741,7 +2745,7 @@ static u64 btrfs_alloc_from_bitmap(struct btrfs_block_group_cache *block_group,
 * if it couldn't find anything suitably large, or a logical disk offset
 * if things worked out
 */
-u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group,
+u64 btrfs_alloc_from_cluster(struct btrfs_block_group *block_group,
struct btrfs_free_cluster *cluster, u64 bytes,
u64 min_start, u64 *max_extent_size)
{

@@ -2827,7 +2831,7 @@ out:
return ret;
}

-static int btrfs_bitmap_cluster(struct btrfs_block_group_cache *block_group,
+static int btrfs_bitmap_cluster(struct btrfs_block_group *block_group,
struct btrfs_free_space *entry,
struct btrfs_free_cluster *cluster,
u64 offset, u64 bytes,

@@ -2909,7 +2913,7 @@ again:
 * extent of cont1_bytes, and other clusters of at least min_bytes.
 */
static noinline int
-setup_cluster_no_bitmap(struct btrfs_block_group_cache *block_group,
+setup_cluster_no_bitmap(struct btrfs_block_group *block_group,
struct btrfs_free_cluster *cluster,
struct list_head *bitmaps, u64 offset, u64 bytes,
u64 cont1_bytes, u64 min_bytes)

@@ -3000,7 +3004,7 @@ setup_cluster_no_bitmap(struct btrfs_block_group_cache *block_group,
 * that we have already failed to find extents that will work.
 */
static noinline int
-setup_cluster_bitmap(struct btrfs_block_group_cache *block_group,
+setup_cluster_bitmap(struct btrfs_block_group *block_group,
struct btrfs_free_cluster *cluster,
struct list_head *bitmaps, u64 offset, u64 bytes,
u64 cont1_bytes, u64 min_bytes)

@@ -3050,7 +3054,7 @@ setup_cluster_bitmap(struct btrfs_block_group_cache *block_group,
 * returns zero and sets up cluster if things worked out, otherwise
 * it returns -enospc
 */
-int btrfs_find_space_cluster(struct btrfs_block_group_cache *block_group,
+int btrfs_find_space_cluster(struct btrfs_block_group *block_group,
struct btrfs_free_cluster *cluster,
u64 offset, u64 bytes, u64 empty_size)
{

@@ -3141,7 +3145,7 @@ void btrfs_init_free_cluster(struct btrfs_free_cluster *cluster)
cluster->block_group = NULL;
}

-static int do_trimming(struct btrfs_block_group_cache *block_group,
+static int do_trimming(struct btrfs_block_group *block_group,
u64 *total_trimmed, u64 start, u64 bytes,
u64 reserved_start, u64 reserved_bytes,
struct btrfs_trim_range *trim_entry)

@@ -3186,7 +3190,7 @@ static int do_trimming(struct btrfs_block_group_cache *block_group,
return ret;
}

-static int trim_no_bitmap(struct btrfs_block_group_cache *block_group,
+static int trim_no_bitmap(struct btrfs_block_group *block_group,
u64 *total_trimmed, u64 start, u64 end, u64 minlen)
{
struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;

@@ -3271,7 +3275,7 @@ out:
return ret;
}

-static int trim_bitmaps(struct btrfs_block_group_cache *block_group,
+static int trim_bitmaps(struct btrfs_block_group *block_group,
u64 *total_trimmed, u64 start, u64 end, u64 minlen)
{
struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;

@@ -3352,12 +3356,12 @@ next:
return ret;
}

-void btrfs_get_block_group_trimming(struct btrfs_block_group_cache *cache)
+void btrfs_get_block_group_trimming(struct btrfs_block_group *cache)
{
atomic_inc(&cache->trimming);
}

-void btrfs_put_block_group_trimming(struct btrfs_block_group_cache *block_group)
+void btrfs_put_block_group_trimming(struct btrfs_block_group *block_group)
{
struct btrfs_fs_info *fs_info = block_group->fs_info;
struct extent_map_tree *em_tree;

@@ -3373,7 +3377,7 @@ void btrfs_put_block_group_trimming(struct btrfs_block_group_cache *block_group)
mutex_lock(&fs_info->chunk_mutex);
em_tree = &fs_info->mapping_tree;
write_lock(&em_tree->lock);
-em = lookup_extent_mapping(em_tree, block_group->key.objectid,
+em = lookup_extent_mapping(em_tree, block_group->start,
1);
BUG_ON(!em); /* logic error, can't happen */
remove_extent_mapping(em_tree, em);

@@ -3392,7 +3396,7 @@ void btrfs_put_block_group_trimming(struct btrfs_block_group_cache *block_group)
}
}

-int btrfs_trim_block_group(struct btrfs_block_group_cache *block_group,
+int btrfs_trim_block_group(struct btrfs_block_group *block_group,
u64 *trimmed, u64 start, u64 end, u64 minlen)
{
int ret;

@@ -3590,7 +3594,7 @@ int btrfs_write_out_ino_cache(struct btrfs_root *root,
 * how the free space cache loading stuff works, so you can get really weird
 * configurations.
 */
-int test_add_free_space_entry(struct btrfs_block_group_cache *cache,
+int test_add_free_space_entry(struct btrfs_block_group *cache,
u64 offset, u64 bytes, bool bitmap)
{
struct btrfs_free_space_ctl *ctl = cache->free_space_ctl;

@@ -3658,7 +3662,7 @@ again:
 * just used to check the absence of space, so if there is free space in the
 * range at all we will return 1.
 */
-int test_check_exists(struct btrfs_block_group_cache *cache,
+int test_check_exists(struct btrfs_block_group *cache,
u64 offset, u64 bytes)
{
struct btrfs_free_space_ctl *ctl = cache->free_space_ctl;
@@ -50,24 +50,23 @@ struct btrfs_io_ctl {
unsigned check_crcs:1;
};

-struct inode *lookup_free_space_inode(
-struct btrfs_block_group_cache *block_group,
+struct inode *lookup_free_space_inode(struct btrfs_block_group *block_group,
struct btrfs_path *path);
int create_free_space_inode(struct btrfs_trans_handle *trans,
-struct btrfs_block_group_cache *block_group,
+struct btrfs_block_group *block_group,
struct btrfs_path *path);

int btrfs_check_trunc_cache_free_space(struct btrfs_fs_info *fs_info,
struct btrfs_block_rsv *rsv);
int btrfs_truncate_free_space_cache(struct btrfs_trans_handle *trans,
-struct btrfs_block_group_cache *block_group,
+struct btrfs_block_group *block_group,
struct inode *inode);
-int load_free_space_cache(struct btrfs_block_group_cache *block_group);
+int load_free_space_cache(struct btrfs_block_group *block_group);
int btrfs_wait_cache_io(struct btrfs_trans_handle *trans,
-struct btrfs_block_group_cache *block_group,
+struct btrfs_block_group *block_group,
struct btrfs_path *path);
int btrfs_write_out_cache(struct btrfs_trans_handle *trans,
-struct btrfs_block_group_cache *block_group,
+struct btrfs_block_group *block_group,
struct btrfs_path *path);
struct inode *lookup_free_ino_inode(struct btrfs_root *root,
struct btrfs_path *path);

@@ -81,42 +80,40 @@ int btrfs_write_out_ino_cache(struct btrfs_root *root,
struct btrfs_path *path,
struct inode *inode);

-void btrfs_init_free_space_ctl(struct btrfs_block_group_cache *block_group);
+void btrfs_init_free_space_ctl(struct btrfs_block_group *block_group);
int __btrfs_add_free_space(struct btrfs_fs_info *fs_info,
struct btrfs_free_space_ctl *ctl,
u64 bytenr, u64 size);
-int btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
+int btrfs_add_free_space(struct btrfs_block_group *block_group,
u64 bytenr, u64 size);
-int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group,
+int btrfs_remove_free_space(struct btrfs_block_group *block_group,
u64 bytenr, u64 size);
void __btrfs_remove_free_space_cache(struct btrfs_free_space_ctl *ctl);
-void btrfs_remove_free_space_cache(struct btrfs_block_group_cache
-*block_group);
-u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group,
+void btrfs_remove_free_space_cache(struct btrfs_block_group *block_group);
+u64 btrfs_find_space_for_alloc(struct btrfs_block_group *block_group,
u64 offset, u64 bytes, u64 empty_size,
u64 *max_extent_size);
u64 btrfs_find_ino_for_alloc(struct btrfs_root *fs_root);
-void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group,
+void btrfs_dump_free_space(struct btrfs_block_group *block_group,
u64 bytes);
-int btrfs_find_space_cluster(struct btrfs_block_group_cache *block_group,
+int btrfs_find_space_cluster(struct btrfs_block_group *block_group,
struct btrfs_free_cluster *cluster,
u64 offset, u64 bytes, u64 empty_size);
void btrfs_init_free_cluster(struct btrfs_free_cluster *cluster);
-u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group,
+u64 btrfs_alloc_from_cluster(struct btrfs_block_group *block_group,
struct btrfs_free_cluster *cluster, u64 bytes,
u64 min_start, u64 *max_extent_size);
int btrfs_return_cluster_to_free_space(
-struct btrfs_block_group_cache *block_group,
+struct btrfs_block_group *block_group,
struct btrfs_free_cluster *cluster);
-int btrfs_trim_block_group(struct btrfs_block_group_cache *block_group,
+int btrfs_trim_block_group(struct btrfs_block_group *block_group,
u64 *trimmed, u64 start, u64 end, u64 minlen);

/* Support functions for running our sanity tests */
#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
-int test_add_free_space_entry(struct btrfs_block_group_cache *cache,
+int test_add_free_space_entry(struct btrfs_block_group *cache,
u64 offset, u64 bytes, bool bitmap);
-int test_check_exists(struct btrfs_block_group_cache *cache,
-u64 offset, u64 bytes);
+int test_check_exists(struct btrfs_block_group *cache, u64 offset, u64 bytes);
#endif

#endif
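The free-space-cache hunks above and the free-space-tree hunks below all apply the same mechanical conversion: struct btrfs_block_group_cache becomes struct btrfs_block_group, and callers read a group's position and size from the new start and length members instead of key.objectid and key.offset. A minimal stand-alone sketch of the before and after field access; the reduced struct definitions here are illustrative stand-ins, not the kernel's full types:

    /* Illustrative stand-ins only, not the kernel definitions. */
    #include <stdio.h>

    struct btrfs_key { unsigned long long objectid, type, offset; };
    struct block_group_cache_old { struct btrfs_key key; };        /* before */
    struct block_group_new { unsigned long long start, length; };  /* after */

    static unsigned long long old_end(const struct block_group_cache_old *bg)
    {
            return bg->key.objectid + bg->key.offset;  /* old spelling */
    }

    static unsigned long long new_end(const struct block_group_new *bg)
    {
            return bg->start + bg->length;             /* new spelling */
    }

    int main(void)
    {
            struct block_group_cache_old o = { { 1024, 0, 4096 } };
            struct block_group_new n = { 1024, 4096 };

            printf("%llu %llu\n", old_end(&o), new_end(&n));
            return 0;
    }
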
@@ -13,10 +13,10 @@
#include "block-group.h"

static int __add_block_group_free_space(struct btrfs_trans_handle *trans,
-struct btrfs_block_group_cache *block_group,
+struct btrfs_block_group *block_group,
struct btrfs_path *path);

-void set_free_space_tree_thresholds(struct btrfs_block_group_cache *cache)
+void set_free_space_tree_thresholds(struct btrfs_block_group *cache)
{
u32 bitmap_range;
size_t bitmap_size;

@@ -27,8 +27,7 @@ void set_free_space_tree_thresholds(struct btrfs_block_group_cache *cache)
 * exceeds that required for using bitmaps.
 */
bitmap_range = cache->fs_info->sectorsize * BTRFS_FREE_SPACE_BITMAP_BITS;
-num_bitmaps = div_u64(cache->key.offset + bitmap_range - 1,
-bitmap_range);
+num_bitmaps = div_u64(cache->length + bitmap_range - 1, bitmap_range);
bitmap_size = sizeof(struct btrfs_item) + BTRFS_FREE_SPACE_BITMAP_SIZE;
total_bitmap_size = num_bitmaps * bitmap_size;
cache->bitmap_high_thresh = div_u64(total_bitmap_size,

@@ -45,7 +44,7 @@ void set_free_space_tree_thresholds(struct btrfs_block_group_cache *cache)
}

static int add_new_free_space_info(struct btrfs_trans_handle *trans,
-struct btrfs_block_group_cache *block_group,
+struct btrfs_block_group *block_group,
struct btrfs_path *path)
{
struct btrfs_root *root = trans->fs_info->free_space_root;

@@ -54,9 +53,9 @@ static int add_new_free_space_info(struct btrfs_trans_handle *trans,
struct extent_buffer *leaf;
int ret;

-key.objectid = block_group->key.objectid;
+key.objectid = block_group->start;
key.type = BTRFS_FREE_SPACE_INFO_KEY;
-key.offset = block_group->key.offset;
+key.offset = block_group->length;

ret = btrfs_insert_empty_item(trans, root, path, &key, sizeof(*info));
if (ret)

@@ -78,7 +77,7 @@ out:
EXPORT_FOR_TESTS
struct btrfs_free_space_info *search_free_space_info(
struct btrfs_trans_handle *trans,
-struct btrfs_block_group_cache *block_group,
+struct btrfs_block_group *block_group,
struct btrfs_path *path, int cow)
{
struct btrfs_fs_info *fs_info = block_group->fs_info;

@@ -86,16 +85,16 @@ struct btrfs_free_space_info *search_free_space_info(
struct btrfs_key key;
int ret;

-key.objectid = block_group->key.objectid;
+key.objectid = block_group->start;
key.type = BTRFS_FREE_SPACE_INFO_KEY;
-key.offset = block_group->key.offset;
+key.offset = block_group->length;

ret = btrfs_search_slot(trans, root, &key, path, 0, cow);
if (ret < 0)
return ERR_PTR(ret);
if (ret != 0) {
btrfs_warn(fs_info, "missing free space info for %llu",
-block_group->key.objectid);
+block_group->start);
ASSERT(0);
return ERR_PTR(-ENOENT);
}

@@ -180,7 +179,7 @@ static void le_bitmap_set(unsigned long *map, unsigned int start, int len)

EXPORT_FOR_TESTS
int convert_free_space_to_bitmaps(struct btrfs_trans_handle *trans,
-struct btrfs_block_group_cache *block_group,
+struct btrfs_block_group *block_group,
struct btrfs_path *path)
{
struct btrfs_fs_info *fs_info = trans->fs_info;

@@ -197,7 +196,7 @@ int convert_free_space_to_bitmaps(struct btrfs_trans_handle *trans,
int done = 0, nr;
int ret;

-bitmap_size = free_space_bitmap_size(block_group->key.offset,
+bitmap_size = free_space_bitmap_size(block_group->length,
fs_info->sectorsize);
bitmap = alloc_bitmap(bitmap_size);
if (!bitmap) {

@@ -205,8 +204,8 @@ int convert_free_space_to_bitmaps(struct btrfs_trans_handle *trans,
goto out;
}

-start = block_group->key.objectid;
-end = block_group->key.objectid + block_group->key.offset;
+start = block_group->start;
+end = block_group->start + block_group->length;

key.objectid = end - 1;
key.type = (u8)-1;

@@ -224,8 +223,8 @@ int convert_free_space_to_bitmaps(struct btrfs_trans_handle *trans,
btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0] - 1);

if (found_key.type == BTRFS_FREE_SPACE_INFO_KEY) {
-ASSERT(found_key.objectid == block_group->key.objectid);
-ASSERT(found_key.offset == block_group->key.offset);
+ASSERT(found_key.objectid == block_group->start);
+ASSERT(found_key.offset == block_group->length);
done = 1;
break;
} else if (found_key.type == BTRFS_FREE_SPACE_EXTENT_KEY) {

@@ -271,7 +270,7 @@ int convert_free_space_to_bitmaps(struct btrfs_trans_handle *trans,
if (extent_count != expected_extent_count) {
btrfs_err(fs_info,
"incorrect extent count for %llu; counted %u, expected %u",
-block_group->key.objectid, extent_count,
+block_group->start, extent_count,
expected_extent_count);
ASSERT(0);
ret = -EIO;

@@ -320,7 +319,7 @@ out:

EXPORT_FOR_TESTS
int convert_free_space_to_extents(struct btrfs_trans_handle *trans,
-struct btrfs_block_group_cache *block_group,
+struct btrfs_block_group *block_group,
struct btrfs_path *path)
{
struct btrfs_fs_info *fs_info = trans->fs_info;

@@ -336,7 +335,7 @@ int convert_free_space_to_extents(struct btrfs_trans_handle *trans,
int done = 0, nr;
int ret;

-bitmap_size = free_space_bitmap_size(block_group->key.offset,
+bitmap_size = free_space_bitmap_size(block_group->length,
fs_info->sectorsize);
bitmap = alloc_bitmap(bitmap_size);
if (!bitmap) {

@@ -344,8 +343,8 @@ int convert_free_space_to_extents(struct btrfs_trans_handle *trans,
goto out;
}

-start = block_group->key.objectid;
-end = block_group->key.objectid + block_group->key.offset;
+start = block_group->start;
+end = block_group->start + block_group->length;

key.objectid = end - 1;
key.type = (u8)-1;

@@ -363,8 +362,8 @@ int convert_free_space_to_extents(struct btrfs_trans_handle *trans,
btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0] - 1);

if (found_key.type == BTRFS_FREE_SPACE_INFO_KEY) {
-ASSERT(found_key.objectid == block_group->key.objectid);
-ASSERT(found_key.offset == block_group->key.offset);
+ASSERT(found_key.objectid == block_group->start);
+ASSERT(found_key.offset == block_group->length);
done = 1;
break;
} else if (found_key.type == BTRFS_FREE_SPACE_BITMAP_KEY) {

@@ -413,7 +412,7 @@ int convert_free_space_to_extents(struct btrfs_trans_handle *trans,
btrfs_mark_buffer_dirty(leaf);
btrfs_release_path(path);

-nrbits = div_u64(block_group->key.offset, block_group->fs_info->sectorsize);
+nrbits = div_u64(block_group->length, block_group->fs_info->sectorsize);
start_bit = find_next_bit_le(bitmap, nrbits, 0);

while (start_bit < nrbits) {

@@ -437,7 +436,7 @@ int convert_free_space_to_extents(struct btrfs_trans_handle *trans,
if (extent_count != expected_extent_count) {
btrfs_err(fs_info,
"incorrect extent count for %llu; counted %u, expected %u",
-block_group->key.objectid, extent_count,
+block_group->start, extent_count,
expected_extent_count);
ASSERT(0);
ret = -EIO;

@@ -453,7 +452,7 @@ out:
}

static int update_free_space_extent_count(struct btrfs_trans_handle *trans,
-struct btrfs_block_group_cache *block_group,
+struct btrfs_block_group *block_group,
struct btrfs_path *path,
int new_extents)
{

@@ -491,7 +490,7 @@ out:
}

EXPORT_FOR_TESTS
-int free_space_test_bit(struct btrfs_block_group_cache *block_group,
+int free_space_test_bit(struct btrfs_block_group *block_group,
struct btrfs_path *path, u64 offset)
{
struct extent_buffer *leaf;

@@ -513,7 +512,7 @@ int free_space_test_bit(struct btrfs_block_group_cache *block_group,
return !!extent_buffer_test_bit(leaf, ptr, i);
}

-static void free_space_set_bits(struct btrfs_block_group_cache *block_group,
+static void free_space_set_bits(struct btrfs_block_group *block_group,
struct btrfs_path *path, u64 *start, u64 *size,
int bit)
{

@@ -581,7 +580,7 @@ static int free_space_next_bitmap(struct btrfs_trans_handle *trans,
 * the bitmap.
 */
static int modify_free_space_bitmap(struct btrfs_trans_handle *trans,
-struct btrfs_block_group_cache *block_group,
+struct btrfs_block_group *block_group,
struct btrfs_path *path,
u64 start, u64 size, int remove)
{

@@ -597,7 +596,7 @@ static int modify_free_space_bitmap(struct btrfs_trans_handle *trans,
 * Read the bit for the block immediately before the extent of space if
 * that block is within the block group.
 */
-if (start > block_group->key.objectid) {
+if (start > block_group->start) {
u64 prev_block = start - block_group->fs_info->sectorsize;

key.objectid = prev_block;

@@ -649,7 +648,7 @@ static int modify_free_space_bitmap(struct btrfs_trans_handle *trans,
 * Read the bit for the block immediately after the extent of space if
 * that block is within the block group.
 */
-if (end < block_group->key.objectid + block_group->key.offset) {
+if (end < block_group->start + block_group->length) {
/* The next block may be in the next bitmap. */
btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
if (end >= key.objectid + key.offset) {

@@ -694,7 +693,7 @@ out:
}

static int remove_free_space_extent(struct btrfs_trans_handle *trans,
-struct btrfs_block_group_cache *block_group,
+struct btrfs_block_group *block_group,
struct btrfs_path *path,
u64 start, u64 size)
{

@@ -781,7 +780,7 @@ out:

EXPORT_FOR_TESTS
int __remove_from_free_space_tree(struct btrfs_trans_handle *trans,
-struct btrfs_block_group_cache *block_group,
+struct btrfs_block_group *block_group,
struct btrfs_path *path, u64 start, u64 size)
{
struct btrfs_free_space_info *info;

@@ -812,7 +811,7 @@ int __remove_from_free_space_tree(struct btrfs_trans_handle *trans,
int remove_from_free_space_tree(struct btrfs_trans_handle *trans,
u64 start, u64 size)
{
-struct btrfs_block_group_cache *block_group;
+struct btrfs_block_group *block_group;
struct btrfs_path *path;
int ret;

@@ -846,7 +845,7 @@ out:
}

static int add_free_space_extent(struct btrfs_trans_handle *trans,
-struct btrfs_block_group_cache *block_group,
+struct btrfs_block_group *block_group,
struct btrfs_path *path,
u64 start, u64 size)
{
@@ -880,7 +879,7 @@ static int add_free_space_extent(struct btrfs_trans_handle *trans,
new_key.offset = size;

/* Search for a neighbor on the left. */
-if (start == block_group->key.objectid)
+if (start == block_group->start)
goto right;
key.objectid = start - 1;
key.type = (u8)-1;

@@ -900,8 +899,8 @@ static int add_free_space_extent(struct btrfs_trans_handle *trans,

found_start = key.objectid;
found_end = key.objectid + key.offset;
-ASSERT(found_start >= block_group->key.objectid &&
-found_end > block_group->key.objectid);
+ASSERT(found_start >= block_group->start &&
+found_end > block_group->start);
ASSERT(found_start < start && found_end <= start);

/*

@@ -920,7 +919,7 @@ static int add_free_space_extent(struct btrfs_trans_handle *trans,

right:
/* Search for a neighbor on the right. */
-if (end == block_group->key.objectid + block_group->key.offset)
+if (end == block_group->start + block_group->length)
goto insert;
key.objectid = end;
key.type = (u8)-1;

@@ -940,8 +939,8 @@ right:

found_start = key.objectid;
found_end = key.objectid + key.offset;
-ASSERT(found_start >= block_group->key.objectid &&
-found_end > block_group->key.objectid);
+ASSERT(found_start >= block_group->start &&
+found_end > block_group->start);
ASSERT((found_start < start && found_end <= start) ||
(found_start >= end && found_end > end));

@@ -974,7 +973,7 @@ out:

EXPORT_FOR_TESTS
int __add_to_free_space_tree(struct btrfs_trans_handle *trans,
-struct btrfs_block_group_cache *block_group,
+struct btrfs_block_group *block_group,
struct btrfs_path *path, u64 start, u64 size)
{
struct btrfs_free_space_info *info;

@@ -1005,7 +1004,7 @@ int __add_to_free_space_tree(struct btrfs_trans_handle *trans,
int add_to_free_space_tree(struct btrfs_trans_handle *trans,
u64 start, u64 size)
{
-struct btrfs_block_group_cache *block_group;
+struct btrfs_block_group *block_group;
struct btrfs_path *path;
int ret;

@@ -1043,7 +1042,7 @@ out:
 * through the normal add/remove hooks.
 */
static int populate_free_space_tree(struct btrfs_trans_handle *trans,
-struct btrfs_block_group_cache *block_group)
+struct btrfs_block_group *block_group)
{
struct btrfs_root *extent_root = trans->fs_info->extent_root;
struct btrfs_path *path, *path2;

@@ -1075,7 +1074,7 @@ static int populate_free_space_tree(struct btrfs_trans_handle *trans,
 * BLOCK_GROUP_ITEM, so an extent may precede the block group that it's
 * contained in.
 */
-key.objectid = block_group->key.objectid;
+key.objectid = block_group->start;
key.type = BTRFS_EXTENT_ITEM_KEY;
key.offset = 0;

@@ -1084,8 +1083,8 @@ static int populate_free_space_tree(struct btrfs_trans_handle *trans,
goto out_locked;
ASSERT(ret == 0);

-start = block_group->key.objectid;
-end = block_group->key.objectid + block_group->key.offset;
+start = block_group->start;
+end = block_group->start + block_group->length;
while (1) {
btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);

@@ -1109,7 +1108,7 @@ static int populate_free_space_tree(struct btrfs_trans_handle *trans,
else
start += key.offset;
} else if (key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) {
-if (key.objectid != block_group->key.objectid)
+if (key.objectid != block_group->start)
break;
}

@@ -1140,7 +1139,7 @@ int btrfs_create_free_space_tree(struct btrfs_fs_info *fs_info)
struct btrfs_trans_handle *trans;
struct btrfs_root *tree_root = fs_info->tree_root;
struct btrfs_root *free_space_root;
-struct btrfs_block_group_cache *block_group;
+struct btrfs_block_group *block_group;
struct rb_node *node;
int ret;

@@ -1159,7 +1158,7 @@ int btrfs_create_free_space_tree(struct btrfs_fs_info *fs_info)

node = rb_first(&fs_info->block_group_cache_tree);
while (node) {
-block_group = rb_entry(node, struct btrfs_block_group_cache,
+block_group = rb_entry(node, struct btrfs_block_group,
cache_node);
ret = populate_free_space_tree(trans, block_group);
if (ret)

@@ -1265,7 +1264,7 @@ abort:
}

static int __add_block_group_free_space(struct btrfs_trans_handle *trans,
-struct btrfs_block_group_cache *block_group,
+struct btrfs_block_group *block_group,
struct btrfs_path *path)
{
int ret;

@@ -1277,12 +1276,12 @@ static int __add_block_group_free_space(struct btrfs_trans_handle *trans,
return ret;

return __add_to_free_space_tree(trans, block_group, path,
-block_group->key.objectid,
-block_group->key.offset);
+block_group->start,
+block_group->length);
}

int add_block_group_free_space(struct btrfs_trans_handle *trans,
-struct btrfs_block_group_cache *block_group)
+struct btrfs_block_group *block_group)
{
struct btrfs_fs_info *fs_info = trans->fs_info;
struct btrfs_path *path = NULL;

@@ -1312,7 +1311,7 @@ out:
}

int remove_block_group_free_space(struct btrfs_trans_handle *trans,
-struct btrfs_block_group_cache *block_group)
+struct btrfs_block_group *block_group)
{
struct btrfs_root *root = trans->fs_info->free_space_root;
struct btrfs_path *path;

@@ -1336,8 +1335,8 @@ int remove_block_group_free_space(struct btrfs_trans_handle *trans,
goto out;
}

-start = block_group->key.objectid;
-end = block_group->key.objectid + block_group->key.offset;
+start = block_group->start;
+end = block_group->start + block_group->length;

key.objectid = end - 1;
key.type = (u8)-1;

@@ -1355,8 +1354,8 @@ int remove_block_group_free_space(struct btrfs_trans_handle *trans,
btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0] - 1);

if (found_key.type == BTRFS_FREE_SPACE_INFO_KEY) {
-ASSERT(found_key.objectid == block_group->key.objectid);
-ASSERT(found_key.offset == block_group->key.offset);
+ASSERT(found_key.objectid == block_group->start);
+ASSERT(found_key.offset == block_group->length);
done = 1;
nr++;
path->slots[0]--;

@@ -1391,7 +1390,7 @@ static int load_free_space_bitmaps(struct btrfs_caching_control *caching_ctl,
struct btrfs_path *path,
u32 expected_extent_count)
{
-struct btrfs_block_group_cache *block_group;
+struct btrfs_block_group *block_group;
struct btrfs_fs_info *fs_info;
struct btrfs_root *root;
struct btrfs_key key;

@@ -1407,7 +1406,7 @@ static int load_free_space_bitmaps(struct btrfs_caching_control *caching_ctl,
fs_info = block_group->fs_info;
root = fs_info->free_space_root;

-end = block_group->key.objectid + block_group->key.offset;
+end = block_group->start + block_group->length;

while (1) {
ret = btrfs_next_item(root, path);

@@ -1454,7 +1453,7 @@ static int load_free_space_bitmaps(struct btrfs_caching_control *caching_ctl,
if (extent_count != expected_extent_count) {
btrfs_err(fs_info,
"incorrect extent count for %llu; counted %u, expected %u",
-block_group->key.objectid, extent_count,
+block_group->start, extent_count,
expected_extent_count);
ASSERT(0);
ret = -EIO;

@@ -1472,7 +1471,7 @@ static int load_free_space_extents(struct btrfs_caching_control *caching_ctl,
struct btrfs_path *path,
u32 expected_extent_count)
{
-struct btrfs_block_group_cache *block_group;
+struct btrfs_block_group *block_group;
struct btrfs_fs_info *fs_info;
struct btrfs_root *root;
struct btrfs_key key;

@@ -1485,7 +1484,7 @@ static int load_free_space_extents(struct btrfs_caching_control *caching_ctl,
fs_info = block_group->fs_info;
root = fs_info->free_space_root;

-end = block_group->key.objectid + block_group->key.offset;
+end = block_group->start + block_group->length;

while (1) {
ret = btrfs_next_item(root, path);

@@ -1516,7 +1515,7 @@ static int load_free_space_extents(struct btrfs_caching_control *caching_ctl,
if (extent_count != expected_extent_count) {
btrfs_err(fs_info,
"incorrect extent count for %llu; counted %u, expected %u",
-block_group->key.objectid, extent_count,
+block_group->start, extent_count,
expected_extent_count);
ASSERT(0);
ret = -EIO;

@@ -1532,7 +1531,7 @@ out:

int load_free_space_tree(struct btrfs_caching_control *caching_ctl)
{
-struct btrfs_block_group_cache *block_group;
+struct btrfs_block_group *block_group;
struct btrfs_free_space_info *info;
struct btrfs_path *path;
u32 extent_count, flags;
@@ -16,14 +16,14 @@ struct btrfs_caching_control;
#define BTRFS_FREE_SPACE_BITMAP_SIZE 256
#define BTRFS_FREE_SPACE_BITMAP_BITS (BTRFS_FREE_SPACE_BITMAP_SIZE * BITS_PER_BYTE)

-void set_free_space_tree_thresholds(struct btrfs_block_group_cache *block_group);
+void set_free_space_tree_thresholds(struct btrfs_block_group *block_group);
int btrfs_create_free_space_tree(struct btrfs_fs_info *fs_info);
int btrfs_clear_free_space_tree(struct btrfs_fs_info *fs_info);
int load_free_space_tree(struct btrfs_caching_control *caching_ctl);
int add_block_group_free_space(struct btrfs_trans_handle *trans,
-struct btrfs_block_group_cache *block_group);
+struct btrfs_block_group *block_group);
int remove_block_group_free_space(struct btrfs_trans_handle *trans,
-struct btrfs_block_group_cache *block_group);
+struct btrfs_block_group *block_group);
int add_to_free_space_tree(struct btrfs_trans_handle *trans,
u64 start, u64 size);
int remove_from_free_space_tree(struct btrfs_trans_handle *trans,

@@ -32,21 +32,21 @@ int remove_from_free_space_tree(struct btrfs_trans_handle *trans,
#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
struct btrfs_free_space_info *
search_free_space_info(struct btrfs_trans_handle *trans,
-struct btrfs_block_group_cache *block_group,
+struct btrfs_block_group *block_group,
struct btrfs_path *path, int cow);
int __add_to_free_space_tree(struct btrfs_trans_handle *trans,
-struct btrfs_block_group_cache *block_group,
+struct btrfs_block_group *block_group,
struct btrfs_path *path, u64 start, u64 size);
int __remove_from_free_space_tree(struct btrfs_trans_handle *trans,
-struct btrfs_block_group_cache *block_group,
+struct btrfs_block_group *block_group,
struct btrfs_path *path, u64 start, u64 size);
int convert_free_space_to_bitmaps(struct btrfs_trans_handle *trans,
-struct btrfs_block_group_cache *block_group,
+struct btrfs_block_group *block_group,
struct btrfs_path *path);
int convert_free_space_to_extents(struct btrfs_trans_handle *trans,
-struct btrfs_block_group_cache *block_group,
+struct btrfs_block_group *block_group,
struct btrfs_path *path);
-int free_space_test_bit(struct btrfs_block_group_cache *block_group,
+int free_space_test_bit(struct btrfs_block_group *block_group,
struct btrfs_path *path, u64 offset);
#endif
fs/btrfs/inode.c | 170

@@ -368,6 +368,7 @@ struct async_chunk {
u64 end;
unsigned int write_flags;
struct list_head extents;
+struct cgroup_subsys_state *blkcg_css;
struct btrfs_work work;
atomic_t *pending;
};

@@ -712,10 +713,12 @@ cleanup_and_bail_uncompressed:
 * to our extent and set things up for the async work queue to run
 * cow_file_range to do the normal delalloc dance.
 */
-if (page_offset(async_chunk->locked_page) >= start &&
-page_offset(async_chunk->locked_page) <= end)
+if (async_chunk->locked_page &&
+(page_offset(async_chunk->locked_page) >= start &&
+page_offset(async_chunk->locked_page)) <= end) {
__set_page_dirty_nobuffers(async_chunk->locked_page);
/* unlocked later on in the async handlers */
+}

if (redirty)
extent_range_redirty_for_io(inode, start, end);

@@ -795,7 +798,7 @@ retry:
async_extent->start +
async_extent->ram_size - 1,
WB_SYNC_ALL);
-else if (ret)
+else if (ret && async_chunk->locked_page)
unlock_page(async_chunk->locked_page);
kfree(async_extent);
cond_resched();

@@ -878,7 +881,8 @@ retry:
ins.objectid,
ins.offset, async_extent->pages,
async_extent->nr_pages,
-async_chunk->write_flags)) {
+async_chunk->write_flags,
+async_chunk->blkcg_css)) {
struct page *p = async_extent->pages[0];
const u64 start = async_extent->start;
const u64 end = start + async_extent->ram_size - 1;

@@ -1196,6 +1200,8 @@ static noinline void async_cow_free(struct btrfs_work *work)
async_chunk = container_of(work, struct async_chunk, work);
if (async_chunk->inode)
btrfs_add_delayed_iput(async_chunk->inode);
+if (async_chunk->blkcg_css)
+css_put(async_chunk->blkcg_css);
/*
 * Since the pointer to 'pending' is at the beginning of the array of
 * async_chunk's, freeing it ensures the whole array has been freed.

@@ -1204,12 +1210,14 @@ static noinline void async_cow_free(struct btrfs_work *work)
kvfree(async_chunk->pending);
}

-static int cow_file_range_async(struct inode *inode, struct page *locked_page,
+static int cow_file_range_async(struct inode *inode,
+struct writeback_control *wbc,
+struct page *locked_page,
u64 start, u64 end, int *page_started,
-unsigned long *nr_written,
-unsigned int write_flags)
+unsigned long *nr_written)
{
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
+struct cgroup_subsys_state *blkcg_css = wbc_blkcg_css(wbc);
struct async_cow *ctx;
struct async_chunk *async_chunk;
unsigned long nr_pages;

@@ -1218,6 +1226,7 @@ static int cow_file_range_async(struct inode *inode, struct page *locked_page,
int i;
bool should_compress;
unsigned nofs_flag;
+const unsigned int write_flags = wbc_to_write_flags(wbc);

unlock_extent(&BTRFS_I(inode)->io_tree, start, end);

@@ -1264,14 +1273,45 @@ static int cow_file_range_async(struct inode *inode, struct page *locked_page,
async_chunk[i].inode = inode;
async_chunk[i].start = start;
async_chunk[i].end = cur_end;
-async_chunk[i].locked_page = locked_page;
async_chunk[i].write_flags = write_flags;
INIT_LIST_HEAD(&async_chunk[i].extents);

-btrfs_init_work(&async_chunk[i].work,
-btrfs_delalloc_helper,
-async_cow_start, async_cow_submit,
-async_cow_free);
+/*
+ * The locked_page comes all the way from writepage and its
+ * the original page we were actually given. As we spread
+ * this large delalloc region across multiple async_chunk
+ * structs, only the first struct needs a pointer to locked_page
+ *
+ * This way we don't need racey decisions about who is supposed
+ * to unlock it.
+ */
+if (locked_page) {
+/*
+ * Depending on the compressibility, the pages might or
+ * might not go through async. We want all of them to
+ * be accounted against wbc once. Let's do it here
+ * before the paths diverge. wbc accounting is used
+ * only for foreign writeback detection and doesn't
+ * need full accuracy. Just account the whole thing
+ * against the first page.
+ */
+wbc_account_cgroup_owner(wbc, locked_page,
+cur_end - start);
+async_chunk[i].locked_page = locked_page;
+locked_page = NULL;
+} else {
+async_chunk[i].locked_page = NULL;
+}
+
+if (blkcg_css != blkcg_root_css) {
+css_get(blkcg_css);
+async_chunk[i].blkcg_css = blkcg_css;
+} else {
+async_chunk[i].blkcg_css = NULL;
+}
+
+btrfs_init_work(&async_chunk[i].work, async_cow_start,
+async_cow_submit, async_cow_free);

nr_pages = DIV_ROUND_UP(cur_end - start, PAGE_SIZE);
atomic_add(nr_pages, &fs_info->async_delalloc_pages);
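The comments added in the hunk above describe two ownership rules: the locked page coming from writepage is handed to exactly one async_chunk so that only one worker ever unlocks it, and the whole range is charged to the writeback cgroup via that first page only. A small user-space sketch of the first rule, with made-up names standing in for the kernel structures:

    #include <stdio.h>

    struct page;    /* opaque stand-in for the kernel's struct page */

    struct chunk {
            unsigned long long start, end;
            struct page *locked_page;   /* owned by at most one chunk */
    };

    int main(void)
    {
            struct page *locked_page = (struct page *)0x1;  /* pretend page */
            struct chunk chunks[4];
            unsigned long long cur = 0, chunk_size = 128 * 1024;
            int i;

            for (i = 0; i < 4; i++, cur += chunk_size) {
                    chunks[i].start = cur;
                    chunks[i].end = cur + chunk_size - 1;
                    /* hand the page to the first chunk only, mirroring the
                     * locked_page = NULL trick in the hunk above */
                    chunks[i].locked_page = locked_page;
                    locked_page = NULL;
            }

            for (i = 0; i < 4; i++)
                    printf("chunk %d: %llu-%llu page=%p\n", i, chunks[i].start,
                           chunks[i].end, (void *)chunks[i].locked_page);
            return 0;
    }
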
@@ -1697,7 +1737,6 @@ int btrfs_run_delalloc_range(struct inode *inode, struct page *locked_page,
{
int ret;
int force_cow = need_force_cow(inode, start, end);
-unsigned int write_flags = wbc_to_write_flags(wbc);

if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW && !force_cow) {
ret = run_delalloc_nocow(inode, locked_page, start, end,

@@ -1712,9 +1751,8 @@ int btrfs_run_delalloc_range(struct inode *inode, struct page *locked_page,
} else {
set_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
&BTRFS_I(inode)->runtime_flags);
-ret = cow_file_range_async(inode, locked_page, start, end,
-page_started, nr_written,
-write_flags);
+ret = cow_file_range_async(inode, wbc, locked_page, start, end,
+page_started, nr_written);
}
if (ret)
btrfs_cleanup_ordered_extents(inode, locked_page, start,

@@ -2110,7 +2148,7 @@ static blk_status_t btrfs_submit_bio_hook(struct inode *inode, struct bio *bio,
}

mapit:
-ret = btrfs_map_bio(fs_info, bio, mirror_num, 0);
+ret = btrfs_map_bio(fs_info, bio, mirror_num);

out:
if (ret) {

@@ -2214,12 +2252,16 @@ again:
mapping_set_error(page->mapping, ret);
end_extent_writepage(page, ret, page_start, page_end);
ClearPageChecked(page);
-goto out;
+goto out_reserved;
}

ClearPageChecked(page);
set_page_dirty(page);
+out_reserved:
+btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE);
+if (ret)
+btrfs_delalloc_release_space(inode, data_reserved, page_start,
+PAGE_SIZE, true);
out:
unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start, page_end,
&cached_state);

@@ -2260,8 +2302,7 @@ int btrfs_writepage_cow_fixup(struct page *page, u64 start, u64 end)

SetPageChecked(page);
get_page(page);
-btrfs_init_work(&fixup->work, btrfs_fixup_helper,
-btrfs_writepage_fixup_worker, NULL, NULL);
+btrfs_init_work(&fixup->work, btrfs_writepage_fixup_worker, NULL, NULL);
fixup->page = page;
btrfs_queue_work(fs_info->fixup_workers, &fixup->work);
return -EBUSY;

@@ -2675,7 +2716,7 @@ static noinline int relink_extent_backref(struct btrfs_path *path,
key.type = BTRFS_INODE_ITEM_KEY;
key.offset = 0;

-inode = btrfs_iget(fs_info->sb, &key, root, NULL);
+inode = btrfs_iget(fs_info->sb, &key, root);
if (IS_ERR(inode)) {
srcu_read_unlock(&fs_info->subvol_srcu, index);
return 0;

@@ -2999,7 +3040,7 @@ out_kfree:
static void btrfs_release_delalloc_bytes(struct btrfs_fs_info *fs_info,
u64 start, u64 len)
{
-struct btrfs_block_group_cache *cache;
+struct btrfs_block_group *cache;

cache = btrfs_lookup_block_group(fs_info, start);
ASSERT(cache);

@@ -3027,7 +3068,7 @@ static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent)
int compress_type = 0;
int ret = 0;
u64 logical_len = ordered_extent->len;
-bool nolock;
+bool freespace_inode;
bool truncated = false;
bool range_locked = false;
bool clear_new_delalloc_bytes = false;

@@ -3038,7 +3079,7 @@ static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent)
!test_bit(BTRFS_ORDERED_DIRECT, &ordered_extent->flags))
clear_new_delalloc_bytes = true;

-nolock = btrfs_is_free_space_inode(BTRFS_I(inode));
+freespace_inode = btrfs_is_free_space_inode(BTRFS_I(inode));

if (test_bit(BTRFS_ORDERED_IOERR, &ordered_extent->flags)) {
ret = -EIO;

@@ -3069,8 +3110,8 @@ static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent)
btrfs_qgroup_free_data(inode, NULL, ordered_extent->file_offset,
ordered_extent->len);
btrfs_ordered_update_i_size(inode, 0, ordered_extent);
-if (nolock)
-trans = btrfs_join_transaction_nolock(root);
+if (freespace_inode)
+trans = btrfs_join_transaction_spacecache(root);
else
trans = btrfs_join_transaction(root);
if (IS_ERR(trans)) {

@@ -3104,8 +3145,8 @@ static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent)
EXTENT_DEFRAG, 0, 0, &cached_state);
}

-if (nolock)
-trans = btrfs_join_transaction_nolock(root);
+if (freespace_inode)
+trans = btrfs_join_transaction_spacecache(root);
else
trans = btrfs_join_transaction(root);
if (IS_ERR(trans)) {

@@ -3254,7 +3295,6 @@ void btrfs_writepage_endio_finish_ordered(struct page *page, u64 start,
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_ordered_extent *ordered_extent = NULL;
struct btrfs_workqueue *wq;
-btrfs_work_func_t func;

trace_btrfs_writepage_end_io_hook(page, start, end, uptodate);

@@ -3263,16 +3303,12 @@ void btrfs_writepage_endio_finish_ordered(struct page *page, u64 start,
end - start + 1, uptodate))
return;

-if (btrfs_is_free_space_inode(BTRFS_I(inode))) {
+if (btrfs_is_free_space_inode(BTRFS_I(inode)))
wq = fs_info->endio_freespace_worker;
-func = btrfs_freespace_write_helper;
-} else {
+else
wq = fs_info->endio_write_workers;
-func = btrfs_endio_write_helper;
-}

-btrfs_init_work(&ordered_extent->work, func, finish_ordered_fn, NULL,
-NULL);
+btrfs_init_work(&ordered_extent->work, finish_ordered_fn, NULL, NULL);
btrfs_queue_work(wq, &ordered_extent->work);
}

@@ -3531,7 +3567,7 @@ int btrfs_orphan_cleanup(struct btrfs_root *root)
found_key.objectid = found_key.offset;
found_key.type = BTRFS_INODE_ITEM_KEY;
found_key.offset = 0;
-inode = btrfs_iget(fs_info->sb, &found_key, root, NULL);
+inode = btrfs_iget(fs_info->sb, &found_key, root);
ret = PTR_ERR_OR_ZERO(inode);
if (ret && ret != -ENOENT)
goto out;

@@ -5153,7 +5189,6 @@ int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size)
hole_em->block_len = 0;
hole_em->orig_block_len = 0;
hole_em->ram_bytes = hole_size;
-hole_em->bdev = fs_info->fs_devices->latest_bdev;
hole_em->compress_type = BTRFS_COMPRESS_NONE;
hole_em->generation = fs_info->generation;

@@ -5750,12 +5785,14 @@ static struct inode *btrfs_iget_locked(struct super_block *s,
return inode;
}

-/* Get an inode object given its location and corresponding root.
- * Returns in *is_new if the inode was read from disk
+/*
+ * Get an inode object given its location and corresponding root.
+ * Path can be preallocated to prevent recursing back to iget through
+ * allocator. NULL is also valid but may require an additional allocation
+ * later.
 */
struct inode *btrfs_iget_path(struct super_block *s, struct btrfs_key *location,
-struct btrfs_root *root, int *new,
-struct btrfs_path *path)
+struct btrfs_root *root, struct btrfs_path *path)
{
struct inode *inode;

@@ -5770,8 +5807,6 @@ struct inode *btrfs_iget_path(struct super_block *s, struct btrfs_key *location,
if (!ret) {
inode_tree_add(inode);
unlock_new_inode(inode);
-if (new)
-*new = 1;
} else {
iget_failed(inode);
/*

@@ -5789,9 +5824,9 @@ struct inode *btrfs_iget_path(struct super_block *s, struct btrfs_key *location,
}

struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location,
-struct btrfs_root *root, int *new)
+struct btrfs_root *root)
{
-return btrfs_iget_path(s, location, root, new, NULL);
+return btrfs_iget_path(s, location, root, NULL);
}

static struct inode *new_simple_dir(struct super_block *s,

@@ -5857,7 +5892,7 @@ struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry)
return ERR_PTR(ret);

if (location.type == BTRFS_INODE_ITEM_KEY) {
-inode = btrfs_iget(dir->i_sb, &location, root, NULL);
+inode = btrfs_iget(dir->i_sb, &location, root);
if (IS_ERR(inode))
return inode;

@@ -5882,7 +5917,7 @@ struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry)
else
inode = new_simple_dir(dir->i_sb, &location, sub_root);
} else {
-inode = btrfs_iget(dir->i_sb, &location, sub_root, NULL);
+inode = btrfs_iget(dir->i_sb, &location, sub_root);
}
srcu_read_unlock(&fs_info->subvol_srcu, index);

@@ -6931,8 +6966,6 @@ struct extent_map *btrfs_get_extent(struct btrfs_inode *inode,

read_lock(&em_tree->lock);
em = lookup_extent_mapping(em_tree, start, len);
-if (em)
-em->bdev = fs_info->fs_devices->latest_bdev;
read_unlock(&em_tree->lock);

if (em) {

@@ -6948,7 +6981,6 @@ struct extent_map *btrfs_get_extent(struct btrfs_inode *inode,
err = -ENOMEM;
goto out;
}
-em->bdev = fs_info->fs_devices->latest_bdev;
em->start = EXTENT_MAP_HOLE;
em->orig_start = EXTENT_MAP_HOLE;
em->len = (u64)-1;

@@ -7207,7 +7239,6 @@ struct extent_map *btrfs_get_extent_fiemap(struct btrfs_inode *inode,
err = -ENOMEM;
goto out;
}
-em->bdev = NULL;

ASSERT(hole_em);
/*

@@ -7567,7 +7598,6 @@ static struct extent_map *create_io_em(struct inode *inode, u64 start, u64 len,
{
struct extent_map_tree *em_tree;
struct extent_map *em;
-struct btrfs_root *root = BTRFS_I(inode)->root;
int ret;

ASSERT(type == BTRFS_ORDERED_PREALLOC ||

@@ -7585,7 +7615,6 @@ static struct extent_map *create_io_em(struct inode *inode, u64 start, u64 len,
em->len = len;
em->block_len = block_len;
em->block_start = block_start;
-em->bdev = root->fs_info->fs_devices->latest_bdev;
em->orig_block_len = orig_block_len;
em->ram_bytes = ram_bytes;
em->generation = -1;

@@ -7624,6 +7653,8 @@ static int btrfs_get_blocks_direct_read(struct extent_map *em,
struct inode *inode,
u64 start, u64 len)
{
+struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
+
if (em->block_start == EXTENT_MAP_HOLE ||
test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
return -ENOENT;

@@ -7633,7 +7664,7 @@ static int btrfs_get_blocks_direct_read(struct extent_map *em,
bh_result->b_blocknr = (em->block_start + (start - em->start)) >>
inode->i_blkbits;
bh_result->b_size = len;
-bh_result->b_bdev = em->bdev;
+bh_result->b_bdev = fs_info->fs_devices->latest_bdev;
set_buffer_mapped(bh_result);

return 0;

@@ -7716,7 +7747,7 @@ skip_cow:
bh_result->b_blocknr = (em->block_start + (start - em->start)) >>
inode->i_blkbits;
bh_result->b_size = len;
-bh_result->b_bdev = em->bdev;
+bh_result->b_bdev = fs_info->fs_devices->latest_bdev;
set_buffer_mapped(bh_result);

if (!test_bit(EXTENT_FLAG_PREALLOC, &em->flags))

@@ -7858,7 +7889,7 @@ static inline blk_status_t submit_dio_repair_bio(struct inode *inode,
if (ret)
return ret;

-ret = btrfs_map_bio(fs_info, bio, mirror_num, 0);
+ret = btrfs_map_bio(fs_info, bio, mirror_num);

return ret;
}

@@ -8211,18 +8242,14 @@ static void __endio_write_update_ordered(struct inode *inode,
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_ordered_extent *ordered = NULL;
struct btrfs_workqueue *wq;
-btrfs_work_func_t func;
u64 ordered_offset = offset;
u64 ordered_bytes = bytes;
u64 last_offset;

-if (btrfs_is_free_space_inode(BTRFS_I(inode))) {
+if (btrfs_is_free_space_inode(BTRFS_I(inode)))
wq = fs_info->endio_freespace_worker;
-func = btrfs_freespace_write_helper;
-} else {
+else
wq = fs_info->endio_write_workers;
-func = btrfs_endio_write_helper;
-}

while (ordered_offset < offset + bytes) {
last_offset = ordered_offset;

@@ -8230,9 +8257,8 @@ static void __endio_write_update_ordered(struct inode *inode,
&ordered_offset,
ordered_bytes,
uptodate)) {
-btrfs_init_work(&ordered->work, func,
-finish_ordered_fn,
-NULL, NULL);
+btrfs_init_work(&ordered->work, finish_ordered_fn, NULL,
+NULL);
btrfs_queue_work(wq, &ordered->work);
}
/*

@@ -8389,7 +8415,7 @@ static inline blk_status_t btrfs_submit_dio_bio(struct bio *bio,
goto err;
}
map:
-ret = btrfs_map_bio(fs_info, bio, 0, 0);
+ret = btrfs_map_bio(fs_info, bio, 0);
err:
return ret;
}

@@ -9321,7 +9347,6 @@ struct inode *btrfs_alloc_inode(struct super_block *sb)
ei->io_failure_tree.track_uptodate = true;
atomic_set(&ei->sync_writers, 0);
mutex_init(&ei->log_mutex);
-mutex_init(&ei->delalloc_mutex);
btrfs_ordered_inode_tree_init(&ei->ordered_tree);
INIT_LIST_HEAD(&ei->delalloc_inodes);
INIT_LIST_HEAD(&ei->delayed_iput);

@@ -9550,6 +9575,9 @@ static int btrfs_rename_exchange(struct inode *old_dir,
goto out_notrans;
}

+if (dest != root)
+btrfs_record_root_in_trans(trans, dest);
+
/*
 * We need to find a free sequence number both in the source and
 * in the destination directory for the exchange.

@@ -10116,8 +10144,7 @@ static struct btrfs_delalloc_work *btrfs_alloc_delalloc_work(struct inode *inode
init_completion(&work->completion);
INIT_LIST_HEAD(&work->list);
work->inode = inode;
-btrfs_init_work(&work->work, btrfs_flush_delalloc_helper,
-btrfs_run_delalloc_work, NULL, NULL);
+btrfs_init_work(&work->work, btrfs_run_delalloc_work, NULL, NULL);

return work;
}

@@ -10450,7 +10477,6 @@ static int __btrfs_prealloc_file_range(struct inode *inode, int mode,
em->block_len = ins.offset;
em->orig_block_len = ins.offset;
em->ram_bytes = ins.offset;
-em->bdev = fs_info->fs_devices->latest_bdev;
set_bit(EXTENT_FLAG_PREALLOC, &em->flags);
em->generation = trans->transid;

@@ -10806,7 +10832,7 @@ static int btrfs_swap_activate(struct swap_info_struct *sis, struct file *file,
start = 0;
while (start < isize) {
u64 logical_block_start, physical_block_start;
-struct btrfs_block_group_cache *bg;
+struct btrfs_block_group *bg;
u64 len = isize - start;

em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, start, len, 0);
@@ -479,10 +479,9 @@ static int btrfs_ioctl_getversion(struct file *file, int __user *arg)
return put_user(inode->i_generation, arg);
}

-static noinline int btrfs_ioctl_fitrim(struct file *file, void __user *arg)
+static noinline int btrfs_ioctl_fitrim(struct btrfs_fs_info *fs_info,
+void __user *arg)
{
-struct inode *inode = file_inode(file);
-struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_device *device;
struct request_queue *q;
struct fstrim_range range;

@@ -541,7 +540,7 @@ static noinline int btrfs_ioctl_fitrim(struct file *file, void __user *arg)
return 0;
}

-int btrfs_is_empty_uuid(u8 *uuid)
+int __pure btrfs_is_empty_uuid(u8 *uuid)
{
int i;

@@ -1409,7 +1408,7 @@ int btrfs_defrag_file(struct inode *inode, struct file *file,
return -EINVAL;

if (do_compress) {
-if (range->compress_type > BTRFS_COMPRESS_TYPES)
+if (range->compress_type >= BTRFS_NR_COMPRESS_TYPES)
return -EINVAL;
if (range->compress_type)
compress_type = range->compress_type;

@@ -2462,7 +2461,7 @@ static int btrfs_search_path_in_tree_user(struct inode *inode,
goto out;
}

-temp_inode = btrfs_iget(sb, &key2, root, NULL);
+temp_inode = btrfs_iget(sb, &key2, root);
if (IS_ERR(temp_inode)) {
ret = PTR_ERR(temp_inode);
goto out;

@@ -4032,16 +4031,15 @@ out:
static void get_block_group_info(struct list_head *groups_list,
struct btrfs_ioctl_space_info *space)
{
-struct btrfs_block_group_cache *block_group;
+struct btrfs_block_group *block_group;

space->total_bytes = 0;
space->used_bytes = 0;
space->flags = 0;
list_for_each_entry(block_group, groups_list, list) {
space->flags = block_group->flags;
-space->total_bytes += block_group->key.offset;
-space->used_bytes +=
-btrfs_block_group_used(&block_group->item);
+space->total_bytes += block_group->length;
+space->used_bytes += block_group->used;
}
}

@@ -4952,10 +4950,9 @@ drop_write:
return ret;
}

-static long btrfs_ioctl_quota_rescan_status(struct file *file, void __user *arg)
+static long btrfs_ioctl_quota_rescan_status(struct btrfs_fs_info *fs_info,
+void __user *arg)
{
-struct inode *inode = file_inode(file);
-struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_ioctl_quota_rescan_args *qsa;
int ret = 0;

@@ -4978,11 +4975,9 @@ static long btrfs_ioctl_quota_rescan_status(struct file *file, void __user *arg)
return ret;
}

-static long btrfs_ioctl_quota_rescan_wait(struct file *file, void __user *arg)
+static long btrfs_ioctl_quota_rescan_wait(struct btrfs_fs_info *fs_info,
+void __user *arg)
{
-struct inode *inode = file_inode(file);
-struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);

if (!capable(CAP_SYS_ADMIN))
return -EPERM;

@@ -5154,10 +5149,9 @@ out:
return ret;
}

-static int btrfs_ioctl_get_fslabel(struct file *file, void __user *arg)
+static int btrfs_ioctl_get_fslabel(struct btrfs_fs_info *fs_info,
+void __user *arg)
{
-struct inode *inode = file_inode(file);
-struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
size_t len;
int ret;
char label[BTRFS_LABEL_SIZE];

@@ -5241,10 +5235,9 @@ int btrfs_ioctl_get_supported_features(void __user *arg)
return 0;
}

-static int btrfs_ioctl_get_features(struct file *file, void __user *arg)
+static int btrfs_ioctl_get_features(struct btrfs_fs_info *fs_info,
+void __user *arg)
{
-struct inode *inode = file_inode(file);
-struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_super_block *super_block = fs_info->super_copy;
struct btrfs_ioctl_feature_flags features;

@@ -5445,11 +5438,11 @@ long btrfs_ioctl(struct file *file, unsigned int
case FS_IOC_GETVERSION:
return btrfs_ioctl_getversion(file, argp);
case FS_IOC_GETFSLABEL:
-return btrfs_ioctl_get_fslabel(file, argp);
+return btrfs_ioctl_get_fslabel(fs_info, argp);
case FS_IOC_SETFSLABEL:
return btrfs_ioctl_set_fslabel(file, argp);
case FITRIM:
-return btrfs_ioctl_fitrim(file, argp);
+return btrfs_ioctl_fitrim(fs_info, argp);
case BTRFS_IOC_SNAP_CREATE:
return btrfs_ioctl_snap_create(file, argp, 0);
case BTRFS_IOC_SNAP_CREATE_V2:

@@ -5554,15 +5547,15 @@ long btrfs_ioctl(struct file *file, unsigned int
case BTRFS_IOC_QUOTA_RESCAN:
return btrfs_ioctl_quota_rescan(file, argp);
case BTRFS_IOC_QUOTA_RESCAN_STATUS:
-return btrfs_ioctl_quota_rescan_status(file, argp);
+return btrfs_ioctl_quota_rescan_status(fs_info, argp);
case BTRFS_IOC_QUOTA_RESCAN_WAIT:
-return btrfs_ioctl_quota_rescan_wait(file, argp);
+return btrfs_ioctl_quota_rescan_wait(fs_info, argp);
case BTRFS_IOC_DEV_REPLACE:
return btrfs_ioctl_dev_replace(fs_info, argp);
case BTRFS_IOC_GET_SUPPORTED_FEATURES:
return btrfs_ioctl_get_supported_features(argp);
case BTRFS_IOC_GET_FEATURES:
-return btrfs_ioctl_get_features(file, argp);
+return btrfs_ioctl_get_features(fs_info, argp);
case BTRFS_IOC_SET_FEATURES:
return btrfs_ioctl_set_features(file, argp);
case FS_IOC_FSGETXATTR:
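Several ioctl helpers in the hunks above stop re-deriving the filesystem info from the struct file and instead receive the btrfs_fs_info pointer that btrfs_ioctl() already holds. A rough user-space sketch of that dispatch pattern, with invented names rather than the kernel API:

    #include <stdio.h>

    struct fs_info { const char *label; };    /* stand-in for btrfs_fs_info */
    struct file { struct fs_info *fs_info; }; /* stand-in for the VFS file */

    /* handler receives the already-resolved context instead of the file */
    static int ioctl_get_fslabel(struct fs_info *fs_info)
    {
            printf("label: %s\n", fs_info->label);
            return 0;
    }

    static int do_ioctl(struct file *file, unsigned int cmd)
    {
            struct fs_info *fs_info = file->fs_info;  /* resolved once here */

            switch (cmd) {
            case 1:
                    return ioctl_get_fslabel(fs_info);
            default:
                    return -1;
            }
    }

    int main(void)
    {
            struct fs_info fsi = { "demo" };
            struct file f = { &fsi };

            return do_ioctl(&f, 1);
    }
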
@ -13,65 +13,164 @@
|
|||
#include "extent_io.h"
|
||||
#include "locking.h"
|
||||
|
||||
/*
 * Extent buffer locking
 * =====================
 *
 * The locks use a custom scheme that allows doing more operations than are
 * available from the current locking primitives. The building blocks are still
 * rwlock and wait queues.
 *
 * Required semantics:
 *
 * - reader/writer exclusion
 * - writer/writer exclusion
 * - reader/reader sharing
 * - spinning lock semantics
 * - blocking lock semantics
 * - try-lock semantics for readers and writers
 * - one level nesting, allowing read lock to be taken by the same thread that
 *   already has write lock
 *
 * The extent buffer locks (also called tree locks) manage access to eb data
 * related to the storage in the b-tree (keys, items, but not the individual
 * members of eb).
 * We want concurrency of many readers and safe updates. The underlying locking
 * is done by read-write spinlock and the blocking part is implemented using
 * counters and wait queues.
 *
 * spinning semantics - the low-level rwlock is held so all other threads that
 *                      want to take it are spinning on it.
 *
 * blocking semantics - the low-level rwlock is not held but the counter
 *                      denotes how many times the blocking lock was held;
 *                      sleeping is possible
 *
 * Write lock always allows only one thread to access the data.
 *
 *
 * Debugging
 * ---------
 *
 * There are additional state counters that are asserted in various contexts,
 * removed from non-debug builds to reduce extent_buffer size and for
 * performance reasons.
 *
 *
 * Lock nesting
 * ------------
 *
 * A write operation on a tree might indirectly start a lookup on the same
 * tree. This can happen when btrfs_cow_block locks the tree and needs to
 * lookup free extents.
 *
 * btrfs_cow_block
 *   ..
 *   alloc_tree_block_no_bg_flush
 *     btrfs_alloc_tree_block
 *       btrfs_reserve_extent
 *         ..
 *         load_free_space_cache
 *           ..
 *           btrfs_lookup_file_extent
 *             btrfs_search_slot
 *
 *
 * Locking pattern - spinning
 * --------------------------
 *
 * The simple locking scenario, the +--+ denotes the spinning section.
 *
 * +- btrfs_tree_lock
 * | - extent_buffer::rwlock is held
 * | - no heavy operations should happen, eg. IO, memory allocations, large
 * |   structure traversals
 * +- btrfs_tree_unlock
 *
 *
 * Locking pattern - blocking
 * --------------------------
 *
 * The blocking write uses the following scheme. The +--+ denotes the spinning
 * section.
 *
 * +- btrfs_tree_lock
 * |
 * +- btrfs_set_lock_blocking_write
 *
 * - allowed: IO, memory allocations, etc.
 *
 * -- btrfs_tree_unlock - note, no explicit unblocking necessary
 *
 *
 * Blocking read is similar.
 *
 * +- btrfs_tree_read_lock
 * |
 * +- btrfs_set_lock_blocking_read
 *
 * - heavy operations allowed
 *
 * +- btrfs_tree_read_unlock_blocking
 * |
 * +- btrfs_tree_read_unlock
 *
 */
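For orientation, a minimal usage sketch of the two patterns documented above. It is illustrative only and not part of this diff: the locking helpers named are the real ones from this file, the wrapper functions are hypothetical.

	/* Spinning pattern: short critical section, nothing that can sleep. */
	static void example_light_update(struct extent_buffer *eb)
	{
		btrfs_tree_lock(eb);
		/* cheap, non-sleeping work on the eb items */
		btrfs_tree_unlock(eb);
	}

	/* Blocking pattern: switch to blocking before work that may sleep. */
	static void example_heavy_update(struct extent_buffer *eb)
	{
		btrfs_tree_lock(eb);
		btrfs_set_lock_blocking_write(eb);
		/* IO, memory allocations, large traversals are allowed here */
		btrfs_tree_unlock(eb);	/* no explicit unblocking needed */
	}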
|
||||
|
||||
#ifdef CONFIG_BTRFS_DEBUG
|
||||
static void btrfs_assert_spinning_writers_get(struct extent_buffer *eb)
|
||||
static inline void btrfs_assert_spinning_writers_get(struct extent_buffer *eb)
|
||||
{
|
||||
WARN_ON(eb->spinning_writers);
|
||||
eb->spinning_writers++;
|
||||
}
|
||||
|
||||
static void btrfs_assert_spinning_writers_put(struct extent_buffer *eb)
|
||||
static inline void btrfs_assert_spinning_writers_put(struct extent_buffer *eb)
|
||||
{
|
||||
WARN_ON(eb->spinning_writers != 1);
|
||||
eb->spinning_writers--;
|
||||
}
|
||||
|
||||
static void btrfs_assert_no_spinning_writers(struct extent_buffer *eb)
|
||||
static inline void btrfs_assert_no_spinning_writers(struct extent_buffer *eb)
|
||||
{
|
||||
WARN_ON(eb->spinning_writers);
|
||||
}
|
||||
|
||||
static void btrfs_assert_spinning_readers_get(struct extent_buffer *eb)
|
||||
static inline void btrfs_assert_spinning_readers_get(struct extent_buffer *eb)
|
||||
{
|
||||
atomic_inc(&eb->spinning_readers);
|
||||
}
|
||||
|
||||
static void btrfs_assert_spinning_readers_put(struct extent_buffer *eb)
|
||||
static inline void btrfs_assert_spinning_readers_put(struct extent_buffer *eb)
|
||||
{
|
||||
WARN_ON(atomic_read(&eb->spinning_readers) == 0);
|
||||
atomic_dec(&eb->spinning_readers);
|
||||
}
|
||||
|
||||
static void btrfs_assert_tree_read_locks_get(struct extent_buffer *eb)
|
||||
static inline void btrfs_assert_tree_read_locks_get(struct extent_buffer *eb)
|
||||
{
|
||||
atomic_inc(&eb->read_locks);
|
||||
}
|
||||
|
||||
static void btrfs_assert_tree_read_locks_put(struct extent_buffer *eb)
|
||||
static inline void btrfs_assert_tree_read_locks_put(struct extent_buffer *eb)
|
||||
{
|
||||
atomic_dec(&eb->read_locks);
|
||||
}
|
||||
|
||||
static void btrfs_assert_tree_read_locked(struct extent_buffer *eb)
|
||||
static inline void btrfs_assert_tree_read_locked(struct extent_buffer *eb)
|
||||
{
|
||||
BUG_ON(!atomic_read(&eb->read_locks));
|
||||
}
|
||||
|
||||
static void btrfs_assert_tree_write_locks_get(struct extent_buffer *eb)
|
||||
static inline void btrfs_assert_tree_write_locks_get(struct extent_buffer *eb)
|
||||
{
|
||||
eb->write_locks++;
|
||||
}
|
||||
|
||||
static void btrfs_assert_tree_write_locks_put(struct extent_buffer *eb)
|
||||
static inline void btrfs_assert_tree_write_locks_put(struct extent_buffer *eb)
|
||||
{
|
||||
eb->write_locks--;
|
||||
}
|
||||
|
||||
void btrfs_assert_tree_locked(struct extent_buffer *eb)
|
||||
{
|
||||
BUG_ON(!eb->write_locks);
|
||||
}
|
||||
|
||||
#else
|
||||
static void btrfs_assert_spinning_writers_get(struct extent_buffer *eb) { }
|
||||
static void btrfs_assert_spinning_writers_put(struct extent_buffer *eb) { }
|
||||
|
@ -81,11 +180,19 @@ static void btrfs_assert_spinning_readers_get(struct extent_buffer *eb) { }
|
|||
static void btrfs_assert_tree_read_locked(struct extent_buffer *eb) { }
|
||||
static void btrfs_assert_tree_read_locks_get(struct extent_buffer *eb) { }
|
||||
static void btrfs_assert_tree_read_locks_put(struct extent_buffer *eb) { }
|
||||
void btrfs_assert_tree_locked(struct extent_buffer *eb) { }
|
||||
static void btrfs_assert_tree_write_locks_get(struct extent_buffer *eb) { }
|
||||
static void btrfs_assert_tree_write_locks_put(struct extent_buffer *eb) { }
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Mark already held read lock as blocking. Can be nested in write lock by the
|
||||
* same thread.
|
||||
*
|
||||
 * Use when there are potentially long operations ahead so other threads
 * waiting on the lock will not actively spin but sleep instead.
|
||||
*
|
||||
* The rwlock is released and blocking reader counter is increased.
|
||||
*/
|
||||
void btrfs_set_lock_blocking_read(struct extent_buffer *eb)
|
||||
{
|
||||
trace_btrfs_set_lock_blocking_read(eb);
|
||||
|
@ -102,6 +209,14 @@ void btrfs_set_lock_blocking_read(struct extent_buffer *eb)
|
|||
read_unlock(&eb->lock);
|
||||
}
|
||||
|
||||
/*
|
||||
* Mark already held write lock as blocking.
|
||||
*
|
||||
* Use when there are potentially long operations ahead so other threads
|
||||
* waiting on the lock will not actively spin but sleep instead.
|
||||
*
|
||||
* The rwlock is released and blocking writers is set.
|
||||
*/
|
||||
void btrfs_set_lock_blocking_write(struct extent_buffer *eb)
|
||||
{
|
||||
trace_btrfs_set_lock_blocking_write(eb);
|
||||
|
@ -115,14 +230,19 @@ void btrfs_set_lock_blocking_write(struct extent_buffer *eb)
|
|||
if (eb->blocking_writers == 0) {
|
||||
btrfs_assert_spinning_writers_put(eb);
|
||||
btrfs_assert_tree_locked(eb);
|
||||
eb->blocking_writers++;
|
||||
WRITE_ONCE(eb->blocking_writers, 1);
|
||||
write_unlock(&eb->lock);
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* take a spinning read lock. This will wait for any blocking
|
||||
* writers
|
||||
* Lock the extent buffer for read. Wait for any writers (spinning or blocking).
|
||||
* Can be nested in write lock by the same thread.
|
||||
*
|
||||
* Use when the locked section does only lightweight actions and busy waiting
|
||||
* would be cheaper than making other threads do the wait/wake loop.
|
||||
*
|
||||
* The rwlock is held upon exit.
|
||||
*/
|
||||
void btrfs_tree_read_lock(struct extent_buffer *eb)
|
||||
{
|
||||
|
@ -134,23 +254,24 @@ again:
|
|||
read_lock(&eb->lock);
|
||||
BUG_ON(eb->blocking_writers == 0 &&
|
||||
current->pid == eb->lock_owner);
|
||||
if (eb->blocking_writers && current->pid == eb->lock_owner) {
|
||||
/*
|
||||
* This extent is already write-locked by our thread. We allow
|
||||
* an additional read lock to be added because it's for the same
|
||||
* thread. btrfs_find_all_roots() depends on this as it may be
|
||||
* called on a partly (write-)locked tree.
|
||||
*/
|
||||
BUG_ON(eb->lock_nested);
|
||||
eb->lock_nested = true;
|
||||
read_unlock(&eb->lock);
|
||||
trace_btrfs_tree_read_lock(eb, start_ns);
|
||||
return;
|
||||
}
|
||||
if (eb->blocking_writers) {
|
||||
if (current->pid == eb->lock_owner) {
|
||||
/*
|
||||
* This extent is already write-locked by our thread.
|
||||
* We allow an additional read lock to be added because
|
||||
* it's for the same thread. btrfs_find_all_roots()
|
||||
* depends on this as it may be called on a partly
|
||||
* (write-)locked tree.
|
||||
*/
|
||||
BUG_ON(eb->lock_nested);
|
||||
eb->lock_nested = true;
|
||||
read_unlock(&eb->lock);
|
||||
trace_btrfs_tree_read_lock(eb, start_ns);
|
||||
return;
|
||||
}
|
||||
read_unlock(&eb->lock);
|
||||
wait_event(eb->write_lock_wq,
|
||||
eb->blocking_writers == 0);
|
||||
READ_ONCE(eb->blocking_writers) == 0);
|
||||
goto again;
|
||||
}
|
||||
btrfs_assert_tree_read_locks_get(eb);
|
||||
|
@ -159,17 +280,19 @@ again:
|
|||
}
|
||||
|
||||
/*
|
||||
* take a spinning read lock.
|
||||
* returns 1 if we get the read lock and 0 if we don't
|
||||
* this won't wait for blocking writers
|
||||
* Lock extent buffer for read, optimistically expecting that there are no
|
||||
* contending blocking writers. If there are, don't wait.
|
||||
*
|
||||
* Return 1 if the rwlock has been taken, 0 otherwise
|
||||
*/
|
||||
int btrfs_tree_read_lock_atomic(struct extent_buffer *eb)
|
||||
{
|
||||
if (eb->blocking_writers)
|
||||
if (READ_ONCE(eb->blocking_writers))
|
||||
return 0;
|
||||
|
||||
read_lock(&eb->lock);
|
||||
if (eb->blocking_writers) {
|
||||
/* Refetch value after lock */
|
||||
if (READ_ONCE(eb->blocking_writers)) {
|
||||
read_unlock(&eb->lock);
|
||||
return 0;
|
||||
}
|
||||
|
@ -180,18 +303,20 @@ int btrfs_tree_read_lock_atomic(struct extent_buffer *eb)
|
|||
}
|
||||
|
||||
/*
|
||||
* returns 1 if we get the read lock and 0 if we don't
|
||||
* this won't wait for blocking writers
|
||||
* Try-lock for read. Don't block or wait for contending writers.
|
||||
*
|
||||
 * Return 1 if the rwlock has been taken, 0 otherwise
|
||||
*/
|
||||
int btrfs_try_tree_read_lock(struct extent_buffer *eb)
|
||||
{
|
||||
if (eb->blocking_writers)
|
||||
if (READ_ONCE(eb->blocking_writers))
|
||||
return 0;
|
||||
|
||||
if (!read_trylock(&eb->lock))
|
||||
return 0;
|
||||
|
||||
if (eb->blocking_writers) {
|
||||
/* Refetch value after lock */
|
||||
if (READ_ONCE(eb->blocking_writers)) {
|
||||
read_unlock(&eb->lock);
|
||||
return 0;
|
||||
}
|
||||
|
@ -202,16 +327,19 @@ int btrfs_try_tree_read_lock(struct extent_buffer *eb)
|
|||
}
|
||||
|
||||
/*
|
||||
* returns 1 if we get the read lock and 0 if we don't
|
||||
* this won't wait for blocking writers or readers
|
||||
* Try-lock for write. May block until the lock is uncontended, but does not
|
||||
* wait until it is free.
|
||||
*
|
||||
 * Return 1 if the rwlock has been taken, 0 otherwise
|
||||
*/
|
||||
int btrfs_try_tree_write_lock(struct extent_buffer *eb)
|
||||
{
|
||||
if (eb->blocking_writers || atomic_read(&eb->blocking_readers))
|
||||
if (READ_ONCE(eb->blocking_writers) || atomic_read(&eb->blocking_readers))
|
||||
return 0;
|
||||
|
||||
write_lock(&eb->lock);
|
||||
if (eb->blocking_writers || atomic_read(&eb->blocking_readers)) {
|
||||
/* Refetch value after lock */
|
||||
if (READ_ONCE(eb->blocking_writers) || atomic_read(&eb->blocking_readers)) {
|
||||
write_unlock(&eb->lock);
|
||||
return 0;
|
||||
}
|
||||
|
@ -223,7 +351,10 @@ int btrfs_try_tree_write_lock(struct extent_buffer *eb)
|
|||
}
|
||||
|
||||
/*
|
||||
* drop a spinning read lock
|
||||
* Release read lock. Must be used only if the lock is in spinning mode. If
|
||||
* the read lock is nested, must pair with read lock before the write unlock.
|
||||
*
|
||||
* The rwlock is not held upon exit.
|
||||
*/
|
||||
void btrfs_tree_read_unlock(struct extent_buffer *eb)
|
||||
{
|
||||
|
@ -245,7 +376,11 @@ void btrfs_tree_read_unlock(struct extent_buffer *eb)
|
|||
}
|
||||
|
||||
/*
|
||||
* drop a blocking read lock
|
||||
* Release read lock, previously set to blocking by a pairing call to
|
||||
* btrfs_set_lock_blocking_read(). Can be nested in write lock by the same
|
||||
* thread.
|
||||
*
|
||||
* State of rwlock is unchanged, last reader wakes waiting threads.
|
||||
*/
|
||||
void btrfs_tree_read_unlock_blocking(struct extent_buffer *eb)
|
||||
{
|
||||
|
@ -269,8 +404,10 @@ void btrfs_tree_read_unlock_blocking(struct extent_buffer *eb)
|
|||
}
|
||||
|
||||
/*
|
||||
* take a spinning write lock. This will wait for both
|
||||
* blocking readers or writers
|
||||
* Lock for write. Wait for all blocking and spinning readers and writers. This
|
||||
* starts context where reader lock could be nested by the same thread.
|
||||
*
|
||||
* The rwlock is held for write upon exit.
|
||||
*/
|
||||
void btrfs_tree_lock(struct extent_buffer *eb)
|
||||
{
|
||||
|
@ -282,9 +419,11 @@ void btrfs_tree_lock(struct extent_buffer *eb)
|
|||
WARN_ON(eb->lock_owner == current->pid);
|
||||
again:
|
||||
wait_event(eb->read_lock_wq, atomic_read(&eb->blocking_readers) == 0);
|
||||
wait_event(eb->write_lock_wq, eb->blocking_writers == 0);
|
||||
wait_event(eb->write_lock_wq, READ_ONCE(eb->blocking_writers) == 0);
|
||||
write_lock(&eb->lock);
|
||||
if (atomic_read(&eb->blocking_readers) || eb->blocking_writers) {
|
||||
/* Refetch value after lock */
|
||||
if (atomic_read(&eb->blocking_readers) ||
|
||||
READ_ONCE(eb->blocking_writers)) {
|
||||
write_unlock(&eb->lock);
|
||||
goto again;
|
||||
}
|
||||
|
@ -295,10 +434,19 @@ again:
|
|||
}
|
||||
|
||||
/*
|
||||
* drop a spinning or a blocking write lock.
|
||||
* Release the write lock, either blocking or spinning (ie. there's no need
|
||||
* for an explicit blocking unlock, like btrfs_tree_read_unlock_blocking).
|
||||
* This also ends the context for nesting, the read lock must have been
|
||||
* released already.
|
||||
*
|
||||
* Tasks blocked and waiting are woken, rwlock is not held upon exit.
|
||||
*/
|
||||
void btrfs_tree_unlock(struct extent_buffer *eb)
|
||||
{
|
||||
/*
|
||||
* This is read both locked and unlocked but always by the same thread
|
||||
* that already owns the lock so we don't need to use READ_ONCE
|
||||
*/
|
||||
int blockers = eb->blocking_writers;
|
||||
|
||||
BUG_ON(blockers > 1);
|
||||
|
@ -310,7 +458,8 @@ void btrfs_tree_unlock(struct extent_buffer *eb)
|
|||
|
||||
if (blockers) {
|
||||
btrfs_assert_no_spinning_writers(eb);
|
||||
eb->blocking_writers--;
|
||||
/* Unlocked write */
|
||||
WRITE_ONCE(eb->blocking_writers, 0);
|
||||
/*
|
||||
* We need to order modifying blocking_writers above with
|
||||
* actually waking up the sleepers to ensure they see the
|
||||
|
@ -322,3 +471,55 @@ void btrfs_tree_unlock(struct extent_buffer *eb)
|
|||
write_unlock(&eb->lock);
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Set all locked nodes in the path to blocking locks. This should be done
|
||||
* before scheduling
|
||||
*/
|
||||
void btrfs_set_path_blocking(struct btrfs_path *p)
|
||||
{
|
||||
int i;
|
||||
|
||||
for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
|
||||
if (!p->nodes[i] || !p->locks[i])
|
||||
continue;
|
||||
/*
|
||||
* If we currently have a spinning reader or writer lock this
|
||||
* will bump the count of blocking holders and drop the
|
||||
* spinlock.
|
||||
*/
|
||||
if (p->locks[i] == BTRFS_READ_LOCK) {
|
||||
btrfs_set_lock_blocking_read(p->nodes[i]);
|
||||
p->locks[i] = BTRFS_READ_LOCK_BLOCKING;
|
||||
} else if (p->locks[i] == BTRFS_WRITE_LOCK) {
|
||||
btrfs_set_lock_blocking_write(p->nodes[i]);
|
||||
p->locks[i] = BTRFS_WRITE_LOCK_BLOCKING;
|
||||
}
|
||||
}
|
||||
}
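A hedged usage sketch (illustrative, not part of this diff): a caller that still holds spinning locks on a path and is about to do work that may sleep switches the whole path to blocking locks first.

	btrfs_set_path_blocking(path);
	/* now safe to sleep: memory allocation, reading blocks from disk, ... */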
|
||||
|
||||
/*
|
||||
* This releases any locks held in the path starting at level and going all the
|
||||
* way up to the root.
|
||||
*
|
||||
* btrfs_search_slot will keep the lock held on higher nodes in a few corner
|
||||
* cases, such as COW of the block at slot zero in the node. This ignores
|
||||
* those rules, and it should only be called when there are no more updates to
|
||||
* be done higher up in the tree.
|
||||
*/
|
||||
void btrfs_unlock_up_safe(struct btrfs_path *path, int level)
|
||||
{
|
||||
int i;
|
||||
|
||||
if (path->keep_locks)
|
||||
return;
|
||||
|
||||
for (i = level; i < BTRFS_MAX_LEVEL; i++) {
|
||||
if (!path->nodes[i])
|
||||
continue;
|
||||
if (!path->locks[i])
|
||||
continue;
|
||||
btrfs_tree_unlock_rw(path->nodes[i], path->locks[i]);
|
||||
path->locks[i] = 0;
|
||||
}
|
||||
}
|
||||
|
|
|
@ -6,6 +6,8 @@
|
|||
#ifndef BTRFS_LOCKING_H
|
||||
#define BTRFS_LOCKING_H
|
||||
|
||||
#include "extent_io.h"
|
||||
|
||||
#define BTRFS_WRITE_LOCK 1
|
||||
#define BTRFS_READ_LOCK 2
|
||||
#define BTRFS_WRITE_LOCK_BLOCKING 3
|
||||
|
@ -19,11 +21,20 @@ void btrfs_tree_read_unlock(struct extent_buffer *eb);
|
|||
void btrfs_tree_read_unlock_blocking(struct extent_buffer *eb);
|
||||
void btrfs_set_lock_blocking_read(struct extent_buffer *eb);
|
||||
void btrfs_set_lock_blocking_write(struct extent_buffer *eb);
|
||||
void btrfs_assert_tree_locked(struct extent_buffer *eb);
|
||||
int btrfs_try_tree_read_lock(struct extent_buffer *eb);
|
||||
int btrfs_try_tree_write_lock(struct extent_buffer *eb);
|
||||
int btrfs_tree_read_lock_atomic(struct extent_buffer *eb);
|
||||
|
||||
#ifdef CONFIG_BTRFS_DEBUG
|
||||
static inline void btrfs_assert_tree_locked(struct extent_buffer *eb) {
|
||||
BUG_ON(!eb->write_locks);
|
||||
}
|
||||
#else
|
||||
static inline void btrfs_assert_tree_locked(struct extent_buffer *eb) { }
|
||||
#endif
|
||||
|
||||
void btrfs_set_path_blocking(struct btrfs_path *p);
|
||||
void btrfs_unlock_up_safe(struct btrfs_path *path, int level);
|
||||
|
||||
static inline void btrfs_tree_unlock_rw(struct extent_buffer *eb, int rw)
|
||||
{
|
||||
|
|
|
@ -63,27 +63,7 @@ struct workspace {
|
|||
|
||||
static struct workspace_manager wsm;
|
||||
|
||||
static void lzo_init_workspace_manager(void)
|
||||
{
|
||||
btrfs_init_workspace_manager(&wsm, &btrfs_lzo_compress);
|
||||
}
|
||||
|
||||
static void lzo_cleanup_workspace_manager(void)
|
||||
{
|
||||
btrfs_cleanup_workspace_manager(&wsm);
|
||||
}
|
||||
|
||||
static struct list_head *lzo_get_workspace(unsigned int level)
|
||||
{
|
||||
return btrfs_get_workspace(&wsm, level);
|
||||
}
|
||||
|
||||
static void lzo_put_workspace(struct list_head *ws)
|
||||
{
|
||||
btrfs_put_workspace(&wsm, ws);
|
||||
}
|
||||
|
||||
static void lzo_free_workspace(struct list_head *ws)
|
||||
void lzo_free_workspace(struct list_head *ws)
|
||||
{
|
||||
struct workspace *workspace = list_entry(ws, struct workspace, list);
|
||||
|
||||
|
@ -93,7 +73,7 @@ static void lzo_free_workspace(struct list_head *ws)
|
|||
kfree(workspace);
|
||||
}
|
||||
|
||||
static struct list_head *lzo_alloc_workspace(unsigned int level)
|
||||
struct list_head *lzo_alloc_workspace(unsigned int level)
|
||||
{
|
||||
struct workspace *workspace;
|
||||
|
||||
|
@ -131,13 +111,9 @@ static inline size_t read_compress_length(const char *buf)
|
|||
return le32_to_cpu(dlen);
|
||||
}
|
||||
|
||||
static int lzo_compress_pages(struct list_head *ws,
|
||||
struct address_space *mapping,
|
||||
u64 start,
|
||||
struct page **pages,
|
||||
unsigned long *out_pages,
|
||||
unsigned long *total_in,
|
||||
unsigned long *total_out)
|
||||
int lzo_compress_pages(struct list_head *ws, struct address_space *mapping,
|
||||
u64 start, struct page **pages, unsigned long *out_pages,
|
||||
unsigned long *total_in, unsigned long *total_out)
|
||||
{
|
||||
struct workspace *workspace = list_entry(ws, struct workspace, list);
|
||||
int ret = 0;
|
||||
|
@ -303,7 +279,7 @@ out:
|
|||
return ret;
|
||||
}
|
||||
|
||||
static int lzo_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
|
||||
int lzo_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
|
||||
{
|
||||
struct workspace *workspace = list_entry(ws, struct workspace, list);
|
||||
int ret = 0, ret2;
|
||||
|
@ -444,10 +420,9 @@ done:
|
|||
return ret;
|
||||
}
|
||||
|
||||
static int lzo_decompress(struct list_head *ws, unsigned char *data_in,
|
||||
struct page *dest_page,
|
||||
unsigned long start_byte,
|
||||
size_t srclen, size_t destlen)
|
||||
int lzo_decompress(struct list_head *ws, unsigned char *data_in,
|
||||
struct page *dest_page, unsigned long start_byte, size_t srclen,
|
||||
size_t destlen)
|
||||
{
|
||||
struct workspace *workspace = list_entry(ws, struct workspace, list);
|
||||
size_t in_len;
|
||||
|
@ -508,15 +483,7 @@ out:
|
|||
}
|
||||
|
||||
const struct btrfs_compress_op btrfs_lzo_compress = {
|
||||
.init_workspace_manager = lzo_init_workspace_manager,
|
||||
.cleanup_workspace_manager = lzo_cleanup_workspace_manager,
|
||||
.get_workspace = lzo_get_workspace,
|
||||
.put_workspace = lzo_put_workspace,
|
||||
.alloc_workspace = lzo_alloc_workspace,
|
||||
.free_workspace = lzo_free_workspace,
|
||||
.compress_pages = lzo_compress_pages,
|
||||
.decompress_bio = lzo_decompress_bio,
|
||||
.decompress = lzo_decompress,
|
||||
.workspace_manager = &wsm,
|
||||
.max_level = 1,
|
||||
.default_level = 1,
|
||||
};
|
||||
|
|
|
@ -47,4 +47,15 @@ static inline u64 div_factor_fine(u64 num, int factor)
|
|||
return div_u64(num, 100);
|
||||
}
|
||||
|
||||
/* Copy of is_power_of_two that is 64bit safe */
static inline bool is_power_of_two_u64(u64 n)
{
	return n != 0 && (n & (n - 1)) == 0;
}

static inline bool has_single_bit_set(u64 n)
{
	return is_power_of_two_u64(n);
}
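Worked example of the bit trick (illustrative, not part of this diff): n & (n - 1) clears the lowest set bit, so the expression is zero exactly when at most one bit was set; combined with n != 0 this means exactly one bit.

	has_single_bit_set(0);                          /* false: no bits set */
	has_single_bit_set(1ULL << 40);                 /* true: exactly one bit */
	has_single_bit_set((1ULL << 3) | (1ULL << 5));  /* false: two bits set */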
|
||||
|
||||
#endif
|
||||
|
|
|
@ -547,7 +547,6 @@ u64 btrfs_wait_ordered_extents(struct btrfs_root *root, u64 nr,
|
|||
spin_unlock(&root->ordered_extent_lock);
|
||||
|
||||
btrfs_init_work(&ordered->flush_work,
|
||||
btrfs_flush_delalloc_helper,
|
||||
btrfs_run_ordered_extent_work, NULL, NULL);
|
||||
list_add_tail(&ordered->work_list, &works);
|
||||
btrfs_queue_work(fs_info->flush_workers, &ordered->flush_work);
|
||||
|
@ -573,12 +572,11 @@ u64 btrfs_wait_ordered_extents(struct btrfs_root *root, u64 nr,
|
|||
return count;
|
||||
}
|
||||
|
||||
u64 btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, u64 nr,
|
||||
void btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, u64 nr,
|
||||
const u64 range_start, const u64 range_len)
|
||||
{
|
||||
struct btrfs_root *root;
|
||||
struct list_head splice;
|
||||
u64 total_done = 0;
|
||||
u64 done;
|
||||
|
||||
INIT_LIST_HEAD(&splice);
|
||||
|
@ -598,7 +596,6 @@ u64 btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, u64 nr,
|
|||
done = btrfs_wait_ordered_extents(root, nr,
|
||||
range_start, range_len);
|
||||
btrfs_put_fs_root(root);
|
||||
total_done += done;
|
||||
|
||||
spin_lock(&fs_info->ordered_root_lock);
|
||||
if (nr != U64_MAX) {
|
||||
|
@ -608,8 +605,6 @@ u64 btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, u64 nr,
|
|||
list_splice_tail(&splice, &fs_info->ordered_roots);
|
||||
spin_unlock(&fs_info->ordered_root_lock);
|
||||
mutex_unlock(&fs_info->ordered_operations_mutex);
|
||||
|
||||
return total_done;
|
||||
}
|
||||
|
||||
/*
|
||||
|
|
|
@ -186,7 +186,7 @@ int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u64 disk_bytenr,
|
|||
u8 *sum, int len);
|
||||
u64 btrfs_wait_ordered_extents(struct btrfs_root *root, u64 nr,
|
||||
const u64 range_start, const u64 range_len);
|
||||
u64 btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, u64 nr,
|
||||
void btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, u64 nr,
|
||||
const u64 range_start, const u64 range_len);
|
||||
void btrfs_lock_and_flush_ordered_range(struct extent_io_tree *tree,
|
||||
struct btrfs_inode *inode, u64 start,
|
||||
|
|
|
@ -266,9 +266,9 @@ void btrfs_print_leaf(struct extent_buffer *l)
|
|||
struct btrfs_block_group_item);
|
||||
pr_info(
|
||||
"\t\tblock group used %llu chunk_objectid %llu flags %llu\n",
|
||||
btrfs_disk_block_group_used(l, bi),
|
||||
btrfs_disk_block_group_chunk_objectid(l, bi),
|
||||
btrfs_disk_block_group_flags(l, bi));
|
||||
btrfs_block_group_used(l, bi),
|
||||
btrfs_block_group_chunk_objectid(l, bi),
|
||||
btrfs_block_group_flags(l, bi));
|
||||
break;
|
||||
case BTRFS_CHUNK_ITEM_KEY:
|
||||
print_chunk(l, btrfs_item_ptr(l, i,
|
||||
|
|
|
@ -416,11 +416,11 @@ int btrfs_subvol_inherit_props(struct btrfs_trans_handle *trans,
|
|||
key.type = BTRFS_INODE_ITEM_KEY;
|
||||
key.offset = 0;
|
||||
|
||||
parent_inode = btrfs_iget(sb, &key, parent_root, NULL);
|
||||
parent_inode = btrfs_iget(sb, &key, parent_root);
|
||||
if (IS_ERR(parent_inode))
|
||||
return PTR_ERR(parent_inode);
|
||||
|
||||
child_inode = btrfs_iget(sb, &key, root, NULL);
|
||||
child_inode = btrfs_iget(sb, &key, root);
|
||||
if (IS_ERR(child_inode)) {
|
||||
iput(parent_inode);
|
||||
return PTR_ERR(child_inode);
|
||||
|
@ -437,8 +437,6 @@ void __init btrfs_props_init(void)
|
|||
{
|
||||
int i;
|
||||
|
||||
hash_init(prop_handlers_ht);
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(prop_handlers); i++) {
|
||||
struct prop_handler *p = &prop_handlers[i];
|
||||
u64 h = btrfs_name_hash(p->xattr_name, strlen(p->xattr_name));
|
||||
|
|
|
@ -1811,7 +1811,7 @@ static int qgroup_trace_extent_swap(struct btrfs_trans_handle* trans,
|
|||
btrfs_item_key_to_cpu(dst_path->nodes[dst_level], &key, 0);
|
||||
|
||||
/* For src_path */
|
||||
extent_buffer_get(src_eb);
|
||||
atomic_inc(&src_eb->refs);
|
||||
src_path->nodes[root_level] = src_eb;
|
||||
src_path->slots[root_level] = dst_path->slots[root_level];
|
||||
src_path->locks[root_level] = 0;
|
||||
|
@ -2067,7 +2067,7 @@ static int qgroup_trace_subtree_swap(struct btrfs_trans_handle *trans,
|
|||
goto out;
|
||||
}
|
||||
/* For dst_path */
|
||||
extent_buffer_get(dst_eb);
|
||||
atomic_inc(&dst_eb->refs);
|
||||
dst_path->nodes[level] = dst_eb;
|
||||
dst_path->slots[level] = 0;
|
||||
dst_path->locks[level] = 0;
|
||||
|
@ -2126,7 +2126,7 @@ int btrfs_qgroup_trace_subtree(struct btrfs_trans_handle *trans,
|
|||
* walk back up the tree (adjusting slot pointers as we go)
|
||||
* and restart the search process.
|
||||
*/
|
||||
extent_buffer_get(root_eb); /* For path */
|
||||
atomic_inc(&root_eb->refs); /* For path */
|
||||
path->nodes[root_level] = root_eb;
|
||||
path->slots[root_level] = 0;
|
||||
path->locks[root_level] = 0; /* so release_path doesn't try to unlock */
|
||||
|
@ -3277,10 +3277,7 @@ qgroup_rescan_init(struct btrfs_fs_info *fs_info, u64 progress_objectid,
|
|||
spin_unlock(&fs_info->qgroup_lock);
|
||||
mutex_unlock(&fs_info->qgroup_rescan_lock);
|
||||
|
||||
memset(&fs_info->qgroup_rescan_work, 0,
|
||||
sizeof(fs_info->qgroup_rescan_work));
|
||||
btrfs_init_work(&fs_info->qgroup_rescan_work,
|
||||
btrfs_qgroup_rescan_helper,
|
||||
btrfs_qgroup_rescan_worker, NULL, NULL);
|
||||
return 0;
|
||||
}
|
||||
|
@ -3826,7 +3823,7 @@ out:
|
|||
*/
|
||||
int btrfs_qgroup_add_swapped_blocks(struct btrfs_trans_handle *trans,
|
||||
struct btrfs_root *subvol_root,
|
||||
struct btrfs_block_group_cache *bg,
|
||||
struct btrfs_block_group *bg,
|
||||
struct extent_buffer *subvol_parent, int subvol_slot,
|
||||
struct extent_buffer *reloc_parent, int reloc_slot,
|
||||
u64 last_snapshot)
|
||||
|
|
|
@ -408,7 +408,7 @@ void btrfs_qgroup_init_swapped_blocks(
|
|||
void btrfs_qgroup_clean_swapped_blocks(struct btrfs_root *root);
|
||||
int btrfs_qgroup_add_swapped_blocks(struct btrfs_trans_handle *trans,
|
||||
struct btrfs_root *subvol_root,
|
||||
struct btrfs_block_group_cache *bg,
|
||||
struct btrfs_block_group *bg,
|
||||
struct extent_buffer *subvol_parent, int subvol_slot,
|
||||
struct extent_buffer *reloc_parent, int reloc_slot,
|
||||
u64 last_snapshot);
|
||||
|
|
|
@ -190,7 +190,7 @@ static void scrub_parity_work(struct btrfs_work *work);
|
|||
|
||||
static void start_async_work(struct btrfs_raid_bio *rbio, btrfs_func_t work_func)
|
||||
{
|
||||
btrfs_init_work(&rbio->work, btrfs_rmw_helper, work_func, NULL, NULL);
|
||||
btrfs_init_work(&rbio->work, work_func, NULL, NULL);
|
||||
btrfs_queue_work(rbio->fs_info->rmw_workers, &rbio->work);
|
||||
}
|
||||
|
||||
|
@ -671,8 +671,7 @@ static struct page *rbio_qstripe_page(struct btrfs_raid_bio *rbio, int index)
|
|||
*/
|
||||
static noinline int lock_stripe_add(struct btrfs_raid_bio *rbio)
|
||||
{
|
||||
int bucket = rbio_bucket(rbio);
|
||||
struct btrfs_stripe_hash *h = rbio->fs_info->stripe_hash_table->table + bucket;
|
||||
struct btrfs_stripe_hash *h;
|
||||
struct btrfs_raid_bio *cur;
|
||||
struct btrfs_raid_bio *pending;
|
||||
unsigned long flags;
|
||||
|
@ -680,64 +679,63 @@ static noinline int lock_stripe_add(struct btrfs_raid_bio *rbio)
|
|||
struct btrfs_raid_bio *cache_drop = NULL;
|
||||
int ret = 0;
|
||||
|
||||
h = rbio->fs_info->stripe_hash_table->table + rbio_bucket(rbio);
|
||||
|
||||
spin_lock_irqsave(&h->lock, flags);
|
||||
list_for_each_entry(cur, &h->hash_list, hash_list) {
|
||||
if (cur->bbio->raid_map[0] == rbio->bbio->raid_map[0]) {
|
||||
spin_lock(&cur->bio_list_lock);
|
||||
if (cur->bbio->raid_map[0] != rbio->bbio->raid_map[0])
|
||||
continue;
|
||||
|
||||
/* can we steal this cached rbio's pages? */
|
||||
if (bio_list_empty(&cur->bio_list) &&
|
||||
list_empty(&cur->plug_list) &&
|
||||
test_bit(RBIO_CACHE_BIT, &cur->flags) &&
|
||||
!test_bit(RBIO_RMW_LOCKED_BIT, &cur->flags)) {
|
||||
list_del_init(&cur->hash_list);
|
||||
refcount_dec(&cur->refs);
|
||||
spin_lock(&cur->bio_list_lock);
|
||||
|
||||
steal_rbio(cur, rbio);
|
||||
cache_drop = cur;
|
||||
spin_unlock(&cur->bio_list_lock);
|
||||
/* Can we steal this cached rbio's pages? */
|
||||
if (bio_list_empty(&cur->bio_list) &&
|
||||
list_empty(&cur->plug_list) &&
|
||||
test_bit(RBIO_CACHE_BIT, &cur->flags) &&
|
||||
!test_bit(RBIO_RMW_LOCKED_BIT, &cur->flags)) {
|
||||
list_del_init(&cur->hash_list);
|
||||
refcount_dec(&cur->refs);
|
||||
|
||||
goto lockit;
|
||||
}
|
||||
steal_rbio(cur, rbio);
|
||||
cache_drop = cur;
|
||||
spin_unlock(&cur->bio_list_lock);
|
||||
|
||||
/* can we merge into the lock owner? */
|
||||
if (rbio_can_merge(cur, rbio)) {
|
||||
merge_rbio(cur, rbio);
|
||||
goto lockit;
|
||||
}
|
||||
|
||||
/* Can we merge into the lock owner? */
|
||||
if (rbio_can_merge(cur, rbio)) {
|
||||
merge_rbio(cur, rbio);
|
||||
spin_unlock(&cur->bio_list_lock);
|
||||
freeit = rbio;
|
||||
ret = 1;
|
||||
goto out;
|
||||
}
|
||||
|
||||
|
||||
/*
|
||||
* We couldn't merge with the running rbio, see if we can merge
|
||||
* with the pending ones. We don't have to check for rmw_locked
|
||||
* because there is no way they are inside finish_rmw right now
|
||||
*/
|
||||
list_for_each_entry(pending, &cur->plug_list, plug_list) {
|
||||
if (rbio_can_merge(pending, rbio)) {
|
||||
merge_rbio(pending, rbio);
|
||||
spin_unlock(&cur->bio_list_lock);
|
||||
freeit = rbio;
|
||||
ret = 1;
|
||||
goto out;
|
||||
}
|
||||
|
||||
|
||||
/*
|
||||
* we couldn't merge with the running
|
||||
* rbio, see if we can merge with the
|
||||
* pending ones. We don't have to
|
||||
* check for rmw_locked because there
|
||||
* is no way they are inside finish_rmw
|
||||
* right now
|
||||
*/
|
||||
list_for_each_entry(pending, &cur->plug_list,
|
||||
plug_list) {
|
||||
if (rbio_can_merge(pending, rbio)) {
|
||||
merge_rbio(pending, rbio);
|
||||
spin_unlock(&cur->bio_list_lock);
|
||||
freeit = rbio;
|
||||
ret = 1;
|
||||
goto out;
|
||||
}
|
||||
}
|
||||
|
||||
/* no merging, put us on the tail of the plug list,
|
||||
* our rbio will be started with the currently
|
||||
* running rbio unlocks
|
||||
*/
|
||||
list_add_tail(&rbio->plug_list, &cur->plug_list);
|
||||
spin_unlock(&cur->bio_list_lock);
|
||||
ret = 1;
|
||||
goto out;
|
||||
}
|
||||
|
||||
/*
|
||||
* No merging, put us on the tail of the plug list, our rbio
|
||||
* will be started with the currently running rbio unlocks
|
||||
*/
|
||||
list_add_tail(&rbio->plug_list, &cur->plug_list);
|
||||
spin_unlock(&cur->bio_list_lock);
|
||||
ret = 1;
|
||||
goto out;
|
||||
}
|
||||
lockit:
|
||||
refcount_inc(&rbio->refs);
|
||||
|
@ -1743,8 +1741,7 @@ static void btrfs_raid_unplug(struct blk_plug_cb *cb, bool from_schedule)
|
|||
plug = container_of(cb, struct btrfs_plug_cb, cb);
|
||||
|
||||
if (from_schedule) {
|
||||
btrfs_init_work(&plug->work, btrfs_rmw_helper,
|
||||
unplug_work, NULL, NULL);
|
||||
btrfs_init_work(&plug->work, unplug_work, NULL, NULL);
|
||||
btrfs_queue_work(plug->info->rmw_workers,
|
||||
&plug->work);
|
||||
return;
|
||||
|
|
|
@ -227,7 +227,7 @@ static struct reada_zone *reada_find_zone(struct btrfs_device *dev, u64 logical,
|
|||
struct btrfs_fs_info *fs_info = dev->fs_info;
|
||||
int ret;
|
||||
struct reada_zone *zone;
|
||||
struct btrfs_block_group_cache *cache = NULL;
|
||||
struct btrfs_block_group *cache = NULL;
|
||||
u64 start;
|
||||
u64 end;
|
||||
int i;
|
||||
|
@ -248,8 +248,8 @@ static struct reada_zone *reada_find_zone(struct btrfs_device *dev, u64 logical,
|
|||
if (!cache)
|
||||
return NULL;
|
||||
|
||||
start = cache->key.objectid;
|
||||
end = start + cache->key.offset - 1;
|
||||
start = cache->start;
|
||||
end = start + cache->length - 1;
|
||||
btrfs_put_block_group(cache);
|
||||
|
||||
zone = kzalloc(sizeof(*zone), GFP_KERNEL);
|
||||
|
@ -752,21 +752,19 @@ static int reada_start_machine_dev(struct btrfs_device *dev)
|
|||
static void reada_start_machine_worker(struct btrfs_work *work)
|
||||
{
|
||||
struct reada_machine_work *rmw;
|
||||
struct btrfs_fs_info *fs_info;
|
||||
int old_ioprio;
|
||||
|
||||
rmw = container_of(work, struct reada_machine_work, work);
|
||||
fs_info = rmw->fs_info;
|
||||
|
||||
kfree(rmw);
|
||||
|
||||
old_ioprio = IOPRIO_PRIO_VALUE(task_nice_ioclass(current),
|
||||
task_nice_ioprio(current));
|
||||
set_task_ioprio(current, BTRFS_IOPRIO_READA);
|
||||
__reada_start_machine(fs_info);
|
||||
__reada_start_machine(rmw->fs_info);
|
||||
set_task_ioprio(current, old_ioprio);
|
||||
|
||||
atomic_dec(&fs_info->reada_works_cnt);
|
||||
atomic_dec(&rmw->fs_info->reada_works_cnt);
|
||||
|
||||
kfree(rmw);
|
||||
}
|
||||
|
||||
static void __reada_start_machine(struct btrfs_fs_info *fs_info)
|
||||
|
@ -821,8 +819,7 @@ static void reada_start_machine(struct btrfs_fs_info *fs_info)
|
|||
/* FIXME we cannot handle this properly right now */
|
||||
BUG();
|
||||
}
|
||||
btrfs_init_work(&rmw->work, btrfs_readahead_helper,
|
||||
reada_start_machine_worker, NULL, NULL);
|
||||
btrfs_init_work(&rmw->work, reada_start_machine_worker, NULL, NULL);
|
||||
rmw->fs_info = fs_info;
|
||||
|
||||
btrfs_queue_work(fs_info->readahead_workers, &rmw->work);
|
||||
|
|
|
@ -147,7 +147,7 @@ struct file_extent_cluster {
|
|||
|
||||
struct reloc_control {
|
||||
/* block group to relocate */
|
||||
struct btrfs_block_group_cache *block_group;
|
||||
struct btrfs_block_group *block_group;
|
||||
/* extent tree */
|
||||
struct btrfs_root *extent_root;
|
||||
/* inode for moving data */
|
||||
|
@ -1560,11 +1560,10 @@ again:
|
|||
return NULL;
|
||||
}
|
||||
|
||||
static int in_block_group(u64 bytenr,
|
||||
struct btrfs_block_group_cache *block_group)
|
||||
static int in_block_group(u64 bytenr, struct btrfs_block_group *block_group)
|
||||
{
|
||||
if (bytenr >= block_group->key.objectid &&
|
||||
bytenr < block_group->key.objectid + block_group->key.offset)
|
||||
if (bytenr >= block_group->start &&
|
||||
bytenr < block_group->start + block_group->length)
|
||||
return 1;
|
||||
return 0;
|
||||
}
|
||||
|
@ -2246,7 +2245,7 @@ static noinline_for_stack int merge_reloc_root(struct reloc_control *rc,
|
|||
|
||||
if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
|
||||
level = btrfs_root_level(root_item);
|
||||
extent_buffer_get(reloc_root->node);
|
||||
atomic_inc(&reloc_root->node->refs);
|
||||
path->nodes[level] = reloc_root->node;
|
||||
path->slots[level] = 0;
|
||||
} else {
|
||||
|
@ -3195,7 +3194,6 @@ static noinline_for_stack
|
|||
int setup_extent_mapping(struct inode *inode, u64 start, u64 end,
|
||||
u64 block_start)
|
||||
{
|
||||
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
|
||||
struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
|
||||
struct extent_map *em;
|
||||
int ret = 0;
|
||||
|
@ -3208,7 +3206,6 @@ int setup_extent_mapping(struct inode *inode, u64 start, u64 end,
|
|||
em->len = end + 1 - start;
|
||||
em->block_len = em->len;
|
||||
em->block_start = block_start;
|
||||
em->bdev = fs_info->fs_devices->latest_bdev;
|
||||
set_bit(EXTENT_FLAG_PINNED, &em->flags);
|
||||
|
||||
lock_extent(&BTRFS_I(inode)->io_tree, start, end);
|
||||
|
@ -3544,7 +3541,7 @@ static int block_use_full_backref(struct reloc_control *rc,
|
|||
}
|
||||
|
||||
static int delete_block_group_cache(struct btrfs_fs_info *fs_info,
|
||||
struct btrfs_block_group_cache *block_group,
|
||||
struct btrfs_block_group *block_group,
|
||||
struct inode *inode,
|
||||
u64 ino)
|
||||
{
|
||||
|
@ -3560,7 +3557,7 @@ static int delete_block_group_cache(struct btrfs_fs_info *fs_info,
|
|||
key.type = BTRFS_INODE_ITEM_KEY;
|
||||
key.offset = 0;
|
||||
|
||||
inode = btrfs_iget(fs_info->sb, &key, root, NULL);
|
||||
inode = btrfs_iget(fs_info->sb, &key, root);
|
||||
if (IS_ERR(inode))
|
||||
return -ENOENT;
|
||||
|
||||
|
@ -3863,7 +3860,7 @@ int find_next_extent(struct reloc_control *rc, struct btrfs_path *path,
|
|||
u64 start, end, last;
|
||||
int ret;
|
||||
|
||||
last = rc->block_group->key.objectid + rc->block_group->key.offset;
|
||||
last = rc->block_group->start + rc->block_group->length;
|
||||
while (1) {
|
||||
cond_resched();
|
||||
if (rc->search_start >= last) {
|
||||
|
@ -3980,7 +3977,7 @@ int prepare_to_relocate(struct reloc_control *rc)
|
|||
return -ENOMEM;
|
||||
|
||||
memset(&rc->cluster, 0, sizeof(rc->cluster));
|
||||
rc->search_start = rc->block_group->key.objectid;
|
||||
rc->search_start = rc->block_group->start;
|
||||
rc->extents_found = 0;
|
||||
rc->nodes_relocated = 0;
|
||||
rc->merging_rsv_size = 0;
|
||||
|
@ -4219,7 +4216,7 @@ out:
|
|||
*/
|
||||
static noinline_for_stack
|
||||
struct inode *create_reloc_inode(struct btrfs_fs_info *fs_info,
|
||||
struct btrfs_block_group_cache *group)
|
||||
struct btrfs_block_group *group)
|
||||
{
|
||||
struct inode *inode = NULL;
|
||||
struct btrfs_trans_handle *trans;
|
||||
|
@ -4246,9 +4243,9 @@ struct inode *create_reloc_inode(struct btrfs_fs_info *fs_info,
|
|||
key.objectid = objectid;
|
||||
key.type = BTRFS_INODE_ITEM_KEY;
|
||||
key.offset = 0;
|
||||
inode = btrfs_iget(fs_info->sb, &key, root, NULL);
|
||||
inode = btrfs_iget(fs_info->sb, &key, root);
|
||||
BUG_ON(IS_ERR(inode));
|
||||
BTRFS_I(inode)->index_cnt = group->key.objectid;
|
||||
BTRFS_I(inode)->index_cnt = group->start;
|
||||
|
||||
err = btrfs_orphan_add(trans, BTRFS_I(inode));
|
||||
out:
|
||||
|
@ -4283,7 +4280,7 @@ static struct reloc_control *alloc_reloc_control(struct btrfs_fs_info *fs_info)
|
|||
* Print the block group being relocated
|
||||
*/
|
||||
static void describe_relocation(struct btrfs_fs_info *fs_info,
|
||||
struct btrfs_block_group_cache *block_group)
|
||||
struct btrfs_block_group *block_group)
|
||||
{
|
||||
char buf[128] = {'\0'};
|
||||
|
||||
|
@ -4291,7 +4288,7 @@ static void describe_relocation(struct btrfs_fs_info *fs_info,
|
|||
|
||||
btrfs_info(fs_info,
|
||||
"relocating block group %llu flags %s",
|
||||
block_group->key.objectid, buf);
|
||||
block_group->start, buf);
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -4299,7 +4296,7 @@ static void describe_relocation(struct btrfs_fs_info *fs_info,
|
|||
*/
|
||||
int btrfs_relocate_block_group(struct btrfs_fs_info *fs_info, u64 group_start)
|
||||
{
|
||||
struct btrfs_block_group_cache *bg;
|
||||
struct btrfs_block_group *bg;
|
||||
struct btrfs_root *extent_root = fs_info->extent_root;
|
||||
struct reloc_control *rc;
|
||||
struct inode *inode;
|
||||
|
@ -4326,7 +4323,7 @@ int btrfs_relocate_block_group(struct btrfs_fs_info *fs_info, u64 group_start)
|
|||
rc->extent_root = extent_root;
|
||||
rc->block_group = bg;
|
||||
|
||||
ret = btrfs_inc_block_group_ro(rc->block_group);
|
||||
ret = btrfs_inc_block_group_ro(rc->block_group, true);
|
||||
if (ret) {
|
||||
err = ret;
|
||||
goto out;
|
||||
|
@ -4364,8 +4361,8 @@ int btrfs_relocate_block_group(struct btrfs_fs_info *fs_info, u64 group_start)
|
|||
btrfs_wait_block_group_reservations(rc->block_group);
|
||||
btrfs_wait_nocow_writers(rc->block_group);
|
||||
btrfs_wait_ordered_roots(fs_info, U64_MAX,
|
||||
rc->block_group->key.objectid,
|
||||
rc->block_group->key.offset);
|
||||
rc->block_group->start,
|
||||
rc->block_group->length);
|
||||
|
||||
while (1) {
|
||||
mutex_lock(&fs_info->cleaner_mutex);
|
||||
|
@ -4405,7 +4402,7 @@ int btrfs_relocate_block_group(struct btrfs_fs_info *fs_info, u64 group_start)
|
|||
|
||||
WARN_ON(rc->block_group->pinned > 0);
|
||||
WARN_ON(rc->block_group->reserved > 0);
|
||||
WARN_ON(btrfs_block_group_used(&rc->block_group->item) > 0);
|
||||
WARN_ON(rc->block_group->used > 0);
|
||||
out:
|
||||
if (err && rw)
|
||||
btrfs_dec_block_group_ro(rc->block_group);
|
||||
|
@ -4688,7 +4685,7 @@ int btrfs_reloc_cow_block(struct btrfs_trans_handle *trans,
|
|||
node->new_bytenr != buf->start);
|
||||
|
||||
drop_node_buffer(node);
|
||||
extent_buffer_get(cow);
|
||||
atomic_inc(&cow->refs);
|
||||
node->eb = cow;
|
||||
node->new_bytenr = cow->start;
|
||||
|
||||
|
|
|
@ -389,8 +389,7 @@ static struct full_stripe_lock *search_full_stripe_lock(
|
|||
*
|
||||
* Caller must ensure @cache is a RAID56 block group.
|
||||
*/
|
||||
static u64 get_full_stripe_logical(struct btrfs_block_group_cache *cache,
|
||||
u64 bytenr)
|
||||
static u64 get_full_stripe_logical(struct btrfs_block_group *cache, u64 bytenr)
|
||||
{
|
||||
u64 ret;
|
||||
|
||||
|
@ -404,8 +403,8 @@ static u64 get_full_stripe_logical(struct btrfs_block_group_cache *cache,
|
|||
* round_down() can only handle power of 2, while RAID56 full
|
||||
* stripe length can be 64KiB * n, so we need to manually round down.
|
||||
*/
|
||||
ret = div64_u64(bytenr - cache->key.objectid, cache->full_stripe_len) *
|
||||
cache->full_stripe_len + cache->key.objectid;
|
||||
ret = div64_u64(bytenr - cache->start, cache->full_stripe_len) *
|
||||
cache->full_stripe_len + cache->start;
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
@ -423,7 +422,7 @@ static u64 get_full_stripe_logical(struct btrfs_block_group_cache *cache,
|
|||
static int lock_full_stripe(struct btrfs_fs_info *fs_info, u64 bytenr,
|
||||
bool *locked_ret)
|
||||
{
|
||||
struct btrfs_block_group_cache *bg_cache;
|
||||
struct btrfs_block_group *bg_cache;
|
||||
struct btrfs_full_stripe_locks_tree *locks_root;
|
||||
struct full_stripe_lock *existing;
|
||||
u64 fstripe_start;
|
||||
|
@ -470,7 +469,7 @@ out:
|
|||
static int unlock_full_stripe(struct btrfs_fs_info *fs_info, u64 bytenr,
|
||||
bool locked)
|
||||
{
|
||||
struct btrfs_block_group_cache *bg_cache;
|
||||
struct btrfs_block_group *bg_cache;
|
||||
struct btrfs_full_stripe_locks_tree *locks_root;
|
||||
struct full_stripe_lock *fstripe_lock;
|
||||
u64 fstripe_start;
|
||||
|
@ -598,8 +597,8 @@ static noinline_for_stack struct scrub_ctx *scrub_setup_ctx(
|
|||
sbio->index = i;
|
||||
sbio->sctx = sctx;
|
||||
sbio->page_count = 0;
|
||||
btrfs_init_work(&sbio->work, btrfs_scrub_helper,
|
||||
scrub_bio_end_io_worker, NULL, NULL);
|
||||
btrfs_init_work(&sbio->work, scrub_bio_end_io_worker, NULL,
|
||||
NULL);
|
||||
|
||||
if (i != SCRUB_BIOS_PER_SCTX - 1)
|
||||
sctx->bios[i]->next_free = i + 1;
|
||||
|
@ -1720,8 +1719,7 @@ static void scrub_wr_bio_end_io(struct bio *bio)
|
|||
sbio->status = bio->bi_status;
|
||||
sbio->bio = bio;
|
||||
|
||||
btrfs_init_work(&sbio->work, btrfs_scrubwrc_helper,
|
||||
scrub_wr_bio_end_io_worker, NULL, NULL);
|
||||
btrfs_init_work(&sbio->work, scrub_wr_bio_end_io_worker, NULL, NULL);
|
||||
btrfs_queue_work(fs_info->scrub_wr_completion_workers, &sbio->work);
|
||||
}
|
||||
|
||||
|
@ -2149,14 +2147,13 @@ static void scrub_missing_raid56_worker(struct btrfs_work *work)
|
|||
scrub_write_block_to_dev_replace(sblock);
|
||||
}
|
||||
|
||||
scrub_block_put(sblock);
|
||||
|
||||
if (sctx->is_dev_replace && sctx->flush_all_writes) {
|
||||
mutex_lock(&sctx->wr_lock);
|
||||
scrub_wr_submit(sctx);
|
||||
mutex_unlock(&sctx->wr_lock);
|
||||
}
|
||||
|
||||
scrub_block_put(sblock);
|
||||
scrub_pending_bio_dec(sctx);
|
||||
}
|
||||
|
||||
|
@ -2204,8 +2201,7 @@ static void scrub_missing_raid56_pages(struct scrub_block *sblock)
|
|||
raid56_add_scrub_pages(rbio, spage->page, spage->logical);
|
||||
}
|
||||
|
||||
btrfs_init_work(&sblock->work, btrfs_scrub_helper,
|
||||
scrub_missing_raid56_worker, NULL, NULL);
|
||||
btrfs_init_work(&sblock->work, scrub_missing_raid56_worker, NULL, NULL);
|
||||
scrub_block_get(sblock);
|
||||
scrub_pending_bio_inc(sctx);
|
||||
raid56_submit_missing_rbio(rbio);
|
||||
|
@ -2743,8 +2739,8 @@ static void scrub_parity_bio_endio(struct bio *bio)
|
|||
|
||||
bio_put(bio);
|
||||
|
||||
btrfs_init_work(&sparity->work, btrfs_scrubparity_helper,
|
||||
scrub_parity_bio_endio_worker, NULL, NULL);
|
||||
btrfs_init_work(&sparity->work, scrub_parity_bio_endio_worker, NULL,
|
||||
NULL);
|
||||
btrfs_queue_work(fs_info->scrub_parity_workers, &sparity->work);
|
||||
}
|
||||
|
||||
|
@ -3420,7 +3416,7 @@ static noinline_for_stack int scrub_chunk(struct scrub_ctx *sctx,
|
|||
struct btrfs_device *scrub_dev,
|
||||
u64 chunk_offset, u64 length,
|
||||
u64 dev_offset,
|
||||
struct btrfs_block_group_cache *cache)
|
||||
struct btrfs_block_group *cache)
|
||||
{
|
||||
struct btrfs_fs_info *fs_info = sctx->fs_info;
|
||||
struct extent_map_tree *map_tree = &fs_info->mapping_tree;
|
||||
|
@ -3484,7 +3480,7 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
|
|||
struct extent_buffer *l;
|
||||
struct btrfs_key key;
|
||||
struct btrfs_key found_key;
|
||||
struct btrfs_block_group_cache *cache;
|
||||
struct btrfs_block_group *cache;
|
||||
struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
|
||||
|
||||
path = btrfs_alloc_path();
|
||||
|
@ -3563,46 +3559,26 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
|
|||
* -> btrfs_scrub_pause()
|
||||
*/
|
||||
scrub_pause_on(fs_info);
|
||||
ret = btrfs_inc_block_group_ro(cache);
|
||||
if (!ret && sctx->is_dev_replace) {
|
||||
/*
|
||||
* If we are doing a device replace wait for any tasks
|
||||
* that started delalloc right before we set the block
|
||||
* group to RO mode, as they might have just allocated
|
||||
* an extent from it or decided they could do a nocow
|
||||
* write. And if any such tasks did that, wait for their
|
||||
* ordered extents to complete and then commit the
|
||||
* current transaction, so that we can later see the new
|
||||
* extent items in the extent tree - the ordered extents
|
||||
* create delayed data references (for cow writes) when
|
||||
* they complete, which will be run and insert the
|
||||
* corresponding extent items into the extent tree when
|
||||
* we commit the transaction they used when running
|
||||
* inode.c:btrfs_finish_ordered_io(). We later use
|
||||
* the commit root of the extent tree to find extents
|
||||
* to copy from the srcdev into the tgtdev, and we don't
|
||||
* want to miss any new extents.
|
||||
*/
|
||||
btrfs_wait_block_group_reservations(cache);
|
||||
btrfs_wait_nocow_writers(cache);
|
||||
ret = btrfs_wait_ordered_roots(fs_info, U64_MAX,
|
||||
cache->key.objectid,
|
||||
cache->key.offset);
|
||||
if (ret > 0) {
|
||||
struct btrfs_trans_handle *trans;
|
||||
|
||||
trans = btrfs_join_transaction(root);
|
||||
if (IS_ERR(trans))
|
||||
ret = PTR_ERR(trans);
|
||||
else
|
||||
ret = btrfs_commit_transaction(trans);
|
||||
if (ret) {
|
||||
scrub_pause_off(fs_info);
|
||||
btrfs_put_block_group(cache);
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
/*
 * Don't do chunk preallocation for scrub.
 *
 * This is especially important for SYSTEM bgs, or we can hit
 * -EFBIG from btrfs_finish_chunk_alloc() like:
 * 1. The only SYSTEM bg is marked RO.
 *    Since SYSTEM bg is small, that's pretty common.
 * 2. A new SYSTEM bg will be allocated,
 *    because the regular (non-scrub) path would allocate a new chunk.
 * 3. The new SYSTEM bg is empty and will get cleaned up.
 *    Before cleanup really happens, it's marked RO again.
 * 4. The empty SYSTEM bg gets scrubbed.
 *    We go back to 2.
 *
 * This can easily boost the amount of SYSTEM chunks if the cleaner
 * thread can't be triggered fast enough, and use up all space
 * of btrfs_super_block::sys_chunk_array.
 */
|
||||
ret = btrfs_inc_block_group_ro(cache, false);
|
||||
scrub_pause_off(fs_info);
|
||||
|
||||
if (ret == 0) {
|
||||
|
@ -3623,7 +3599,7 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
|
|||
break;
|
||||
}
|
||||
|
||||
down_write(&fs_info->dev_replace.rwsem);
|
||||
down_write(&dev_replace->rwsem);
|
||||
dev_replace->cursor_right = found_key.offset + length;
|
||||
dev_replace->cursor_left = found_key.offset;
|
||||
dev_replace->item_needs_writeback = 1;
|
||||
|
@ -3664,10 +3640,10 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
|
|||
|
||||
scrub_pause_off(fs_info);
|
||||
|
||||
down_write(&fs_info->dev_replace.rwsem);
|
||||
down_write(&dev_replace->rwsem);
|
||||
dev_replace->cursor_left = dev_replace->cursor_right;
|
||||
dev_replace->item_needs_writeback = 1;
|
||||
up_write(&fs_info->dev_replace.rwsem);
|
||||
up_write(&dev_replace->rwsem);
|
||||
|
||||
if (ro_set)
|
||||
btrfs_dec_block_group_ro(cache);
|
||||
|
@ -3681,7 +3657,7 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
|
|||
*/
|
||||
spin_lock(&cache->lock);
|
||||
if (!cache->removed && !cache->ro && cache->reserved == 0 &&
|
||||
btrfs_block_group_used(&cache->item) == 0) {
|
||||
cache->used == 0) {
|
||||
spin_unlock(&cache->lock);
|
||||
btrfs_mark_bg_unused(cache);
|
||||
} else {
|
||||
|
|
|
@ -24,6 +24,14 @@
|
|||
#include "transaction.h"
|
||||
#include "compression.h"
|
||||
|
||||
/*
|
||||
* Maximum number of references an extent can have in order for us to attempt to
|
||||
* issue clone operations instead of write operations. This currently exists to
|
||||
* avoid hitting limitations of the backreference walking code (taking a lot of
|
||||
 * time and using too much memory for extents with a large number of references).
|
||||
*/
|
||||
#define SEND_MAX_EXTENT_REFS 64
|
||||
|
||||
/*
|
||||
* A fs_path is a helper to dynamically build path names with unknown size.
|
||||
* It reallocates the internal buffer on demand.
|
||||
|
@ -1248,12 +1256,20 @@ static int __iterate_backrefs(u64 ino, u64 offset, u64 root, void *ctx_)
|
|||
*/
|
||||
if (found->root == bctx->sctx->send_root) {
|
||||
/*
|
||||
* TODO for the moment we don't accept clones from the inode
|
||||
* that is currently send. We may change this when
|
||||
* BTRFS_IOC_CLONE_RANGE supports cloning from and to the same
|
||||
* file.
|
||||
* If the source inode was not yet processed we can't issue a
|
||||
* clone operation, as the source extent does not exist yet at
|
||||
* the destination of the stream.
|
||||
*/
|
||||
if (ino >= bctx->cur_objectid)
|
||||
if (ino > bctx->cur_objectid)
|
||||
return 0;
|
||||
/*
|
||||
* We clone from the inode currently being sent as long as the
|
||||
* source extent is already processed, otherwise we could try
|
||||
* to clone from an extent that does not exist yet at the
|
||||
* destination of the stream.
|
||||
*/
|
||||
if (ino == bctx->cur_objectid &&
|
||||
offset >= bctx->sctx->cur_inode_next_write_offset)
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -1302,6 +1318,7 @@ static int find_extent_clone(struct send_ctx *sctx,
|
|||
struct clone_root *cur_clone_root;
|
||||
struct btrfs_key found_key;
|
||||
struct btrfs_path *tmp_path;
|
||||
struct btrfs_extent_item *ei;
|
||||
int compressed;
|
||||
u32 i;
|
||||
|
||||
|
@ -1349,7 +1366,6 @@ static int find_extent_clone(struct send_ctx *sctx,
|
|||
ret = extent_from_logical(fs_info, disk_byte, tmp_path,
|
||||
&found_key, &flags);
|
||||
up_read(&fs_info->commit_root_sem);
|
||||
btrfs_release_path(tmp_path);
|
||||
|
||||
if (ret < 0)
|
||||
goto out;
|
||||
|
@ -1358,6 +1374,21 @@ static int find_extent_clone(struct send_ctx *sctx,
|
|||
goto out;
|
||||
}
|
||||
|
||||
ei = btrfs_item_ptr(tmp_path->nodes[0], tmp_path->slots[0],
|
||||
struct btrfs_extent_item);
|
||||
/*
|
||||
* Backreference walking (iterate_extent_inodes() below) is currently
|
||||
* too expensive when an extent has a large number of references, both
|
||||
* in time spent and used memory. So for now just fallback to write
|
||||
* operations instead of clone operations when an extent has more than
|
||||
* a certain amount of references.
|
||||
*/
|
||||
if (btrfs_extent_refs(tmp_path->nodes[0], ei) > SEND_MAX_EXTENT_REFS) {
|
||||
ret = -ENOENT;
|
||||
goto out;
|
||||
}
|
||||
btrfs_release_path(tmp_path);
|
||||
|
||||
/*
|
||||
* Setup the clone roots.
|
||||
*/
|
||||
|
@ -4779,7 +4810,7 @@ static ssize_t fill_read_buf(struct send_ctx *sctx, u64 offset, u32 len)
|
|||
key.type = BTRFS_INODE_ITEM_KEY;
|
||||
key.offset = 0;
|
||||
|
||||
inode = btrfs_iget(fs_info->sb, &key, root, NULL);
|
||||
inode = btrfs_iget(fs_info->sb, &key, root);
|
||||
if (IS_ERR(inode))
|
||||
return PTR_ERR(inode);
|
||||
|
||||
|
|
|
@ -10,7 +10,7 @@
|
|||
#include "transaction.h"
|
||||
#include "block-group.h"
|
||||
|
||||
u64 btrfs_space_info_used(struct btrfs_space_info *s_info,
|
||||
u64 __pure btrfs_space_info_used(struct btrfs_space_info *s_info,
|
||||
bool may_use_included)
|
||||
{
|
||||
ASSERT(s_info);
|
||||
|
@ -58,7 +58,6 @@ static int create_space_info(struct btrfs_fs_info *info, u64 flags)
|
|||
spin_lock_init(&space_info->lock);
|
||||
space_info->flags = flags & BTRFS_BLOCK_GROUP_TYPE_MASK;
|
||||
space_info->force_alloc = CHUNK_ALLOC_NO_FORCE;
|
||||
init_waitqueue_head(&space_info->wait);
|
||||
INIT_LIST_HEAD(&space_info->ro_bgs);
|
||||
INIT_LIST_HEAD(&space_info->tickets);
|
||||
INIT_LIST_HEAD(&space_info->priority_tickets);
|
||||
|
@ -285,7 +284,7 @@ void btrfs_dump_space_info(struct btrfs_fs_info *fs_info,
|
|||
struct btrfs_space_info *info, u64 bytes,
|
||||
int dump_block_groups)
|
||||
{
|
||||
struct btrfs_block_group_cache *cache;
|
||||
struct btrfs_block_group *cache;
|
||||
int index = 0;
|
||||
|
||||
spin_lock(&info->lock);
|
||||
|
@ -301,8 +300,7 @@ again:
|
|||
spin_lock(&cache->lock);
|
||||
btrfs_info(fs_info,
|
||||
"block group %llu has %llu bytes, %llu used %llu pinned %llu reserved %s",
|
||||
cache->key.objectid, cache->key.offset,
|
||||
btrfs_block_group_used(&cache->item), cache->pinned,
|
||||
cache->start, cache->length, cache->used, cache->pinned,
|
||||
cache->reserved, cache->ro ? "[readonly]" : "");
|
||||
btrfs_dump_free_space(cache, bytes);
|
||||
spin_unlock(&cache->lock);
|
||||
|
|
|
@ -63,7 +63,6 @@ struct btrfs_space_info {
|
|||
struct rw_semaphore groups_sem;
|
||||
/* for block groups in our same type */
|
||||
struct list_head block_groups[BTRFS_NR_RAID_TYPES];
|
||||
wait_queue_head_t wait;
|
||||
|
||||
struct kobject kobj;
|
||||
struct kobject *block_group_kobjs[BTRFS_NR_RAID_TYPES];
|
||||
|
@ -116,7 +115,7 @@ void btrfs_update_space_info(struct btrfs_fs_info *info, u64 flags,
|
|||
struct btrfs_space_info **space_info);
|
||||
struct btrfs_space_info *btrfs_find_space_info(struct btrfs_fs_info *info,
|
||||
u64 flags);
|
||||
u64 btrfs_space_info_used(struct btrfs_space_info *s_info,
|
||||
u64 __pure btrfs_space_info_used(struct btrfs_space_info *s_info,
|
||||
bool may_use_included);
|
||||
void btrfs_clear_space_info_full(struct btrfs_fs_info *info);
|
||||
void btrfs_dump_space_info(struct btrfs_fs_info *fs_info,
|
||||
|
|
|
@ -66,7 +66,7 @@ static struct file_system_type btrfs_root_fs_type;
|
|||
|
||||
static int btrfs_remount(struct super_block *sb, int *flags, char *data);
|
||||
|
||||
const char *btrfs_decode_error(int errno)
|
||||
const char * __attribute_const__ btrfs_decode_error(int errno)
|
||||
{
|
||||
char *errstr = "unknown";
|
||||
|
||||
|
@ -187,7 +187,7 @@ static struct ratelimit_state printk_limits[] = {
|
|||
RATELIMIT_STATE_INIT(printk_limits[7], DEFAULT_RATELIMIT_INTERVAL, 100),
|
||||
};
|
||||
|
||||
void btrfs_printk(const struct btrfs_fs_info *fs_info, const char *fmt, ...)
|
||||
void __cold btrfs_printk(const struct btrfs_fs_info *fs_info, const char *fmt, ...)
|
||||
{
|
||||
char lvl[PRINTK_MAX_SINGLE_HEADER_LEN + 1] = "\0";
|
||||
struct va_format vaf;
|
||||
|
@ -1219,7 +1219,7 @@ static int btrfs_fill_super(struct super_block *sb,
|
|||
key.objectid = BTRFS_FIRST_FREE_OBJECTID;
|
||||
key.type = BTRFS_INODE_ITEM_KEY;
|
||||
key.offset = 0;
|
||||
inode = btrfs_iget(sb, &key, fs_info->fs_root, NULL);
|
||||
inode = btrfs_iget(sb, &key, fs_info->fs_root);
|
||||
if (IS_ERR(inode)) {
|
||||
err = PTR_ERR(inode);
|
||||
goto fail_close;
|
||||
|
@ -1669,7 +1669,6 @@ static void btrfs_resize_thread_pool(struct btrfs_fs_info *fs_info,
|
|||
|
||||
btrfs_workqueue_set_max(fs_info->workers, new_pool_size);
|
||||
btrfs_workqueue_set_max(fs_info->delalloc_workers, new_pool_size);
|
||||
btrfs_workqueue_set_max(fs_info->submit_workers, new_pool_size);
|
||||
btrfs_workqueue_set_max(fs_info->caching_workers, new_pool_size);
|
||||
btrfs_workqueue_set_max(fs_info->endio_workers, new_pool_size);
|
||||
btrfs_workqueue_set_max(fs_info->endio_meta_workers, new_pool_size);
|
||||
|
@@ -1936,6 +1935,10 @@ static inline int btrfs_calc_avail_data_space(struct btrfs_fs_info *fs_info,
                num_stripes = nr_devices;
        else if (type & BTRFS_BLOCK_GROUP_RAID1)
                num_stripes = 2;
        else if (type & BTRFS_BLOCK_GROUP_RAID1C3)
                num_stripes = 3;
        else if (type & BTRFS_BLOCK_GROUP_RAID1C4)
                num_stripes = 4;
        else if (type & BTRFS_BLOCK_GROUP_RAID10)
                num_stripes = 4;
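The statfs free-space estimate only needs a representative stripe count per profile; the new 3- and 4-copy RAID1 profiles slot in next to the existing cases. A user-space sketch of the same selection follows; it is illustrative only, not part of the patch, and the profile names are simplified.

/* Sketch of per-profile stripe count selection for a space estimate. */
#include <stdio.h>

enum bg_profile { SINGLE, RAID0, RAID1, RAID1C3, RAID1C4, RAID10 };

static int estimate_num_stripes(enum bg_profile type, int nr_devices)
{
        switch (type) {
        case RAID0:   return nr_devices;
        case RAID1:   return 2;
        case RAID1C3: return 3;
        case RAID1C4: return 4;
        case RAID10:  return 4;
        default:      return 1;
        }
}

int main(void)
{
        printf("raid1c3 on 6 devices -> %d stripes\n",
               estimate_num_stripes(RAID1C3, 6));
        return 0;
}
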
@ -2022,7 +2025,6 @@ static int btrfs_statfs(struct dentry *dentry, struct kstatfs *buf)
|
|||
{
|
||||
struct btrfs_fs_info *fs_info = btrfs_sb(dentry->d_sb);
|
||||
struct btrfs_super_block *disk_super = fs_info->super_copy;
|
||||
struct list_head *head = &fs_info->space_info;
|
||||
struct btrfs_space_info *found;
|
||||
u64 total_used = 0;
|
||||
u64 total_free_data = 0;
|
||||
|
@ -2036,7 +2038,7 @@ static int btrfs_statfs(struct dentry *dentry, struct kstatfs *buf)
|
|||
int mixed = 0;
|
||||
|
||||
rcu_read_lock();
|
||||
list_for_each_entry_rcu(found, head, list) {
|
||||
list_for_each_entry_rcu(found, &fs_info->space_info, list) {
|
||||
if (found->flags & BTRFS_BLOCK_GROUP_DATA) {
|
||||
int i;
|
||||
|
||||
|
@ -2360,10 +2362,14 @@ static int __init init_btrfs_fs(void)
|
|||
if (err)
|
||||
goto free_cachep;
|
||||
|
||||
err = extent_map_init();
|
||||
err = extent_state_cache_init();
|
||||
if (err)
|
||||
goto free_extent_io;
|
||||
|
||||
err = extent_map_init();
|
||||
if (err)
|
||||
goto free_extent_state_cache;
|
||||
|
||||
err = ordered_data_init();
|
||||
if (err)
|
||||
goto free_extent_map;
|
||||
|
@ -2422,6 +2428,8 @@ free_ordered_data:
|
|||
ordered_data_exit();
|
||||
free_extent_map:
|
||||
extent_map_exit();
|
||||
free_extent_state_cache:
|
||||
extent_state_cache_exit();
|
||||
free_extent_io:
|
||||
extent_io_exit();
|
||||
free_cachep:
|
||||
|
@ -2442,6 +2450,7 @@ static void __exit exit_btrfs_fs(void)
|
|||
btrfs_prelim_ref_exit();
|
||||
ordered_data_exit();
|
||||
extent_map_exit();
|
||||
extent_state_cache_exit();
|
||||
extent_io_exit();
|
||||
btrfs_interface_exit();
|
||||
btrfs_end_io_wq_exit();
|
||||
|
@@ -2456,3 +2465,6 @@ module_exit(exit_btrfs_fs)

MODULE_LICENSE("GPL");
MODULE_SOFTDEP("pre: crc32c");
MODULE_SOFTDEP("pre: xxhash64");
MODULE_SOFTDEP("pre: sha256");
MODULE_SOFTDEP("pre: blake2b-256");

@ -9,6 +9,7 @@
|
|||
#include <linux/spinlock.h>
|
||||
#include <linux/completion.h>
|
||||
#include <linux/bug.h>
|
||||
#include <crypto/hash.h>
|
||||
|
||||
#include "ctree.h"
|
||||
#include "disk-io.h"
|
||||
|
@ -258,6 +259,7 @@ BTRFS_FEAT_ATTR_INCOMPAT(skinny_metadata, SKINNY_METADATA);
|
|||
BTRFS_FEAT_ATTR_INCOMPAT(no_holes, NO_HOLES);
|
||||
BTRFS_FEAT_ATTR_INCOMPAT(metadata_uuid, METADATA_UUID);
|
||||
BTRFS_FEAT_ATTR_COMPAT_RO(free_space_tree, FREE_SPACE_TREE);
|
||||
BTRFS_FEAT_ATTR_INCOMPAT(raid1c34, RAID1C34);
|
||||
|
||||
static struct attribute *btrfs_supported_feature_attrs[] = {
|
||||
BTRFS_FEAT_ATTR_PTR(mixed_backref),
|
||||
|
@ -272,6 +274,7 @@ static struct attribute *btrfs_supported_feature_attrs[] = {
|
|||
BTRFS_FEAT_ATTR_PTR(no_holes),
|
||||
BTRFS_FEAT_ATTR_PTR(metadata_uuid),
|
||||
BTRFS_FEAT_ATTR_PTR(free_space_tree),
|
||||
BTRFS_FEAT_ATTR_PTR(raid1c34),
|
||||
NULL
|
||||
};
|
||||
|
||||
|
@@ -295,8 +298,30 @@ static ssize_t rmdir_subvol_show(struct kobject *kobj,
}
BTRFS_ATTR(static_feature, rmdir_subvol, rmdir_subvol_show);

static ssize_t supported_checksums_show(struct kobject *kobj,
                                        struct kobj_attribute *a, char *buf)
{
        ssize_t ret = 0;
        int i;

        for (i = 0; i < btrfs_get_num_csums(); i++) {
                /*
                 * This "trick" only works as long as 'enum btrfs_csum_type' has
                 * no holes in it
                 */
                ret += snprintf(buf + ret, PAGE_SIZE - ret, "%s%s",
                                (i == 0 ? "" : " "), btrfs_super_csum_name(i));

        }

        ret += snprintf(buf + ret, PAGE_SIZE - ret, "\n");
        return ret;
}
BTRFS_ATTR(static_feature, supported_checksums, supported_checksums_show);

static struct attribute *btrfs_supported_static_feature_attrs[] = {
        BTRFS_ATTR_PTR(static_feature, rmdir_subvol),
        BTRFS_ATTR_PTR(static_feature, supported_checksums),
        NULL
};
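The show callback above builds a single space-separated line into a page-sized buffer. A minimal user-space sketch of the same buffer-filling pattern follows; it is illustrative only, not part of the patch, and the checksum names and buffer size are assumptions.

/* Sketch of appending names into a bounded buffer, sysfs-show style. */
#include <stdio.h>

#define BUF_SIZE 4096   /* stands in for PAGE_SIZE */

static const char *const csum_names[] = { "crc32c", "xxhash64", "sha256", "blake2b" };

static int fill_names(char *buf, size_t size)
{
        int ret = 0;

        for (size_t i = 0; i < sizeof(csum_names) / sizeof(csum_names[0]); i++)
                ret += snprintf(buf + ret, size - ret, "%s%s",
                                i == 0 ? "" : " ", csum_names[i]);
        ret += snprintf(buf + ret, size - ret, "\n");
        return ret;
}

int main(void)
{
        char buf[BUF_SIZE];

        fill_names(buf, sizeof(buf));
        fputs(buf, stdout);
        return 0;
}
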
@ -372,16 +397,16 @@ static ssize_t raid_bytes_show(struct kobject *kobj,
|
|||
|
||||
{
|
||||
struct btrfs_space_info *sinfo = to_space_info(kobj->parent);
|
||||
struct btrfs_block_group_cache *block_group;
|
||||
struct btrfs_block_group *block_group;
|
||||
int index = btrfs_bg_flags_to_raid_index(to_raid_kobj(kobj)->flags);
|
||||
u64 val = 0;
|
||||
|
||||
down_read(&sinfo->groups_sem);
|
||||
list_for_each_entry(block_group, &sinfo->block_groups[index], list) {
|
||||
if (&attr->attr == BTRFS_ATTR_PTR(raid, total_bytes))
|
||||
val += block_group->key.offset;
|
||||
val += block_group->length;
|
||||
else
|
||||
val += btrfs_block_group_used(&block_group->item);
|
||||
val += block_group->used;
|
||||
}
|
||||
up_read(&sinfo->groups_sem);
|
||||
return snprintf(buf, PAGE_SIZE, "%llu\n", val);
|
||||
|
@ -604,6 +629,19 @@ static ssize_t btrfs_metadata_uuid_show(struct kobject *kobj,
|
|||
|
||||
BTRFS_ATTR(, metadata_uuid, btrfs_metadata_uuid_show);
|
||||
|
||||
static ssize_t btrfs_checksum_show(struct kobject *kobj,
|
||||
struct kobj_attribute *a, char *buf)
|
||||
{
|
||||
struct btrfs_fs_info *fs_info = to_fs_info(kobj);
|
||||
u16 csum_type = btrfs_super_csum_type(fs_info->super_copy);
|
||||
|
||||
return snprintf(buf, PAGE_SIZE, "%s (%s)\n",
|
||||
btrfs_super_csum_name(csum_type),
|
||||
crypto_shash_driver_name(fs_info->csum_shash));
|
||||
}
|
||||
|
||||
BTRFS_ATTR(, checksum, btrfs_checksum_show);
|
||||
|
||||
static const struct attribute *btrfs_attrs[] = {
|
||||
BTRFS_ATTR_PTR(, label),
|
||||
BTRFS_ATTR_PTR(, nodesize),
|
||||
|
@ -611,6 +649,7 @@ static const struct attribute *btrfs_attrs[] = {
|
|||
BTRFS_ATTR_PTR(, clone_alignment),
|
||||
BTRFS_ATTR_PTR(, quota_override),
|
||||
BTRFS_ATTR_PTR(, metadata_uuid),
|
||||
BTRFS_ATTR_PTR(, checksum),
|
||||
NULL,
|
||||
};
|
||||
|
||||
|
@ -822,7 +861,7 @@ static void init_feature_attrs(void)
|
|||
* Create a sysfs entry for a given block group type at path
|
||||
* /sys/fs/btrfs/UUID/allocation/data/TYPE
|
||||
*/
|
||||
void btrfs_sysfs_add_block_group_type(struct btrfs_block_group_cache *cache)
|
||||
void btrfs_sysfs_add_block_group_type(struct btrfs_block_group *cache)
|
||||
{
|
||||
struct btrfs_fs_info *fs_info = cache->fs_info;
|
||||
struct btrfs_space_info *space_info = cache->space_info;
|
||||
|
|
|
@ -32,7 +32,7 @@ int __init btrfs_init_sysfs(void);
|
|||
void __cold btrfs_exit_sysfs(void);
|
||||
int btrfs_sysfs_add_mounted(struct btrfs_fs_info *fs_info);
|
||||
void btrfs_sysfs_remove_mounted(struct btrfs_fs_info *fs_info);
|
||||
void btrfs_sysfs_add_block_group_type(struct btrfs_block_group_cache *cache);
|
||||
void btrfs_sysfs_add_block_group_type(struct btrfs_block_group *cache);
|
||||
int btrfs_sysfs_add_space_info_type(struct btrfs_fs_info *fs_info,
|
||||
struct btrfs_space_info *space_info);
|
||||
void btrfs_sysfs_remove_space_info(struct btrfs_space_info *space_info);
|
||||
|
|
|
@ -202,11 +202,11 @@ void btrfs_free_dummy_root(struct btrfs_root *root)
|
|||
kfree(root);
|
||||
}
|
||||
|
||||
struct btrfs_block_group_cache *
|
||||
struct btrfs_block_group *
|
||||
btrfs_alloc_dummy_block_group(struct btrfs_fs_info *fs_info,
|
||||
unsigned long length)
|
||||
{
|
||||
struct btrfs_block_group_cache *cache;
|
||||
struct btrfs_block_group *cache;
|
||||
|
||||
cache = kzalloc(sizeof(*cache), GFP_KERNEL);
|
||||
if (!cache)
|
||||
|
@ -218,9 +218,8 @@ btrfs_alloc_dummy_block_group(struct btrfs_fs_info *fs_info,
|
|||
return NULL;
|
||||
}
|
||||
|
||||
cache->key.objectid = 0;
|
||||
cache->key.offset = length;
|
||||
cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
|
||||
cache->start = 0;
|
||||
cache->length = length;
|
||||
cache->full_stripe_len = fs_info->sectorsize;
|
||||
cache->fs_info = fs_info;
|
||||
|
||||
|
@ -233,7 +232,7 @@ btrfs_alloc_dummy_block_group(struct btrfs_fs_info *fs_info,
|
|||
return cache;
|
||||
}
|
||||
|
||||
void btrfs_free_dummy_block_group(struct btrfs_block_group_cache *cache)
|
||||
void btrfs_free_dummy_block_group(struct btrfs_block_group *cache)
|
||||
{
|
||||
if (!cache)
|
||||
return;
|
||||
|
|
|
@ -41,9 +41,9 @@ struct inode *btrfs_new_test_inode(void);
|
|||
struct btrfs_fs_info *btrfs_alloc_dummy_fs_info(u32 nodesize, u32 sectorsize);
|
||||
void btrfs_free_dummy_fs_info(struct btrfs_fs_info *fs_info);
|
||||
void btrfs_free_dummy_root(struct btrfs_root *root);
|
||||
struct btrfs_block_group_cache *
|
||||
struct btrfs_block_group *
|
||||
btrfs_alloc_dummy_block_group(struct btrfs_fs_info *fs_info, unsigned long length);
|
||||
void btrfs_free_dummy_block_group(struct btrfs_block_group_cache *cache);
|
||||
void btrfs_free_dummy_block_group(struct btrfs_block_group *cache);
|
||||
void btrfs_init_dummy_trans(struct btrfs_trans_handle *trans,
|
||||
struct btrfs_fs_info *fs_info);
|
||||
#else
|
||||
|
|
|
@ -17,7 +17,7 @@
|
|||
* entry and remove space from either end and the middle, and make sure we can
|
||||
* remove space that covers adjacent extent entries.
|
||||
*/
|
||||
static int test_extents(struct btrfs_block_group_cache *cache)
|
||||
static int test_extents(struct btrfs_block_group *cache)
|
||||
{
|
||||
int ret = 0;
|
||||
|
||||
|
@ -87,8 +87,7 @@ static int test_extents(struct btrfs_block_group_cache *cache)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static int test_bitmaps(struct btrfs_block_group_cache *cache,
|
||||
u32 sectorsize)
|
||||
static int test_bitmaps(struct btrfs_block_group *cache, u32 sectorsize)
|
||||
{
|
||||
u64 next_bitmap_offset;
|
||||
int ret;
|
||||
|
@ -156,7 +155,7 @@ static int test_bitmaps(struct btrfs_block_group_cache *cache,
|
|||
}
|
||||
|
||||
/* This is the high grade jackassery */
|
||||
static int test_bitmaps_and_extents(struct btrfs_block_group_cache *cache,
|
||||
static int test_bitmaps_and_extents(struct btrfs_block_group *cache,
|
||||
u32 sectorsize)
|
||||
{
|
||||
u64 bitmap_offset = (u64)(BITS_PER_BITMAP * sectorsize);
|
||||
|
@ -331,7 +330,7 @@ static bool test_use_bitmap(struct btrfs_free_space_ctl *ctl,
|
|||
|
||||
/* Used by test_steal_space_from_bitmap_to_extent(). */
|
||||
static int
|
||||
check_num_extents_and_bitmaps(const struct btrfs_block_group_cache *cache,
|
||||
check_num_extents_and_bitmaps(const struct btrfs_block_group *cache,
|
||||
const int num_extents,
|
||||
const int num_bitmaps)
|
||||
{
|
||||
|
@ -351,7 +350,7 @@ check_num_extents_and_bitmaps(const struct btrfs_block_group_cache *cache,
|
|||
}
|
||||
|
||||
/* Used by test_steal_space_from_bitmap_to_extent(). */
|
||||
static int check_cache_empty(struct btrfs_block_group_cache *cache)
|
||||
static int check_cache_empty(struct btrfs_block_group *cache)
|
||||
{
|
||||
u64 offset;
|
||||
u64 max_extent_size;
|
||||
|
@ -393,7 +392,7 @@ static int check_cache_empty(struct btrfs_block_group_cache *cache)
|
|||
* requests.
|
||||
*/
|
||||
static int
|
||||
test_steal_space_from_bitmap_to_extent(struct btrfs_block_group_cache *cache,
|
||||
test_steal_space_from_bitmap_to_extent(struct btrfs_block_group *cache,
|
||||
u32 sectorsize)
|
||||
{
|
||||
int ret;
|
||||
|
@ -829,7 +828,7 @@ test_steal_space_from_bitmap_to_extent(struct btrfs_block_group_cache *cache,
|
|||
int btrfs_test_free_space_cache(u32 sectorsize, u32 nodesize)
|
||||
{
|
||||
struct btrfs_fs_info *fs_info;
|
||||
struct btrfs_block_group_cache *cache;
|
||||
struct btrfs_block_group *cache;
|
||||
struct btrfs_root *root = NULL;
|
||||
int ret = -ENOMEM;
|
||||
|
||||
|
|
|
@ -18,7 +18,7 @@ struct free_space_extent {
|
|||
|
||||
static int __check_free_space_extents(struct btrfs_trans_handle *trans,
|
||||
struct btrfs_fs_info *fs_info,
|
||||
struct btrfs_block_group_cache *cache,
|
||||
struct btrfs_block_group *cache,
|
||||
struct btrfs_path *path,
|
||||
const struct free_space_extent * const extents,
|
||||
unsigned int num_extents)
|
||||
|
@ -48,7 +48,7 @@ static int __check_free_space_extents(struct btrfs_trans_handle *trans,
|
|||
if (flags & BTRFS_FREE_SPACE_USING_BITMAPS) {
|
||||
if (path->slots[0] != 0)
|
||||
goto invalid;
|
||||
end = cache->key.objectid + cache->key.offset;
|
||||
end = cache->start + cache->length;
|
||||
i = 0;
|
||||
while (++path->slots[0] < btrfs_header_nritems(path->nodes[0])) {
|
||||
btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
|
||||
|
@ -107,7 +107,7 @@ invalid:
|
|||
|
||||
static int check_free_space_extents(struct btrfs_trans_handle *trans,
|
||||
struct btrfs_fs_info *fs_info,
|
||||
struct btrfs_block_group_cache *cache,
|
||||
struct btrfs_block_group *cache,
|
||||
struct btrfs_path *path,
|
||||
const struct free_space_extent * const extents,
|
||||
unsigned int num_extents)
|
||||
|
@ -150,12 +150,12 @@ static int check_free_space_extents(struct btrfs_trans_handle *trans,
|
|||
|
||||
static int test_empty_block_group(struct btrfs_trans_handle *trans,
|
||||
struct btrfs_fs_info *fs_info,
|
||||
struct btrfs_block_group_cache *cache,
|
||||
struct btrfs_block_group *cache,
|
||||
struct btrfs_path *path,
|
||||
u32 alignment)
|
||||
{
|
||||
const struct free_space_extent extents[] = {
|
||||
{cache->key.objectid, cache->key.offset},
|
||||
{cache->start, cache->length},
|
||||
};
|
||||
|
||||
return check_free_space_extents(trans, fs_info, cache, path,
|
||||
|
@ -164,7 +164,7 @@ static int test_empty_block_group(struct btrfs_trans_handle *trans,
|
|||
|
||||
static int test_remove_all(struct btrfs_trans_handle *trans,
|
||||
struct btrfs_fs_info *fs_info,
|
||||
struct btrfs_block_group_cache *cache,
|
||||
struct btrfs_block_group *cache,
|
||||
struct btrfs_path *path,
|
||||
u32 alignment)
|
||||
{
|
||||
|
@ -172,8 +172,8 @@ static int test_remove_all(struct btrfs_trans_handle *trans,
|
|||
int ret;
|
||||
|
||||
ret = __remove_from_free_space_tree(trans, cache, path,
|
||||
cache->key.objectid,
|
||||
cache->key.offset);
|
||||
cache->start,
|
||||
cache->length);
|
||||
if (ret) {
|
||||
test_err("could not remove free space");
|
||||
return ret;
|
||||
|
@ -185,18 +185,17 @@ static int test_remove_all(struct btrfs_trans_handle *trans,
|
|||
|
||||
static int test_remove_beginning(struct btrfs_trans_handle *trans,
|
||||
struct btrfs_fs_info *fs_info,
|
||||
struct btrfs_block_group_cache *cache,
|
||||
struct btrfs_block_group *cache,
|
||||
struct btrfs_path *path,
|
||||
u32 alignment)
|
||||
{
|
||||
const struct free_space_extent extents[] = {
|
||||
{cache->key.objectid + alignment,
|
||||
cache->key.offset - alignment},
|
||||
{cache->start + alignment, cache->length - alignment},
|
||||
};
|
||||
int ret;
|
||||
|
||||
ret = __remove_from_free_space_tree(trans, cache, path,
|
||||
cache->key.objectid, alignment);
|
||||
cache->start, alignment);
|
||||
if (ret) {
|
||||
test_err("could not remove free space");
|
||||
return ret;
|
||||
|
@ -209,19 +208,18 @@ static int test_remove_beginning(struct btrfs_trans_handle *trans,
|
|||
|
||||
static int test_remove_end(struct btrfs_trans_handle *trans,
|
||||
struct btrfs_fs_info *fs_info,
|
||||
struct btrfs_block_group_cache *cache,
|
||||
struct btrfs_block_group *cache,
|
||||
struct btrfs_path *path,
|
||||
u32 alignment)
|
||||
{
|
||||
const struct free_space_extent extents[] = {
|
||||
{cache->key.objectid, cache->key.offset - alignment},
|
||||
{cache->start, cache->length - alignment},
|
||||
};
|
||||
int ret;
|
||||
|
||||
ret = __remove_from_free_space_tree(trans, cache, path,
|
||||
cache->key.objectid +
|
||||
cache->key.offset - alignment,
|
||||
alignment);
|
||||
cache->start + cache->length - alignment,
|
||||
alignment);
|
||||
if (ret) {
|
||||
test_err("could not remove free space");
|
||||
return ret;
|
||||
|
@ -233,19 +231,18 @@ static int test_remove_end(struct btrfs_trans_handle *trans,
|
|||
|
||||
static int test_remove_middle(struct btrfs_trans_handle *trans,
|
||||
struct btrfs_fs_info *fs_info,
|
||||
struct btrfs_block_group_cache *cache,
|
||||
struct btrfs_block_group *cache,
|
||||
struct btrfs_path *path,
|
||||
u32 alignment)
|
||||
{
|
||||
const struct free_space_extent extents[] = {
|
||||
{cache->key.objectid, alignment},
|
||||
{cache->key.objectid + 2 * alignment,
|
||||
cache->key.offset - 2 * alignment},
|
||||
{cache->start, alignment},
|
||||
{cache->start + 2 * alignment, cache->length - 2 * alignment},
|
||||
};
|
||||
int ret;
|
||||
|
||||
ret = __remove_from_free_space_tree(trans, cache, path,
|
||||
cache->key.objectid + alignment,
|
||||
cache->start + alignment,
|
||||
alignment);
|
||||
if (ret) {
|
||||
test_err("could not remove free space");
|
||||
|
@ -258,24 +255,23 @@ static int test_remove_middle(struct btrfs_trans_handle *trans,
|
|||
|
||||
static int test_merge_left(struct btrfs_trans_handle *trans,
|
||||
struct btrfs_fs_info *fs_info,
|
||||
struct btrfs_block_group_cache *cache,
|
||||
struct btrfs_block_group *cache,
|
||||
struct btrfs_path *path,
|
||||
u32 alignment)
|
||||
{
|
||||
const struct free_space_extent extents[] = {
|
||||
{cache->key.objectid, 2 * alignment},
|
||||
{cache->start, 2 * alignment},
|
||||
};
|
||||
int ret;
|
||||
|
||||
ret = __remove_from_free_space_tree(trans, cache, path,
|
||||
cache->key.objectid,
|
||||
cache->key.offset);
|
||||
cache->start, cache->length);
|
||||
if (ret) {
|
||||
test_err("could not remove free space");
|
||||
return ret;
|
||||
}
|
||||
|
||||
ret = __add_to_free_space_tree(trans, cache, path, cache->key.objectid,
|
||||
ret = __add_to_free_space_tree(trans, cache, path, cache->start,
|
||||
alignment);
|
||||
if (ret) {
|
||||
test_err("could not add free space");
|
||||
|
@ -283,7 +279,7 @@ static int test_merge_left(struct btrfs_trans_handle *trans,
|
|||
}
|
||||
|
||||
ret = __add_to_free_space_tree(trans, cache, path,
|
||||
cache->key.objectid + alignment,
|
||||
cache->start + alignment,
|
||||
alignment);
|
||||
if (ret) {
|
||||
test_err("could not add free space");
|
||||
|
@ -296,25 +292,24 @@ static int test_merge_left(struct btrfs_trans_handle *trans,
|
|||
|
||||
static int test_merge_right(struct btrfs_trans_handle *trans,
|
||||
struct btrfs_fs_info *fs_info,
|
||||
struct btrfs_block_group_cache *cache,
|
||||
struct btrfs_block_group *cache,
|
||||
struct btrfs_path *path,
|
||||
u32 alignment)
|
||||
{
|
||||
const struct free_space_extent extents[] = {
|
||||
{cache->key.objectid + alignment, 2 * alignment},
|
||||
{cache->start + alignment, 2 * alignment},
|
||||
};
|
||||
int ret;
|
||||
|
||||
ret = __remove_from_free_space_tree(trans, cache, path,
|
||||
cache->key.objectid,
|
||||
cache->key.offset);
|
||||
cache->start, cache->length);
|
||||
if (ret) {
|
||||
test_err("could not remove free space");
|
||||
return ret;
|
||||
}
|
||||
|
||||
ret = __add_to_free_space_tree(trans, cache, path,
|
||||
cache->key.objectid + 2 * alignment,
|
||||
cache->start + 2 * alignment,
|
||||
alignment);
|
||||
if (ret) {
|
||||
test_err("could not add free space");
|
||||
|
@ -322,7 +317,7 @@ static int test_merge_right(struct btrfs_trans_handle *trans,
|
|||
}
|
||||
|
||||
ret = __add_to_free_space_tree(trans, cache, path,
|
||||
cache->key.objectid + alignment,
|
||||
cache->start + alignment,
|
||||
alignment);
|
||||
if (ret) {
|
||||
test_err("could not add free space");
|
||||
|
@ -335,24 +330,23 @@ static int test_merge_right(struct btrfs_trans_handle *trans,
|
|||
|
||||
static int test_merge_both(struct btrfs_trans_handle *trans,
|
||||
struct btrfs_fs_info *fs_info,
|
||||
struct btrfs_block_group_cache *cache,
|
||||
struct btrfs_block_group *cache,
|
||||
struct btrfs_path *path,
|
||||
u32 alignment)
|
||||
{
|
||||
const struct free_space_extent extents[] = {
|
||||
{cache->key.objectid, 3 * alignment},
|
||||
{cache->start, 3 * alignment},
|
||||
};
|
||||
int ret;
|
||||
|
||||
ret = __remove_from_free_space_tree(trans, cache, path,
|
||||
cache->key.objectid,
|
||||
cache->key.offset);
|
||||
cache->start, cache->length);
|
||||
if (ret) {
|
||||
test_err("could not remove free space");
|
||||
return ret;
|
||||
}
|
||||
|
||||
ret = __add_to_free_space_tree(trans, cache, path, cache->key.objectid,
|
||||
ret = __add_to_free_space_tree(trans, cache, path, cache->start,
|
||||
alignment);
|
||||
if (ret) {
|
||||
test_err("could not add free space");
|
||||
|
@ -360,16 +354,14 @@ static int test_merge_both(struct btrfs_trans_handle *trans,
|
|||
}
|
||||
|
||||
ret = __add_to_free_space_tree(trans, cache, path,
|
||||
cache->key.objectid + 2 * alignment,
|
||||
alignment);
|
||||
cache->start + 2 * alignment, alignment);
|
||||
if (ret) {
|
||||
test_err("could not add free space");
|
||||
return ret;
|
||||
}
|
||||
|
||||
ret = __add_to_free_space_tree(trans, cache, path,
|
||||
cache->key.objectid + alignment,
|
||||
alignment);
|
||||
cache->start + alignment, alignment);
|
||||
if (ret) {
|
||||
test_err("could not add free space");
|
||||
return ret;
|
||||
|
@ -381,26 +373,25 @@ static int test_merge_both(struct btrfs_trans_handle *trans,
|
|||
|
||||
static int test_merge_none(struct btrfs_trans_handle *trans,
|
||||
struct btrfs_fs_info *fs_info,
|
||||
struct btrfs_block_group_cache *cache,
|
||||
struct btrfs_block_group *cache,
|
||||
struct btrfs_path *path,
|
||||
u32 alignment)
|
||||
{
|
||||
const struct free_space_extent extents[] = {
|
||||
{cache->key.objectid, alignment},
|
||||
{cache->key.objectid + 2 * alignment, alignment},
|
||||
{cache->key.objectid + 4 * alignment, alignment},
|
||||
{cache->start, alignment},
|
||||
{cache->start + 2 * alignment, alignment},
|
||||
{cache->start + 4 * alignment, alignment},
|
||||
};
|
||||
int ret;
|
||||
|
||||
ret = __remove_from_free_space_tree(trans, cache, path,
|
||||
cache->key.objectid,
|
||||
cache->key.offset);
|
||||
cache->start, cache->length);
|
||||
if (ret) {
|
||||
test_err("could not remove free space");
|
||||
return ret;
|
||||
}
|
||||
|
||||
ret = __add_to_free_space_tree(trans, cache, path, cache->key.objectid,
|
||||
ret = __add_to_free_space_tree(trans, cache, path, cache->start,
|
||||
alignment);
|
||||
if (ret) {
|
||||
test_err("could not add free space");
|
||||
|
@ -408,16 +399,14 @@ static int test_merge_none(struct btrfs_trans_handle *trans,
|
|||
}
|
||||
|
||||
ret = __add_to_free_space_tree(trans, cache, path,
|
||||
cache->key.objectid + 4 * alignment,
|
||||
alignment);
|
||||
cache->start + 4 * alignment, alignment);
|
||||
if (ret) {
|
||||
test_err("could not add free space");
|
||||
return ret;
|
||||
}
|
||||
|
||||
ret = __add_to_free_space_tree(trans, cache, path,
|
||||
cache->key.objectid + 2 * alignment,
|
||||
alignment);
|
||||
cache->start + 2 * alignment, alignment);
|
||||
if (ret) {
|
||||
test_err("could not add free space");
|
||||
return ret;
|
||||
|
@ -429,7 +418,7 @@ static int test_merge_none(struct btrfs_trans_handle *trans,
|
|||
|
||||
typedef int (*test_func_t)(struct btrfs_trans_handle *,
|
||||
struct btrfs_fs_info *,
|
||||
struct btrfs_block_group_cache *,
|
||||
struct btrfs_block_group *,
|
||||
struct btrfs_path *,
|
||||
u32 alignment);
|
||||
|
||||
|
@ -438,7 +427,7 @@ static int run_test(test_func_t test_func, int bitmaps, u32 sectorsize,
|
|||
{
|
||||
struct btrfs_fs_info *fs_info;
|
||||
struct btrfs_root *root = NULL;
|
||||
struct btrfs_block_group_cache *cache = NULL;
|
||||
struct btrfs_block_group *cache = NULL;
|
||||
struct btrfs_trans_handle trans;
|
||||
struct btrfs_path *path = NULL;
|
||||
int ret;
|
||||
|
|
|
@@ -24,9 +24,79 @@

#define BTRFS_ROOT_TRANS_TAG 0

/*
 * Transaction states and transitions
 *
 * No running transaction (fs tree blocks are not modified)
 * |
 * | To next stage:
 * |  Call start_transaction() variants. Except btrfs_join_transaction_nostart().
 * V
 * Transaction N [[TRANS_STATE_RUNNING]]
 * |
 * | New trans handles can be attached to transaction N by calling all
 * | start_transaction() variants.
 * |
 * | To next stage:
 * |  Call btrfs_commit_transaction() on any trans handle attached to
 * |  transaction N
 * V
 * Transaction N [[TRANS_STATE_COMMIT_START]]
 * |
 * | Will wait for previous running transaction to completely finish if there
 * | is one
 * |
 * | Then one of the following happens:
 * | - Wait for all other trans handle holders to release.
 * |   The btrfs_commit_transaction() caller will do the commit work.
 * | - Wait for current transaction to be committed by others.
 * |   Other btrfs_commit_transaction() caller will do the commit work.
 * |
 * | At this stage, only btrfs_join_transaction*() variants can attach
 * | to this running transaction.
 * | All other variants will wait for current one to finish and attach to
 * | transaction N+1.
 * |
 * | To next stage:
 * |  Caller is chosen to commit transaction N, and all other trans handles
 * |  have been released.
 * V
 * Transaction N [[TRANS_STATE_COMMIT_DOING]]
 * |
 * | The heavy lifting transaction work is started.
 * | From running delayed refs (modifying extent tree) to creating pending
 * | snapshots, running qgroups.
 * | In short, modify supporting trees to reflect modifications of subvolume
 * | trees.
 * |
 * | At this stage, all start_transaction() calls will wait for this
 * | transaction to finish and attach to transaction N+1.
 * |
 * | To next stage:
 * |  Until all supporting trees are updated.
 * V
 * Transaction N [[TRANS_STATE_UNBLOCKED]]
 * |                                               Transaction N+1
 * | All needed trees are modified, thus we only   [[TRANS_STATE_RUNNING]]
 * | need to write them back to disk and update    |
 * | super blocks.                                 |
 * |                                               |
 * | At this stage, new transaction is allowed to  |
 * | start.                                        |
 * | All new start_transaction() calls will be     |
 * | attached to transid N+1.                      |
 * |                                               |
 * | To next stage:                                |
 * |  Until all tree blocks and super blocks are   |
 * |  written to block devices                     |
 * V                                               |
 * Transaction N [[TRANS_STATE_COMPLETED]]         V
 * All tree blocks and super blocks are written.   Transaction N+1
 * This transaction is finished and all its        [[TRANS_STATE_COMMIT_START]]
 * data structures will be cleaned up.             | Life goes on
 */
static const unsigned int btrfs_blocked_trans_types[TRANS_STATE_MAX] = {
        [TRANS_STATE_RUNNING]           = 0U,
        [TRANS_STATE_BLOCKED]           = __TRANS_START,
        [TRANS_STATE_COMMIT_START]      = (__TRANS_START | __TRANS_ATTACH),
        [TRANS_STATE_COMMIT_DOING]      = (__TRANS_START |
                                           __TRANS_ATTACH |
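The table that follows the diagram maps each transaction state to the kinds of new handles that must wait rather than attach. A simplified user-space sketch of that idea follows; it is illustrative only, not part of the patch, and the state names, handle types and masks here are assumptions rather than the kernel's definitions.

/* Sketch: per-state bitmask of handle types that have to wait. */
#include <stdbool.h>
#include <stdio.h>

enum state { RUNNING, COMMIT_START, COMMIT_DOING, UNBLOCKED, COMPLETED, STATE_MAX };
enum handle_type { H_START = 1 << 0, H_ATTACH = 1 << 1, H_JOIN = 1 << 2 };

static const unsigned int blocked_types[STATE_MAX] = {
        [RUNNING]      = 0,                     /* everyone may attach */
        [COMMIT_START] = H_START | H_ATTACH,    /* only join-style handles attach */
        [COMMIT_DOING] = H_START | H_ATTACH | H_JOIN,
        [UNBLOCKED]    = H_START | H_ATTACH | H_JOIN,
        [COMPLETED]    = H_START | H_ATTACH | H_JOIN,
};

static bool must_wait(enum state s, enum handle_type t)
{
        return blocked_types[s] & t;
}

int main(void)
{
        printf("start during COMMIT_START must wait: %d\n",
               must_wait(COMMIT_START, H_START));
        printf("join during COMMIT_START must wait:  %d\n",
               must_wait(COMMIT_START, H_JOIN));
        return 0;
}
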
@ -63,10 +133,10 @@ void btrfs_put_transaction(struct btrfs_transaction *transaction)
|
|||
* discard the physical locations of the block groups.
|
||||
*/
|
||||
while (!list_empty(&transaction->deleted_bgs)) {
|
||||
struct btrfs_block_group_cache *cache;
|
||||
struct btrfs_block_group *cache;
|
||||
|
||||
cache = list_first_entry(&transaction->deleted_bgs,
|
||||
struct btrfs_block_group_cache,
|
||||
struct btrfs_block_group,
|
||||
bg_list);
|
||||
list_del_init(&cache->bg_list);
|
||||
btrfs_put_block_group_trimming(cache);
|
||||
|
@ -383,7 +453,7 @@ int btrfs_record_root_in_trans(struct btrfs_trans_handle *trans,
|
|||
|
||||
static inline int is_transaction_blocked(struct btrfs_transaction *trans)
|
||||
{
|
||||
return (trans->state >= TRANS_STATE_BLOCKED &&
|
||||
return (trans->state >= TRANS_STATE_COMMIT_START &&
|
||||
trans->state < TRANS_STATE_UNBLOCKED &&
|
||||
!trans->aborted);
|
||||
}
|
||||
|
@ -570,7 +640,7 @@ again:
|
|||
INIT_LIST_HEAD(&h->new_bgs);
|
||||
|
||||
smp_mb();
|
||||
if (cur_trans->state >= TRANS_STATE_BLOCKED &&
|
||||
if (cur_trans->state >= TRANS_STATE_COMMIT_START &&
|
||||
may_wait_transaction(fs_info, type)) {
|
||||
current->journal_info = h;
|
||||
btrfs_commit_transaction(h);
|
||||
|
@ -659,7 +729,7 @@ struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root)
|
|||
true);
|
||||
}
|
||||
|
||||
struct btrfs_trans_handle *btrfs_join_transaction_nolock(struct btrfs_root *root)
|
||||
struct btrfs_trans_handle *btrfs_join_transaction_spacecache(struct btrfs_root *root)
|
||||
{
|
||||
return start_transaction(root, 0, TRANS_JOIN_NOLOCK,
|
||||
BTRFS_RESERVE_NO_FLUSH, true);
|
||||
|
@ -798,7 +868,7 @@ int btrfs_should_end_transaction(struct btrfs_trans_handle *trans)
|
|||
struct btrfs_transaction *cur_trans = trans->transaction;
|
||||
|
||||
smp_mb();
|
||||
if (cur_trans->state >= TRANS_STATE_BLOCKED ||
|
||||
if (cur_trans->state >= TRANS_STATE_COMMIT_START ||
|
||||
cur_trans->delayed_refs.flushing)
|
||||
return 1;
|
||||
|
||||
|
@ -831,7 +901,6 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
|
|||
{
|
||||
struct btrfs_fs_info *info = trans->fs_info;
|
||||
struct btrfs_transaction *cur_trans = trans->transaction;
|
||||
int lock = (trans->type != TRANS_JOIN_NOLOCK);
|
||||
int err = 0;
|
||||
|
||||
if (refcount_read(&trans->use_count) > 1) {
|
||||
|
@ -847,13 +916,6 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
|
|||
|
||||
btrfs_trans_release_chunk_metadata(trans);
|
||||
|
||||
if (lock && READ_ONCE(cur_trans->state) == TRANS_STATE_BLOCKED) {
|
||||
if (throttle)
|
||||
return btrfs_commit_transaction(trans);
|
||||
else
|
||||
wake_up_process(info->transaction_kthread);
|
||||
}
|
||||
|
||||
if (trans->type & __TRANS_FREEZABLE)
|
||||
sb_end_intwrite(info->sb);
|
||||
|
||||
|
@ -990,7 +1052,7 @@ static int __btrfs_wait_marked_extents(struct btrfs_fs_info *fs_info,
|
|||
return werr;
|
||||
}
|
||||
|
||||
int btrfs_wait_extents(struct btrfs_fs_info *fs_info,
|
||||
static int btrfs_wait_extents(struct btrfs_fs_info *fs_info,
|
||||
struct extent_io_tree *dirty_pages)
|
||||
{
|
||||
bool errors = false;
|
||||
|
@ -1875,7 +1937,7 @@ static void cleanup_transaction(struct btrfs_trans_handle *trans, int err)
|
|||
static void btrfs_cleanup_pending_block_groups(struct btrfs_trans_handle *trans)
|
||||
{
|
||||
struct btrfs_fs_info *fs_info = trans->fs_info;
|
||||
struct btrfs_block_group_cache *block_group, *tmp;
|
||||
struct btrfs_block_group *block_group, *tmp;
|
||||
|
||||
list_for_each_entry_safe(block_group, tmp, &trans->new_bgs, bg_list) {
|
||||
btrfs_delayed_refs_rsv_release(fs_info, 1);
|
||||
|
@ -1949,6 +2011,8 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans)
|
|||
struct btrfs_transaction *prev_trans = NULL;
|
||||
int ret;
|
||||
|
||||
ASSERT(refcount_read(&trans->use_count) == 1);
|
||||
|
||||
/* Stop the commit early if ->aborted is set */
|
||||
if (unlikely(READ_ONCE(cur_trans->aborted))) {
|
||||
ret = cur_trans->aborted;
|
||||
|
|
|
@ -13,7 +13,6 @@
|
|||
|
||||
enum btrfs_trans_state {
|
||||
TRANS_STATE_RUNNING,
|
||||
TRANS_STATE_BLOCKED,
|
||||
TRANS_STATE_COMMIT_START,
|
||||
TRANS_STATE_COMMIT_DOING,
|
||||
TRANS_STATE_UNBLOCKED,
|
||||
|
@ -184,7 +183,7 @@ struct btrfs_trans_handle *btrfs_start_transaction_fallback_global_rsv(
|
|||
unsigned int num_items,
|
||||
int min_factor);
|
||||
struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root);
|
||||
struct btrfs_trans_handle *btrfs_join_transaction_nolock(struct btrfs_root *root);
|
||||
struct btrfs_trans_handle *btrfs_join_transaction_spacecache(struct btrfs_root *root);
|
||||
struct btrfs_trans_handle *btrfs_join_transaction_nostart(struct btrfs_root *root);
|
||||
struct btrfs_trans_handle *btrfs_attach_transaction(struct btrfs_root *root);
|
||||
struct btrfs_trans_handle *btrfs_attach_transaction_barrier(
|
||||
|
@ -218,8 +217,6 @@ int btrfs_record_root_in_trans(struct btrfs_trans_handle *trans,
|
|||
struct btrfs_root *root);
|
||||
int btrfs_write_marked_extents(struct btrfs_fs_info *fs_info,
|
||||
struct extent_io_tree *dirty_pages, int mark);
|
||||
int btrfs_wait_extents(struct btrfs_fs_info *fs_info,
|
||||
struct extent_io_tree *dirty_pages);
|
||||
int btrfs_wait_tree_log_extents(struct btrfs_root *root, int mark);
|
||||
int btrfs_transaction_blocked(struct btrfs_fs_info *info);
|
||||
int btrfs_transaction_in_commit(struct btrfs_fs_info *info);
|
||||
|
|
|
@ -23,6 +23,7 @@
|
|||
#include "disk-io.h"
|
||||
#include "compression.h"
|
||||
#include "volumes.h"
|
||||
#include "misc.h"
|
||||
|
||||
/*
|
||||
* Error message should follow the following format:
|
||||
|
@ -124,6 +125,74 @@ static u64 file_extent_end(struct extent_buffer *leaf,
|
|||
return end;
|
||||
}
|
||||
|
||||
/*
|
||||
* Customized report for dir_item, the only new important information is
|
||||
* key->objectid, which represents inode number
|
||||
*/
|
||||
__printf(3, 4)
|
||||
__cold
|
||||
static void dir_item_err(const struct extent_buffer *eb, int slot,
|
||||
const char *fmt, ...)
|
||||
{
|
||||
const struct btrfs_fs_info *fs_info = eb->fs_info;
|
||||
struct btrfs_key key;
|
||||
struct va_format vaf;
|
||||
va_list args;
|
||||
|
||||
btrfs_item_key_to_cpu(eb, &key, slot);
|
||||
va_start(args, fmt);
|
||||
|
||||
vaf.fmt = fmt;
|
||||
vaf.va = &args;
|
||||
|
||||
btrfs_crit(fs_info,
|
||||
"corrupt %s: root=%llu block=%llu slot=%d ino=%llu, %pV",
|
||||
btrfs_header_level(eb) == 0 ? "leaf" : "node",
|
||||
btrfs_header_owner(eb), btrfs_header_bytenr(eb), slot,
|
||||
key.objectid, &vaf);
|
||||
va_end(args);
|
||||
}

/*
 * This function checks prev_key->objectid, to ensure current key and prev_key
 * share the same objectid as inode number.
 *
 * This is to detect missing INODE_ITEM in subvolume trees.
 *
 * Return true if everything is OK or we don't need to check.
 * Return false if anything is wrong.
 */
static bool check_prev_ino(struct extent_buffer *leaf,
                           struct btrfs_key *key, int slot,
                           struct btrfs_key *prev_key)
{
        /* No prev key, skip check */
        if (slot == 0)
                return true;

        /* Only these key->types need to be checked */
        ASSERT(key->type == BTRFS_XATTR_ITEM_KEY ||
               key->type == BTRFS_INODE_REF_KEY ||
               key->type == BTRFS_DIR_INDEX_KEY ||
               key->type == BTRFS_DIR_ITEM_KEY ||
               key->type == BTRFS_EXTENT_DATA_KEY);

        /*
         * Only subvolume trees along with their reloc trees need this check.
         * Things like the log tree don't follow this ino requirement.
         */
        if (!is_fstree(btrfs_header_owner(leaf)))
                return true;

        if (key->objectid == prev_key->objectid)
                return true;

        /* Error found */
        dir_item_err(leaf, slot,
                "invalid previous key objectid, have %llu expect %llu",
                prev_key->objectid, key->objectid);
        return false;
}
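A minimal user-space sketch of the same consistency idea, applied to a plain sorted array of keys, follows; it is illustrative only, not part of the patch, and the key layout and sample values are made up. Inode-scoped items must share the objectid of the item before them, otherwise an INODE_ITEM is missing in between.

/* Sketch: detect a missing INODE_ITEM by comparing adjacent key objectids. */
#include <stdbool.h>
#include <stdio.h>

struct key { unsigned long long objectid; int type; };

static bool prev_ino_ok(const struct key *keys, int slot)
{
        if (slot == 0)          /* nothing before us, nothing to check */
                return true;
        return keys[slot].objectid == keys[slot - 1].objectid;
}

int main(void)
{
        /* objectid 258 has items but no preceding item with the same objectid */
        struct key keys[] = { { 257, 1 }, { 257, 96 }, { 258, 96 } };

        for (int i = 0; i < 3; i++)
                printf("slot %d: %s\n", i,
                       prev_ino_ok(keys, i) ? "ok" : "missing INODE_ITEM?");
        return 0;
}
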
static int check_extent_data_item(struct extent_buffer *leaf,
|
||||
struct btrfs_key *key, int slot,
|
||||
struct btrfs_key *prev_key)
|
||||
|
@ -141,13 +210,33 @@ static int check_extent_data_item(struct extent_buffer *leaf,
|
|||
return -EUCLEAN;
|
||||
}
|
||||
|
||||
/*
|
||||
* Previous key must have the same key->objectid (ino).
|
||||
* It can be XATTR_ITEM, INODE_ITEM or just another EXTENT_DATA.
|
||||
* But if objectids mismatch, it means we have a missing
|
||||
* INODE_ITEM.
|
||||
*/
|
||||
if (!check_prev_ino(leaf, key, slot, prev_key))
|
||||
return -EUCLEAN;
|
||||
|
||||
fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
|
||||
|
||||
if (btrfs_file_extent_type(leaf, fi) > BTRFS_FILE_EXTENT_TYPES) {
|
||||
/*
|
||||
* Make sure the item contains at least inline header, so the file
|
||||
* extent type is not some garbage.
|
||||
*/
|
||||
if (item_size < BTRFS_FILE_EXTENT_INLINE_DATA_START) {
|
||||
file_extent_err(leaf, slot,
|
||||
"invalid item size, have %u expect [%lu, %u)",
|
||||
item_size, BTRFS_FILE_EXTENT_INLINE_DATA_START,
|
||||
SZ_4K);
|
||||
return -EUCLEAN;
|
||||
}
|
||||
if (btrfs_file_extent_type(leaf, fi) >= BTRFS_NR_FILE_EXTENT_TYPES) {
|
||||
file_extent_err(leaf, slot,
|
||||
"invalid type for file extent, have %u expect range [0, %u]",
|
||||
btrfs_file_extent_type(leaf, fi),
|
||||
BTRFS_FILE_EXTENT_TYPES);
|
||||
BTRFS_NR_FILE_EXTENT_TYPES - 1);
|
||||
return -EUCLEAN;
|
||||
}
|
||||
|
||||
|
@ -155,11 +244,11 @@ static int check_extent_data_item(struct extent_buffer *leaf,
|
|||
* Support for new compression/encryption must introduce incompat flag,
|
||||
* and must be caught in open_ctree().
|
||||
*/
|
||||
if (btrfs_file_extent_compression(leaf, fi) > BTRFS_COMPRESS_TYPES) {
|
||||
if (btrfs_file_extent_compression(leaf, fi) >= BTRFS_NR_COMPRESS_TYPES) {
|
||||
file_extent_err(leaf, slot,
|
||||
"invalid compression for file extent, have %u expect range [0, %u]",
|
||||
btrfs_file_extent_compression(leaf, fi),
|
||||
BTRFS_COMPRESS_TYPES);
|
||||
BTRFS_NR_COMPRESS_TYPES - 1);
|
||||
return -EUCLEAN;
|
||||
}
|
||||
if (btrfs_file_extent_encryption(leaf, fi)) {
|
||||
|
@ -270,42 +359,17 @@ static int check_csum_item(struct extent_buffer *leaf, struct btrfs_key *key,
|
|||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Customized reported for dir_item, only important new info is key->objectid,
|
||||
* which represents inode number
|
||||
*/
|
||||
__printf(3, 4)
|
||||
__cold
|
||||
static void dir_item_err(const struct extent_buffer *eb, int slot,
|
||||
const char *fmt, ...)
|
||||
{
|
||||
const struct btrfs_fs_info *fs_info = eb->fs_info;
|
||||
struct btrfs_key key;
|
||||
struct va_format vaf;
|
||||
va_list args;
|
||||
|
||||
btrfs_item_key_to_cpu(eb, &key, slot);
|
||||
va_start(args, fmt);
|
||||
|
||||
vaf.fmt = fmt;
|
||||
vaf.va = &args;
|
||||
|
||||
btrfs_crit(fs_info,
|
||||
"corrupt %s: root=%llu block=%llu slot=%d ino=%llu, %pV",
|
||||
btrfs_header_level(eb) == 0 ? "leaf" : "node",
|
||||
btrfs_header_owner(eb), btrfs_header_bytenr(eb), slot,
|
||||
key.objectid, &vaf);
|
||||
va_end(args);
|
||||
}
|
||||
|
||||
static int check_dir_item(struct extent_buffer *leaf,
|
||||
struct btrfs_key *key, int slot)
|
||||
struct btrfs_key *key, struct btrfs_key *prev_key,
|
||||
int slot)
|
||||
{
|
||||
struct btrfs_fs_info *fs_info = leaf->fs_info;
|
||||
struct btrfs_dir_item *di;
|
||||
u32 item_size = btrfs_item_size_nr(leaf, slot);
|
||||
u32 cur = 0;
|
||||
|
||||
if (!check_prev_ino(leaf, key, slot, prev_key))
|
||||
return -EUCLEAN;
|
||||
di = btrfs_item_ptr(leaf, slot, struct btrfs_dir_item);
|
||||
while (cur < item_size) {
|
||||
u32 name_len;
|
||||
|
@ -459,23 +523,23 @@ static int check_block_group_item(struct extent_buffer *leaf,
|
|||
|
||||
read_extent_buffer(leaf, &bgi, btrfs_item_ptr_offset(leaf, slot),
|
||||
sizeof(bgi));
|
||||
if (btrfs_block_group_chunk_objectid(&bgi) !=
|
||||
if (btrfs_stack_block_group_chunk_objectid(&bgi) !=
|
||||
BTRFS_FIRST_CHUNK_TREE_OBJECTID) {
|
||||
block_group_err(leaf, slot,
|
||||
"invalid block group chunk objectid, have %llu expect %llu",
|
||||
btrfs_block_group_chunk_objectid(&bgi),
|
||||
btrfs_stack_block_group_chunk_objectid(&bgi),
|
||||
BTRFS_FIRST_CHUNK_TREE_OBJECTID);
|
||||
return -EUCLEAN;
|
||||
}
|
||||
|
||||
if (btrfs_block_group_used(&bgi) > key->offset) {
|
||||
if (btrfs_stack_block_group_used(&bgi) > key->offset) {
|
||||
block_group_err(leaf, slot,
|
||||
"invalid block group used, have %llu expect [0, %llu)",
|
||||
btrfs_block_group_used(&bgi), key->offset);
|
||||
btrfs_stack_block_group_used(&bgi), key->offset);
|
||||
return -EUCLEAN;
|
||||
}
|
||||
|
||||
flags = btrfs_block_group_flags(&bgi);
|
||||
flags = btrfs_stack_block_group_flags(&bgi);
|
||||
if (hweight64(flags & BTRFS_BLOCK_GROUP_PROFILE_MASK) > 1) {
|
||||
block_group_err(leaf, slot,
|
||||
"invalid profile flags, have 0x%llx (%lu bits set) expect no more than 1 bit set",
|
||||
|
@ -609,7 +673,7 @@ int btrfs_check_chunk_valid(struct extent_buffer *leaf,
|
|||
return -EUCLEAN;
|
||||
}
|
||||
|
||||
if (!is_power_of_2(type & BTRFS_BLOCK_GROUP_PROFILE_MASK) &&
|
||||
if (!has_single_bit_set(type & BTRFS_BLOCK_GROUP_PROFILE_MASK) &&
|
||||
(type & BTRFS_BLOCK_GROUP_PROFILE_MASK) != 0) {
|
||||
chunk_err(leaf, chunk, logical,
|
||||
"invalid chunk profile flag: 0x%llx, expect 0 or 1 bit set",
|
||||
|
@ -785,11 +849,11 @@ static int check_inode_item(struct extent_buffer *leaf,
|
|||
}
|
||||
|
||||
/*
|
||||
* S_IFMT is not bit mapped so we can't completely rely on is_power_of_2,
|
||||
* but is_power_of_2() can save us from checking FIFO/CHR/DIR/REG.
|
||||
* Only needs to check BLK, LNK and SOCKS
|
||||
* S_IFMT is not bit mapped so we can't completely rely on
|
||||
* is_power_of_2/has_single_bit_set, but it can save us from checking
|
||||
* FIFO/CHR/DIR/REG. Only needs to check BLK, LNK and SOCKS
|
||||
*/
|
||||
if (!is_power_of_2(mode & S_IFMT)) {
|
||||
if (!has_single_bit_set(mode & S_IFMT)) {
|
||||
if (!S_ISLNK(mode) && !S_ISBLK(mode) && !S_ISSOCK(mode)) {
|
||||
inode_item_err(fs_info, leaf, slot,
|
||||
"invalid mode: has 0%o expect valid S_IF* bit(s)",
|
||||
|
@ -1010,8 +1074,8 @@ static int check_extent_item(struct extent_buffer *leaf,
|
|||
btrfs_super_generation(fs_info->super_copy) + 1);
|
||||
return -EUCLEAN;
|
||||
}
|
||||
if (!is_power_of_2(flags & (BTRFS_EXTENT_FLAG_DATA |
|
||||
BTRFS_EXTENT_FLAG_TREE_BLOCK))) {
|
||||
if (!has_single_bit_set(flags & (BTRFS_EXTENT_FLAG_DATA |
|
||||
BTRFS_EXTENT_FLAG_TREE_BLOCK))) {
|
||||
extent_err(leaf, slot,
|
||||
"invalid extent flag, have 0x%llx expect 1 bit set in 0x%llx",
|
||||
flags, BTRFS_EXTENT_FLAG_DATA |
|
||||
|
@ -1224,6 +1288,58 @@ static int check_extent_data_ref(struct extent_buffer *leaf,
|
|||
return 0;
|
||||
}
|
||||
|
||||
#define inode_ref_err(fs_info, eb, slot, fmt, args...) \
|
||||
inode_item_err(fs_info, eb, slot, fmt, ##args)
|
||||
static int check_inode_ref(struct extent_buffer *leaf,
|
||||
struct btrfs_key *key, struct btrfs_key *prev_key,
|
||||
int slot)
|
||||
{
|
||||
struct btrfs_inode_ref *iref;
|
||||
unsigned long ptr;
|
||||
unsigned long end;
|
||||
|
||||
if (!check_prev_ino(leaf, key, slot, prev_key))
|
||||
return -EUCLEAN;
|
||||
/* namelen can't be 0, so item_size == sizeof() is also invalid */
|
||||
if (btrfs_item_size_nr(leaf, slot) <= sizeof(*iref)) {
|
||||
inode_ref_err(fs_info, leaf, slot,
|
||||
"invalid item size, have %u expect (%zu, %u)",
|
||||
btrfs_item_size_nr(leaf, slot),
|
||||
sizeof(*iref), BTRFS_LEAF_DATA_SIZE(leaf->fs_info));
|
||||
return -EUCLEAN;
|
||||
}
|
||||
|
||||
ptr = btrfs_item_ptr_offset(leaf, slot);
|
||||
end = ptr + btrfs_item_size_nr(leaf, slot);
|
||||
while (ptr < end) {
|
||||
u16 namelen;
|
||||
|
||||
if (ptr + sizeof(iref) > end) {
|
||||
inode_ref_err(fs_info, leaf, slot,
|
||||
"inode ref overflow, ptr %lu end %lu inode_ref_size %zu",
|
||||
ptr, end, sizeof(iref));
|
||||
return -EUCLEAN;
|
||||
}
|
||||
|
||||
iref = (struct btrfs_inode_ref *)ptr;
|
||||
namelen = btrfs_inode_ref_name_len(leaf, iref);
|
||||
if (ptr + sizeof(*iref) + namelen > end) {
|
||||
inode_ref_err(fs_info, leaf, slot,
|
||||
"inode ref overflow, ptr %lu end %lu namelen %u",
|
||||
ptr, end, namelen);
|
||||
return -EUCLEAN;
|
||||
}
|
||||
|
||||
/*
|
||||
* NOTE: In theory we should record all found index numbers
|
||||
* to find any duplicated indexes, but that will be too time
|
||||
* consuming for inodes with too many hard links.
|
||||
*/
|
||||
ptr += sizeof(*iref) + namelen;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Common point to switch the item-specific validation.
|
||||
*/
|
||||
|
@ -1244,7 +1360,10 @@ static int check_leaf_item(struct extent_buffer *leaf,
|
|||
case BTRFS_DIR_ITEM_KEY:
|
||||
case BTRFS_DIR_INDEX_KEY:
|
||||
case BTRFS_XATTR_ITEM_KEY:
|
||||
ret = check_dir_item(leaf, key, slot);
|
||||
ret = check_dir_item(leaf, key, prev_key, slot);
|
||||
break;
|
||||
case BTRFS_INODE_REF_KEY:
|
||||
ret = check_inode_ref(leaf, key, prev_key, slot);
|
||||
break;
|
||||
case BTRFS_BLOCK_GROUP_ITEM_KEY:
|
||||
ret = check_block_group_item(leaf, key, slot);
|
||||
|
|
|
@ -559,7 +559,7 @@ static noinline struct inode *read_one_inode(struct btrfs_root *root,
|
|||
key.objectid = objectid;
|
||||
key.type = BTRFS_INODE_ITEM_KEY;
|
||||
key.offset = 0;
|
||||
inode = btrfs_iget(root->fs_info->sb, &key, root, NULL);
|
||||
inode = btrfs_iget(root->fs_info->sb, &key, root);
|
||||
if (IS_ERR(inode))
|
||||
inode = NULL;
|
||||
return inode;
|
||||
|
@ -945,54 +945,32 @@ static noinline int backref_in_log(struct btrfs_root *log,
|
|||
const char *name, int namelen)
|
||||
{
|
||||
struct btrfs_path *path;
|
||||
struct btrfs_inode_ref *ref;
|
||||
unsigned long ptr;
|
||||
unsigned long ptr_end;
|
||||
unsigned long name_ptr;
|
||||
int found_name_len;
|
||||
int item_size;
|
||||
int ret;
|
||||
int match = 0;
|
||||
|
||||
path = btrfs_alloc_path();
|
||||
if (!path)
|
||||
return -ENOMEM;
|
||||
|
||||
ret = btrfs_search_slot(NULL, log, key, path, 0, 0);
|
||||
if (ret != 0)
|
||||
if (ret < 0) {
|
||||
goto out;
|
||||
} else if (ret == 1) {
|
||||
ret = 0;
|
||||
goto out;
|
||||
}
|
||||
|
||||
ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]);
|
||||
|
||||
if (key->type == BTRFS_INODE_EXTREF_KEY) {
|
||||
if (btrfs_find_name_in_ext_backref(path->nodes[0],
|
||||
if (key->type == BTRFS_INODE_EXTREF_KEY)
|
||||
ret = !!btrfs_find_name_in_ext_backref(path->nodes[0],
|
||||
path->slots[0],
|
||||
ref_objectid,
|
||||
name, namelen);
|
||||
else
|
||||
ret = !!btrfs_find_name_in_backref(path->nodes[0],
|
||||
path->slots[0],
|
||||
ref_objectid,
|
||||
name, namelen))
|
||||
match = 1;
|
||||
|
||||
goto out;
|
||||
}
|
||||
|
||||
item_size = btrfs_item_size_nr(path->nodes[0], path->slots[0]);
|
||||
ptr_end = ptr + item_size;
|
||||
while (ptr < ptr_end) {
|
||||
ref = (struct btrfs_inode_ref *)ptr;
|
||||
found_name_len = btrfs_inode_ref_name_len(path->nodes[0], ref);
|
||||
if (found_name_len == namelen) {
|
||||
name_ptr = (unsigned long)(ref + 1);
|
||||
ret = memcmp_extent_buffer(path->nodes[0], name,
|
||||
name_ptr, namelen);
|
||||
if (ret == 0) {
|
||||
match = 1;
|
||||
goto out;
|
||||
}
|
||||
}
|
||||
ptr = (unsigned long)(ref + 1) + found_name_len;
|
||||
}
|
||||
name, namelen);
|
||||
out:
|
||||
btrfs_free_path(path);
|
||||
return match;
|
||||
return ret;
|
||||
}
|
||||
|
||||
static inline int __add_inode_ref(struct btrfs_trans_handle *trans,
|
||||
|
@ -1050,10 +1028,13 @@ again:
|
|||
(unsigned long)(victim_ref + 1),
|
||||
victim_name_len);
|
||||
|
||||
if (!backref_in_log(log_root, &search_key,
|
||||
parent_objectid,
|
||||
victim_name,
|
||||
victim_name_len)) {
|
||||
ret = backref_in_log(log_root, &search_key,
|
||||
parent_objectid, victim_name,
|
||||
victim_name_len);
|
||||
if (ret < 0) {
|
||||
kfree(victim_name);
|
||||
return ret;
|
||||
} else if (!ret) {
|
||||
inc_nlink(&inode->vfs_inode);
|
||||
btrfs_release_path(path);
|
||||
|
||||
|
@ -1115,10 +1096,12 @@ again:
|
|||
search_key.offset = btrfs_extref_hash(parent_objectid,
|
||||
victim_name,
|
||||
victim_name_len);
|
||||
ret = 0;
|
||||
if (!backref_in_log(log_root, &search_key,
|
||||
parent_objectid, victim_name,
|
||||
victim_name_len)) {
|
||||
ret = backref_in_log(log_root, &search_key,
|
||||
parent_objectid, victim_name,
|
||||
victim_name_len);
|
||||
if (ret < 0) {
|
||||
return ret;
|
||||
} else if (!ret) {
|
||||
ret = -ENOENT;
|
||||
victim_parent = read_one_inode(root,
|
||||
parent_objectid);
|
||||
|
@ -1884,30 +1867,6 @@ static noinline int insert_one_name(struct btrfs_trans_handle *trans,
|
|||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* Return true if an inode reference exists in the log for the given name,
|
||||
* inode and parent inode.
|
||||
*/
|
||||
static bool name_in_log_ref(struct btrfs_root *log_root,
|
||||
const char *name, const int name_len,
|
||||
const u64 dirid, const u64 ino)
|
||||
{
|
||||
struct btrfs_key search_key;
|
||||
|
||||
search_key.objectid = ino;
|
||||
search_key.type = BTRFS_INODE_REF_KEY;
|
||||
search_key.offset = dirid;
|
||||
if (backref_in_log(log_root, &search_key, dirid, name, name_len))
|
||||
return true;
|
||||
|
||||
search_key.type = BTRFS_INODE_EXTREF_KEY;
|
||||
search_key.offset = btrfs_extref_hash(dirid, name, name_len);
|
||||
if (backref_in_log(log_root, &search_key, dirid, name, name_len))
|
||||
return true;
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
/*
|
||||
* take a single entry in a log directory item and replay it into
|
||||
* the subvolume.
|
||||
|
@ -2024,8 +1983,31 @@ out:
|
|||
return ret;
|
||||
|
||||
insert:
|
||||
if (name_in_log_ref(root->log_root, name, name_len,
|
||||
key->objectid, log_key.objectid)) {
|
||||
/*
|
||||
* Check if the inode reference exists in the log for the given name,
|
||||
* inode and parent inode
|
||||
*/
|
||||
found_key.objectid = log_key.objectid;
|
||||
found_key.type = BTRFS_INODE_REF_KEY;
|
||||
found_key.offset = key->objectid;
|
||||
ret = backref_in_log(root->log_root, &found_key, 0, name, name_len);
|
||||
if (ret < 0) {
|
||||
goto out;
|
||||
} else if (ret) {
|
||||
/* The dentry will be added later. */
|
||||
ret = 0;
|
||||
update_size = false;
|
||||
goto out;
|
||||
}
|
||||
|
||||
found_key.objectid = log_key.objectid;
|
||||
found_key.type = BTRFS_INODE_EXTREF_KEY;
|
||||
found_key.offset = key->objectid;
|
||||
ret = backref_in_log(root->log_root, &found_key, key->objectid, name,
|
||||
name_len);
|
||||
if (ret < 0) {
|
||||
goto out;
|
||||
} else if (ret) {
|
||||
/* The dentry will be added later. */
|
||||
ret = 0;
|
||||
update_size = false;
|
||||
|
@ -2869,7 +2851,7 @@ static int walk_log_tree(struct btrfs_trans_handle *trans,
|
|||
level = btrfs_header_level(log->node);
|
||||
orig_level = level;
|
||||
path->nodes[level] = log->node;
|
||||
extent_buffer_get(log->node);
|
||||
atomic_inc(&log->node->refs);
|
||||
path->slots[level] = 0;
|
||||
|
||||
while (1) {
|
||||
|
@ -4983,7 +4965,7 @@ static int log_conflicting_inodes(struct btrfs_trans_handle *trans,
|
|||
key.objectid = ino;
|
||||
key.type = BTRFS_INODE_ITEM_KEY;
|
||||
key.offset = 0;
|
||||
inode = btrfs_iget(fs_info->sb, &key, root, NULL);
|
||||
inode = btrfs_iget(fs_info->sb, &key, root);
|
||||
/*
|
||||
* If the other inode that had a conflicting dir entry was
|
||||
* deleted in the current transaction, we need to log its parent
|
||||
|
@ -4993,8 +4975,7 @@ static int log_conflicting_inodes(struct btrfs_trans_handle *trans,
|
|||
ret = PTR_ERR(inode);
|
||||
if (ret == -ENOENT) {
|
||||
key.objectid = parent;
|
||||
inode = btrfs_iget(fs_info->sb, &key, root,
|
||||
NULL);
|
||||
inode = btrfs_iget(fs_info->sb, &key, root);
|
||||
if (IS_ERR(inode)) {
|
||||
ret = PTR_ERR(inode);
|
||||
} else {
|
||||
|
@ -5699,7 +5680,7 @@ process_leaf:
|
|||
continue;
|
||||
|
||||
btrfs_release_path(path);
|
||||
di_inode = btrfs_iget(fs_info->sb, &di_key, root, NULL);
|
||||
di_inode = btrfs_iget(fs_info->sb, &di_key, root);
|
||||
if (IS_ERR(di_inode)) {
|
||||
ret = PTR_ERR(di_inode);
|
||||
goto next_dir_inode;
|
||||
|
@ -5825,8 +5806,7 @@ static int btrfs_log_all_parents(struct btrfs_trans_handle *trans,
|
|||
cur_offset = item_size;
|
||||
}
|
||||
|
||||
dir_inode = btrfs_iget(fs_info->sb, &inode_key,
|
||||
root, NULL);
|
||||
dir_inode = btrfs_iget(fs_info->sb, &inode_key, root);
|
||||
/*
|
||||
* If the parent inode was deleted, return an error to
|
||||
* fallback to a transaction commit. This is to prevent
|
||||
|
@ -5900,7 +5880,7 @@ static int log_new_ancestors(struct btrfs_trans_handle *trans,
|
|||
search_key.objectid = found_key.offset;
|
||||
search_key.type = BTRFS_INODE_ITEM_KEY;
|
||||
search_key.offset = 0;
|
||||
inode = btrfs_iget(fs_info->sb, &search_key, root, NULL);
|
||||
inode = btrfs_iget(fs_info->sb, &search_key, root);
|
||||
if (IS_ERR(inode))
|
||||
return PTR_ERR(inode);
|
||||
|
||||
|
|
|
@@ -58,6 +58,30 @@ const struct btrfs_raid_attr btrfs_raid_array[BTRFS_NR_RAID_TYPES] = {
                .bg_flag        = BTRFS_BLOCK_GROUP_RAID1,
                .mindev_error   = BTRFS_ERROR_DEV_RAID1_MIN_NOT_MET,
        },
        [BTRFS_RAID_RAID1C3] = {
                .sub_stripes    = 1,
                .dev_stripes    = 1,
                .devs_max       = 0,
                .devs_min       = 3,
                .tolerated_failures = 2,
                .devs_increment = 3,
                .ncopies        = 3,
                .raid_name      = "raid1c3",
                .bg_flag        = BTRFS_BLOCK_GROUP_RAID1C3,
                .mindev_error   = BTRFS_ERROR_DEV_RAID1C3_MIN_NOT_MET,
        },
        [BTRFS_RAID_RAID1C4] = {
                .sub_stripes    = 1,
                .dev_stripes    = 1,
                .devs_max       = 0,
                .devs_min       = 4,
                .tolerated_failures = 3,
                .devs_increment = 4,
                .ncopies        = 4,
                .raid_name      = "raid1c4",
                .bg_flag        = BTRFS_BLOCK_GROUP_RAID1C4,
                .mindev_error   = BTRFS_ERROR_DEV_RAID1C4_MIN_NOT_MET,
        },
        [BTRFS_RAID_DUP] = {
                .sub_stripes    = 1,
                .dev_stripes    = 2,
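For the mirrored profiles the new table entries follow a simple relation between copy count, minimum device count and tolerated device failures. A small sketch of that relation follows; it is illustrative only, not part of the patch.

/* Sketch: mirrored-profile properties derived from the copy count. */
#include <stdio.h>

struct profile { const char *name; int ncopies; };

int main(void)
{
        const struct profile profiles[] = {
                { "raid1", 2 }, { "raid1c3", 3 }, { "raid1c4", 4 },
        };

        for (size_t i = 0; i < sizeof(profiles) / sizeof(profiles[0]); i++)
                printf("%-8s ncopies=%d devs_min=%d tolerated_failures=%d\n",
                       profiles[i].name, profiles[i].ncopies,
                       profiles[i].ncopies, profiles[i].ncopies - 1);
        return 0;
}
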
@ -297,7 +321,7 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info,
|
|||
|
||||
DEFINE_MUTEX(uuid_mutex);
|
||||
static LIST_HEAD(fs_uuids);
|
||||
struct list_head *btrfs_get_fs_uuids(void)
|
||||
struct list_head * __attribute_const__ btrfs_get_fs_uuids(void)
|
||||
{
|
||||
return &fs_uuids;
|
||||
}
|
||||
|
@ -397,8 +421,6 @@ static struct btrfs_device *__alloc_device(void)
|
|||
INIT_LIST_HEAD(&dev->dev_alloc_list);
|
||||
INIT_LIST_HEAD(&dev->post_commit_list);
|
||||
|
||||
spin_lock_init(&dev->io_lock);
|
||||
|
||||
atomic_set(&dev->reada_in_flight, 0);
|
||||
atomic_set(&dev->dev_stats_ccnt, 0);
|
||||
btrfs_device_data_ordered_init(dev);
|
||||
|
@ -501,212 +523,6 @@ error:
|
|||
return ret;
|
||||
}
|
||||
|
||||
static void requeue_list(struct btrfs_pending_bios *pending_bios,
|
||||
struct bio *head, struct bio *tail)
|
||||
{
|
||||
|
||||
struct bio *old_head;
|
||||
|
||||
old_head = pending_bios->head;
|
||||
pending_bios->head = head;
|
||||
if (pending_bios->tail)
|
||||
tail->bi_next = old_head;
|
||||
else
|
||||
pending_bios->tail = tail;
|
||||
}
|
||||
|
||||
/*
|
||||
* we try to collect pending bios for a device so we don't get a large
|
||||
* number of procs sending bios down to the same device. This greatly
|
||||
* improves the schedulers ability to collect and merge the bios.
|
||||
*
|
||||
* But, it also turns into a long list of bios to process and that is sure
|
||||
* to eventually make the worker thread block. The solution here is to
|
||||
* make some progress and then put this work struct back at the end of
|
||||
* the list if the block device is congested. This way, multiple devices
|
||||
* can make progress from a single worker thread.
|
||||
*/
|
||||
static noinline void run_scheduled_bios(struct btrfs_device *device)
|
||||
{
|
||||
struct btrfs_fs_info *fs_info = device->fs_info;
|
||||
struct bio *pending;
|
||||
struct backing_dev_info *bdi;
|
||||
struct btrfs_pending_bios *pending_bios;
|
||||
struct bio *tail;
|
||||
struct bio *cur;
|
||||
int again = 0;
|
||||
unsigned long num_run;
|
||||
unsigned long batch_run = 0;
|
||||
unsigned long last_waited = 0;
|
||||
int force_reg = 0;
|
||||
int sync_pending = 0;
|
||||
struct blk_plug plug;
|
||||
|
||||
/*
|
||||
* this function runs all the bios we've collected for
|
||||
* a particular device. We don't want to wander off to
|
||||
* another device without first sending all of these down.
|
||||
* So, setup a plug here and finish it off before we return
|
||||
*/
|
||||
blk_start_plug(&plug);
|
||||
|
||||
bdi = device->bdev->bd_bdi;
|
||||
|
||||
loop:
|
||||
spin_lock(&device->io_lock);
|
||||
|
||||
loop_lock:
|
||||
num_run = 0;
|
||||
|
||||
/* take all the bios off the list at once and process them
|
||||
* later on (without the lock held). But, remember the
|
||||
* tail and other pointers so the bios can be properly reinserted
|
||||
* into the list if we hit congestion
|
||||
*/
|
||||
if (!force_reg && device->pending_sync_bios.head) {
|
||||
pending_bios = &device->pending_sync_bios;
|
||||
force_reg = 1;
|
||||
} else {
|
||||
pending_bios = &device->pending_bios;
|
||||
force_reg = 0;
|
||||
}
|
||||
|
||||
pending = pending_bios->head;
|
||||
tail = pending_bios->tail;
|
||||
WARN_ON(pending && !tail);
|
||||
|
||||
/*
|
||||
* if pending was null this time around, no bios need processing
|
||||
* at all and we can stop. Otherwise it'll loop back up again
|
||||
* and do an additional check so no bios are missed.
|
||||
*
|
||||
* device->running_pending is used to synchronize with the
|
||||
* schedule_bio code.
|
||||
*/
|
||||
if (device->pending_sync_bios.head == NULL &&
|
||||
device->pending_bios.head == NULL) {
|
||||
again = 0;
|
||||
device->running_pending = 0;
|
||||
} else {
|
||||
again = 1;
|
||||
device->running_pending = 1;
|
||||
}
|
||||
|
||||
pending_bios->head = NULL;
|
||||
pending_bios->tail = NULL;
|
||||
|
||||
spin_unlock(&device->io_lock);
|
||||
|
||||
while (pending) {
|
||||
|
||||
rmb();
|
||||
/* we want to work on both lists, but do more bios on the
|
||||
* sync list than the regular list
|
||||
*/
|
||||
if ((num_run > 32 &&
|
||||
pending_bios != &device->pending_sync_bios &&
|
||||
device->pending_sync_bios.head) ||
|
||||
(num_run > 64 && pending_bios == &device->pending_sync_bios &&
|
||||
device->pending_bios.head)) {
|
||||
spin_lock(&device->io_lock);
|
||||
requeue_list(pending_bios, pending, tail);
|
||||
goto loop_lock;
|
||||
}
|
||||
|
||||
cur = pending;
|
||||
pending = pending->bi_next;
|
||||
cur->bi_next = NULL;
|
||||
|
||||
BUG_ON(atomic_read(&cur->__bi_cnt) == 0);
|
||||
|
||||
/*
|
||||
* if we're doing the sync list, record that our
|
||||
* plug has some sync requests on it
|
||||
*
|
||||
* If we're doing the regular list and there are
|
||||
* sync requests sitting around, unplug before
|
||||
* we add more
|
||||
*/
|
||||
if (pending_bios == &device->pending_sync_bios) {
|
||||
sync_pending = 1;
|
||||
} else if (sync_pending) {
|
||||
blk_finish_plug(&plug);
|
||||
blk_start_plug(&plug);
|
||||
sync_pending = 0;
|
||||
}
|
||||
|
||||
btrfsic_submit_bio(cur);
|
||||
num_run++;
|
||||
batch_run++;
|
||||
|
||||
cond_resched();
|
||||
|
||||
/*
|
||||
* we made progress, there is more work to do and the bdi
|
||||
* is now congested. Back off and let other work structs
|
||||
* run instead
|
||||
*/
|
||||
if (pending && bdi_write_congested(bdi) && batch_run > 8 &&
|
||||
fs_info->fs_devices->open_devices > 1) {
|
||||
struct io_context *ioc;
|
||||
|
||||
ioc = current->io_context;
|
||||
|
||||
/*
|
||||
* the main goal here is that we don't want to
|
||||
* block if we're going to be able to submit
|
||||
* more requests without blocking.
|
||||
*
|
||||
* This code does two great things, it pokes into
|
||||
* the elevator code from a filesystem _and_
|
||||
* it makes assumptions about how batching works.
|
||||
*/
|
||||
if (ioc && ioc->nr_batch_requests > 0 &&
|
||||
time_before(jiffies, ioc->last_waited + HZ/50UL) &&
|
||||
(last_waited == 0 ||
|
||||
ioc->last_waited == last_waited)) {
|
||||
/*
|
||||
* we want to go through our batch of
|
||||
* requests and stop. So, we copy out
|
||||
* the ioc->last_waited time and test
|
||||
* against it before looping
|
||||
*/
|
||||
last_waited = ioc->last_waited;
|
||||
cond_resched();
|
||||
continue;
|
||||
}
|
||||
spin_lock(&device->io_lock);
|
||||
requeue_list(pending_bios, pending, tail);
|
||||
device->running_pending = 1;
|
||||
|
||||
spin_unlock(&device->io_lock);
|
||||
btrfs_queue_work(fs_info->submit_workers,
|
||||
&device->work);
|
||||
goto done;
|
||||
}
|
||||
}
|
||||
|
||||
cond_resched();
|
||||
if (again)
|
||||
goto loop;
|
||||
|
||||
spin_lock(&device->io_lock);
|
||||
if (device->pending_bios.head || device->pending_sync_bios.head)
|
||||
goto loop_lock;
|
||||
spin_unlock(&device->io_lock);
|
||||
|
||||
done:
|
||||
blk_finish_plug(&plug);
|
||||
}
|
||||
|
||||
static void pending_bios_fn(struct btrfs_work *work)
|
||||
{
|
||||
struct btrfs_device *device;
|
||||
|
||||
device = container_of(work, struct btrfs_device, work);
|
||||
run_scheduled_bios(device);
|
||||
}
|
||||
|
||||
static bool device_path_matched(const char *path, struct btrfs_device *device)
|
||||
{
|
||||
int found;
|
||||
|
@ -818,7 +634,7 @@ static int btrfs_open_one_device(struct btrfs_fs_devices *fs_devices,
|
|||
}
|
||||
|
||||
clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
|
||||
fs_devices->seeding = 1;
|
||||
fs_devices->seeding = true;
|
||||
} else {
|
||||
if (bdev_read_only(bdev))
|
||||
clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
|
||||
|
@ -828,7 +644,7 @@ static int btrfs_open_one_device(struct btrfs_fs_devices *fs_devices,
|
|||
|
||||
q = bdev_get_queue(bdev);
|
||||
if (!blk_queue_nonrot(q))
|
||||
fs_devices->rotating = 1;
|
||||
fs_devices->rotating = true;
|
||||
|
||||
device->bdev = bdev;
|
||||
clear_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
|
||||
|
@@ -1005,11 +821,15 @@ static noinline struct btrfs_device *device_list_add(const char *path,
*new_device_added = true;

if (disk_super->label[0])
pr_info("BTRFS: device label %s devid %llu transid %llu %s\n",
disk_super->label, devid, found_transid, path);
pr_info(
"BTRFS: device label %s devid %llu transid %llu %s scanned by %s (%d)\n",
disk_super->label, devid, found_transid, path,
current->comm, task_pid_nr(current));
else
pr_info("BTRFS: device fsid %pU devid %llu transid %llu %s\n",
disk_super->fsid, devid, found_transid, path);
pr_info(
"BTRFS: device fsid %pU devid %llu transid %llu %s scanned by %s (%d)\n",
disk_super->fsid, devid, found_transid, path,
current->comm, task_pid_nr(current));

} else if (!device->name || strcmp(device->name->str, path)) {
/*
@ -1295,7 +1115,7 @@ static int close_fs_devices(struct btrfs_fs_devices *fs_devices)
|
|||
WARN_ON(fs_devices->open_devices);
|
||||
WARN_ON(fs_devices->rw_devices);
|
||||
fs_devices->opened = 0;
|
||||
fs_devices->seeding = 0;
|
||||
fs_devices->seeding = false;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -2048,7 +1868,7 @@ static struct btrfs_device * btrfs_find_next_active_device(
|
|||
* where this function called, there should be always be another device (or
|
||||
* this_dev) which is active.
|
||||
*/
|
||||
void btrfs_assign_next_active_device(struct btrfs_device *device,
|
||||
void __cold btrfs_assign_next_active_device(struct btrfs_device *device,
|
||||
struct btrfs_device *this_dev)
|
||||
{
|
||||
struct btrfs_fs_info *fs_info = device->fs_info;
|
||||
|
@ -2450,11 +2270,11 @@ static int btrfs_prepare_sprout(struct btrfs_fs_info *fs_info)
|
|||
list_splice_init(&fs_devices->alloc_list, &seed_devices->alloc_list);
|
||||
mutex_unlock(&fs_info->chunk_mutex);
|
||||
|
||||
fs_devices->seeding = 0;
|
||||
fs_devices->seeding = false;
|
||||
fs_devices->num_devices = 0;
|
||||
fs_devices->open_devices = 0;
|
||||
fs_devices->missing_devices = 0;
|
||||
fs_devices->rotating = 0;
|
||||
fs_devices->rotating = false;
|
||||
fs_devices->seed = seed_devices;
|
||||
|
||||
generate_random_uuid(fs_devices->fsid);
|
||||
|
@ -2649,7 +2469,7 @@ int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path
|
|||
atomic64_add(device->total_bytes, &fs_info->free_chunk_space);
|
||||
|
||||
if (!blk_queue_nonrot(q))
|
||||
fs_devices->rotating = 1;
|
||||
fs_devices->rotating = true;
|
||||
|
||||
orig_super_total_bytes = btrfs_super_total_bytes(fs_info->super_copy);
|
||||
btrfs_set_super_total_bytes(fs_info->super_copy,
|
||||
|
@ -3177,7 +2997,7 @@ error:
|
|||
static int btrfs_may_alloc_data_chunk(struct btrfs_fs_info *fs_info,
|
||||
u64 chunk_offset)
|
||||
{
|
||||
struct btrfs_block_group_cache *cache;
|
||||
struct btrfs_block_group *cache;
|
||||
u64 bytes_used;
|
||||
u64 chunk_type;
|
||||
|
||||
|
@ -3186,27 +3006,28 @@ static int btrfs_may_alloc_data_chunk(struct btrfs_fs_info *fs_info,
|
|||
chunk_type = cache->flags;
|
||||
btrfs_put_block_group(cache);
|
||||
|
||||
if (chunk_type & BTRFS_BLOCK_GROUP_DATA) {
|
||||
spin_lock(&fs_info->data_sinfo->lock);
|
||||
bytes_used = fs_info->data_sinfo->bytes_used;
|
||||
spin_unlock(&fs_info->data_sinfo->lock);
|
||||
if (!(chunk_type & BTRFS_BLOCK_GROUP_DATA))
|
||||
return 0;
|
||||
|
||||
if (!bytes_used) {
|
||||
struct btrfs_trans_handle *trans;
|
||||
int ret;
|
||||
spin_lock(&fs_info->data_sinfo->lock);
|
||||
bytes_used = fs_info->data_sinfo->bytes_used;
|
||||
spin_unlock(&fs_info->data_sinfo->lock);
|
||||
|
||||
trans = btrfs_join_transaction(fs_info->tree_root);
|
||||
if (IS_ERR(trans))
|
||||
return PTR_ERR(trans);
|
||||
if (!bytes_used) {
|
||||
struct btrfs_trans_handle *trans;
|
||||
int ret;
|
||||
|
||||
ret = btrfs_force_chunk_alloc(trans,
|
||||
BTRFS_BLOCK_GROUP_DATA);
|
||||
btrfs_end_transaction(trans);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
return 1;
|
||||
}
|
||||
trans = btrfs_join_transaction(fs_info->tree_root);
|
||||
if (IS_ERR(trans))
|
||||
return PTR_ERR(trans);
|
||||
|
||||
ret = btrfs_force_chunk_alloc(trans, BTRFS_BLOCK_GROUP_DATA);
|
||||
btrfs_end_transaction(trans);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
return 1;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -3385,28 +3206,28 @@ static int chunk_profiles_filter(u64 chunk_type,
|
|||
static int chunk_usage_range_filter(struct btrfs_fs_info *fs_info, u64 chunk_offset,
|
||||
struct btrfs_balance_args *bargs)
|
||||
{
|
||||
struct btrfs_block_group_cache *cache;
|
||||
struct btrfs_block_group *cache;
|
||||
u64 chunk_used;
|
||||
u64 user_thresh_min;
|
||||
u64 user_thresh_max;
|
||||
int ret = 1;
|
||||
|
||||
cache = btrfs_lookup_block_group(fs_info, chunk_offset);
|
||||
chunk_used = btrfs_block_group_used(&cache->item);
|
||||
chunk_used = cache->used;
|
||||
|
||||
if (bargs->usage_min == 0)
|
||||
user_thresh_min = 0;
|
||||
else
|
||||
user_thresh_min = div_factor_fine(cache->key.offset,
|
||||
bargs->usage_min);
|
||||
user_thresh_min = div_factor_fine(cache->length,
|
||||
bargs->usage_min);
|
||||
|
||||
if (bargs->usage_max == 0)
|
||||
user_thresh_max = 1;
|
||||
else if (bargs->usage_max > 100)
|
||||
user_thresh_max = cache->key.offset;
|
||||
user_thresh_max = cache->length;
|
||||
else
|
||||
user_thresh_max = div_factor_fine(cache->key.offset,
|
||||
bargs->usage_max);
|
||||
user_thresh_max = div_factor_fine(cache->length,
|
||||
bargs->usage_max);
|
||||
|
||||
if (user_thresh_min <= chunk_used && chunk_used < user_thresh_max)
|
||||
ret = 0;
|
||||
|
@@ -3418,20 +3239,19 @@ static int chunk_usage_range_filter(struct btrfs_fs_info *fs_info, u64 chunk_off
static int chunk_usage_filter(struct btrfs_fs_info *fs_info,
u64 chunk_offset, struct btrfs_balance_args *bargs)
{
struct btrfs_block_group_cache *cache;
struct btrfs_block_group *cache;
u64 chunk_used, user_thresh;
int ret = 1;

cache = btrfs_lookup_block_group(fs_info, chunk_offset);
chunk_used = btrfs_block_group_used(&cache->item);
chunk_used = cache->used;

if (bargs->usage_min == 0)
user_thresh = 1;
else if (bargs->usage > 100)
user_thresh = cache->key.offset;
user_thresh = cache->length;
else
user_thresh = div_factor_fine(cache->key.offset,
bargs->usage);
user_thresh = div_factor_fine(cache->length, bargs->usage);

if (chunk_used < user_thresh)
ret = 0;
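Editor's aside: the usage filters above turn a percentage argument into a byte threshold over the block group length. A minimal userspace sketch of that arithmetic, not kernel code; the behaviour of div_factor_fine() (scale by factor/100) is an assumption, since the helper itself is not part of this excerpt:

#include <stdint.h>
#include <stdio.h>

/* assumed behaviour of div_factor_fine(): num * factor / 100 */
static uint64_t div_factor_fine(uint64_t num, int factor)
{
	if (factor == 100)
		return num;
	return num * factor / 100;
}

int main(void)
{
	uint64_t length = 1024ULL * 1024 * 1024;	/* 1 GiB block group (example value) */
	uint64_t used = 300ULL * 1024 * 1024;		/* 300 MiB used (example value) */
	int usage = 50;					/* e.g. balance filter usage=50 */

	uint64_t thresh = div_factor_fine(length, usage);
	/* mirrors the check above: ret becomes 0 when chunk_used < user_thresh */
	printf("threshold=%llu below_threshold=%d\n",
	       (unsigned long long)thresh, used < thresh);
	return 0;
}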
@@ -3844,12 +3664,7 @@ static int alloc_profile_is_valid(u64 flags, int extended)
if (flags == 0)
return !extended; /* "0" is valid for usual profiles */

/* true if exactly one bit set */
/*
* Don't use is_power_of_2(unsigned long) because it won't work
* for the single profile (1ULL << 48) on 32-bit CPUs.
*/
return flags != 0 && (flags & (flags - 1)) == 0;
return has_single_bit_set(flags);
}

static inline int balance_need_close(struct btrfs_fs_info *fs_info)
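Editor's aside: the hunk above replaces the open-coded single-bit test with has_single_bit_set(); as the removed comment notes, is_power_of_2() takes an unsigned long, which truncates the 64-bit single-profile bit (1ULL << 48) on 32-bit targets. A standalone sketch of the same check on a u64 (not the kernel helper, re-implemented here for illustration):

#include <stdint.h>
#include <stdio.h>

/* same test as in the diff: true iff exactly one bit is set in a 64-bit value */
static int has_single_bit_set_u64(uint64_t flags)
{
	return flags != 0 && (flags & (flags - 1)) == 0;
}

int main(void)
{
	uint64_t single = 1ULL << 48;	/* the "single" profile bit from the comment above */

	printf("u64 check: %d\n", has_single_bit_set_u64(single));	/* prints 1 */
	/* an unsigned-long based check loses the bit on 32-bit; simulated with a cast */
	printf("value truncated to 32 bits: %u\n", (uint32_t)single);	/* prints 0 */
	return 0;
}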
@ -4036,7 +3851,7 @@ int btrfs_balance(struct btrfs_fs_info *fs_info,
|
|||
int ret;
|
||||
u64 num_devices;
|
||||
unsigned seq;
|
||||
bool reducing_integrity;
|
||||
bool reducing_redundancy;
|
||||
int i;
|
||||
|
||||
if (btrfs_fs_closing(fs_info) ||
|
||||
|
@ -4119,9 +3934,9 @@ int btrfs_balance(struct btrfs_fs_info *fs_info,
|
|||
((bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
|
||||
(fs_info->avail_metadata_alloc_bits & allowed) &&
|
||||
!(bctl->meta.target & allowed)))
|
||||
reducing_integrity = true;
|
||||
reducing_redundancy = true;
|
||||
else
|
||||
reducing_integrity = false;
|
||||
reducing_redundancy = false;
|
||||
|
||||
/* if we're not converting, the target field is uninitialized */
|
||||
meta_target = (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) ?
|
||||
|
@ -4130,13 +3945,13 @@ int btrfs_balance(struct btrfs_fs_info *fs_info,
|
|||
bctl->data.target : fs_info->avail_data_alloc_bits;
|
||||
} while (read_seqretry(&fs_info->profiles_lock, seq));
|
||||
|
||||
if (reducing_integrity) {
|
||||
if (reducing_redundancy) {
|
||||
if (bctl->flags & BTRFS_BALANCE_FORCE) {
|
||||
btrfs_info(fs_info,
|
||||
"balance: force reducing metadata integrity");
|
||||
"balance: force reducing metadata redundancy");
|
||||
} else {
|
||||
btrfs_err(fs_info,
|
||||
"balance: reduces metadata integrity, use --force if you want this");
|
||||
"balance: reduces metadata redundancy, use --force if you want this");
|
||||
ret = -EINVAL;
|
||||
goto out;
|
||||
}
|
||||
|
@@ -4902,6 +4717,14 @@ static void check_raid56_incompat_flag(struct btrfs_fs_info *info, u64 type)
btrfs_set_fs_incompat(info, RAID56);
}

static void check_raid1c34_incompat_flag(struct btrfs_fs_info *info, u64 type)
{
if (!(type & (BTRFS_BLOCK_GROUP_RAID1C3 | BTRFS_BLOCK_GROUP_RAID1C4)))
return;

btrfs_set_fs_incompat(info, RAID1C34);
}

static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
u64 start, u64 type)
{
@@ -5048,8 +4871,11 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
sort(devices_info, ndevs, sizeof(struct btrfs_device_info),
btrfs_cmp_device_info, NULL);

/* round down to number of usable stripes */
ndevs = round_down(ndevs, devs_increment);
/*
* Round down to number of usable stripes, devs_increment can be any
* number so we can't use round_down()
*/
ndevs -= ndevs % devs_increment;

if (ndevs < devs_min) {
ret = -ENOSPC;
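Editor's aside: round_down() only works for power-of-two alignments, while devs_increment is 3 for raid1c3 and 4 for raid1c4 (see the table earlier in this diff), hence the switch to a plain modulo. A small worked example with assumed values:

#include <stdio.h>

int main(void)
{
	int ndevs = 7;		/* devices usable for the new chunk (assumed) */
	int devs_increment = 3;	/* raid1c3 devs_increment from the table above */

	/* the replacement line from the hunk above: valid for any increment */
	ndevs -= ndevs % devs_increment;
	printf("ndevs rounded down to %d\n", ndevs);	/* prints 6 */
	return 0;
}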
@@ -5165,6 +4991,7 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,

free_extent_map(em);
check_raid56_incompat_flag(info, type);
check_raid1c34_incompat_flag(info, type);

kfree(devices_info);
return 0;
@ -5583,12 +5410,13 @@ void btrfs_put_bbio(struct btrfs_bio *bbio)
|
|||
* replace.
|
||||
*/
|
||||
static int __btrfs_map_block_for_discard(struct btrfs_fs_info *fs_info,
|
||||
u64 logical, u64 length,
|
||||
u64 logical, u64 *length_ret,
|
||||
struct btrfs_bio **bbio_ret)
|
||||
{
|
||||
struct extent_map *em;
|
||||
struct map_lookup *map;
|
||||
struct btrfs_bio *bbio;
|
||||
u64 length = *length_ret;
|
||||
u64 offset;
|
||||
u64 stripe_nr;
|
||||
u64 stripe_nr_end;
|
||||
|
@ -5621,7 +5449,8 @@ static int __btrfs_map_block_for_discard(struct btrfs_fs_info *fs_info,
|
|||
}
|
||||
|
||||
offset = logical - em->start;
|
||||
length = min_t(u64, em->len - offset, length);
|
||||
length = min_t(u64, em->start + em->len - logical, length);
|
||||
*length_ret = length;
|
||||
|
||||
stripe_len = map->stripe_len;
|
||||
/*
|
||||
|
@ -6036,7 +5865,7 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info,
|
|||
|
||||
if (op == BTRFS_MAP_DISCARD)
|
||||
return __btrfs_map_block_for_discard(fs_info, logical,
|
||||
*length, bbio_ret);
|
||||
length, bbio_ret);
|
||||
|
||||
ret = btrfs_get_io_geometry(fs_info, op, logical, *length, &geom);
|
||||
if (ret < 0)
|
||||
|
@ -6416,52 +6245,8 @@ static void btrfs_end_bio(struct bio *bio)
|
|||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* see run_scheduled_bios for a description of why bios are collected for
|
||||
* async submit.
|
||||
*
|
||||
* This will add one bio to the pending list for a device and make sure
|
||||
* the work struct is scheduled.
|
||||
*/
|
||||
static noinline void btrfs_schedule_bio(struct btrfs_device *device,
|
||||
struct bio *bio)
|
||||
{
|
||||
struct btrfs_fs_info *fs_info = device->fs_info;
|
||||
int should_queue = 1;
|
||||
struct btrfs_pending_bios *pending_bios;
|
||||
|
||||
/* don't bother with additional async steps for reads, right now */
|
||||
if (bio_op(bio) == REQ_OP_READ) {
|
||||
btrfsic_submit_bio(bio);
|
||||
return;
|
||||
}
|
||||
|
||||
WARN_ON(bio->bi_next);
|
||||
bio->bi_next = NULL;
|
||||
|
||||
spin_lock(&device->io_lock);
|
||||
if (op_is_sync(bio->bi_opf))
|
||||
pending_bios = &device->pending_sync_bios;
|
||||
else
|
||||
pending_bios = &device->pending_bios;
|
||||
|
||||
if (pending_bios->tail)
|
||||
pending_bios->tail->bi_next = bio;
|
||||
|
||||
pending_bios->tail = bio;
|
||||
if (!pending_bios->head)
|
||||
pending_bios->head = bio;
|
||||
if (device->running_pending)
|
||||
should_queue = 0;
|
||||
|
||||
spin_unlock(&device->io_lock);
|
||||
|
||||
if (should_queue)
|
||||
btrfs_queue_work(fs_info->submit_workers, &device->work);
|
||||
}
|
||||
|
||||
static void submit_stripe_bio(struct btrfs_bio *bbio, struct bio *bio,
|
||||
u64 physical, int dev_nr, int async)
|
||||
u64 physical, int dev_nr)
|
||||
{
|
||||
struct btrfs_device *dev = bbio->stripes[dev_nr].dev;
|
||||
struct btrfs_fs_info *fs_info = bbio->fs_info;
|
||||
|
@ -6479,10 +6264,7 @@ static void submit_stripe_bio(struct btrfs_bio *bbio, struct bio *bio,
|
|||
|
||||
btrfs_bio_counter_inc_noblocked(fs_info);
|
||||
|
||||
if (async)
|
||||
btrfs_schedule_bio(dev, bio);
|
||||
else
|
||||
btrfsic_submit_bio(bio);
|
||||
btrfsic_submit_bio(bio);
|
||||
}
|
||||
|
||||
static void bbio_error(struct btrfs_bio *bbio, struct bio *bio, u64 logical)
|
||||
|
@ -6503,7 +6285,7 @@ static void bbio_error(struct btrfs_bio *bbio, struct bio *bio, u64 logical)
|
|||
}
|
||||
|
||||
blk_status_t btrfs_map_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
|
||||
int mirror_num, int async_submit)
|
||||
int mirror_num)
|
||||
{
|
||||
struct btrfs_device *dev;
|
||||
struct bio *first_bio = bio;
|
||||
|
@ -6572,7 +6354,7 @@ blk_status_t btrfs_map_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
|
|||
bio = first_bio;
|
||||
|
||||
submit_stripe_bio(bbio, bio, bbio->stripes[dev_nr].physical,
|
||||
dev_nr, async_submit);
|
||||
dev_nr);
|
||||
}
|
||||
btrfs_bio_counter_dec(fs_info);
|
||||
return BLK_STS_OK;
|
||||
|
@ -6676,9 +6458,6 @@ struct btrfs_device *btrfs_alloc_device(struct btrfs_fs_info *fs_info,
|
|||
else
|
||||
generate_random_uuid(dev->uuid);
|
||||
|
||||
btrfs_init_work(&dev->work, btrfs_submit_helper,
|
||||
pending_bios_fn, NULL, NULL);
|
||||
|
||||
return dev;
|
||||
}
|
||||
|
||||
|
@ -6875,7 +6654,7 @@ static struct btrfs_fs_devices *open_seed_devices(struct btrfs_fs_info *fs_info,
|
|||
if (IS_ERR(fs_devices))
|
||||
return fs_devices;
|
||||
|
||||
fs_devices->seeding = 1;
|
||||
fs_devices->seeding = true;
|
||||
fs_devices->opened = 1;
|
||||
return fs_devices;
|
||||
}
|
||||
|
@ -7064,48 +6843,49 @@ int btrfs_read_sys_array(struct btrfs_fs_info *fs_info)
|
|||
sb_array_offset += len;
|
||||
cur_offset += len;
|
||||
|
||||
if (key.type == BTRFS_CHUNK_ITEM_KEY) {
|
||||
chunk = (struct btrfs_chunk *)sb_array_offset;
|
||||
/*
|
||||
* At least one btrfs_chunk with one stripe must be
|
||||
* present, exact stripe count check comes afterwards
|
||||
*/
|
||||
len = btrfs_chunk_item_size(1);
|
||||
if (cur_offset + len > array_size)
|
||||
goto out_short_read;
|
||||
|
||||
num_stripes = btrfs_chunk_num_stripes(sb, chunk);
|
||||
if (!num_stripes) {
|
||||
btrfs_err(fs_info,
|
||||
"invalid number of stripes %u in sys_array at offset %u",
|
||||
num_stripes, cur_offset);
|
||||
ret = -EIO;
|
||||
break;
|
||||
}
|
||||
|
||||
type = btrfs_chunk_type(sb, chunk);
|
||||
if ((type & BTRFS_BLOCK_GROUP_SYSTEM) == 0) {
|
||||
btrfs_err(fs_info,
|
||||
"invalid chunk type %llu in sys_array at offset %u",
|
||||
type, cur_offset);
|
||||
ret = -EIO;
|
||||
break;
|
||||
}
|
||||
|
||||
len = btrfs_chunk_item_size(num_stripes);
|
||||
if (cur_offset + len > array_size)
|
||||
goto out_short_read;
|
||||
|
||||
ret = read_one_chunk(&key, sb, chunk);
|
||||
if (ret)
|
||||
break;
|
||||
} else {
|
||||
if (key.type != BTRFS_CHUNK_ITEM_KEY) {
|
||||
btrfs_err(fs_info,
|
||||
"unexpected item type %u in sys_array at offset %u",
|
||||
(u32)key.type, cur_offset);
|
||||
ret = -EIO;
|
||||
break;
|
||||
}
|
||||
|
||||
chunk = (struct btrfs_chunk *)sb_array_offset;
|
||||
/*
|
||||
* At least one btrfs_chunk with one stripe must be present,
|
||||
* exact stripe count check comes afterwards
|
||||
*/
|
||||
len = btrfs_chunk_item_size(1);
|
||||
if (cur_offset + len > array_size)
|
||||
goto out_short_read;
|
||||
|
||||
num_stripes = btrfs_chunk_num_stripes(sb, chunk);
|
||||
if (!num_stripes) {
|
||||
btrfs_err(fs_info,
|
||||
"invalid number of stripes %u in sys_array at offset %u",
|
||||
num_stripes, cur_offset);
|
||||
ret = -EIO;
|
||||
break;
|
||||
}
|
||||
|
||||
type = btrfs_chunk_type(sb, chunk);
|
||||
if ((type & BTRFS_BLOCK_GROUP_SYSTEM) == 0) {
|
||||
btrfs_err(fs_info,
|
||||
"invalid chunk type %llu in sys_array at offset %u",
|
||||
type, cur_offset);
|
||||
ret = -EIO;
|
||||
break;
|
||||
}
|
||||
|
||||
len = btrfs_chunk_item_size(num_stripes);
|
||||
if (cur_offset + len > array_size)
|
||||
goto out_short_read;
|
||||
|
||||
ret = read_one_chunk(&key, sb, chunk);
|
||||
if (ret)
|
||||
break;
|
||||
|
||||
array_ptr += len;
|
||||
sb_array_offset += len;
|
||||
cur_offset += len;
|
||||
|
|
|
@ -18,10 +18,6 @@ extern struct mutex uuid_mutex;
|
|||
#define BTRFS_STRIPE_LEN SZ_64K
|
||||
|
||||
struct buffer_head;
|
||||
struct btrfs_pending_bios {
|
||||
struct bio *head;
|
||||
struct bio *tail;
|
||||
};
|
||||
|
||||
struct btrfs_io_geometry {
|
||||
/* remaining bytes before crossing a stripe */
|
||||
|
@ -68,13 +64,6 @@ struct btrfs_device {
|
|||
|
||||
u64 generation;
|
||||
|
||||
spinlock_t io_lock ____cacheline_aligned;
|
||||
int running_pending;
|
||||
/* regular prio bios */
|
||||
struct btrfs_pending_bios pending_bios;
|
||||
/* sync bios */
|
||||
struct btrfs_pending_bios pending_sync_bios;
|
||||
|
||||
struct block_device *bdev;
|
||||
|
||||
/* the mode sent to blkdev_get */
|
||||
|
@ -254,14 +243,14 @@ struct btrfs_fs_devices {
|
|||
struct list_head alloc_list;
|
||||
|
||||
struct btrfs_fs_devices *seed;
|
||||
int seeding;
|
||||
bool seeding;
|
||||
|
||||
int opened;
|
||||
|
||||
/* set when we find or add a device that doesn't have the
|
||||
* nonrot flag set
|
||||
*/
|
||||
int rotating;
|
||||
bool rotating;
|
||||
|
||||
struct btrfs_fs_info *fs_info;
|
||||
/* sysfs kobjects */
|
||||
|
@ -330,7 +319,6 @@ struct btrfs_bio {
|
|||
u64 map_type; /* get from map_lookup->type */
|
||||
bio_end_io_t *end_io;
|
||||
struct bio *orig_bio;
|
||||
unsigned long flags;
|
||||
void *private;
|
||||
atomic_t error;
|
||||
int max_errors;
|
||||
|
@ -436,7 +424,7 @@ int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info);
|
|||
int btrfs_alloc_chunk(struct btrfs_trans_handle *trans, u64 type);
|
||||
void btrfs_mapping_tree_free(struct extent_map_tree *tree);
|
||||
blk_status_t btrfs_map_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
|
||||
int mirror_num, int async_submit);
|
||||
int mirror_num);
|
||||
int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
|
||||
fmode_t flags, void *holder);
|
||||
struct btrfs_device *btrfs_scan_one_device(const char *path,
|
||||
|
@ -557,6 +545,10 @@ static inline enum btrfs_raid_types btrfs_bg_flags_to_raid_index(u64 flags)
|
|||
return BTRFS_RAID_RAID10;
|
||||
else if (flags & BTRFS_BLOCK_GROUP_RAID1)
|
||||
return BTRFS_RAID_RAID1;
|
||||
else if (flags & BTRFS_BLOCK_GROUP_RAID1C3)
|
||||
return BTRFS_RAID_RAID1C3;
|
||||
else if (flags & BTRFS_BLOCK_GROUP_RAID1C4)
|
||||
return BTRFS_RAID_RAID1C4;
|
||||
else if (flags & BTRFS_BLOCK_GROUP_DUP)
|
||||
return BTRFS_RAID_DUP;
|
||||
else if (flags & BTRFS_BLOCK_GROUP_RAID0)
|
||||
|
@ -571,7 +563,7 @@ static inline enum btrfs_raid_types btrfs_bg_flags_to_raid_index(u64 flags)
|
|||
|
||||
void btrfs_commit_device_sizes(struct btrfs_transaction *trans);
|
||||
|
||||
struct list_head *btrfs_get_fs_uuids(void);
|
||||
struct list_head * __attribute_const__ btrfs_get_fs_uuids(void);
|
||||
void btrfs_set_fs_info_ptr(struct btrfs_fs_info *fs_info);
|
||||
void btrfs_reset_fs_info_ptr(struct btrfs_fs_info *fs_info);
|
||||
bool btrfs_check_rw_degradable(struct btrfs_fs_info *fs_info,
|
||||
|
|
|
@ -29,19 +29,9 @@ struct workspace {
|
|||
|
||||
static struct workspace_manager wsm;
|
||||
|
||||
static void zlib_init_workspace_manager(void)
|
||||
struct list_head *zlib_get_workspace(unsigned int level)
|
||||
{
|
||||
btrfs_init_workspace_manager(&wsm, &btrfs_zlib_compress);
|
||||
}
|
||||
|
||||
static void zlib_cleanup_workspace_manager(void)
|
||||
{
|
||||
btrfs_cleanup_workspace_manager(&wsm);
|
||||
}
|
||||
|
||||
static struct list_head *zlib_get_workspace(unsigned int level)
|
||||
{
|
||||
struct list_head *ws = btrfs_get_workspace(&wsm, level);
|
||||
struct list_head *ws = btrfs_get_workspace(BTRFS_COMPRESS_ZLIB, level);
|
||||
struct workspace *workspace = list_entry(ws, struct workspace, list);
|
||||
|
||||
workspace->level = level;
|
||||
|
@ -49,12 +39,7 @@ static struct list_head *zlib_get_workspace(unsigned int level)
|
|||
return ws;
|
||||
}
|
||||
|
||||
static void zlib_put_workspace(struct list_head *ws)
|
||||
{
|
||||
btrfs_put_workspace(&wsm, ws);
|
||||
}
|
||||
|
||||
static void zlib_free_workspace(struct list_head *ws)
|
||||
void zlib_free_workspace(struct list_head *ws)
|
||||
{
|
||||
struct workspace *workspace = list_entry(ws, struct workspace, list);
|
||||
|
||||
|
@ -63,7 +48,7 @@ static void zlib_free_workspace(struct list_head *ws)
|
|||
kfree(workspace);
|
||||
}
|
||||
|
||||
static struct list_head *zlib_alloc_workspace(unsigned int level)
|
||||
struct list_head *zlib_alloc_workspace(unsigned int level)
|
||||
{
|
||||
struct workspace *workspace;
|
||||
int workspacesize;
|
||||
|
@ -88,13 +73,9 @@ fail:
|
|||
return ERR_PTR(-ENOMEM);
|
||||
}
|
||||
|
||||
static int zlib_compress_pages(struct list_head *ws,
|
||||
struct address_space *mapping,
|
||||
u64 start,
|
||||
struct page **pages,
|
||||
unsigned long *out_pages,
|
||||
unsigned long *total_in,
|
||||
unsigned long *total_out)
|
||||
int zlib_compress_pages(struct list_head *ws, struct address_space *mapping,
|
||||
u64 start, struct page **pages, unsigned long *out_pages,
|
||||
unsigned long *total_in, unsigned long *total_out)
|
||||
{
|
||||
struct workspace *workspace = list_entry(ws, struct workspace, list);
|
||||
int ret;
|
||||
|
@ -228,7 +209,7 @@ out:
|
|||
return ret;
|
||||
}
|
||||
|
||||
static int zlib_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
|
||||
int zlib_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
|
||||
{
|
||||
struct workspace *workspace = list_entry(ws, struct workspace, list);
|
||||
int ret = 0, ret2;
|
||||
|
@ -319,10 +300,9 @@ done:
|
|||
return ret;
|
||||
}
|
||||
|
||||
static int zlib_decompress(struct list_head *ws, unsigned char *data_in,
|
||||
struct page *dest_page,
|
||||
unsigned long start_byte,
|
||||
size_t srclen, size_t destlen)
|
||||
int zlib_decompress(struct list_head *ws, unsigned char *data_in,
|
||||
struct page *dest_page, unsigned long start_byte, size_t srclen,
|
||||
size_t destlen)
|
||||
{
|
||||
struct workspace *workspace = list_entry(ws, struct workspace, list);
|
||||
int ret = 0;
|
||||
|
@@ -419,15 +399,7 @@ next:
}

const struct btrfs_compress_op btrfs_zlib_compress = {
.init_workspace_manager = zlib_init_workspace_manager,
.cleanup_workspace_manager = zlib_cleanup_workspace_manager,
.get_workspace = zlib_get_workspace,
.put_workspace = zlib_put_workspace,
.alloc_workspace = zlib_alloc_workspace,
.free_workspace = zlib_free_workspace,
.compress_pages = zlib_compress_pages,
.decompress_bio = zlib_decompress_bio,
.decompress = zlib_decompress,
.workspace_manager = &wsm,
.max_level = 9,
.default_level = BTRFS_ZLIB_DEFAULT_LEVEL,
};
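Editor's aside: with the workspace callbacks dropped from btrfs_compress_op, the per-algorithm entry points such as zlib_get_workspace() lose their static qualifier and, per the changelog, get called directly rather than through function pointers. The dispatcher that does this lives in compression.c, which is not part of this excerpt; the following is only a hedged, self-contained illustration of switch-based direct dispatch, with stub functions standing in for the real entry points:

#include <stdio.h>

/* Hypothetical illustration only -- not the actual fs/btrfs/compression.c. */
static void *zlib_get_workspace_stub(unsigned int level)
{
	printf("zlib workspace, level %u\n", level);
	return NULL;
}

static void *zstd_get_workspace_stub(unsigned int level)
{
	printf("zstd workspace, level %u\n", level);
	return NULL;
}

enum { COMP_ZLIB, COMP_ZSTD };

/* direct, switch-based dispatch: no function pointer is loaded at the call site */
static void *get_workspace(int type, unsigned int level)
{
	switch (type) {
	case COMP_ZLIB:
		return zlib_get_workspace_stub(level);
	case COMP_ZSTD:
		return zstd_get_workspace_stub(level);
	default:
		return NULL;
	}
}

int main(void)
{
	get_workspace(COMP_ZLIB, 3);
	get_workspace(COMP_ZSTD, 1);
	return 0;
}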
@ -91,9 +91,8 @@ static inline struct workspace *list_to_workspace(struct list_head *list)
|
|||
return container_of(list, struct workspace, list);
|
||||
}
|
||||
|
||||
static void zstd_free_workspace(struct list_head *ws);
|
||||
static struct list_head *zstd_alloc_workspace(unsigned int level);
|
||||
|
||||
void zstd_free_workspace(struct list_head *ws);
|
||||
struct list_head *zstd_alloc_workspace(unsigned int level);
|
||||
/*
|
||||
* zstd_reclaim_timer_fn - reclaim timer
|
||||
* @t: timer
|
||||
|
@ -168,7 +167,7 @@ static void zstd_calc_ws_mem_sizes(void)
|
|||
}
|
||||
}
|
||||
|
||||
static void zstd_init_workspace_manager(void)
|
||||
void zstd_init_workspace_manager(void)
|
||||
{
|
||||
struct list_head *ws;
|
||||
int i;
|
||||
|
@ -194,7 +193,7 @@ static void zstd_init_workspace_manager(void)
|
|||
}
|
||||
}
|
||||
|
||||
static void zstd_cleanup_workspace_manager(void)
|
||||
void zstd_cleanup_workspace_manager(void)
|
||||
{
|
||||
struct workspace *workspace;
|
||||
int i;
|
||||
|
@ -261,7 +260,7 @@ static struct list_head *zstd_find_workspace(unsigned int level)
|
|||
* attempt to allocate a new workspace. If we fail to allocate one due to
|
||||
* memory pressure, go to sleep waiting for the max level workspace to free up.
|
||||
*/
|
||||
static struct list_head *zstd_get_workspace(unsigned int level)
|
||||
struct list_head *zstd_get_workspace(unsigned int level)
|
||||
{
|
||||
struct list_head *ws;
|
||||
unsigned int nofs_flag;
|
||||
|
@ -302,7 +301,7 @@ again:
|
|||
* isn't set, it is also set here. Only the max level workspace tries and wakes
|
||||
* up waiting workspaces.
|
||||
*/
|
||||
static void zstd_put_workspace(struct list_head *ws)
|
||||
void zstd_put_workspace(struct list_head *ws)
|
||||
{
|
||||
struct workspace *workspace = list_to_workspace(ws);
|
||||
|
||||
|
@ -332,7 +331,7 @@ static void zstd_put_workspace(struct list_head *ws)
|
|||
cond_wake_up(&wsm.wait);
|
||||
}
|
||||
|
||||
static void zstd_free_workspace(struct list_head *ws)
|
||||
void zstd_free_workspace(struct list_head *ws)
|
||||
{
|
||||
struct workspace *workspace = list_entry(ws, struct workspace, list);
|
||||
|
||||
|
@ -341,7 +340,7 @@ static void zstd_free_workspace(struct list_head *ws)
|
|||
kfree(workspace);
|
||||
}
|
||||
|
||||
static struct list_head *zstd_alloc_workspace(unsigned int level)
|
||||
struct list_head *zstd_alloc_workspace(unsigned int level)
|
||||
{
|
||||
struct workspace *workspace;
|
||||
|
||||
|
@ -367,13 +366,9 @@ fail:
|
|||
return ERR_PTR(-ENOMEM);
|
||||
}
|
||||
|
||||
static int zstd_compress_pages(struct list_head *ws,
|
||||
struct address_space *mapping,
|
||||
u64 start,
|
||||
struct page **pages,
|
||||
unsigned long *out_pages,
|
||||
unsigned long *total_in,
|
||||
unsigned long *total_out)
|
||||
int zstd_compress_pages(struct list_head *ws, struct address_space *mapping,
|
||||
u64 start, struct page **pages, unsigned long *out_pages,
|
||||
unsigned long *total_in, unsigned long *total_out)
|
||||
{
|
||||
struct workspace *workspace = list_entry(ws, struct workspace, list);
|
||||
ZSTD_CStream *stream;
|
||||
|
@ -548,7 +543,7 @@ out:
|
|||
return ret;
|
||||
}
|
||||
|
||||
static int zstd_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
|
||||
int zstd_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
|
||||
{
|
||||
struct workspace *workspace = list_entry(ws, struct workspace, list);
|
||||
struct page **pages_in = cb->compressed_pages;
|
||||
|
@ -626,10 +621,9 @@ done:
|
|||
return ret;
|
||||
}
|
||||
|
||||
static int zstd_decompress(struct list_head *ws, unsigned char *data_in,
|
||||
struct page *dest_page,
|
||||
unsigned long start_byte,
|
||||
size_t srclen, size_t destlen)
|
||||
int zstd_decompress(struct list_head *ws, unsigned char *data_in,
|
||||
struct page *dest_page, unsigned long start_byte, size_t srclen,
|
||||
size_t destlen)
|
||||
{
|
||||
struct workspace *workspace = list_entry(ws, struct workspace, list);
|
||||
ZSTD_DStream *stream;
|
||||
|
@ -712,15 +706,8 @@ finish:
|
|||
}
|
||||
|
||||
const struct btrfs_compress_op btrfs_zstd_compress = {
|
||||
.init_workspace_manager = zstd_init_workspace_manager,
|
||||
.cleanup_workspace_manager = zstd_cleanup_workspace_manager,
|
||||
.get_workspace = zstd_get_workspace,
|
||||
.put_workspace = zstd_put_workspace,
|
||||
.alloc_workspace = zstd_alloc_workspace,
|
||||
.free_workspace = zstd_free_workspace,
|
||||
.compress_pages = zstd_compress_pages,
|
||||
.decompress_bio = zstd_decompress_bio,
|
||||
.decompress = zstd_decompress,
|
||||
/* ZSTD uses own workspace manager */
|
||||
.workspace_manager = NULL,
|
||||
.max_level = ZSTD_BTRFS_MAX_LEVEL,
|
||||
.default_level = ZSTD_BTRFS_DEFAULT_LEVEL,
|
||||
};
|
||||
|
|
|
@ -19,7 +19,7 @@ struct btrfs_delayed_ref_node;
|
|||
struct btrfs_delayed_tree_ref;
|
||||
struct btrfs_delayed_data_ref;
|
||||
struct btrfs_delayed_ref_head;
|
||||
struct btrfs_block_group_cache;
|
||||
struct btrfs_block_group;
|
||||
struct btrfs_free_cluster;
|
||||
struct map_lookup;
|
||||
struct extent_buffer;
|
||||
|
@ -170,7 +170,7 @@ DECLARE_EVENT_CLASS(btrfs__inode,
|
|||
|
||||
TP_STRUCT__entry_btrfs(
|
||||
__field( u64, ino )
|
||||
__field( blkcnt_t, blocks )
|
||||
__field( u64, blocks )
|
||||
__field( u64, disk_i_size )
|
||||
__field( u64, generation )
|
||||
__field( u64, last_trans )
|
||||
|
@ -194,7 +194,7 @@ DECLARE_EVENT_CLASS(btrfs__inode,
|
|||
show_root_type(__entry->root_objectid),
|
||||
__entry->generation,
|
||||
__entry->ino,
|
||||
(unsigned long long)__entry->blocks,
|
||||
__entry->blocks,
|
||||
__entry->disk_i_size,
|
||||
__entry->last_trans,
|
||||
__entry->logged_trans)
|
||||
|
@ -292,7 +292,7 @@ TRACE_EVENT_CONDITION(btrfs_get_extent,
|
|||
|
||||
TRACE_EVENT(btrfs_handle_em_exist,
|
||||
|
||||
TP_PROTO(struct btrfs_fs_info *fs_info,
|
||||
TP_PROTO(const struct btrfs_fs_info *fs_info,
|
||||
const struct extent_map *existing, const struct extent_map *map,
|
||||
u64 start, u64 len),
|
||||
|
||||
|
@ -330,8 +330,8 @@ TRACE_EVENT(btrfs_handle_em_exist,
|
|||
/* file extent item */
|
||||
DECLARE_EVENT_CLASS(btrfs__file_extent_item_regular,
|
||||
|
||||
TP_PROTO(struct btrfs_inode *bi, struct extent_buffer *l,
|
||||
struct btrfs_file_extent_item *fi, u64 start),
|
||||
TP_PROTO(const struct btrfs_inode *bi, const struct extent_buffer *l,
|
||||
const struct btrfs_file_extent_item *fi, u64 start),
|
||||
|
||||
TP_ARGS(bi, l, fi, start),
|
||||
|
||||
|
@ -385,8 +385,8 @@ DECLARE_EVENT_CLASS(btrfs__file_extent_item_regular,
|
|||
DECLARE_EVENT_CLASS(
|
||||
btrfs__file_extent_item_inline,
|
||||
|
||||
TP_PROTO(struct btrfs_inode *bi, struct extent_buffer *l,
|
||||
struct btrfs_file_extent_item *fi, int slot, u64 start),
|
||||
TP_PROTO(const struct btrfs_inode *bi, const struct extent_buffer *l,
|
||||
const struct btrfs_file_extent_item *fi, int slot, u64 start),
|
||||
|
||||
TP_ARGS(bi, l, fi, slot, start),
|
||||
|
||||
|
@ -426,8 +426,8 @@ DECLARE_EVENT_CLASS(
|
|||
DEFINE_EVENT(
|
||||
btrfs__file_extent_item_regular, btrfs_get_extent_show_fi_regular,
|
||||
|
||||
TP_PROTO(struct btrfs_inode *bi, struct extent_buffer *l,
|
||||
struct btrfs_file_extent_item *fi, u64 start),
|
||||
TP_PROTO(const struct btrfs_inode *bi, const struct extent_buffer *l,
|
||||
const struct btrfs_file_extent_item *fi, u64 start),
|
||||
|
||||
TP_ARGS(bi, l, fi, start)
|
||||
);
|
||||
|
@ -435,8 +435,8 @@ DEFINE_EVENT(
|
|||
DEFINE_EVENT(
|
||||
btrfs__file_extent_item_regular, btrfs_truncate_show_fi_regular,
|
||||
|
||||
TP_PROTO(struct btrfs_inode *bi, struct extent_buffer *l,
|
||||
struct btrfs_file_extent_item *fi, u64 start),
|
||||
TP_PROTO(const struct btrfs_inode *bi, const struct extent_buffer *l,
|
||||
const struct btrfs_file_extent_item *fi, u64 start),
|
||||
|
||||
TP_ARGS(bi, l, fi, start)
|
||||
);
|
||||
|
@ -444,8 +444,8 @@ DEFINE_EVENT(
|
|||
DEFINE_EVENT(
|
||||
btrfs__file_extent_item_inline, btrfs_get_extent_show_fi_inline,
|
||||
|
||||
TP_PROTO(struct btrfs_inode *bi, struct extent_buffer *l,
|
||||
struct btrfs_file_extent_item *fi, int slot, u64 start),
|
||||
TP_PROTO(const struct btrfs_inode *bi, const struct extent_buffer *l,
|
||||
const struct btrfs_file_extent_item *fi, int slot, u64 start),
|
||||
|
||||
TP_ARGS(bi, l, fi, slot, start)
|
||||
);
|
||||
|
@ -453,8 +453,8 @@ DEFINE_EVENT(
|
|||
DEFINE_EVENT(
|
||||
btrfs__file_extent_item_inline, btrfs_truncate_show_fi_inline,
|
||||
|
||||
TP_PROTO(struct btrfs_inode *bi, struct extent_buffer *l,
|
||||
struct btrfs_file_extent_item *fi, int slot, u64 start),
|
||||
TP_PROTO(const struct btrfs_inode *bi, const struct extent_buffer *l,
|
||||
const struct btrfs_file_extent_item *fi, int slot, u64 start),
|
||||
|
||||
TP_ARGS(bi, l, fi, slot, start)
|
||||
);
|
||||
|
@ -574,7 +574,7 @@ DECLARE_EVENT_CLASS(btrfs__writepage,
|
|||
__field( char, for_kupdate )
|
||||
__field( char, for_reclaim )
|
||||
__field( char, range_cyclic )
|
||||
__field( pgoff_t, writeback_index )
|
||||
__field( unsigned long, writeback_index )
|
||||
__field( u64, root_objectid )
|
||||
),
|
||||
|
||||
|
@ -603,7 +603,7 @@ DECLARE_EVENT_CLASS(btrfs__writepage,
|
|||
__entry->range_start, __entry->range_end,
|
||||
__entry->for_kupdate,
|
||||
__entry->for_reclaim, __entry->range_cyclic,
|
||||
(unsigned long)__entry->writeback_index)
|
||||
__entry->writeback_index)
|
||||
);
|
||||
|
||||
DEFINE_EVENT(btrfs__writepage, __extent_writepage,
|
||||
|
@ -622,7 +622,7 @@ TRACE_EVENT(btrfs_writepage_end_io_hook,
|
|||
|
||||
TP_STRUCT__entry_btrfs(
|
||||
__field( u64, ino )
|
||||
__field( pgoff_t, index )
|
||||
__field( unsigned long, index )
|
||||
__field( u64, start )
|
||||
__field( u64, end )
|
||||
__field( int, uptodate )
|
||||
|
@ -642,7 +642,7 @@ TRACE_EVENT(btrfs_writepage_end_io_hook,
|
|||
TP_printk_btrfs("root=%llu(%s) ino=%llu page_index=%lu start=%llu "
|
||||
"end=%llu uptodate=%d",
|
||||
show_root_type(__entry->root_objectid),
|
||||
__entry->ino, (unsigned long)__entry->index,
|
||||
__entry->ino, __entry->index,
|
||||
__entry->start,
|
||||
__entry->end, __entry->uptodate)
|
||||
);
|
||||
|
@ -699,7 +699,7 @@ TRACE_EVENT(btrfs_sync_fs,
|
|||
TRACE_EVENT(btrfs_add_block_group,
|
||||
|
||||
TP_PROTO(const struct btrfs_fs_info *fs_info,
|
||||
const struct btrfs_block_group_cache *block_group, int create),
|
||||
const struct btrfs_block_group *block_group, int create),
|
||||
|
||||
TP_ARGS(fs_info, block_group, create),
|
||||
|
||||
|
@ -713,11 +713,10 @@ TRACE_EVENT(btrfs_add_block_group,
|
|||
),
|
||||
|
||||
TP_fast_assign_btrfs(fs_info,
|
||||
__entry->offset = block_group->key.objectid;
|
||||
__entry->size = block_group->key.offset;
|
||||
__entry->offset = block_group->start;
|
||||
__entry->size = block_group->length;
|
||||
__entry->flags = block_group->flags;
|
||||
__entry->bytes_used =
|
||||
btrfs_block_group_used(&block_group->item);
|
||||
__entry->bytes_used = block_group->used;
|
||||
__entry->bytes_super = block_group->bytes_super;
|
||||
__entry->create = create;
|
||||
),
|
||||
|
@ -1018,7 +1017,7 @@ TRACE_EVENT(btrfs_cow_block,
|
|||
|
||||
TRACE_EVENT(btrfs_space_reservation,
|
||||
|
||||
TP_PROTO(const struct btrfs_fs_info *fs_info, char *type, u64 val,
|
||||
TP_PROTO(const struct btrfs_fs_info *fs_info, const char *type, u64 val,
|
||||
u64 bytes, int reserve),
|
||||
|
||||
TP_ARGS(fs_info, type, val, bytes, reserve),
|
||||
|
@ -1051,7 +1050,7 @@ TRACE_EVENT(btrfs_space_reservation,
|
|||
TRACE_EVENT(btrfs_trigger_flush,
|
||||
|
||||
TP_PROTO(const struct btrfs_fs_info *fs_info, u64 flags, u64 bytes,
|
||||
int flush, char *reason),
|
||||
int flush, const char *reason),
|
||||
|
||||
TP_ARGS(fs_info, flags, bytes, flush, reason),
|
||||
|
||||
|
@ -1185,7 +1184,7 @@ TRACE_EVENT(find_free_extent,
|
|||
|
||||
DECLARE_EVENT_CLASS(btrfs__reserve_extent,
|
||||
|
||||
TP_PROTO(const struct btrfs_block_group_cache *block_group, u64 start,
|
||||
TP_PROTO(const struct btrfs_block_group *block_group, u64 start,
|
||||
u64 len),
|
||||
|
||||
TP_ARGS(block_group, start, len),
|
||||
|
@ -1198,7 +1197,7 @@ DECLARE_EVENT_CLASS(btrfs__reserve_extent,
|
|||
),
|
||||
|
||||
TP_fast_assign_btrfs(block_group->fs_info,
|
||||
__entry->bg_objectid = block_group->key.objectid;
|
||||
__entry->bg_objectid = block_group->start;
|
||||
__entry->flags = block_group->flags;
|
||||
__entry->start = start;
|
||||
__entry->len = len;
|
||||
|
@ -1215,7 +1214,7 @@ DECLARE_EVENT_CLASS(btrfs__reserve_extent,
|
|||
|
||||
DEFINE_EVENT(btrfs__reserve_extent, btrfs_reserve_extent,
|
||||
|
||||
TP_PROTO(const struct btrfs_block_group_cache *block_group, u64 start,
|
||||
TP_PROTO(const struct btrfs_block_group *block_group, u64 start,
|
||||
u64 len),
|
||||
|
||||
TP_ARGS(block_group, start, len)
|
||||
|
@ -1223,7 +1222,7 @@ DEFINE_EVENT(btrfs__reserve_extent, btrfs_reserve_extent,
|
|||
|
||||
DEFINE_EVENT(btrfs__reserve_extent, btrfs_reserve_extent_cluster,
|
||||
|
||||
TP_PROTO(const struct btrfs_block_group_cache *block_group, u64 start,
|
||||
TP_PROTO(const struct btrfs_block_group *block_group, u64 start,
|
||||
u64 len),
|
||||
|
||||
TP_ARGS(block_group, start, len)
|
||||
|
@ -1231,7 +1230,7 @@ DEFINE_EVENT(btrfs__reserve_extent, btrfs_reserve_extent_cluster,
|
|||
|
||||
TRACE_EVENT(btrfs_find_cluster,
|
||||
|
||||
TP_PROTO(const struct btrfs_block_group_cache *block_group, u64 start,
|
||||
TP_PROTO(const struct btrfs_block_group *block_group, u64 start,
|
||||
u64 bytes, u64 empty_size, u64 min_bytes),
|
||||
|
||||
TP_ARGS(block_group, start, bytes, empty_size, min_bytes),
|
||||
|
@ -1246,7 +1245,7 @@ TRACE_EVENT(btrfs_find_cluster,
|
|||
),
|
||||
|
||||
TP_fast_assign_btrfs(block_group->fs_info,
|
||||
__entry->bg_objectid = block_group->key.objectid;
|
||||
__entry->bg_objectid = block_group->start;
|
||||
__entry->flags = block_group->flags;
|
||||
__entry->start = start;
|
||||
__entry->bytes = bytes;
|
||||
|
@ -1264,7 +1263,7 @@ TRACE_EVENT(btrfs_find_cluster,
|
|||
|
||||
TRACE_EVENT(btrfs_failed_cluster_setup,
|
||||
|
||||
TP_PROTO(const struct btrfs_block_group_cache *block_group),
|
||||
TP_PROTO(const struct btrfs_block_group *block_group),
|
||||
|
||||
TP_ARGS(block_group),
|
||||
|
||||
|
@ -1273,7 +1272,7 @@ TRACE_EVENT(btrfs_failed_cluster_setup,
|
|||
),
|
||||
|
||||
TP_fast_assign_btrfs(block_group->fs_info,
|
||||
__entry->bg_objectid = block_group->key.objectid;
|
||||
__entry->bg_objectid = block_group->start;
|
||||
),
|
||||
|
||||
TP_printk_btrfs("block_group=%llu", __entry->bg_objectid)
|
||||
|
@ -1281,7 +1280,7 @@ TRACE_EVENT(btrfs_failed_cluster_setup,
|
|||
|
||||
TRACE_EVENT(btrfs_setup_cluster,
|
||||
|
||||
TP_PROTO(const struct btrfs_block_group_cache *block_group,
|
||||
TP_PROTO(const struct btrfs_block_group *block_group,
|
||||
const struct btrfs_free_cluster *cluster,
|
||||
u64 size, int bitmap),
|
||||
|
||||
|
@ -1297,7 +1296,7 @@ TRACE_EVENT(btrfs_setup_cluster,
|
|||
),
|
||||
|
||||
TP_fast_assign_btrfs(block_group->fs_info,
|
||||
__entry->bg_objectid = block_group->key.objectid;
|
||||
__entry->bg_objectid = block_group->start;
|
||||
__entry->flags = block_group->flags;
|
||||
__entry->start = cluster->window_start;
|
||||
__entry->max_size = cluster->max_size;
|
||||
|
@ -1325,17 +1324,17 @@ TRACE_EVENT(alloc_extent_state,
|
|||
TP_STRUCT__entry(
|
||||
__field(const struct extent_state *, state)
|
||||
__field(gfp_t, mask)
|
||||
__field(unsigned long, ip)
|
||||
__field(const void*, ip)
|
||||
),
|
||||
|
||||
TP_fast_assign(
|
||||
__entry->state = state,
|
||||
__entry->mask = mask,
|
||||
__entry->ip = IP
|
||||
__entry->ip = (const void *)IP
|
||||
),
|
||||
|
||||
TP_printk("state=%p mask=%s caller=%pS", __entry->state,
|
||||
show_gfp_flags(__entry->mask), (const void *)__entry->ip)
|
||||
show_gfp_flags(__entry->mask), __entry->ip)
|
||||
);
|
||||
|
||||
TRACE_EVENT(free_extent_state,
|
||||
|
@ -1346,16 +1345,15 @@ TRACE_EVENT(free_extent_state,
|
|||
|
||||
TP_STRUCT__entry(
|
||||
__field(const struct extent_state *, state)
|
||||
__field(unsigned long, ip)
|
||||
__field(const void*, ip)
|
||||
),
|
||||
|
||||
TP_fast_assign(
|
||||
__entry->state = state,
|
||||
__entry->ip = IP
|
||||
__entry->ip = (const void *)IP
|
||||
),
|
||||
|
||||
TP_printk("state=%p caller=%pS", __entry->state,
|
||||
(const void *)__entry->ip)
|
||||
TP_printk("state=%p caller=%pS", __entry->state, __entry->ip)
|
||||
);
|
||||
|
||||
DECLARE_EVENT_CLASS(btrfs__work,
|
||||
|
@ -1389,9 +1387,9 @@ DECLARE_EVENT_CLASS(btrfs__work,
|
|||
);
|
||||
|
||||
/*
|
||||
* For situiations when the work is freed, we pass fs_info and a tag that that
|
||||
* matches address of the work structure so it can be paired with the
|
||||
* scheduling event.
|
||||
* For situations when the work is freed, we pass fs_info and a tag that matches
|
||||
* the address of the work structure so it can be paired with the scheduling
|
||||
* event. DO NOT add anything here that dereferences wtag.
|
||||
*/
|
||||
DECLARE_EVENT_CLASS(btrfs__work__done,
|
||||
|
||||
|
@ -1567,8 +1565,7 @@ DECLARE_EVENT_CLASS(btrfs_qgroup_extent,
|
|||
),
|
||||
|
||||
TP_printk_btrfs("bytenr=%llu num_bytes=%llu",
|
||||
(unsigned long long)__entry->bytenr,
|
||||
(unsigned long long)__entry->num_bytes)
|
||||
__entry->bytenr, __entry->num_bytes)
|
||||
);
|
||||
|
||||
DEFINE_EVENT(btrfs_qgroup_extent, btrfs_qgroup_account_extents,
|
||||
|
@ -1644,7 +1641,7 @@ TRACE_EVENT(btrfs_qgroup_account_extent,
|
|||
TRACE_EVENT(qgroup_update_counters,
|
||||
|
||||
TP_PROTO(const struct btrfs_fs_info *fs_info,
|
||||
struct btrfs_qgroup *qgroup,
|
||||
const struct btrfs_qgroup *qgroup,
|
||||
u64 cur_old_count, u64 cur_new_count),
|
||||
|
||||
TP_ARGS(fs_info, qgroup, cur_old_count, cur_new_count),
|
||||
|
@ -1825,7 +1822,7 @@ DEFINE_EVENT(btrfs__prelim_ref, btrfs_prelim_ref_insert,
|
|||
);
|
||||
|
||||
TRACE_EVENT(btrfs_inode_mod_outstanding_extents,
|
||||
TP_PROTO(struct btrfs_root *root, u64 ino, int mod),
|
||||
TP_PROTO(const struct btrfs_root *root, u64 ino, int mod),
|
||||
|
||||
TP_ARGS(root, ino, mod),
|
||||
|
||||
|
@ -1847,7 +1844,7 @@ TRACE_EVENT(btrfs_inode_mod_outstanding_extents,
|
|||
);
|
||||
|
||||
DECLARE_EVENT_CLASS(btrfs__block_group,
|
||||
TP_PROTO(const struct btrfs_block_group_cache *bg_cache),
|
||||
TP_PROTO(const struct btrfs_block_group *bg_cache),
|
||||
|
||||
TP_ARGS(bg_cache),
|
||||
|
||||
|
@ -1859,9 +1856,9 @@ DECLARE_EVENT_CLASS(btrfs__block_group,
|
|||
),
|
||||
|
||||
TP_fast_assign_btrfs(bg_cache->fs_info,
|
||||
__entry->bytenr = bg_cache->key.objectid,
|
||||
__entry->len = bg_cache->key.offset,
|
||||
__entry->used = btrfs_block_group_used(&bg_cache->item);
|
||||
__entry->bytenr = bg_cache->start,
|
||||
__entry->len = bg_cache->length,
|
||||
__entry->used = bg_cache->used;
|
||||
__entry->flags = bg_cache->flags;
|
||||
),
|
||||
|
||||
|
@ -1871,19 +1868,19 @@ DECLARE_EVENT_CLASS(btrfs__block_group,
|
|||
);
|
||||
|
||||
DEFINE_EVENT(btrfs__block_group, btrfs_remove_block_group,
|
||||
TP_PROTO(const struct btrfs_block_group_cache *bg_cache),
|
||||
TP_PROTO(const struct btrfs_block_group *bg_cache),
|
||||
|
||||
TP_ARGS(bg_cache)
|
||||
);
|
||||
|
||||
DEFINE_EVENT(btrfs__block_group, btrfs_add_unused_block_group,
|
||||
TP_PROTO(const struct btrfs_block_group_cache *bg_cache),
|
||||
TP_PROTO(const struct btrfs_block_group *bg_cache),
|
||||
|
||||
TP_ARGS(bg_cache)
|
||||
);
|
||||
|
||||
DEFINE_EVENT(btrfs__block_group, btrfs_skip_unused_block_group,
|
||||
TP_PROTO(const struct btrfs_block_group_cache *bg_cache),
|
||||
TP_PROTO(const struct btrfs_block_group *bg_cache),
|
||||
|
||||
TP_ARGS(bg_cache)
|
||||
);
|
||||
|
@ -1906,7 +1903,7 @@ TRACE_EVENT(btrfs_set_extent_bit,
|
|||
TP_fast_assign_btrfs(tree->fs_info,
|
||||
__entry->owner = tree->owner;
|
||||
if (tree->private_data) {
|
||||
struct inode *inode = tree->private_data;
|
||||
const struct inode *inode = tree->private_data;
|
||||
|
||||
__entry->ino = btrfs_ino(BTRFS_I(inode));
|
||||
__entry->rootid =
|
||||
|
@ -1945,7 +1942,7 @@ TRACE_EVENT(btrfs_clear_extent_bit,
|
|||
TP_fast_assign_btrfs(tree->fs_info,
|
||||
__entry->owner = tree->owner;
|
||||
if (tree->private_data) {
|
||||
struct inode *inode = tree->private_data;
|
||||
const struct inode *inode = tree->private_data;
|
||||
|
||||
__entry->ino = btrfs_ino(BTRFS_I(inode));
|
||||
__entry->rootid =
|
||||
|
@ -1985,7 +1982,7 @@ TRACE_EVENT(btrfs_convert_extent_bit,
|
|||
TP_fast_assign_btrfs(tree->fs_info,
|
||||
__entry->owner = tree->owner;
|
||||
if (tree->private_data) {
|
||||
struct inode *inode = tree->private_data;
|
||||
const struct inode *inode = tree->private_data;
|
||||
|
||||
__entry->ino = btrfs_ino(BTRFS_I(inode));
|
||||
__entry->rootid =
|
||||
|
@ -2094,8 +2091,8 @@ DEFINE_BTRFS_LOCK_EVENT(btrfs_tree_read_lock_atomic);
|
|||
|
||||
DECLARE_EVENT_CLASS(btrfs__space_info_update,
|
||||
|
||||
TP_PROTO(struct btrfs_fs_info *fs_info,
|
||||
struct btrfs_space_info *sinfo, u64 old, s64 diff),
|
||||
TP_PROTO(const struct btrfs_fs_info *fs_info,
|
||||
const struct btrfs_space_info *sinfo, u64 old, s64 diff),
|
||||
|
||||
TP_ARGS(fs_info, sinfo, old, diff),
|
||||
|
||||
|
@ -2117,16 +2114,16 @@ DECLARE_EVENT_CLASS(btrfs__space_info_update,
|
|||
|
||||
DEFINE_EVENT(btrfs__space_info_update, update_bytes_may_use,
|
||||
|
||||
TP_PROTO(struct btrfs_fs_info *fs_info,
|
||||
struct btrfs_space_info *sinfo, u64 old, s64 diff),
|
||||
TP_PROTO(const struct btrfs_fs_info *fs_info,
|
||||
const struct btrfs_space_info *sinfo, u64 old, s64 diff),
|
||||
|
||||
TP_ARGS(fs_info, sinfo, old, diff)
|
||||
);
|
||||
|
||||
DEFINE_EVENT(btrfs__space_info_update, update_bytes_pinned,
|
||||
|
||||
TP_PROTO(struct btrfs_fs_info *fs_info,
|
||||
struct btrfs_space_info *sinfo, u64 old, s64 diff),
|
||||
TP_PROTO(const struct btrfs_fs_info *fs_info,
|
||||
const struct btrfs_space_info *sinfo, u64 old, s64 diff),
|
||||
|
||||
TP_ARGS(fs_info, sinfo, old, diff)
|
||||
);
|
||||
|
|
|
@ -270,6 +270,7 @@ struct btrfs_ioctl_fs_info_args {
|
|||
#define BTRFS_FEATURE_INCOMPAT_SKINNY_METADATA (1ULL << 8)
|
||||
#define BTRFS_FEATURE_INCOMPAT_NO_HOLES (1ULL << 9)
|
||||
#define BTRFS_FEATURE_INCOMPAT_METADATA_UUID (1ULL << 10)
|
||||
#define BTRFS_FEATURE_INCOMPAT_RAID1C34 (1ULL << 11)
|
||||
|
||||
struct btrfs_ioctl_feature_flags {
|
||||
__u64 compat_flags;
|
||||
|
@ -831,7 +832,9 @@ enum btrfs_err_code {
|
|||
BTRFS_ERROR_DEV_TGT_REPLACE,
|
||||
BTRFS_ERROR_DEV_MISSING_NOT_FOUND,
|
||||
BTRFS_ERROR_DEV_ONLY_WRITABLE,
|
||||
BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS
|
||||
BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS,
|
||||
BTRFS_ERROR_DEV_RAID1C3_MIN_NOT_MET,
|
||||
BTRFS_ERROR_DEV_RAID1C4_MIN_NOT_MET,
|
||||
};
|
||||
|
||||
#define BTRFS_IOC_SNAP_CREATE _IOW(BTRFS_IOCTL_MAGIC, 1, \
|
||||
|
|
|
@ -302,6 +302,9 @@
|
|||
/* csum types */
|
||||
enum btrfs_csum_type {
|
||||
BTRFS_CSUM_TYPE_CRC32 = 0,
|
||||
BTRFS_CSUM_TYPE_XXHASH = 1,
|
||||
BTRFS_CSUM_TYPE_SHA256 = 2,
|
||||
BTRFS_CSUM_TYPE_BLAKE2 = 3,
|
||||
};
|
||||
|
||||
/*
|
||||
|
@ -737,10 +740,12 @@ struct btrfs_balance_item {
|
|||
__le64 unused[4];
|
||||
} __attribute__ ((__packed__));
|
||||
|
||||
#define BTRFS_FILE_EXTENT_INLINE 0
|
||||
#define BTRFS_FILE_EXTENT_REG 1
|
||||
#define BTRFS_FILE_EXTENT_PREALLOC 2
|
||||
#define BTRFS_FILE_EXTENT_TYPES 2
|
||||
enum {
|
||||
BTRFS_FILE_EXTENT_INLINE = 0,
|
||||
BTRFS_FILE_EXTENT_REG = 1,
|
||||
BTRFS_FILE_EXTENT_PREALLOC = 2,
|
||||
BTRFS_NR_FILE_EXTENT_TYPES = 3,
|
||||
};
|
||||
|
||||
struct btrfs_file_extent_item {
|
||||
/*
|
||||
|
@ -836,6 +841,8 @@ struct btrfs_dev_replace_item {
|
|||
#define BTRFS_BLOCK_GROUP_RAID10 (1ULL << 6)
|
||||
#define BTRFS_BLOCK_GROUP_RAID5 (1ULL << 7)
|
||||
#define BTRFS_BLOCK_GROUP_RAID6 (1ULL << 8)
|
||||
#define BTRFS_BLOCK_GROUP_RAID1C3 (1ULL << 9)
|
||||
#define BTRFS_BLOCK_GROUP_RAID1C4 (1ULL << 10)
|
||||
#define BTRFS_BLOCK_GROUP_RESERVED (BTRFS_AVAIL_ALLOC_BIT_SINGLE | \
|
||||
BTRFS_SPACE_INFO_GLOBAL_RSV)
|
||||
|
||||
|
@ -847,6 +854,8 @@ enum btrfs_raid_types {
|
|||
BTRFS_RAID_SINGLE,
|
||||
BTRFS_RAID_RAID5,
|
||||
BTRFS_RAID_RAID6,
|
||||
BTRFS_RAID_RAID1C3,
|
||||
BTRFS_RAID_RAID1C4,
|
||||
BTRFS_NR_RAID_TYPES
|
||||
};
|
||||
|
||||
|
@ -856,6 +865,8 @@ enum btrfs_raid_types {
|
|||
|
||||
#define BTRFS_BLOCK_GROUP_PROFILE_MASK (BTRFS_BLOCK_GROUP_RAID0 | \
|
||||
BTRFS_BLOCK_GROUP_RAID1 | \
|
||||
BTRFS_BLOCK_GROUP_RAID1C3 | \
|
||||
BTRFS_BLOCK_GROUP_RAID1C4 | \
|
||||
BTRFS_BLOCK_GROUP_RAID5 | \
|
||||
BTRFS_BLOCK_GROUP_RAID6 | \
|
||||
BTRFS_BLOCK_GROUP_DUP | \
|
||||
|
@ -863,7 +874,9 @@ enum btrfs_raid_types {
|
|||
#define BTRFS_BLOCK_GROUP_RAID56_MASK (BTRFS_BLOCK_GROUP_RAID5 | \
|
||||
BTRFS_BLOCK_GROUP_RAID6)
|
||||
|
||||
#define BTRFS_BLOCK_GROUP_RAID1_MASK (BTRFS_BLOCK_GROUP_RAID1)
|
||||
#define BTRFS_BLOCK_GROUP_RAID1_MASK (BTRFS_BLOCK_GROUP_RAID1 | \
|
||||
BTRFS_BLOCK_GROUP_RAID1C3 | \
|
||||
BTRFS_BLOCK_GROUP_RAID1C4)
|
||||
|
||||
/*
|
||||
* We need a bit for restriper to be able to tell when chunks of type
|
||||
|
|