/*
 * Copyright (C) 2007 Oracle. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include "hash.h"
#include "crc32c.h"
#include "ctree.h"
#include "disk-io.h"
#include "print-tree.h"
#include "transaction.h"
#include "volumes.h"
#include "locking.h"
#include "ref-cache.h"

static int finish_current_insert(struct btrfs_trans_handle *trans, struct
                                 btrfs_root *extent_root);
static int del_pending_extents(struct btrfs_trans_handle *trans, struct
                               btrfs_root *extent_root);
static struct btrfs_block_group_cache *
__btrfs_find_block_group(struct btrfs_root *root,
                         struct btrfs_block_group_cache *hint,
                         u64 search_start, int data, int owner);
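
/*
 * take/drop fs_info->alloc_mutex around allocation work.  The extent,
 * chunk and dev roots are skipped here, presumably because the paths
 * that operate on those roots already hold the mutex.
 */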
void maybe_lock_mutex(struct btrfs_root *root)
{
        if (root != root->fs_info->extent_root &&
            root != root->fs_info->chunk_root &&
            root != root->fs_info->dev_root) {
                mutex_lock(&root->fs_info->alloc_mutex);
        }
}

void maybe_unlock_mutex(struct btrfs_root *root)
{
        if (root != root->fs_info->extent_root &&
            root != root->fs_info->chunk_root &&
            root != root->fs_info->dev_root) {
                mutex_unlock(&root->fs_info->alloc_mutex);
        }
}
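
/*
 * check that a block group has all of the given allocation @bits set in
 * its flags (block group type and raid profile)
 */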
static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits)
{
        return (cache->flags & bits) == bits;
}

/*
 * this adds the block group to the fs_info rb tree for the block group
 * cache
 */
int btrfs_add_block_group_cache(struct btrfs_fs_info *info,
                                struct btrfs_block_group_cache *block_group)
{
        struct rb_node **p;
        struct rb_node *parent = NULL;
        struct btrfs_block_group_cache *cache;

        spin_lock(&info->block_group_cache_lock);
        p = &info->block_group_cache_tree.rb_node;

        while (*p) {
                parent = *p;
                cache = rb_entry(parent, struct btrfs_block_group_cache,
                                 cache_node);
                if (block_group->key.objectid < cache->key.objectid) {
                        p = &(*p)->rb_left;
                } else if (block_group->key.objectid > cache->key.objectid) {
                        p = &(*p)->rb_right;
                } else {
                        spin_unlock(&info->block_group_cache_lock);
                        return -EEXIST;
                }
        }

        rb_link_node(&block_group->cache_node, parent, p);
        rb_insert_color(&block_group->cache_node,
                        &info->block_group_cache_tree);
        spin_unlock(&info->block_group_cache_lock);

        return 0;
}

/*
 * This will return the block group at or after bytenr if contains is 0, else
 * it will return the block group that contains the bytenr
 */
static struct btrfs_block_group_cache *
block_group_cache_tree_search(struct btrfs_fs_info *info, u64 bytenr,
                              int contains)
{
        struct btrfs_block_group_cache *cache, *ret = NULL;
        struct rb_node *n;
        u64 end, start;

        spin_lock(&info->block_group_cache_lock);
        n = info->block_group_cache_tree.rb_node;

        while (n) {
                cache = rb_entry(n, struct btrfs_block_group_cache,
                                 cache_node);
                end = cache->key.objectid + cache->key.offset - 1;
                start = cache->key.objectid;

                if (bytenr < start) {
                        if (!contains && (!ret || start < ret->key.objectid))
                                ret = cache;
                        n = n->rb_left;
                } else if (bytenr > start) {
                        if (contains && bytenr <= end) {
                                ret = cache;
                                break;
                        }
                        n = n->rb_right;
                } else {
                        ret = cache;
                        break;
                }
        }
        spin_unlock(&info->block_group_cache_lock);

        return ret;
}

/*
 * this is only called by cache_block_group, since we could have freed extents
 * we need to check the pinned_extents for any extents that can't be used yet
 * since their free space will be released as soon as the transaction commits.
 */
static int add_new_free_space(struct btrfs_block_group_cache *block_group,
                              struct btrfs_fs_info *info, u64 start, u64 end)
{
        u64 extent_start, extent_end, size;
        int ret;

        while (start < end) {
                ret = find_first_extent_bit(&info->pinned_extents, start,
                                            &extent_start, &extent_end,
                                            EXTENT_DIRTY);
                if (ret)
                        break;

                if (extent_start == start) {
                        start = extent_end + 1;
                } else if (extent_start > start && extent_start < end) {
                        size = extent_start - start;
                        ret = btrfs_add_free_space(block_group, start, size);
                        BUG_ON(ret);
                        start = extent_end + 1;
                } else {
                        break;
                }
        }

        if (start < end) {
                size = end - start;
                ret = btrfs_add_free_space(block_group, start, size);
                BUG_ON(ret);
        }

        return 0;
}
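
/*
 * build the in-memory free space information for a block group by walking
 * the extent tree: every gap between allocated extent items inside the
 * block group's range is handed to add_new_free_space(), then the group
 * is marked cached.
 */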
static int cache_block_group(struct btrfs_root *root,
                             struct btrfs_block_group_cache *block_group)
{
        struct btrfs_path *path;
        int ret = 0;
        struct btrfs_key key;
        struct extent_buffer *leaf;
        int slot;
        u64 last = 0;
        u64 first_free;
        int found = 0;

        if (!block_group)
                return 0;

        root = root->fs_info->extent_root;

        if (block_group->cached)
                return 0;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        path->reada = 2;
        /*
         * we get into deadlocks with paths held by callers of this function.
         * since the alloc_mutex is protecting things right now, just
         * skip the locking here
         */
        path->skip_locking = 1;
        first_free = max_t(u64, block_group->key.objectid,
                           BTRFS_SUPER_INFO_OFFSET + BTRFS_SUPER_INFO_SIZE);
        key.objectid = block_group->key.objectid;
        key.offset = 0;
        btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
        ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
        if (ret < 0)
                goto err;
        ret = btrfs_previous_item(root, path, 0, BTRFS_EXTENT_ITEM_KEY);
        if (ret < 0)
                goto err;
        if (ret == 0) {
                leaf = path->nodes[0];
                btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
                if (key.objectid + key.offset > first_free)
                        first_free = key.objectid + key.offset;
        }
        while (1) {
                leaf = path->nodes[0];
                slot = path->slots[0];
                if (slot >= btrfs_header_nritems(leaf)) {
                        ret = btrfs_next_leaf(root, path);
                        if (ret < 0)
                                goto err;
                        if (ret == 0)
                                continue;
                        else
                                break;
                }
                btrfs_item_key_to_cpu(leaf, &key, slot);
                if (key.objectid < block_group->key.objectid)
                        goto next;

                if (key.objectid >= block_group->key.objectid +
                    block_group->key.offset)
                        break;

                if (btrfs_key_type(&key) == BTRFS_EXTENT_ITEM_KEY) {
                        if (!found) {
                                last = first_free;
                                found = 1;
                        }

                        add_new_free_space(block_group, root->fs_info, last,
                                           key.objectid);

                        last = key.objectid + key.offset;
                }
next:
                path->slots[0]++;
        }

        if (!found)
                last = first_free;

        add_new_free_space(block_group, root->fs_info, last,
                           block_group->key.objectid +
                           block_group->key.offset);

        block_group->cached = 1;
        ret = 0;
err:
        btrfs_free_path(path);
        return ret;
}

/*
 * return the block group that starts at or after bytenr
 */
struct btrfs_block_group_cache *btrfs_lookup_first_block_group(struct
                                                        btrfs_fs_info *info,
                                                        u64 bytenr)
{
        struct btrfs_block_group_cache *cache;

        cache = block_group_cache_tree_search(info, bytenr, 0);

        return cache;
}

/*
 * return the block group that contains the given bytenr
 */
struct btrfs_block_group_cache *btrfs_lookup_block_group(struct
                                                         btrfs_fs_info *info,
                                                         u64 bytenr)
{
        struct btrfs_block_group_cache *cache;

        cache = block_group_cache_tree_search(info, bytenr, 1);

        return cache;
}
|
2008-03-24 22:01:56 +03:00
|
|
|
|
2008-09-23 21:14:11 +04:00
|
|
|
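/*
 * Walk block groups starting at *cache_ret looking for num bytes of free
 * space at or after *start_ret.  On success *start_ret (and possibly
 * *cache_ret) are updated and 0 is returned; otherwise -ENOSPC.
 */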
static int noinline find_free_space(struct btrfs_root *root,
|
|
|
|
struct btrfs_block_group_cache **cache_ret,
|
|
|
|
u64 *start_ret, u64 num, int data)
|
2007-05-10 04:13:14 +04:00
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
struct btrfs_block_group_cache *cache = *cache_ret;
|
2008-09-23 21:14:11 +04:00
|
|
|
struct btrfs_free_space *info = NULL;
|
2007-10-16 00:17:44 +04:00
|
|
|
u64 last;
|
2008-01-08 23:46:31 +03:00
|
|
|
u64 total_fs_bytes;
|
2008-03-24 22:01:56 +03:00
|
|
|
u64 search_start = *start_ret;
|
2007-05-10 04:13:14 +04:00
|
|
|
|
2008-07-08 22:19:17 +04:00
|
|
|
WARN_ON(!mutex_is_locked(&root->fs_info->alloc_mutex));
|
2008-01-08 23:46:31 +03:00
|
|
|
total_fs_bytes = btrfs_super_total_bytes(&root->fs_info->super_copy);
|
2008-02-18 20:12:38 +03:00
|
|
|
|
2008-05-24 22:04:53 +04:00
|
|
|
if (!cache)
|
|
|
|
goto out;
|
|
|
|
|
2008-09-23 21:14:11 +04:00
|
|
|
last = max(search_start, cache->key.objectid);
|
|
|
|
|
2007-05-10 04:13:14 +04:00
|
|
|
again:
|
2007-06-22 22:16:25 +04:00
|
|
|
ret = cache_block_group(root, cache);
|
2008-09-23 21:14:11 +04:00
|
|
|
if (ret)
|
2007-06-22 22:16:25 +04:00
|
|
|
goto out;
|
2007-11-06 18:26:29 +03:00
|
|
|
|
2008-09-23 21:14:11 +04:00
|
|
|
if (cache->ro || !block_group_bits(cache, data))
|
2008-03-24 22:01:56 +03:00
|
|
|
goto new_group;
|
2007-10-16 00:17:44 +04:00
|
|
|
|
2008-09-23 21:14:11 +04:00
|
|
|
info = btrfs_find_free_space(cache, last, num);
|
|
|
|
if (info) {
|
|
|
|
*start_ret = info->offset;
|
2008-03-24 22:01:56 +03:00
|
|
|
return 0;
|
2008-04-04 00:29:03 +04:00
|
|
|
}
|
2007-05-10 04:13:14 +04:00
|
|
|
|
|
|
|
new_group:
|
2007-10-16 00:17:44 +04:00
|
|
|
last = cache->key.objectid + cache->key.offset;
|
2008-09-23 21:14:11 +04:00
|
|
|
|
2008-05-24 22:04:53 +04:00
|
|
|
cache = btrfs_lookup_first_block_group(root->fs_info, last);
|
2008-09-23 21:14:11 +04:00
|
|
|
if (!cache || cache->key.objectid >= total_fs_bytes)
|
2007-12-04 21:18:24 +03:00
|
|
|
goto out;
|
2008-09-23 21:14:11 +04:00
|
|
|
|
2007-05-10 04:13:14 +04:00
|
|
|
*cache_ret = cache;
|
|
|
|
goto again;
|
2008-09-23 21:14:11 +04:00
|
|
|
|
|
|
|
out:
|
|
|
|
return -ENOSPC;
|
2007-05-10 04:13:14 +04:00
|
|
|
}
|
|
|
|
|
2007-06-12 15:43:08 +04:00
|
|
|
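/* return num scaled by factor/10; factor == 10 leaves num untouched */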
static u64 div_factor(u64 num, int factor)
|
|
|
|
{
|
2007-11-08 05:08:16 +03:00
|
|
|
if (factor == 10)
|
|
|
|
return num;
|
2007-06-12 15:43:08 +04:00
|
|
|
num *= factor;
|
|
|
|
do_div(num, 10);
|
|
|
|
return num;
|
|
|
|
}
|
|
|
|
|
2008-09-23 21:14:11 +04:00
|
|
|
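/* return the space_info whose flags match the given flags, or NULL */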
static struct btrfs_space_info *__find_space_info(struct btrfs_fs_info *info,
|
|
|
|
u64 flags)
|
2008-03-24 22:01:59 +03:00
|
|
|
{
|
2008-09-23 21:14:11 +04:00
|
|
|
struct list_head *head = &info->space_info;
|
|
|
|
struct list_head *cur;
|
|
|
|
struct btrfs_space_info *found;
|
|
|
|
list_for_each(cur, head) {
|
|
|
|
found = list_entry(cur, struct btrfs_space_info, list);
|
|
|
|
if (found->flags == flags)
|
|
|
|
return found;
|
|
|
|
}
|
|
|
|
return NULL;
|
|
|
|
|
2008-03-24 22:01:59 +03:00
|
|
|
}
|
|
|
|
|
2008-06-26 00:01:30 +04:00
|
|
|
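/*
 * Pick a block group of the requested type that still has room (used plus
 * pinned below a fraction of its size), preferring the search hint; the
 * fullness check is relaxed and the search restarted if nothing qualifies.
 */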
static struct btrfs_block_group_cache *
|
|
|
|
__btrfs_find_block_group(struct btrfs_root *root,
|
|
|
|
struct btrfs_block_group_cache *hint,
|
|
|
|
u64 search_start, int data, int owner)
|
2007-04-27 18:08:34 +04:00
|
|
|
{
|
2007-10-16 00:15:19 +04:00
|
|
|
struct btrfs_block_group_cache *cache;
|
2007-04-30 23:25:45 +04:00
|
|
|
struct btrfs_block_group_cache *found_group = NULL;
|
2007-04-27 18:08:34 +04:00
|
|
|
struct btrfs_fs_info *info = root->fs_info;
|
2008-09-23 21:14:11 +04:00
|
|
|
struct btrfs_space_info *sinfo;
|
2007-04-27 18:08:34 +04:00
|
|
|
u64 used;
|
2007-04-30 23:25:45 +04:00
|
|
|
u64 last = 0;
|
2007-10-16 00:15:19 +04:00
|
|
|
u64 free_check;
|
2007-04-30 23:25:45 +04:00
|
|
|
int full_search = 0;
|
2008-04-24 22:42:46 +04:00
|
|
|
int factor = 10;
|
2008-05-24 22:04:53 +04:00
|
|
|
int wrapped = 0;
|
2007-05-18 21:28:27 +04:00
|
|
|
|
2008-04-29 17:38:00 +04:00
|
|
|
if (data & BTRFS_BLOCK_GROUP_METADATA)
|
|
|
|
factor = 9;
|
2007-05-06 18:15:01 +04:00
|
|
|
|
2008-05-24 22:04:53 +04:00
|
|
|
if (search_start) {
|
2007-05-06 18:15:01 +04:00
|
|
|
struct btrfs_block_group_cache *shint;
|
2008-05-24 22:04:53 +04:00
|
|
|
shint = btrfs_lookup_first_block_group(info, search_start);
|
2008-04-26 00:53:30 +04:00
|
|
|
if (shint && block_group_bits(shint, data) && !shint->ro) {
|
2008-07-23 07:06:41 +04:00
|
|
|
spin_lock(&shint->lock);
|
2007-05-06 18:15:01 +04:00
|
|
|
used = btrfs_block_group_used(&shint->item);
|
2007-11-16 22:57:08 +03:00
|
|
|
if (used + shint->pinned <
|
|
|
|
div_factor(shint->key.offset, factor)) {
|
2008-07-23 07:06:41 +04:00
|
|
|
spin_unlock(&shint->lock);
|
2007-05-06 18:15:01 +04:00
|
|
|
return shint;
|
|
|
|
}
|
2008-07-23 07:06:41 +04:00
|
|
|
spin_unlock(&shint->lock);
|
2007-05-06 18:15:01 +04:00
|
|
|
}
|
|
|
|
}
|
2008-05-24 22:04:53 +04:00
|
|
|
if (hint && !hint->ro && block_group_bits(hint, data)) {
|
2008-07-23 07:06:41 +04:00
|
|
|
spin_lock(&hint->lock);
|
2007-04-30 23:25:45 +04:00
|
|
|
used = btrfs_block_group_used(&hint->item);
|
2007-11-16 22:57:08 +03:00
|
|
|
if (used + hint->pinned <
|
|
|
|
div_factor(hint->key.offset, factor)) {
|
2008-07-23 07:06:41 +04:00
|
|
|
spin_unlock(&hint->lock);
|
2007-04-30 23:25:45 +04:00
|
|
|
return hint;
|
|
|
|
}
|
2008-07-23 07:06:41 +04:00
|
|
|
spin_unlock(&hint->lock);
|
2007-10-16 00:17:44 +04:00
|
|
|
last = hint->key.objectid + hint->key.offset;
|
2007-04-30 23:25:45 +04:00
|
|
|
} else {
|
2007-05-10 04:13:14 +04:00
|
|
|
if (hint)
|
2008-05-24 22:04:53 +04:00
|
|
|
last = max(hint->key.objectid, search_start);
|
2007-05-10 04:13:14 +04:00
|
|
|
else
|
2008-05-24 22:04:53 +04:00
|
|
|
last = search_start;
|
2007-04-30 23:25:45 +04:00
|
|
|
}
|
2008-09-23 21:14:11 +04:00
|
|
|
sinfo = __find_space_info(root->fs_info, data);
|
|
|
|
if (!sinfo)
|
|
|
|
goto found;
|
2007-04-30 23:25:45 +04:00
|
|
|
again:
|
2007-04-27 18:08:34 +04:00
|
|
|
while(1) {
|
2008-09-23 21:14:11 +04:00
|
|
|
struct list_head *l;
|
2007-10-16 00:15:19 +04:00
|
|
|
|
2008-09-23 21:14:11 +04:00
|
|
|
cache = NULL;
|
|
|
|
|
|
|
|
spin_lock(&sinfo->lock);
|
|
|
|
list_for_each(l, &sinfo->block_groups) {
|
|
|
|
struct btrfs_block_group_cache *entry;
|
|
|
|
entry = list_entry(l, struct btrfs_block_group_cache,
|
|
|
|
list);
|
|
|
|
if ((entry->key.objectid >= last) &&
|
|
|
|
(!cache || (entry->key.objectid <
|
|
|
|
cache->key.objectid)))
|
|
|
|
cache = entry;
|
2008-05-24 22:04:53 +04:00
|
|
|
}
|
2008-09-23 21:14:11 +04:00
|
|
|
spin_unlock(&sinfo->lock);
|
|
|
|
|
|
|
|
if (!cache)
|
|
|
|
break;
|
2007-10-16 00:15:19 +04:00
|
|
|
|
2008-07-23 07:06:41 +04:00
|
|
|
spin_lock(&cache->lock);
|
2007-10-16 00:15:19 +04:00
|
|
|
last = cache->key.objectid + cache->key.offset;
|
|
|
|
used = btrfs_block_group_used(&cache->item);
|
|
|
|
|
2008-04-26 00:53:30 +04:00
|
|
|
if (!cache->ro && block_group_bits(cache, data)) {
|
2008-05-24 22:04:53 +04:00
|
|
|
free_check = div_factor(cache->key.offset, factor);
|
2008-04-04 00:29:03 +04:00
|
|
|
if (used + cache->pinned < free_check) {
|
|
|
|
found_group = cache;
|
2008-07-23 07:06:41 +04:00
|
|
|
spin_unlock(&cache->lock);
|
2008-04-04 00:29:03 +04:00
|
|
|
goto found;
|
|
|
|
}
|
2008-03-24 22:01:59 +03:00
|
|
|
}
|
2008-07-23 07:06:41 +04:00
|
|
|
spin_unlock(&cache->lock);
|
2007-05-18 21:28:27 +04:00
|
|
|
cond_resched();
|
2007-04-27 18:08:34 +04:00
|
|
|
}
|
2008-05-24 22:04:53 +04:00
|
|
|
if (!wrapped) {
|
|
|
|
last = search_start;
|
|
|
|
wrapped = 1;
|
|
|
|
goto again;
|
|
|
|
}
|
|
|
|
if (!full_search && factor < 10) {
|
2007-05-06 18:15:01 +04:00
|
|
|
last = search_start;
|
2007-04-30 23:25:45 +04:00
|
|
|
full_search = 1;
|
2008-05-24 22:04:53 +04:00
|
|
|
factor = 10;
|
2007-04-30 23:25:45 +04:00
|
|
|
goto again;
|
|
|
|
}
|
2007-05-06 18:15:01 +04:00
|
|
|
found:
|
2007-04-30 23:25:45 +04:00
|
|
|
return found_group;
|
2007-04-27 18:08:34 +04:00
|
|
|
}
|
|
|
|
|
2008-06-26 00:01:30 +04:00
|
|
|
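/* exported wrapper around __btrfs_find_block_group() */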
struct btrfs_block_group_cache *btrfs_find_block_group(struct btrfs_root *root,
|
|
|
|
struct btrfs_block_group_cache
|
|
|
|
*hint, u64 search_start,
|
|
|
|
int data, int owner)
|
|
|
|
{
|
|
|
|
|
|
|
|
struct btrfs_block_group_cache *ret;
|
|
|
|
ret = __btrfs_find_block_group(root, hint, search_start, data, owner);
|
|
|
|
return ret;
|
|
|
|
}
|
2008-09-23 21:14:11 +04:00
|
|
|
|
2007-12-11 17:25:06 +03:00
|
|
|
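/* fold the backref fields into a 64-bit hash used as the key offset of extent ref items */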
static u64 hash_extent_ref(u64 root_objectid, u64 ref_generation,
|
2007-12-11 17:25:06 +03:00
|
|
|
u64 owner, u64 owner_offset)
|
|
|
|
{
|
|
|
|
u32 high_crc = ~(u32)0;
|
|
|
|
u32 low_crc = ~(u32)0;
|
|
|
|
__le64 lenum;
|
|
|
|
lenum = cpu_to_le64(root_objectid);
|
2008-04-11 23:45:51 +04:00
|
|
|
high_crc = btrfs_crc32c(high_crc, &lenum, sizeof(lenum));
|
2007-12-11 17:25:06 +03:00
|
|
|
lenum = cpu_to_le64(ref_generation);
|
2008-04-11 23:45:51 +04:00
|
|
|
low_crc = btrfs_crc32c(low_crc, &lenum, sizeof(lenum));
|
2008-02-01 22:51:59 +03:00
|
|
|
if (owner >= BTRFS_FIRST_FREE_OBJECTID) {
|
|
|
|
lenum = cpu_to_le64(owner);
|
2008-04-11 23:45:51 +04:00
|
|
|
low_crc = btrfs_crc32c(low_crc, &lenum, sizeof(lenum));
|
2008-02-01 22:51:59 +03:00
|
|
|
lenum = cpu_to_le64(owner_offset);
|
2008-04-11 23:45:51 +04:00
|
|
|
low_crc = btrfs_crc32c(low_crc, &lenum, sizeof(lenum));
|
2008-02-01 22:51:59 +03:00
|
|
|
}
|
2007-12-11 17:25:06 +03:00
|
|
|
return ((u64)high_crc << 32) | (u64)low_crc;
|
|
|
|
}
|
|
|
|
|
2007-12-11 17:25:06 +03:00
|
|
|
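/* compare an on-disk extent ref with one built in memory; if the in-memory ref has no objectid, only root and generation are compared */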
static int match_extent_ref(struct extent_buffer *leaf,
|
|
|
|
struct btrfs_extent_ref *disk_ref,
|
|
|
|
struct btrfs_extent_ref *cpu_ref)
|
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
int len;
|
|
|
|
|
|
|
|
if (cpu_ref->objectid)
|
|
|
|
len = sizeof(*cpu_ref);
|
|
|
|
else
|
|
|
|
len = 2 * sizeof(u64);
|
|
|
|
ret = memcmp_extent_buffer(leaf, cpu_ref, (unsigned long)disk_ref,
|
|
|
|
len);
|
|
|
|
return ret == 0;
|
|
|
|
}
|
|
|
|
|
2008-09-06 00:13:11 +04:00
|
|
|
/* simple helper to search for an existing extent at a given offset */
|
|
|
|
int btrfs_lookup_extent(struct btrfs_root *root, struct btrfs_path *path,
|
|
|
|
u64 start, u64 len)
|
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
struct btrfs_key key;
|
|
|
|
|
|
|
|
maybe_lock_mutex(root);
|
|
|
|
key.objectid = start;
|
|
|
|
key.offset = len;
|
|
|
|
btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
|
|
|
|
ret = btrfs_search_slot(NULL, root->fs_info->extent_root, &key, path,
|
|
|
|
0, 0);
|
|
|
|
maybe_unlock_mutex(root);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2008-01-03 18:01:48 +03:00
|
|
|
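/* find the extent ref item matching the given root, generation and owner info, walking forward past hash collisions; del selects a search suitable for deletion */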
static int noinline lookup_extent_backref(struct btrfs_trans_handle *trans,
|
|
|
|
struct btrfs_root *root,
|
|
|
|
struct btrfs_path *path, u64 bytenr,
|
|
|
|
u64 root_objectid,
|
|
|
|
u64 ref_generation, u64 owner,
|
|
|
|
u64 owner_offset, int del)
|
2007-12-11 17:25:06 +03:00
|
|
|
{
|
|
|
|
u64 hash;
|
|
|
|
struct btrfs_key key;
|
2007-12-11 17:25:06 +03:00
|
|
|
struct btrfs_key found_key;
|
2007-12-11 17:25:06 +03:00
|
|
|
struct btrfs_extent_ref ref;
|
2007-12-11 17:25:06 +03:00
|
|
|
struct extent_buffer *leaf;
|
|
|
|
struct btrfs_extent_ref *disk_ref;
|
|
|
|
int ret;
|
|
|
|
int ret2;
|
|
|
|
|
|
|
|
btrfs_set_stack_ref_root(&ref, root_objectid);
|
|
|
|
btrfs_set_stack_ref_generation(&ref, ref_generation);
|
|
|
|
btrfs_set_stack_ref_objectid(&ref, owner);
|
|
|
|
btrfs_set_stack_ref_offset(&ref, owner_offset);
|
|
|
|
|
|
|
|
hash = hash_extent_ref(root_objectid, ref_generation, owner,
|
|
|
|
owner_offset);
|
|
|
|
key.offset = hash;
|
|
|
|
key.objectid = bytenr;
|
|
|
|
key.type = BTRFS_EXTENT_REF_KEY;
|
|
|
|
|
|
|
|
while (1) {
|
|
|
|
ret = btrfs_search_slot(trans, root, &key, path,
|
|
|
|
del ? -1 : 0, del);
|
|
|
|
if (ret < 0)
|
|
|
|
goto out;
|
|
|
|
leaf = path->nodes[0];
|
|
|
|
if (ret != 0) {
|
|
|
|
u32 nritems = btrfs_header_nritems(leaf);
|
|
|
|
if (path->slots[0] >= nritems) {
|
|
|
|
ret2 = btrfs_next_leaf(root, path);
|
|
|
|
if (ret2)
|
|
|
|
goto out;
|
|
|
|
leaf = path->nodes[0];
|
|
|
|
}
|
|
|
|
btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
|
|
|
|
if (found_key.objectid != bytenr ||
|
|
|
|
found_key.type != BTRFS_EXTENT_REF_KEY)
|
|
|
|
goto out;
|
|
|
|
key.offset = found_key.offset;
|
|
|
|
if (del) {
|
|
|
|
btrfs_release_path(root, path);
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
disk_ref = btrfs_item_ptr(path->nodes[0],
|
|
|
|
path->slots[0],
|
|
|
|
struct btrfs_extent_ref);
|
|
|
|
if (match_extent_ref(path->nodes[0], disk_ref, &ref)) {
|
|
|
|
ret = 0;
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
|
|
|
|
key.offset = found_key.offset + 1;
|
|
|
|
btrfs_release_path(root, path);
|
|
|
|
}
|
|
|
|
out:
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2007-12-11 20:42:00 +03:00
|
|
|
/*
|
|
|
|
* Back reference rules. Back refs have three main goals:
|
|
|
|
*
|
|
|
|
* 1) differentiate between all holders of references to an extent so that
|
|
|
|
* when a reference is dropped we can make sure it was a valid reference
|
|
|
|
* before freeing the extent.
|
|
|
|
*
|
|
|
|
* 2) Provide enough information to quickly find the holders of an extent
|
|
|
|
* if we notice a given block is corrupted or bad.
|
|
|
|
*
|
|
|
|
* 3) Make it easy to migrate blocks for FS shrinking or storage pool
|
|
|
|
* maintenance. This is actually the same as #2, but with a slightly
|
|
|
|
* different use case.
|
|
|
|
*
|
|
|
|
* File extents can be referenced by:
|
|
|
|
*
|
|
|
|
* - multiple snapshots, subvolumes, or different generations in one subvol
|
|
|
|
* - different files inside a single subvolume (in theory, not implemented yet)
|
|
|
|
* - different offsets inside a file (bookend extents in file.c)
|
|
|
|
*
|
|
|
|
* The extent ref structure has fields for:
|
|
|
|
*
|
|
|
|
* - Objectid of the subvolume root
|
|
|
|
* - Generation number of the tree holding the reference
|
|
|
|
* - objectid of the file holding the reference
|
|
|
|
* - offset in the file corresponding to the key holding the reference
|
|
|
|
*
|
|
|
|
* When a file extent is allocated the fields are filled in:
|
|
|
|
* (root_key.objectid, trans->transid, inode objectid, offset in file)
|
|
|
|
*
|
|
|
|
* When a leaf is cow'd new references are added for every file extent found
|
|
|
|
* in the leaf. It looks the same as the create case, but trans->transid
|
|
|
|
* will be different when the block is cow'd.
|
|
|
|
*
|
|
|
|
* (root_key.objectid, trans->transid, inode objectid, offset in file)
|
|
|
|
*
|
|
|
|
* When a file extent is removed either during snapshot deletion or file
|
|
|
|
* truncation, the corresponding back reference is found
|
|
|
|
* by searching for:
|
|
|
|
*
|
|
|
|
* (btrfs_header_owner(leaf), btrfs_header_generation(leaf),
|
|
|
|
* inode objectid, offset in file)
|
|
|
|
*
|
|
|
|
* Btree extents can be referenced by:
|
|
|
|
*
|
|
|
|
* - Different subvolumes
|
|
|
|
* - Different generations of the same subvolume
|
|
|
|
*
|
|
|
|
* Storing sufficient information for a full reverse mapping of a btree
|
|
|
|
* block would require storing the lowest key of the block in the backref,
|
|
|
|
* and it would require updating that lowest key either before write out or
|
|
|
|
* every time it changed. Instead, the objectid of the lowest key is stored
|
|
|
|
* along with the level of the tree block. This provides a hint
|
|
|
|
* about where in the btree the block can be found. Searches through the
|
|
|
|
* btree only need to look for a pointer to that block, so they stop one
|
|
|
|
* level higher than the level recorded in the backref.
|
|
|
|
*
|
|
|
|
* Some btrees do not do reference counting on their extents. These
|
|
|
|
* include the extent tree and the tree of tree roots. Backrefs for these
|
|
|
|
* trees always have a generation of zero.
|
|
|
|
*
|
|
|
|
* When a tree block is created, back references are inserted:
|
|
|
|
*
|
2007-12-13 19:13:32 +03:00
|
|
|
* (root->root_key.objectid, trans->transid or zero, level, lowest_key_objectid)
|
2007-12-11 20:42:00 +03:00
|
|
|
*
|
|
|
|
* When a tree block is cow'd in a reference counted root,
|
|
|
|
* new back references are added for all the blocks it points to.
|
|
|
|
* These are of the form (trans->transid will have increased since creation):
|
|
|
|
*
|
2007-12-13 19:13:32 +03:00
|
|
|
* (root->root_key.objectid, trans->transid, level, lowest_key_objectid)
|
2007-12-11 20:42:00 +03:00
|
|
|
*
|
|
|
|
* Because the lowest_key_objectid and the level are just hints
|
|
|
|
* they are not used when backrefs are deleted. When a backref is deleted:
|
|
|
|
*
|
|
|
|
* if backref was for a tree root:
|
|
|
|
* root_objectid = root->root_key.objectid
|
|
|
|
* else
|
|
|
|
* root_objectid = btrfs_header_owner(parent)
|
|
|
|
*
|
|
|
|
* (root_objectid, btrfs_header_generation(parent) or zero, 0, 0)
|
|
|
|
*
|
|
|
|
* Back Reference Key hashing:
|
|
|
|
*
|
|
|
|
* Back references have four fields, each 64 bits long. Unfortunately,
|
|
|
|
* they are hashed into a single 64 bit number and placed into the key offset.
|
|
|
|
* The key objectid corresponds to the first byte in the extent, and the
|
|
|
|
* key type is set to BTRFS_EXTENT_REF_KEY
|
|
|
|
*/
|
2007-12-11 17:25:06 +03:00
|
|
|
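/*
 * insert a backref item for the extent at bytenr.  Hash collisions are
 * handled by bumping the key offset until a free slot or an identical
 * ref is found.
 */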
int btrfs_insert_extent_backref(struct btrfs_trans_handle *trans,
|
|
|
|
struct btrfs_root *root,
|
|
|
|
struct btrfs_path *path, u64 bytenr,
|
|
|
|
u64 root_objectid, u64 ref_generation,
|
|
|
|
u64 owner, u64 owner_offset)
|
|
|
|
{
|
|
|
|
u64 hash;
|
|
|
|
struct btrfs_key key;
|
|
|
|
struct btrfs_extent_ref ref;
|
|
|
|
struct btrfs_extent_ref *disk_ref;
|
2007-12-11 17:25:06 +03:00
|
|
|
int ret;
|
|
|
|
|
|
|
|
btrfs_set_stack_ref_root(&ref, root_objectid);
|
2007-12-11 17:25:06 +03:00
|
|
|
btrfs_set_stack_ref_generation(&ref, ref_generation);
|
2007-12-11 17:25:06 +03:00
|
|
|
btrfs_set_stack_ref_objectid(&ref, owner);
|
|
|
|
btrfs_set_stack_ref_offset(&ref, owner_offset);
|
|
|
|
|
2007-12-11 17:25:06 +03:00
|
|
|
hash = hash_extent_ref(root_objectid, ref_generation, owner,
|
|
|
|
owner_offset);
|
2007-12-11 17:25:06 +03:00
|
|
|
key.offset = hash;
|
|
|
|
key.objectid = bytenr;
|
|
|
|
key.type = BTRFS_EXTENT_REF_KEY;
|
|
|
|
|
|
|
|
ret = btrfs_insert_empty_item(trans, root, path, &key, sizeof(ref));
|
|
|
|
while (ret == -EEXIST) {
|
2007-12-11 17:25:06 +03:00
|
|
|
disk_ref = btrfs_item_ptr(path->nodes[0], path->slots[0],
|
|
|
|
struct btrfs_extent_ref);
|
|
|
|
if (match_extent_ref(path->nodes[0], disk_ref, &ref))
|
|
|
|
goto out;
|
|
|
|
key.offset++;
|
|
|
|
btrfs_release_path(root, path);
|
|
|
|
ret = btrfs_insert_empty_item(trans, root, path, &key,
|
|
|
|
sizeof(ref));
|
2007-12-11 17:25:06 +03:00
|
|
|
}
|
2007-12-11 17:25:06 +03:00
|
|
|
if (ret)
|
|
|
|
goto out;
|
|
|
|
disk_ref = btrfs_item_ptr(path->nodes[0], path->slots[0],
|
|
|
|
struct btrfs_extent_ref);
|
|
|
|
write_extent_buffer(path->nodes[0], &ref, (unsigned long)disk_ref,
|
|
|
|
sizeof(ref));
|
|
|
|
btrfs_mark_buffer_dirty(path->nodes[0]);
|
|
|
|
out:
|
|
|
|
btrfs_release_path(root, path);
|
|
|
|
return ret;
|
2007-12-11 17:25:06 +03:00
|
|
|
}
|
|
|
|
|
2008-06-26 00:01:30 +04:00
|
|
|
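/*
 * bump the reference count in the extent item and insert a backref for
 * the new holder.  The caller must already hold the alloc_mutex.
 */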
static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
|
2007-04-17 21:26:50 +04:00
|
|
|
struct btrfs_root *root,
|
2007-12-11 17:25:06 +03:00
|
|
|
u64 bytenr, u64 num_bytes,
|
2007-12-11 17:25:06 +03:00
|
|
|
u64 root_objectid, u64 ref_generation,
|
2007-12-11 17:25:06 +03:00
|
|
|
u64 owner, u64 owner_offset)
|
2007-03-03 00:08:05 +03:00
|
|
|
{
|
2007-04-02 19:20:42 +04:00
|
|
|
struct btrfs_path *path;
|
2007-03-03 00:08:05 +03:00
|
|
|
int ret;
|
2007-03-12 23:22:34 +03:00
|
|
|
struct btrfs_key key;
|
2007-10-16 00:14:19 +04:00
|
|
|
struct extent_buffer *l;
|
2007-03-13 17:46:10 +03:00
|
|
|
struct btrfs_extent_item *item;
|
2007-03-13 16:49:06 +03:00
|
|
|
u32 refs;
|
2007-03-07 19:50:24 +03:00
|
|
|
|
2007-10-16 00:15:53 +04:00
|
|
|
WARN_ON(num_bytes < root->sectorsize);
|
2007-04-02 19:20:42 +04:00
|
|
|
path = btrfs_alloc_path();
|
2007-06-22 22:16:25 +04:00
|
|
|
if (!path)
|
|
|
|
return -ENOMEM;
|
2007-08-09 04:17:12 +04:00
|
|
|
|
2008-04-21 20:01:38 +04:00
|
|
|
path->reada = 1;
|
2007-10-16 00:15:53 +04:00
|
|
|
key.objectid = bytenr;
|
2007-03-15 19:56:47 +03:00
|
|
|
btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
|
2007-10-16 00:15:53 +04:00
|
|
|
key.offset = num_bytes;
|
2007-04-02 19:20:42 +04:00
|
|
|
ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key, path,
|
2007-03-20 21:38:32 +03:00
|
|
|
0, 1);
|
2007-06-22 22:16:25 +04:00
|
|
|
if (ret < 0)
|
|
|
|
return ret;
|
2007-04-19 00:15:28 +04:00
|
|
|
if (ret != 0) {
|
2007-03-07 04:08:01 +03:00
|
|
|
BUG();
|
2007-04-19 00:15:28 +04:00
|
|
|
}
|
2007-03-03 00:08:05 +03:00
|
|
|
BUG_ON(ret != 0);
|
2007-10-16 00:14:19 +04:00
|
|
|
l = path->nodes[0];
|
2007-04-02 19:20:42 +04:00
|
|
|
item = btrfs_item_ptr(l, path->slots[0], struct btrfs_extent_item);
|
2007-10-16 00:14:19 +04:00
|
|
|
refs = btrfs_extent_refs(l, item);
|
|
|
|
btrfs_set_extent_refs(l, item, refs + 1);
|
2007-04-02 19:20:42 +04:00
|
|
|
btrfs_mark_buffer_dirty(path->nodes[0]);
|
2007-03-07 04:08:01 +03:00
|
|
|
|
2007-04-02 19:20:42 +04:00
|
|
|
btrfs_release_path(root->fs_info->extent_root, path);
|
2007-12-11 17:25:06 +03:00
|
|
|
|
2008-04-21 20:01:38 +04:00
|
|
|
path->reada = 1;
|
2007-12-11 17:25:06 +03:00
|
|
|
ret = btrfs_insert_extent_backref(trans, root->fs_info->extent_root,
|
|
|
|
path, bytenr, root_objectid,
|
|
|
|
ref_generation, owner, owner_offset);
|
|
|
|
BUG_ON(ret);
|
2007-03-20 21:38:32 +03:00
|
|
|
finish_current_insert(trans, root->fs_info->extent_root);
|
2007-03-22 19:13:20 +03:00
|
|
|
del_pending_extents(trans, root->fs_info->extent_root);
|
2007-12-11 17:25:06 +03:00
|
|
|
|
|
|
|
btrfs_free_path(path);
|
2007-03-03 00:08:05 +03:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2008-06-26 00:01:30 +04:00
|
|
|
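/* take the alloc_mutex and add a reference to the extent at bytenr */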
int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
|
|
|
|
struct btrfs_root *root,
|
|
|
|
u64 bytenr, u64 num_bytes,
|
|
|
|
u64 root_objectid, u64 ref_generation,
|
|
|
|
u64 owner, u64 owner_offset)
|
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
mutex_lock(&root->fs_info->alloc_mutex);
|
|
|
|
ret = __btrfs_inc_extent_ref(trans, root, bytenr, num_bytes,
|
|
|
|
root_objectid, ref_generation,
|
|
|
|
owner, owner_offset);
|
|
|
|
mutex_unlock(&root->fs_info->alloc_mutex);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2007-08-10 22:06:19 +04:00
|
|
|
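/* flush the pending extent tree inserts and deletes for this transaction */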
int btrfs_extent_post_op(struct btrfs_trans_handle *trans,
|
|
|
|
struct btrfs_root *root)
|
|
|
|
{
|
|
|
|
finish_current_insert(trans, root->fs_info->extent_root);
|
|
|
|
del_pending_extents(trans, root->fs_info->extent_root);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2007-04-17 21:26:50 +04:00
|
|
|
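/* read the current reference count of the extent item at bytenr into *refs */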
static int lookup_extent_ref(struct btrfs_trans_handle *trans,
|
2007-10-16 00:15:53 +04:00
|
|
|
struct btrfs_root *root, u64 bytenr,
|
|
|
|
u64 num_bytes, u32 *refs)
|
2007-03-07 04:08:01 +03:00
|
|
|
{
|
2007-04-02 19:20:42 +04:00
|
|
|
struct btrfs_path *path;
|
2007-03-07 04:08:01 +03:00
|
|
|
int ret;
|
2007-03-12 23:22:34 +03:00
|
|
|
struct btrfs_key key;
|
2007-10-16 00:14:19 +04:00
|
|
|
struct extent_buffer *l;
|
2007-03-13 17:46:10 +03:00
|
|
|
struct btrfs_extent_item *item;
|
2007-04-02 19:20:42 +04:00
|
|
|
|
2007-10-16 00:15:53 +04:00
|
|
|
WARN_ON(num_bytes < root->sectorsize);
|
2007-04-02 19:20:42 +04:00
|
|
|
path = btrfs_alloc_path();
|
2008-04-21 20:01:38 +04:00
|
|
|
path->reada = 1;
|
2007-10-16 00:15:53 +04:00
|
|
|
key.objectid = bytenr;
|
|
|
|
key.offset = num_bytes;
|
2007-03-15 19:56:47 +03:00
|
|
|
btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
|
2007-04-02 19:20:42 +04:00
|
|
|
ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key, path,
|
2007-03-20 21:38:32 +03:00
|
|
|
0, 0);
|
2007-06-22 22:16:25 +04:00
|
|
|
if (ret < 0)
|
|
|
|
goto out;
|
2007-10-16 00:14:19 +04:00
|
|
|
if (ret != 0) {
|
|
|
|
btrfs_print_leaf(root, path->nodes[0]);
|
2007-10-16 00:15:53 +04:00
|
|
|
printk("failed to find block number %Lu\n", bytenr);
|
2007-03-07 04:08:01 +03:00
|
|
|
BUG();
|
2007-10-16 00:14:19 +04:00
|
|
|
}
|
|
|
|
l = path->nodes[0];
|
2007-04-02 19:20:42 +04:00
|
|
|
item = btrfs_item_ptr(l, path->slots[0], struct btrfs_extent_item);
|
2007-10-16 00:14:19 +04:00
|
|
|
*refs = btrfs_extent_refs(l, item);
|
2007-06-22 22:16:25 +04:00
|
|
|
out:
|
2007-04-02 19:20:42 +04:00
|
|
|
btrfs_free_path(path);
|
2007-03-07 04:08:01 +03:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2008-07-30 17:26:11 +04:00
|
|
|
|
|
|
|
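/*
 * count the backrefs of an extent: *ref_count ends up 1 if every ref
 * belongs to this root and object, or 2 as soon as a ref from another
 * root, an older generation, or a different object is seen.
 * *min_generation records the smallest ref generation found.
 */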
static int get_reference_status(struct btrfs_root *root, u64 bytenr,
|
|
|
|
u64 parent_gen, u64 ref_objectid,
|
|
|
|
u64 *min_generation, u32 *ref_count)
|
2007-12-18 04:14:01 +03:00
|
|
|
{
|
|
|
|
struct btrfs_root *extent_root = root->fs_info->extent_root;
|
|
|
|
struct btrfs_path *path;
|
2008-07-30 17:26:11 +04:00
|
|
|
struct extent_buffer *leaf;
|
|
|
|
struct btrfs_extent_ref *ref_item;
|
|
|
|
struct btrfs_key key;
|
|
|
|
struct btrfs_key found_key;
|
2008-01-03 17:08:27 +03:00
|
|
|
u64 root_objectid = root->root_key.objectid;
|
2008-07-30 17:26:11 +04:00
|
|
|
u64 ref_generation;
|
2007-12-18 04:14:01 +03:00
|
|
|
u32 nritems;
|
|
|
|
int ret;
|
2008-06-26 00:01:30 +04:00
|
|
|
|
2007-12-18 04:14:01 +03:00
|
|
|
key.objectid = bytenr;
|
|
|
|
key.offset = 0;
|
2008-07-30 17:26:11 +04:00
|
|
|
key.type = BTRFS_EXTENT_ITEM_KEY;
|
2007-12-18 04:14:01 +03:00
|
|
|
|
2008-07-30 17:26:11 +04:00
|
|
|
path = btrfs_alloc_path();
|
|
|
|
mutex_lock(&root->fs_info->alloc_mutex);
|
2007-12-18 04:14:01 +03:00
|
|
|
ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
|
|
|
|
if (ret < 0)
|
|
|
|
goto out;
|
|
|
|
BUG_ON(ret == 0);
|
|
|
|
|
2008-07-30 17:26:11 +04:00
|
|
|
leaf = path->nodes[0];
|
|
|
|
btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
|
2007-12-18 04:14:01 +03:00
|
|
|
|
|
|
|
if (found_key.objectid != bytenr ||
|
|
|
|
found_key.type != BTRFS_EXTENT_ITEM_KEY) {
|
2008-07-30 17:26:11 +04:00
|
|
|
ret = 1;
|
2007-12-18 04:14:01 +03:00
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
2008-07-30 17:26:11 +04:00
|
|
|
*ref_count = 0;
|
|
|
|
*min_generation = (u64)-1;
|
|
|
|
|
2007-12-18 04:14:01 +03:00
|
|
|
while (1) {
|
2008-07-30 17:26:11 +04:00
|
|
|
leaf = path->nodes[0];
|
|
|
|
nritems = btrfs_header_nritems(leaf);
|
2007-12-18 04:14:01 +03:00
|
|
|
if (path->slots[0] >= nritems) {
|
|
|
|
ret = btrfs_next_leaf(extent_root, path);
|
2008-07-30 17:26:11 +04:00
|
|
|
if (ret < 0)
|
|
|
|
goto out;
|
2007-12-18 04:14:01 +03:00
|
|
|
if (ret == 0)
|
|
|
|
continue;
|
|
|
|
break;
|
|
|
|
}
|
2008-07-30 17:26:11 +04:00
|
|
|
btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
|
2007-12-18 04:14:01 +03:00
|
|
|
if (found_key.objectid != bytenr)
|
|
|
|
break;
|
2008-01-03 21:23:19 +03:00
|
|
|
|
2007-12-18 04:14:01 +03:00
|
|
|
if (found_key.type != BTRFS_EXTENT_REF_KEY) {
|
|
|
|
path->slots[0]++;
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
2008-07-30 17:26:11 +04:00
|
|
|
ref_item = btrfs_item_ptr(leaf, path->slots[0],
|
2007-12-18 04:14:01 +03:00
|
|
|
struct btrfs_extent_ref);
|
2008-07-30 17:26:11 +04:00
|
|
|
ref_generation = btrfs_ref_generation(leaf, ref_item);
|
|
|
|
/*
|
|
|
|
* For (parent_gen > 0 && parent_gen > ref_gen):
|
|
|
|
*
|
2008-07-31 00:29:20 +04:00
|
|
|
* we reach here through the oldest root, therefore
|
|
|
|
* all other references from the same snapshot should have
|
2008-07-30 17:26:11 +04:00
|
|
|
* a larger generation.
|
|
|
|
*/
|
|
|
|
if ((root_objectid != btrfs_ref_root(leaf, ref_item)) ||
|
|
|
|
(parent_gen > 0 && parent_gen > ref_generation) ||
|
|
|
|
(ref_objectid >= BTRFS_FIRST_FREE_OBJECTID &&
|
|
|
|
ref_objectid != btrfs_ref_objectid(leaf, ref_item))) {
|
|
|
|
if (ref_count)
|
|
|
|
*ref_count = 2;
|
|
|
|
break;
|
2008-05-08 22:11:56 +04:00
|
|
|
}
|
2008-07-30 17:26:11 +04:00
|
|
|
|
|
|
|
*ref_count = 1;
|
|
|
|
if (*min_generation > ref_generation)
|
|
|
|
*min_generation = ref_generation;
|
|
|
|
|
2007-12-18 04:14:01 +03:00
|
|
|
path->slots[0]++;
|
|
|
|
}
|
2008-07-30 17:26:11 +04:00
|
|
|
ret = 0;
|
|
|
|
out:
|
|
|
|
mutex_unlock(&root->fs_info->alloc_mutex);
|
|
|
|
btrfs_free_path(path);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2008-08-05 21:05:02 +04:00
|
|
|
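/*
 * check whether the file extent at bytenr is referenced by another
 * snapshot.  Returns 1 if a cross reference exists, 0 if the extent is
 * only referenced by this root, or a negative error.
 */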
int btrfs_cross_ref_exists(struct btrfs_trans_handle *trans,
|
|
|
|
struct btrfs_root *root,
|
2008-07-30 17:26:11 +04:00
|
|
|
struct btrfs_key *key, u64 bytenr)
|
|
|
|
{
|
|
|
|
struct btrfs_root *old_root;
|
|
|
|
struct btrfs_path *path = NULL;
|
|
|
|
struct extent_buffer *eb;
|
|
|
|
struct btrfs_file_extent_item *item;
|
|
|
|
u64 ref_generation;
|
|
|
|
u64 min_generation;
|
|
|
|
u64 extent_start;
|
|
|
|
u32 ref_count;
|
|
|
|
int level;
|
|
|
|
int ret;
|
|
|
|
|
2008-08-05 21:05:02 +04:00
|
|
|
BUG_ON(trans == NULL);
|
2008-07-30 17:26:11 +04:00
|
|
|
BUG_ON(key->type != BTRFS_EXTENT_DATA_KEY);
|
|
|
|
ret = get_reference_status(root, bytenr, 0, key->objectid,
|
|
|
|
&min_generation, &ref_count);
|
|
|
|
if (ret)
|
|
|
|
return ret;
|
|
|
|
|
|
|
|
if (ref_count != 1)
|
|
|
|
return 1;
|
|
|
|
|
|
|
|
old_root = root->dirty_root->root;
|
|
|
|
ref_generation = old_root->root_key.offset;
|
|
|
|
|
|
|
|
/* all references are created in running transaction */
|
|
|
|
if (min_generation > ref_generation) {
|
|
|
|
ret = 0;
|
2008-05-09 00:31:21 +04:00
|
|
|
goto out;
|
|
|
|
}
|
2008-07-30 17:26:11 +04:00
|
|
|
|
|
|
|
path = btrfs_alloc_path();
|
|
|
|
if (!path) {
|
|
|
|
ret = -ENOMEM;
|
2007-12-18 04:14:01 +03:00
|
|
|
goto out;
|
|
|
|
}
|
2008-07-30 17:26:11 +04:00
|
|
|
|
|
|
|
path->skip_locking = 1;
|
|
|
|
/* if no item found, the extent is referenced by other snapshot */
|
|
|
|
ret = btrfs_search_slot(NULL, old_root, key, path, 0, 0);
|
|
|
|
if (ret)
|
2007-12-18 04:14:01 +03:00
|
|
|
goto out;
|
|
|
|
|
2008-07-30 17:26:11 +04:00
|
|
|
eb = path->nodes[0];
|
|
|
|
item = btrfs_item_ptr(eb, path->slots[0],
|
|
|
|
struct btrfs_file_extent_item);
|
|
|
|
if (btrfs_file_extent_type(eb, item) != BTRFS_FILE_EXTENT_REG ||
|
|
|
|
btrfs_file_extent_disk_bytenr(eb, item) != bytenr) {
|
|
|
|
ret = 1;
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
|
|
|
for (level = BTRFS_MAX_LEVEL - 1; level >= -1; level--) {
|
|
|
|
if (level >= 0) {
|
|
|
|
eb = path->nodes[level];
|
|
|
|
if (!eb)
|
|
|
|
continue;
|
|
|
|
extent_start = eb->start;
|
2008-07-31 00:29:20 +04:00
|
|
|
} else
|
2008-07-30 17:26:11 +04:00
|
|
|
extent_start = bytenr;
|
|
|
|
|
|
|
|
ret = get_reference_status(root, extent_start, ref_generation,
|
|
|
|
0, &min_generation, &ref_count);
|
|
|
|
if (ret)
|
|
|
|
goto out;
|
|
|
|
|
|
|
|
if (ref_count != 1) {
|
|
|
|
ret = 1;
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
if (level >= 0)
|
|
|
|
ref_generation = btrfs_header_generation(eb);
|
|
|
|
}
|
|
|
|
ret = 0;
|
2007-12-18 04:14:01 +03:00
|
|
|
out:
|
2008-07-30 17:26:11 +04:00
|
|
|
if (path)
|
|
|
|
btrfs_free_path(path);
|
|
|
|
return ret;
|
2007-12-18 04:14:01 +03:00
|
|
|
}
|
2007-04-10 17:27:04 +04:00
|
|
|
|
2007-03-16 23:20:31 +03:00
|
|
|
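/*
 * add a reference to every extent the buffer points to: file extents in
 * a leaf or child blocks in a node.  For leaves the refs can also be
 * cached in the leaf ref cache.
 */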
int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
|
2008-07-28 23:32:19 +04:00
|
|
|
struct extent_buffer *buf, int cache_ref)
|
2007-03-03 00:08:05 +03:00
|
|
|
{
|
2007-10-16 00:15:53 +04:00
|
|
|
u64 bytenr;
|
2007-10-16 00:14:19 +04:00
|
|
|
u32 nritems;
|
|
|
|
struct btrfs_key key;
|
2007-03-27 14:33:00 +04:00
|
|
|
struct btrfs_file_extent_item *fi;
|
2007-03-03 00:08:05 +03:00
|
|
|
int i;
|
2007-10-16 00:15:53 +04:00
|
|
|
int level;
|
2007-03-27 14:33:00 +04:00
|
|
|
int ret;
|
2007-06-22 22:16:25 +04:00
|
|
|
int faili;
|
2008-07-28 23:32:19 +04:00
|
|
|
int nr_file_extents = 0;
|
2007-03-07 04:08:01 +03:00
|
|
|
|
2007-03-13 23:47:54 +03:00
|
|
|
if (!root->ref_cows)
|
2007-03-07 04:08:01 +03:00
|
|
|
return 0;
|
2007-10-16 00:14:19 +04:00
|
|
|
|
2007-10-16 00:15:53 +04:00
|
|
|
level = btrfs_header_level(buf);
|
2007-10-16 00:14:19 +04:00
|
|
|
nritems = btrfs_header_nritems(buf);
|
|
|
|
for (i = 0; i < nritems; i++) {
|
2008-07-22 20:08:37 +04:00
|
|
|
cond_resched();
|
2007-10-16 00:15:53 +04:00
|
|
|
if (level == 0) {
|
|
|
|
u64 disk_bytenr;
|
2007-10-16 00:14:19 +04:00
|
|
|
btrfs_item_key_to_cpu(buf, &key, i);
|
|
|
|
if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
|
2007-03-27 14:33:00 +04:00
|
|
|
continue;
|
2007-10-16 00:14:19 +04:00
|
|
|
fi = btrfs_item_ptr(buf, i,
|
2007-03-27 14:33:00 +04:00
|
|
|
struct btrfs_file_extent_item);
|
2007-10-16 00:14:19 +04:00
|
|
|
if (btrfs_file_extent_type(buf, fi) ==
|
2007-04-19 21:37:44 +04:00
|
|
|
BTRFS_FILE_EXTENT_INLINE)
|
|
|
|
continue;
|
2007-10-16 00:15:53 +04:00
|
|
|
disk_bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
|
|
|
|
if (disk_bytenr == 0)
|
2007-05-24 21:35:57 +04:00
|
|
|
continue;
|
2008-07-21 18:29:44 +04:00
|
|
|
|
2008-07-28 23:32:19 +04:00
|
|
|
if (buf != root->commit_root)
|
|
|
|
nr_file_extents++;
|
|
|
|
|
2008-07-21 18:29:44 +04:00
|
|
|
mutex_lock(&root->fs_info->alloc_mutex);
|
2008-06-26 00:01:30 +04:00
|
|
|
ret = __btrfs_inc_extent_ref(trans, root, disk_bytenr,
|
2007-12-11 17:25:06 +03:00
|
|
|
btrfs_file_extent_disk_num_bytes(buf, fi),
|
|
|
|
root->root_key.objectid, trans->transid,
|
|
|
|
key.objectid, key.offset);
|
2008-07-21 18:29:44 +04:00
|
|
|
mutex_unlock(&root->fs_info->alloc_mutex);
|
2007-06-22 22:16:25 +04:00
|
|
|
if (ret) {
|
|
|
|
faili = i;
|
2008-07-21 18:29:44 +04:00
|
|
|
WARN_ON(1);
|
2007-06-22 22:16:25 +04:00
|
|
|
goto fail;
|
|
|
|
}
|
2007-03-27 14:33:00 +04:00
|
|
|
} else {
|
2007-10-16 00:15:53 +04:00
|
|
|
bytenr = btrfs_node_blockptr(buf, i);
|
2007-12-13 17:48:07 +03:00
|
|
|
btrfs_node_key_to_cpu(buf, &key, i);
|
2008-07-21 18:29:44 +04:00
|
|
|
|
|
|
|
mutex_lock(&root->fs_info->alloc_mutex);
|
2008-06-26 00:01:30 +04:00
|
|
|
ret = __btrfs_inc_extent_ref(trans, root, bytenr,
|
2007-12-11 17:25:06 +03:00
|
|
|
btrfs_level_size(root, level - 1),
|
|
|
|
root->root_key.objectid,
|
2007-12-13 19:13:32 +03:00
|
|
|
trans->transid,
|
|
|
|
level - 1, key.objectid);
|
2008-07-21 18:29:44 +04:00
|
|
|
mutex_unlock(&root->fs_info->alloc_mutex);
|
2007-06-22 22:16:25 +04:00
|
|
|
if (ret) {
|
|
|
|
faili = i;
|
2008-07-21 18:29:44 +04:00
|
|
|
WARN_ON(1);
|
2007-06-22 22:16:25 +04:00
|
|
|
goto fail;
|
|
|
|
}
|
2007-03-27 14:33:00 +04:00
|
|
|
}
|
2007-03-03 00:08:05 +03:00
|
|
|
}
|
2008-07-28 23:32:19 +04:00
|
|
|
/* cache the original leaf block's references */
|
|
|
|
if (level == 0 && cache_ref && buf != root->commit_root) {
|
|
|
|
struct btrfs_leaf_ref *ref;
|
|
|
|
struct btrfs_extent_info *info;
|
|
|
|
|
2008-07-31 00:29:20 +04:00
|
|
|
ref = btrfs_alloc_leaf_ref(root, nr_file_extents);
|
2008-07-28 23:32:19 +04:00
|
|
|
if (!ref) {
|
|
|
|
WARN_ON(1);
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
2008-07-31 17:46:18 +04:00
|
|
|
ref->root_gen = root->root_key.offset;
|
2008-07-28 23:32:19 +04:00
|
|
|
ref->bytenr = buf->start;
|
|
|
|
ref->owner = btrfs_header_owner(buf);
|
|
|
|
ref->generation = btrfs_header_generation(buf);
|
|
|
|
ref->nritems = nr_file_extents;
|
|
|
|
info = ref->extents;
|
2008-07-31 00:29:20 +04:00
|
|
|
|
2008-07-28 23:32:19 +04:00
|
|
|
for (i = 0; nr_file_extents > 0 && i < nritems; i++) {
|
|
|
|
u64 disk_bytenr;
|
|
|
|
btrfs_item_key_to_cpu(buf, &key, i);
|
|
|
|
if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
|
|
|
|
continue;
|
|
|
|
fi = btrfs_item_ptr(buf, i,
|
|
|
|
struct btrfs_file_extent_item);
|
|
|
|
if (btrfs_file_extent_type(buf, fi) ==
|
|
|
|
BTRFS_FILE_EXTENT_INLINE)
|
|
|
|
continue;
|
|
|
|
disk_bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
|
|
|
|
if (disk_bytenr == 0)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
info->bytenr = disk_bytenr;
|
|
|
|
info->num_bytes =
|
|
|
|
btrfs_file_extent_disk_num_bytes(buf, fi);
|
|
|
|
info->objectid = key.objectid;
|
|
|
|
info->offset = key.offset;
|
|
|
|
info++;
|
|
|
|
}
|
|
|
|
|
|
|
|
BUG_ON(!root->ref_tree);
|
|
|
|
ret = btrfs_add_leaf_ref(root, ref);
|
|
|
|
WARN_ON(ret);
|
2008-07-31 00:29:20 +04:00
|
|
|
btrfs_free_leaf_ref(root, ref);
|
2008-07-28 23:32:19 +04:00
|
|
|
}
|
|
|
|
out:
|
2007-03-03 00:08:05 +03:00
|
|
|
return 0;
|
2007-06-22 22:16:25 +04:00
|
|
|
fail:
|
2007-06-28 23:57:36 +04:00
|
|
|
WARN_ON(1);
|
2007-12-11 17:25:06 +03:00
|
|
|
#if 0
|
2007-06-22 22:16:25 +04:00
|
|
|
for (i = 0; i < faili; i++) {
|
2007-10-16 00:15:53 +04:00
|
|
|
if (level == 0) {
|
|
|
|
u64 disk_bytenr;
|
2007-10-16 00:14:19 +04:00
|
|
|
btrfs_item_key_to_cpu(buf, &key, i);
|
|
|
|
if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
|
2007-06-22 22:16:25 +04:00
|
|
|
continue;
|
2007-10-16 00:14:19 +04:00
|
|
|
fi = btrfs_item_ptr(buf, i,
|
2007-06-22 22:16:25 +04:00
|
|
|
struct btrfs_file_extent_item);
|
2007-10-16 00:14:19 +04:00
|
|
|
if (btrfs_file_extent_type(buf, fi) ==
|
2007-06-22 22:16:25 +04:00
|
|
|
BTRFS_FILE_EXTENT_INLINE)
|
|
|
|
continue;
|
2007-10-16 00:15:53 +04:00
|
|
|
disk_bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
|
|
|
|
if (disk_bytenr == 0)
|
2007-06-22 22:16:25 +04:00
|
|
|
continue;
|
2007-10-16 00:15:53 +04:00
|
|
|
err = btrfs_free_extent(trans, root, disk_bytenr,
|
|
|
|
btrfs_file_extent_disk_num_bytes(buf,
|
2007-10-16 00:14:19 +04:00
|
|
|
fi), 0);
|
2007-06-22 22:16:25 +04:00
|
|
|
BUG_ON(err);
|
|
|
|
} else {
|
2007-10-16 00:15:53 +04:00
|
|
|
bytenr = btrfs_node_blockptr(buf, i);
|
|
|
|
err = btrfs_free_extent(trans, root, bytenr,
|
|
|
|
btrfs_level_size(root, level - 1), 0);
|
2007-06-22 22:16:25 +04:00
|
|
|
BUG_ON(err);
|
|
|
|
}
|
|
|
|
}
|
2007-12-11 17:25:06 +03:00
|
|
|
#endif
|
2007-06-22 22:16:25 +04:00
|
|
|
return ret;
|
2007-03-03 00:08:05 +03:00
|
|
|
}
|
|
|
|
|
2007-04-27 00:46:15 +04:00
|
|
|
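/* write the in-memory block group item back into the extent tree */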
static int write_one_cache_group(struct btrfs_trans_handle *trans,
|
|
|
|
struct btrfs_root *root,
|
|
|
|
struct btrfs_path *path,
|
|
|
|
struct btrfs_block_group_cache *cache)
|
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
int pending_ret;
|
|
|
|
struct btrfs_root *extent_root = root->fs_info->extent_root;
|
2007-10-16 00:14:19 +04:00
|
|
|
unsigned long bi;
|
|
|
|
struct extent_buffer *leaf;
|
2007-04-27 00:46:15 +04:00
|
|
|
|
|
|
|
ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1);
|
2007-06-22 22:16:25 +04:00
|
|
|
if (ret < 0)
|
|
|
|
goto fail;
|
2007-04-27 00:46:15 +04:00
|
|
|
BUG_ON(ret);
|
2007-10-16 00:14:19 +04:00
|
|
|
|
|
|
|
leaf = path->nodes[0];
|
|
|
|
bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
|
|
|
|
write_extent_buffer(leaf, &cache->item, bi, sizeof(cache->item));
|
|
|
|
btrfs_mark_buffer_dirty(leaf);
|
2007-04-27 00:46:15 +04:00
|
|
|
btrfs_release_path(extent_root, path);
|
2007-06-22 22:16:25 +04:00
|
|
|
fail:
|
2007-04-27 00:46:15 +04:00
|
|
|
finish_current_insert(trans, extent_root);
|
|
|
|
pending_ret = del_pending_extents(trans, extent_root);
|
|
|
|
if (ret)
|
|
|
|
return ret;
|
|
|
|
if (pending_ret)
|
|
|
|
return pending_ret;
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
}
|
|
|
|
|
2007-10-16 00:15:19 +04:00
|
|
|
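/*
 * write out all dirty block group items.  A group that fails to write
 * stays dirty so a later pass can retry it.
 */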
int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
|
|
|
|
struct btrfs_root *root)
|
2007-04-27 00:46:15 +04:00
|
|
|
{
|
2008-09-23 21:14:11 +04:00
|
|
|
struct btrfs_block_group_cache *cache, *entry;
|
|
|
|
struct rb_node *n;
|
2007-04-27 00:46:15 +04:00
|
|
|
int err = 0;
|
|
|
|
int werr = 0;
|
|
|
|
struct btrfs_path *path;
|
2007-10-16 00:15:19 +04:00
|
|
|
u64 last = 0;
|
2007-04-27 00:46:15 +04:00
|
|
|
|
|
|
|
path = btrfs_alloc_path();
|
|
|
|
if (!path)
|
|
|
|
return -ENOMEM;
|
|
|
|
|
2008-06-26 00:01:30 +04:00
|
|
|
mutex_lock(&root->fs_info->alloc_mutex);
|
2007-04-27 00:46:15 +04:00
|
|
|
while(1) {
|
2008-09-23 21:14:11 +04:00
|
|
|
cache = NULL;
|
|
|
|
spin_lock(&root->fs_info->block_group_cache_lock);
|
|
|
|
for (n = rb_first(&root->fs_info->block_group_cache_tree);
|
|
|
|
n; n = rb_next(n)) {
|
|
|
|
entry = rb_entry(n, struct btrfs_block_group_cache,
|
|
|
|
cache_node);
|
|
|
|
if (entry->dirty) {
|
|
|
|
cache = entry;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
spin_unlock(&root->fs_info->block_group_cache_lock);
|
2007-06-22 22:16:25 +04:00
|
|
|
|
2008-09-23 21:14:11 +04:00
|
|
|
if (!cache)
|
2007-10-16 00:15:19 +04:00
|
|
|
break;
|
2008-09-23 21:14:11 +04:00
|
|
|
|
|
|
|
last += cache->key.offset;
|
|
|
|
|
2007-10-16 00:15:19 +04:00
|
|
|
err = write_one_cache_group(trans, root,
|
|
|
|
path, cache);
|
|
|
|
/*
|
|
|
|
* if we fail to write the cache group, we want
|
|
|
|
* to keep it marked dirty in hopes that a later
|
|
|
|
* write will work
|
|
|
|
*/
|
|
|
|
if (err) {
|
|
|
|
werr = err;
|
|
|
|
continue;
|
2007-04-27 00:46:15 +04:00
|
|
|
}
|
2008-09-23 21:14:11 +04:00
|
|
|
|
|
|
|
cache->dirty = 0;
|
2007-04-27 00:46:15 +04:00
|
|
|
}
|
|
|
|
btrfs_free_path(path);
|
2008-06-26 00:01:30 +04:00
|
|
|
mutex_unlock(&root->fs_info->alloc_mutex);
|
2007-04-27 00:46:15 +04:00
|
|
|
return werr;
|
|
|
|
}
|
|
|
|
|
2008-03-25 23:50:33 +03:00
|
|
|
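/*
 * add the given bytes to the space info for these allocation flags,
 * creating the space info if it does not exist yet.
 */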
static int update_space_info(struct btrfs_fs_info *info, u64 flags,
|
|
|
|
u64 total_bytes, u64 bytes_used,
|
|
|
|
struct btrfs_space_info **space_info)
|
|
|
|
{
|
|
|
|
struct btrfs_space_info *found;
|
|
|
|
|
|
|
|
found = __find_space_info(info, flags);
|
|
|
|
if (found) {
|
|
|
|
found->total_bytes += total_bytes;
|
|
|
|
found->bytes_used += bytes_used;
|
2008-04-26 00:53:30 +04:00
|
|
|
found->full = 0;
|
2008-03-25 23:50:33 +03:00
|
|
|
*space_info = found;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
found = kmalloc(sizeof(*found), GFP_NOFS);
|
|
|
|
if (!found)
|
|
|
|
return -ENOMEM;
|
|
|
|
|
|
|
|
list_add(&found->list, &info->space_info);
|
2008-09-23 21:14:11 +04:00
|
|
|
INIT_LIST_HEAD(&found->block_groups);
|
|
|
|
spin_lock_init(&found->lock);
|
2008-03-25 23:50:33 +03:00
|
|
|
found->flags = flags;
|
|
|
|
found->total_bytes = total_bytes;
|
|
|
|
found->bytes_used = bytes_used;
|
|
|
|
found->bytes_pinned = 0;
|
|
|
|
found->full = 0;
|
2008-05-24 22:04:53 +04:00
|
|
|
found->force_alloc = 0;
|
2008-03-25 23:50:33 +03:00
|
|
|
*space_info = found;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2008-04-04 00:29:03 +04:00
|
|
|
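/* record which raid/dup profiles are in use for data, metadata and system chunks */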
static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
|
|
|
|
{
|
|
|
|
u64 extra_flags = flags & (BTRFS_BLOCK_GROUP_RAID0 |
|
2008-04-04 00:29:03 +04:00
|
|
|
BTRFS_BLOCK_GROUP_RAID1 |
|
2008-04-16 18:49:51 +04:00
|
|
|
BTRFS_BLOCK_GROUP_RAID10 |
|
2008-04-04 00:29:03 +04:00
|
|
|
BTRFS_BLOCK_GROUP_DUP);
|
2008-04-04 00:29:03 +04:00
|
|
|
if (extra_flags) {
|
|
|
|
if (flags & BTRFS_BLOCK_GROUP_DATA)
|
|
|
|
fs_info->avail_data_alloc_bits |= extra_flags;
|
|
|
|
if (flags & BTRFS_BLOCK_GROUP_METADATA)
|
|
|
|
fs_info->avail_metadata_alloc_bits |= extra_flags;
|
|
|
|
if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
|
|
|
|
fs_info->avail_system_alloc_bits |= extra_flags;
|
|
|
|
}
|
|
|
|
}
|
2008-03-25 23:50:33 +03:00
|
|
|
|
2008-05-07 19:43:44 +04:00
|
|
|
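/*
 * strip out profile bits that cannot be satisfied by the current number
 * of devices and drop redundant combinations, leaving a single usable
 * allocation profile.
 */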
static u64 reduce_alloc_profile(struct btrfs_root *root, u64 flags)
|
2008-04-28 23:29:52 +04:00
|
|
|
{
|
2008-05-07 19:43:44 +04:00
|
|
|
u64 num_devices = root->fs_info->fs_devices->num_devices;
|
|
|
|
|
|
|
|
if (num_devices == 1)
|
|
|
|
flags &= ~(BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID0);
|
|
|
|
if (num_devices < 4)
|
|
|
|
flags &= ~BTRFS_BLOCK_GROUP_RAID10;
|
|
|
|
|
2008-04-28 23:29:52 +04:00
|
|
|
if ((flags & BTRFS_BLOCK_GROUP_DUP) &&
|
|
|
|
(flags & (BTRFS_BLOCK_GROUP_RAID1 |
|
2008-05-07 19:43:44 +04:00
|
|
|
BTRFS_BLOCK_GROUP_RAID10))) {
|
2008-04-28 23:29:52 +04:00
|
|
|
flags &= ~BTRFS_BLOCK_GROUP_DUP;
|
2008-05-07 19:43:44 +04:00
|
|
|
}
|
2008-04-28 23:29:52 +04:00
|
|
|
|
|
|
|
if ((flags & BTRFS_BLOCK_GROUP_RAID1) &&
|
2008-05-07 19:43:44 +04:00
|
|
|
(flags & BTRFS_BLOCK_GROUP_RAID10)) {
|
2008-04-28 23:29:52 +04:00
|
|
|
flags &= ~BTRFS_BLOCK_GROUP_RAID1;
|
2008-05-07 19:43:44 +04:00
|
|
|
}
|
2008-04-28 23:29:52 +04:00
|
|
|
|
|
|
|
if ((flags & BTRFS_BLOCK_GROUP_RAID0) &&
|
|
|
|
((flags & BTRFS_BLOCK_GROUP_RAID1) |
|
|
|
|
(flags & BTRFS_BLOCK_GROUP_RAID10) |
|
|
|
|
(flags & BTRFS_BLOCK_GROUP_DUP)))
|
|
|
|
flags &= ~BTRFS_BLOCK_GROUP_RAID0;
|
|
|
|
return flags;
|
|
|
|
}
|
|
|
|
|
2008-03-24 22:01:59 +03:00
|
|
|
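/*
 * allocate a new chunk for the given profile when usage would cross the
 * allocation threshold or the allocation is forced.
 */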
static int do_chunk_alloc(struct btrfs_trans_handle *trans,
|
|
|
|
struct btrfs_root *extent_root, u64 alloc_bytes,
|
2008-05-24 22:04:53 +04:00
|
|
|
u64 flags, int force)
|
2008-03-24 22:01:59 +03:00
|
|
|
{
|
|
|
|
struct btrfs_space_info *space_info;
|
|
|
|
u64 thresh;
|
|
|
|
u64 start;
|
|
|
|
u64 num_bytes;
|
2008-09-23 21:14:11 +04:00
|
|
|
int ret = 0;
|
2008-03-24 22:01:59 +03:00
|
|
|
|
2008-05-07 19:43:44 +04:00
|
|
|
flags = reduce_alloc_profile(extent_root, flags);
|
2008-04-28 23:29:52 +04:00
|
|
|
|
2008-03-24 22:01:59 +03:00
|
|
|
space_info = __find_space_info(extent_root->fs_info, flags);
|
2008-03-25 23:50:33 +03:00
|
|
|
if (!space_info) {
|
|
|
|
ret = update_space_info(extent_root->fs_info, flags,
|
|
|
|
0, 0, &space_info);
|
|
|
|
BUG_ON(ret);
|
|
|
|
}
|
2008-03-24 22:01:59 +03:00
|
|
|
BUG_ON(!space_info);
|
|
|
|
|
2008-05-24 22:04:53 +04:00
|
|
|
if (space_info->force_alloc) {
|
|
|
|
force = 1;
|
|
|
|
space_info->force_alloc = 0;
|
|
|
|
}
|
2008-03-24 22:01:59 +03:00
|
|
|
if (space_info->full)
|
2008-06-26 00:01:30 +04:00
|
|
|
goto out;
|
2008-03-24 22:01:59 +03:00
|
|
|
|
2008-04-04 00:29:03 +04:00
|
|
|
thresh = div_factor(space_info->total_bytes, 6);
|
2008-05-24 22:04:53 +04:00
|
|
|
if (!force &&
|
|
|
|
(space_info->bytes_used + space_info->bytes_pinned + alloc_bytes) <
|
2008-03-24 22:01:59 +03:00
|
|
|
thresh)
|
2008-06-26 00:01:30 +04:00
|
|
|
goto out;
|
2008-03-24 22:01:59 +03:00
|
|
|
|
2008-06-26 00:01:30 +04:00
|
|
|
mutex_lock(&extent_root->fs_info->chunk_mutex);
|
2008-03-24 22:01:59 +03:00
|
|
|
ret = btrfs_alloc_chunk(trans, extent_root, &start, &num_bytes, flags);
|
|
|
|
if (ret == -ENOSPC) {
|
|
|
|
printk("space info full %Lu\n", flags);
|
|
|
|
space_info->full = 1;
|
2008-06-26 00:01:31 +04:00
|
|
|
goto out_unlock;
|
2008-03-24 22:01:59 +03:00
|
|
|
}
|
|
|
|
BUG_ON(ret);
|
|
|
|
|
|
|
|
ret = btrfs_make_block_group(trans, extent_root, 0, flags,
|
2008-04-15 23:41:47 +04:00
|
|
|
BTRFS_FIRST_CHUNK_TREE_OBJECTID, start, num_bytes);
|
2008-03-24 22:01:59 +03:00
|
|
|
BUG_ON(ret);
|
2008-09-23 21:14:11 +04:00
|
|
|
|
2008-06-26 00:01:31 +04:00
|
|
|
out_unlock:
|
2008-06-26 00:01:30 +04:00
|
|
|
mutex_unlock(&extent_root->fs_info->chunk_mutex);
|
2008-06-26 00:01:31 +04:00
|
|
|
out:
|
2008-09-23 21:14:11 +04:00
|
|
|
return ret;
|
2008-03-24 22:01:59 +03:00
|
|
|
}
|
|
|
|
|
2007-04-27 00:46:15 +04:00
|
|
|
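/*
 * walk the block groups covering [bytenr, bytenr + num_bytes) and adjust
 * their used byte counts by the allocated or freed amount.
 */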
static int update_block_group(struct btrfs_trans_handle *trans,
|
|
|
|
struct btrfs_root *root,
|
2007-10-16 00:15:53 +04:00
|
|
|
u64 bytenr, u64 num_bytes, int alloc,
|
2008-03-24 22:01:56 +03:00
|
|
|
int mark_free)
|
2007-04-27 00:46:15 +04:00
|
|
|
{
|
|
|
|
struct btrfs_block_group_cache *cache;
|
|
|
|
struct btrfs_fs_info *info = root->fs_info;
|
2007-10-16 00:15:53 +04:00
|
|
|
u64 total = num_bytes;
|
2007-04-27 00:46:15 +04:00
|
|
|
u64 old_val;
|
2007-10-16 00:15:53 +04:00
|
|
|
u64 byte_in_group;
|
2007-05-08 04:03:49 +04:00
|
|
|
|
2008-07-08 22:19:17 +04:00
|
|
|
WARN_ON(!mutex_is_locked(&root->fs_info->alloc_mutex));
|
2007-04-27 00:46:15 +04:00
|
|
|
while(total) {
|
2007-10-16 00:15:53 +04:00
|
|
|
cache = btrfs_lookup_block_group(info, bytenr);
|
2007-05-08 04:03:49 +04:00
|
|
|
if (!cache) {
|
2007-04-27 00:46:15 +04:00
|
|
|
return -1;
|
2007-04-27 18:08:34 +04:00
|
|
|
}
|
2007-10-16 00:15:53 +04:00
|
|
|
byte_in_group = bytenr - cache->key.objectid;
|
|
|
|
WARN_ON(byte_in_group > cache->key.offset);
|
2007-04-27 00:46:15 +04:00
|
|
|
|
2008-07-23 07:06:41 +04:00
|
|
|
spin_lock(&cache->lock);
|
2008-09-23 21:14:11 +04:00
|
|
|
cache->dirty = 1;
|
2007-04-27 00:46:15 +04:00
|
|
|
old_val = btrfs_block_group_used(&cache->item);
|
2007-10-16 00:15:53 +04:00
|
|
|
num_bytes = min(total, cache->key.offset - byte_in_group);
|
2007-04-27 18:08:34 +04:00
|
|
|
if (alloc) {
|
2007-10-16 00:15:53 +04:00
|
|
|
old_val += num_bytes;
|
2008-03-24 22:01:59 +03:00
|
|
|
cache->space_info->bytes_used += num_bytes;
|
2008-07-23 07:06:41 +04:00
|
|
|
btrfs_set_block_group_used(&cache->item, old_val);
|
|
|
|
spin_unlock(&cache->lock);
|
2007-04-27 18:08:34 +04:00
|
|
|
} else {
|
2007-10-16 00:15:53 +04:00
|
|
|
old_val -= num_bytes;
|
2008-03-24 22:01:59 +03:00
|
|
|
cache->space_info->bytes_used -= num_bytes;
|
2008-07-23 07:06:41 +04:00
|
|
|
btrfs_set_block_group_used(&cache->item, old_val);
|
|
|
|
spin_unlock(&cache->lock);
|
2007-10-16 00:14:48 +04:00
|
|
|
if (mark_free) {
|
2008-09-23 21:14:11 +04:00
|
|
|
int ret;
|
|
|
|
ret = btrfs_add_free_space(cache, bytenr,
|
|
|
|
num_bytes);
|
|
|
|
if (ret)
|
|
|
|
return -1;
|
2007-05-10 04:13:14 +04:00
|
|
|
}
|
2007-04-27 18:08:34 +04:00
|
|
|
}
|
2007-10-16 00:15:53 +04:00
|
|
|
total -= num_bytes;
|
|
|
|
bytenr += num_bytes;
|
2007-04-27 00:46:15 +04:00
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
|
2008-03-24 22:01:59 +03:00
|
|
|
|
2008-05-07 19:43:44 +04:00
|
|
|
static u64 first_logical_byte(struct btrfs_root *root, u64 search_start)
|
|
|
|
{
|
2008-09-23 21:14:11 +04:00
|
|
|
struct btrfs_block_group_cache *cache;
|
|
|
|
|
|
|
|
cache = btrfs_lookup_first_block_group(root->fs_info, search_start);
|
|
|
|
if (!cache)
|
2008-05-07 19:43:44 +04:00
|
|
|
return 0;
|
2008-09-23 21:14:11 +04:00
|
|
|
|
|
|
|
return cache->key.objectid;
|
2008-05-07 19:43:44 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2008-09-06 00:13:11 +04:00
|
|
|
int btrfs_update_pinned_extents(struct btrfs_root *root,
|
2007-11-16 22:57:08 +03:00
|
|
|
u64 bytenr, u64 num, int pin)
|
|
|
|
{
|
|
|
|
u64 len;
|
|
|
|
struct btrfs_block_group_cache *cache;
|
|
|
|
struct btrfs_fs_info *fs_info = root->fs_info;
|
|
|
|
|
2008-07-08 22:19:17 +04:00
|
|
|
WARN_ON(!mutex_is_locked(&root->fs_info->alloc_mutex));
|
2007-11-16 22:57:08 +03:00
|
|
|
if (pin) {
|
|
|
|
set_extent_dirty(&fs_info->pinned_extents,
|
|
|
|
bytenr, bytenr + num - 1, GFP_NOFS);
|
|
|
|
} else {
|
|
|
|
clear_extent_dirty(&fs_info->pinned_extents,
|
|
|
|
bytenr, bytenr + num - 1, GFP_NOFS);
|
|
|
|
}
|
|
|
|
while (num > 0) {
|
|
|
|
cache = btrfs_lookup_block_group(fs_info, bytenr);
|
2008-05-07 19:43:44 +04:00
|
|
|
if (!cache) {
|
|
|
|
u64 first = first_logical_byte(root, bytenr);
|
|
|
|
WARN_ON(first < bytenr);
|
|
|
|
len = min(first - bytenr, num);
|
|
|
|
} else {
|
|
|
|
len = min(num, cache->key.offset -
|
|
|
|
(bytenr - cache->key.objectid));
|
|
|
|
}
|
2007-11-16 22:57:08 +03:00
|
|
|
if (pin) {
|
2008-05-07 19:43:44 +04:00
|
|
|
if (cache) {
|
2008-07-23 07:06:41 +04:00
|
|
|
spin_lock(&cache->lock);
|
2008-05-07 19:43:44 +04:00
|
|
|
cache->pinned += len;
|
|
|
|
cache->space_info->bytes_pinned += len;
|
2008-07-23 07:06:41 +04:00
|
|
|
spin_unlock(&cache->lock);
|
2008-05-07 19:43:44 +04:00
|
|
|
}
|
2007-11-16 22:57:08 +03:00
|
|
|
fs_info->total_pinned += len;
|
|
|
|
} else {
|
2008-05-07 19:43:44 +04:00
|
|
|
if (cache) {
|
2008-07-23 07:06:41 +04:00
|
|
|
spin_lock(&cache->lock);
|
2008-05-07 19:43:44 +04:00
|
|
|
cache->pinned -= len;
|
|
|
|
cache->space_info->bytes_pinned -= len;
|
2008-07-23 07:06:41 +04:00
|
|
|
spin_unlock(&cache->lock);
|
2008-05-07 19:43:44 +04:00
|
|
|
}
|
2007-11-16 22:57:08 +03:00
|
|
|
fs_info->total_pinned -= len;
|
|
|
|
}
|
|
|
|
bytenr += len;
|
|
|
|
num -= len;
|
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
|
2007-04-27 00:46:15 +04:00
|
|
|
|
2008-01-25 00:13:08 +03:00
|
|
|
int btrfs_copy_pinned(struct btrfs_root *root, struct extent_io_tree *copy)
|
2007-06-28 23:57:36 +04:00
|
|
|
{
|
|
|
|
u64 last = 0;
|
2007-10-16 00:15:26 +04:00
|
|
|
u64 start;
|
|
|
|
u64 end;
|
2008-01-25 00:13:08 +03:00
|
|
|
struct extent_io_tree *pinned_extents = &root->fs_info->pinned_extents;
|
2007-06-28 23:57:36 +04:00
|
|
|
int ret;
|
|
|
|
|
|
|
|
while(1) {
|
2007-10-16 00:15:26 +04:00
|
|
|
ret = find_first_extent_bit(pinned_extents, last,
|
|
|
|
&start, &end, EXTENT_DIRTY);
|
|
|
|
if (ret)
|
2007-06-28 23:57:36 +04:00
|
|
|
break;
|
2007-10-16 00:15:26 +04:00
|
|
|
set_extent_dirty(copy, start, end, GFP_NOFS);
|
|
|
|
last = end + 1;
|
2007-06-28 23:57:36 +04:00
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
|
|
|
|
struct btrfs_root *root,
|
2008-01-25 00:13:08 +03:00
|
|
|
struct extent_io_tree *unpin)
|
2007-03-07 04:08:01 +03:00
|
|
|
{
|
2007-10-16 00:15:26 +04:00
|
|
|
u64 start;
|
|
|
|
u64 end;
|
2007-03-07 04:08:01 +03:00
|
|
|
int ret;
|
2008-09-23 21:14:11 +04:00
|
|
|
struct btrfs_block_group_cache *cache;
|
2007-03-07 04:08:01 +03:00
|
|
|
|
2008-06-26 00:01:30 +04:00
|
|
|
mutex_lock(&root->fs_info->alloc_mutex);
|
2007-03-07 04:08:01 +03:00
|
|
|
while(1) {
|
2007-10-16 00:15:26 +04:00
|
|
|
ret = find_first_extent_bit(unpin, 0, &start, &end,
|
|
|
|
EXTENT_DIRTY);
|
|
|
|
if (ret)
|
2007-03-07 04:08:01 +03:00
|
|
|
break;
|
2008-09-06 00:13:11 +04:00
|
|
|
btrfs_update_pinned_extents(root, start, end + 1 - start, 0);
|
2007-10-16 00:15:26 +04:00
|
|
|
clear_extent_dirty(unpin, start, end, GFP_NOFS);
|
2008-09-23 21:14:11 +04:00
|
|
|
cache = btrfs_lookup_block_group(root->fs_info, start);
|
|
|
|
if (cache->cached)
|
|
|
|
btrfs_add_free_space(cache, start, end - start + 1);
|
2008-07-23 07:06:41 +04:00
|
|
|
if (need_resched()) {
|
|
|
|
mutex_unlock(&root->fs_info->alloc_mutex);
|
|
|
|
cond_resched();
|
|
|
|
mutex_lock(&root->fs_info->alloc_mutex);
|
|
|
|
}
|
2007-03-07 04:08:01 +03:00
|
|
|
}
|
2008-06-26 00:01:30 +04:00
|
|
|
mutex_unlock(&root->fs_info->alloc_mutex);
|
2007-03-07 04:08:01 +03:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2008-01-03 18:01:48 +03:00
|
|
|
static int finish_current_insert(struct btrfs_trans_handle *trans,
|
|
|
|
struct btrfs_root *extent_root)
|
2007-03-07 19:50:24 +03:00
|
|
|
{
|
2007-12-11 17:25:06 +03:00
|
|
|
u64 start;
|
|
|
|
u64 end;
|
|
|
|
struct btrfs_fs_info *info = extent_root->fs_info;
|
2007-12-11 20:42:00 +03:00
|
|
|
struct extent_buffer *eb;
|
2007-12-11 17:25:06 +03:00
|
|
|
struct btrfs_path *path;
|
2007-03-12 23:22:34 +03:00
|
|
|
struct btrfs_key ins;
|
2007-12-11 20:42:00 +03:00
|
|
|
struct btrfs_disk_key first;
|
2007-03-13 17:46:10 +03:00
|
|
|
struct btrfs_extent_item extent_item;
|
2007-03-07 19:50:24 +03:00
|
|
|
int ret;
|
2007-12-11 20:42:00 +03:00
|
|
|
int level;
|
2007-10-16 00:15:26 +04:00
|
|
|
int err = 0;
|
2007-03-07 19:50:24 +03:00
|
|
|
|
2008-07-08 22:19:17 +04:00
|
|
|
WARN_ON(!mutex_is_locked(&extent_root->fs_info->alloc_mutex));
|
2007-10-16 00:14:19 +04:00
|
|
|
btrfs_set_stack_extent_refs(&extent_item, 1);
|
2007-03-15 19:56:47 +03:00
|
|
|
btrfs_set_key_type(&ins, BTRFS_EXTENT_ITEM_KEY);
|
2007-12-11 17:25:06 +03:00
|
|
|
path = btrfs_alloc_path();
|
2007-03-07 19:50:24 +03:00
|
|
|
|
2007-08-09 04:17:12 +04:00
|
|
|
while(1) {
|
2007-10-16 00:15:26 +04:00
|
|
|
ret = find_first_extent_bit(&info->extent_ins, 0, &start,
|
|
|
|
&end, EXTENT_LOCKED);
|
|
|
|
if (ret)
|
2007-08-09 04:17:12 +04:00
|
|
|
break;
|
|
|
|
|
2007-10-16 00:15:26 +04:00
|
|
|
ins.objectid = start;
|
|
|
|
ins.offset = end + 1 - start;
|
|
|
|
err = btrfs_insert_item(trans, extent_root, &ins,
|
|
|
|
&extent_item, sizeof(extent_item));
|
|
|
|
clear_extent_bits(&info->extent_ins, start, end, EXTENT_LOCKED,
|
|
|
|
GFP_NOFS);
|
2008-07-23 07:06:41 +04:00
|
|
|
|
2008-09-06 00:13:11 +04:00
|
|
|
eb = btrfs_find_create_tree_block(extent_root, ins.objectid,
|
2008-07-23 07:06:41 +04:00
|
|
|
ins.offset);
|
|
|
|
|
2008-09-06 00:13:11 +04:00
|
|
|
if (!btrfs_buffer_uptodate(eb, trans->transid))
|
2008-07-23 07:06:41 +04:00
|
|
|
btrfs_read_buffer(eb, trans->transid);
|
|
|
|
|
2008-06-26 00:01:30 +04:00
|
|
|
btrfs_tree_lock(eb);
|
2007-12-11 20:42:00 +03:00
|
|
|
level = btrfs_header_level(eb);
|
|
|
|
if (level == 0) {
|
|
|
|
btrfs_item_key(eb, &first, 0);
|
|
|
|
} else {
|
|
|
|
btrfs_node_key(eb, &first, 0);
|
|
|
|
}
|
2008-06-26 00:01:30 +04:00
|
|
|
btrfs_tree_unlock(eb);
|
|
|
|
free_extent_buffer(eb);
|
|
|
|
/*
|
|
|
|
* the first key is just a hint, so the race we've created
|
|
|
|
* against reading it is fine
|
|
|
|
*/
|
2007-12-11 17:25:06 +03:00
|
|
|
err = btrfs_insert_extent_backref(trans, extent_root, path,
|
|
|
|
start, extent_root->root_key.objectid,
|
2007-12-13 19:13:32 +03:00
|
|
|
0, level,
|
|
|
|
btrfs_disk_key_objectid(&first));
|
2007-12-11 17:25:06 +03:00
|
|
|
BUG_ON(err);
|
2008-07-23 07:06:41 +04:00
|
|
|
if (need_resched()) {
|
|
|
|
mutex_unlock(&extent_root->fs_info->alloc_mutex);
|
|
|
|
cond_resched();
|
|
|
|
mutex_lock(&extent_root->fs_info->alloc_mutex);
|
|
|
|
}
|
2007-03-07 19:50:24 +03:00
|
|
|
}
|
2007-12-11 17:25:06 +03:00
|
|
|
btrfs_free_path(path);
|
2007-03-07 19:50:24 +03:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2007-10-16 00:15:53 +04:00
|
|
|
static int pin_down_bytes(struct btrfs_root *root, u64 bytenr, u32 num_bytes,
|
2008-09-08 19:18:08 +04:00
|
|
|
int is_data, int pending)
|
2007-03-22 19:13:20 +03:00
|
|
|
{
|
2007-10-16 00:15:26 +04:00
|
|
|
int err = 0;
|
2007-03-26 18:15:30 +04:00
|
|
|
|
2008-07-08 22:19:17 +04:00
|
|
|
WARN_ON(!mutex_is_locked(&root->fs_info->alloc_mutex));
|
2007-03-27 19:05:53 +04:00
|
|
|
if (!pending) {
|
2008-06-26 00:01:30 +04:00
|
|
|
struct extent_buffer *buf;
|
2008-09-08 19:18:08 +04:00
|
|
|
|
|
|
|
if (is_data)
|
|
|
|
goto pinit;
|
|
|
|
|
2007-10-16 00:15:53 +04:00
|
|
|
buf = btrfs_find_tree_block(root, bytenr, num_bytes);
|
2007-10-16 00:14:19 +04:00
|
|
|
if (buf) {
|
2008-09-06 00:13:11 +04:00
|
|
|
/* we can reuse a block if it hasn't been written
|
|
|
|
* and it is from this transaction. We can't
|
|
|
|
* reuse anything from the tree log root because
|
|
|
|
* it has tiny sub-transactions.
|
|
|
|
*/
|
2008-07-24 20:18:16 +04:00
|
|
|
if (btrfs_buffer_uptodate(buf, 0) &&
|
|
|
|
btrfs_try_tree_lock(buf)) {
|
2007-04-02 18:50:19 +04:00
|
|
|
u64 transid =
|
|
|
|
root->fs_info->running_transaction->transid;
|
2008-01-08 23:46:30 +03:00
|
|
|
u64 header_transid =
|
|
|
|
btrfs_header_generation(buf);
|
2008-09-06 00:13:11 +04:00
|
|
|
if (btrfs_header_owner(buf) !=
|
|
|
|
BTRFS_TREE_LOG_OBJECTID &&
|
|
|
|
header_transid == transid &&
|
2008-04-04 23:40:00 +04:00
|
|
|
!btrfs_header_flag(buf,
|
|
|
|
BTRFS_HEADER_FLAG_WRITTEN)) {
|
2008-01-09 23:55:33 +03:00
|
|
|
clean_tree_block(NULL, root, buf);
|
2008-06-26 00:01:30 +04:00
|
|
|
btrfs_tree_unlock(buf);
|
2007-10-16 00:14:19 +04:00
|
|
|
free_extent_buffer(buf);
|
2007-11-06 18:25:25 +03:00
|
|
|
return 1;
|
2007-04-02 18:50:19 +04:00
|
|
|
}
|
2008-06-26 00:01:30 +04:00
|
|
|
btrfs_tree_unlock(buf);
|
2007-03-27 19:05:53 +04:00
|
|
|
}
|
2007-10-16 00:14:19 +04:00
|
|
|
free_extent_buffer(buf);
|
2007-03-26 18:15:30 +04:00
|
|
|
}
|
2008-09-08 19:18:08 +04:00
|
|
|
pinit:
|
2008-09-06 00:13:11 +04:00
|
|
|
btrfs_update_pinned_extents(root, bytenr, num_bytes, 1);
|
2007-03-27 19:05:53 +04:00
|
|
|
} else {
|
2007-10-16 00:15:26 +04:00
|
|
|
set_extent_bits(&root->fs_info->pending_del,
|
2007-10-16 00:15:53 +04:00
|
|
|
bytenr, bytenr + num_bytes - 1,
|
|
|
|
EXTENT_LOCKED, GFP_NOFS);
|
2007-03-27 19:05:53 +04:00
|
|
|
}
|
2007-05-06 18:15:01 +04:00
|
|
|
BUG_ON(err < 0);
|
2007-03-22 19:13:20 +03:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2007-02-26 18:40:21 +03:00
|
|
|
/*
|
2007-03-07 04:08:01 +03:00
|
|
|
* remove an extent from the root, returns 0 on success
|
2007-02-26 18:40:21 +03:00
|
|
|
*/
|
2007-03-16 23:20:31 +03:00
|
|
|
static int __free_extent(struct btrfs_trans_handle *trans, struct btrfs_root
|
2007-12-11 17:25:06 +03:00
|
|
|
*root, u64 bytenr, u64 num_bytes,
|
|
|
|
u64 root_objectid, u64 ref_generation,
|
|
|
|
u64 owner_objectid, u64 owner_offset, int pin,
|
2007-05-10 04:13:14 +04:00
|
|
|
int mark_free)
|
2007-03-07 04:08:01 +03:00
|
|
|
{
|
2007-04-02 19:20:42 +04:00
|
|
|
struct btrfs_path *path;
|
2007-03-12 23:22:34 +03:00
|
|
|
struct btrfs_key key;
|
2007-03-21 03:35:03 +03:00
|
|
|
struct btrfs_fs_info *info = root->fs_info;
|
|
|
|
struct btrfs_root *extent_root = info->extent_root;
|
2007-10-16 00:14:19 +04:00
|
|
|
struct extent_buffer *leaf;
|
2007-03-07 04:08:01 +03:00
|
|
|
int ret;
|
2008-02-19 00:33:44 +03:00
|
|
|
int extent_slot = 0;
|
|
|
|
int found_extent = 0;
|
|
|
|
int num_to_del = 1;
|
2007-03-13 17:46:10 +03:00
|
|
|
struct btrfs_extent_item *ei;
|
2007-03-13 16:49:06 +03:00
|
|
|
u32 refs;
|
2007-03-07 19:50:24 +03:00
|
|
|
|
2008-07-08 22:19:17 +04:00
|
|
|
WARN_ON(!mutex_is_locked(&root->fs_info->alloc_mutex));
|
2007-10-16 00:15:53 +04:00
|
|
|
key.objectid = bytenr;
|
2007-03-15 19:56:47 +03:00
|
|
|
btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
|
2007-10-16 00:15:53 +04:00
|
|
|
key.offset = num_bytes;
|
2007-04-02 19:20:42 +04:00
|
|
|
path = btrfs_alloc_path();
|
2007-06-22 22:16:25 +04:00
|
|
|
if (!path)
|
|
|
|
return -ENOMEM;
|
2007-04-05 18:38:44 +04:00
|
|
|
|
2008-04-21 20:01:38 +04:00
|
|
|
path->reada = 1;
|
2007-12-11 17:25:06 +03:00
|
|
|
ret = lookup_extent_backref(trans, extent_root, path,
|
|
|
|
bytenr, root_objectid,
|
|
|
|
ref_generation,
|
|
|
|
owner_objectid, owner_offset, 1);
|
|
|
|
if (ret == 0) {
|
2008-02-19 00:33:44 +03:00
|
|
|
struct btrfs_key found_key;
|
|
|
|
extent_slot = path->slots[0];
|
|
|
|
while(extent_slot > 0) {
|
|
|
|
extent_slot--;
|
|
|
|
btrfs_item_key_to_cpu(path->nodes[0], &found_key,
|
|
|
|
extent_slot);
|
|
|
|
if (found_key.objectid != bytenr)
|
|
|
|
break;
|
|
|
|
if (found_key.type == BTRFS_EXTENT_ITEM_KEY &&
|
|
|
|
found_key.offset == num_bytes) {
|
|
|
|
found_extent = 1;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
if (path->slots[0] - extent_slot > 5)
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
if (!found_extent)
|
|
|
|
ret = btrfs_del_item(trans, extent_root, path);
|
2007-12-11 17:25:06 +03:00
|
|
|
} else {
|
|
|
|
btrfs_print_leaf(extent_root, path->nodes[0]);
|
|
|
|
WARN_ON(1);
|
|
|
|
printk("Unable to find ref byte nr %Lu root %Lu "
|
|
|
|
" gen %Lu owner %Lu offset %Lu\n", bytenr,
|
|
|
|
root_objectid, ref_generation, owner_objectid,
|
|
|
|
owner_offset);
|
|
|
|
}
|
2008-02-19 00:33:44 +03:00
|
|
|
if (!found_extent) {
|
|
|
|
btrfs_release_path(extent_root, path);
|
|
|
|
ret = btrfs_search_slot(trans, extent_root, &key, path, -1, 1);
|
|
|
|
if (ret < 0)
|
|
|
|
return ret;
|
|
|
|
BUG_ON(ret);
|
|
|
|
extent_slot = path->slots[0];
|
|
|
|
}
|
2007-10-16 00:14:19 +04:00
|
|
|
|
|
|
|
leaf = path->nodes[0];
|
2008-02-19 00:33:44 +03:00
|
|
|
ei = btrfs_item_ptr(leaf, extent_slot,
|
2007-03-14 21:14:43 +03:00
|
|
|
struct btrfs_extent_item);
|
2007-10-16 00:14:19 +04:00
|
|
|
refs = btrfs_extent_refs(leaf, ei);
|
|
|
|
BUG_ON(refs == 0);
|
|
|
|
refs -= 1;
|
|
|
|
btrfs_set_extent_refs(leaf, ei, refs);
|
2008-02-19 00:33:44 +03:00
|
|
|
|
2007-10-16 00:14:19 +04:00
|
|
|
btrfs_mark_buffer_dirty(leaf);
|
|
|
|
|
2008-02-19 00:33:44 +03:00
|
|
|
if (refs == 0 && found_extent && path->slots[0] == extent_slot + 1) {
|
|
|
|
/* if the back ref and the extent are next to each other
|
|
|
|
* they get deleted below in one shot
|
|
|
|
*/
|
|
|
|
path->slots[0] = extent_slot;
|
|
|
|
num_to_del = 2;
|
|
|
|
} else if (found_extent) {
|
|
|
|
/* otherwise delete the extent back ref */
|
|
|
|
ret = btrfs_del_item(trans, extent_root, path);
|
|
|
|
BUG_ON(ret);
|
|
|
|
/* if refs are 0, we need to setup the path for deletion */
|
|
|
|
if (refs == 0) {
|
|
|
|
btrfs_release_path(extent_root, path);
|
|
|
|
ret = btrfs_search_slot(trans, extent_root, &key, path,
|
|
|
|
-1, 1);
|
|
|
|
if (ret < 0)
|
|
|
|
return ret;
|
|
|
|
BUG_ON(ret);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2007-03-13 16:49:06 +03:00
|
|
|
if (refs == 0) {
|
2007-10-16 00:15:53 +04:00
|
|
|
u64 super_used;
|
|
|
|
u64 root_used;
|
2008-08-12 17:13:26 +04:00
|
|
|
#ifdef BIO_RW_DISCARD
|
|
|
|
u64 map_length = num_bytes;
|
|
|
|
struct btrfs_multi_bio *multi = NULL;
|
|
|
|
#endif
|
2007-03-25 19:35:08 +04:00
|
|
|
|
|
|
|
if (pin) {
|
2008-09-08 19:18:08 +04:00
|
|
|
ret = pin_down_bytes(root, bytenr, num_bytes,
|
|
|
|
owner_objectid >= BTRFS_FIRST_FREE_OBJECTID, 0);
|
2007-11-06 18:25:25 +03:00
|
|
|
if (ret > 0)
|
|
|
|
mark_free = 1;
|
|
|
|
BUG_ON(ret < 0);
|
2007-03-25 19:35:08 +04:00
|
|
|
}
|
|
|
|
|
2007-08-29 23:47:34 +04:00
|
|
|
/* block accounting for super block */
|
2008-06-26 00:01:30 +04:00
|
|
|
spin_lock_irq(&info->delalloc_lock);
|
2007-10-16 00:15:53 +04:00
|
|
|
super_used = btrfs_super_bytes_used(&info->super_copy);
|
|
|
|
btrfs_set_super_bytes_used(&info->super_copy,
|
|
|
|
super_used - num_bytes);
|
2008-06-26 00:01:30 +04:00
|
|
|
spin_unlock_irq(&info->delalloc_lock);
|
2007-08-29 23:47:34 +04:00
|
|
|
|
|
|
|
/* block accounting for root item */
|
2007-10-16 00:15:53 +04:00
|
|
|
root_used = btrfs_root_used(&root->root_item);
|
2007-10-16 00:14:19 +04:00
|
|
|
btrfs_set_root_used(&root->root_item,
|
2007-10-16 00:15:53 +04:00
|
|
|
root_used - num_bytes);
|
2008-02-19 00:33:44 +03:00
|
|
|
ret = btrfs_del_items(trans, extent_root, path, path->slots[0],
|
|
|
|
num_to_del);
|
2007-06-22 22:16:25 +04:00
|
|
|
if (ret) {
|
|
|
|
return ret;
|
|
|
|
}
|
2007-10-16 00:15:53 +04:00
|
|
|
ret = update_block_group(trans, root, bytenr, num_bytes, 0,
|
2008-03-24 22:01:56 +03:00
|
|
|
mark_free);
|
2007-04-27 00:46:15 +04:00
|
|
|
BUG_ON(ret);
|
2008-08-12 17:13:26 +04:00
|
|
|
|
|
|
|
#ifdef BIO_RW_DISCARD
|
|
|
|
/* Tell the block device(s) that the sectors can be discarded */
|
|
|
|
ret = btrfs_map_block(&root->fs_info->mapping_tree, READ,
|
|
|
|
bytenr, &map_length, &multi, 0);
|
|
|
|
if (!ret) {
|
|
|
|
struct btrfs_bio_stripe *stripe = multi->stripes;
|
|
|
|
int i;
|
|
|
|
|
|
|
|
if (map_length > num_bytes)
|
|
|
|
map_length = num_bytes;
|
|
|
|
|
|
|
|
for (i = 0; i < multi->num_stripes; i++, stripe++) {
|
|
|
|
blkdev_issue_discard(stripe->dev->bdev,
|
|
|
|
stripe->physical >> 9,
|
|
|
|
map_length >> 9);
|
|
|
|
}
|
|
|
|
kfree(multi);
|
|
|
|
}
|
|
|
|
#endif
|
2007-03-07 04:08:01 +03:00
|
|
|
}
|
2007-04-02 19:20:42 +04:00
|
|
|
btrfs_free_path(path);
|
2007-03-16 23:20:31 +03:00
|
|
|
finish_current_insert(trans, extent_root);
|
2007-03-07 04:08:01 +03:00
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* find all the blocks marked as pending in the radix tree and remove
|
|
|
|
* them from the extent map
|
|
|
|
*/
|
2007-03-16 23:20:31 +03:00
|
|
|
static int del_pending_extents(struct btrfs_trans_handle *trans, struct
|
|
|
|
btrfs_root *extent_root)
|
2007-03-07 04:08:01 +03:00
|
|
|
{
|
|
|
|
int ret;
|
2007-03-22 19:13:20 +03:00
|
|
|
int err = 0;
|
2007-10-16 00:15:26 +04:00
|
|
|
u64 start;
|
|
|
|
u64 end;
|
2008-01-25 00:13:08 +03:00
|
|
|
struct extent_io_tree *pending_del;
|
|
|
|
struct extent_io_tree *pinned_extents;
|
2007-03-26 18:15:30 +04:00
|
|
|
|
2008-07-08 22:19:17 +04:00
|
|
|
WARN_ON(!mutex_is_locked(&extent_root->fs_info->alloc_mutex));
|
2007-10-16 00:15:26 +04:00
|
|
|
pending_del = &extent_root->fs_info->pending_del;
|
|
|
|
pinned_extents = &extent_root->fs_info->pinned_extents;
|
2007-03-07 04:08:01 +03:00
|
|
|
|
|
|
|
while(1) {
|
2007-10-16 00:15:26 +04:00
|
|
|
ret = find_first_extent_bit(pending_del, 0, &start, &end,
|
|
|
|
EXTENT_LOCKED);
|
|
|
|
if (ret)
|
2007-03-07 04:08:01 +03:00
|
|
|
break;
|
2007-10-16 00:15:26 +04:00
|
|
|
clear_extent_bits(pending_del, start, end, EXTENT_LOCKED,
|
|
|
|
GFP_NOFS);
|
2008-07-23 07:06:41 +04:00
|
|
|
if (!test_range_bit(&extent_root->fs_info->extent_ins,
|
|
|
|
start, end, EXTENT_LOCKED, 0)) {
|
2008-09-06 00:13:11 +04:00
|
|
|
btrfs_update_pinned_extents(extent_root, start,
|
2008-07-23 07:06:41 +04:00
|
|
|
end + 1 - start, 1);
|
|
|
|
ret = __free_extent(trans, extent_root,
|
|
|
|
start, end + 1 - start,
|
|
|
|
extent_root->root_key.objectid,
|
|
|
|
0, 0, 0, 0, 0);
|
|
|
|
} else {
|
|
|
|
clear_extent_bits(&extent_root->fs_info->extent_ins,
|
|
|
|
start, end, EXTENT_LOCKED, GFP_NOFS);
|
|
|
|
}
|
2007-10-16 00:15:26 +04:00
|
|
|
if (ret)
|
|
|
|
err = ret;
|
2008-07-23 07:06:41 +04:00
|
|
|
|
|
|
|
if (need_resched()) {
|
|
|
|
mutex_unlock(&extent_root->fs_info->alloc_mutex);
|
|
|
|
cond_resched();
|
|
|
|
mutex_lock(&extent_root->fs_info->alloc_mutex);
|
|
|
|
}
|
2007-02-26 18:40:21 +03:00
|
|
|
}
|
2007-03-22 19:13:20 +03:00
|
|
|
return err;
|
2007-02-26 18:40:21 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* remove an extent from the root, returns 0 on success
|
|
|
|
*/
|
2008-06-26 00:01:30 +04:00
|
|
|
static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
|
|
|
|
struct btrfs_root *root, u64 bytenr,
|
|
|
|
u64 num_bytes, u64 root_objectid,
|
|
|
|
u64 ref_generation, u64 owner_objectid,
|
|
|
|
u64 owner_offset, int pin)
|
2007-02-26 18:40:21 +03:00
|
|
|
{
|
2007-03-20 21:38:32 +03:00
|
|
|
struct btrfs_root *extent_root = root->fs_info->extent_root;
|
2007-02-26 18:40:21 +03:00
|
|
|
int pending_ret;
|
|
|
|
int ret;
|
2007-03-07 04:08:01 +03:00
|
|
|
|
2007-10-16 00:15:53 +04:00
|
|
|
WARN_ON(num_bytes < root->sectorsize);
|
2007-12-11 17:25:06 +03:00
|
|
|
if (!root->ref_cows)
|
|
|
|
ref_generation = 0;
|
|
|
|
|
2007-02-26 18:40:21 +03:00
|
|
|
if (root == extent_root) {
|
2008-09-08 19:18:08 +04:00
|
|
|
pin_down_bytes(root, bytenr, num_bytes, 0, 1);
|
2007-02-26 18:40:21 +03:00
|
|
|
return 0;
|
|
|
|
}
|
2008-09-08 19:18:08 +04:00
|
|
|
/* if metadata, always pin */
|
2008-09-11 23:54:42 +04:00
|
|
|
if (owner_objectid < BTRFS_FIRST_FREE_OBJECTID) {
|
|
|
|
if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
|
2008-09-23 21:14:11 +04:00
|
|
|
struct btrfs_block_group_cache *cache;
|
|
|
|
|
2008-09-11 23:54:42 +04:00
|
|
|
/* btrfs_free_reserved_extent */
|
2008-09-23 21:14:11 +04:00
|
|
|
cache = btrfs_lookup_block_group(root->fs_info, bytenr);
|
|
|
|
BUG_ON(!cache);
|
|
|
|
btrfs_add_free_space(cache, bytenr, num_bytes);
|
2008-09-11 23:54:42 +04:00
|
|
|
return 0;
|
|
|
|
}
|
2008-09-08 19:18:08 +04:00
|
|
|
pin = 1;
|
2008-09-11 23:54:42 +04:00
|
|
|
}
|
2008-09-08 19:18:08 +04:00
|
|
|
|
|
|
|
/* if data, pin when any transaction has committed this */
|
|
|
|
if (ref_generation != trans->transid)
|
|
|
|
pin = 1;
|
|
|
|
|
2007-12-11 17:25:06 +03:00
|
|
|
ret = __free_extent(trans, root, bytenr, num_bytes, root_objectid,
|
|
|
|
ref_generation, owner_objectid, owner_offset,
|
|
|
|
pin, pin == 0);
|
2008-07-17 20:54:40 +04:00
|
|
|
|
|
|
|
finish_current_insert(trans, root->fs_info->extent_root);
|
2007-03-22 19:13:20 +03:00
|
|
|
pending_ret = del_pending_extents(trans, root->fs_info->extent_root);
|
2007-02-26 18:40:21 +03:00
|
|
|
return ret ? ret : pending_ret;
|
|
|
|
}
|
|
|
|
|
2008-06-26 00:01:30 +04:00
|
|
|
int btrfs_free_extent(struct btrfs_trans_handle *trans,
|
|
|
|
struct btrfs_root *root, u64 bytenr,
|
|
|
|
u64 num_bytes, u64 root_objectid,
|
|
|
|
u64 ref_generation, u64 owner_objectid,
|
|
|
|
u64 owner_offset, int pin)
|
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
maybe_lock_mutex(root);
|
|
|
|
ret = __btrfs_free_extent(trans, root, bytenr, num_bytes,
|
|
|
|
root_objectid, ref_generation,
|
|
|
|
owner_objectid, owner_offset, pin);
|
|
|
|
maybe_unlock_mutex(root);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2007-11-30 19:30:34 +03:00
|
|
|
static u64 stripe_align(struct btrfs_root *root, u64 val)
|
|
|
|
{
|
|
|
|
u64 mask = ((u64)root->stripesize - 1);
|
|
|
|
u64 ret = (val + mask) & ~mask;
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
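stripe_align() above rounds val up to the next stripe boundary; the mask trick relies on root->stripesize being a power of two. A quick standalone check with assumed example values (not taken from the code above):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* assumed example values, only to show the rounding behaviour */
	uint64_t stripesize = 65536;             /* 64K stripe, power of two */
	uint64_t mask = stripesize - 1;          /* 0xffff */
	uint64_t val = 102400;                   /* 100K, not aligned */
	uint64_t aligned = (val + mask) & ~mask; /* rounds up to 131072 (128K) */

	printf("%llu\n", (unsigned long long)aligned);
	return 0;
}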
2007-02-26 18:40:21 +03:00
|
|
|
/*
|
|
|
|
* walks the btree of allocated extents and find a hole of a given size.
|
|
|
|
* The key ins is changed to record the hole:
|
|
|
|
* ins->objectid == block start
|
2007-03-15 19:56:47 +03:00
|
|
|
* ins->flags = BTRFS_EXTENT_ITEM_KEY
|
2007-02-26 18:40:21 +03:00
|
|
|
* ins->offset == number of blocks
|
|
|
|
* Any available blocks before search_start are skipped.
|
|
|
|
*/
|
2008-01-03 18:01:48 +03:00
|
|
|
static int noinline find_free_extent(struct btrfs_trans_handle *trans,
|
|
|
|
struct btrfs_root *orig_root,
|
|
|
|
u64 num_bytes, u64 empty_size,
|
|
|
|
u64 search_start, u64 search_end,
|
|
|
|
u64 hint_byte, struct btrfs_key *ins,
|
|
|
|
u64 exclude_start, u64 exclude_nr,
|
|
|
|
int data)
|
2007-02-26 18:40:21 +03:00
|
|
|
{
|
2007-11-30 19:30:34 +03:00
|
|
|
int ret;
|
2008-05-07 19:43:44 +04:00
|
|
|
u64 orig_search_start;
|
2007-03-20 21:38:32 +03:00
|
|
|
struct btrfs_root * root = orig_root->fs_info->extent_root;
|
2007-04-25 23:52:25 +04:00
|
|
|
struct btrfs_fs_info *info = root->fs_info;
|
2007-10-16 00:15:53 +04:00
|
|
|
u64 total_needed = num_bytes;
|
2008-03-24 22:02:07 +03:00
|
|
|
u64 *last_ptr = NULL;
|
2007-05-03 17:06:49 +04:00
|
|
|
struct btrfs_block_group_cache *block_group;
|
2008-05-24 22:04:53 +04:00
|
|
|
int chunk_alloc_done = 0;
|
2008-03-24 22:02:07 +03:00
|
|
|
int empty_cluster = 2 * 1024 * 1024;
|
2008-05-24 22:04:53 +04:00
|
|
|
int allowed_chunk_alloc = 0;
|
2007-02-26 18:40:21 +03:00
|
|
|
|
2007-10-16 00:15:53 +04:00
|
|
|
WARN_ON(num_bytes < root->sectorsize);
|
2007-04-04 23:27:52 +04:00
|
|
|
btrfs_set_key_type(ins, BTRFS_EXTENT_ITEM_KEY);
|
|
|
|
|
2008-05-24 22:04:53 +04:00
|
|
|
if (orig_root->ref_cows || empty_size)
|
|
|
|
allowed_chunk_alloc = 1;
|
|
|
|
|
2008-03-24 22:02:07 +03:00
|
|
|
if (data & BTRFS_BLOCK_GROUP_METADATA) {
|
|
|
|
last_ptr = &root->fs_info->last_alloc;
|
2008-04-04 00:29:03 +04:00
|
|
|
empty_cluster = 256 * 1024;
|
2008-03-24 22:02:07 +03:00
|
|
|
}
|
|
|
|
|
2008-09-23 21:14:11 +04:00
|
|
|
if ((data & BTRFS_BLOCK_GROUP_DATA) && btrfs_test_opt(root, SSD))
|
2008-03-24 22:02:07 +03:00
|
|
|
last_ptr = &root->fs_info->last_data_alloc;
|
2008-09-23 21:14:11 +04:00
|
|
|
|
2008-09-06 00:13:11 +04:00
|
|
|
if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
|
|
|
|
last_ptr = &root->fs_info->last_log_alloc;
|
|
|
|
if (*last_ptr == 0 && root->fs_info->last_alloc) {
|
|
|
|
*last_ptr = root->fs_info->last_alloc + empty_cluster;
|
|
|
|
}
|
|
|
|
}
|
2008-03-24 22:02:07 +03:00
|
|
|
|
|
|
|
if (last_ptr) {
|
|
|
|
if (*last_ptr)
|
|
|
|
hint_byte = *last_ptr;
|
2008-09-23 21:14:11 +04:00
|
|
|
else
|
2008-03-24 22:02:07 +03:00
|
|
|
empty_size += empty_cluster;
|
|
|
|
}
|
|
|
|
|
2008-05-07 19:43:44 +04:00
|
|
|
search_start = max(search_start, first_logical_byte(root, 0));
|
|
|
|
orig_search_start = search_start;
|
|
|
|
|
2008-03-24 22:01:28 +03:00
|
|
|
if (search_end == (u64)-1)
|
|
|
|
search_end = btrfs_super_total_bytes(&info->super_copy);
|
2008-03-24 22:01:56 +03:00
|
|
|
|
2008-03-24 22:02:07 +03:00
|
|
|
search_start = max(search_start, hint_byte);
|
2007-08-08 00:15:09 +04:00
|
|
|
total_needed += empty_size;
|
2008-03-24 22:01:56 +03:00
|
|
|
|
2008-09-23 21:14:11 +04:00
|
|
|
new_group:
|
|
|
|
block_group = btrfs_lookup_block_group(info, search_start);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Ok, this looks a little tricky, but it's really simple. First, if we
|
|
|
|
* didn't find a block group, obviously we want to start over.
|
|
|
|
* Secondly, if the block group we found does not match the type we
|
|
|
|
* need, and we have a last_ptr and it's not 0, chances are the last
|
|
|
|
* allocation we made was at the end of the block group, so let's go
|
|
|
|
* ahead and skip looking through the rest of the block groups and
|
|
|
|
* start at the beginning. This helps with metadata allocations,
|
|
|
|
* since you are likely to have a bunch of data block groups to search
|
|
|
|
* through first before you realize that you need to start over, so go
|
|
|
|
* ahead and start over and save the time.
|
|
|
|
*/
|
|
|
|
if (!block_group || (!block_group_bits(block_group, data) &&
|
|
|
|
last_ptr && *last_ptr)) {
|
|
|
|
if (search_start != orig_search_start) {
|
|
|
|
if (last_ptr && *last_ptr)
|
|
|
|
*last_ptr = 0;
|
|
|
|
search_start = orig_search_start;
|
|
|
|
goto new_group;
|
|
|
|
} else if (!chunk_alloc_done && allowed_chunk_alloc) {
|
|
|
|
ret = do_chunk_alloc(trans, root,
|
|
|
|
num_bytes + 2 * 1024 * 1024,
|
|
|
|
data, 1);
|
|
|
|
if (ret < 0) {
|
|
|
|
struct btrfs_space_info *info;
|
|
|
|
|
|
|
|
info = __find_space_info(root->fs_info, data);
|
|
|
|
goto error;
|
|
|
|
}
|
|
|
|
BUG_ON(ret);
|
|
|
|
chunk_alloc_done = 1;
|
|
|
|
search_start = orig_search_start;
|
|
|
|
goto new_group;
|
|
|
|
} else {
|
|
|
|
ret = -ENOSPC;
|
|
|
|
goto error;
|
2008-05-24 22:04:53 +04:00
|
|
|
}
|
2008-03-24 22:02:07 +03:00
|
|
|
}
|
2007-10-16 00:17:44 +04:00
|
|
|
|
2008-09-23 21:14:11 +04:00
|
|
|
/*
|
|
|
|
* this is going to search through all of the existing block groups it
|
|
|
|
* can find, so if we don't find something we need to see if we can
|
|
|
|
* allocate what we need.
|
|
|
|
*/
|
|
|
|
ret = find_free_space(root, &block_group, &search_start,
|
|
|
|
total_needed, data);
|
|
|
|
if (ret == -ENOSPC) {
|
|
|
|
/*
|
|
|
|
* instead of allocating, start at the original search start
|
|
|
|
* and see if there is something to be found; if not, then we
|
|
|
|
* allocate
|
|
|
|
*/
|
|
|
|
if (search_start != orig_search_start) {
|
|
|
|
if (last_ptr && *last_ptr) {
|
|
|
|
*last_ptr = 0;
|
|
|
|
total_needed += empty_cluster;
|
|
|
|
}
|
|
|
|
search_start = orig_search_start;
|
|
|
|
goto new_group;
|
2008-03-24 22:02:07 +03:00
|
|
|
}
|
2008-09-23 21:14:11 +04:00
|
|
|
|
|
|
|
/*
|
|
|
|
* we've already allocated, we're pretty screwed
|
|
|
|
*/
|
|
|
|
if (chunk_alloc_done) {
|
2008-03-24 22:02:07 +03:00
|
|
|
goto error;
|
2008-09-23 21:14:11 +04:00
|
|
|
} else if (!allowed_chunk_alloc && block_group &&
|
|
|
|
block_group_bits(block_group, data)) {
|
|
|
|
block_group->space_info->force_alloc = 1;
|
|
|
|
goto error;
|
|
|
|
} else if (!allowed_chunk_alloc) {
|
|
|
|
goto error;
|
|
|
|
}
|
|
|
|
|
|
|
|
ret = do_chunk_alloc(trans, root, num_bytes + 2 * 1024 * 1024,
|
|
|
|
data, 1);
|
|
|
|
if (ret < 0)
|
|
|
|
goto error;
|
|
|
|
|
|
|
|
BUG_ON(ret);
|
|
|
|
chunk_alloc_done = 1;
|
|
|
|
if (block_group)
|
|
|
|
search_start = block_group->key.objectid +
|
|
|
|
block_group->key.offset;
|
|
|
|
else
|
|
|
|
search_start = orig_search_start;
|
|
|
|
goto new_group;
|
2008-03-24 22:02:07 +03:00
|
|
|
}
|
|
|
|
|
2008-09-23 21:14:11 +04:00
|
|
|
if (ret)
|
|
|
|
goto error;
|
|
|
|
|
2008-03-24 22:01:56 +03:00
|
|
|
search_start = stripe_align(root, search_start);
|
|
|
|
ins->objectid = search_start;
|
|
|
|
ins->offset = num_bytes;
|
2007-05-10 04:13:14 +04:00
|
|
|
|
2008-09-23 21:14:11 +04:00
|
|
|
if (ins->objectid + num_bytes >= search_end) {
|
|
|
|
search_start = orig_search_start;
|
|
|
|
if (chunk_alloc_done) {
|
|
|
|
ret = -ENOSPC;
|
|
|
|
goto error;
|
|
|
|
}
|
|
|
|
goto new_group;
|
|
|
|
}
|
2008-03-24 22:01:56 +03:00
|
|
|
|
|
|
|
if (ins->objectid + num_bytes >
|
|
|
|
block_group->key.objectid + block_group->key.offset) {
|
2008-09-23 21:14:11 +04:00
|
|
|
if (search_start == orig_search_start && chunk_alloc_done) {
|
|
|
|
ret = -ENOSPC;
|
|
|
|
goto error;
|
|
|
|
}
|
2007-10-16 00:17:44 +04:00
|
|
|
search_start = block_group->key.objectid +
|
|
|
|
block_group->key.offset;
|
|
|
|
goto new_group;
|
|
|
|
}
|
2008-03-24 22:01:56 +03:00
|
|
|
|
2007-10-16 00:15:53 +04:00
|
|
|
if (exclude_nr > 0 && (ins->objectid + num_bytes > exclude_start &&
|
2007-06-26 20:20:46 +04:00
|
|
|
ins->objectid < exclude_start + exclude_nr)) {
|
|
|
|
search_start = exclude_start + exclude_nr;
|
|
|
|
goto new_group;
|
|
|
|
}
|
2008-03-24 22:01:56 +03:00
|
|
|
|
2008-09-23 21:14:11 +04:00
|
|
|
if (!(data & BTRFS_BLOCK_GROUP_DATA))
|
|
|
|
trans->block_group = block_group;
|
|
|
|
|
2007-10-16 00:15:53 +04:00
|
|
|
ins->offset = num_bytes;
|
2008-03-24 22:02:07 +03:00
|
|
|
if (last_ptr) {
|
|
|
|
*last_ptr = ins->objectid + ins->offset;
|
|
|
|
if (*last_ptr ==
|
2008-09-23 21:14:11 +04:00
|
|
|
btrfs_super_total_bytes(&root->fs_info->super_copy))
|
2008-03-24 22:02:07 +03:00
|
|
|
*last_ptr = 0;
|
2007-05-06 18:15:01 +04:00
|
|
|
}
|
|
|
|
|
2008-09-23 21:14:11 +04:00
|
|
|
ret = 0;
|
2007-03-01 00:46:22 +03:00
|
|
|
error:
|
|
|
|
return ret;
|
2007-02-26 18:40:21 +03:00
|
|
|
}
|
2008-04-28 23:29:52 +04:00
|
|
|
|
2008-09-23 21:14:11 +04:00
|
|
|
static void dump_space_info(struct btrfs_space_info *info, u64 bytes)
|
|
|
|
{
|
|
|
|
struct btrfs_block_group_cache *cache;
|
|
|
|
struct list_head *l;
|
|
|
|
|
|
|
|
printk(KERN_INFO "space_info has %Lu free, is %sfull\n",
|
|
|
|
info->total_bytes - info->bytes_used - info->bytes_pinned,
|
|
|
|
(info->full) ? "" : "not ");
|
|
|
|
|
|
|
|
spin_lock(&info->lock);
|
|
|
|
list_for_each(l, &info->block_groups) {
|
|
|
|
cache = list_entry(l, struct btrfs_block_group_cache, list);
|
|
|
|
spin_lock(&cache->lock);
|
|
|
|
printk(KERN_INFO "block group %Lu has %Lu bytes, %Lu used "
|
|
|
|
"%Lu pinned\n",
|
|
|
|
cache->key.objectid, cache->key.offset,
|
|
|
|
btrfs_block_group_used(&cache->item), cache->pinned);
|
|
|
|
btrfs_dump_free_space(cache, bytes);
|
|
|
|
spin_unlock(&cache->lock);
|
|
|
|
}
|
|
|
|
spin_unlock(&info->lock);
|
|
|
|
}
|
2008-07-17 20:53:50 +04:00
|
|
|
static int __btrfs_reserve_extent(struct btrfs_trans_handle *trans,
|
|
|
|
struct btrfs_root *root,
|
|
|
|
u64 num_bytes, u64 min_alloc_size,
|
|
|
|
u64 empty_size, u64 hint_byte,
|
|
|
|
u64 search_end, struct btrfs_key *ins,
|
|
|
|
u64 data)
|
2007-02-26 18:40:21 +03:00
|
|
|
{
|
|
|
|
int ret;
|
2007-05-30 18:22:12 +04:00
|
|
|
u64 search_start = 0;
|
2008-04-04 00:29:03 +04:00
|
|
|
u64 alloc_profile;
|
2007-03-21 03:35:03 +03:00
|
|
|
struct btrfs_fs_info *info = root->fs_info;
|
2008-09-23 21:14:11 +04:00
|
|
|
struct btrfs_block_group_cache *cache;
|
2008-06-26 00:01:30 +04:00
|
|
|
|
2008-03-24 22:01:59 +03:00
|
|
|
if (data) {
|
2008-04-04 00:29:03 +04:00
|
|
|
alloc_profile = info->avail_data_alloc_bits &
|
|
|
|
info->data_alloc_profile;
|
|
|
|
data = BTRFS_BLOCK_GROUP_DATA | alloc_profile;
|
2008-03-24 22:01:59 +03:00
|
|
|
} else if (root == root->fs_info->chunk_root) {
|
2008-04-04 00:29:03 +04:00
|
|
|
alloc_profile = info->avail_system_alloc_bits &
|
|
|
|
info->system_alloc_profile;
|
|
|
|
data = BTRFS_BLOCK_GROUP_SYSTEM | alloc_profile;
|
2008-03-24 22:01:59 +03:00
|
|
|
} else {
|
2008-04-04 00:29:03 +04:00
|
|
|
alloc_profile = info->avail_metadata_alloc_bits &
|
|
|
|
info->metadata_alloc_profile;
|
|
|
|
data = BTRFS_BLOCK_GROUP_METADATA | alloc_profile;
|
2008-03-24 22:01:59 +03:00
|
|
|
}
|
2008-04-14 17:46:10 +04:00
|
|
|
again:
|
2008-05-07 19:43:44 +04:00
|
|
|
data = reduce_alloc_profile(root, data);
|
2008-05-24 22:04:53 +04:00
|
|
|
/*
|
|
|
|
* the only place that sets empty_size is btrfs_realloc_node, which
|
|
|
|
* is not called recursively on allocations
|
|
|
|
*/
|
|
|
|
if (empty_size || root->ref_cows) {
|
2008-03-25 23:50:33 +03:00
|
|
|
if (!(data & BTRFS_BLOCK_GROUP_METADATA)) {
|
2008-03-24 22:01:59 +03:00
|
|
|
ret = do_chunk_alloc(trans, root->fs_info->extent_root,
|
2008-05-24 22:04:53 +04:00
|
|
|
2 * 1024 * 1024,
|
|
|
|
BTRFS_BLOCK_GROUP_METADATA |
|
|
|
|
(info->metadata_alloc_profile &
|
|
|
|
info->avail_metadata_alloc_bits), 0);
|
2008-03-24 22:01:59 +03:00
|
|
|
}
|
|
|
|
ret = do_chunk_alloc(trans, root->fs_info->extent_root,
|
2008-05-24 22:04:53 +04:00
|
|
|
num_bytes + 2 * 1024 * 1024, data, 0);
|
2008-03-24 22:01:59 +03:00
|
|
|
}
|
2008-03-24 22:01:56 +03:00
|
|
|
|
2007-10-16 00:15:53 +04:00
|
|
|
WARN_ON(num_bytes < root->sectorsize);
|
|
|
|
ret = find_free_extent(trans, root, num_bytes, empty_size,
|
|
|
|
search_start, search_end, hint_byte, ins,
|
2007-08-09 04:17:12 +04:00
|
|
|
trans->alloc_exclude_start,
|
|
|
|
trans->alloc_exclude_nr, data);
|
2008-04-17 19:29:12 +04:00
|
|
|
|
2008-04-14 17:46:10 +04:00
|
|
|
if (ret == -ENOSPC && num_bytes > min_alloc_size) {
|
|
|
|
num_bytes = num_bytes >> 1;
|
2008-09-23 21:14:11 +04:00
|
|
|
num_bytes = num_bytes & ~(root->sectorsize - 1);
|
2008-04-14 17:46:10 +04:00
|
|
|
num_bytes = max(num_bytes, min_alloc_size);
|
2008-05-24 22:04:53 +04:00
|
|
|
do_chunk_alloc(trans, root->fs_info->extent_root,
|
|
|
|
num_bytes, data, 1);
|
2008-04-14 17:46:10 +04:00
|
|
|
goto again;
|
|
|
|
}
|
2008-04-28 23:29:52 +04:00
|
|
|
if (ret) {
|
2008-09-23 21:14:11 +04:00
|
|
|
struct btrfs_space_info *sinfo;
|
|
|
|
|
|
|
|
sinfo = __find_space_info(root->fs_info, data);
|
|
|
|
printk("allocation failed flags %Lu, wanted %Lu\n",
|
|
|
|
data, num_bytes);
|
|
|
|
dump_space_info(sinfo, num_bytes);
|
2008-06-26 00:01:30 +04:00
|
|
|
BUG();
|
|
|
|
}
|
2008-09-23 21:14:11 +04:00
|
|
|
cache = btrfs_lookup_block_group(root->fs_info, ins->objectid);
|
|
|
|
if (!cache) {
|
|
|
|
printk(KERN_ERR "Unable to find block group for %Lu\n", ins->objectid);
|
|
|
|
return -ENOSPC;
|
|
|
|
}
|
|
|
|
|
|
|
|
ret = btrfs_remove_free_space(cache, ins->objectid, ins->offset);
|
|
|
|
|
|
|
|
return ret;
|
2008-07-17 20:53:50 +04:00
|
|
|
}
|
|
|
|
|
2008-08-01 23:11:20 +04:00
|
|
|
int btrfs_free_reserved_extent(struct btrfs_root *root, u64 start, u64 len)
|
|
|
|
{
|
2008-09-23 21:14:11 +04:00
|
|
|
struct btrfs_block_group_cache *cache;
|
|
|
|
|
2008-08-01 23:11:20 +04:00
|
|
|
maybe_lock_mutex(root);
|
2008-09-23 21:14:11 +04:00
|
|
|
cache = btrfs_lookup_block_group(root->fs_info, start);
|
|
|
|
if (!cache) {
|
|
|
|
printk(KERN_ERR "Unable to find block group for %Lu\n", start);
|
|
|
|
maybe_unlock_mutex(root);
|
|
|
|
return -ENOSPC;
|
|
|
|
}
|
|
|
|
btrfs_add_free_space(cache, start, len);
|
2008-08-01 23:11:20 +04:00
|
|
|
maybe_unlock_mutex(root);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2008-07-17 20:53:50 +04:00
|
|
|
int btrfs_reserve_extent(struct btrfs_trans_handle *trans,
|
|
|
|
struct btrfs_root *root,
|
|
|
|
u64 num_bytes, u64 min_alloc_size,
|
|
|
|
u64 empty_size, u64 hint_byte,
|
|
|
|
u64 search_end, struct btrfs_key *ins,
|
|
|
|
u64 data)
|
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
maybe_lock_mutex(root);
|
|
|
|
ret = __btrfs_reserve_extent(trans, root, num_bytes, min_alloc_size,
|
|
|
|
empty_size, hint_byte, search_end, ins,
|
|
|
|
data);
|
|
|
|
maybe_unlock_mutex(root);
|
|
|
|
return ret;
|
|
|
|
}
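For orientation, the sketch below shows how a hypothetical caller might use the two exported helpers above: reserve an extent, then hand the space back with btrfs_free_reserved_extent() if a later step fails. It will not build outside the btrfs tree and only illustrates the call pattern; record_extent_somehow() is a made-up placeholder for whatever the caller records against the reservation.

/* Hypothetical caller, shown only to illustrate the call pattern. */
static int example_reserve_data(struct btrfs_trans_handle *trans,
				struct btrfs_root *root, u64 num_bytes,
				u64 hint_byte, struct btrfs_key *ins)
{
	int ret;

	/* nonzero 'data' asks for a data allocation profile */
	ret = btrfs_reserve_extent(trans, root, num_bytes, root->sectorsize,
				   0, hint_byte, (u64)-1, ins, 1);
	if (ret)
		return ret;

	ret = record_extent_somehow(trans, root, ins);	/* placeholder */
	if (ret)
		btrfs_free_reserved_extent(root, ins->objectid, ins->offset);
	return ret;
}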
|
|
|
|
|
|
|
|
static int __btrfs_alloc_reserved_extent(struct btrfs_trans_handle *trans,
|
|
|
|
struct btrfs_root *root,
|
|
|
|
u64 root_objectid, u64 ref_generation,
|
|
|
|
u64 owner, u64 owner_offset,
|
|
|
|
struct btrfs_key *ins)
|
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
int pending_ret;
|
|
|
|
u64 super_used;
|
|
|
|
u64 root_used;
|
|
|
|
u64 num_bytes = ins->offset;
|
|
|
|
u32 sizes[2];
|
|
|
|
struct btrfs_fs_info *info = root->fs_info;
|
|
|
|
struct btrfs_root *extent_root = info->extent_root;
|
|
|
|
struct btrfs_extent_item *extent_item;
|
|
|
|
struct btrfs_extent_ref *ref;
|
|
|
|
struct btrfs_path *path;
|
|
|
|
struct btrfs_key keys[2];
|
2007-02-26 18:40:21 +03:00
|
|
|
|
2007-08-29 23:47:34 +04:00
|
|
|
/* block accounting for super block */
|
2008-06-26 00:01:30 +04:00
|
|
|
spin_lock_irq(&info->delalloc_lock);
|
2007-10-16 00:15:53 +04:00
|
|
|
super_used = btrfs_super_bytes_used(&info->super_copy);
|
|
|
|
btrfs_set_super_bytes_used(&info->super_copy, super_used + num_bytes);
|
2008-06-26 00:01:30 +04:00
|
|
|
spin_unlock_irq(&info->delalloc_lock);
|
2007-08-09 04:17:12 +04:00
|
|
|
|
2007-08-29 23:47:34 +04:00
|
|
|
/* block accounting for root item */
|
2007-10-16 00:15:53 +04:00
|
|
|
root_used = btrfs_root_used(&root->root_item);
|
|
|
|
btrfs_set_root_used(&root->root_item, root_used + num_bytes);
|
2007-08-29 23:47:34 +04:00
|
|
|
|
2007-08-09 04:17:12 +04:00
|
|
|
if (root == extent_root) {
|
2007-10-16 00:15:26 +04:00
|
|
|
set_extent_bits(&root->fs_info->extent_ins, ins->objectid,
|
|
|
|
ins->objectid + ins->offset - 1,
|
|
|
|
EXTENT_LOCKED, GFP_NOFS);
|
2007-08-09 04:17:12 +04:00
|
|
|
goto update_block;
|
|
|
|
}
|
|
|
|
|
2008-02-01 22:51:59 +03:00
|
|
|
memcpy(&keys[0], ins, sizeof(*ins));
|
|
|
|
keys[1].offset = hash_extent_ref(root_objectid, ref_generation,
|
|
|
|
owner, owner_offset);
|
|
|
|
keys[1].objectid = ins->objectid;
|
|
|
|
keys[1].type = BTRFS_EXTENT_REF_KEY;
|
|
|
|
sizes[0] = sizeof(*extent_item);
|
|
|
|
sizes[1] = sizeof(*ref);
|
2007-12-11 17:25:06 +03:00
|
|
|
|
|
|
|
path = btrfs_alloc_path();
|
|
|
|
BUG_ON(!path);
|
2008-02-01 22:51:59 +03:00
|
|
|
|
|
|
|
ret = btrfs_insert_empty_items(trans, extent_root, path, keys,
|
|
|
|
sizes, 2);
|
2007-06-28 23:57:36 +04:00
|
|
|
BUG_ON(ret);
|
2008-09-23 21:14:11 +04:00
|
|
|
|
2008-02-01 22:51:59 +03:00
|
|
|
extent_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
|
|
|
|
struct btrfs_extent_item);
|
|
|
|
btrfs_set_extent_refs(path->nodes[0], extent_item, 1);
|
|
|
|
ref = btrfs_item_ptr(path->nodes[0], path->slots[0] + 1,
|
|
|
|
struct btrfs_extent_ref);
|
|
|
|
|
|
|
|
btrfs_set_ref_root(path->nodes[0], ref, root_objectid);
|
|
|
|
btrfs_set_ref_generation(path->nodes[0], ref, ref_generation);
|
|
|
|
btrfs_set_ref_objectid(path->nodes[0], ref, owner);
|
|
|
|
btrfs_set_ref_offset(path->nodes[0], ref, owner_offset);
|
|
|
|
|
|
|
|
btrfs_mark_buffer_dirty(path->nodes[0]);
|
|
|
|
|
|
|
|
trans->alloc_exclude_start = 0;
|
|
|
|
trans->alloc_exclude_nr = 0;
|
2007-12-11 17:25:06 +03:00
|
|
|
btrfs_free_path(path);
|
2007-03-16 23:20:31 +03:00
|
|
|
finish_current_insert(trans, extent_root);
|
2007-03-22 19:13:20 +03:00
|
|
|
pending_ret = del_pending_extents(trans, extent_root);
|
2007-10-16 00:14:48 +04:00
|
|
|
|
2008-06-26 00:01:30 +04:00
|
|
|
if (ret)
|
|
|
|
goto out;
|
2007-05-10 04:13:14 +04:00
|
|
|
if (pending_ret) {
|
2008-06-26 00:01:30 +04:00
|
|
|
ret = pending_ret;
|
|
|
|
goto out;
|
2007-05-10 04:13:14 +04:00
|
|
|
}
|
2007-08-09 04:17:12 +04:00
|
|
|
|
|
|
|
update_block:
|
2008-03-24 22:01:56 +03:00
|
|
|
ret = update_block_group(trans, root, ins->objectid, ins->offset, 1, 0);
|
2008-02-04 18:10:13 +03:00
|
|
|
if (ret) {
|
|
|
|
printk("update block group failed for %Lu %Lu\n",
|
|
|
|
ins->objectid, ins->offset);
|
|
|
|
BUG();
|
|
|
|
}
|
2008-06-26 00:01:30 +04:00
|
|
|
out:
|
2008-07-17 20:53:50 +04:00
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
int btrfs_alloc_reserved_extent(struct btrfs_trans_handle *trans,
|
|
|
|
struct btrfs_root *root,
|
|
|
|
u64 root_objectid, u64 ref_generation,
|
|
|
|
u64 owner, u64 owner_offset,
|
|
|
|
struct btrfs_key *ins)
|
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
maybe_lock_mutex(root);
|
|
|
|
ret = __btrfs_alloc_reserved_extent(trans, root, root_objectid,
|
|
|
|
ref_generation, owner,
|
|
|
|
owner_offset, ins);
|
|
|
|
maybe_unlock_mutex(root);
|
|
|
|
return ret;
|
|
|
|
}
|
2008-09-06 00:13:11 +04:00
|
|
|
|
|
|
|
/*
|
|
|
|
* this is used by the tree logging recovery code. It records that
|
|
|
|
* an extent has been allocated and makes sure to clear the free
|
|
|
|
* space cache bits as well
|
|
|
|
*/
|
|
|
|
int btrfs_alloc_logged_extent(struct btrfs_trans_handle *trans,
|
|
|
|
struct btrfs_root *root,
|
|
|
|
u64 root_objectid, u64 ref_generation,
|
|
|
|
u64 owner, u64 owner_offset,
|
|
|
|
struct btrfs_key *ins)
|
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
struct btrfs_block_group_cache *block_group;
|
|
|
|
|
|
|
|
maybe_lock_mutex(root);
|
|
|
|
block_group = btrfs_lookup_block_group(root->fs_info, ins->objectid);
|
|
|
|
cache_block_group(root, block_group);
|
|
|
|
|
2008-09-23 21:14:11 +04:00
|
|
|
ret = btrfs_remove_free_space(block_group, ins->objectid, ins->offset);
|
|
|
|
BUG_ON(ret);
|
|
|
|
|
2008-09-06 00:13:11 +04:00
|
|
|
ret = __btrfs_alloc_reserved_extent(trans, root, root_objectid,
|
|
|
|
ref_generation, owner,
|
|
|
|
owner_offset, ins);
|
|
|
|
maybe_unlock_mutex(root);
|
|
|
|
return ret;
|
|
|
|
}
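/*
 * Illustrative sketch: roughly how a log-replay caller might use
 * btrfs_alloc_logged_extent() to re-record an extent whose location was
 * already chosen before the crash.  The helper name, the key values and
 * the choice of root_objectid/ref_generation are hypothetical examples;
 * only the btrfs_alloc_logged_extent() signature comes from the function
 * above.
 */
#if 0
static int replay_record_extent_sketch(struct btrfs_trans_handle *trans,
					struct btrfs_root *root,
					u64 start, u64 len,
					u64 file_objectid, u64 file_offset)
{
	struct btrfs_key ins;

	/* the extent location was decided before the crash, so all that is
	 * left is to re-insert the extent item and its back reference, and
	 * to remove the range from the free space cache.
	 */
	ins.objectid = start;
	ins.offset = len;
	ins.type = BTRFS_EXTENT_ITEM_KEY;

	return btrfs_alloc_logged_extent(trans, root,
					 root->root_key.objectid,
					 trans->transid,
					 file_objectid, file_offset, &ins);
}
#endif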
|
|
|
|
|
2008-07-17 20:53:50 +04:00
|
|
|
/*
|
|
|
|
* finds a free extent and does all the dirty work required for allocation
|
|
|
|
 * returns the key for the allocated extent through ins.
|
|
|
|
*
|
|
|
|
* returns 0 if everything worked, non-zero otherwise.
|
|
|
|
*/
|
|
|
|
int btrfs_alloc_extent(struct btrfs_trans_handle *trans,
|
|
|
|
struct btrfs_root *root,
|
|
|
|
u64 num_bytes, u64 min_alloc_size,
|
|
|
|
u64 root_objectid, u64 ref_generation,
|
|
|
|
u64 owner, u64 owner_offset,
|
|
|
|
u64 empty_size, u64 hint_byte,
|
|
|
|
u64 search_end, struct btrfs_key *ins, u64 data)
|
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
maybe_lock_mutex(root);
|
|
|
|
|
|
|
|
ret = __btrfs_reserve_extent(trans, root, num_bytes,
|
|
|
|
min_alloc_size, empty_size, hint_byte,
|
|
|
|
search_end, ins, data);
|
|
|
|
BUG_ON(ret);
|
2008-09-11 23:54:42 +04:00
|
|
|
if (root_objectid != BTRFS_TREE_LOG_OBJECTID) {
|
|
|
|
ret = __btrfs_alloc_reserved_extent(trans, root, root_objectid,
|
|
|
|
ref_generation, owner,
|
|
|
|
owner_offset, ins);
|
|
|
|
BUG_ON(ret);
|
2008-07-17 20:53:50 +04:00
|
|
|
|
2008-09-11 23:54:42 +04:00
|
|
|
}
|
2008-06-26 00:01:30 +04:00
|
|
|
maybe_unlock_mutex(root);
|
|
|
|
return ret;
|
2007-02-26 18:40:21 +03:00
|
|
|
}
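/*
 * Illustrative sketch: how a file-data caller might ask for an extent with
 * btrfs_alloc_extent().  The inode/offset arguments and the 64K sizes are
 * made-up example values; the parameter order follows the prototype above.
 */
#if 0
static int alloc_data_extent_sketch(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root,
				    u64 ino, u64 file_offset, u64 hint_byte)
{
	struct btrfs_key ins;
	int ret;

	ret = btrfs_alloc_extent(trans, root,
				 64 * 1024,	/* num_bytes wanted */
				 64 * 1024,	/* min_alloc_size */
				 root->root_key.objectid,
				 trans->transid,
				 ino, file_offset,
				 0,		/* empty_size */
				 hint_byte,	/* start searching near here */
				 (u64)-1,	/* search_end: whole fs */
				 &ins, 1);	/* data allocation */
	if (ret)
		return ret;
	/* ins.objectid is the byte offset of the new extent,
	 * ins.offset is its length in bytes
	 */
	return 0;
}
#endif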
|
2008-08-01 23:11:20 +04:00
|
|
|
|
|
|
|
struct extent_buffer *btrfs_init_new_buffer(struct btrfs_trans_handle *trans,
|
|
|
|
struct btrfs_root *root,
|
|
|
|
u64 bytenr, u32 blocksize)
|
|
|
|
{
|
|
|
|
struct extent_buffer *buf;
|
|
|
|
|
|
|
|
buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
|
|
|
|
if (!buf)
|
|
|
|
return ERR_PTR(-ENOMEM);
|
|
|
|
btrfs_set_header_generation(buf, trans->transid);
|
|
|
|
btrfs_tree_lock(buf);
|
|
|
|
clean_tree_block(trans, root, buf);
|
|
|
|
btrfs_set_buffer_uptodate(buf);
|
2008-09-12 00:17:57 +04:00
|
|
|
if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
|
|
|
|
set_extent_dirty(&root->dirty_log_pages, buf->start,
|
|
|
|
buf->start + buf->len - 1, GFP_NOFS);
|
|
|
|
} else {
|
|
|
|
set_extent_dirty(&trans->transaction->dirty_pages, buf->start,
|
2008-08-01 23:11:20 +04:00
|
|
|
buf->start + buf->len - 1, GFP_NOFS);
|
2008-09-12 00:17:57 +04:00
|
|
|
}
|
2008-08-01 23:11:20 +04:00
|
|
|
trans->blocks_used++;
|
|
|
|
return buf;
|
|
|
|
}
|
|
|
|
|
2007-02-26 18:40:21 +03:00
|
|
|
/*
|
|
|
|
* helper function to allocate a block for a given tree
|
|
|
|
* returns the tree buffer or NULL.
|
|
|
|
*/
|
2007-10-16 00:14:19 +04:00
|
|
|
struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
|
2007-12-11 17:25:06 +03:00
|
|
|
struct btrfs_root *root,
|
|
|
|
u32 blocksize,
|
|
|
|
u64 root_objectid,
|
|
|
|
u64 ref_generation,
|
|
|
|
u64 first_objectid,
|
|
|
|
int level,
|
|
|
|
u64 hint,
|
2007-10-16 00:14:19 +04:00
|
|
|
u64 empty_size)
|
2007-02-26 18:40:21 +03:00
|
|
|
{
|
2007-03-12 23:22:34 +03:00
|
|
|
struct btrfs_key ins;
|
2007-02-26 18:40:21 +03:00
|
|
|
int ret;
|
2007-10-16 00:14:19 +04:00
|
|
|
struct extent_buffer *buf;
|
2007-02-26 18:40:21 +03:00
|
|
|
|
2008-04-14 17:46:10 +04:00
|
|
|
ret = btrfs_alloc_extent(trans, root, blocksize, blocksize,
|
2007-12-11 17:25:06 +03:00
|
|
|
root_objectid, ref_generation,
|
2007-12-13 19:13:32 +03:00
|
|
|
level, first_objectid, empty_size, hint,
|
2007-10-16 00:15:53 +04:00
|
|
|
(u64)-1, &ins, 0);
|
2007-02-26 18:40:21 +03:00
|
|
|
if (ret) {
|
2007-06-22 22:16:25 +04:00
|
|
|
BUG_ON(ret > 0);
|
|
|
|
return ERR_PTR(ret);
|
2007-02-26 18:40:21 +03:00
|
|
|
}
|
2008-01-09 23:55:33 +03:00
|
|
|
|
2008-08-01 23:11:20 +04:00
|
|
|
buf = btrfs_init_new_buffer(trans, root, ins.objectid, blocksize);
|
2007-02-26 18:40:21 +03:00
|
|
|
return buf;
|
|
|
|
}
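/*
 * Illustrative sketch: allocating a replacement tree block during COW with
 * btrfs_alloc_free_block().  The parent buffer, the use of root->nodesize
 * and the generation passed as ref_generation are assumptions for the
 * example; the argument order matches the prototype above (the block level
 * and the objectid of the first key end up as the owner/owner_offset of
 * the extent ref).
 */
#if 0
static struct extent_buffer *
cow_alloc_sketch(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		 struct extent_buffer *parent, int level, u64 first_objectid)
{
	return btrfs_alloc_free_block(trans, root, root->nodesize,
				      root->root_key.objectid,
				      btrfs_header_generation(parent),
				      first_objectid, level,
				      parent->start, /* hint: stay near parent */
				      0);            /* empty_size */
}
#endif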
|
2007-03-07 04:08:01 +03:00
|
|
|
|
2008-09-06 00:13:11 +04:00
|
|
|
int btrfs_drop_leaf_ref(struct btrfs_trans_handle *trans,
|
|
|
|
struct btrfs_root *root, struct extent_buffer *leaf)
|
2007-03-27 14:33:00 +04:00
|
|
|
{
|
2007-12-11 17:25:06 +03:00
|
|
|
u64 leaf_owner;
|
|
|
|
u64 leaf_generation;
|
2007-10-16 00:14:19 +04:00
|
|
|
struct btrfs_key key;
|
2007-03-27 14:33:00 +04:00
|
|
|
struct btrfs_file_extent_item *fi;
|
|
|
|
int i;
|
|
|
|
int nritems;
|
|
|
|
int ret;
|
|
|
|
|
2007-10-16 00:14:19 +04:00
|
|
|
BUG_ON(!btrfs_is_leaf(leaf));
|
|
|
|
nritems = btrfs_header_nritems(leaf);
|
2007-12-11 17:25:06 +03:00
|
|
|
leaf_owner = btrfs_header_owner(leaf);
|
|
|
|
leaf_generation = btrfs_header_generation(leaf);
|
|
|
|
|
2007-03-27 14:33:00 +04:00
|
|
|
for (i = 0; i < nritems; i++) {
|
2007-10-16 00:15:53 +04:00
|
|
|
u64 disk_bytenr;
|
2008-07-22 20:08:37 +04:00
|
|
|
cond_resched();
|
2007-10-16 00:14:19 +04:00
|
|
|
|
|
|
|
btrfs_item_key_to_cpu(leaf, &key, i);
|
|
|
|
if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
|
2007-03-27 14:33:00 +04:00
|
|
|
continue;
|
|
|
|
fi = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item);
|
2007-10-16 00:14:19 +04:00
|
|
|
if (btrfs_file_extent_type(leaf, fi) ==
|
|
|
|
BTRFS_FILE_EXTENT_INLINE)
|
2007-04-19 21:37:44 +04:00
|
|
|
continue;
|
2007-03-27 14:33:00 +04:00
|
|
|
/*
|
|
|
|
* FIXME make sure to insert a trans record that
|
|
|
|
* repeats the snapshot del on crash
|
|
|
|
*/
|
2007-10-16 00:15:53 +04:00
|
|
|
disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
|
|
|
|
if (disk_bytenr == 0)
|
2007-05-24 21:35:57 +04:00
|
|
|
continue;
|
2008-07-21 18:29:44 +04:00
|
|
|
|
|
|
|
mutex_lock(&root->fs_info->alloc_mutex);
|
2008-06-26 00:01:30 +04:00
|
|
|
ret = __btrfs_free_extent(trans, root, disk_bytenr,
|
2007-12-11 17:25:06 +03:00
|
|
|
btrfs_file_extent_disk_num_bytes(leaf, fi),
|
|
|
|
leaf_owner, leaf_generation,
|
|
|
|
key.objectid, key.offset, 0);
|
2008-07-21 18:29:44 +04:00
|
|
|
mutex_unlock(&root->fs_info->alloc_mutex);
|
2008-08-04 16:20:15 +04:00
|
|
|
|
|
|
|
atomic_inc(&root->fs_info->throttle_gen);
|
|
|
|
wake_up(&root->fs_info->transaction_throttle);
|
|
|
|
cond_resched();
|
|
|
|
|
2007-03-27 14:33:00 +04:00
|
|
|
BUG_ON(ret);
|
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2008-09-06 00:13:11 +04:00
|
|
|
static int noinline cache_drop_leaf_ref(struct btrfs_trans_handle *trans,
|
|
|
|
struct btrfs_root *root,
|
|
|
|
struct btrfs_leaf_ref *ref)
|
2008-07-28 23:32:19 +04:00
|
|
|
{
|
|
|
|
int i;
|
|
|
|
int ret;
|
|
|
|
struct btrfs_extent_info *info = ref->extents;
|
|
|
|
|
|
|
|
for (i = 0; i < ref->nritems; i++) {
|
|
|
|
mutex_lock(&root->fs_info->alloc_mutex);
|
|
|
|
ret = __btrfs_free_extent(trans, root,
|
|
|
|
info->bytenr, info->num_bytes,
|
|
|
|
ref->owner, ref->generation,
|
|
|
|
info->objectid, info->offset, 0);
|
|
|
|
mutex_unlock(&root->fs_info->alloc_mutex);
|
2008-08-04 16:20:15 +04:00
|
|
|
|
|
|
|
atomic_inc(&root->fs_info->throttle_gen);
|
|
|
|
wake_up(&root->fs_info->transaction_throttle);
|
|
|
|
cond_resched();
|
|
|
|
|
2008-07-28 23:32:19 +04:00
|
|
|
BUG_ON(ret);
|
|
|
|
info++;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2008-06-26 00:01:30 +04:00
|
|
|
int drop_snap_lookup_refcount(struct btrfs_root *root, u64 start, u64 len,
|
|
|
|
u32 *refs)
|
|
|
|
{
|
2008-07-28 23:32:51 +04:00
|
|
|
int ret;
|
2008-08-01 19:27:23 +04:00
|
|
|
|
2008-07-28 23:32:51 +04:00
|
|
|
ret = lookup_extent_ref(NULL, root, start, len, refs);
|
2008-08-01 19:27:23 +04:00
|
|
|
BUG_ON(ret);
|
|
|
|
|
|
|
|
#if 0 // some debugging code in case we see problems here
|
|
|
|
/* if the refs count is one, it won't get increased again. But
|
|
|
|
* if the ref count is > 1, someone may be decreasing it at
|
|
|
|
* the same time we are.
|
|
|
|
*/
|
|
|
|
if (*refs != 1) {
|
|
|
|
struct extent_buffer *eb = NULL;
|
|
|
|
eb = btrfs_find_create_tree_block(root, start, len);
|
|
|
|
if (eb)
|
|
|
|
btrfs_tree_lock(eb);
|
|
|
|
|
|
|
|
mutex_lock(&root->fs_info->alloc_mutex);
|
|
|
|
ret = lookup_extent_ref(NULL, root, start, len, refs);
|
|
|
|
BUG_ON(ret);
|
|
|
|
mutex_unlock(&root->fs_info->alloc_mutex);
|
|
|
|
|
|
|
|
if (eb) {
|
|
|
|
btrfs_tree_unlock(eb);
|
|
|
|
free_extent_buffer(eb);
|
|
|
|
}
|
|
|
|
if (*refs == 1) {
|
|
|
|
printk("block %llu went down to one during drop_snap\n",
|
|
|
|
(unsigned long long)start);
|
|
|
|
}
|
|
|
|
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
2008-06-26 00:01:31 +04:00
|
|
|
cond_resched();
|
2008-07-28 23:32:51 +04:00
|
|
|
return ret;
|
2008-06-26 00:01:30 +04:00
|
|
|
}
|
|
|
|
|
2007-03-13 18:09:37 +03:00
|
|
|
/*
|
|
|
|
* helper function for drop_snapshot, this walks down the tree dropping ref
|
|
|
|
* counts as it goes.
|
|
|
|
*/
|
2008-01-03 18:01:48 +03:00
|
|
|
static int noinline walk_down_tree(struct btrfs_trans_handle *trans,
|
|
|
|
struct btrfs_root *root,
|
|
|
|
struct btrfs_path *path, int *level)
|
2007-03-10 14:35:47 +03:00
|
|
|
{
|
2007-12-11 17:25:06 +03:00
|
|
|
u64 root_owner;
|
|
|
|
u64 root_gen;
|
|
|
|
u64 bytenr;
|
2008-05-12 20:59:19 +04:00
|
|
|
u64 ptr_gen;
|
2007-10-16 00:14:19 +04:00
|
|
|
struct extent_buffer *next;
|
|
|
|
struct extent_buffer *cur;
|
2007-12-11 17:25:06 +03:00
|
|
|
struct extent_buffer *parent;
|
2008-07-28 23:32:19 +04:00
|
|
|
struct btrfs_leaf_ref *ref;
|
2007-10-16 00:15:53 +04:00
|
|
|
u32 blocksize;
|
2007-03-10 14:35:47 +03:00
|
|
|
int ret;
|
|
|
|
u32 refs;
|
|
|
|
|
2007-04-02 19:20:42 +04:00
|
|
|
WARN_ON(*level < 0);
|
|
|
|
WARN_ON(*level >= BTRFS_MAX_LEVEL);
|
2008-06-26 00:01:30 +04:00
|
|
|
ret = drop_snap_lookup_refcount(root, path->nodes[*level]->start,
|
2007-10-16 00:15:53 +04:00
|
|
|
path->nodes[*level]->len, &refs);
|
2007-03-10 14:35:47 +03:00
|
|
|
BUG_ON(ret);
|
|
|
|
if (refs > 1)
|
|
|
|
goto out;
|
2007-06-20 00:23:05 +04:00
|
|
|
|
2007-03-13 18:09:37 +03:00
|
|
|
/*
|
|
|
|
* walk down to the last node level and free all the leaves
|
|
|
|
*/
|
2007-03-27 14:33:00 +04:00
|
|
|
while(*level >= 0) {
|
2007-04-02 19:20:42 +04:00
|
|
|
WARN_ON(*level < 0);
|
|
|
|
WARN_ON(*level >= BTRFS_MAX_LEVEL);
|
2007-03-10 14:35:47 +03:00
|
|
|
cur = path->nodes[*level];
|
2007-06-20 00:23:05 +04:00
|
|
|
|
2007-10-16 00:14:19 +04:00
|
|
|
if (btrfs_header_level(cur) != *level)
|
2007-04-02 18:50:19 +04:00
|
|
|
WARN_ON(1);
|
2007-06-20 00:23:05 +04:00
|
|
|
|
2007-03-12 19:01:18 +03:00
|
|
|
if (path->slots[*level] >=
|
2007-10-16 00:14:19 +04:00
|
|
|
btrfs_header_nritems(cur))
|
2007-03-10 14:35:47 +03:00
|
|
|
break;
|
2007-03-27 14:33:00 +04:00
|
|
|
if (*level == 0) {
|
2008-09-06 00:13:11 +04:00
|
|
|
ret = btrfs_drop_leaf_ref(trans, root, cur);
|
2007-03-27 14:33:00 +04:00
|
|
|
BUG_ON(ret);
|
|
|
|
break;
|
|
|
|
}
|
2007-10-16 00:15:53 +04:00
|
|
|
bytenr = btrfs_node_blockptr(cur, path->slots[*level]);
|
2008-05-12 20:59:19 +04:00
|
|
|
ptr_gen = btrfs_node_ptr_generation(cur, path->slots[*level]);
|
2007-10-16 00:15:53 +04:00
|
|
|
blocksize = btrfs_level_size(root, *level - 1);
|
2008-06-26 00:01:30 +04:00
|
|
|
|
2008-06-26 00:01:30 +04:00
|
|
|
ret = drop_snap_lookup_refcount(root, bytenr, blocksize, &refs);
|
2007-03-27 14:33:00 +04:00
|
|
|
BUG_ON(ret);
|
|
|
|
if (refs != 1) {
|
2007-12-11 17:25:06 +03:00
|
|
|
parent = path->nodes[*level];
|
|
|
|
root_owner = btrfs_header_owner(parent);
|
|
|
|
root_gen = btrfs_header_generation(parent);
|
2007-03-10 14:35:47 +03:00
|
|
|
path->slots[*level]++;
|
2008-08-01 19:27:23 +04:00
|
|
|
|
|
|
|
mutex_lock(&root->fs_info->alloc_mutex);
|
2008-06-26 00:01:30 +04:00
|
|
|
ret = __btrfs_free_extent(trans, root, bytenr,
|
2007-12-11 17:25:06 +03:00
|
|
|
blocksize, root_owner,
|
|
|
|
root_gen, 0, 0, 1);
|
2007-03-10 14:35:47 +03:00
|
|
|
BUG_ON(ret);
|
2008-08-01 19:27:23 +04:00
|
|
|
mutex_unlock(&root->fs_info->alloc_mutex);
|
2008-08-01 21:11:41 +04:00
|
|
|
|
|
|
|
atomic_inc(&root->fs_info->throttle_gen);
|
|
|
|
wake_up(&root->fs_info->transaction_throttle);
|
2008-08-04 16:20:15 +04:00
|
|
|
cond_resched();
|
2008-08-01 21:11:41 +04:00
|
|
|
|
2007-03-10 14:35:47 +03:00
|
|
|
continue;
|
|
|
|
}
|
2008-08-01 19:27:23 +04:00
|
|
|
/*
|
|
|
|
* at this point, we have a single ref, and since the
|
|
|
|
* only place referencing this extent is a dead root
|
|
|
|
* the reference count should never go higher.
|
|
|
|
* So, we don't need to check it again
|
|
|
|
*/
|
2008-07-28 23:32:19 +04:00
|
|
|
if (*level == 1) {
|
|
|
|
struct btrfs_key key;
|
|
|
|
btrfs_node_key_to_cpu(cur, &key, path->slots[*level]);
|
2008-07-28 23:32:51 +04:00
|
|
|
ref = btrfs_lookup_leaf_ref(root, bytenr);
|
2008-07-28 23:32:19 +04:00
|
|
|
if (ref) {
|
2008-09-06 00:13:11 +04:00
|
|
|
ret = cache_drop_leaf_ref(trans, root, ref);
|
2008-07-28 23:32:19 +04:00
|
|
|
BUG_ON(ret);
|
|
|
|
btrfs_remove_leaf_ref(root, ref);
|
2008-07-31 00:29:20 +04:00
|
|
|
btrfs_free_leaf_ref(root, ref);
|
2008-07-28 23:32:19 +04:00
|
|
|
*level = 0;
|
|
|
|
break;
|
|
|
|
}
|
2008-07-31 18:48:37 +04:00
|
|
|
if (printk_ratelimit())
|
|
|
|
printk("leaf ref miss for bytenr %llu\n",
|
|
|
|
(unsigned long long)bytenr);
|
2008-07-28 23:32:19 +04:00
|
|
|
}
|
2007-10-16 00:15:53 +04:00
|
|
|
next = btrfs_find_tree_block(root, bytenr, blocksize);
|
2008-05-12 21:39:03 +04:00
|
|
|
if (!next || !btrfs_buffer_uptodate(next, ptr_gen)) {
|
2007-10-16 00:14:19 +04:00
|
|
|
free_extent_buffer(next);
|
2008-06-26 00:01:30 +04:00
|
|
|
|
2008-05-12 20:59:19 +04:00
|
|
|
next = read_tree_block(root, bytenr, blocksize,
|
|
|
|
ptr_gen);
|
2008-06-26 00:01:31 +04:00
|
|
|
cond_resched();
|
2008-08-01 19:27:23 +04:00
|
|
|
#if 0
|
|
|
|
/*
|
|
|
|
* this is a debugging check and can go away
|
|
|
|
* the ref should never go all the way down to 1
|
|
|
|
* at this point
|
|
|
|
*/
|
2008-07-17 20:53:50 +04:00
|
|
|
ret = lookup_extent_ref(NULL, root, bytenr, blocksize,
|
|
|
|
&refs);
|
2007-08-10 22:06:19 +04:00
|
|
|
BUG_ON(ret);
|
2008-08-01 19:27:23 +04:00
|
|
|
WARN_ON(refs != 1);
|
|
|
|
#endif
|
2007-08-10 22:06:19 +04:00
|
|
|
}
|
2007-04-02 19:20:42 +04:00
|
|
|
WARN_ON(*level <= 0);
|
2007-03-12 16:03:27 +03:00
|
|
|
if (path->nodes[*level-1])
|
2007-10-16 00:14:19 +04:00
|
|
|
free_extent_buffer(path->nodes[*level-1]);
|
2007-03-10 14:35:47 +03:00
|
|
|
path->nodes[*level-1] = next;
|
2007-10-16 00:14:19 +04:00
|
|
|
*level = btrfs_header_level(next);
|
2007-03-10 14:35:47 +03:00
|
|
|
path->slots[*level] = 0;
|
2008-08-04 16:20:15 +04:00
|
|
|
cond_resched();
|
2007-03-10 14:35:47 +03:00
|
|
|
}
|
|
|
|
out:
|
2007-04-02 19:20:42 +04:00
|
|
|
WARN_ON(*level < 0);
|
|
|
|
WARN_ON(*level >= BTRFS_MAX_LEVEL);
|
2007-12-11 17:25:06 +03:00
|
|
|
|
|
|
|
if (path->nodes[*level] == root->node) {
|
|
|
|
parent = path->nodes[*level];
|
2008-07-28 23:32:19 +04:00
|
|
|
bytenr = path->nodes[*level]->start;
|
2007-12-11 17:25:06 +03:00
|
|
|
} else {
|
|
|
|
parent = path->nodes[*level + 1];
|
2008-07-28 23:32:19 +04:00
|
|
|
bytenr = btrfs_node_blockptr(parent, path->slots[*level + 1]);
|
2007-12-11 17:25:06 +03:00
|
|
|
}
|
|
|
|
|
2008-07-28 23:32:19 +04:00
|
|
|
blocksize = btrfs_level_size(root, *level);
|
|
|
|
root_owner = btrfs_header_owner(parent);
|
2007-12-11 17:25:06 +03:00
|
|
|
root_gen = btrfs_header_generation(parent);
|
2008-07-28 23:32:19 +04:00
|
|
|
|
2008-08-01 19:27:23 +04:00
|
|
|
mutex_lock(&root->fs_info->alloc_mutex);
|
2008-07-28 23:32:19 +04:00
|
|
|
ret = __btrfs_free_extent(trans, root, bytenr, blocksize,
|
|
|
|
root_owner, root_gen, 0, 0, 1);
|
2007-10-16 00:14:19 +04:00
|
|
|
free_extent_buffer(path->nodes[*level]);
|
2007-03-10 14:35:47 +03:00
|
|
|
path->nodes[*level] = NULL;
|
|
|
|
*level += 1;
|
|
|
|
BUG_ON(ret);
|
2008-06-26 00:01:30 +04:00
|
|
|
mutex_unlock(&root->fs_info->alloc_mutex);
|
2008-08-01 19:27:23 +04:00
|
|
|
|
2008-06-26 00:01:31 +04:00
|
|
|
cond_resched();
|
2007-03-10 14:35:47 +03:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2007-03-13 18:09:37 +03:00
|
|
|
/*
|
|
|
|
* helper for dropping snapshots. This walks back up the tree in the path
|
|
|
|
* to find the first node higher up where we haven't yet gone through
|
|
|
|
* all the slots
|
|
|
|
*/
|
2008-01-03 18:01:48 +03:00
|
|
|
static int noinline walk_up_tree(struct btrfs_trans_handle *trans,
|
|
|
|
struct btrfs_root *root,
|
|
|
|
struct btrfs_path *path, int *level)
|
2007-03-10 14:35:47 +03:00
|
|
|
{
|
2007-12-11 17:25:06 +03:00
|
|
|
u64 root_owner;
|
|
|
|
u64 root_gen;
|
|
|
|
struct btrfs_root_item *root_item = &root->root_item;
|
2007-03-10 14:35:47 +03:00
|
|
|
int i;
|
|
|
|
int slot;
|
|
|
|
int ret;
|
2007-08-07 23:52:19 +04:00
|
|
|
|
2007-03-13 17:46:10 +03:00
|
|
|
for(i = *level; i < BTRFS_MAX_LEVEL - 1 && path->nodes[i]; i++) {
|
2007-03-10 14:35:47 +03:00
|
|
|
slot = path->slots[i];
|
2007-10-16 00:14:19 +04:00
|
|
|
if (slot < btrfs_header_nritems(path->nodes[i]) - 1) {
|
|
|
|
struct extent_buffer *node;
|
|
|
|
struct btrfs_disk_key disk_key;
|
|
|
|
node = path->nodes[i];
|
2007-03-10 14:35:47 +03:00
|
|
|
path->slots[i]++;
|
|
|
|
*level = i;
|
2007-08-07 23:52:19 +04:00
|
|
|
WARN_ON(*level == 0);
|
2007-10-16 00:14:19 +04:00
|
|
|
btrfs_node_key(node, &disk_key, path->slots[i]);
|
2007-08-07 23:52:19 +04:00
|
|
|
memcpy(&root_item->drop_progress,
|
2007-10-16 00:14:19 +04:00
|
|
|
&disk_key, sizeof(disk_key));
|
2007-08-07 23:52:19 +04:00
|
|
|
root_item->drop_level = i;
|
2007-03-10 14:35:47 +03:00
|
|
|
return 0;
|
|
|
|
} else {
|
2007-12-11 17:25:06 +03:00
|
|
|
if (path->nodes[*level] == root->node) {
|
|
|
|
root_owner = root->root_key.objectid;
|
|
|
|
root_gen =
|
|
|
|
btrfs_header_generation(path->nodes[*level]);
|
|
|
|
} else {
|
|
|
|
struct extent_buffer *node;
|
|
|
|
node = path->nodes[*level + 1];
|
|
|
|
root_owner = btrfs_header_owner(node);
|
|
|
|
root_gen = btrfs_header_generation(node);
|
|
|
|
}
|
2007-03-16 23:20:31 +03:00
|
|
|
ret = btrfs_free_extent(trans, root,
|
2007-10-16 00:15:53 +04:00
|
|
|
path->nodes[*level]->start,
|
2007-12-11 17:25:06 +03:00
|
|
|
path->nodes[*level]->len,
|
|
|
|
root_owner, root_gen, 0, 0, 1);
|
2007-03-27 14:33:00 +04:00
|
|
|
BUG_ON(ret);
|
2007-10-16 00:14:19 +04:00
|
|
|
free_extent_buffer(path->nodes[*level]);
|
2007-03-12 16:03:27 +03:00
|
|
|
path->nodes[*level] = NULL;
|
2007-03-10 14:35:47 +03:00
|
|
|
*level = i + 1;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
2007-03-13 18:09:37 +03:00
|
|
|
/*
|
|
|
|
* drop the reference count on the tree rooted at 'snap'. This traverses
|
|
|
|
* the tree freeing any blocks that have a ref count of zero after being
|
|
|
|
* decremented.
|
|
|
|
*/
|
2007-03-16 23:20:31 +03:00
|
|
|
int btrfs_drop_snapshot(struct btrfs_trans_handle *trans, struct btrfs_root
|
2007-08-07 23:52:19 +04:00
|
|
|
*root)
|
2007-03-10 14:35:47 +03:00
|
|
|
{
|
2007-03-13 23:47:54 +03:00
|
|
|
int ret = 0;
|
2007-03-13 18:09:37 +03:00
|
|
|
int wret;
|
2007-03-10 14:35:47 +03:00
|
|
|
int level;
|
2007-04-02 19:20:42 +04:00
|
|
|
struct btrfs_path *path;
|
2007-03-10 14:35:47 +03:00
|
|
|
int i;
|
|
|
|
int orig_level;
|
2007-08-07 23:52:19 +04:00
|
|
|
struct btrfs_root_item *root_item = &root->root_item;
|
2007-03-10 14:35:47 +03:00
|
|
|
|
2008-06-26 00:01:30 +04:00
|
|
|
WARN_ON(!mutex_is_locked(&root->fs_info->drop_mutex));
|
2007-04-02 19:20:42 +04:00
|
|
|
path = btrfs_alloc_path();
|
|
|
|
BUG_ON(!path);
|
2007-03-10 14:35:47 +03:00
|
|
|
|
2007-10-16 00:14:19 +04:00
|
|
|
level = btrfs_header_level(root->node);
|
2007-03-10 14:35:47 +03:00
|
|
|
orig_level = level;
|
2007-08-07 23:52:19 +04:00
|
|
|
if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
|
|
|
|
path->nodes[level] = root->node;
|
2007-10-16 00:14:48 +04:00
|
|
|
extent_buffer_get(root->node);
|
2007-08-07 23:52:19 +04:00
|
|
|
path->slots[level] = 0;
|
|
|
|
} else {
|
|
|
|
struct btrfs_key key;
|
2007-10-16 00:14:19 +04:00
|
|
|
struct btrfs_disk_key found_key;
|
|
|
|
struct extent_buffer *node;
|
2007-08-08 00:15:09 +04:00
|
|
|
|
2007-08-07 23:52:19 +04:00
|
|
|
btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);
|
2007-08-08 00:15:09 +04:00
|
|
|
level = root_item->drop_level;
|
|
|
|
path->lowest_level = level;
|
2007-08-07 23:52:19 +04:00
|
|
|
wret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
|
2007-08-08 00:15:09 +04:00
|
|
|
if (wret < 0) {
|
2007-08-07 23:52:19 +04:00
|
|
|
ret = wret;
|
|
|
|
goto out;
|
|
|
|
}
|
2007-10-16 00:14:19 +04:00
|
|
|
node = path->nodes[level];
|
|
|
|
btrfs_node_key(node, &found_key, path->slots[level]);
|
|
|
|
WARN_ON(memcmp(&found_key, &root_item->drop_progress,
|
|
|
|
sizeof(found_key)));
|
2008-07-08 22:19:17 +04:00
|
|
|
/*
|
|
|
|
* unlock our path, this is safe because only this
|
|
|
|
* function is allowed to delete this snapshot
|
|
|
|
*/
|
2008-06-26 00:01:30 +04:00
|
|
|
for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
|
|
|
|
if (path->nodes[i] && path->locks[i]) {
|
|
|
|
path->locks[i] = 0;
|
|
|
|
btrfs_tree_unlock(path->nodes[i]);
|
|
|
|
}
|
|
|
|
}
|
2007-08-07 23:52:19 +04:00
|
|
|
}
|
2007-03-10 14:35:47 +03:00
|
|
|
while(1) {
|
2007-04-02 19:20:42 +04:00
|
|
|
wret = walk_down_tree(trans, root, path, &level);
|
2007-03-13 18:09:37 +03:00
|
|
|
if (wret > 0)
|
2007-03-10 14:35:47 +03:00
|
|
|
break;
|
2007-03-13 18:09:37 +03:00
|
|
|
if (wret < 0)
|
|
|
|
ret = wret;
|
|
|
|
|
2007-04-02 19:20:42 +04:00
|
|
|
wret = walk_up_tree(trans, root, path, &level);
|
2007-03-13 18:09:37 +03:00
|
|
|
if (wret > 0)
|
2007-03-10 14:35:47 +03:00
|
|
|
break;
|
2007-03-13 18:09:37 +03:00
|
|
|
if (wret < 0)
|
|
|
|
ret = wret;
|
2008-06-26 00:01:31 +04:00
|
|
|
if (trans->transaction->in_commit) {
|
|
|
|
ret = -EAGAIN;
|
|
|
|
break;
|
|
|
|
}
|
2008-08-01 21:11:41 +04:00
|
|
|
atomic_inc(&root->fs_info->throttle_gen);
|
2008-07-28 23:32:51 +04:00
|
|
|
wake_up(&root->fs_info->transaction_throttle);
|
2007-03-10 14:35:47 +03:00
|
|
|
}
|
2007-03-12 16:03:27 +03:00
|
|
|
for (i = 0; i <= orig_level; i++) {
|
2007-04-02 19:20:42 +04:00
|
|
|
if (path->nodes[i]) {
|
2007-10-16 00:14:19 +04:00
|
|
|
free_extent_buffer(path->nodes[i]);
|
2007-10-16 00:18:56 +04:00
|
|
|
path->nodes[i] = NULL;
|
2007-03-12 16:03:27 +03:00
|
|
|
}
|
2007-03-10 14:35:47 +03:00
|
|
|
}
|
2007-08-07 23:52:19 +04:00
|
|
|
out:
|
2007-04-02 19:20:42 +04:00
|
|
|
btrfs_free_path(path);
|
2007-03-13 18:09:37 +03:00
|
|
|
return ret;
|
2007-03-10 14:35:47 +03:00
|
|
|
}
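/*
 * Illustrative sketch: the shape of a caller that keeps dropping a dead
 * root.  btrfs_drop_snapshot() records its progress in
 * root_item->drop_progress and returns -EAGAIN when the running
 * transaction starts to commit, so the caller can end the transaction and
 * simply call it again until the whole tree is gone.  The helper name and
 * the locking shown are assumptions for the example.
 */
#if 0
static int drop_dead_root_sketch(struct btrfs_root *tree_root,
				 struct btrfs_root *dead_root)
{
	struct btrfs_trans_handle *trans;
	int ret;

	while (1) {
		trans = btrfs_start_transaction(tree_root, 1);

		mutex_lock(&dead_root->fs_info->drop_mutex);
		ret = btrfs_drop_snapshot(trans, dead_root);
		mutex_unlock(&dead_root->fs_info->drop_mutex);

		btrfs_end_transaction(trans, tree_root);
		if (ret != -EAGAIN)
			break;
	}
	return ret;
}
#endif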
|
2007-04-27 00:46:15 +04:00
|
|
|
|
2007-10-16 00:15:19 +04:00
|
|
|
int btrfs_free_block_groups(struct btrfs_fs_info *info)
|
2007-04-27 00:46:15 +04:00
|
|
|
{
|
2008-09-23 21:14:11 +04:00
|
|
|
struct btrfs_block_group_cache *block_group;
|
|
|
|
struct rb_node *n;
|
2008-06-26 00:01:30 +04:00
|
|
|
|
|
|
|
mutex_lock(&info->alloc_mutex);
|
2008-09-23 21:14:11 +04:00
|
|
|
spin_lock(&info->block_group_cache_lock);
|
|
|
|
while ((n = rb_last(&info->block_group_cache_tree)) != NULL) {
|
|
|
|
block_group = rb_entry(n, struct btrfs_block_group_cache,
|
|
|
|
cache_node);
|
|
|
|
|
|
|
|
btrfs_remove_free_space_cache(block_group);
|
|
|
|
rb_erase(&block_group->cache_node,
|
|
|
|
&info->block_group_cache_tree);
|
|
|
|
spin_lock(&block_group->space_info->lock);
|
|
|
|
list_del(&block_group->list);
|
|
|
|
spin_unlock(&block_group->space_info->lock);
|
|
|
|
kfree(block_group);
|
|
|
|
}
|
|
|
|
spin_unlock(&info->block_group_cache_lock);
|
2008-06-26 00:01:30 +04:00
|
|
|
mutex_unlock(&info->alloc_mutex);
|
2007-05-06 18:15:01 +04:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2008-04-28 17:02:36 +04:00
|
|
|
static unsigned long calc_ra(unsigned long start, unsigned long last,
|
|
|
|
unsigned long nr)
|
|
|
|
{
|
|
|
|
return min(last, start + nr - 1);
|
|
|
|
}
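/*
 * Worked example: with a readahead window of nr = 32 pages,
 * calc_ra(0, 100, 32) returns min(100, 0 + 32 - 1) = 31, so
 * relocate_inode_pages() below kicks readahead for pages 0..31, does it
 * again at page 32 for 32..63, and so on, never reading past last_index.
 */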
|
|
|
|
|
2008-01-03 18:01:48 +03:00
|
|
|
static int noinline relocate_inode_pages(struct inode *inode, u64 start,
|
|
|
|
u64 len)
|
2007-12-22 00:27:24 +03:00
|
|
|
{
|
|
|
|
u64 page_start;
|
|
|
|
u64 page_end;
|
|
|
|
unsigned long last_index;
|
|
|
|
unsigned long i;
|
|
|
|
struct page *page;
|
2008-01-25 00:13:08 +03:00
|
|
|
struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
|
2008-01-03 17:08:48 +03:00
|
|
|
struct file_ra_state *ra;
|
2008-04-28 17:02:36 +04:00
|
|
|
unsigned long total_read = 0;
|
|
|
|
unsigned long ra_pages;
|
2008-07-24 19:57:52 +04:00
|
|
|
struct btrfs_ordered_extent *ordered;
|
2008-05-07 19:43:44 +04:00
|
|
|
struct btrfs_trans_handle *trans;
|
2008-01-03 17:08:48 +03:00
|
|
|
|
|
|
|
ra = kzalloc(sizeof(*ra), GFP_NOFS);
|
2007-12-22 00:27:24 +03:00
|
|
|
|
|
|
|
mutex_lock(&inode->i_mutex);
|
2008-01-03 17:08:48 +03:00
|
|
|
i = start >> PAGE_CACHE_SHIFT;
|
2007-12-22 00:27:24 +03:00
|
|
|
last_index = (start + len - 1) >> PAGE_CACHE_SHIFT;
|
|
|
|
|
2008-04-28 17:02:36 +04:00
|
|
|
ra_pages = BTRFS_I(inode)->root->fs_info->bdi.ra_pages;
|
|
|
|
|
2008-01-03 17:08:48 +03:00
|
|
|
file_ra_state_init(ra, inode->i_mapping);
|
2007-12-22 00:27:24 +03:00
|
|
|
|
2008-01-03 17:08:48 +03:00
|
|
|
for (; i <= last_index; i++) {
|
2008-04-28 17:02:36 +04:00
|
|
|
if (total_read % ra_pages == 0) {
|
|
|
|
btrfs_force_ra(inode->i_mapping, ra, NULL, i,
|
|
|
|
calc_ra(i, last_index, ra_pages));
|
|
|
|
}
|
|
|
|
total_read++;
|
2008-07-24 19:57:52 +04:00
|
|
|
again:
|
|
|
|
if (((u64)i << PAGE_CACHE_SHIFT) > i_size_read(inode))
|
2008-05-07 19:43:44 +04:00
|
|
|
goto truncate_racing;
|
2007-12-22 00:27:24 +03:00
|
|
|
page = grab_cache_page(inode->i_mapping, i);
|
2008-05-07 19:43:44 +04:00
|
|
|
if (!page) {
|
2007-12-22 00:27:24 +03:00
|
|
|
goto out_unlock;
|
2008-05-07 19:43:44 +04:00
|
|
|
}
|
2007-12-22 00:27:24 +03:00
|
|
|
if (!PageUptodate(page)) {
|
|
|
|
btrfs_readpage(NULL, page);
|
|
|
|
lock_page(page);
|
|
|
|
if (!PageUptodate(page)) {
|
|
|
|
unlock_page(page);
|
|
|
|
page_cache_release(page);
|
|
|
|
goto out_unlock;
|
|
|
|
}
|
|
|
|
}
|
2008-04-28 23:29:52 +04:00
|
|
|
wait_on_page_writeback(page);
|
2008-07-24 19:57:52 +04:00
|
|
|
|
2007-12-22 00:27:24 +03:00
|
|
|
page_start = (u64)page->index << PAGE_CACHE_SHIFT;
|
|
|
|
page_end = page_start + PAGE_CACHE_SIZE - 1;
|
2008-01-25 00:13:08 +03:00
|
|
|
lock_extent(io_tree, page_start, page_end, GFP_NOFS);
|
2007-12-22 00:27:24 +03:00
|
|
|
|
2008-07-24 19:57:52 +04:00
|
|
|
ordered = btrfs_lookup_ordered_extent(inode, page_start);
|
|
|
|
if (ordered) {
|
|
|
|
unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
|
|
|
|
unlock_page(page);
|
|
|
|
page_cache_release(page);
|
|
|
|
btrfs_start_ordered_extent(inode, ordered, 1);
|
|
|
|
btrfs_put_ordered_extent(ordered);
|
|
|
|
goto again;
|
|
|
|
}
|
|
|
|
set_page_extent_mapped(page);
|
|
|
|
|
2008-08-01 19:27:23 +04:00
|
|
|
/*
|
|
|
|
* make sure page_mkwrite is called for this page if userland
|
|
|
|
* wants to change it from mmap
|
|
|
|
*/
|
|
|
|
clear_page_dirty_for_io(page);
|
2008-07-24 19:57:52 +04:00
|
|
|
|
2008-08-05 07:17:27 +04:00
|
|
|
btrfs_set_extent_delalloc(inode, page_start, page_end);
|
2008-05-07 19:43:44 +04:00
|
|
|
set_page_dirty(page);
|
2007-12-22 00:27:24 +03:00
|
|
|
|
2008-01-25 00:13:08 +03:00
|
|
|
unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
|
2007-12-22 00:27:24 +03:00
|
|
|
unlock_page(page);
|
|
|
|
page_cache_release(page);
|
|
|
|
}
|
|
|
|
|
|
|
|
out_unlock:
|
2008-07-24 19:57:52 +04:00
|
|
|
/* we have to start the IO in order to get the ordered extents
|
|
|
|
 * instantiated. This allows the relocation code to wait
|
|
|
|
* for all the ordered extents to hit the disk.
|
|
|
|
*
|
|
|
|
* Otherwise, it would constantly loop over the same extents
|
|
|
|
* because the old ones don't get deleted until the IO is
|
|
|
|
* started
|
|
|
|
*/
|
|
|
|
btrfs_fdatawrite_range(inode->i_mapping, start, start + len - 1,
|
|
|
|
WB_SYNC_NONE);
|
2008-04-28 23:29:52 +04:00
|
|
|
kfree(ra);
|
2008-05-07 19:43:44 +04:00
|
|
|
trans = btrfs_start_transaction(BTRFS_I(inode)->root, 1);
|
|
|
|
if (trans) {
|
|
|
|
btrfs_end_transaction(trans, BTRFS_I(inode)->root);
|
|
|
|
mark_inode_dirty(inode);
|
|
|
|
}
|
2007-12-22 00:27:24 +03:00
|
|
|
mutex_unlock(&inode->i_mutex);
|
|
|
|
return 0;
|
2008-05-07 19:43:44 +04:00
|
|
|
|
|
|
|
truncate_racing:
|
|
|
|
vmtruncate(inode, inode->i_size);
|
|
|
|
balance_dirty_pages_ratelimited_nr(inode->i_mapping,
|
|
|
|
total_read);
|
|
|
|
goto out_unlock;
|
2007-12-22 00:27:24 +03:00
|
|
|
}
|
|
|
|
|
2008-05-08 21:26:18 +04:00
|
|
|
/*
|
|
|
|
* The back references tell us which tree holds a ref on a block,
|
|
|
|
* but it is possible for the tree root field in the reference to
|
|
|
|
* reflect the original root before a snapshot was made. In this
|
|
|
|
* case we should search through all the children of a given root
|
|
|
|
* to find potential holders of references on a block.
|
|
|
|
*
|
|
|
|
* Instead, we do something a little less fancy and just search
|
|
|
|
* all the roots for a given key/block combination.
|
|
|
|
*/
|
|
|
|
static int find_root_for_ref(struct btrfs_root *root,
|
|
|
|
struct btrfs_path *path,
|
|
|
|
struct btrfs_key *key0,
|
|
|
|
int level,
|
|
|
|
int file_key,
|
|
|
|
struct btrfs_root **found_root,
|
|
|
|
u64 bytenr)
|
|
|
|
{
|
|
|
|
struct btrfs_key root_location;
|
|
|
|
struct btrfs_root *cur_root = *found_root;
|
|
|
|
struct btrfs_file_extent_item *file_extent;
|
|
|
|
u64 root_search_start = BTRFS_FS_TREE_OBJECTID;
|
|
|
|
u64 found_bytenr;
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
root_location.offset = (u64)-1;
|
|
|
|
root_location.type = BTRFS_ROOT_ITEM_KEY;
|
|
|
|
path->lowest_level = level;
|
|
|
|
path->reada = 0;
|
|
|
|
while(1) {
|
|
|
|
ret = btrfs_search_slot(NULL, cur_root, key0, path, 0, 0);
|
|
|
|
found_bytenr = 0;
|
|
|
|
if (ret == 0 && file_key) {
|
|
|
|
struct extent_buffer *leaf = path->nodes[0];
|
|
|
|
file_extent = btrfs_item_ptr(leaf, path->slots[0],
|
|
|
|
struct btrfs_file_extent_item);
|
|
|
|
if (btrfs_file_extent_type(leaf, file_extent) ==
|
|
|
|
BTRFS_FILE_EXTENT_REG) {
|
|
|
|
found_bytenr =
|
|
|
|
btrfs_file_extent_disk_bytenr(leaf,
|
|
|
|
file_extent);
|
|
|
|
}
|
2008-05-09 19:46:48 +04:00
|
|
|
} else if (!file_key) {
|
2008-05-08 21:26:18 +04:00
|
|
|
if (path->nodes[level])
|
|
|
|
found_bytenr = path->nodes[level]->start;
|
|
|
|
}
|
|
|
|
|
|
|
|
btrfs_release_path(cur_root, path);
|
|
|
|
|
|
|
|
if (found_bytenr == bytenr) {
|
|
|
|
*found_root = cur_root;
|
|
|
|
ret = 0;
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
ret = btrfs_search_root(root->fs_info->tree_root,
|
|
|
|
root_search_start, &root_search_start);
|
|
|
|
if (ret)
|
|
|
|
break;
|
|
|
|
|
|
|
|
root_location.objectid = root_search_start;
|
|
|
|
cur_root = btrfs_read_fs_root_no_name(root->fs_info,
|
|
|
|
&root_location);
|
|
|
|
if (!cur_root) {
|
|
|
|
ret = 1;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
out:
|
|
|
|
path->lowest_level = 0;
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2008-01-03 17:08:48 +03:00
|
|
|
/*
|
|
|
|
* note, this releases the path
|
|
|
|
*/
|
2008-01-03 18:01:48 +03:00
|
|
|
static int noinline relocate_one_reference(struct btrfs_root *extent_root,
|
2007-12-22 00:27:24 +03:00
|
|
|
struct btrfs_path *path,
|
2008-05-24 22:04:53 +04:00
|
|
|
struct btrfs_key *extent_key,
|
|
|
|
u64 *last_file_objectid,
|
|
|
|
u64 *last_file_offset,
|
|
|
|
u64 *last_file_root,
|
|
|
|
u64 last_extent)
|
2007-12-22 00:27:24 +03:00
|
|
|
{
|
|
|
|
struct inode *inode;
|
|
|
|
struct btrfs_root *found_root;
|
2008-05-08 21:26:18 +04:00
|
|
|
struct btrfs_key root_location;
|
|
|
|
struct btrfs_key found_key;
|
2008-01-03 17:08:48 +03:00
|
|
|
struct btrfs_extent_ref *ref;
|
|
|
|
u64 ref_root;
|
|
|
|
u64 ref_gen;
|
|
|
|
u64 ref_objectid;
|
|
|
|
u64 ref_offset;
|
2007-12-22 00:27:24 +03:00
|
|
|
int ret;
|
2008-05-08 21:26:18 +04:00
|
|
|
int level;
|
2007-12-22 00:27:24 +03:00
|
|
|
|
2008-07-08 22:19:17 +04:00
|
|
|
WARN_ON(!mutex_is_locked(&extent_root->fs_info->alloc_mutex));
|
|
|
|
|
2008-01-03 17:08:48 +03:00
|
|
|
ref = btrfs_item_ptr(path->nodes[0], path->slots[0],
|
|
|
|
struct btrfs_extent_ref);
|
|
|
|
ref_root = btrfs_ref_root(path->nodes[0], ref);
|
|
|
|
ref_gen = btrfs_ref_generation(path->nodes[0], ref);
|
|
|
|
ref_objectid = btrfs_ref_objectid(path->nodes[0], ref);
|
|
|
|
ref_offset = btrfs_ref_offset(path->nodes[0], ref);
|
|
|
|
btrfs_release_path(extent_root, path);
|
|
|
|
|
2008-05-08 21:26:18 +04:00
|
|
|
root_location.objectid = ref_root;
|
2007-12-22 00:27:24 +03:00
|
|
|
if (ref_gen == 0)
|
2008-05-08 21:26:18 +04:00
|
|
|
root_location.offset = 0;
|
2007-12-22 00:27:24 +03:00
|
|
|
else
|
2008-05-08 21:26:18 +04:00
|
|
|
root_location.offset = (u64)-1;
|
|
|
|
root_location.type = BTRFS_ROOT_ITEM_KEY;
|
2007-12-22 00:27:24 +03:00
|
|
|
|
|
|
|
found_root = btrfs_read_fs_root_no_name(extent_root->fs_info,
|
2008-05-08 21:26:18 +04:00
|
|
|
&root_location);
|
2007-12-22 00:27:24 +03:00
|
|
|
BUG_ON(!found_root);
|
2008-07-08 22:19:17 +04:00
|
|
|
mutex_unlock(&extent_root->fs_info->alloc_mutex);
|
2007-12-22 00:27:24 +03:00
|
|
|
|
|
|
|
if (ref_objectid >= BTRFS_FIRST_FREE_OBJECTID) {
|
2008-05-08 21:26:18 +04:00
|
|
|
found_key.objectid = ref_objectid;
|
|
|
|
found_key.type = BTRFS_EXTENT_DATA_KEY;
|
|
|
|
found_key.offset = ref_offset;
|
|
|
|
level = 0;
|
|
|
|
|
2008-05-24 22:04:53 +04:00
|
|
|
if (last_extent == extent_key->objectid &&
|
|
|
|
*last_file_objectid == ref_objectid &&
|
|
|
|
*last_file_offset == ref_offset &&
|
|
|
|
*last_file_root == ref_root)
|
|
|
|
goto out;
|
|
|
|
|
2008-05-08 21:26:18 +04:00
|
|
|
ret = find_root_for_ref(extent_root, path, &found_key,
|
|
|
|
level, 1, &found_root,
|
|
|
|
extent_key->objectid);
|
|
|
|
|
|
|
|
if (ret)
|
|
|
|
goto out;
|
|
|
|
|
2008-05-24 22:04:53 +04:00
|
|
|
if (last_extent == extent_key->objectid &&
|
|
|
|
*last_file_objectid == ref_objectid &&
|
|
|
|
*last_file_offset == ref_offset &&
|
|
|
|
*last_file_root == ref_root)
|
|
|
|
goto out;
|
|
|
|
|
2007-12-22 00:27:24 +03:00
|
|
|
inode = btrfs_iget_locked(extent_root->fs_info->sb,
|
|
|
|
ref_objectid, found_root);
|
|
|
|
if (inode->i_state & I_NEW) {
|
|
|
|
/* the inode and parent dir are two different roots */
|
|
|
|
BTRFS_I(inode)->root = found_root;
|
|
|
|
BTRFS_I(inode)->location.objectid = ref_objectid;
|
|
|
|
BTRFS_I(inode)->location.type = BTRFS_INODE_ITEM_KEY;
|
|
|
|
BTRFS_I(inode)->location.offset = 0;
|
|
|
|
btrfs_read_locked_inode(inode);
|
|
|
|
unlock_new_inode(inode);
|
|
|
|
|
|
|
|
}
|
|
|
|
/* this can happen if the reference is not against
|
|
|
|
* the latest version of the tree root
|
|
|
|
*/
|
2008-07-08 22:19:17 +04:00
|
|
|
if (is_bad_inode(inode))
|
2007-12-22 00:27:24 +03:00
|
|
|
goto out;
|
2008-07-08 22:19:17 +04:00
|
|
|
|
2008-05-24 22:04:53 +04:00
|
|
|
*last_file_objectid = inode->i_ino;
|
|
|
|
*last_file_root = found_root->root_key.objectid;
|
|
|
|
*last_file_offset = ref_offset;
|
|
|
|
|
2007-12-22 00:27:24 +03:00
|
|
|
relocate_inode_pages(inode, ref_offset, extent_key->offset);
|
|
|
|
iput(inode);
|
|
|
|
} else {
|
|
|
|
struct btrfs_trans_handle *trans;
|
|
|
|
struct extent_buffer *eb;
|
2008-07-08 22:19:17 +04:00
|
|
|
int needs_lock = 0;
|
2007-12-22 00:27:24 +03:00
|
|
|
|
|
|
|
eb = read_tree_block(found_root, extent_key->objectid,
|
2008-05-12 20:59:19 +04:00
|
|
|
extent_key->offset, 0);
|
2008-06-26 00:01:30 +04:00
|
|
|
btrfs_tree_lock(eb);
|
2007-12-22 00:27:24 +03:00
|
|
|
level = btrfs_header_level(eb);
|
|
|
|
|
|
|
|
if (level == 0)
|
|
|
|
btrfs_item_key_to_cpu(eb, &found_key, 0);
|
|
|
|
else
|
|
|
|
btrfs_node_key_to_cpu(eb, &found_key, 0);
|
|
|
|
|
2008-06-26 00:01:30 +04:00
|
|
|
btrfs_tree_unlock(eb);
|
2007-12-22 00:27:24 +03:00
|
|
|
free_extent_buffer(eb);
|
|
|
|
|
2008-05-08 21:26:18 +04:00
|
|
|
ret = find_root_for_ref(extent_root, path, &found_key,
|
|
|
|
level, 0, &found_root,
|
|
|
|
extent_key->objectid);
|
|
|
|
|
|
|
|
if (ret)
|
|
|
|
goto out;
|
|
|
|
|
2008-07-08 22:19:17 +04:00
|
|
|
/*
|
|
|
|
* right here almost anything could happen to our key,
|
|
|
|
* but that's ok. The cow below will either relocate it
|
|
|
|
* or someone else will have relocated it. Either way,
|
|
|
|
* it is in a different spot than it was before and
|
|
|
|
* we're happy.
|
|
|
|
*/
|
|
|
|
|
2008-05-08 21:26:18 +04:00
|
|
|
trans = btrfs_start_transaction(found_root, 1);
|
|
|
|
|
2008-07-08 22:19:17 +04:00
|
|
|
if (found_root == extent_root->fs_info->extent_root ||
|
|
|
|
found_root == extent_root->fs_info->chunk_root ||
|
|
|
|
found_root == extent_root->fs_info->dev_root) {
|
|
|
|
needs_lock = 1;
|
|
|
|
mutex_lock(&extent_root->fs_info->alloc_mutex);
|
|
|
|
}
|
|
|
|
|
2007-12-22 00:27:24 +03:00
|
|
|
path->lowest_level = level;
|
2008-01-02 18:01:11 +03:00
|
|
|
path->reada = 2;
|
2007-12-22 00:27:24 +03:00
|
|
|
ret = btrfs_search_slot(trans, found_root, &found_key, path,
|
|
|
|
0, 1);
|
|
|
|
path->lowest_level = 0;
|
|
|
|
btrfs_release_path(found_root, path);
|
2008-07-08 22:19:17 +04:00
|
|
|
|
2008-05-24 22:04:53 +04:00
|
|
|
if (found_root == found_root->fs_info->extent_root)
|
|
|
|
btrfs_extent_post_op(trans, found_root);
|
2008-07-08 22:19:17 +04:00
|
|
|
if (needs_lock)
|
|
|
|
mutex_unlock(&extent_root->fs_info->alloc_mutex);
|
|
|
|
|
2007-12-22 00:27:24 +03:00
|
|
|
btrfs_end_transaction(trans, found_root);
|
|
|
|
|
2008-07-08 22:19:17 +04:00
|
|
|
}
|
2007-12-22 00:27:24 +03:00
|
|
|
out:
|
2008-07-08 22:19:17 +04:00
|
|
|
mutex_lock(&extent_root->fs_info->alloc_mutex);
|
2007-12-22 00:27:24 +03:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2008-05-07 19:43:44 +04:00
|
|
|
static int noinline del_extent_zero(struct btrfs_root *extent_root,
|
|
|
|
struct btrfs_path *path,
|
|
|
|
struct btrfs_key *extent_key)
|
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
struct btrfs_trans_handle *trans;
|
|
|
|
|
|
|
|
trans = btrfs_start_transaction(extent_root, 1);
|
|
|
|
ret = btrfs_search_slot(trans, extent_root, extent_key, path, -1, 1);
|
|
|
|
if (ret > 0) {
|
|
|
|
ret = -EIO;
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
if (ret < 0)
|
|
|
|
goto out;
|
|
|
|
ret = btrfs_del_item(trans, extent_root, path);
|
|
|
|
out:
|
|
|
|
btrfs_end_transaction(trans, extent_root);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2008-01-03 18:01:48 +03:00
|
|
|
static int noinline relocate_one_extent(struct btrfs_root *extent_root,
|
|
|
|
struct btrfs_path *path,
|
|
|
|
struct btrfs_key *extent_key)
|
2007-12-22 00:27:24 +03:00
|
|
|
{
|
|
|
|
struct btrfs_key key;
|
|
|
|
struct btrfs_key found_key;
|
|
|
|
struct extent_buffer *leaf;
|
2008-05-24 22:04:53 +04:00
|
|
|
u64 last_file_objectid = 0;
|
|
|
|
u64 last_file_root = 0;
|
|
|
|
u64 last_file_offset = (u64)-1;
|
|
|
|
u64 last_extent = 0;
|
2007-12-22 00:27:24 +03:00
|
|
|
u32 nritems;
|
|
|
|
u32 item_size;
|
|
|
|
int ret = 0;
|
|
|
|
|
2008-05-07 19:43:44 +04:00
|
|
|
if (extent_key->objectid == 0) {
|
|
|
|
ret = del_extent_zero(extent_root, path, extent_key);
|
|
|
|
goto out;
|
|
|
|
}
|
2007-12-22 00:27:24 +03:00
|
|
|
key.objectid = extent_key->objectid;
|
|
|
|
key.type = BTRFS_EXTENT_REF_KEY;
|
|
|
|
key.offset = 0;
|
|
|
|
|
|
|
|
while(1) {
|
|
|
|
ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
|
|
|
|
|
|
|
|
if (ret < 0)
|
|
|
|
goto out;
|
|
|
|
|
|
|
|
ret = 0;
|
|
|
|
leaf = path->nodes[0];
|
|
|
|
nritems = btrfs_header_nritems(leaf);
|
2008-05-07 19:43:44 +04:00
|
|
|
if (path->slots[0] == nritems) {
|
|
|
|
ret = btrfs_next_leaf(extent_root, path);
|
|
|
|
if (ret > 0) {
|
|
|
|
ret = 0;
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
if (ret < 0)
|
|
|
|
goto out;
|
2008-05-08 21:26:18 +04:00
|
|
|
leaf = path->nodes[0];
|
2008-05-07 19:43:44 +04:00
|
|
|
}
|
2007-12-22 00:27:24 +03:00
|
|
|
|
|
|
|
btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
|
2008-05-07 19:43:44 +04:00
|
|
|
if (found_key.objectid != extent_key->objectid) {
|
2007-12-22 00:27:24 +03:00
|
|
|
break;
|
2008-05-07 19:43:44 +04:00
|
|
|
}
|
2007-12-22 00:27:24 +03:00
|
|
|
|
2008-05-07 19:43:44 +04:00
|
|
|
if (found_key.type != BTRFS_EXTENT_REF_KEY) {
|
2007-12-22 00:27:24 +03:00
|
|
|
break;
|
2008-05-07 19:43:44 +04:00
|
|
|
}
|
2007-12-22 00:27:24 +03:00
|
|
|
|
|
|
|
key.offset = found_key.offset + 1;
|
|
|
|
item_size = btrfs_item_size_nr(leaf, path->slots[0]);
|
|
|
|
|
2008-05-24 22:04:53 +04:00
|
|
|
ret = relocate_one_reference(extent_root, path, extent_key,
|
|
|
|
&last_file_objectid,
|
|
|
|
&last_file_offset,
|
|
|
|
&last_file_root, last_extent);
|
2007-12-22 00:27:24 +03:00
|
|
|
if (ret)
|
|
|
|
goto out;
|
2008-05-24 22:04:53 +04:00
|
|
|
last_extent = extent_key->objectid;
|
2007-12-22 00:27:24 +03:00
|
|
|
}
|
|
|
|
ret = 0;
|
|
|
|
out:
|
|
|
|
btrfs_release_path(extent_root, path);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2008-04-28 23:29:52 +04:00
|
|
|
static u64 update_block_group_flags(struct btrfs_root *root, u64 flags)
|
|
|
|
{
|
|
|
|
u64 num_devices;
|
|
|
|
u64 stripped = BTRFS_BLOCK_GROUP_RAID0 |
|
|
|
|
BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10;
|
|
|
|
|
2008-05-07 19:43:44 +04:00
|
|
|
num_devices = root->fs_info->fs_devices->num_devices;
|
2008-04-28 23:29:52 +04:00
|
|
|
if (num_devices == 1) {
|
|
|
|
stripped |= BTRFS_BLOCK_GROUP_DUP;
|
|
|
|
stripped = flags & ~stripped;
|
|
|
|
|
|
|
|
/* turn raid0 into single device chunks */
|
|
|
|
if (flags & BTRFS_BLOCK_GROUP_RAID0)
|
|
|
|
return stripped;
|
|
|
|
|
|
|
|
/* turn mirroring into duplication */
|
|
|
|
if (flags & (BTRFS_BLOCK_GROUP_RAID1 |
|
|
|
|
BTRFS_BLOCK_GROUP_RAID10))
|
|
|
|
return stripped | BTRFS_BLOCK_GROUP_DUP;
|
|
|
|
return flags;
|
|
|
|
} else {
|
|
|
|
/* they already had raid on here, just return */
|
|
|
|
if (flags & stripped)
|
|
|
|
return flags;
|
|
|
|
|
|
|
|
stripped |= BTRFS_BLOCK_GROUP_DUP;
|
|
|
|
stripped = flags & ~stripped;
|
|
|
|
|
|
|
|
/* switch duplicated blocks with raid1 */
|
|
|
|
if (flags & BTRFS_BLOCK_GROUP_DUP)
|
|
|
|
return stripped | BTRFS_BLOCK_GROUP_RAID1;
|
|
|
|
|
|
|
|
/* turn single device chunks into raid0 */
|
|
|
|
return stripped | BTRFS_BLOCK_GROUP_RAID0;
|
|
|
|
}
|
|
|
|
return flags;
|
|
|
|
}
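/*
 * Worked examples of what update_block_group_flags() returns:
 *
 *   one device left:    RAID1 or RAID10 -> DUP, RAID0 -> single
 *   several devices:    DUP -> RAID1, single -> RAID0,
 *                       existing RAID0/RAID1/RAID10 flags are kept as-is
 *
 * The shrink path (__alloc_chunk_for_shrink() below) uses this when it
 * has to allocate a new chunk to move data into after the set of devices
 * has changed.
 */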
|
|
|
|
|
2008-05-24 22:04:53 +04:00
|
|
|
int __alloc_chunk_for_shrink(struct btrfs_root *root,
|
|
|
|
struct btrfs_block_group_cache *shrink_block_group,
|
|
|
|
int force)
|
|
|
|
{
|
|
|
|
struct btrfs_trans_handle *trans;
|
|
|
|
u64 new_alloc_flags;
|
|
|
|
u64 calc;
|
|
|
|
|
2008-07-23 07:06:41 +04:00
|
|
|
spin_lock(&shrink_block_group->lock);
|
2008-05-24 22:04:53 +04:00
|
|
|
if (btrfs_block_group_used(&shrink_block_group->item) > 0) {
|
2008-07-23 07:06:41 +04:00
|
|
|
spin_unlock(&shrink_block_group->lock);
|
2008-07-08 22:19:17 +04:00
|
|
|
mutex_unlock(&root->fs_info->alloc_mutex);
|
2008-07-23 07:06:41 +04:00
|
|
|
|
2008-05-24 22:04:53 +04:00
|
|
|
trans = btrfs_start_transaction(root, 1);
|
2008-07-08 22:19:17 +04:00
|
|
|
mutex_lock(&root->fs_info->alloc_mutex);
|
2008-07-23 07:06:41 +04:00
|
|
|
spin_lock(&shrink_block_group->lock);
|
2008-07-08 22:19:17 +04:00
|
|
|
|
2008-05-24 22:04:53 +04:00
|
|
|
new_alloc_flags = update_block_group_flags(root,
|
|
|
|
shrink_block_group->flags);
|
|
|
|
if (new_alloc_flags != shrink_block_group->flags) {
|
|
|
|
calc =
|
|
|
|
btrfs_block_group_used(&shrink_block_group->item);
|
|
|
|
} else {
|
|
|
|
calc = shrink_block_group->key.offset;
|
|
|
|
}
|
2008-07-23 07:06:41 +04:00
|
|
|
spin_unlock(&shrink_block_group->lock);
|
|
|
|
|
2008-05-24 22:04:53 +04:00
|
|
|
do_chunk_alloc(trans, root->fs_info->extent_root,
|
|
|
|
calc + 2 * 1024 * 1024, new_alloc_flags, force);
|
2008-07-08 22:19:17 +04:00
|
|
|
|
|
|
|
mutex_unlock(&root->fs_info->alloc_mutex);
|
2008-05-24 22:04:53 +04:00
|
|
|
btrfs_end_transaction(trans, root);
|
2008-07-08 22:19:17 +04:00
|
|
|
mutex_lock(&root->fs_info->alloc_mutex);
|
2008-07-23 07:06:41 +04:00
|
|
|
} else
|
|
|
|
spin_unlock(&shrink_block_group->lock);
|
2008-05-24 22:04:53 +04:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2008-04-26 00:53:30 +04:00
|
|
|
int btrfs_shrink_extent_tree(struct btrfs_root *root, u64 shrink_start)
|
2007-12-22 00:27:24 +03:00
|
|
|
{
|
|
|
|
struct btrfs_trans_handle *trans;
|
|
|
|
struct btrfs_root *tree_root = root->fs_info->tree_root;
|
|
|
|
struct btrfs_path *path;
|
|
|
|
u64 cur_byte;
|
|
|
|
u64 total_found;
|
2008-04-26 00:53:30 +04:00
|
|
|
u64 shrink_last_byte;
|
|
|
|
struct btrfs_block_group_cache *shrink_block_group;
|
2007-12-22 00:27:24 +03:00
|
|
|
struct btrfs_key key;
|
2008-01-03 22:14:39 +03:00
|
|
|
struct btrfs_key found_key;
|
2007-12-22 00:27:24 +03:00
|
|
|
struct extent_buffer *leaf;
|
|
|
|
u32 nritems;
|
|
|
|
int ret;
|
2008-05-07 19:43:44 +04:00
|
|
|
int progress;
|
2007-12-22 00:27:24 +03:00
|
|
|
|
2008-06-26 00:01:30 +04:00
|
|
|
mutex_lock(&root->fs_info->alloc_mutex);
|
2008-04-26 00:53:30 +04:00
|
|
|
shrink_block_group = btrfs_lookup_block_group(root->fs_info,
|
|
|
|
shrink_start);
|
|
|
|
BUG_ON(!shrink_block_group);
|
|
|
|
|
2008-05-24 22:04:53 +04:00
|
|
|
shrink_last_byte = shrink_block_group->key.objectid +
|
|
|
|
shrink_block_group->key.offset;
|
2008-04-26 00:53:30 +04:00
|
|
|
|
|
|
|
shrink_block_group->space_info->total_bytes -=
|
|
|
|
shrink_block_group->key.offset;
|
2007-12-22 00:27:24 +03:00
|
|
|
path = btrfs_alloc_path();
|
|
|
|
root = root->fs_info->extent_root;
|
2008-01-02 18:01:11 +03:00
|
|
|
path->reada = 2;
|
2007-12-22 00:27:24 +03:00
|
|
|
|
2008-05-09 19:46:48 +04:00
|
|
|
printk("btrfs relocating block group %llu flags %llu\n",
|
|
|
|
(unsigned long long)shrink_start,
|
|
|
|
(unsigned long long)shrink_block_group->flags);
|
|
|
|
|
2008-05-24 22:04:53 +04:00
|
|
|
__alloc_chunk_for_shrink(root, shrink_block_group, 1);
|
|
|
|
|
2007-12-22 00:27:24 +03:00
|
|
|
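	/*
	 * each pass below marks the group read only, walks the extent tree
	 * for extents still living in this block group and relocates them.
	 * If anything was found we commit, flush delalloc and ordered data,
	 * and loop until the group is empty.
	 */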
again:
|
2008-05-09 19:46:48 +04:00
|
|
|
|
2008-04-26 00:53:30 +04:00
|
|
|
shrink_block_group->ro = 1;
|
|
|
|
|
2007-12-22 00:27:24 +03:00
|
|
|
total_found = 0;
|
2008-05-07 19:43:44 +04:00
|
|
|
progress = 0;
|
2008-04-26 00:53:30 +04:00
|
|
|
key.objectid = shrink_start;
|
2007-12-22 00:27:24 +03:00
|
|
|
key.offset = 0;
|
|
|
|
key.type = 0;
|
2008-01-03 22:14:39 +03:00
|
|
|
cur_byte = key.objectid;
|
2008-01-03 17:08:48 +03:00
|
|
|
|
2008-08-05 07:17:27 +04:00
|
|
|
mutex_unlock(&root->fs_info->alloc_mutex);
|
|
|
|
|
|
|
|
btrfs_start_delalloc_inodes(root);
|
2008-08-05 21:05:02 +04:00
|
|
|
btrfs_wait_ordered_extents(tree_root, 0);
|
2008-08-05 07:17:27 +04:00
|
|
|
|
|
|
|
mutex_lock(&root->fs_info->alloc_mutex);
|
|
|
|
|
2008-01-03 22:14:39 +03:00
|
|
|
ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
|
|
|
|
if (ret < 0)
|
|
|
|
goto out;
|
|
|
|
|
2008-03-24 22:01:56 +03:00
|
|
|
ret = btrfs_previous_item(root, path, 0, BTRFS_EXTENT_ITEM_KEY);
|
2008-01-03 22:14:39 +03:00
|
|
|
if (ret < 0)
|
|
|
|
goto out;
|
2008-04-26 00:53:30 +04:00
|
|
|
|
2008-01-03 22:14:39 +03:00
|
|
|
if (ret == 0) {
|
|
|
|
leaf = path->nodes[0];
|
|
|
|
btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
|
2008-04-26 00:53:30 +04:00
|
|
|
if (found_key.objectid + found_key.offset > shrink_start &&
|
|
|
|
found_key.objectid < shrink_last_byte) {
|
2008-01-03 22:14:39 +03:00
|
|
|
cur_byte = found_key.objectid;
|
|
|
|
key.objectid = cur_byte;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
btrfs_release_path(root, path);
|
|
|
|
|
|
|
|
while(1) {
|
2007-12-22 00:27:24 +03:00
|
|
|
ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
|
|
|
|
if (ret < 0)
|
|
|
|
goto out;
|
2008-01-03 22:14:39 +03:00
|
|
|
|
2008-07-08 22:19:17 +04:00
|
|
|
next:
|
2007-12-22 00:27:24 +03:00
|
|
|
leaf = path->nodes[0];
|
2008-01-03 22:14:39 +03:00
|
|
|
nritems = btrfs_header_nritems(leaf);
|
|
|
|
if (path->slots[0] >= nritems) {
|
|
|
|
ret = btrfs_next_leaf(root, path);
|
|
|
|
if (ret < 0)
|
|
|
|
goto out;
|
|
|
|
if (ret == 1) {
|
|
|
|
ret = 0;
|
|
|
|
break;
|
2007-12-22 00:27:24 +03:00
|
|
|
}
|
2008-01-03 22:14:39 +03:00
|
|
|
leaf = path->nodes[0];
|
|
|
|
nritems = btrfs_header_nritems(leaf);
|
2007-12-22 00:27:24 +03:00
|
|
|
}
|
2008-01-03 22:14:39 +03:00
|
|
|
|
|
|
|
btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
|
2008-01-05 00:47:16 +03:00
|
|
|
|
2008-04-26 00:53:30 +04:00
|
|
|
if (found_key.objectid >= shrink_last_byte)
|
|
|
|
break;
|
|
|
|
|
2008-01-05 00:47:16 +03:00
|
|
|
if (progress && need_resched()) {
|
|
|
|
memcpy(&key, &found_key, sizeof(key));
|
|
|
|
cond_resched();
|
|
|
|
btrfs_release_path(root, path);
|
|
|
|
btrfs_search_slot(NULL, root, &key, path, 0, 0);
|
|
|
|
progress = 0;
|
|
|
|
goto next;
|
|
|
|
}
|
|
|
|
progress = 1;
|
|
|
|
|
2008-01-03 22:14:39 +03:00
|
|
|
if (btrfs_key_type(&found_key) != BTRFS_EXTENT_ITEM_KEY ||
|
|
|
|
found_key.objectid + found_key.offset <= cur_byte) {
|
2008-05-24 22:04:53 +04:00
|
|
|
memcpy(&key, &found_key, sizeof(key));
|
|
|
|
key.offset++;
|
2007-12-22 00:27:24 +03:00
|
|
|
path->slots[0]++;
|
|
|
|
goto next;
|
|
|
|
}
|
2008-01-03 22:14:39 +03:00
|
|
|
|
2007-12-22 00:27:24 +03:00
|
|
|
total_found++;
|
|
|
|
cur_byte = found_key.objectid + found_key.offset;
|
|
|
|
key.objectid = cur_byte;
|
|
|
|
btrfs_release_path(root, path);
|
|
|
|
ret = relocate_one_extent(root, path, &found_key);
|
2008-05-24 22:04:53 +04:00
|
|
|
__alloc_chunk_for_shrink(root, shrink_block_group, 0);
|
2007-12-22 00:27:24 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
btrfs_release_path(root, path);
|
|
|
|
|
|
|
|
if (total_found > 0) {
|
2008-05-09 19:46:48 +04:00
|
|
|
printk("btrfs relocate found %llu last extent was %llu\n",
|
|
|
|
(unsigned long long)total_found,
|
|
|
|
(unsigned long long)found_key.objectid);
|
2008-07-08 22:19:17 +04:00
|
|
|
mutex_unlock(&root->fs_info->alloc_mutex);
|
2007-12-22 00:27:24 +03:00
|
|
|
trans = btrfs_start_transaction(tree_root, 1);
|
|
|
|
btrfs_commit_transaction(trans, tree_root);
|
|
|
|
|
|
|
|
btrfs_clean_old_snapshots(tree_root);
|
|
|
|
|
2008-08-05 07:17:27 +04:00
|
|
|
btrfs_start_delalloc_inodes(root);
|
2008-08-05 21:05:02 +04:00
|
|
|
btrfs_wait_ordered_extents(tree_root, 0);
|
2008-07-24 19:57:52 +04:00
|
|
|
|
2007-12-22 00:27:24 +03:00
|
|
|
trans = btrfs_start_transaction(tree_root, 1);
|
|
|
|
btrfs_commit_transaction(trans, tree_root);
|
2008-07-08 22:19:17 +04:00
|
|
|
mutex_lock(&root->fs_info->alloc_mutex);
|
2007-12-22 00:27:24 +03:00
|
|
|
goto again;
|
|
|
|
}
|
|
|
|
|
2008-04-26 00:53:30 +04:00
|
|
|
/*
|
|
|
|
* we've freed all the extents, now remove the block
|
|
|
|
* group item from the tree
|
|
|
|
*/
|
2008-07-08 22:19:17 +04:00
|
|
|
mutex_unlock(&root->fs_info->alloc_mutex);
|
|
|
|
|
2007-12-22 00:27:24 +03:00
|
|
|
trans = btrfs_start_transaction(root, 1);
|
2008-07-23 07:06:41 +04:00
|
|
|
|
2008-07-08 22:19:17 +04:00
|
|
|
mutex_lock(&root->fs_info->alloc_mutex);
|
2008-04-26 00:53:30 +04:00
|
|
|
memcpy(&key, &shrink_block_group->key, sizeof(key));
|
2008-01-04 17:34:54 +03:00
|
|
|
|
2008-04-26 00:53:30 +04:00
|
|
|
ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
|
|
|
|
if (ret > 0)
|
|
|
|
ret = -EIO;
|
2008-07-24 20:17:14 +04:00
|
|
|
if (ret < 0) {
|
|
|
|
btrfs_end_transaction(trans, root);
|
2008-04-26 00:53:30 +04:00
|
|
|
goto out;
|
2008-07-24 20:17:14 +04:00
|
|
|
}
|
2008-01-03 22:14:39 +03:00
|
|
|
|
2008-09-23 21:14:11 +04:00
|
|
|
spin_lock(&root->fs_info->block_group_cache_lock);
|
|
|
|
rb_erase(&shrink_block_group->cache_node,
|
|
|
|
&root->fs_info->block_group_cache_tree);
|
|
|
|
spin_unlock(&root->fs_info->block_group_cache_lock);
|
2008-05-24 22:04:53 +04:00
|
|
|
|
2008-09-23 21:14:11 +04:00
|
|
|
ret = btrfs_remove_free_space(shrink_block_group, key.objectid,
|
|
|
|
key.offset);
|
|
|
|
if (ret) {
|
|
|
|
btrfs_end_transaction(trans, root);
|
|
|
|
goto out;
|
|
|
|
}
|
2008-08-05 07:17:26 +04:00
|
|
|
/*
|
2008-05-24 22:04:53 +04:00
|
|
|
memset(shrink_block_group, 0, sizeof(*shrink_block_group));
|
|
|
|
kfree(shrink_block_group);
|
2008-08-05 07:17:26 +04:00
|
|
|
*/
|
2008-05-24 22:04:53 +04:00
|
|
|
|
2008-04-26 00:53:30 +04:00
|
|
|
btrfs_del_item(trans, root, path);
|
2008-07-08 22:19:17 +04:00
|
|
|
btrfs_release_path(root, path);
|
|
|
|
mutex_unlock(&root->fs_info->alloc_mutex);
|
2007-12-22 00:27:24 +03:00
|
|
|
btrfs_commit_transaction(trans, root);
|
2008-05-24 22:04:53 +04:00
|
|
|
|
2008-07-08 22:19:17 +04:00
|
|
|
mutex_lock(&root->fs_info->alloc_mutex);
|
|
|
|
|
2008-05-24 22:04:53 +04:00
|
|
|
/* the code to unpin extents might set a few bits in the free
|
|
|
|
* space cache for this range again
|
|
|
|
*/
|
2008-09-23 21:14:11 +04:00
|
|
|
/* XXX? */
|
|
|
|
ret = btrfs_remove_free_space(shrink_block_group, key.objectid,
|
|
|
|
key.offset);
|
2007-12-22 00:27:24 +03:00
|
|
|
out:
|
|
|
|
btrfs_free_path(path);
|
2008-06-26 00:01:30 +04:00
|
|
|
mutex_unlock(&root->fs_info->alloc_mutex);
|
2007-12-22 00:27:24 +03:00
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2008-03-24 22:01:56 +03:00
|
|
|
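/*
 * search the extent tree starting at *key and return 0 with the path
 * pointing at the first block group item found at or after that key,
 * or -ENOENT if there are no more block group items.
 */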
int find_first_block_group(struct btrfs_root *root, struct btrfs_path *path,
|
|
|
|
struct btrfs_key *key)
|
|
|
|
{
|
2008-06-26 00:01:30 +04:00
|
|
|
int ret = 0;
|
2008-03-24 22:01:56 +03:00
|
|
|
struct btrfs_key found_key;
|
|
|
|
struct extent_buffer *leaf;
|
|
|
|
int slot;
|
2007-12-22 00:27:24 +03:00
|
|
|
|
2008-03-24 22:01:56 +03:00
|
|
|
ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
|
|
|
|
if (ret < 0)
|
2008-06-26 00:01:30 +04:00
|
|
|
goto out;
|
|
|
|
|
2008-03-24 22:01:56 +03:00
|
|
|
while(1) {
|
|
|
|
slot = path->slots[0];
|
2007-12-22 00:27:24 +03:00
|
|
|
leaf = path->nodes[0];
|
2008-03-24 22:01:56 +03:00
|
|
|
if (slot >= btrfs_header_nritems(leaf)) {
|
|
|
|
ret = btrfs_next_leaf(root, path);
|
|
|
|
if (ret == 0)
|
|
|
|
continue;
|
|
|
|
if (ret < 0)
|
2008-06-26 00:01:30 +04:00
|
|
|
goto out;
|
2008-03-24 22:01:56 +03:00
|
|
|
break;
|
2007-12-22 00:27:24 +03:00
|
|
|
}
|
2008-03-24 22:01:56 +03:00
|
|
|
btrfs_item_key_to_cpu(leaf, &found_key, slot);
|
2007-12-22 00:27:24 +03:00
|
|
|
|
2008-03-24 22:01:56 +03:00
|
|
|
if (found_key.objectid >= key->objectid &&
|
2008-06-26 00:01:30 +04:00
|
|
|
found_key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) {
|
|
|
|
ret = 0;
|
|
|
|
goto out;
|
|
|
|
}
|
2008-03-24 22:01:56 +03:00
|
|
|
path->slots[0]++;
|
2007-12-22 00:27:24 +03:00
|
|
|
}
|
2008-03-24 22:01:56 +03:00
|
|
|
ret = -ENOENT;
|
2008-06-26 00:01:30 +04:00
|
|
|
out:
|
2008-03-24 22:01:56 +03:00
|
|
|
return ret;
|
2007-12-22 00:27:24 +03:00
|
|
|
}
|
|
|
|
|
2007-04-27 00:46:15 +04:00
|
|
|
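/*
 * called at mount time: read every block group item out of the extent
 * tree, build the in-memory cache records, and link each one into its
 * space_info list and the per-fs block group rb-tree.
 */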
int btrfs_read_block_groups(struct btrfs_root *root)
|
|
|
|
{
|
|
|
|
struct btrfs_path *path;
|
|
|
|
int ret;
|
|
|
|
struct btrfs_block_group_cache *cache;
|
2007-05-06 18:15:01 +04:00
|
|
|
struct btrfs_fs_info *info = root->fs_info;
|
2008-03-24 22:01:59 +03:00
|
|
|
struct btrfs_space_info *space_info;
|
2007-04-27 00:46:15 +04:00
|
|
|
struct btrfs_key key;
|
|
|
|
struct btrfs_key found_key;
|
2007-10-16 00:14:19 +04:00
|
|
|
struct extent_buffer *leaf;
|
2007-10-16 00:15:19 +04:00
|
|
|
|
2007-05-06 18:15:01 +04:00
|
|
|
root = info->extent_root;
|
2007-04-27 00:46:15 +04:00
|
|
|
key.objectid = 0;
|
2008-03-24 22:01:56 +03:00
|
|
|
key.offset = 0;
|
2007-04-27 00:46:15 +04:00
|
|
|
btrfs_set_key_type(&key, BTRFS_BLOCK_GROUP_ITEM_KEY);
|
|
|
|
path = btrfs_alloc_path();
|
|
|
|
if (!path)
|
|
|
|
return -ENOMEM;
|
|
|
|
|
2008-06-26 00:01:30 +04:00
|
|
|
mutex_lock(&root->fs_info->alloc_mutex);
|
2007-04-27 00:46:15 +04:00
|
|
|
while(1) {
|
2008-03-24 22:01:56 +03:00
|
|
|
ret = find_first_block_group(root, path, &key);
|
|
|
|
if (ret > 0) {
|
|
|
|
ret = 0;
|
|
|
|
goto error;
|
2007-04-27 00:46:15 +04:00
|
|
|
}
|
2008-03-24 22:01:56 +03:00
|
|
|
if (ret != 0)
|
|
|
|
goto error;
|
|
|
|
|
2007-10-16 00:14:19 +04:00
|
|
|
leaf = path->nodes[0];
|
|
|
|
btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
|
2008-04-26 00:53:30 +04:00
|
|
|
cache = kzalloc(sizeof(*cache), GFP_NOFS);
|
2007-04-27 00:46:15 +04:00
|
|
|
if (!cache) {
|
2008-03-24 22:01:56 +03:00
|
|
|
ret = -ENOMEM;
|
2007-04-27 00:46:15 +04:00
|
|
|
break;
|
|
|
|
}
|
2007-05-08 04:03:49 +04:00
|
|
|
|
2008-07-23 07:06:41 +04:00
|
|
|
spin_lock_init(&cache->lock);
|
2008-09-23 21:14:11 +04:00
|
|
|
INIT_LIST_HEAD(&cache->list);
|
2007-10-16 00:14:19 +04:00
|
|
|
read_extent_buffer(leaf, &cache->item,
|
|
|
|
btrfs_item_ptr_offset(leaf, path->slots[0]),
|
|
|
|
sizeof(cache->item));
|
2007-04-27 00:46:15 +04:00
|
|
|
memcpy(&cache->key, &found_key, sizeof(found_key));
|
2008-03-24 22:01:56 +03:00
|
|
|
|
2007-04-27 00:46:15 +04:00
|
|
|
key.objectid = found_key.objectid + found_key.offset;
|
|
|
|
btrfs_release_path(root, path);
|
2008-03-24 22:01:56 +03:00
|
|
|
cache->flags = btrfs_block_group_flags(&cache->item);
|
2007-10-16 00:15:19 +04:00
|
|
|
|
2008-03-24 22:01:59 +03:00
|
|
|
ret = update_space_info(info, cache->flags, found_key.offset,
|
|
|
|
btrfs_block_group_used(&cache->item),
|
|
|
|
&space_info);
|
|
|
|
BUG_ON(ret);
|
|
|
|
cache->space_info = space_info;
|
2008-09-23 21:14:11 +04:00
|
|
|
spin_lock(&space_info->lock);
|
|
|
|
list_add(&cache->list, &space_info->block_groups);
|
|
|
|
spin_unlock(&space_info->lock);
|
|
|
|
|
|
|
|
ret = btrfs_add_block_group_cache(root->fs_info, cache);
|
|
|
|
BUG_ON(ret);
|
2008-03-24 22:01:59 +03:00
|
|
|
|
2007-04-27 00:46:15 +04:00
|
|
|
if (key.objectid >=
|
2007-10-16 00:15:53 +04:00
|
|
|
btrfs_super_total_bytes(&info->super_copy))
|
2007-04-27 00:46:15 +04:00
|
|
|
break;
|
|
|
|
}
|
2008-03-24 22:01:56 +03:00
|
|
|
ret = 0;
|
|
|
|
error:
|
2007-04-27 00:46:15 +04:00
|
|
|
btrfs_free_path(path);
|
2008-06-26 00:01:30 +04:00
|
|
|
mutex_unlock(&root->fs_info->alloc_mutex);
|
2008-03-24 22:01:56 +03:00
|
|
|
return ret;
|
2007-04-27 00:46:15 +04:00
|
|
|
}
|
2008-03-24 22:01:59 +03:00
|
|
|
|
|
|
|
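/*
 * create the block group record for a newly allocated chunk: set up the
 * in-memory cache, account the space in the matching space_info, and
 * insert the block group item into the extent tree.
 */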
int btrfs_make_block_group(struct btrfs_trans_handle *trans,
|
|
|
|
struct btrfs_root *root, u64 bytes_used,
|
2008-04-15 23:41:47 +04:00
|
|
|
u64 type, u64 chunk_objectid, u64 chunk_offset,
|
2008-03-24 22:01:59 +03:00
|
|
|
u64 size)
|
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
struct btrfs_root *extent_root;
|
|
|
|
struct btrfs_block_group_cache *cache;
|
|
|
|
|
2008-07-08 22:19:17 +04:00
|
|
|
WARN_ON(!mutex_is_locked(&root->fs_info->alloc_mutex));
|
2008-03-24 22:01:59 +03:00
|
|
|
extent_root = root->fs_info->extent_root;
|
|
|
|
|
2008-09-06 00:13:11 +04:00
|
|
|
root->fs_info->last_trans_new_blockgroup = trans->transid;
|
|
|
|
|
2008-04-26 00:53:30 +04:00
|
|
|
cache = kzalloc(sizeof(*cache), GFP_NOFS);
|
2008-09-23 21:14:11 +04:00
|
|
|
if (!cache)
|
|
|
|
return -ENOMEM;
|
|
|
|
|
2008-04-15 23:41:47 +04:00
|
|
|
cache->key.objectid = chunk_offset;
|
2008-03-24 22:01:59 +03:00
|
|
|
cache->key.offset = size;
|
2008-07-23 07:06:41 +04:00
|
|
|
spin_lock_init(&cache->lock);
|
2008-09-23 21:14:11 +04:00
|
|
|
INIT_LIST_HEAD(&cache->list);
|
2008-03-24 22:01:59 +03:00
|
|
|
btrfs_set_key_type(&cache->key, BTRFS_BLOCK_GROUP_ITEM_KEY);
|
2008-05-24 22:04:53 +04:00
|
|
|
|
2008-03-24 22:01:59 +03:00
|
|
|
btrfs_set_block_group_used(&cache->item, bytes_used);
|
|
|
|
btrfs_set_block_group_chunk_objectid(&cache->item, chunk_objectid);
|
|
|
|
cache->flags = type;
|
|
|
|
btrfs_set_block_group_flags(&cache->item, type);
|
|
|
|
|
|
|
|
ret = update_space_info(root->fs_info, cache->flags, size, bytes_used,
|
|
|
|
&cache->space_info);
|
|
|
|
BUG_ON(ret);
|
2008-09-23 21:14:11 +04:00
|
|
|
spin_lock(&cache->space_info->lock);
|
|
|
|
list_add(&cache->list, &cache->space_info->block_groups);
|
|
|
|
spin_unlock(&cache->space_info->lock);
|
2008-03-24 22:01:59 +03:00
|
|
|
|
2008-09-23 21:14:11 +04:00
|
|
|
ret = btrfs_add_block_group_cache(root->fs_info, cache);
|
|
|
|
BUG_ON(ret);
|
2008-07-23 07:06:41 +04:00
|
|
|
|
2008-03-24 22:01:59 +03:00
|
|
|
ret = btrfs_insert_item(trans, extent_root, &cache->key, &cache->item,
|
|
|
|
sizeof(cache->item));
|
|
|
|
BUG_ON(ret);
|
|
|
|
|
|
|
|
finish_current_insert(trans, extent_root);
|
|
|
|
ret = del_pending_extents(trans, extent_root);
|
|
|
|
BUG_ON(ret);
|
2008-04-04 23:40:00 +04:00
|
|
|
set_avail_alloc_bits(extent_root->fs_info, type);
|
2008-06-26 00:01:30 +04:00
|
|
|
|
2008-03-24 22:01:59 +03:00
|
|
|
return 0;
|
|
|
|
}
|