author     Chris Mason <chris.mason@oracle.com>          2008-03-24 15:03:18 -0400
committer  David Woodhouse <dwmw2@hera.kernel.org>       2008-03-24 15:03:18 -0400
commit     510be296772108b6d5ae829d76cef2b1a1690262
tree       39cfcefb9891552b388a04e0b5c05b1228f1716e
parent     00eb6ff6566816982903eb8c817606d6961c61ea
Add support for multiple devices per filesystem
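
Note: the diff below drops the old mixed data/metadata block groups in favor of
per-type groups (data, metadata, and a new system type) and routes allocation
decisions through a new block_group_bits() helper. As a rough, standalone
illustration of that classification, here is a minimal sketch; it is not part
of the patch, and the BLOCK_GROUP_* / BTRFS_BLOCK_GROUP_* values are
illustrative stand-ins for the real definitions in the btrfs headers.

    /* Standalone sketch of the block_group_bits() idea; constants are stand-ins. */
    #include <stdio.h>

    #define BLOCK_GROUP_DATA        (1 << 0)  /* stand-in for EXTENT_WRITEBACK */
    #define BLOCK_GROUP_METADATA    (1 << 1)  /* stand-in for EXTENT_UPTODATE */
    #define BLOCK_GROUP_SYSTEM      (1 << 2)  /* stand-in for EXTENT_NEW */

    #define BTRFS_BLOCK_GROUP_DATA      (1 << 0)
    #define BTRFS_BLOCK_GROUP_SYSTEM    (1 << 1)
    #define BTRFS_BLOCK_GROUP_METADATA  (1 << 2)

    struct block_group_cache_sketch {
            unsigned long long flags;  /* on-disk BTRFS_BLOCK_GROUP_* flags */
    };

    /* Nonzero when the cached group can serve an allocation of type 'bits'. */
    static int block_group_bits_sketch(struct block_group_cache_sketch *cache,
                                       unsigned long long bits)
    {
            if ((bits & BLOCK_GROUP_DATA) &&
                (cache->flags & BTRFS_BLOCK_GROUP_DATA))
                    return 1;
            if ((bits & BLOCK_GROUP_METADATA) &&
                (cache->flags & BTRFS_BLOCK_GROUP_METADATA))
                    return 1;
            if ((bits & BLOCK_GROUP_SYSTEM) &&
                (cache->flags & BTRFS_BLOCK_GROUP_SYSTEM))
                    return 1;
            return 0;
    }

    int main(void)
    {
            struct block_group_cache_sketch meta = {
                    .flags = BTRFS_BLOCK_GROUP_METADATA
            };

            /* A metadata group matches metadata requests but not data requests. */
            printf("metadata request: %d\n",
                   block_group_bits_sketch(&meta, BLOCK_GROUP_METADATA));
            printf("data request:     %d\n",
                   block_group_bits_sketch(&meta, BLOCK_GROUP_DATA));
            return 0;
    }
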
Diffstat (limited to 'extent-tree.c')
-rw-r--r--   extent-tree.c   423
1 file changed, 146 insertions(+), 277 deletions(-)
diff --git a/extent-tree.c b/extent-tree.c
index c907fd8a..2a47827d 100644
--- a/extent-tree.c
+++ b/extent-tree.c
@@ -25,37 +25,19 @@
#include "print-tree.h"
#include "transaction.h"
#include "crc32c.h"
+#include "volumes.h"
-#define BLOCK_GROUP_DATA EXTENT_WRITEBACK
+#define BLOCK_GROUP_DATA EXTENT_WRITEBACK
#define BLOCK_GROUP_METADATA EXTENT_UPTODATE
+#define BLOCK_GROUP_SYSTEM EXTENT_NEW
+
#define BLOCK_GROUP_DIRTY EXTENT_DIRTY
static int finish_current_insert(struct btrfs_trans_handle *trans, struct
btrfs_root *extent_root);
static int del_pending_extents(struct btrfs_trans_handle *trans, struct
btrfs_root *extent_root);
-static int find_previous_extent(struct btrfs_root *root,
- struct btrfs_path *path)
-{
- struct btrfs_key found_key;
- struct extent_buffer *leaf;
- int ret;
- while(1) {
- if (path->slots[0] == 0) {
- ret = btrfs_prev_leaf(root, path);
- if (ret != 0)
- return ret;
- } else {
- path->slots[0]--;
- }
- leaf = path->nodes[0];
- btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
- if (found_key.type == BTRFS_EXTENT_ITEM_KEY)
- return 0;
- }
- return 1;
-}
static int cache_block_group(struct btrfs_root *root,
struct btrfs_block_group_cache *block_group)
{
@@ -91,7 +73,7 @@ static int cache_block_group(struct btrfs_root *root,
ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
if (ret < 0)
return ret;
- ret = find_previous_extent(root, path);
+ ret = btrfs_previous_item(root, path, 0, BTRFS_EXTENT_ITEM_KEY);
if (ret < 0)
return ret;
if (ret == 0) {
@@ -168,7 +150,8 @@ struct btrfs_block_group_cache *btrfs_lookup_block_group(struct
block_group_cache = &info->block_group_cache;
ret = find_first_extent_bit(block_group_cache,
bytenr, &start, &end,
- BLOCK_GROUP_DATA | BLOCK_GROUP_METADATA);
+ BLOCK_GROUP_DATA | BLOCK_GROUP_METADATA |
+ BLOCK_GROUP_SYSTEM);
if (ret) {
return NULL;
}
@@ -182,9 +165,24 @@ struct btrfs_block_group_cache *btrfs_lookup_block_group(struct
return block_group;
return NULL;
}
-static u64 noinline find_search_start(struct btrfs_root *root,
+
+static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits)
+{
+ if ((bits & BLOCK_GROUP_DATA) &&
+ (cache->flags & BTRFS_BLOCK_GROUP_DATA))
+ return 1;
+ if ((bits & BLOCK_GROUP_METADATA) &&
+ (cache->flags & BTRFS_BLOCK_GROUP_METADATA))
+ return 1;
+ if ((bits & BLOCK_GROUP_SYSTEM) &&
+ (cache->flags & BTRFS_BLOCK_GROUP_SYSTEM))
+ return 1;
+ return 0;
+}
+
+static int noinline find_search_start(struct btrfs_root *root,
struct btrfs_block_group_cache **cache_ret,
- u64 search_start, int num, int data)
+ u64 *start_ret, int num, int data)
{
int ret;
struct btrfs_block_group_cache *cache = *cache_ret;
@@ -192,6 +190,7 @@ static u64 noinline find_search_start(struct btrfs_root *root,
u64 start = 0;
u64 end = 0;
u64 cache_miss = 0;
+ u64 search_start = *start_ret;
int wrapped = 0;
if (!cache) {
@@ -203,6 +202,9 @@ again:
goto out;
last = max(search_start, cache->key.objectid);
+ if (!block_group_bits(cache, data)) {
+ goto new_group;
+ }
while(1) {
ret = find_first_extent_bit(&root->fs_info->free_space_cache,
@@ -220,20 +222,18 @@ again:
cache_miss = start;
continue;
}
- if (data != BTRFS_BLOCK_GROUP_MIXED &&
- start + num > cache->key.objectid + cache->key.offset)
+ if (start + num > cache->key.objectid + cache->key.offset)
goto new_group;
- return start;
+ *start_ret = start;
+ return 0;
}
out:
cache = btrfs_lookup_block_group(root->fs_info, search_start);
if (!cache) {
- printk("Unable to find block group for %Lu\n",
- search_start);
+ printk("Unable to find block group for %Lu\n", search_start);
WARN_ON(1);
- return search_start;
}
- return search_start;
+ return -ENOSPC;
new_group:
last = cache->key.objectid + cache->key.offset;
@@ -244,7 +244,6 @@ no_cache:
if (!wrapped) {
wrapped = 1;
last = search_start;
- data = BTRFS_BLOCK_GROUP_MIXED;
goto wrapped;
}
goto out;
@@ -291,26 +290,18 @@ struct btrfs_block_group_cache *btrfs_find_block_group(struct btrfs_root *root,
int ret;
int full_search = 0;
int factor = 8;
- int data_swap = 0;
block_group_cache = &info->block_group_cache;
if (!owner)
factor = 8;
- if (data == BTRFS_BLOCK_GROUP_MIXED) {
- bit = BLOCK_GROUP_DATA | BLOCK_GROUP_METADATA;
- factor = 10;
- } else if (data)
- bit = BLOCK_GROUP_DATA;
- else
- bit = BLOCK_GROUP_METADATA;
+ bit = data;
if (search_start) {
struct btrfs_block_group_cache *shint;
shint = btrfs_lookup_block_group(info, search_start);
- if (shint && (shint->data == data ||
- shint->data == BTRFS_BLOCK_GROUP_MIXED)) {
+ if (shint && block_group_bits(shint, data)) {
used = btrfs_block_group_used(&shint->item);
if (used + shint->pinned <
div_factor(shint->key.offset, factor)) {
@@ -318,8 +309,7 @@ struct btrfs_block_group_cache *btrfs_find_block_group(struct btrfs_root *root,
}
}
}
- if (hint && (hint->data == data ||
- hint->data == BTRFS_BLOCK_GROUP_MIXED)) {
+ if (hint && block_group_bits(hint, data)) {
used = btrfs_block_group_used(&hint->item);
if (used + hint->pinned <
div_factor(hint->key.offset, factor)) {
@@ -365,12 +355,6 @@ again:
full_search = 1;
goto again;
}
- if (!data_swap) {
- data_swap = 1;
- bit = BLOCK_GROUP_DATA | BLOCK_GROUP_METADATA;
- last = search_start;
- goto again;
- }
found:
return found_group;
}
@@ -985,7 +969,7 @@ int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
static int update_block_group(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
u64 bytenr, u64 num_bytes, int alloc,
- int mark_free, int data)
+ int mark_free)
{
struct btrfs_block_group_cache *cache;
struct btrfs_fs_info *info = root->fs_info;
@@ -1010,41 +994,6 @@ static int update_block_group(struct btrfs_trans_handle *trans,
old_val = btrfs_block_group_used(&cache->item);
num_bytes = min(total, cache->key.offset - byte_in_group);
if (alloc) {
- if (cache->data != data &&
- old_val < (cache->key.offset >> 1)) {
- int bit_to_clear;
- int bit_to_set;
- cache->data = data;
- if (data) {
- bit_to_clear = BLOCK_GROUP_METADATA;
- bit_to_set = BLOCK_GROUP_DATA;
- cache->item.flags &=
- ~BTRFS_BLOCK_GROUP_MIXED;
- cache->item.flags |=
- BTRFS_BLOCK_GROUP_DATA;
- } else {
- bit_to_clear = BLOCK_GROUP_DATA;
- bit_to_set = BLOCK_GROUP_METADATA;
- cache->item.flags &=
- ~BTRFS_BLOCK_GROUP_MIXED;
- cache->item.flags &=
- ~BTRFS_BLOCK_GROUP_DATA;
- }
- clear_extent_bits(&info->block_group_cache,
- start, end, bit_to_clear,
- GFP_NOFS);
- set_extent_bits(&info->block_group_cache,
- start, end, bit_to_set,
- GFP_NOFS);
- } else if (cache->data != data &&
- cache->data != BTRFS_BLOCK_GROUP_MIXED) {
- cache->data = BTRFS_BLOCK_GROUP_MIXED;
- set_extent_bits(&info->block_group_cache,
- start, end,
- BLOCK_GROUP_DATA |
- BLOCK_GROUP_METADATA,
- GFP_NOFS);
- }
old_val += num_bytes;
} else {
old_val -= num_bytes;
@@ -1341,7 +1290,7 @@ static int __free_extent(struct btrfs_trans_handle *trans, struct btrfs_root
ops->free_extent(root, bytenr, num_bytes);
ret = update_block_group(trans, root, bytenr, num_bytes, 0,
- mark_free, 0);
+ mark_free);
BUG_ON(ret);
}
btrfs_free_path(path);
@@ -1434,36 +1383,21 @@ static int noinline find_free_extent(struct btrfs_trans_handle *trans,
u64 exclude_start, u64 exclude_nr,
int data)
{
- struct btrfs_path *path;
- struct btrfs_key key;
- u64 hole_size = 0;
- u64 aligned;
int ret;
- int slot = 0;
- u64 last_byte = 0;
u64 orig_search_start = search_start;
- int start_found;
- struct extent_buffer *l;
struct btrfs_root * root = orig_root->fs_info->extent_root;
struct btrfs_fs_info *info = root->fs_info;
u64 total_needed = num_bytes;
- int level;
struct btrfs_block_group_cache *block_group;
int full_scan = 0;
int wrapped = 0;
- u64 cached_start;
WARN_ON(num_bytes < root->sectorsize);
btrfs_set_key_type(ins, BTRFS_EXTENT_ITEM_KEY);
- level = btrfs_header_level(root->node);
-
- if (num_bytes >= 32 * 1024 * 1024 && hint_byte) {
- data = BTRFS_BLOCK_GROUP_MIXED;
- }
-
if (search_end == (u64)-1)
search_end = btrfs_super_total_bytes(&info->super_copy);
+
if (hint_byte) {
block_group = btrfs_lookup_block_group(info, hint_byte);
if (!block_group)
@@ -1477,7 +1411,7 @@ static int noinline find_free_extent(struct btrfs_trans_handle *trans,
}
total_needed += empty_size;
- path = btrfs_alloc_path();
+
check_failed:
if (!block_group) {
block_group = btrfs_lookup_block_group(info, search_start);
@@ -1485,135 +1419,49 @@ check_failed:
block_group = btrfs_lookup_block_group(info,
orig_search_start);
}
- search_start = find_search_start(root, &block_group, search_start,
- total_needed, data);
- search_start = stripe_align(root, search_start);
- cached_start = search_start;
- btrfs_init_path(path);
- ins->objectid = search_start;
- ins->offset = 0;
- start_found = 0;
- path->reada = 2;
-
- ret = btrfs_search_slot(trans, root, ins, path, 0, 0);
- if (ret < 0)
- goto error;
- ret = find_previous_extent(root, path);
- if (ret < 0)
+ ret = find_search_start(root, &block_group, &search_start,
+ total_needed, data);
+ if (ret)
goto error;
- l = path->nodes[0];
- btrfs_item_key_to_cpu(l, &key, path->slots[0]);
- while (1) {
- l = path->nodes[0];
- slot = path->slots[0];
- if (slot >= btrfs_header_nritems(l)) {
- ret = btrfs_next_leaf(root, path);
- if (ret == 0)
- continue;
- if (ret < 0)
- goto error;
-
- search_start = max(search_start,
- block_group->key.objectid);
- if (!start_found) {
- aligned = stripe_align(root, search_start);
- ins->objectid = aligned;
- if (aligned >= search_end) {
- ret = -ENOSPC;
- goto error;
- }
- ins->offset = search_end - aligned;
- start_found = 1;
- goto check_pending;
- }
- ins->objectid = stripe_align(root,
- last_byte > search_start ?
- last_byte : search_start);
- if (search_end <= ins->objectid) {
- ret = -ENOSPC;
- goto error;
- }
- ins->offset = search_end - ins->objectid;
- BUG_ON(ins->objectid >= search_end);
- goto check_pending;
- }
- btrfs_item_key_to_cpu(l, &key, slot);
-
- if (key.objectid >= search_start && key.objectid > last_byte &&
- start_found) {
- if (last_byte < search_start)
- last_byte = search_start;
- aligned = stripe_align(root, last_byte);
- hole_size = key.objectid - aligned;
- if (key.objectid > aligned && hole_size >= num_bytes) {
- ins->objectid = aligned;
- ins->offset = hole_size;
- goto check_pending;
- }
- }
- if (btrfs_key_type(&key) != BTRFS_EXTENT_ITEM_KEY) {
- if (!start_found && btrfs_key_type(&key) ==
- BTRFS_BLOCK_GROUP_ITEM_KEY) {
- last_byte = key.objectid;
- start_found = 1;
- }
- goto next;
- }
-
- start_found = 1;
- last_byte = key.objectid + key.offset;
-
- if (!full_scan && data != BTRFS_BLOCK_GROUP_MIXED &&
- last_byte >= block_group->key.objectid +
- block_group->key.offset) {
- btrfs_release_path(root, path);
- search_start = block_group->key.objectid +
- block_group->key.offset;
- goto new_group;
- }
-next:
- path->slots[0]++;
- cond_resched();
- }
-check_pending:
- /* we have to make sure we didn't find an extent that has already
- * been allocated by the map tree or the original allocation
- */
- btrfs_release_path(root, path);
- BUG_ON(ins->objectid < search_start);
+ search_start = stripe_align(root, search_start);
+ ins->objectid = search_start;
+ ins->offset = num_bytes;
if (ins->objectid + num_bytes >= search_end)
goto enospc;
- if (!full_scan && data != BTRFS_BLOCK_GROUP_MIXED &&
- ins->objectid + num_bytes > block_group->
- key.objectid + block_group->key.offset) {
+
+ if (ins->objectid + num_bytes >
+ block_group->key.objectid + block_group->key.offset) {
search_start = block_group->key.objectid +
block_group->key.offset;
goto new_group;
}
+
if (test_range_bit(&info->extent_ins, ins->objectid,
ins->objectid + num_bytes -1, EXTENT_LOCKED, 0)) {
search_start = ins->objectid + num_bytes;
goto new_group;
}
+
if (test_range_bit(&info->pinned_extents, ins->objectid,
ins->objectid + num_bytes -1, EXTENT_DIRTY, 0)) {
search_start = ins->objectid + num_bytes;
goto new_group;
}
+
if (exclude_nr > 0 && (ins->objectid + num_bytes > exclude_start &&
ins->objectid < exclude_start + exclude_nr)) {
search_start = exclude_start + exclude_nr;
goto new_group;
}
- if (!data) {
+
+ if (!(data & BLOCK_GROUP_DATA)) {
block_group = btrfs_lookup_block_group(info, ins->objectid);
if (block_group)
trans->block_group = block_group;
}
ins->offset = num_bytes;
- btrfs_free_path(path);
return 0;
new_group:
@@ -1628,7 +1476,6 @@ enospc:
if (!full_scan)
total_needed -= empty_size;
full_scan = 1;
- data = BTRFS_BLOCK_GROUP_MIXED;
} else
wrapped = 1;
}
@@ -1639,8 +1486,6 @@ enospc:
goto check_failed;
error:
- btrfs_release_path(root, path);
- btrfs_free_path(path);
return ret;
}
/*
@@ -1670,6 +1515,13 @@ int btrfs_alloc_extent(struct btrfs_trans_handle *trans,
struct btrfs_extent_ref *ref;
struct btrfs_key keys[2];
+ if (data)
+ data = BLOCK_GROUP_DATA;
+ else if (info->force_system_allocs || root == root->fs_info->chunk_root)
+ data = BLOCK_GROUP_SYSTEM;
+ else
+ data = BLOCK_GROUP_METADATA;
+
WARN_ON(num_bytes < root->sectorsize);
if (ops && ops->alloc_extent) {
ret = ops->alloc_extent(root, num_bytes, hint_byte, ins);
@@ -1699,7 +1551,6 @@ int btrfs_alloc_extent(struct btrfs_trans_handle *trans,
set_extent_bits(&root->fs_info->extent_ins, ins->objectid,
ins->objectid + ins->offset - 1,
EXTENT_LOCKED, GFP_NOFS);
- WARN_ON(data == 1);
goto update_block;
}
@@ -1749,8 +1600,7 @@ int btrfs_alloc_extent(struct btrfs_trans_handle *trans,
}
update_block:
- ret = update_block_group(trans, root, ins->objectid, ins->offset, 1, 0,
- data);
+ ret = update_block_group(trans, root, ins->objectid, ins->offset, 1, 0);
if (ret) {
printk("update block group failed for %Lu %Lu\n",
ins->objectid, ins->offset);
@@ -1812,6 +1662,7 @@ struct extent_buffer *__btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
btrfs_free_extent(trans, root, ins.objectid, blocksize,
root->root_key.objectid, ref_generation,
0, 0, 0);
+ BUG_ON(1);
return ERR_PTR(-ENOMEM);
}
btrfs_set_buffer_uptodate(buf);
@@ -2179,11 +2030,44 @@ int btrfs_free_block_groups(struct btrfs_fs_info *info)
return 0;
}
+int find_first_block_group(struct btrfs_root *root, struct btrfs_path *path,
+ struct btrfs_key *key)
+{
+ int ret;
+ struct btrfs_key found_key;
+ struct extent_buffer *leaf;
+ int slot;
+
+ ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
+ if (ret < 0)
+ return ret;
+ while(1) {
+ slot = path->slots[0];
+ leaf = path->nodes[0];
+ if (slot >= btrfs_header_nritems(leaf)) {
+ ret = btrfs_next_leaf(root, path);
+ if (ret == 0)
+ continue;
+ if (ret < 0)
+ goto error;
+ break;
+ }
+ btrfs_item_key_to_cpu(leaf, &found_key, slot);
+
+ if (found_key.objectid >= key->objectid &&
+ found_key.type == BTRFS_BLOCK_GROUP_ITEM_KEY)
+ return 0;
+ path->slots[0]++;
+ }
+ ret = -ENOENT;
+error:
+ return ret;
+}
+
int btrfs_read_block_groups(struct btrfs_root *root)
{
struct btrfs_path *path;
int ret;
- int err = 0;
int bit;
struct btrfs_block_group_cache *cache;
struct btrfs_fs_info *info = root->fs_info;
@@ -2196,25 +2080,26 @@ int btrfs_read_block_groups(struct btrfs_root *root)
root = info->extent_root;
key.objectid = 0;
- key.offset = BTRFS_BLOCK_GROUP_SIZE;
+ key.offset = 0;
btrfs_set_key_type(&key, BTRFS_BLOCK_GROUP_ITEM_KEY);
-
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
while(1) {
- ret = btrfs_search_slot(NULL, info->extent_root,
- &key, path, 0, 0);
+ ret = find_first_block_group(root, path, &key);
+ if (ret > 0) {
+ ret = 0;
+ goto error;
+ }
if (ret != 0) {
- err = ret;
- break;
+ goto error;
}
leaf = path->nodes[0];
btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
cache = kmalloc(sizeof(*cache), GFP_NOFS);
if (!cache) {
- err = -1;
+ ret = -ENOMEM;
break;
}
@@ -2226,16 +2111,14 @@ int btrfs_read_block_groups(struct btrfs_root *root)
cache->pinned = 0;
key.objectid = found_key.objectid + found_key.offset;
btrfs_release_path(root, path);
-
- if (cache->item.flags & BTRFS_BLOCK_GROUP_MIXED) {
- bit = BLOCK_GROUP_DATA | BLOCK_GROUP_METADATA;
- cache->data = BTRFS_BLOCK_GROUP_MIXED;
- } else if (cache->item.flags & BTRFS_BLOCK_GROUP_DATA) {
+ cache->flags = btrfs_block_group_flags(&cache->item);
+ bit = 0;
+ if (cache->flags & BTRFS_BLOCK_GROUP_DATA) {
bit = BLOCK_GROUP_DATA;
- cache->data = BTRFS_BLOCK_GROUP_DATA;
- } else {
+ } else if (cache->flags & BTRFS_BLOCK_GROUP_SYSTEM) {
+ bit = BLOCK_GROUP_SYSTEM;
+ } else if (cache->flags & BTRFS_BLOCK_GROUP_METADATA) {
bit = BLOCK_GROUP_METADATA;
- cache->data = 0;
}
/* use EXTENT_LOCKED to prevent merging */
@@ -2249,9 +2132,10 @@ int btrfs_read_block_groups(struct btrfs_root *root)
btrfs_super_total_bytes(&info->super_copy))
break;
}
-
+ ret = 0;
+error:
btrfs_free_path(path);
- return 0;
+ return ret;
}
static int btrfs_insert_block_group(struct btrfs_trans_handle *trans,
@@ -2274,14 +2158,11 @@ static int btrfs_insert_block_group(struct btrfs_trans_handle *trans,
return 0;
}
-int btrfs_make_block_groups(struct btrfs_trans_handle *trans,
- struct btrfs_root *root)
+int btrfs_make_block_group(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root, u64 bytes_used,
+ u64 type, u64 chunk_tree, u64 chunk_objectid,
+ u64 size)
{
- u64 group_size;
- u64 bytes_used;
- u64 total_bytes;
- u64 cur_start;
- u64 nr = 0;
int ret;
int bit;
struct btrfs_root *extent_root;
@@ -2290,46 +2171,34 @@ int btrfs_make_block_groups(struct btrfs_trans_handle *trans,
extent_root = root->fs_info->extent_root;
block_group_cache = &root->fs_info->block_group_cache;
- group_size = BTRFS_BLOCK_GROUP_SIZE;
- bytes_used = btrfs_super_bytes_used(&root->fs_info->super_copy);
- total_bytes = btrfs_super_total_bytes(&root->fs_info->super_copy);
-
- cur_start = 0;
- while (cur_start < total_bytes) {
- cache = malloc(sizeof(*cache));
- BUG_ON(!cache);
- cache->key.objectid = cur_start;
- cache->key.offset = group_size;
- btrfs_set_key_type(&cache->key, BTRFS_BLOCK_GROUP_ITEM_KEY);
- memset(&cache->item, 0, sizeof(cache->item));
- if (nr == 0)
- btrfs_set_block_group_used(&cache->item, bytes_used);
- if (nr++ % 3) {
- bit = BLOCK_GROUP_DATA;
- cache->data = 1;
- cache->item.flags |= BTRFS_BLOCK_GROUP_DATA;
- } else {
- bit = BLOCK_GROUP_METADATA;
- cache->data = 0;
- }
- set_extent_bits(block_group_cache, cur_start,
- cur_start + group_size - 1,
- bit | EXTENT_LOCKED, GFP_NOFS);
- set_state_private(block_group_cache, cur_start,
- (unsigned long)cache);
- cur_start += group_size;
- }
- /* then insert all the items */
- cur_start = 0;
- while(cur_start < total_bytes) {
- cache = btrfs_lookup_block_group(root->fs_info, cur_start);
- BUG_ON(!cache);
- ret = btrfs_insert_block_group(trans, root, &cache->key,
- &cache->item);
- BUG_ON(ret);
- cur_start += group_size;
+ cache = malloc(sizeof(*cache));
+ BUG_ON(!cache);
+ cache->key.objectid = chunk_objectid;
+ cache->key.offset = size;
+ cache->cached = 0;
+ btrfs_set_key_type(&cache->key, BTRFS_BLOCK_GROUP_ITEM_KEY);
+ memset(&cache->item, 0, sizeof(cache->item));
+ btrfs_set_block_group_used(&cache->item, bytes_used);
+ btrfs_set_block_group_chunk_tree(&cache->item, chunk_tree);
+ btrfs_set_block_group_chunk_objectid(&cache->item, chunk_objectid);
+ cache->flags = type;
+ btrfs_set_block_group_flags(&cache->item, type);
+
+ if (type & BTRFS_BLOCK_GROUP_DATA) {
+ bit = BLOCK_GROUP_DATA;
+ } else if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
+ bit = BLOCK_GROUP_SYSTEM;
+ } else if (type & BTRFS_BLOCK_GROUP_METADATA) {
+ bit = BLOCK_GROUP_METADATA;
}
+ set_extent_bits(block_group_cache, chunk_objectid,
+ chunk_objectid + size - 1,
+ bit | EXTENT_LOCKED, GFP_NOFS);
+ set_state_private(block_group_cache, chunk_objectid,
+ (unsigned long)cache);
+ ret = btrfs_insert_block_group(trans, root, &cache->key, &cache->item);
+ BUG_ON(ret);
return 0;
}
@@ -2343,8 +2212,8 @@ u64 btrfs_hash_extent_ref(u64 root_objectid, u64 ref_generation,
int btrfs_update_block_group(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
u64 bytenr, u64 num_bytes, int alloc,
- int mark_free, int data)
+ int mark_free)
{
return update_block_group(trans, root, bytenr, num_bytes,
- alloc, mark_free, data);
+ alloc, mark_free);
}