author    Qu Wenruo <quwenruo@cn.fujitsu.com>    2016-01-29 13:03:13 +0800
committer David Sterba <dsterba@suse.com>        2016-06-07 18:15:19 +0200
commit    522ef705e38fdb9ae952344b454da392e60dc90d
tree      2f36fd78399c4743f3db0ff701194e649a2712f1
parent    9d615049d223a473f96bfcfc174f05a293cc163f
btrfs-progs: convert: Introduce function to calculate the available space
Introduce a new function, calculate_available_space(), to fill the
available-space cache_tree and the data_chunks cache_tree. Unlike the old
implementation, this function does two new things:

1) Batch used ext* data space
   Merge the used ext* ranges so that the data chunks cover them all,
   and store the result into mkfs_cfg->convert_data_chunks for later
   use.

2) Avoid superblocks and reserved space at the chunk level
   Neither the batched data space nor the free space will cover
   reserved ranges, such as the superblocks or the first 1MiB.

Signed-off-by: Qu Wenruo <quwenruo@cn.fujitsu.com>
Signed-off-by: David Sterba <dsterba@suse.com>
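For concreteness, the batching in step 1 can be sketched on its own: walk
the used ranges in offset order, restart packing when a range lies more
than one minimal stripe beyond the running offset, and extend every chunk
to at least the minimal stripe size. Below is a minimal, self-contained C
sketch of that loop; the plain array stands in for btrfs-progs' cache_tree
and the sample ranges are made up.

#include <stdio.h>
#include <stdint.h>

/* Twice the 16MiB minimal chunk size, matching the patch */
#define MIN_STRIPE_SIZE (2ULL * 16 * 1024 * 1024)

struct range { uint64_t start, size; };

int main(void)
{
	/* Made-up used ext* ranges, sorted by start offset */
	struct range used[] = {
		{ 0, 4 * 1024 * 1024 },
		{ 40ULL * 1024 * 1024, 1 * 1024 * 1024 },
		{ 200ULL * 1024 * 1024, 8 * 1024 * 1024 },
	};
	uint64_t cur_off = 0;
	size_t i;

	for (i = 0; i < sizeof(used) / sizeof(used[0]); i++) {
		uint64_t cur_len;

		/* Range already covered by the previous chunk */
		if (used[i].start + used[i].size < cur_off)
			continue;
		/* Too far away: restart packing at this range */
		if (used[i].start > cur_off + MIN_STRIPE_SIZE)
			cur_off = used[i].start;
		/* Cover the range, but never go below one minimal stripe */
		cur_len = used[i].start + used[i].size - cur_off;
		if (cur_len < MIN_STRIPE_SIZE)
			cur_len = MIN_STRIPE_SIZE;
		printf("data chunk: start=%llu len=%llu\n",
		       (unsigned long long)cur_off,
		       (unsigned long long)cur_len);
		cur_off += cur_len;
	}
	return 0;
}

Nearby ranges are absorbed into the current chunk, which keeps the chunk
count low; a distant range restarts the packing at its own offset.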
-rw-r--r--  btrfs-convert.c | 92
1 file changed, 90 insertions(+), 2 deletions(-)
diff --git a/btrfs-convert.c b/btrfs-convert.c
index 9776c67a..89df815f 100644
--- a/btrfs-convert.c
+++ b/btrfs-convert.c
@@ -2664,12 +2664,100 @@ static int wipe_reserved_ranges(struct cache_tree *tree, u64 min_stripe_size,
 	return ret;
 }
+static int calculate_available_space(struct btrfs_convert_context *cctx)
+{
+	struct cache_tree *used = &cctx->used;
+	struct cache_tree *data_chunks = &cctx->data_chunks;
+	struct cache_tree *free = &cctx->free;
+	struct cache_extent *cache;
+	u64 cur_off = 0;
+	/*
+	 * Twice the minimal chunk size, so the later wipe_reserved_ranges()
+	 * can work without needing to consider overlaps
+	 */
+	u64 min_stripe_size = 2 * 16 * 1024 * 1024;
+	int ret;
+
+	/* Calculate data_chunks */
+	for (cache = first_cache_extent(used); cache;
+	     cache = next_cache_extent(cache)) {
+		u64 cur_len;
+
+		if (cache->start + cache->size < cur_off)
+			continue;
+		if (cache->start > cur_off + min_stripe_size)
+			cur_off = cache->start;
+		cur_len = max(cache->start + cache->size - cur_off,
+			      min_stripe_size);
+		ret = add_merge_cache_extent(data_chunks, cur_off, cur_len);
+		if (ret < 0)
+			goto out;
+		cur_off += cur_len;
+	}
+	/*
+	 * Remove reserved ranges, so we won't ever bother relocating an old
+	 * filesystem extent to another place.
+	 */
+	ret = wipe_reserved_ranges(data_chunks, min_stripe_size, 1);
+	if (ret < 0)
+		goto out;
+
+	cur_off = 0;
+	/*
+	 * Calculate free space.
+	 * Always round up the start bytenr, to avoid metadata extents
+	 * crossing a stripe boundary, as the later mkfs_convert() won't do
+	 * full extent allocation checks.
+	 */
+	for (cache = first_cache_extent(data_chunks); cache;
+	     cache = next_cache_extent(cache)) {
+		if (cache->start < cur_off)
+			continue;
+		if (cache->start > cur_off) {
+			u64 insert_start;
+			u64 len;
+
+			len = cache->start - round_up(cur_off,
+						      BTRFS_STRIPE_LEN);
+			insert_start = round_up(cur_off, BTRFS_STRIPE_LEN);
+
+			ret = add_merge_cache_extent(free, insert_start, len);
+			if (ret < 0)
+				goto out;
+		}
+		cur_off = cache->start + cache->size;
+	}
+	/* Don't forget the last range */
+	if (cctx->total_bytes > cur_off) {
+		u64 len = cctx->total_bytes - cur_off;
+		u64 insert_start;
+
+		insert_start = round_up(cur_off, BTRFS_STRIPE_LEN);
+
+		ret = add_merge_cache_extent(free, insert_start, len);
+		if (ret < 0)
+			goto out;
+	}
+
+	/* Remove reserved bytes */
+	ret = wipe_reserved_ranges(free, min_stripe_size, 0);
+out:
+	return ret;
+}
 /*
- * Read used space
+ * Read used space, and with the used space populated,
+ * calculate data_chunks and free for later mkfs
  */
 static int convert_read_used_space(struct btrfs_convert_context *cctx)
 {
-	return cctx->convert_ops->read_used_space(cctx);
+	int ret;
+
+	ret = cctx->convert_ops->read_used_space(cctx);
+	if (ret)
+		return ret;
+
+	ret = calculate_available_space(cctx);
+	return ret;
 }
 static int do_convert(const char *devname, int datacsum, int packing, int noxattr,
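
The free-space pass added above can be exercised the same way: each gap
between consecutive data chunks becomes a free extent whose start is
rounded up to a stripe boundary, plus a final tail range up to the total
size. A minimal sketch under the same assumptions (plain arrays in place
of cache_tree, made-up ranges; BTRFS_STRIPE_LEN is 64KiB in btrfs-progs):

#include <stdio.h>
#include <stdint.h>

#define STRIPE_LEN (64ULL * 1024)	/* BTRFS_STRIPE_LEN in btrfs-progs */

struct range { uint64_t start, size; };

/* round_up() for power-of-two alignment, as used by the patch */
static uint64_t round_up(uint64_t x, uint64_t align)
{
	return (x + align - 1) & ~(align - 1);
}

int main(void)
{
	/* Made-up data chunks, sorted and non-overlapping */
	struct range chunks[] = {
		{ 0, 4 * 1024 * 1024 + 1000 },		/* unaligned end */
		{ 100ULL * 1024 * 1024, 32 * 1024 * 1024 },
	};
	uint64_t total_bytes = 256ULL * 1024 * 1024;
	uint64_t cur_off = 0;
	size_t i;

	for (i = 0; i < sizeof(chunks) / sizeof(chunks[0]); i++) {
		if (chunks[i].start > cur_off) {
			/* Gap before this chunk: free extent, start rounded up */
			uint64_t insert_start = round_up(cur_off, STRIPE_LEN);
			uint64_t len = chunks[i].start - insert_start;

			printf("free: start=%llu len=%llu\n",
			       (unsigned long long)insert_start,
			       (unsigned long long)len);
		}
		cur_off = chunks[i].start + chunks[i].size;
	}
	/* Don't forget the tail range after the last chunk */
	if (total_bytes > cur_off) {
		uint64_t insert_start = round_up(cur_off, STRIPE_LEN);
		uint64_t len = total_bytes - cur_off;	/* as in the patch */

		printf("free: start=%llu len=%llu\n",
		       (unsigned long long)insert_start,
		       (unsigned long long)len);
	}
	return 0;
}

In the patch itself, wipe_reserved_ranges() then strips the superblock
locations and other reserved ranges from both trees; the sketch omits
that step.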