Factor out create_chunk() from btrfs_alloc_chunk(). The new function
creates a chunk from the stripe parameters calculated in struct
alloc_chunk_ctl and the private list of devices built by the caller.
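
After this change, btrfs_alloc_chunk() only decides the stripe
parameters and picks the devices, then hands the rest to the helper.
A simplified outline of the resulting flow (not the exact code):

    btrfs_alloc_chunk(trans, info, start, num_bytes, type)
        ctl.type = type;
        init_alloc_chunk_ctl(info, &ctl);
        decide_stripe_size(info, &ctl);
        /* build private_devs from devices with enough free space */
        ret = create_chunk(trans, info, &ctl, &private_devs);
        *start = ctl.start;
        *num_bytes = ctl.num_bytes;
        return ret;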

There are no functional changes.

Signed-off-by: Naohiro Aota <naohiro.a...@wdc.com>
---
 kernel-shared/volumes.c | 217 ++++++++++++++++++++++------------------
 1 file changed, 120 insertions(+), 97 deletions(-)

diff --git a/kernel-shared/volumes.c b/kernel-shared/volumes.c
index 95b42eab846d..a409dd3d0366 100644
--- a/kernel-shared/volumes.c
+++ b/kernel-shared/volumes.c
@@ -149,6 +149,7 @@ const struct btrfs_raid_attr btrfs_raid_array[BTRFS_NR_RAID_TYPES] = {
 };
 
 struct alloc_chunk_ctl {
+       u64 start;
        u64 type;
        int num_stripes;
        int max_stripes;
@@ -156,6 +157,7 @@ struct alloc_chunk_ctl {
        int sub_stripes;
        u64 calc_size;
        u64 min_stripe_size;
+       u64 num_bytes;
        u64 max_chunk_size;
        int stripe_len;
        int total_devs;
@@ -1118,88 +1120,23 @@ static int decide_stripe_size(struct btrfs_fs_info *info,
        }
 }
 
-int btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
-                     struct btrfs_fs_info *info, u64 *start,
-                     u64 *num_bytes, u64 type)
+static int create_chunk(struct btrfs_trans_handle *trans,
+                       struct btrfs_fs_info *info, struct alloc_chunk_ctl *ctl,
+                       struct list_head *private_devs)
 {
-       u64 dev_offset;
        struct btrfs_root *extent_root = info->extent_root;
        struct btrfs_root *chunk_root = info->chunk_root;
        struct btrfs_stripe *stripes;
        struct btrfs_device *device = NULL;
        struct btrfs_chunk *chunk;
-       struct list_head private_devs;
        struct list_head *dev_list = &info->fs_devices->devices;
        struct list_head *cur;
        struct map_lookup *map;
-       u64 min_free;
-       u64 avail = 0;
-       u64 max_avail = 0;
-       struct alloc_chunk_ctl ctl;
-       int looped = 0;
        int ret;
        int index;
        struct btrfs_key key;
        u64 offset;
 
-       if (list_empty(dev_list)) {
-               return -ENOSPC;
-       }
-
-       ctl.type = type;
-       init_alloc_chunk_ctl(info, &ctl);
-       if (ctl.num_stripes < ctl.min_stripes)
-               return -ENOSPC;
-
-again:
-       ret = decide_stripe_size(info, &ctl);
-       if (ret < 0)
-               return ret;
-
-       INIT_LIST_HEAD(&private_devs);
-       cur = dev_list->next;
-       index = 0;
-
-       if (type & BTRFS_BLOCK_GROUP_DUP)
-               min_free = ctl.calc_size * 2;
-       else
-               min_free = ctl.calc_size;
-
-       /* build a private list of devices we will allocate from */
-       while(index < ctl.num_stripes) {
-               device = list_entry(cur, struct btrfs_device, dev_list);
-               ret = btrfs_device_avail_bytes(trans, device, &avail);
-               if (ret)
-                       return ret;
-               cur = cur->next;
-               if (avail >= min_free) {
-                       list_move(&device->dev_list, &private_devs);
-                       index++;
-                       if (type & BTRFS_BLOCK_GROUP_DUP)
-                               index++;
-               } else if (avail > max_avail)
-                       max_avail = avail;
-               if (cur == dev_list)
-                       break;
-       }
-       if (index < ctl.num_stripes) {
-               list_splice(&private_devs, dev_list);
-               if (index >= ctl.min_stripes) {
-                       ctl.num_stripes = index;
-                       if (type & (BTRFS_BLOCK_GROUP_RAID10)) {
-                               ctl.num_stripes /= ctl.sub_stripes;
-                               ctl.num_stripes *= ctl.sub_stripes;
-                       }
-                       looped = 1;
-                       goto again;
-               }
-               if (!looped && max_avail > 0) {
-                       looped = 1;
-                       ctl.calc_size = max_avail;
-                       goto again;
-               }
-               return -ENOSPC;
-       }
        ret = find_next_chunk(info, &offset);
        if (ret)
                return ret;
@@ -1207,36 +1144,38 @@ again:
        key.type = BTRFS_CHUNK_ITEM_KEY;
        key.offset = offset;
 
-       chunk = kmalloc(btrfs_chunk_item_size(ctl.num_stripes), GFP_NOFS);
+       chunk = kmalloc(btrfs_chunk_item_size(ctl->num_stripes), GFP_NOFS);
        if (!chunk)
                return -ENOMEM;
 
-       map = kmalloc(btrfs_map_lookup_size(ctl.num_stripes), GFP_NOFS);
+       map = kmalloc(btrfs_map_lookup_size(ctl->num_stripes), GFP_NOFS);
        if (!map) {
                kfree(chunk);
                return -ENOMEM;
        }
 
        stripes = &chunk->stripe;
-       *num_bytes = chunk_bytes_by_type(type, ctl.calc_size, &ctl);
+       ctl->num_bytes = chunk_bytes_by_type(ctl->type, ctl->calc_size, ctl);
        index = 0;
-       while(index < ctl.num_stripes) {
+       while (index < ctl->num_stripes) {
+               u64 dev_offset;
                struct btrfs_stripe *stripe;
-               BUG_ON(list_empty(&private_devs));
-               cur = private_devs.next;
+
+               BUG_ON(list_empty(private_devs));
+               cur = private_devs->next;
                device = list_entry(cur, struct btrfs_device, dev_list);
 
                /* loop over this device again if we're doing a dup group */
-               if (!(type & BTRFS_BLOCK_GROUP_DUP) ||
-                   (index == ctl.num_stripes - 1))
+               if (!(ctl->type & BTRFS_BLOCK_GROUP_DUP) ||
+                   (index == ctl->num_stripes - 1))
                        list_move(&device->dev_list, dev_list);
 
                ret = btrfs_alloc_dev_extent(trans, device, key.offset,
-                            ctl.calc_size, &dev_offset);
+                            ctl->calc_size, &dev_offset);
                if (ret < 0)
                        goto out_chunk_map;
 
-               device->bytes_used += ctl.calc_size;
+               device->bytes_used += ctl->calc_size;
                ret = btrfs_update_device(trans, device);
                if (ret < 0)
                        goto out_chunk_map;
@@ -1249,41 +1188,41 @@ again:
                memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE);
                index++;
        }
-       BUG_ON(!list_empty(&private_devs));
+       BUG_ON(!list_empty(private_devs));
 
        /* key was set above */
-       btrfs_set_stack_chunk_length(chunk, *num_bytes);
+       btrfs_set_stack_chunk_length(chunk, ctl->num_bytes);
        btrfs_set_stack_chunk_owner(chunk, extent_root->root_key.objectid);
-       btrfs_set_stack_chunk_stripe_len(chunk, ctl.stripe_len);
-       btrfs_set_stack_chunk_type(chunk, type);
-       btrfs_set_stack_chunk_num_stripes(chunk, ctl.num_stripes);
-       btrfs_set_stack_chunk_io_align(chunk, ctl.stripe_len);
-       btrfs_set_stack_chunk_io_width(chunk, ctl.stripe_len);
+       btrfs_set_stack_chunk_stripe_len(chunk, ctl->stripe_len);
+       btrfs_set_stack_chunk_type(chunk, ctl->type);
+       btrfs_set_stack_chunk_num_stripes(chunk, ctl->num_stripes);
+       btrfs_set_stack_chunk_io_align(chunk, ctl->stripe_len);
+       btrfs_set_stack_chunk_io_width(chunk, ctl->stripe_len);
        btrfs_set_stack_chunk_sector_size(chunk, info->sectorsize);
-       btrfs_set_stack_chunk_sub_stripes(chunk, ctl.sub_stripes);
+       btrfs_set_stack_chunk_sub_stripes(chunk, ctl->sub_stripes);
        map->sector_size = info->sectorsize;
-       map->stripe_len = ctl.stripe_len;
-       map->io_align = ctl.stripe_len;
-       map->io_width = ctl.stripe_len;
-       map->type = type;
-       map->num_stripes = ctl.num_stripes;
-       map->sub_stripes = ctl.sub_stripes;
+       map->stripe_len = ctl->stripe_len;
+       map->io_align = ctl->stripe_len;
+       map->io_width = ctl->stripe_len;
+       map->type = ctl->type;
+       map->num_stripes = ctl->num_stripes;
+       map->sub_stripes = ctl->sub_stripes;
 
        ret = btrfs_insert_item(trans, chunk_root, &key, chunk,
-                               btrfs_chunk_item_size(ctl.num_stripes));
+                               btrfs_chunk_item_size(ctl->num_stripes));
        BUG_ON(ret);
-       *start = key.offset;;
+       ctl->start = key.offset;
 
        map->ce.start = key.offset;
-       map->ce.size = *num_bytes;
+       map->ce.size = ctl->num_bytes;
 
        ret = insert_cache_extent(&info->mapping_tree.cache_tree, &map->ce);
        if (ret < 0)
                goto out_chunk_map;
 
-       if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
+       if (ctl->type & BTRFS_BLOCK_GROUP_SYSTEM) {
                ret = btrfs_add_system_chunk(info, &key,
-                           chunk, btrfs_chunk_item_size(ctl.num_stripes));
+                           chunk, btrfs_chunk_item_size(ctl->num_stripes));
                if (ret < 0)
                        goto out_chunk;
        }
@@ -1298,6 +1237,90 @@ out_chunk:
        return ret;
 }
 
+int btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
+                     struct btrfs_fs_info *info, u64 *start,
+                     u64 *num_bytes, u64 type)
+{
+       struct btrfs_device *device = NULL;
+       struct list_head private_devs;
+       struct list_head *dev_list = &info->fs_devices->devices;
+       struct list_head *cur;
+       u64 min_free;
+       u64 avail = 0;
+       u64 max_avail = 0;
+       struct alloc_chunk_ctl ctl;
+       int looped = 0;
+       int ret;
+       int index;
+
+       if (list_empty(dev_list))
+               return -ENOSPC;
+
+       ctl.type = type;
+       /* start and num_bytes will be set by create_chunk() */
+       ctl.start = 0;
+       ctl.num_bytes = 0;
+       init_alloc_chunk_ctl(info, &ctl);
+       if (ctl.num_stripes < ctl.min_stripes)
+               return -ENOSPC;
+
+again:
+       ret = decide_stripe_size(info, &ctl);
+       if (ret < 0)
+               return ret;
+
+       INIT_LIST_HEAD(&private_devs);
+       cur = dev_list->next;
+       index = 0;
+
+       if (type & BTRFS_BLOCK_GROUP_DUP)
+               min_free = ctl.calc_size * 2;
+       else
+               min_free = ctl.calc_size;
+
+       /* build a private list of devices we will allocate from */
+       while (index < ctl.num_stripes) {
+               device = list_entry(cur, struct btrfs_device, dev_list);
+               ret = btrfs_device_avail_bytes(trans, device, &avail);
+               if (ret)
+                       return ret;
+               cur = cur->next;
+               if (avail >= min_free) {
+                       list_move(&device->dev_list, &private_devs);
+                       index++;
+                       if (type & BTRFS_BLOCK_GROUP_DUP)
+                               index++;
+               } else if (avail > max_avail)
+                       max_avail = avail;
+               if (cur == dev_list)
+                       break;
+       }
+       if (index < ctl.num_stripes) {
+               list_splice(&private_devs, dev_list);
+               if (index >= ctl.min_stripes) {
+                       ctl.num_stripes = index;
+                       if (type & (BTRFS_BLOCK_GROUP_RAID10)) {
+                               ctl.num_stripes /= ctl.sub_stripes;
+                               ctl.num_stripes *= ctl.sub_stripes;
+                       }
+                       looped = 1;
+                       goto again;
+               }
+               if (!looped && max_avail > 0) {
+                       looped = 1;
+                       ctl.calc_size = max_avail;
+                       goto again;
+               }
+               return -ENOSPC;
+       }
+
+       ret = create_chunk(trans, info, &ctl, &private_devs);
+       *start = ctl.start;
+       *num_bytes = ctl.num_bytes;
+
+       return ret;
+}
+
 /*
  * Alloc a DATA chunk with SINGLE profile.
  *
-- 
2.31.1
