On 09/07/2012 01:58 PM, Josh Durgin wrote:
> Might want to keep the comments about the fields
> moved to the mapping struct, but it looks good to me.
>
> Reviewed-by: Josh Durgin <[email protected]>
I'm going to leave this as-is. I'm definitely going to be
reworking all of these structures in the coming weeks to more
clearly separate images from snapshots, and I'll try to
improve how things are documented as I go.
-Alex
> On 09/07/2012 06:45 AM, Alex Elder wrote:
>> Several fields in a struct rbd_dev are related to what is mapped, as
>> opposed to the actual base rbd image. If the base image is mapped
>> these are almost unneeded, but if a snapshot is mapped they describe
>> information about that snapshot.
>>
>> In some contexts this can be a little bit confusing. So group these
>> mapping-related fields into a structure to make it clear what they
>> are describing.
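>>
>> To illustrate the intent, the grouped fields end up looking roughly
>> like this, and call sites change mechanically (a condensed sketch;
>> the complete definitions and conversions are in the diff below):
>>
>>     struct rbd_mapping {
>>         char *snap_name;   /* RBD_SNAP_HEAD_NAME if the base image is mapped */
>>         u64 snap_id;       /* CEPH_NOSNAP if the base image is mapped */
>>         bool snap_exists;
>>         bool read_only;    /* always true for a mapped snapshot */
>>     };
>>
>>     /* e.g. rbd_dev->snap_id becomes rbd_dev->mapping.snap_id */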
>>
>> This also includes a minor change that rearranges the fields in the
>> in-core image header structure so that invariant fields are at the
>> top, followed by those that change.
>>
>> Signed-off-by: Alex Elder <[email protected]>
>> ---
>> drivers/block/rbd.c | 83 ++++++++++++++++++++++++++++-----------------------
>> 1 file changed, 45 insertions(+), 38 deletions(-)
>>
>> diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
>> index eb6b772..dff6210 100644
>> --- a/drivers/block/rbd.c
>> +++ b/drivers/block/rbd.c
>> @@ -81,13 +81,15 @@
>> * block device image metadata (in-memory version)
>> */
>> struct rbd_image_header {
>> - u64 image_size;
>> + /* These four fields never change for a given rbd image */
>> char *object_prefix;
>> __u8 obj_order;
>> __u8 crypt_type;
>> __u8 comp_type;
>> - struct ceph_snap_context *snapc;
>>
>> + /* The remaining fields need to be updated occasionally */
>> + u64 image_size;
>> + struct ceph_snap_context *snapc;
>> char *snap_names;
>> u64 *snap_sizes;
>>
>> @@ -146,6 +148,13 @@ struct rbd_snap {
>> u64 id;
>> };
>>
>> +struct rbd_mapping {
>> + char *snap_name;
>> + u64 snap_id;
>> + bool snap_exists;
>> + bool read_only;
>> +};
>> +
>> /*
>> * a single device
>> */
>> @@ -174,13 +183,8 @@ struct rbd_device {
>>
>> /* protects updating the header */
>> struct rw_semaphore header_rwsem;
>> - /* name of the snapshot this device reads from */
>> - char *snap_name;
>> - /* id of the snapshot this device reads from */
>> - u64 snap_id; /* current snapshot id */
>> - /* whether the snap_id this device reads from still exists */
>> - bool snap_exists;
>> - bool read_only;
>> +
>> + struct rbd_mapping mapping;
>>
>> struct list_head node;
>>
>> @@ -261,11 +265,11 @@ static int rbd_open(struct block_device *bdev, fmode_t mode)
>> {
>> struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
>>
>> - if ((mode & FMODE_WRITE) && rbd_dev->read_only)
>> + if ((mode & FMODE_WRITE) && rbd_dev->mapping.read_only)
>> return -EROFS;
>>
>> rbd_get_dev(rbd_dev);
>> - set_device_ro(bdev, rbd_dev->read_only);
>> + set_device_ro(bdev, rbd_dev->mapping.read_only);
>>
>> return 0;
>> }
>> @@ -375,7 +379,7 @@ enum {
>> static match_table_t rbd_opts_tokens = {
>> /* int args above */
>> /* string args above */
>> - {Opt_read_only, "read_only"},
>> + {Opt_read_only, "read_only"},
>> {Opt_read_only, "ro"}, /* Alternate spelling */
>> {Opt_read_write, "read_write"},
>> {Opt_read_write, "rw"}, /* Alternate spelling */
>> @@ -583,13 +587,13 @@ static int rbd_header_from_disk(struct rbd_image_header *header,
>> header->snap_sizes = NULL;
>> }
>>
>> - header->image_size = le64_to_cpu(ondisk->image_size);
>> header->obj_order = ondisk->options.order;
>> header->crypt_type = ondisk->options.crypt_type;
>> header->comp_type = ondisk->options.comp_type;
>>
>> /* Allocate and fill in the snapshot context */
>>
>> + header->image_size = le64_to_cpu(ondisk->image_size);
>> size = sizeof (struct ceph_snap_context);
>> size += snap_count * sizeof (header->snapc->snaps[0]);
>> header->snapc = kzalloc(size, GFP_KERNEL);
>> @@ -645,23 +649,24 @@ static int rbd_header_set_snap(struct rbd_device *rbd_dev, u64 *size)
>>
>> down_write(&rbd_dev->header_rwsem);
>>
>> - if (!memcmp(rbd_dev->snap_name, RBD_SNAP_HEAD_NAME,
>> + if (!memcmp(rbd_dev->mapping.snap_name, RBD_SNAP_HEAD_NAME,
>> sizeof (RBD_SNAP_HEAD_NAME))) {
>> - rbd_dev->snap_id = CEPH_NOSNAP;
>> - rbd_dev->snap_exists = false;
>> - rbd_dev->read_only = rbd_dev->rbd_opts.read_only;
>> + rbd_dev->mapping.snap_id = CEPH_NOSNAP;
>> + rbd_dev->mapping.snap_exists = false;
>> + rbd_dev->mapping.read_only = rbd_dev->rbd_opts.read_only;
>> if (size)
>> *size = rbd_dev->header.image_size;
>> } else {
>> u64 snap_id = 0;
>>
>> - ret = snap_by_name(&rbd_dev->header, rbd_dev->snap_name,
>> + ret = snap_by_name(&rbd_dev->header,
>> + rbd_dev->mapping.snap_name,
>> &snap_id, size);
>> if (ret < 0)
>> goto done;
>> - rbd_dev->snap_id = snap_id;
>> - rbd_dev->snap_exists = true;
>> - rbd_dev->read_only = true; /* No choice for snapshots */
>> + rbd_dev->mapping.snap_id = snap_id;
>> + rbd_dev->mapping.snap_exists = true;
>> + rbd_dev->mapping.read_only = true;
>> }
>>
>> ret = 0;
>> @@ -1532,7 +1537,7 @@ static void rbd_rq_fn(struct request_queue *q)
>> size = blk_rq_bytes(rq);
>> ofs = blk_rq_pos(rq) * SECTOR_SIZE;
>> rq_bio = rq->bio;
>> - if (do_write && rbd_dev->read_only) {
>> + if (do_write && rbd_dev->mapping.read_only) {
>> __blk_end_request_all(rq, -EROFS);
>> continue;
>> }
>> @@ -1541,7 +1546,8 @@ static void rbd_rq_fn(struct request_queue *q)
>>
>> down_read(&rbd_dev->header_rwsem);
>>
>> - if (rbd_dev->snap_id != CEPH_NOSNAP && !rbd_dev->snap_exists) {
>> + if (rbd_dev->mapping.snap_id != CEPH_NOSNAP &&
>> + !rbd_dev->mapping.snap_exists) {
>> up_read(&rbd_dev->header_rwsem);
>> dout("request for non-existent snapshot");
>> spin_lock_irq(q->queue_lock);
>> @@ -1595,7 +1601,7 @@ static void rbd_rq_fn(struct request_queue *q)
>> coll, cur_seg);
>> else
>> rbd_req_read(rq, rbd_dev,
>> - rbd_dev->snap_id,
>> + rbd_dev->mapping.snap_id,
>> ofs,
>> op_size, bio,
>> coll, cur_seg);
>> @@ -1767,7 +1773,7 @@ static int rbd_header_add_snap(struct rbd_device *rbd_dev,
>> struct ceph_mon_client *monc;
>>
>> /* we should create a snapshot only if we're pointing at the head */
>> - if (rbd_dev->snap_id != CEPH_NOSNAP)
>> + if (rbd_dev->mapping.snap_id != CEPH_NOSNAP)
>> return -EINVAL;
>>
>> monc = &rbd_dev->rbd_client->client->monc;
>> @@ -1821,7 +1827,7 @@ static int __rbd_refresh_header(struct rbd_device *rbd_dev, u64 *hver)
>> down_write(&rbd_dev->header_rwsem);
>>
>> /* resized? */
>> - if (rbd_dev->snap_id == CEPH_NOSNAP) {
>> + if (rbd_dev->mapping.snap_id == CEPH_NOSNAP) {
>> sector_t size = (sector_t) h.image_size / SECTOR_SIZE;
>>
>> dout("setting size to %llu sectors", (unsigned long long) size);
>> @@ -2004,7 +2010,7 @@ static ssize_t rbd_snap_show(struct device *dev,
>> {
>> struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
>>
>> - return sprintf(buf, "%s\n", rbd_dev->snap_name);
>> + return sprintf(buf, "%s\n", rbd_dev->mapping.snap_name);
>> }
>>
>> static ssize_t rbd_image_refresh(struct device *dev,
>> @@ -2205,11 +2211,12 @@ static int rbd_dev_snap_devs_update(struct rbd_device *rbd_dev)
>>
>> /* Existing snapshot not in the new snap context */
>>
>> - if (rbd_dev->snap_id == snap->id)
>> - rbd_dev->snap_exists = false;
>> + if (rbd_dev->mapping.snap_id == snap->id)
>> + rbd_dev->mapping.snap_exists = false;
>> __rbd_remove_snap_dev(snap);
>> dout("%ssnap id %llu has been removed\n",
>> - rbd_dev->snap_id == snap->id ? "mapped " : "",
>> + rbd_dev->mapping.snap_id == snap->id ?
>> + "mapped " : "",
>> (unsigned long long) snap->id);
>>
>> /* Done with this list entry; advance */
>> @@ -2522,18 +2529,18 @@ static int rbd_add_parse_args(struct rbd_device *rbd_dev,
>> * The snapshot name is optional. If none is supplied,
>> * we use the default value.
>> */
>> - rbd_dev->snap_name = dup_token(&buf, &len);
>> - if (!rbd_dev->snap_name)
>> + rbd_dev->mapping.snap_name = dup_token(&buf, &len);
>> + if (!rbd_dev->mapping.snap_name)
>> goto out_err;
>> if (!len) {
>> /* Replace the empty name with the default */
>> - kfree(rbd_dev->snap_name);
>> - rbd_dev->snap_name
>> + kfree(rbd_dev->mapping.snap_name);
>> + rbd_dev->mapping.snap_name
>> = kmalloc(sizeof (RBD_SNAP_HEAD_NAME), GFP_KERNEL);
>> - if (!rbd_dev->snap_name)
>> + if (!rbd_dev->mapping.snap_name)
>> goto out_err;
>>
>> - memcpy(rbd_dev->snap_name, RBD_SNAP_HEAD_NAME,
>> + memcpy(rbd_dev->mapping.snap_name, RBD_SNAP_HEAD_NAME,
>> sizeof (RBD_SNAP_HEAD_NAME));
>> }
>>
>> @@ -2642,7 +2649,7 @@ err_out_client:
>> rbd_put_client(rbd_dev);
>> err_put_id:
>> if (rbd_dev->pool_name) {
>> - kfree(rbd_dev->snap_name);
>> + kfree(rbd_dev->mapping.snap_name);
>> kfree(rbd_dev->header_name);
>> kfree(rbd_dev->image_name);
>> kfree(rbd_dev->pool_name);
>> @@ -2695,7 +2702,7 @@ static void rbd_dev_release(struct device *dev)
>> unregister_blkdev(rbd_dev->major, rbd_dev->name);
>>
>> /* done with the id, and with the rbd_dev */
>> - kfree(rbd_dev->snap_name);
>> + kfree(rbd_dev->mapping.snap_name);
>> kfree(rbd_dev->header_name);
>> kfree(rbd_dev->pool_name);
>> kfree(rbd_dev->image_name);
>>