On 3/21/19 8:15 AM, Jens Axboe wrote:
> You also haven't solved the issue of now having an extra bit, 2/2 uses
> the last bit which the other patch already took...
Here's one way — kill BIO_SEG_VALID entirely. We can just use
->bi_phys_segments itself to tell whether the count is valid or not.
This patch uses -1 to signify that it's not.
Totally untested...
diff --git a/block/bio.c b/block/bio.c
index 71a78d9fb8b7..4bc165e7ca43 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -280,6 +280,7 @@ void bio_init(struct bio *bio, struct bio_vec *table,
unsigned short max_vecs)
{
memset(bio, 0, sizeof(*bio));
+ bio->bi_phys_segments = -1;
atomic_set(&bio->__bi_remaining, 1);
atomic_set(&bio->__bi_cnt, 1);
@@ -305,6 +306,7 @@ void bio_reset(struct bio *bio)
bio_uninit(bio);
memset(bio, 0, BIO_RESET_BYTES);
+ bio->bi_phys_segments = -1;
bio->bi_flags = flags;
atomic_set(&bio->__bi_remaining, 1);
}
@@ -573,7 +575,7 @@ EXPORT_SYMBOL(bio_put);
int bio_phys_segments(struct request_queue *q, struct bio *bio)
{
- if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
+ if (unlikely(bio->bi_phys_segments == -1))
blk_recount_segments(q, bio);
return bio->bi_phys_segments;
@@ -731,7 +733,7 @@ int bio_add_pc_page(struct request_queue *q, struct bio
*bio, struct page
/* If we may be able to merge these biovecs, force a recount */
if (bio->bi_vcnt > 1 && biovec_phys_mergeable(q, bvec - 1, bvec))
- bio_clear_flag(bio, BIO_SEG_VALID);
+ bio->bi_phys_segments = -1;
done:
return len;
@@ -1913,10 +1915,8 @@ void bio_trim(struct bio *bio, int offset, int size)
if (offset == 0 && size == bio->bi_iter.bi_size)
return;
- bio_clear_flag(bio, BIO_SEG_VALID);
-
+ bio->bi_phys_segments = -1;
bio_advance(bio, offset << 9);
-
bio->bi_iter.bi_size = size;
if (bio_integrity(bio))
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 1c9d4f0f96ea..57cea2782a76 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -343,7 +343,6 @@ void blk_queue_split(struct request_queue *q, struct bio
**bio)
/* physical segments can be figured out during splitting */
res = split ? split : *bio;
res->bi_phys_segments = nsegs;
- bio_set_flag(res, BIO_SEG_VALID);
if (split) {
/* there isn't chance to merge the splitted bio */
@@ -440,8 +439,6 @@ void blk_recount_segments(struct request_queue *q, struct
bio *bio)
bio->bi_next = NULL;
bio->bi_phys_segments = __blk_recalc_rq_segments(q, bio);
bio->bi_next = nxt;
-
- bio_set_flag(bio, BIO_SEG_VALID);
}
static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
@@ -651,9 +648,9 @@ int ll_back_merge_fn(struct request_queue *q, struct
request *req,
req_set_nomerge(q, req);
return 0;
}
- if (!bio_flagged(req->biotail, BIO_SEG_VALID))
+ if (req->biotail->bi_phys_segments == -1)
blk_recount_segments(q, req->biotail);
- if (!bio_flagged(bio, BIO_SEG_VALID))
+ if (bio->bi_phys_segments == -1)
blk_recount_segments(q, bio);
return ll_new_hw_segment(q, req, bio);
@@ -673,9 +670,9 @@ int ll_front_merge_fn(struct request_queue *q, struct
request *req,
req_set_nomerge(q, req);
return 0;
}
- if (!bio_flagged(bio, BIO_SEG_VALID))
+ if (bio->bi_phys_segments == -1)
blk_recount_segments(q, bio);
- if (!bio_flagged(req->bio, BIO_SEG_VALID))
+ if (req->bio->bi_phys_segments == -1)
blk_recount_segments(q, req->bio);
return ll_new_hw_segment(q, req, bio);
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index c033bfcb209e..79eb54dcf0f9 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -5247,7 +5247,7 @@ static int raid5_read_one_chunk(struct mddev *mddev,
struct bio *raid_bio)
rcu_read_unlock();
raid_bio->bi_next = (void*)rdev;
bio_set_dev(align_bi, rdev->bdev);
- bio_clear_flag(align_bi, BIO_SEG_VALID);
+ align_bi->bi_phys_segments = -1;
if (is_badblock(rdev, align_bi->bi_iter.bi_sector,
bio_sectors(align_bi),
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index d66bf5f32610..472059e92071 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -215,7 +215,6 @@ struct bio {
/*
* bio flags
*/
-#define BIO_SEG_VALID 1 /* bi_phys_segments valid */
#define BIO_CLONED 2 /* doesn't own data */
#define BIO_BOUNCED 3 /* bio is a bounce bio */
#define BIO_USER_MAPPED 4 /* contains user pages */
--
Jens Axboe