Range lengths are usually expressed as bytes in the VFS, so switch fscrypt_zeroout_range_inline_crypt to this convention.
Signed-off-by: Christoph Hellwig <[email protected]>
---
 fs/crypto/bio.c | 10 +++-------
 1 file changed, 3 insertions(+), 7 deletions(-)

diff --git a/fs/crypto/bio.c b/fs/crypto/bio.c
index 07d757d2777e..be751bcd2976 100644
--- a/fs/crypto/bio.c
+++ b/fs/crypto/bio.c
@@ -73,8 +73,6 @@ static int fscrypt_zeroout_range_inline_crypt(const struct inode *inode,
					       loff_t pos, sector_t sector,
					       unsigned int len)
 {
-	const unsigned int blockbits = inode->i_blkbits;
-	const unsigned int blocks_per_page = 1 << (PAGE_SHIFT - blockbits);
 	struct fscrypt_zero_done done = {
 		.pending = ATOMIC_INIT(1),
 		.done = COMPLETION_INITIALIZER_ONSTACK(done.done),
@@ -92,12 +90,10 @@ static int fscrypt_zeroout_range_inline_crypt(const struct inode *inode,
 		fscrypt_set_bio_crypt_ctx(bio, inode, pos, GFP_NOFS);

 		for (n = 0; n < BIO_MAX_VECS; n++) {
-			unsigned int blocks_this_page =
-				min(len, blocks_per_page);
-			unsigned int bytes_this_page = blocks_this_page << blockbits;
+			unsigned int bytes_this_page = min(len, PAGE_SIZE);

 			__bio_add_page(bio, ZERO_PAGE(0), bytes_this_page, 0);
-			len -= blocks_this_page;
+			len -= bytes_this_page;
 			pos += bytes_this_page;
 			sector += (bytes_this_page >> SECTOR_SHIFT);
 			if (!len || !fscrypt_mergeable_bio(bio, inode, pos))
@@ -155,7 +151,7 @@ int fscrypt_zeroout_range(const struct inode *inode, pgoff_t lblk,

 	if (fscrypt_inode_uses_inline_crypto(inode))
 		return fscrypt_zeroout_range_inline_crypt(inode, pos, sector,
-							  len);
+							  len << inode->i_blkbits);

 	BUILD_BUG_ON(ARRAY_SIZE(pages) > BIO_MAX_VECS);
 	nr_pages = min_t(u64, ARRAY_SIZE(pages),
--
2.47.3
