Daniel Alder: > The unit byte was only an intermediate value. But if you take 256 > instead it is just more complicated: > > if reported bsize is 131072: > (7*(131072/512) + 5*(4096/512) + ...) / (131072/512) > > Maybe we could fix bsize to a constant 512. In that case it's just: > > (7*(131072/512) + 5*(4096/512) + ...) > > or as formula: sum of all ( buf->blocks * ( buf->bsize / 512 ) ) > > Is that what you mean?
No. I know the multiplication is intermediate. But the overflow can happen in the intermediate multiplication, can't it? What I meant is that 1 to 32 (or 63) blocks on 4k bsize should be counted as 1 for 128k bsize. No multiplication is necessary. > In the aggregation loop you have to normalize to a known unit, which > will also be the reported bsize. This normalization needs both > multiplication and division. No. I don't want multiplication. Actually it is unnecessary since we count in units of blocks instead of bytes. > bsize can be any power of 2, either one of the source file systems or a > completely different one like a constant 512. Fixing the aufs bsize is a good idea, but I am afraid it may confuse users since the fixed bsize may not match any of the real bsizes on the branches. Just for your information, here is my current patch (after applying all three of your patches). But I am considering replacing au_div_rup() by plain div64_u64(). J. R. Okajima ---------------------------------------------------------------------- commit ec0b6464d8dd8588bb92057ced5ceff3436c908a Author: J. R. Okajima <hooano...@yahoo.co.jp> Date: Wed Mar 14 14:19:07 2012 +0900 aufs: statfs, support various bsize Signed-off-by: J. R. 
Okajima <hooano...@yahoo.co.jp> diff --git a/fs/aufs/super.c b/fs/aufs/super.c index abb685f..a9d18dc 100644 --- a/fs/aufs/super.c +++ b/fs/aufs/super.c @@ -323,59 +323,103 @@ static u64 au_add_muldiv_till_max(u64 a, u64 b, u64 mul, u64 div) return ULLONG_MAX; } +static u64 au_div_rup(u64 a, u64 b) +{ + u64 ret; + + ret = div64_u64(a, b); + if (ret * b != a) + ret++; + return ret; +} + static int au_statfs_sum(struct super_block *sb, struct kstatfs *buf) { int err; - u64 blocks, bfree, bavail, files, ffree, bsize; + long biggest; + u64 blocks, bfree, bavail, files, ffree; aufs_bindex_t bend, bindex, i; unsigned char shared; struct path h_path; struct super_block *h_sb; + struct { + long bsize; + u64 blocks, bfree, bavail; + } *bs; - blocks = 0; - bfree = 0; - bavail = 0; - files = 0; - ffree = 0; - - // we need an initial bsize to calculate correct f_blocks - h_path.mnt = au_sbr_mnt(sb, 0); - h_path.dentry = h_path.mnt->mnt_root; - err = vfs_statfs(&h_path, buf); - if (unlikely(err)) + err = -ENOMEM; + bend = au_sbend(sb); + /* + * if you really need so many branches, then replace kmalloc/kfree by + * vmalloc/vfree. 
+ */ + BUILD_BUG_ON(sizeof(*bs) * AUFS_BRANCH_MAX > KMALLOC_MAX_SIZE); + bs = kmalloc(sizeof(*bs) * (bend + 1), GFP_NOFS); + if (unlikely(!bs)) goto out; - bsize = buf->f_bsize; - err = 0; - bend = au_sbend(sb); - for (bindex = bend; bindex >= 0; bindex--) { + biggest = 0; + files = 0; + ffree = 0; + for (bindex = 0; bindex <= bend; bindex++) { h_path.mnt = au_sbr_mnt(sb, bindex); h_sb = h_path.mnt->mnt_sb; shared = 0; - for (i = bindex + 1; !shared && i <= bend; i++) + for (i = 0; !shared && i < bindex; i++) shared = (au_sbr_sb(sb, i) == h_sb); - if (shared) + if (shared) { + bs[bindex].bsize = 0; continue; + } /* sb->s_root for NFS is unreliable */ h_path.dentry = h_path.mnt->mnt_root; err = vfs_statfs(&h_path, buf); if (unlikely(err)) - goto out; + goto out_bs; + + if (buf->f_bsize > biggest) + biggest = buf->f_bsize; + bs[bindex].bsize = buf->f_bsize; + bs[bindex].blocks = buf->f_blocks; + bs[bindex].bfree = buf->f_bfree; + bs[bindex].bavail = buf->f_bavail; - blocks = au_add_muldiv_till_max(blocks, buf->f_blocks, buf->f_bsize, bsize ); - bfree = au_add_muldiv_till_max(bfree, buf->f_bfree, buf->f_bsize, bsize ); - bavail = au_add_muldiv_till_max(bavail, buf->f_bavail, buf->f_bsize, bsize ); files = au_add_till_max(files, buf->f_files); ffree = au_add_till_max(ffree, buf->f_ffree); } + blocks = 0; + bfree = 0; + bavail = 0; + for (bindex = 0; bindex <= bend; bindex++) { + if (!bs[bindex].bsize) + continue; + if (bs[bindex].bsize != biggest) { + bs[bindex].bsize = biggest / bs[bindex].bsize; + bs[bindex].blocks = au_div_rup(bs[bindex].blocks, + bs[bindex].bsize); + bs[bindex].bfree = au_div_rup(bs[bindex].bfree, + bs[bindex].bsize); + bs[bindex].bavail = au_div_rup(bs[bindex].bavail, + bs[bindex].bsize); + } + + blocks = au_add_till_max(blocks, bs[bindex].blocks); + bfree = au_add_till_max(bfree, bs[bindex].bfree); + bavail = au_add_till_max(bavail, bs[bindex].bavail); + } + + buf->f_bsize = biggest; buf->f_blocks = blocks; buf->f_bfree = bfree; buf->f_bavail = 
bavail; buf->f_files = files; buf->f_ffree = ffree; + buf->f_frsize = 0; +out_bs: + kfree(bs); out: return err; } ------------------------------------------------------------------------------ Virtualization & Cloud Management Using Capacity Planning Cloud computing makes use of virtualization - but cloud computing also focuses on allowing computing to be delivered as a service. http://www.accelacomm.com/jaw/sfnl/114/51521223/