I don't know why you like 512 so much.
It's not that I like 512 so much; I just think it is better to attach a few
zero bits too many than to possibly lose some one bits by choosing a bsize
that is too big.
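As a rough illustration (purely hypothetical numbers, not taken from the
patch): converting a block count to a smaller bsize is an exact
multiplication, while converting to a bigger bsize is an integer division
that can drop a remainder:

/* user-space sketch: why a small aufs bsize is lossless while a big
 * one can drop blocks; the values below are made up */
#include <stdio.h>

int main(void)
{
	unsigned long long branch_blocks = 1001;
	long branch_bsize = 512;

	/* aufs bsize 256: exact multiplication, only zero bits appended */
	printf("256:  %llu blocks\n", branch_blocks * (branch_bsize / 256));
	/* aufs bsize 4096: 1001 / 8 == 125, one 512-byte block is lost */
	printf("4096: %llu blocks\n", branch_blocks / (4096 / branch_bsize));
	return 0;
}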
If we choose to fix the aufs bsize, I'd say the bsize of the first branch is
best. That is what my very first version did.
Now you wrote the multiplication variant and I wrote the division
variant, which means we can choose any value as the aufs bsize!
What you describe is almost what I wrote in the last patch; the only
difference is explained below.
So, I'd like to implement it like this:
- the aufs bsize is the one of the first branch.
- when adding in the sum mode,
  + if the branch bsize is greater than the aufs bsize, then we calculate the number of blocks by division.
  + if the branch bsize is smaller than the aufs bsize, then we calculate the number of blocks by multiplication.
Here I avoid losing precision: instead of dividing the branch values down,
the sums collected so far are multiplied up whenever bsize is lowered. So
bsize goes down and is not fixed any more, and at the end of the loop it is
always equal to the lowest bsize of all branches.
I think this is the way to go. No array to allocate, flexible bsize,
always the same precision as our branches.
And AFAIK divisions are slower than multiplications (especially 64-bit
ones), so the whole thing is also faster...
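For illustration, here is a small user-space sketch of that invariant with
hypothetical branch values (4096, 512 and 1024 bytes); the real code is in
the patch below:

/* user-space sketch: the running total is always expressed in the
 * smallest bsize seen so far, so no branch loses precision */
#include <stdio.h>

int main(void)
{
	long bsizes[] = { 4096, 512, 1024 };		/* hypothetical */
	unsigned long long counts[] = { 100, 800, 400 };
	unsigned long long blocks = 0;
	long bsize = 0, f;
	int i;

	for (i = 0; i < 3; i++) {
		if (bsize == 0 || bsize > bsizes[i]) {
			/* rescale the old sum into the new, smaller unit;
			 * on the first pass f is 0, but blocks is 0 too */
			f = bsize / bsizes[i];
			blocks *= f;
			bsize = bsizes[i];
		}
		/* add this branch, converted to the current unit */
		f = bsizes[i] / bsize;
		blocks += counts[i] * f;
		printf("bsize=%ld blocks=%llu\n", bsize, blocks);
	}
	return 0;
}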
Attached is the same patch again, because I forgot to CC the last mail.
I was able to compile and successfully test it now.
Daniel
This is a variant of au_statfs_sum which does not need to allocate an array.
It uses the lowest bsize in use; the trick is that bsize is lowered during the
loop while blocks etc. are expanded to match. Again there is a new helper,
au_mul_till_max(a, mul), which returns a * mul and saturates at ULLONG_MAX on
overflow; combined with the existing au_add_till_max() it gives the
overflow-checked a + b * mul.
Index: aufs3-standalone.git/fs/aufs/super.c
===================================================================
--- aufs3-standalone.git.orig/fs/aufs/super.c	2012-03-14 21:27:03.000000000 +0100
+++ aufs3-standalone.git/fs/aufs/super.c	2012-03-15 00:23:38.000000000 +0100
@@ -323,6 +323,17 @@
return ULLONG_MAX;
}
+static u64 au_mul_till_max(u64 a, long mul)
+{
+ u64 old;
+
+ old = a;
+ a *= mul;
+ if (old <= a)
+ return a;
+ return ULLONG_MAX;
+}
+
static u64 au_div_rup(u64 a, u64 b)
{
u64 ret;
@@ -336,81 +347,59 @@
static int au_statfs_sum(struct super_block *sb, struct kstatfs *buf)
{
int err;
- long biggest;
+ long bsize, f;
u64 blocks, bfree, bavail, files, ffree;
aufs_bindex_t bend, bindex, i;
unsigned char shared;
struct path h_path;
struct super_block *h_sb;
- struct {
- long bsize;
- u64 blocks, bfree, bavail;
- } *bs;
- err = -ENOMEM;
bend = au_sbend(sb);
- /*
- * if you really need so many branches, then replace kmalloc/kfree by
- * vmalloc/vfree.
- */
- BUILD_BUG_ON(sizeof(*bs) * AUFS_BRANCH_MAX > KMALLOC_MAX_SIZE);
- bs = kmalloc(sizeof(*bs) * (bend + 1), GFP_NOFS);
- if (unlikely(!bs))
- goto out;
- biggest = 0;
+ bsize = 0;
files = 0;
ffree = 0;
+ blocks = 0;
+ bfree = 0;
+ bavail = 0;
for (bindex = 0; bindex <= bend; bindex++) {
h_path.mnt = au_sbr_mnt(sb, bindex);
h_sb = h_path.mnt->mnt_sb;
shared = 0;
for (i = 0; !shared && i < bindex; i++)
shared = (au_sbr_sb(sb, i) == h_sb);
- if (shared) {
- bs[bindex].bsize = 0;
+ if (shared)
continue;
- }
/* sb->s_root for NFS is unreliable */
h_path.dentry = h_path.mnt->mnt_root;
err = vfs_statfs(&h_path, buf);
if (unlikely(err))
- goto out_bs;
+ goto out;
+
+ if (bsize == 0 || bsize > buf->f_bsize) {
+ /* we will reduce bsize, so we have to expand blocks
+ etc. to match them again */
+ f = (bsize / buf->f_bsize);
+ blocks = au_mul_till_max(blocks, f);
+ bfree = au_mul_till_max(bfree, f);
+ bavail = au_mul_till_max(bavail, f);
+ bsize = buf->f_bsize;
+ }
- if (buf->f_bsize > biggest)
- biggest = buf->f_bsize;
- bs[bindex].bsize = buf->f_bsize;
- bs[bindex].blocks = buf->f_blocks;
- bs[bindex].bfree = buf->f_bfree;
- bs[bindex].bavail = buf->f_bavail;
+ f = (buf->f_bsize / bsize);
+ blocks = au_add_till_max(blocks,
+ au_mul_till_max(buf->f_blocks, f));
+ bfree = au_add_till_max(bfree,
+ au_mul_till_max(buf->f_bfree, f));
+ bavail = au_add_till_max(bavail,
+ au_mul_till_max(buf->f_bavail, f));
files = au_add_till_max(files, buf->f_files);
ffree = au_add_till_max(ffree, buf->f_ffree);
}
- blocks = 0;
- bfree = 0;
- bavail = 0;
- for (bindex = 0; bindex <= bend; bindex++) {
- if (!bs[bindex].bsize)
- continue;
- if (bs[bindex].bsize != biggest) {
- bs[bindex].bsize = biggest / bs[bindex].bsize;
- bs[bindex].blocks = au_div_rup(bs[bindex].blocks,
- bs[bindex].bsize);
- bs[bindex].bfree = au_div_rup(bs[bindex].bfree,
- bs[bindex].bsize);
- bs[bindex].bavail = au_div_rup(bs[bindex].bavail,
- bs[bindex].bsize);
- }
-
- blocks = au_add_till_max(blocks, bs[bindex].blocks);
- bfree = au_add_till_max(bfree, bs[bindex].bfree);
- bavail = au_add_till_max(bavail, bs[bindex].bavail);
- }
-
- buf->f_bsize = biggest;
+ buf->f_bsize = bsize;
buf->f_blocks = blocks;
buf->f_bfree = bfree;
buf->f_bavail = bavail;
@@ -418,8 +407,6 @@
buf->f_ffree = ffree;
buf->f_frsize = 0;
-out_bs:
- kfree(bs);
out:
return err;
}
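For what it's worth, a quick user-space copy of the new helper shows the
intended saturation behaviour (the values below are made up; the overflow
heuristic simply mirrors the existing au_add_till_max()):

#include <stdio.h>
#include <limits.h>

typedef unsigned long long u64;

/* user-space copy of the helper added by the patch above */
static u64 au_mul_till_max(u64 a, long mul)
{
	u64 old;

	old = a;
	a *= mul;
	if (old <= a)
		return a;
	return ULLONG_MAX;
}

int main(void)
{
	/* 3 * 8 fits, so the product is returned unchanged */
	printf("%llu\n", au_mul_till_max(3, 8));
	/* (ULLONG_MAX / 2 + 1) * 2 wraps around, so the result is capped */
	printf("%llu\n", au_mul_till_max(ULLONG_MAX / 2 + 1, 2));
	return 0;
}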