fibmap: Fix file_pos overflow on large files

fibmap prints incorrect file_pos values for large files: f_pos is a 32-bit
field and is printed with %d, so byte offsets of 2 GiB and above show up as
negative numbers, and offsets of 4 GiB and above are truncated. Make f_pos
64-bit and do the block-offset multiplication in 64-bit arithmetic.
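
For reference, a minimal standalone sketch of the wraparound (not part of
fibmap.c; 64-bit userland assumed, and the block index 680691 / 4096-byte
block size correspond to the 2788110336 offset in the sample output below):

  #include <stdio.h>
  #include <linux/types.h>

  int main(void)
  {
          /* Block index and block size matching the sample run below. */
          int i = 680691;
          long blksize = 4096;                 /* st.st_blksize */

          __u32 old_pos = i * blksize;         /* old 32-bit f_pos */
          __u64 new_pos = (__u64)i * blksize;  /* patched 64-bit f_pos */

          /* Old code printed f_pos with %d: shows -1506856960. */
          printf("old: %d\n", (int)old_pos);
          /* Patched code prints a 64-bit value: shows 2788110336. */
          printf("new: %llu\n", (unsigned long long)new_pos);
          return 0;
  }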

Before:

----------------file info-------------------
/data/media/0/data1 :
--------------------------------------------
dev       [254:18]
ino       [0x    4db1 : 19889]
mode      [0x    81b0 : 33200]
nlink     [0x       1 : 1]
uid       [0x    280e : 10254]
gid       [0x     3ff : 1023]
size      [0x1b3dca314 : 7312548628]
blksize   [0x    1000 : 4096]
blocks    [0x  da2530 : 14296368]
--------------------------------------------

file_pos   start_blk     end_blk        blks
       0     3197602     3198463         862
 3530752     3197509     3197509           1
 3534848     3197557     3197578          22
 3624960     3198464     3396701      198238
815607808     3396703     3632480      235778
1781354496     3632482     3652095       19614
1861693440     3396702     3396702           1
1861697536     3632481     3632481           1
1861701632     1514948     1514948           1
1861705728     1518774     1518774           1
1861709824     2543104     2543125          22
...
1862111232     2457813     2457813           1
1862115328     3652096     3878168      226073
-1506856960     3878170     4133725      255556
-460099584     1510048     1510052           5
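
The two negative offsets above are the correct byte offsets interpreted as
signed 32-bit values (two's complement):

  2788110336 - 2^32 = -1506856960
  3834867712 - 2^32 =  -460099584

which matches the patched output below.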

Patched:
----------------file info-------------------
/data/media/0/data1 :
--------------------------------------------
dev       [254:18]
ino       [0x    4db1 : 19889]
mode      [0x    81b0 : 33200]
nlink     [0x       1 : 1]
uid       [0x    280e : 10254]
gid       [0x     3ff : 1023]
size      [0x1b3dca314 : 7312548628]
blksize   [0x    1000 : 4096]
blocks    [0x  da2530 : 14296368]
--------------------------------------------

file_pos   start_blk     end_blk        blks
       0     3197602     3198463         862
 3530752     3197509     3197509           1
 3534848     3197557     3197578          22
 3624960     3198464     3396701      198238
815607808     3396703     3632480      235778
1781354496     3632482     3652095       19614
1861693440     3396702     3396702           1
1861697536     3632481     3632481           1
1861701632     1514948     1514948           1
1861705728     1518774     1518774           1
1861709824     2543104     2543125          22
...
1862111232     2457813     2457813           1
1862115328     3652096     3878168      226073
2788110336     3878170     4133725      255556
3834867712     1510048     1510052           5

Change-Id: Ic2486e25ea03114d4dbf3651650c6a2399db0714
Signed-off-by: Dylan Chang <[email protected]>
---
 tools/fibmap.c | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/tools/fibmap.c b/tools/fibmap.c
index 3238f29..3217600 100644
--- a/tools/fibmap.c
+++ b/tools/fibmap.c
@@ -47,7 +47,7 @@
 #endif
 
 struct file_ext {
-       __u32 f_pos;
+       __u64 f_pos;
        __u32 start_blk;
        __u32 end_blk;
        __u32 blk_count;
@@ -56,9 +56,9 @@ struct file_ext {
 void print_ext(struct file_ext *ext)
 {
        if (ext->end_blk == 0)
-               printf("%8d    %8d    %8d    %8d\n", ext->f_pos, 0, 0, ext->blk_count);
+               printf("%8llu    %8u    %8u    %8u\n", ext->f_pos, 0, 0, ext->blk_count);
        else
-               printf("%8d    %8d    %8d    %8d\n", ext->f_pos, ext->start_blk,
+               printf("%8llu    %8u    %8u    %8u\n", ext->f_pos, ext->start_blk,
                                        ext->end_blk, ext->blk_count);
 }
 
@@ -209,7 +209,7 @@ int main(int argc, char *argv[])
                        ext.blk_count++;
                } else {
                        print_ext(&ext);
-                       ext.f_pos = i * st.st_blksize;
+                       ext.f_pos = (__u64)i * st.st_blksize;
                        ext.start_blk = blknum;
                        ext.end_blk = blknum;
                        ext.blk_count = 1;
-- 
2.17.1


