Re: [PATCH v4] btrfs-progs: fix page align issue for lzo compress in restore

2014-11-26 Thread Gui Hecheng
On Tue, 2014-10-14 at 11:32 +0200, David Sterba wrote:
 On Tue, Oct 14, 2014 at 10:06:16AM +0200, Marc Dietrich wrote:
  This hasn't landed in an btrfs-progs branch I found. Any update?
 
 I had it tagged for review and found something that needs fixing. The
 PAGE_CACHE_SIZE is hardcoded to 4k, this will break on filesystems with
 larger sectors (eg. the powerpc machines). I'll schedule the patch post
 3.17, with a fix.

Hi David,

I note that this patch is not yet in the latest integration, how's the
fix going?

-Gui

--
To unsubscribe from this list: send the line unsubscribe linux-btrfs in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


Re: [PATCH v4] btrfs-progs: fix page align issue for lzo compress in restore

2014-10-14 Thread Marc Dietrich
This hasn't landed in an btrfs-progs branch I found. Any update?

Marc

Am Dienstag, 23. September 2014, 16:34:54 schrieb Gui Hecheng:
 When running restore under lzo compression, bad compress length
 problems are encountered.
 It is because there is a page align problem with the @decompress_lzo,
 
 as follows:
   |--| ||-| |--|...|--|
 
 page ^page   page
 
 3 bytes left
 
   When lzo compresses pages in RAM, lzo will ensure that
   the 4 bytes len will be in one page as a whole.
   There is a situation that 3 (or less) bytes are left
   at the end of a page, and then the 4 bytes len is
   stored at the start of the next page.
   But the @decompress_lzo doesn't goto the start of
   the next page and continue to read the next 4 bytes
   which is across two pages, so a random value is fetched
   as a bad compress length.
 
 So we check page alignment every time before we are going to
 fetch the next @len and after the former piece of data is decompressed.
 If the current page that we reach has less than 4 bytes left,
 then we should fetch the next @len at the start of next page.
 
 Signed-off-by: Gui Hecheng guihc.f...@cn.fujitsu.com
 Reviewed-by: Marc Dietrich marvi...@gmx.de
 ---
 changelog
   v1-v2: adopt alignment check method suggested by Marc
   v2-v3: make code more readable
  v3-v4: keep type safety & reformat comments
 ---
  cmds-restore.c | 27 +--
  1 file changed, 25 insertions(+), 2 deletions(-)
 
 diff --git a/cmds-restore.c b/cmds-restore.c
 index e09acc4..1fe2df0 100644
 --- a/cmds-restore.c
 +++ b/cmds-restore.c
 @@ -56,7 +56,10 @@ static int get_xattrs = 0;
  static int dry_run = 0;
 
  #define LZO_LEN 4
 -#define PAGE_CACHE_SIZE 4096
 +#define PAGE_CACHE_SIZE 4096UL
 +#define PAGE_CACHE_MASK (~(PAGE_CACHE_SIZE - 1))
 +#define PAGE_CACHE_ALIGN(addr) (((addr) + PAGE_CACHE_SIZE - 1)   \
 +  & PAGE_CACHE_MASK)
  #define lzo1x_worst_compress(x) ((x) + ((x) / 16) + 64 + 3)
 
  static int decompress_zlib(char *inbuf, char *outbuf, u64 compress_len,
 @@ -93,6 +96,26 @@ static inline size_t read_compress_length(unsigned char *buf)
  return le32_to_cpu(dlen);
  }
 
 +static void align_if_need(size_t *tot_in, size_t *in_len)
 +{
 + size_t tot_in_aligned;
 + size_t bytes_left;
 +
 + tot_in_aligned = PAGE_CACHE_ALIGN(*tot_in);
 + bytes_left = tot_in_aligned - *tot_in;
 +
 + if (bytes_left >= LZO_LEN)
 + return;
 +
 + /*
 +  * The LZO_LEN bytes is guaranteed to be in one page as a whole,
 +  * so if a page has fewer than LZO_LEN bytes left, the LZO_LEN bytes
 +  * should be fetched at the start of the next page
 +  */
 + *in_len += bytes_left;
 + *tot_in = tot_in_aligned;
 +}
 +
  static int decompress_lzo(unsigned char *inbuf, char *outbuf, u64
 compress_len, u64 *decompress_len)
  {
 @@ -135,8 +158,8 @@ static int decompress_lzo(unsigned char *inbuf, char
 *outbuf, u64 compress_len, }
   out_len += new_len;
   outbuf += new_len;
 + align_if_need(&tot_in, &in_len);
   inbuf += in_len;
 - tot_in += in_len;
   }
 
   *decompress_len = out_len;


signature.asc
Description: This is a digitally signed message part.


Re: [PATCH v4] btrfs-progs: fix page align issue for lzo compress in restore

2014-10-14 Thread David Sterba
On Tue, Oct 14, 2014 at 10:06:16AM +0200, Marc Dietrich wrote:
 This hasn't landed in an btrfs-progs branch I found. Any update?

I had it tagged for review and found something that needs fixing. The
PAGE_CACHE_SIZE is hardcoded to 4k, this will break on filesystems with
larger sectors (eg. the powerpc machines). I'll schedule the patch post
3.17, with a fix.
--
To unsubscribe from this list: send the line unsubscribe linux-btrfs in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


Re: [PATCH v4] btrfs-progs: fix page align issue for lzo compress in restore

2014-09-23 Thread Gui Hecheng
On Tue, 2014-09-23 at 10:25 +0800, Gui Hecheng wrote:
 When running restore under lzo compression, bad compress length
 problems are encountered.
 It is because there is a page align problem with the @decompress_lzo,
 as follows:
   |--| ||-| |--|...|--|
 page ^page   page
  |
 3 bytes left
 
   When lzo compresses pages in RAM, lzo will ensure that
   the 4 bytes len will be in one page as a whole.
   There is a situation that 3 (or less) bytes are left
   at the end of a page, and then the 4 bytes len is
   stored at the start of the next page.
   But the @decompress_lzo doesn't goto the start of
   the next page and continue to read the next 4 bytes
   which is across two pages, so a random value is fetched
   as a bad compress length.
 
 So we check page alignment every time before we are going to
 fetch the next @len and after the former piece of data is decompressed.
 If the current page that we reach has less than 4 bytes left,
 then we should fetch the next @len at the start of next page.
 
 Signed-off-by: Gui Hecheng guihc.f...@cn.fujitsu.com
 Reviewed-by: Marc Dietrich marvi...@gmx.de
 ---
 changelog
   v1-v2: adopt alignment check method suggested by Marc
   v2-v3: make code more readable
   v3-v4: keep type safety
 ---
  cmds-restore.c | 29 +++--
  1 file changed, 27 insertions(+), 2 deletions(-)
 
 diff --git a/cmds-restore.c b/cmds-restore.c
 index 38a131e..fa5d5d1 100644
 --- a/cmds-restore.c
 +++ b/cmds-restore.c
 @@ -56,7 +56,10 @@ static int get_xattrs = 0;
  static int dry_run = 0;
  
  #define LZO_LEN 4
 -#define PAGE_CACHE_SIZE 4096
 +#define PAGE_CACHE_SIZE 4096UL
 +#define PAGE_CACHE_MASK (~(PAGE_CACHE_SIZE - 1))
 +#define PAGE_CACHE_ALIGN(addr) (((addr) + PAGE_CACHE_SIZE - 1)   \
 +  & PAGE_CACHE_MASK)
  #define lzo1x_worst_compress(x) ((x) + ((x) / 16) + 64 + 3)
  
  static int decompress_zlib(char *inbuf, char *outbuf, u64 compress_len,
 @@ -93,6 +96,28 @@ static inline size_t read_compress_length(unsigned char 
 *buf)
   return le32_to_cpu(dlen);
  }
  
 +static void align_if_need(size_t *tot_in, size_t *in_len)
 +{
 + size_t tot_in_aligned;
 + size_t bytes_left;
 +
 + tot_in_aligned = PAGE_CACHE_ALIGN(*tot_in);
 + bytes_left = tot_in_aligned - *tot_in;
 +
 + if (bytes_left >= LZO_LEN)
 + return;
 +
 + /*
 +  * The LZO_LEN bytes is guaranteed to be
 +  * in one page as a whole, so if a page
 +  * has fewer than LZO_LEN bytes left,
 +  * the LZO_LEN bytes should be fetched
 +  * at the start of the next page
 +  */
 + *in_len += bytes_left;
 + *tot_in = tot_in_aligned;
 +}
 +
  static int decompress_lzo(unsigned char *inbuf, char *outbuf, u64 
 compress_len,
 u64 *decompress_len)
  {
 @@ -135,8 +160,8 @@ static int decompress_lzo(unsigned char *inbuf, char 
 *outbuf, u64 compress_len,
   }
   out_len += new_len;
   outbuf += new_len;
 + align_if_need(&tot_in, &in_len);
   inbuf += in_len;
 - tot_in += in_len;
   }
  
   *decompress_len = out_len;

Sorry, please scratch this one, the comments should be reformated. 

--
To unsubscribe from this list: send the line unsubscribe linux-btrfs in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[PATCH v4] btrfs-progs: fix page align issue for lzo compress in restore

2014-09-23 Thread Gui Hecheng
When running restore under lzo compression, bad compress length
problems are encountered.
It is because there is a page align problem with the @decompress_lzo,
as follows:
|--| ||-| |--|...|--|
  page ^page   page
   |
  3 bytes left

When lzo compresses pages in RAM, lzo will ensure that
the 4 bytes len will be in one page as a whole.
There is a situation that 3 (or less) bytes are left
at the end of a page, and then the 4 bytes len is
stored at the start of the next page.
But the @decompress_lzo doesn't goto the start of
the next page and continue to read the next 4 bytes
which is across two pages, so a random value is fetched
as a bad compress length.

So we check page alignment every time before we are going to
fetch the next @len and after the former piece of data is decompressed.
If the current page that we reach has less than 4 bytes left,
then we should fetch the next @len at the start of next page.

Signed-off-by: Gui Hecheng guihc.f...@cn.fujitsu.com
Reviewed-by: Marc Dietrich marvi...@gmx.de
---
changelog
v1-v2: adopt alignment check method suggested by Marc
v2-v3: make code more readable
v3-v4: keep type safety & reformat comments
---
 cmds-restore.c | 27 +--
 1 file changed, 25 insertions(+), 2 deletions(-)

diff --git a/cmds-restore.c b/cmds-restore.c
index e09acc4..1fe2df0 100644
--- a/cmds-restore.c
+++ b/cmds-restore.c
@@ -56,7 +56,10 @@ static int get_xattrs = 0;
 static int dry_run = 0;
 
 #define LZO_LEN 4
-#define PAGE_CACHE_SIZE 4096
+#define PAGE_CACHE_SIZE 4096UL
+#define PAGE_CACHE_MASK (~(PAGE_CACHE_SIZE - 1))
+#define PAGE_CACHE_ALIGN(addr) (((addr) + PAGE_CACHE_SIZE - 1) \
+& PAGE_CACHE_MASK)
 #define lzo1x_worst_compress(x) ((x) + ((x) / 16) + 64 + 3)
 
 static int decompress_zlib(char *inbuf, char *outbuf, u64 compress_len,
@@ -93,6 +96,26 @@ static inline size_t read_compress_length(unsigned char *buf)
return le32_to_cpu(dlen);
 }
 
+static void align_if_need(size_t *tot_in, size_t *in_len)
+{
+   size_t tot_in_aligned;
+   size_t bytes_left;
+
+   tot_in_aligned = PAGE_CACHE_ALIGN(*tot_in);
+   bytes_left = tot_in_aligned - *tot_in;
+
+   if (bytes_left >= LZO_LEN)
+   return;
+
+   /*
+* The LZO_LEN bytes is guaranteed to be in one page as a whole,
+* so if a page has fewer than LZO_LEN bytes left, the LZO_LEN bytes
+* should be fetched at the start of the next page
+*/
+   *in_len += bytes_left;
+   *tot_in = tot_in_aligned;
+}
+
 static int decompress_lzo(unsigned char *inbuf, char *outbuf, u64 compress_len,
  u64 *decompress_len)
 {
@@ -135,8 +158,8 @@ static int decompress_lzo(unsigned char *inbuf, char 
*outbuf, u64 compress_len,
}
out_len += new_len;
outbuf += new_len;
+   align_if_need(&tot_in, &in_len);
inbuf += in_len;
-   tot_in += in_len;
}
 
*decompress_len = out_len;
-- 
1.8.1.4

--
To unsubscribe from this list: send the line unsubscribe linux-btrfs in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[PATCH v4] btrfs-progs: fix page align issue for lzo compress in restore

2014-09-22 Thread Gui Hecheng
When running restore under lzo compression, bad compress length
problems are encountered.
It is because there is a page align problem with the @decompress_lzo,
as follows:
|--| ||-| |--|...|--|
  page ^page   page
   |
  3 bytes left

When lzo compresses pages in RAM, lzo will ensure that
the 4 bytes len will be in one page as a whole.
There is a situation that 3 (or less) bytes are left
at the end of a page, and then the 4 bytes len is
stored at the start of the next page.
But the @decompress_lzo doesn't goto the start of
the next page and continue to read the next 4 bytes
which is across two pages, so a random value is fetched
as a bad compress length.

So we check page alignment every time before we are going to
fetch the next @len and after the former piece of data is decompressed.
If the current page that we reach has less than 4 bytes left,
then we should fetch the next @len at the start of next page.

Signed-off-by: Gui Hecheng guihc.f...@cn.fujitsu.com
Reviewed-by: Marc Dietrich marvi...@gmx.de
---
changelog
v1-v2: adopt alignment check method suggested by Marc
v2-v3: make code more readable
v3-v4: keep type safety
---
 cmds-restore.c | 29 +++--
 1 file changed, 27 insertions(+), 2 deletions(-)

diff --git a/cmds-restore.c b/cmds-restore.c
index 38a131e..fa5d5d1 100644
--- a/cmds-restore.c
+++ b/cmds-restore.c
@@ -56,7 +56,10 @@ static int get_xattrs = 0;
 static int dry_run = 0;
 
 #define LZO_LEN 4
-#define PAGE_CACHE_SIZE 4096
+#define PAGE_CACHE_SIZE 4096UL
+#define PAGE_CACHE_MASK (~(PAGE_CACHE_SIZE - 1))
+#define PAGE_CACHE_ALIGN(addr) (((addr) + PAGE_CACHE_SIZE - 1) \
+& PAGE_CACHE_MASK)
 #define lzo1x_worst_compress(x) ((x) + ((x) / 16) + 64 + 3)
 
 static int decompress_zlib(char *inbuf, char *outbuf, u64 compress_len,
@@ -93,6 +96,28 @@ static inline size_t read_compress_length(unsigned char *buf)
return le32_to_cpu(dlen);
 }
 
+static void align_if_need(size_t *tot_in, size_t *in_len)
+{
+   size_t tot_in_aligned;
+   size_t bytes_left;
+
+   tot_in_aligned = PAGE_CACHE_ALIGN(*tot_in);
+   bytes_left = tot_in_aligned - *tot_in;
+
+   if (bytes_left >= LZO_LEN)
+   return;
+
+   /*
+* The LZO_LEN bytes is guaranteed to be
+* in one page as a whole, so if a page
+* has fewer than LZO_LEN bytes left,
+* the LZO_LEN bytes should be fetched
+* at the start of the next page
+*/
+   *in_len += bytes_left;
+   *tot_in = tot_in_aligned;
+}
+
 static int decompress_lzo(unsigned char *inbuf, char *outbuf, u64 compress_len,
  u64 *decompress_len)
 {
@@ -135,8 +160,8 @@ static int decompress_lzo(unsigned char *inbuf, char 
*outbuf, u64 compress_len,
}
out_len += new_len;
outbuf += new_len;
+   align_if_need(&tot_in, &in_len);
inbuf += in_len;
-   tot_in += in_len;
}
 
*decompress_len = out_len;
-- 
1.8.1.4

--
To unsubscribe from this list: send the line unsubscribe linux-btrfs in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html