Re: [f2fs-dev] [PATCH v4] fsverity: stop using PG_error to track error status

2022-11-29 Thread Jaegeuk Kim
On 11/28, Eric Biggers wrote:
> On Mon, Nov 28, 2022 at 10:48:41PM -0800, Jaegeuk Kim wrote:
> > >  static void f2fs_finish_read_bio(struct bio *bio, bool in_task)
> > >  {
> > >   struct bio_vec *bv;
> > >   struct bvec_iter_all iter_all;
> > > + struct bio_post_read_ctx *ctx = bio->bi_private;
> > >  
> > > - /*
> > > -  * Update and unlock the bio's pagecache pages, and put the
> > > -  * decompression context for any compressed pages.
> > > -  */
> > >   bio_for_each_segment_all(bv, bio, iter_all) {
> > >   struct page *page = bv->bv_page;
> > >  
> > >   if (f2fs_is_compressed_page(page)) {
> > > - if (bio->bi_status)
> > > + if (!ctx->decompression_attempted)
> > 
> > It seems this causes a panic due to the ctx being nullified by f2fs_verify_bio().
> > 
> 
> Thanks for catching that!  I've sent out v5 that checks for 'ctx &&
> !ctx->decompression_attempted' here.  That's the right thing to do, since if
> ctx is NULL then decompression must have been attempted.
> 
> I'd like to get rid of freeing the bio_post_read_ctx in f2fs_verify_bio().
> But I believe it's still needed, at least in theory.
> 
> Do you have a suggestion for testing f2fs compression + verity with xfstests?
> I missed this because compression isn't covered by the "verity" group tests.
> Maybe there should be an "f2fs/compress" config in xfstests-bld that uses mkfs
> and mount options that cause all files to be automatically compressed, similar
> to how f2fs/encrypt automatically encrypts all files with 
> test_dummy_encryption.

I ran an fsstress + fault_injection + shutdown loop over compressed and
non-compressed directories with:

# mkfs.f2fs -f -O extra_attr -O project_quota -O compression -g android /dev/$DEV
# mount -t f2fs -o discard,fsync_mode=nobarrier,reserve_root=32768,checkpoint_merge,atgc,compress_cache /dev/$DEV $TESTDIR
# mkdir $TESTDIR/comp
# f2fs_io setflags compression $TESTDIR/comp
# fsstress [options] -d $TESTDIR/comp

I think you can simply mount with "-o compress_extension=*" to compress
everything.
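
Roughly something like this (an untested sketch reusing the $DEV and $TESTDIR
variables from above; the option is quoted so the shell doesn't expand '*'):

# mkfs.f2fs -f -O extra_attr -O compression /dev/$DEV
# mount -t f2fs -o 'compress_extension=*' /dev/$DEV $TESTDIR
# touch $TESTDIR/foo    # gets the compression flag without f2fs_io setflags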

> 
> - Eric




Re: [f2fs-dev] [PATCH v4] fsverity: stop using PG_error to track error status

2022-11-28 Thread Eric Biggers
On Mon, Nov 28, 2022 at 10:48:41PM -0800, Jaegeuk Kim wrote:
> >  static void f2fs_finish_read_bio(struct bio *bio, bool in_task)
> >  {
> > struct bio_vec *bv;
> > struct bvec_iter_all iter_all;
> > +   struct bio_post_read_ctx *ctx = bio->bi_private;
> >  
> > -   /*
> > -* Update and unlock the bio's pagecache pages, and put the
> > -* decompression context for any compressed pages.
> > -*/
> > bio_for_each_segment_all(bv, bio, iter_all) {
> > struct page *page = bv->bv_page;
> >  
> > if (f2fs_is_compressed_page(page)) {
> > -   if (bio->bi_status)
> > +   if (!ctx->decompression_attempted)
> 
> It seems this causes a panic due to the ctx being nullified by f2fs_verify_bio().
> 

Thanks for catching that!  I've sent out v5 that checks for 'ctx &&
!ctx->decompression_attempted' here.  That's the right thing to do, since if ctx
is NULL then decompression must have been attempted.
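
Concretely, the compressed-page branch in f2fs_finish_read_bio() ends up
looking roughly like this (a sketch of the v5 logic, not the literal hunk):

	if (f2fs_is_compressed_page(page)) {
		/*
		 * ctx is NULL once f2fs_verify_bio() has freed it, and in
		 * that case decompression must already have been attempted,
		 * so only report a decompression failure when it never ran.
		 */
		if (ctx && !ctx->decompression_attempted)
			f2fs_end_read_compressed_page(page, true, 0, in_task);
		f2fs_put_page_dic(page, in_task);
		continue;
	}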

I'd like to get rid of freeing the bio_post_read_ctx in f2fs_verify_bio().
But I believe it's still needed, at least in theory.

Do you have a suggestion for testing f2fs compression + verity with xfstests?
I missed this because compression isn't covered by the "verity" group tests.
Maybe there should be an "f2fs/compress" config in xfstests-bld that uses mkfs
and mount options that cause all files to be automatically compressed, similar
to how f2fs/encrypt automatically encrypts all files with test_dummy_encryption.

- Eric




Re: [f2fs-dev] [PATCH v4] fsverity: stop using PG_error to track error status

2022-11-28 Thread Jaegeuk Kim
On 11/25, Eric Biggers wrote:
> From: Eric Biggers 
> 
> As a step towards freeing the PG_error flag for other uses, change ext4
> and f2fs to stop using PG_error to track verity errors.  Instead, if a
> verity error occurs, just mark the whole bio as failed.  The coarser
> granularity isn't really a problem since it isn't any worse than what
> the block layer provides, and errors from a multi-page readahead aren't
> reported to applications unless a single-page read fails too.
> 
> f2fs supports compression, which makes the f2fs changes a bit more
> complicated than desired, but the basic premise still works.
> 
> Note: there are still a few uses of PageError in f2fs, but they are on
> the write path, so they are unrelated and this patch doesn't touch them.
> 
> Reviewed-by: Chao Yu 
> Signed-off-by: Eric Biggers 
> ---
> 
> v4: Added a comment for decompression_attempted, added a paragraph to
> the commit message, and added Chao's Reviewed-by.
> 
> v3: made a small simplification to the f2fs changes.  Also dropped the
> fscrypt patch since it is upstream now.
> 
>  fs/ext4/readpage.c |  8 ++
>  fs/f2fs/compress.c | 64 ++
>  fs/f2fs/data.c | 53 +++---
>  fs/verity/verify.c | 12 -
>  4 files changed, 72 insertions(+), 65 deletions(-)
> 
> diff --git a/fs/ext4/readpage.c b/fs/ext4/readpage.c
> index 3d21eae267fca..e604ea4e102b7 100644
> --- a/fs/ext4/readpage.c
> +++ b/fs/ext4/readpage.c
> @@ -75,14 +75,10 @@ static void __read_end_io(struct bio *bio)
>   bio_for_each_segment_all(bv, bio, iter_all) {
>   page = bv->bv_page;
>  
> - /* PG_error was set if verity failed. */
> - if (bio->bi_status || PageError(page)) {
> + if (bio->bi_status)
>   ClearPageUptodate(page);
> - /* will re-read again later */
> - ClearPageError(page);
> - } else {
> + else
>   SetPageUptodate(page);
> - }
>   unlock_page(page);
>   }
>   if (bio->bi_private)
> diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c
> index d315c2de136f2..2b7a5cc4ed662 100644
> --- a/fs/f2fs/compress.c
> +++ b/fs/f2fs/compress.c
> @@ -1711,50 +1711,27 @@ static void f2fs_put_dic(struct decompress_io_ctx *dic, bool in_task)
>   }
>  }
>  
> -/*
> - * Update and unlock the cluster's pagecache pages, and release the reference to
> - * the decompress_io_ctx that was being held for I/O completion.
> - */
> -static void __f2fs_decompress_end_io(struct decompress_io_ctx *dic, bool failed,
> - bool in_task)
> +static void f2fs_verify_cluster(struct work_struct *work)
>  {
> + struct decompress_io_ctx *dic =
> + container_of(work, struct decompress_io_ctx, verity_work);
>   int i;
>  
> + /* Verify, update, and unlock the decompressed pages. */
>   for (i = 0; i < dic->cluster_size; i++) {
>   struct page *rpage = dic->rpages[i];
>  
>   if (!rpage)
>   continue;
>  
> - /* PG_error was set if verity failed. */
> - if (failed || PageError(rpage)) {
> - ClearPageUptodate(rpage);
> - /* will re-read again later */
> - ClearPageError(rpage);
> - } else {
> + if (fsverity_verify_page(rpage))
>   SetPageUptodate(rpage);
> - }
> + else
> + ClearPageUptodate(rpage);
>   unlock_page(rpage);
>   }
>  
> - f2fs_put_dic(dic, in_task);
> -}
> -
> -static void f2fs_verify_cluster(struct work_struct *work)
> -{
> - struct decompress_io_ctx *dic =
> - container_of(work, struct decompress_io_ctx, verity_work);
> - int i;
> -
> - /* Verify the cluster's decompressed pages with fs-verity. */
> - for (i = 0; i < dic->cluster_size; i++) {
> - struct page *rpage = dic->rpages[i];
> -
> - if (rpage && !fsverity_verify_page(rpage))
> - SetPageError(rpage);
> - }
> -
> - __f2fs_decompress_end_io(dic, false, true);
> + f2fs_put_dic(dic, true);
>  }
>  
>  /*
> @@ -1764,6 +1741,8 @@ static void f2fs_verify_cluster(struct work_struct *work)
>  void f2fs_decompress_end_io(struct decompress_io_ctx *dic, bool failed,
>   bool in_task)
>  {
> + int i;
> +
>   if (!failed && dic->need_verity) {
>   /*
>* Note that to avoid deadlocks, the verity work can't be done
> @@ -1773,9 +1752,28 @@ void f2fs_decompress_end_io(struct decompress_io_ctx *dic, bool failed,
>    */
>   INIT_WORK(&dic->verity_work, f2fs_verify_cluster);
>   fsverity_enqueue_verify_work(&dic->verity_work);
> - } else {
> - 

Re: [f2fs-dev] [PATCH v4] fsverity: stop using PG_error to track error status

2022-11-28 Thread Jaegeuk Kim
On 11/25, Eric Biggers wrote:
> From: Eric Biggers 
> 
> As a step towards freeing the PG_error flag for other uses, change ext4
> and f2fs to stop using PG_error to track verity errors.  Instead, if a
> verity error occurs, just mark the whole bio as failed.  The coarser
> granularity isn't really a problem since it isn't any worse than what
> the block layer provides, and errors from a multi-page readahead aren't
> reported to applications unless a single-page read fails too.
> 
> f2fs supports compression, which makes the f2fs changes a bit more
> complicated than desired, but the basic premise still works.
> 
> Note: there are still a few uses of PageError in f2fs, but they are on
> the write path, so they are unrelated and this patch doesn't touch them.
> 
> Reviewed-by: Chao Yu 

Acked-by: Jaegeuk Kim 

Thanks,

> Signed-off-by: Eric Biggers 
> ---
> 
> v4: Added a comment for decompression_attempted, added a paragraph to
> the commit message, and added Chao's Reviewed-by.
> 
> v3: made a small simplification to the f2fs changes.  Also dropped the
> fscrypt patch since it is upstream now.
> 
>  fs/ext4/readpage.c |  8 ++
>  fs/f2fs/compress.c | 64 ++
>  fs/f2fs/data.c | 53 +++---
>  fs/verity/verify.c | 12 -
>  4 files changed, 72 insertions(+), 65 deletions(-)
> 
> diff --git a/fs/ext4/readpage.c b/fs/ext4/readpage.c
> index 3d21eae267fca..e604ea4e102b7 100644
> --- a/fs/ext4/readpage.c
> +++ b/fs/ext4/readpage.c
> @@ -75,14 +75,10 @@ static void __read_end_io(struct bio *bio)
>   bio_for_each_segment_all(bv, bio, iter_all) {
>   page = bv->bv_page;
>  
> - /* PG_error was set if verity failed. */
> - if (bio->bi_status || PageError(page)) {
> + if (bio->bi_status)
>   ClearPageUptodate(page);
> - /* will re-read again later */
> - ClearPageError(page);
> - } else {
> + else
>   SetPageUptodate(page);
> - }
>   unlock_page(page);
>   }
>   if (bio->bi_private)
> diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c
> index d315c2de136f2..2b7a5cc4ed662 100644
> --- a/fs/f2fs/compress.c
> +++ b/fs/f2fs/compress.c
> @@ -1711,50 +1711,27 @@ static void f2fs_put_dic(struct decompress_io_ctx *dic, bool in_task)
>   }
>  }
>  
> -/*
> - * Update and unlock the cluster's pagecache pages, and release the reference to
> - * the decompress_io_ctx that was being held for I/O completion.
> - */
> -static void __f2fs_decompress_end_io(struct decompress_io_ctx *dic, bool failed,
> - bool in_task)
> +static void f2fs_verify_cluster(struct work_struct *work)
>  {
> + struct decompress_io_ctx *dic =
> + container_of(work, struct decompress_io_ctx, verity_work);
>   int i;
>  
> + /* Verify, update, and unlock the decompressed pages. */
>   for (i = 0; i < dic->cluster_size; i++) {
>   struct page *rpage = dic->rpages[i];
>  
>   if (!rpage)
>   continue;
>  
> - /* PG_error was set if verity failed. */
> - if (failed || PageError(rpage)) {
> - ClearPageUptodate(rpage);
> - /* will re-read again later */
> - ClearPageError(rpage);
> - } else {
> + if (fsverity_verify_page(rpage))
>   SetPageUptodate(rpage);
> - }
> + else
> + ClearPageUptodate(rpage);
>   unlock_page(rpage);
>   }
>  
> - f2fs_put_dic(dic, in_task);
> -}
> -
> -static void f2fs_verify_cluster(struct work_struct *work)
> -{
> - struct decompress_io_ctx *dic =
> - container_of(work, struct decompress_io_ctx, verity_work);
> - int i;
> -
> - /* Verify the cluster's decompressed pages with fs-verity. */
> - for (i = 0; i < dic->cluster_size; i++) {
> - struct page *rpage = dic->rpages[i];
> -
> - if (rpage && !fsverity_verify_page(rpage))
> - SetPageError(rpage);
> - }
> -
> - __f2fs_decompress_end_io(dic, false, true);
> + f2fs_put_dic(dic, true);
>  }
>  
>  /*
> @@ -1764,6 +1741,8 @@ static void f2fs_verify_cluster(struct work_struct *work)
>  void f2fs_decompress_end_io(struct decompress_io_ctx *dic, bool failed,
>   bool in_task)
>  {
> + int i;
> +
>   if (!failed && dic->need_verity) {
>   /*
>* Note that to avoid deadlocks, the verity work can't be done
> @@ -1773,9 +1752,28 @@ void f2fs_decompress_end_io(struct decompress_io_ctx *dic, bool failed,
>    */
>   INIT_WORK(&dic->verity_work, f2fs_verify_cluster);
>   fsverity_enqueue_verify_work(&dic->verity_work);
> - } else {
> -

[f2fs-dev] [PATCH v4] fsverity: stop using PG_error to track error status

2022-11-25 Thread Eric Biggers
From: Eric Biggers 

As a step towards freeing the PG_error flag for other uses, change ext4
and f2fs to stop using PG_error to track verity errors.  Instead, if a
verity error occurs, just mark the whole bio as failed.  The coarser
granularity isn't really a problem since it isn't any worse than what
the block layer provides, and errors from a multi-page readahead aren't
reported to applications unless a single-page read fails too.

f2fs supports compression, which makes the f2fs changes a bit more
complicated than desired, but the basic premise still works.

Note: there are still a few uses of PageError in f2fs, but they are on
the write path, so they are unrelated and this patch doesn't touch them.

Reviewed-by: Chao Yu 
Signed-off-by: Eric Biggers 
---

v4: Added a comment for decompression_attempted, added a paragraph to
the commit message, and added Chao's Reviewed-by.

v3: made a small simplification to the f2fs changes.  Also dropped the
fscrypt patch since it is upstream now.

 fs/ext4/readpage.c |  8 ++
 fs/f2fs/compress.c | 64 ++
 fs/f2fs/data.c | 53 +++---
 fs/verity/verify.c | 12 -
 4 files changed, 72 insertions(+), 65 deletions(-)

diff --git a/fs/ext4/readpage.c b/fs/ext4/readpage.c
index 3d21eae267fca..e604ea4e102b7 100644
--- a/fs/ext4/readpage.c
+++ b/fs/ext4/readpage.c
@@ -75,14 +75,10 @@ static void __read_end_io(struct bio *bio)
bio_for_each_segment_all(bv, bio, iter_all) {
page = bv->bv_page;
 
-   /* PG_error was set if verity failed. */
-   if (bio->bi_status || PageError(page)) {
+   if (bio->bi_status)
ClearPageUptodate(page);
-   /* will re-read again later */
-   ClearPageError(page);
-   } else {
+   else
SetPageUptodate(page);
-   }
unlock_page(page);
}
if (bio->bi_private)
diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c
index d315c2de136f2..2b7a5cc4ed662 100644
--- a/fs/f2fs/compress.c
+++ b/fs/f2fs/compress.c
@@ -1711,50 +1711,27 @@ static void f2fs_put_dic(struct decompress_io_ctx *dic, bool in_task)
}
 }
 
-/*
- * Update and unlock the cluster's pagecache pages, and release the reference to
- * the decompress_io_ctx that was being held for I/O completion.
- */
-static void __f2fs_decompress_end_io(struct decompress_io_ctx *dic, bool failed,
-   bool in_task)
+static void f2fs_verify_cluster(struct work_struct *work)
 {
+   struct decompress_io_ctx *dic =
+   container_of(work, struct decompress_io_ctx, verity_work);
int i;
 
+   /* Verify, update, and unlock the decompressed pages. */
for (i = 0; i < dic->cluster_size; i++) {
struct page *rpage = dic->rpages[i];
 
if (!rpage)
continue;
 
-   /* PG_error was set if verity failed. */
-   if (failed || PageError(rpage)) {
-   ClearPageUptodate(rpage);
-   /* will re-read again later */
-   ClearPageError(rpage);
-   } else {
+   if (fsverity_verify_page(rpage))
SetPageUptodate(rpage);
-   }
+   else
+   ClearPageUptodate(rpage);
unlock_page(rpage);
}
 
-   f2fs_put_dic(dic, in_task);
-}
-
-static void f2fs_verify_cluster(struct work_struct *work)
-{
-   struct decompress_io_ctx *dic =
-   container_of(work, struct decompress_io_ctx, verity_work);
-   int i;
-
-   /* Verify the cluster's decompressed pages with fs-verity. */
-   for (i = 0; i < dic->cluster_size; i++) {
-   struct page *rpage = dic->rpages[i];
-
-   if (rpage && !fsverity_verify_page(rpage))
-   SetPageError(rpage);
-   }
-
-   __f2fs_decompress_end_io(dic, false, true);
+   f2fs_put_dic(dic, true);
 }
 
 /*
@@ -1764,6 +1741,8 @@ static void f2fs_verify_cluster(struct work_struct *work)
 void f2fs_decompress_end_io(struct decompress_io_ctx *dic, bool failed,
bool in_task)
 {
+   int i;
+
if (!failed && dic->need_verity) {
/*
 * Note that to avoid deadlocks, the verity work can't be done
@@ -1773,9 +1752,28 @@ void f2fs_decompress_end_io(struct decompress_io_ctx *dic, bool failed,
 */
INIT_WORK(&dic->verity_work, f2fs_verify_cluster);
fsverity_enqueue_verify_work(&dic->verity_work);
-   } else {
-   __f2fs_decompress_end_io(dic, failed, in_task);
+   return;
+   }
+
+   /* Update and unlock the cluster's pagecache pages. */
+   for (i = 0; i < dic->cluster_size; i++) {
+