Re: [PATCH 3/3] zram: get rid of lockdep warning
On Mon, Jan 21, 2013 at 02:31:47PM -0800, Nitin Gupta wrote: > On 01/20/2013 09:18 PM, Minchan Kim wrote: > >On Fri, Jan 18, 2013 at 01:34:18PM -0800, Nitin Gupta wrote: > >>On Wed, Jan 16, 2013 at 6:12 PM, Minchan Kim wrote: > >>>Lockdep complains about recursive deadlock of zram->init_lock. > >>>[1] made it false positive because we can't request IO to zram > >>>before setting disksize. Anyway, we should shut lockdep up to > >>>avoid many reporting from user. > >>> > >>>This patch allocates zram's metadata out of lock so we can fix it. > >>>In addition, this patch replace GFP_KERNEL with GFP_NOIO/GFP_ATOMIC > >>>in request handle path for partial I/O. > >>> > >>>[1] zram: give up lazy initialization of zram metadata > >>> > >>>Signed-off-by: Minchan Kim > >>>--- > >>> drivers/staging/zram/zram_drv.c | 194 > >>> +++-- > >>> drivers/staging/zram/zram_drv.h | 12 ++- > >>> drivers/staging/zram/zram_sysfs.c | 13 ++- > >>> 3 files changed, 118 insertions(+), 101 deletions(-) > >>> > >>>diff --git a/drivers/staging/zram/zram_drv.c > >>>b/drivers/staging/zram/zram_drv.c > >>>index 3693780..eb1bc37 100644 > >>>--- a/drivers/staging/zram/zram_drv.c > >>>+++ b/drivers/staging/zram/zram_drv.c > >>>@@ -71,22 +71,22 @@ static void zram_stat64_inc(struct zram *zram, u64 *v) > >>> zram_stat64_add(zram, v, 1); > >>> } > >>> > >>>-static int zram_test_flag(struct zram *zram, u32 index, > >>>+static int zram_test_flag(struct zram_meta *meta, u32 index, > >>> enum zram_pageflags flag) > >>> { > >>>- return zram->table[index].flags & BIT(flag); > >>>+ return meta->table[index].flags & BIT(flag); > >>> } > >>> > >>>-static void zram_set_flag(struct zram *zram, u32 index, > >>>+static void zram_set_flag(struct zram_meta *meta, u32 index, > >>> enum zram_pageflags flag) > >>> { > >>>- zram->table[index].flags |= BIT(flag); > >>>+ meta->table[index].flags |= BIT(flag); > >>> } > >>> > >>>-static void zram_clear_flag(struct zram *zram, u32 index, > >>>+static void zram_clear_flag(struct 
zram_meta *meta, u32 index, > >>> enum zram_pageflags flag) > >>> { > >>>- zram->table[index].flags &= ~BIT(flag); > >>>+ meta->table[index].flags &= ~BIT(flag); > >>> } > >>> > >>> static int page_zero_filled(void *ptr) > >>>@@ -106,16 +106,17 @@ static int page_zero_filled(void *ptr) > >>> > >>> static void zram_free_page(struct zram *zram, size_t index) > >>> { > >>>- unsigned long handle = zram->table[index].handle; > >>>- u16 size = zram->table[index].size; > >>>+ struct zram_meta *meta = zram->meta; > >>>+ unsigned long handle = meta->table[index].handle; > >>>+ u16 size = meta->table[index].size; > >>> > >>> if (unlikely(!handle)) { > >>> /* > >>> * No memory is allocated for zero filled pages. > >>> * Simply clear zero page flag. > >>> */ > >>>- if (zram_test_flag(zram, index, ZRAM_ZERO)) { > >>>- zram_clear_flag(zram, index, ZRAM_ZERO); > >>>+ if (zram_test_flag(meta, index, ZRAM_ZERO)) { > >>>+ zram_clear_flag(meta, index, ZRAM_ZERO); > >>> zram_stat_dec(>stats.pages_zero); > >>> } > >>> return; > >>>@@ -124,17 +125,17 @@ static void zram_free_page(struct zram *zram, size_t > >>>index) > >>> if (unlikely(size > max_zpage_size)) > >>> zram_stat_dec(>stats.bad_compress); > >>> > >>>- zs_free(zram->mem_pool, handle); > >>>+ zs_free(meta->mem_pool, handle); > >>> > >>> if (size <= PAGE_SIZE / 2) > >>> zram_stat_dec(>stats.good_compress); > >>> > >>> zram_stat64_sub(zram, >stats.compr_size, > >>>- zram->table[index].size); > >>>+ meta->table[index].size); > >>> zram_stat_dec(>stats.pages_stored); > >>> > >>>- zram->table[index].handle = 0; > >>>- zram->table[index].size = 0; > >>>+ meta->table[index].handle = 0; > >>>+ meta->table[index].size = 0; > >>> } > >>> > >>> static void handle_zero_page(struct bio_vec *bvec) > >>>@@ -159,20 +160,21 @@ static int zram_decompress_page(struct zram *zram, > >>>char *mem, u32 index) > >>> int ret = LZO_E_OK; > >>> size_t clen = PAGE_SIZE; > >>> unsigned char *cmem; > >>>- unsigned long handle = zram->table[index].handle; > 
>>>+ struct zram_meta *meta = zram->meta; > >>>+ unsigned long handle = meta->table[index].handle; > >>> > >>>- if (!handle || zram_test_flag(zram, index, ZRAM_ZERO)) { > >>>+ if (!handle || zram_test_flag(meta, index, ZRAM_ZERO)) { > >>> memset(mem, 0, PAGE_SIZE); > >>> return 0; > >>> } > >>> > >>>- cmem = zs_map_object(zram->mem_pool,
Re: [PATCH 3/3] zram: get rid of lockdep warning
On 01/20/2013 09:18 PM, Minchan Kim wrote: On Fri, Jan 18, 2013 at 01:34:18PM -0800, Nitin Gupta wrote: On Wed, Jan 16, 2013 at 6:12 PM, Minchan Kim wrote: Lockdep complains about recursive deadlock of zram->init_lock. [1] made it false positive because we can't request IO to zram before setting disksize. Anyway, we should shut lockdep up to avoid many reporting from user. This patch allocates zram's metadata out of lock so we can fix it. In addition, this patch replace GFP_KERNEL with GFP_NOIO/GFP_ATOMIC in request handle path for partial I/O. [1] zram: give up lazy initialization of zram metadata Signed-off-by: Minchan Kim --- drivers/staging/zram/zram_drv.c | 194 +++-- drivers/staging/zram/zram_drv.h | 12 ++- drivers/staging/zram/zram_sysfs.c | 13 ++- 3 files changed, 118 insertions(+), 101 deletions(-) diff --git a/drivers/staging/zram/zram_drv.c b/drivers/staging/zram/zram_drv.c index 3693780..eb1bc37 100644 --- a/drivers/staging/zram/zram_drv.c +++ b/drivers/staging/zram/zram_drv.c @@ -71,22 +71,22 @@ static void zram_stat64_inc(struct zram *zram, u64 *v) zram_stat64_add(zram, v, 1); } -static int zram_test_flag(struct zram *zram, u32 index, +static int zram_test_flag(struct zram_meta *meta, u32 index, enum zram_pageflags flag) { - return zram->table[index].flags & BIT(flag); + return meta->table[index].flags & BIT(flag); } -static void zram_set_flag(struct zram *zram, u32 index, +static void zram_set_flag(struct zram_meta *meta, u32 index, enum zram_pageflags flag) { - zram->table[index].flags |= BIT(flag); + meta->table[index].flags |= BIT(flag); } -static void zram_clear_flag(struct zram *zram, u32 index, +static void zram_clear_flag(struct zram_meta *meta, u32 index, enum zram_pageflags flag) { - zram->table[index].flags &= ~BIT(flag); + meta->table[index].flags &= ~BIT(flag); } static int page_zero_filled(void *ptr) @@ -106,16 +106,17 @@ static int page_zero_filled(void *ptr) static void zram_free_page(struct zram *zram, size_t index) { - unsigned long 
handle = zram->table[index].handle; - u16 size = zram->table[index].size; + struct zram_meta *meta = zram->meta; + unsigned long handle = meta->table[index].handle; + u16 size = meta->table[index].size; if (unlikely(!handle)) { /* * No memory is allocated for zero filled pages. * Simply clear zero page flag. */ - if (zram_test_flag(zram, index, ZRAM_ZERO)) { - zram_clear_flag(zram, index, ZRAM_ZERO); + if (zram_test_flag(meta, index, ZRAM_ZERO)) { + zram_clear_flag(meta, index, ZRAM_ZERO); zram_stat_dec(>stats.pages_zero); } return; @@ -124,17 +125,17 @@ static void zram_free_page(struct zram *zram, size_t index) if (unlikely(size > max_zpage_size)) zram_stat_dec(>stats.bad_compress); - zs_free(zram->mem_pool, handle); + zs_free(meta->mem_pool, handle); if (size <= PAGE_SIZE / 2) zram_stat_dec(>stats.good_compress); zram_stat64_sub(zram, >stats.compr_size, - zram->table[index].size); + meta->table[index].size); zram_stat_dec(>stats.pages_stored); - zram->table[index].handle = 0; - zram->table[index].size = 0; + meta->table[index].handle = 0; + meta->table[index].size = 0; } static void handle_zero_page(struct bio_vec *bvec) @@ -159,20 +160,21 @@ static int zram_decompress_page(struct zram *zram, char *mem, u32 index) int ret = LZO_E_OK; size_t clen = PAGE_SIZE; unsigned char *cmem; - unsigned long handle = zram->table[index].handle; + struct zram_meta *meta = zram->meta; + unsigned long handle = meta->table[index].handle; - if (!handle || zram_test_flag(zram, index, ZRAM_ZERO)) { + if (!handle || zram_test_flag(meta, index, ZRAM_ZERO)) { memset(mem, 0, PAGE_SIZE); return 0; } - cmem = zs_map_object(zram->mem_pool, handle, ZS_MM_RO); - if (zram->table[index].size == PAGE_SIZE) + cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_RO); + if (meta->table[index].size == PAGE_SIZE) memcpy(mem, cmem, PAGE_SIZE); else - ret = lzo1x_decompress_safe(cmem, zram->table[index].size, + ret = lzo1x_decompress_safe(cmem, meta->table[index].size, mem, ); - 
zs_unmap_object(zram->mem_pool, handle); + zs_unmap_object(meta->mem_pool, handle); /* Should NEVER happen. Return bio error if it does. */ if (unlikely(ret !=
Re: [PATCH 3/3] zram: get rid of lockdep warning
On 01/20/2013 09:18 PM, Minchan Kim wrote: On Fri, Jan 18, 2013 at 01:34:18PM -0800, Nitin Gupta wrote: On Wed, Jan 16, 2013 at 6:12 PM, Minchan Kim minc...@kernel.org wrote: Lockdep complains about recursive deadlock of zram-init_lock. [1] made it false positive because we can't request IO to zram before setting disksize. Anyway, we should shut lockdep up to avoid many reporting from user. This patch allocates zram's metadata out of lock so we can fix it. In addition, this patch replace GFP_KERNEL with GFP_NOIO/GFP_ATOMIC in request handle path for partial I/O. [1] zram: give up lazy initialization of zram metadata Signed-off-by: Minchan Kim minc...@kernel.org --- drivers/staging/zram/zram_drv.c | 194 +++-- drivers/staging/zram/zram_drv.h | 12 ++- drivers/staging/zram/zram_sysfs.c | 13 ++- 3 files changed, 118 insertions(+), 101 deletions(-) diff --git a/drivers/staging/zram/zram_drv.c b/drivers/staging/zram/zram_drv.c index 3693780..eb1bc37 100644 --- a/drivers/staging/zram/zram_drv.c +++ b/drivers/staging/zram/zram_drv.c @@ -71,22 +71,22 @@ static void zram_stat64_inc(struct zram *zram, u64 *v) zram_stat64_add(zram, v, 1); } -static int zram_test_flag(struct zram *zram, u32 index, +static int zram_test_flag(struct zram_meta *meta, u32 index, enum zram_pageflags flag) { - return zram-table[index].flags BIT(flag); + return meta-table[index].flags BIT(flag); } -static void zram_set_flag(struct zram *zram, u32 index, +static void zram_set_flag(struct zram_meta *meta, u32 index, enum zram_pageflags flag) { - zram-table[index].flags |= BIT(flag); + meta-table[index].flags |= BIT(flag); } -static void zram_clear_flag(struct zram *zram, u32 index, +static void zram_clear_flag(struct zram_meta *meta, u32 index, enum zram_pageflags flag) { - zram-table[index].flags = ~BIT(flag); + meta-table[index].flags = ~BIT(flag); } static int page_zero_filled(void *ptr) @@ -106,16 +106,17 @@ static int page_zero_filled(void *ptr) static void zram_free_page(struct zram *zram, size_t 
index) { - unsigned long handle = zram-table[index].handle; - u16 size = zram-table[index].size; + struct zram_meta *meta = zram-meta; + unsigned long handle = meta-table[index].handle; + u16 size = meta-table[index].size; if (unlikely(!handle)) { /* * No memory is allocated for zero filled pages. * Simply clear zero page flag. */ - if (zram_test_flag(zram, index, ZRAM_ZERO)) { - zram_clear_flag(zram, index, ZRAM_ZERO); + if (zram_test_flag(meta, index, ZRAM_ZERO)) { + zram_clear_flag(meta, index, ZRAM_ZERO); zram_stat_dec(zram-stats.pages_zero); } return; @@ -124,17 +125,17 @@ static void zram_free_page(struct zram *zram, size_t index) if (unlikely(size max_zpage_size)) zram_stat_dec(zram-stats.bad_compress); - zs_free(zram-mem_pool, handle); + zs_free(meta-mem_pool, handle); if (size = PAGE_SIZE / 2) zram_stat_dec(zram-stats.good_compress); zram_stat64_sub(zram, zram-stats.compr_size, - zram-table[index].size); + meta-table[index].size); zram_stat_dec(zram-stats.pages_stored); - zram-table[index].handle = 0; - zram-table[index].size = 0; + meta-table[index].handle = 0; + meta-table[index].size = 0; } static void handle_zero_page(struct bio_vec *bvec) @@ -159,20 +160,21 @@ static int zram_decompress_page(struct zram *zram, char *mem, u32 index) int ret = LZO_E_OK; size_t clen = PAGE_SIZE; unsigned char *cmem; - unsigned long handle = zram-table[index].handle; + struct zram_meta *meta = zram-meta; + unsigned long handle = meta-table[index].handle; - if (!handle || zram_test_flag(zram, index, ZRAM_ZERO)) { + if (!handle || zram_test_flag(meta, index, ZRAM_ZERO)) { memset(mem, 0, PAGE_SIZE); return 0; } - cmem = zs_map_object(zram-mem_pool, handle, ZS_MM_RO); - if (zram-table[index].size == PAGE_SIZE) + cmem = zs_map_object(meta-mem_pool, handle, ZS_MM_RO); + if (meta-table[index].size == PAGE_SIZE) memcpy(mem, cmem, PAGE_SIZE); else - ret = lzo1x_decompress_safe(cmem, zram-table[index].size, + ret = lzo1x_decompress_safe(cmem, meta-table[index].size, mem, clen); - 
zs_unmap_object(zram->mem_pool, handle); + zs_unmap_object(meta->mem_pool, handle); /* Should NEVER happen. Return bio error if it does. */
Re: [PATCH 3/3] zram: get rid of lockdep warning
On Mon, Jan 21, 2013 at 02:31:47PM -0800, Nitin Gupta wrote: On 01/20/2013 09:18 PM, Minchan Kim wrote: On Fri, Jan 18, 2013 at 01:34:18PM -0800, Nitin Gupta wrote: On Wed, Jan 16, 2013 at 6:12 PM, Minchan Kim minc...@kernel.org wrote: Lockdep complains about recursive deadlock of zram-init_lock. [1] made it false positive because we can't request IO to zram before setting disksize. Anyway, we should shut lockdep up to avoid many reporting from user. This patch allocates zram's metadata out of lock so we can fix it. In addition, this patch replace GFP_KERNEL with GFP_NOIO/GFP_ATOMIC in request handle path for partial I/O. [1] zram: give up lazy initialization of zram metadata Signed-off-by: Minchan Kim minc...@kernel.org --- drivers/staging/zram/zram_drv.c | 194 +++-- drivers/staging/zram/zram_drv.h | 12 ++- drivers/staging/zram/zram_sysfs.c | 13 ++- 3 files changed, 118 insertions(+), 101 deletions(-) diff --git a/drivers/staging/zram/zram_drv.c b/drivers/staging/zram/zram_drv.c index 3693780..eb1bc37 100644 --- a/drivers/staging/zram/zram_drv.c +++ b/drivers/staging/zram/zram_drv.c @@ -71,22 +71,22 @@ static void zram_stat64_inc(struct zram *zram, u64 *v) zram_stat64_add(zram, v, 1); } -static int zram_test_flag(struct zram *zram, u32 index, +static int zram_test_flag(struct zram_meta *meta, u32 index, enum zram_pageflags flag) { - return zram-table[index].flags BIT(flag); + return meta-table[index].flags BIT(flag); } -static void zram_set_flag(struct zram *zram, u32 index, +static void zram_set_flag(struct zram_meta *meta, u32 index, enum zram_pageflags flag) { - zram-table[index].flags |= BIT(flag); + meta-table[index].flags |= BIT(flag); } -static void zram_clear_flag(struct zram *zram, u32 index, +static void zram_clear_flag(struct zram_meta *meta, u32 index, enum zram_pageflags flag) { - zram-table[index].flags = ~BIT(flag); + meta-table[index].flags = ~BIT(flag); } static int page_zero_filled(void *ptr) @@ -106,16 +106,17 @@ static int page_zero_filled(void 
*ptr) static void zram_free_page(struct zram *zram, size_t index) { - unsigned long handle = zram-table[index].handle; - u16 size = zram-table[index].size; + struct zram_meta *meta = zram-meta; + unsigned long handle = meta-table[index].handle; + u16 size = meta-table[index].size; if (unlikely(!handle)) { /* * No memory is allocated for zero filled pages. * Simply clear zero page flag. */ - if (zram_test_flag(zram, index, ZRAM_ZERO)) { - zram_clear_flag(zram, index, ZRAM_ZERO); + if (zram_test_flag(meta, index, ZRAM_ZERO)) { + zram_clear_flag(meta, index, ZRAM_ZERO); zram_stat_dec(zram-stats.pages_zero); } return; @@ -124,17 +125,17 @@ static void zram_free_page(struct zram *zram, size_t index) if (unlikely(size max_zpage_size)) zram_stat_dec(zram-stats.bad_compress); - zs_free(zram-mem_pool, handle); + zs_free(meta-mem_pool, handle); if (size = PAGE_SIZE / 2) zram_stat_dec(zram-stats.good_compress); zram_stat64_sub(zram, zram-stats.compr_size, - zram-table[index].size); + meta-table[index].size); zram_stat_dec(zram-stats.pages_stored); - zram-table[index].handle = 0; - zram-table[index].size = 0; + meta-table[index].handle = 0; + meta-table[index].size = 0; } static void handle_zero_page(struct bio_vec *bvec) @@ -159,20 +160,21 @@ static int zram_decompress_page(struct zram *zram, char *mem, u32 index) int ret = LZO_E_OK; size_t clen = PAGE_SIZE; unsigned char *cmem; - unsigned long handle = zram-table[index].handle; + struct zram_meta *meta = zram-meta; + unsigned long handle = meta-table[index].handle; - if (!handle || zram_test_flag(zram, index, ZRAM_ZERO)) { + if (!handle || zram_test_flag(meta, index, ZRAM_ZERO)) { memset(mem, 0, PAGE_SIZE); return 0; } - cmem = zs_map_object(zram-mem_pool, handle, ZS_MM_RO); - if (zram-table[index].size == PAGE_SIZE) + cmem = zs_map_object(meta-mem_pool, handle, ZS_MM_RO); + if (meta-table[index].size == PAGE_SIZE) memcpy(mem, cmem, PAGE_SIZE); else - ret = lzo1x_decompress_safe(cmem, zram-table[index].size, + ret = 
lzo1x_decompress_safe(cmem, meta->table[index].size,
Re: [PATCH 3/3] zram: get rid of lockdep warning
On Fri, Jan 18, 2013 at 01:34:18PM -0800, Nitin Gupta wrote: > On Wed, Jan 16, 2013 at 6:12 PM, Minchan Kim wrote: > > Lockdep complains about recursive deadlock of zram->init_lock. > > [1] made it false positive because we can't request IO to zram > > before setting disksize. Anyway, we should shut lockdep up to > > avoid many reporting from user. > > > > This patch allocates zram's metadata out of lock so we can fix it. > > In addition, this patch replace GFP_KERNEL with GFP_NOIO/GFP_ATOMIC > > in request handle path for partial I/O. > > > > [1] zram: give up lazy initialization of zram metadata > > > > Signed-off-by: Minchan Kim > > --- > > drivers/staging/zram/zram_drv.c | 194 > > +++-- > > drivers/staging/zram/zram_drv.h | 12 ++- > > drivers/staging/zram/zram_sysfs.c | 13 ++- > > 3 files changed, 118 insertions(+), 101 deletions(-) > > > > diff --git a/drivers/staging/zram/zram_drv.c > > b/drivers/staging/zram/zram_drv.c > > index 3693780..eb1bc37 100644 > > --- a/drivers/staging/zram/zram_drv.c > > +++ b/drivers/staging/zram/zram_drv.c > > @@ -71,22 +71,22 @@ static void zram_stat64_inc(struct zram *zram, u64 *v) > > zram_stat64_add(zram, v, 1); > > } > > > > -static int zram_test_flag(struct zram *zram, u32 index, > > +static int zram_test_flag(struct zram_meta *meta, u32 index, > > enum zram_pageflags flag) > > { > > - return zram->table[index].flags & BIT(flag); > > + return meta->table[index].flags & BIT(flag); > > } > > > > -static void zram_set_flag(struct zram *zram, u32 index, > > +static void zram_set_flag(struct zram_meta *meta, u32 index, > > enum zram_pageflags flag) > > { > > - zram->table[index].flags |= BIT(flag); > > + meta->table[index].flags |= BIT(flag); > > } > > > > -static void zram_clear_flag(struct zram *zram, u32 index, > > +static void zram_clear_flag(struct zram_meta *meta, u32 index, > > enum zram_pageflags flag) > > { > > - zram->table[index].flags &= ~BIT(flag); > > + meta->table[index].flags &= ~BIT(flag); > > } > > > > static 
int page_zero_filled(void *ptr) > > @@ -106,16 +106,17 @@ static int page_zero_filled(void *ptr) > > > > static void zram_free_page(struct zram *zram, size_t index) > > { > > - unsigned long handle = zram->table[index].handle; > > - u16 size = zram->table[index].size; > > + struct zram_meta *meta = zram->meta; > > + unsigned long handle = meta->table[index].handle; > > + u16 size = meta->table[index].size; > > > > if (unlikely(!handle)) { > > /* > > * No memory is allocated for zero filled pages. > > * Simply clear zero page flag. > > */ > > - if (zram_test_flag(zram, index, ZRAM_ZERO)) { > > - zram_clear_flag(zram, index, ZRAM_ZERO); > > + if (zram_test_flag(meta, index, ZRAM_ZERO)) { > > + zram_clear_flag(meta, index, ZRAM_ZERO); > > zram_stat_dec(>stats.pages_zero); > > } > > return; > > @@ -124,17 +125,17 @@ static void zram_free_page(struct zram *zram, size_t > > index) > > if (unlikely(size > max_zpage_size)) > > zram_stat_dec(>stats.bad_compress); > > > > - zs_free(zram->mem_pool, handle); > > + zs_free(meta->mem_pool, handle); > > > > if (size <= PAGE_SIZE / 2) > > zram_stat_dec(>stats.good_compress); > > > > zram_stat64_sub(zram, >stats.compr_size, > > - zram->table[index].size); > > + meta->table[index].size); > > zram_stat_dec(>stats.pages_stored); > > > > - zram->table[index].handle = 0; > > - zram->table[index].size = 0; > > + meta->table[index].handle = 0; > > + meta->table[index].size = 0; > > } > > > > static void handle_zero_page(struct bio_vec *bvec) > > @@ -159,20 +160,21 @@ static int zram_decompress_page(struct zram *zram, > > char *mem, u32 index) > > int ret = LZO_E_OK; > > size_t clen = PAGE_SIZE; > > unsigned char *cmem; > > - unsigned long handle = zram->table[index].handle; > > + struct zram_meta *meta = zram->meta; > > + unsigned long handle = meta->table[index].handle; > > > > - if (!handle || zram_test_flag(zram, index, ZRAM_ZERO)) { > > + if (!handle || zram_test_flag(meta, index, ZRAM_ZERO)) { > > memset(mem, 0, PAGE_SIZE); > > 
return 0; > > } > > > > - cmem = zs_map_object(zram->mem_pool, handle, ZS_MM_RO); > > - if (zram->table[index].size == PAGE_SIZE) > > + cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_RO); > > + if (meta->table[index].size == PAGE_SIZE) > > memcpy(mem, cmem, PAGE_SIZE); > > else > > - ret =
Re: [PATCH 3/3] zram: get rid of lockdep warning
On Fri, Jan 18, 2013 at 01:34:18PM -0800, Nitin Gupta wrote: On Wed, Jan 16, 2013 at 6:12 PM, Minchan Kim minc...@kernel.org wrote: Lockdep complains about recursive deadlock of zram-init_lock. [1] made it false positive because we can't request IO to zram before setting disksize. Anyway, we should shut lockdep up to avoid many reporting from user. This patch allocates zram's metadata out of lock so we can fix it. In addition, this patch replace GFP_KERNEL with GFP_NOIO/GFP_ATOMIC in request handle path for partial I/O. [1] zram: give up lazy initialization of zram metadata Signed-off-by: Minchan Kim minc...@kernel.org --- drivers/staging/zram/zram_drv.c | 194 +++-- drivers/staging/zram/zram_drv.h | 12 ++- drivers/staging/zram/zram_sysfs.c | 13 ++- 3 files changed, 118 insertions(+), 101 deletions(-) diff --git a/drivers/staging/zram/zram_drv.c b/drivers/staging/zram/zram_drv.c index 3693780..eb1bc37 100644 --- a/drivers/staging/zram/zram_drv.c +++ b/drivers/staging/zram/zram_drv.c @@ -71,22 +71,22 @@ static void zram_stat64_inc(struct zram *zram, u64 *v) zram_stat64_add(zram, v, 1); } -static int zram_test_flag(struct zram *zram, u32 index, +static int zram_test_flag(struct zram_meta *meta, u32 index, enum zram_pageflags flag) { - return zram-table[index].flags BIT(flag); + return meta-table[index].flags BIT(flag); } -static void zram_set_flag(struct zram *zram, u32 index, +static void zram_set_flag(struct zram_meta *meta, u32 index, enum zram_pageflags flag) { - zram-table[index].flags |= BIT(flag); + meta-table[index].flags |= BIT(flag); } -static void zram_clear_flag(struct zram *zram, u32 index, +static void zram_clear_flag(struct zram_meta *meta, u32 index, enum zram_pageflags flag) { - zram-table[index].flags = ~BIT(flag); + meta-table[index].flags = ~BIT(flag); } static int page_zero_filled(void *ptr) @@ -106,16 +106,17 @@ static int page_zero_filled(void *ptr) static void zram_free_page(struct zram *zram, size_t index) { - unsigned long handle = 
zram-table[index].handle; - u16 size = zram-table[index].size; + struct zram_meta *meta = zram-meta; + unsigned long handle = meta-table[index].handle; + u16 size = meta-table[index].size; if (unlikely(!handle)) { /* * No memory is allocated for zero filled pages. * Simply clear zero page flag. */ - if (zram_test_flag(zram, index, ZRAM_ZERO)) { - zram_clear_flag(zram, index, ZRAM_ZERO); + if (zram_test_flag(meta, index, ZRAM_ZERO)) { + zram_clear_flag(meta, index, ZRAM_ZERO); zram_stat_dec(zram-stats.pages_zero); } return; @@ -124,17 +125,17 @@ static void zram_free_page(struct zram *zram, size_t index) if (unlikely(size max_zpage_size)) zram_stat_dec(zram-stats.bad_compress); - zs_free(zram-mem_pool, handle); + zs_free(meta-mem_pool, handle); if (size = PAGE_SIZE / 2) zram_stat_dec(zram-stats.good_compress); zram_stat64_sub(zram, zram-stats.compr_size, - zram-table[index].size); + meta-table[index].size); zram_stat_dec(zram-stats.pages_stored); - zram-table[index].handle = 0; - zram-table[index].size = 0; + meta-table[index].handle = 0; + meta-table[index].size = 0; } static void handle_zero_page(struct bio_vec *bvec) @@ -159,20 +160,21 @@ static int zram_decompress_page(struct zram *zram, char *mem, u32 index) int ret = LZO_E_OK; size_t clen = PAGE_SIZE; unsigned char *cmem; - unsigned long handle = zram-table[index].handle; + struct zram_meta *meta = zram-meta; + unsigned long handle = meta-table[index].handle; - if (!handle || zram_test_flag(zram, index, ZRAM_ZERO)) { + if (!handle || zram_test_flag(meta, index, ZRAM_ZERO)) { memset(mem, 0, PAGE_SIZE); return 0; } - cmem = zs_map_object(zram-mem_pool, handle, ZS_MM_RO); - if (zram-table[index].size == PAGE_SIZE) + cmem = zs_map_object(meta-mem_pool, handle, ZS_MM_RO); + if (meta-table[index].size == PAGE_SIZE) memcpy(mem, cmem, PAGE_SIZE); else - ret = lzo1x_decompress_safe(cmem, zram-table[index].size, + ret = lzo1x_decompress_safe(cmem, meta-table[index].size, mem, clen); -
Re: [PATCH 3/3] zram: get rid of lockdep warning
On Wed, Jan 16, 2013 at 6:12 PM, Minchan Kim wrote: > Lockdep complains about recursive deadlock of zram->init_lock. > [1] made it false positive because we can't request IO to zram > before setting disksize. Anyway, we should shut lockdep up to > avoid many reporting from user. > > This patch allocates zram's metadata out of lock so we can fix it. > In addition, this patch replace GFP_KERNEL with GFP_NOIO/GFP_ATOMIC > in request handle path for partial I/O. > > [1] zram: give up lazy initialization of zram metadata > > Signed-off-by: Minchan Kim > --- > drivers/staging/zram/zram_drv.c | 194 > +++-- > drivers/staging/zram/zram_drv.h | 12 ++- > drivers/staging/zram/zram_sysfs.c | 13 ++- > 3 files changed, 118 insertions(+), 101 deletions(-) > > diff --git a/drivers/staging/zram/zram_drv.c b/drivers/staging/zram/zram_drv.c > index 3693780..eb1bc37 100644 > --- a/drivers/staging/zram/zram_drv.c > +++ b/drivers/staging/zram/zram_drv.c > @@ -71,22 +71,22 @@ static void zram_stat64_inc(struct zram *zram, u64 *v) > zram_stat64_add(zram, v, 1); > } > > -static int zram_test_flag(struct zram *zram, u32 index, > +static int zram_test_flag(struct zram_meta *meta, u32 index, > enum zram_pageflags flag) > { > - return zram->table[index].flags & BIT(flag); > + return meta->table[index].flags & BIT(flag); > } > > -static void zram_set_flag(struct zram *zram, u32 index, > +static void zram_set_flag(struct zram_meta *meta, u32 index, > enum zram_pageflags flag) > { > - zram->table[index].flags |= BIT(flag); > + meta->table[index].flags |= BIT(flag); > } > > -static void zram_clear_flag(struct zram *zram, u32 index, > +static void zram_clear_flag(struct zram_meta *meta, u32 index, > enum zram_pageflags flag) > { > - zram->table[index].flags &= ~BIT(flag); > + meta->table[index].flags &= ~BIT(flag); > } > > static int page_zero_filled(void *ptr) > @@ -106,16 +106,17 @@ static int page_zero_filled(void *ptr) > > static void zram_free_page(struct zram *zram, size_t index) > { > - 
unsigned long handle = zram->table[index].handle; > - u16 size = zram->table[index].size; > + struct zram_meta *meta = zram->meta; > + unsigned long handle = meta->table[index].handle; > + u16 size = meta->table[index].size; > > if (unlikely(!handle)) { > /* > * No memory is allocated for zero filled pages. > * Simply clear zero page flag. > */ > - if (zram_test_flag(zram, index, ZRAM_ZERO)) { > - zram_clear_flag(zram, index, ZRAM_ZERO); > + if (zram_test_flag(meta, index, ZRAM_ZERO)) { > + zram_clear_flag(meta, index, ZRAM_ZERO); > zram_stat_dec(>stats.pages_zero); > } > return; > @@ -124,17 +125,17 @@ static void zram_free_page(struct zram *zram, size_t > index) > if (unlikely(size > max_zpage_size)) > zram_stat_dec(>stats.bad_compress); > > - zs_free(zram->mem_pool, handle); > + zs_free(meta->mem_pool, handle); > > if (size <= PAGE_SIZE / 2) > zram_stat_dec(>stats.good_compress); > > zram_stat64_sub(zram, >stats.compr_size, > - zram->table[index].size); > + meta->table[index].size); > zram_stat_dec(>stats.pages_stored); > > - zram->table[index].handle = 0; > - zram->table[index].size = 0; > + meta->table[index].handle = 0; > + meta->table[index].size = 0; > } > > static void handle_zero_page(struct bio_vec *bvec) > @@ -159,20 +160,21 @@ static int zram_decompress_page(struct zram *zram, char > *mem, u32 index) > int ret = LZO_E_OK; > size_t clen = PAGE_SIZE; > unsigned char *cmem; > - unsigned long handle = zram->table[index].handle; > + struct zram_meta *meta = zram->meta; > + unsigned long handle = meta->table[index].handle; > > - if (!handle || zram_test_flag(zram, index, ZRAM_ZERO)) { > + if (!handle || zram_test_flag(meta, index, ZRAM_ZERO)) { > memset(mem, 0, PAGE_SIZE); > return 0; > } > > - cmem = zs_map_object(zram->mem_pool, handle, ZS_MM_RO); > - if (zram->table[index].size == PAGE_SIZE) > + cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_RO); > + if (meta->table[index].size == PAGE_SIZE) > memcpy(mem, cmem, PAGE_SIZE); > else > - ret = 
lzo1x_decompress_safe(cmem, zram->table[index].size, > + ret = lzo1x_decompress_safe(cmem, meta->table[index].size, > mem, &clen); > - zs_unmap_object(zram->mem_pool, handle); > > /*
Re: [PATCH 3/3] zram: get rid of lockdep warning
On Wed, Jan 16, 2013 at 6:12 PM, Minchan Kim minc...@kernel.org wrote: Lockdep complains about recursive deadlock of zram-init_lock. [1] made it false positive because we can't request IO to zram before setting disksize. Anyway, we should shut lockdep up to avoid many reporting from user. This patch allocates zram's metadata out of lock so we can fix it. In addition, this patch replace GFP_KERNEL with GFP_NOIO/GFP_ATOMIC in request handle path for partial I/O. [1] zram: give up lazy initialization of zram metadata Signed-off-by: Minchan Kim minc...@kernel.org --- drivers/staging/zram/zram_drv.c | 194 +++-- drivers/staging/zram/zram_drv.h | 12 ++- drivers/staging/zram/zram_sysfs.c | 13 ++- 3 files changed, 118 insertions(+), 101 deletions(-) diff --git a/drivers/staging/zram/zram_drv.c b/drivers/staging/zram/zram_drv.c index 3693780..eb1bc37 100644 --- a/drivers/staging/zram/zram_drv.c +++ b/drivers/staging/zram/zram_drv.c @@ -71,22 +71,22 @@ static void zram_stat64_inc(struct zram *zram, u64 *v) zram_stat64_add(zram, v, 1); } -static int zram_test_flag(struct zram *zram, u32 index, +static int zram_test_flag(struct zram_meta *meta, u32 index, enum zram_pageflags flag) { - return zram-table[index].flags BIT(flag); + return meta-table[index].flags BIT(flag); } -static void zram_set_flag(struct zram *zram, u32 index, +static void zram_set_flag(struct zram_meta *meta, u32 index, enum zram_pageflags flag) { - zram-table[index].flags |= BIT(flag); + meta-table[index].flags |= BIT(flag); } -static void zram_clear_flag(struct zram *zram, u32 index, +static void zram_clear_flag(struct zram_meta *meta, u32 index, enum zram_pageflags flag) { - zram-table[index].flags = ~BIT(flag); + meta-table[index].flags = ~BIT(flag); } static int page_zero_filled(void *ptr) @@ -106,16 +106,17 @@ static int page_zero_filled(void *ptr) static void zram_free_page(struct zram *zram, size_t index) { - unsigned long handle = zram-table[index].handle; - u16 size = zram-table[index].size; + struct 
zram_meta *meta = zram-meta; + unsigned long handle = meta-table[index].handle; + u16 size = meta-table[index].size; if (unlikely(!handle)) { /* * No memory is allocated for zero filled pages. * Simply clear zero page flag. */ - if (zram_test_flag(zram, index, ZRAM_ZERO)) { - zram_clear_flag(zram, index, ZRAM_ZERO); + if (zram_test_flag(meta, index, ZRAM_ZERO)) { + zram_clear_flag(meta, index, ZRAM_ZERO); zram_stat_dec(zram-stats.pages_zero); } return; @@ -124,17 +125,17 @@ static void zram_free_page(struct zram *zram, size_t index) if (unlikely(size max_zpage_size)) zram_stat_dec(zram-stats.bad_compress); - zs_free(zram-mem_pool, handle); + zs_free(meta-mem_pool, handle); if (size = PAGE_SIZE / 2) zram_stat_dec(zram-stats.good_compress); zram_stat64_sub(zram, zram-stats.compr_size, - zram-table[index].size); + meta-table[index].size); zram_stat_dec(zram-stats.pages_stored); - zram-table[index].handle = 0; - zram-table[index].size = 0; + meta-table[index].handle = 0; + meta-table[index].size = 0; } static void handle_zero_page(struct bio_vec *bvec) @@ -159,20 +160,21 @@ static int zram_decompress_page(struct zram *zram, char *mem, u32 index) int ret = LZO_E_OK; size_t clen = PAGE_SIZE; unsigned char *cmem; - unsigned long handle = zram-table[index].handle; + struct zram_meta *meta = zram-meta; + unsigned long handle = meta-table[index].handle; - if (!handle || zram_test_flag(zram, index, ZRAM_ZERO)) { + if (!handle || zram_test_flag(meta, index, ZRAM_ZERO)) { memset(mem, 0, PAGE_SIZE); return 0; } - cmem = zs_map_object(zram-mem_pool, handle, ZS_MM_RO); - if (zram-table[index].size == PAGE_SIZE) + cmem = zs_map_object(meta-mem_pool, handle, ZS_MM_RO); + if (meta-table[index].size == PAGE_SIZE) memcpy(mem, cmem, PAGE_SIZE); else - ret = lzo1x_decompress_safe(cmem, zram-table[index].size, + ret = lzo1x_decompress_safe(cmem, meta-table[index].size, mem, clen); - zs_unmap_object(zram-mem_pool, handle); + zs_unmap_object(meta-mem_pool, handle); /* Should NEVER happen. 
Return bio error if it does. */ if (unlikely(ret != LZO_E_OK)) { @@
[PATCH 3/3] zram: get rid of lockdep warning
Lockdep complains about recursive deadlock of zram->init_lock. [1] made it false positive because we can't request IO to zram before setting disksize. Anyway, we should shut lockdep up to avoid many reporting from user. This patch allocates zram's metadata out of lock so we can fix it. In addition, this patch replace GFP_KERNEL with GFP_NOIO/GFP_ATOMIC in request handle path for partial I/O. [1] zram: give up lazy initialization of zram metadata Signed-off-by: Minchan Kim --- drivers/staging/zram/zram_drv.c | 194 +++-- drivers/staging/zram/zram_drv.h | 12 ++- drivers/staging/zram/zram_sysfs.c | 13 ++- 3 files changed, 118 insertions(+), 101 deletions(-) diff --git a/drivers/staging/zram/zram_drv.c b/drivers/staging/zram/zram_drv.c index 3693780..eb1bc37 100644 --- a/drivers/staging/zram/zram_drv.c +++ b/drivers/staging/zram/zram_drv.c @@ -71,22 +71,22 @@ static void zram_stat64_inc(struct zram *zram, u64 *v) zram_stat64_add(zram, v, 1); } -static int zram_test_flag(struct zram *zram, u32 index, +static int zram_test_flag(struct zram_meta *meta, u32 index, enum zram_pageflags flag) { - return zram->table[index].flags & BIT(flag); + return meta->table[index].flags & BIT(flag); } -static void zram_set_flag(struct zram *zram, u32 index, +static void zram_set_flag(struct zram_meta *meta, u32 index, enum zram_pageflags flag) { - zram->table[index].flags |= BIT(flag); + meta->table[index].flags |= BIT(flag); } -static void zram_clear_flag(struct zram *zram, u32 index, +static void zram_clear_flag(struct zram_meta *meta, u32 index, enum zram_pageflags flag) { - zram->table[index].flags &= ~BIT(flag); + meta->table[index].flags &= ~BIT(flag); } static int page_zero_filled(void *ptr) @@ -106,16 +106,17 @@ static int page_zero_filled(void *ptr) static void zram_free_page(struct zram *zram, size_t index) { - unsigned long handle = zram->table[index].handle; - u16 size = zram->table[index].size; + struct zram_meta *meta = zram->meta; + unsigned long handle = 
meta->table[index].handle; + u16 size = meta->table[index].size; if (unlikely(!handle)) { /* * No memory is allocated for zero filled pages. * Simply clear zero page flag. */ - if (zram_test_flag(zram, index, ZRAM_ZERO)) { - zram_clear_flag(zram, index, ZRAM_ZERO); + if (zram_test_flag(meta, index, ZRAM_ZERO)) { + zram_clear_flag(meta, index, ZRAM_ZERO); zram_stat_dec(>stats.pages_zero); } return; @@ -124,17 +125,17 @@ static void zram_free_page(struct zram *zram, size_t index) if (unlikely(size > max_zpage_size)) zram_stat_dec(>stats.bad_compress); - zs_free(zram->mem_pool, handle); + zs_free(meta->mem_pool, handle); if (size <= PAGE_SIZE / 2) zram_stat_dec(>stats.good_compress); zram_stat64_sub(zram, >stats.compr_size, - zram->table[index].size); + meta->table[index].size); zram_stat_dec(>stats.pages_stored); - zram->table[index].handle = 0; - zram->table[index].size = 0; + meta->table[index].handle = 0; + meta->table[index].size = 0; } static void handle_zero_page(struct bio_vec *bvec) @@ -159,20 +160,21 @@ static int zram_decompress_page(struct zram *zram, char *mem, u32 index) int ret = LZO_E_OK; size_t clen = PAGE_SIZE; unsigned char *cmem; - unsigned long handle = zram->table[index].handle; + struct zram_meta *meta = zram->meta; + unsigned long handle = meta->table[index].handle; - if (!handle || zram_test_flag(zram, index, ZRAM_ZERO)) { + if (!handle || zram_test_flag(meta, index, ZRAM_ZERO)) { memset(mem, 0, PAGE_SIZE); return 0; } - cmem = zs_map_object(zram->mem_pool, handle, ZS_MM_RO); - if (zram->table[index].size == PAGE_SIZE) + cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_RO); + if (meta->table[index].size == PAGE_SIZE) memcpy(mem, cmem, PAGE_SIZE); else - ret = lzo1x_decompress_safe(cmem, zram->table[index].size, + ret = lzo1x_decompress_safe(cmem, meta->table[index].size, mem, ); - zs_unmap_object(zram->mem_pool, handle); + zs_unmap_object(meta->mem_pool, handle); /* Should NEVER happen. Return bio error if it does. 
*/ if (unlikely(ret != LZO_E_OK)) { @@ -190,11 +192,11 @@ static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec, int ret; struct page *page; unsigned char *user_mem, *uncmem =
[PATCH 3/3] zram: get rid of lockdep warning
Lockdep complains about recursive deadlock of zram-init_lock. [1] made it false positive because we can't request IO to zram before setting disksize. Anyway, we should shut lockdep up to avoid many reporting from user. This patch allocates zram's metadata out of lock so we can fix it. In addition, this patch replace GFP_KERNEL with GFP_NOIO/GFP_ATOMIC in request handle path for partial I/O. [1] zram: give up lazy initialization of zram metadata Signed-off-by: Minchan Kim minc...@kernel.org --- drivers/staging/zram/zram_drv.c | 194 +++-- drivers/staging/zram/zram_drv.h | 12 ++- drivers/staging/zram/zram_sysfs.c | 13 ++- 3 files changed, 118 insertions(+), 101 deletions(-) diff --git a/drivers/staging/zram/zram_drv.c b/drivers/staging/zram/zram_drv.c index 3693780..eb1bc37 100644 --- a/drivers/staging/zram/zram_drv.c +++ b/drivers/staging/zram/zram_drv.c @@ -71,22 +71,22 @@ static void zram_stat64_inc(struct zram *zram, u64 *v) zram_stat64_add(zram, v, 1); } -static int zram_test_flag(struct zram *zram, u32 index, +static int zram_test_flag(struct zram_meta *meta, u32 index, enum zram_pageflags flag) { - return zram-table[index].flags BIT(flag); + return meta-table[index].flags BIT(flag); } -static void zram_set_flag(struct zram *zram, u32 index, +static void zram_set_flag(struct zram_meta *meta, u32 index, enum zram_pageflags flag) { - zram-table[index].flags |= BIT(flag); + meta-table[index].flags |= BIT(flag); } -static void zram_clear_flag(struct zram *zram, u32 index, +static void zram_clear_flag(struct zram_meta *meta, u32 index, enum zram_pageflags flag) { - zram-table[index].flags = ~BIT(flag); + meta-table[index].flags = ~BIT(flag); } static int page_zero_filled(void *ptr) @@ -106,16 +106,17 @@ static int page_zero_filled(void *ptr) static void zram_free_page(struct zram *zram, size_t index) { - unsigned long handle = zram-table[index].handle; - u16 size = zram-table[index].size; + struct zram_meta *meta = zram-meta; + unsigned long handle = 
meta-table[index].handle; + u16 size = meta-table[index].size; if (unlikely(!handle)) { /* * No memory is allocated for zero filled pages. * Simply clear zero page flag. */ - if (zram_test_flag(zram, index, ZRAM_ZERO)) { - zram_clear_flag(zram, index, ZRAM_ZERO); + if (zram_test_flag(meta, index, ZRAM_ZERO)) { + zram_clear_flag(meta, index, ZRAM_ZERO); zram_stat_dec(zram-stats.pages_zero); } return; @@ -124,17 +125,17 @@ static void zram_free_page(struct zram *zram, size_t index) if (unlikely(size max_zpage_size)) zram_stat_dec(zram-stats.bad_compress); - zs_free(zram-mem_pool, handle); + zs_free(meta-mem_pool, handle); if (size = PAGE_SIZE / 2) zram_stat_dec(zram-stats.good_compress); zram_stat64_sub(zram, zram-stats.compr_size, - zram-table[index].size); + meta-table[index].size); zram_stat_dec(zram-stats.pages_stored); - zram-table[index].handle = 0; - zram-table[index].size = 0; + meta-table[index].handle = 0; + meta-table[index].size = 0; } static void handle_zero_page(struct bio_vec *bvec) @@ -159,20 +160,21 @@ static int zram_decompress_page(struct zram *zram, char *mem, u32 index) int ret = LZO_E_OK; size_t clen = PAGE_SIZE; unsigned char *cmem; - unsigned long handle = zram-table[index].handle; + struct zram_meta *meta = zram-meta; + unsigned long handle = meta-table[index].handle; - if (!handle || zram_test_flag(zram, index, ZRAM_ZERO)) { + if (!handle || zram_test_flag(meta, index, ZRAM_ZERO)) { memset(mem, 0, PAGE_SIZE); return 0; } - cmem = zs_map_object(zram-mem_pool, handle, ZS_MM_RO); - if (zram-table[index].size == PAGE_SIZE) + cmem = zs_map_object(meta-mem_pool, handle, ZS_MM_RO); + if (meta-table[index].size == PAGE_SIZE) memcpy(mem, cmem, PAGE_SIZE); else - ret = lzo1x_decompress_safe(cmem, zram-table[index].size, + ret = lzo1x_decompress_safe(cmem, meta-table[index].size, mem, clen); - zs_unmap_object(zram-mem_pool, handle); + zs_unmap_object(meta-mem_pool, handle); /* Should NEVER happen. Return bio error if it does. 
*/ if (unlikely(ret != LZO_E_OK)) { @@ -190,11 +192,11 @@ static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec, int ret; struct page *page; unsigned char *user_mem,
Re: [PATCH 3/3] zram: get rid of lockdep warning
On Wed, Nov 28, 2012 at 03:54:45PM +0100, Jerome Marchand wrote: > On 11/28/2012 03:35 AM, Minchan Kim wrote: > > Lockdep complains about recursive deadlock of zram->init_lock. > > [1] made it false positive because we can't request IO to zram > > before setting disksize. Anyway, we should shut lockdep up to > > avoid many reporting from user. > > > > This patch allocates zram's metadata out of lock so we can fix it. > > Is that me or the functions zram_meta_alloc/free are missing? Who bite my zram_meta_alloc/free? :) Will resend with your suggestion for removing GFP_ATOMIC. Thanks! > > Regards, > Jerome -- Kind regards, Minchan Kim -- To unsubscribe from this list: send the line "unsubscribe linux-kernel" in the body of a message to majord...@vger.kernel.org More majordomo info at http://vger.kernel.org/majordomo-info.html Please read the FAQ at http://www.tux.org/lkml/
Re: [PATCH 3/3] zram: get rid of lockdep warning
On 11/28/2012 03:35 AM, Minchan Kim wrote: > Lockdep complains about recursive deadlock of zram->init_lock. > [1] made it false positive because we can't request IO to zram > before setting disksize. Anyway, we should shut lockdep up to > avoid many reporting from user. > > This patch allocates zram's metadata out of lock so we can fix it. Is that me or the functions zram_meta_alloc/free are missing? Regards, Jerome > In addition, this patch replace GFP_KERNEL with GFP_NOIO/GFP_ATOMIC > in request handle path for partion I/O. > > [1] zram: give up lazy initialization of zram metadata > > Signed-off-by: Minchan Kim > --- > drivers/staging/zram/zram_drv.c | 139 > - > drivers/staging/zram/zram_drv.h | 12 +++- > drivers/staging/zram/zram_sysfs.c | 13 ++-- > 3 files changed, 63 insertions(+), 101 deletions(-) > > diff --git a/drivers/staging/zram/zram_drv.c b/drivers/staging/zram/zram_drv.c > index e04aefc..a19059e 100644 > --- a/drivers/staging/zram/zram_drv.c > +++ b/drivers/staging/zram/zram_drv.c > @@ -71,22 +71,22 @@ static void zram_stat64_inc(struct zram *zram, u64 *v) > zram_stat64_add(zram, v, 1); > } > > -static int zram_test_flag(struct zram *zram, u32 index, > +static int zram_test_flag(struct zram_meta *meta, u32 index, > enum zram_pageflags flag) > { > - return zram->table[index].flags & BIT(flag); > + return meta->table[index].flags & BIT(flag); > } > > -static void zram_set_flag(struct zram *zram, u32 index, > +static void zram_set_flag(struct zram_meta *meta, u32 index, > enum zram_pageflags flag) > { > - zram->table[index].flags |= BIT(flag); > + meta->table[index].flags |= BIT(flag); > } > > -static void zram_clear_flag(struct zram *zram, u32 index, > +static void zram_clear_flag(struct zram_meta *meta, u32 index, > enum zram_pageflags flag) > { > - zram->table[index].flags &= ~BIT(flag); > + meta->table[index].flags &= ~BIT(flag); > } > > static int page_zero_filled(void *ptr) > @@ -106,16 +106,17 @@ static int page_zero_filled(void *ptr) > > static 
void zram_free_page(struct zram *zram, size_t index) > { > - unsigned long handle = zram->table[index].handle; > - u16 size = zram->table[index].size; > + struct zram_meta *meta = zram->meta; > + unsigned long handle = meta->table[index].handle; > + u16 size = meta->table[index].size; > > if (unlikely(!handle)) { > /* >* No memory is allocated for zero filled pages. >* Simply clear zero page flag. >*/ > - if (zram_test_flag(zram, index, ZRAM_ZERO)) { > - zram_clear_flag(zram, index, ZRAM_ZERO); > + if (zram_test_flag(meta, index, ZRAM_ZERO)) { > + zram_clear_flag(meta, index, ZRAM_ZERO); > zram_stat_dec(>stats.pages_zero); > } > return; > @@ -124,17 +125,17 @@ static void zram_free_page(struct zram *zram, size_t > index) > if (unlikely(size > max_zpage_size)) > zram_stat_dec(>stats.bad_compress); > > - zs_free(zram->mem_pool, handle); > + zs_free(meta->mem_pool, handle); > > if (size <= PAGE_SIZE / 2) > zram_stat_dec(>stats.good_compress); > > zram_stat64_sub(zram, >stats.compr_size, > - zram->table[index].size); > + meta->table[index].size); > zram_stat_dec(>stats.pages_stored); > > - zram->table[index].handle = 0; > - zram->table[index].size = 0; > + meta->table[index].handle = 0; > + meta->table[index].size = 0; > } > > static void handle_zero_page(struct bio_vec *bvec) > @@ -159,20 +160,21 @@ static int zram_decompress_page(struct zram *zram, char > *mem, u32 index) > int ret = LZO_E_OK; > size_t clen = PAGE_SIZE; > unsigned char *cmem; > - unsigned long handle = zram->table[index].handle; > + struct zram_meta *meta = zram->meta; > + unsigned long handle = meta->table[index].handle; > > - if (!handle || zram_test_flag(zram, index, ZRAM_ZERO)) { > + if (!handle || zram_test_flag(meta, index, ZRAM_ZERO)) { > memset(mem, 0, PAGE_SIZE); > return 0; > } > > - cmem = zs_map_object(zram->mem_pool, handle, ZS_MM_RO); > - if (zram->table[index].size == PAGE_SIZE) > + cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_RO); > + if (meta->table[index].size == PAGE_SIZE) > 
memcpy(mem, cmem, PAGE_SIZE); > else > - ret = lzo1x_decompress_safe(cmem, zram->table[index].size, > + ret = lzo1x_decompress_safe(cmem, meta->table[index].size, > mem, &clen); > - zs_unmap_object(zram->mem_pool, handle); > + zs_unmap_object(meta->mem_pool, handle); > > /* Should NEVER happen.
Re: [PATCH 3/3] zram: get rid of lockdep warning
On 11/28/2012 03:35 AM, Minchan Kim wrote: Lockdep complains about recursive deadlock of zram-init_lock. [1] made it false positive because we can't request IO to zram before setting disksize. Anyway, we should shut lockdep up to avoid many reporting from user. This patch allocates zram's metadata out of lock so we can fix it. Is that me or the functions zram_meta_alloc/free are missing? Regards, Jerome In addition, this patch replace GFP_KERNEL with GFP_NOIO/GFP_ATOMIC in request handle path for partion I/O. [1] zram: give up lazy initialization of zram metadata Signed-off-by: Minchan Kim minc...@kernel.org --- drivers/staging/zram/zram_drv.c | 139 - drivers/staging/zram/zram_drv.h | 12 +++- drivers/staging/zram/zram_sysfs.c | 13 ++-- 3 files changed, 63 insertions(+), 101 deletions(-) diff --git a/drivers/staging/zram/zram_drv.c b/drivers/staging/zram/zram_drv.c index e04aefc..a19059e 100644 --- a/drivers/staging/zram/zram_drv.c +++ b/drivers/staging/zram/zram_drv.c @@ -71,22 +71,22 @@ static void zram_stat64_inc(struct zram *zram, u64 *v) zram_stat64_add(zram, v, 1); } -static int zram_test_flag(struct zram *zram, u32 index, +static int zram_test_flag(struct zram_meta *meta, u32 index, enum zram_pageflags flag) { - return zram-table[index].flags BIT(flag); + return meta-table[index].flags BIT(flag); } -static void zram_set_flag(struct zram *zram, u32 index, +static void zram_set_flag(struct zram_meta *meta, u32 index, enum zram_pageflags flag) { - zram-table[index].flags |= BIT(flag); + meta-table[index].flags |= BIT(flag); } -static void zram_clear_flag(struct zram *zram, u32 index, +static void zram_clear_flag(struct zram_meta *meta, u32 index, enum zram_pageflags flag) { - zram-table[index].flags = ~BIT(flag); + meta-table[index].flags = ~BIT(flag); } static int page_zero_filled(void *ptr) @@ -106,16 +106,17 @@ static int page_zero_filled(void *ptr) static void zram_free_page(struct zram *zram, size_t index) { - unsigned long handle = 
zram-table[index].handle; - u16 size = zram-table[index].size; + struct zram_meta *meta = zram-meta; + unsigned long handle = meta-table[index].handle; + u16 size = meta-table[index].size; if (unlikely(!handle)) { /* * No memory is allocated for zero filled pages. * Simply clear zero page flag. */ - if (zram_test_flag(zram, index, ZRAM_ZERO)) { - zram_clear_flag(zram, index, ZRAM_ZERO); + if (zram_test_flag(meta, index, ZRAM_ZERO)) { + zram_clear_flag(meta, index, ZRAM_ZERO); zram_stat_dec(zram-stats.pages_zero); } return; @@ -124,17 +125,17 @@ static void zram_free_page(struct zram *zram, size_t index) if (unlikely(size max_zpage_size)) zram_stat_dec(zram-stats.bad_compress); - zs_free(zram-mem_pool, handle); + zs_free(meta-mem_pool, handle); if (size = PAGE_SIZE / 2) zram_stat_dec(zram-stats.good_compress); zram_stat64_sub(zram, zram-stats.compr_size, - zram-table[index].size); + meta-table[index].size); zram_stat_dec(zram-stats.pages_stored); - zram-table[index].handle = 0; - zram-table[index].size = 0; + meta-table[index].handle = 0; + meta-table[index].size = 0; } static void handle_zero_page(struct bio_vec *bvec) @@ -159,20 +160,21 @@ static int zram_decompress_page(struct zram *zram, char *mem, u32 index) int ret = LZO_E_OK; size_t clen = PAGE_SIZE; unsigned char *cmem; - unsigned long handle = zram-table[index].handle; + struct zram_meta *meta = zram-meta; + unsigned long handle = meta-table[index].handle; - if (!handle || zram_test_flag(zram, index, ZRAM_ZERO)) { + if (!handle || zram_test_flag(meta, index, ZRAM_ZERO)) { memset(mem, 0, PAGE_SIZE); return 0; } - cmem = zs_map_object(zram-mem_pool, handle, ZS_MM_RO); - if (zram-table[index].size == PAGE_SIZE) + cmem = zs_map_object(meta-mem_pool, handle, ZS_MM_RO); + if (meta-table[index].size == PAGE_SIZE) memcpy(mem, cmem, PAGE_SIZE); else - ret = lzo1x_decompress_safe(cmem, zram-table[index].size, + ret = lzo1x_decompress_safe(cmem, meta-table[index].size, mem, clen); - zs_unmap_object(zram-mem_pool, 
handle); + zs_unmap_object(meta-mem_pool, handle); /* Should NEVER happen. Return bio error if it does. */ if (unlikely(ret != LZO_E_OK)) { @@ -190,11 +192,11 @@ static int
Re: [PATCH 3/3] zram: get rid of lockdep warning
On Wed, Nov 28, 2012 at 03:54:45PM +0100, Jerome Marchand wrote: On 11/28/2012 03:35 AM, Minchan Kim wrote: Lockdep complains about recursive deadlock of zram-init_lock. [1] made it false positive because we can't request IO to zram before setting disksize. Anyway, we should shut lockdep up to avoid many reporting from user. This patch allocates zram's metadata out of lock so we can fix it. Is that me or the functions zram_meta_alloc/free are missing? Who bite my zram_meta_alloc/free? :) Will resend with your suggestion for removing GFP_ATOMIC. Thanks! Regards, Jerome -- Kind regards, Minchan Kim -- To unsubscribe from this list: send the line unsubscribe linux-kernel in the body of a message to majord...@vger.kernel.org More majordomo info at http://vger.kernel.org/majordomo-info.html Please read the FAQ at http://www.tux.org/lkml/
[PATCH 3/3] zram: get rid of lockdep warning
Lockdep complains about recursive deadlock of zram->init_lock. [1] made it false positive because we can't request IO to zram before setting disksize. Anyway, we should shut lockdep up to avoid many reporting from user. This patch allocates zram's metadata out of lock so we can fix it. In addition, this patch replace GFP_KERNEL with GFP_NOIO/GFP_ATOMIC in request handle path for partion I/O. [1] zram: give up lazy initialization of zram metadata Signed-off-by: Minchan Kim --- drivers/staging/zram/zram_drv.c | 139 - drivers/staging/zram/zram_drv.h | 12 +++- drivers/staging/zram/zram_sysfs.c | 13 ++-- 3 files changed, 63 insertions(+), 101 deletions(-) diff --git a/drivers/staging/zram/zram_drv.c b/drivers/staging/zram/zram_drv.c index e04aefc..a19059e 100644 --- a/drivers/staging/zram/zram_drv.c +++ b/drivers/staging/zram/zram_drv.c @@ -71,22 +71,22 @@ static void zram_stat64_inc(struct zram *zram, u64 *v) zram_stat64_add(zram, v, 1); } -static int zram_test_flag(struct zram *zram, u32 index, +static int zram_test_flag(struct zram_meta *meta, u32 index, enum zram_pageflags flag) { - return zram->table[index].flags & BIT(flag); + return meta->table[index].flags & BIT(flag); } -static void zram_set_flag(struct zram *zram, u32 index, +static void zram_set_flag(struct zram_meta *meta, u32 index, enum zram_pageflags flag) { - zram->table[index].flags |= BIT(flag); + meta->table[index].flags |= BIT(flag); } -static void zram_clear_flag(struct zram *zram, u32 index, +static void zram_clear_flag(struct zram_meta *meta, u32 index, enum zram_pageflags flag) { - zram->table[index].flags &= ~BIT(flag); + meta->table[index].flags &= ~BIT(flag); } static int page_zero_filled(void *ptr) @@ -106,16 +106,17 @@ static int page_zero_filled(void *ptr) static void zram_free_page(struct zram *zram, size_t index) { - unsigned long handle = zram->table[index].handle; - u16 size = zram->table[index].size; + struct zram_meta *meta = zram->meta; + unsigned long handle = 
meta->table[index].handle; + u16 size = meta->table[index].size; if (unlikely(!handle)) { /* * No memory is allocated for zero filled pages. * Simply clear zero page flag. */ - if (zram_test_flag(zram, index, ZRAM_ZERO)) { - zram_clear_flag(zram, index, ZRAM_ZERO); + if (zram_test_flag(meta, index, ZRAM_ZERO)) { + zram_clear_flag(meta, index, ZRAM_ZERO); zram_stat_dec(>stats.pages_zero); } return; @@ -124,17 +125,17 @@ static void zram_free_page(struct zram *zram, size_t index) if (unlikely(size > max_zpage_size)) zram_stat_dec(>stats.bad_compress); - zs_free(zram->mem_pool, handle); + zs_free(meta->mem_pool, handle); if (size <= PAGE_SIZE / 2) zram_stat_dec(>stats.good_compress); zram_stat64_sub(zram, >stats.compr_size, - zram->table[index].size); + meta->table[index].size); zram_stat_dec(>stats.pages_stored); - zram->table[index].handle = 0; - zram->table[index].size = 0; + meta->table[index].handle = 0; + meta->table[index].size = 0; } static void handle_zero_page(struct bio_vec *bvec) @@ -159,20 +160,21 @@ static int zram_decompress_page(struct zram *zram, char *mem, u32 index) int ret = LZO_E_OK; size_t clen = PAGE_SIZE; unsigned char *cmem; - unsigned long handle = zram->table[index].handle; + struct zram_meta *meta = zram->meta; + unsigned long handle = meta->table[index].handle; - if (!handle || zram_test_flag(zram, index, ZRAM_ZERO)) { + if (!handle || zram_test_flag(meta, index, ZRAM_ZERO)) { memset(mem, 0, PAGE_SIZE); return 0; } - cmem = zs_map_object(zram->mem_pool, handle, ZS_MM_RO); - if (zram->table[index].size == PAGE_SIZE) + cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_RO); + if (meta->table[index].size == PAGE_SIZE) memcpy(mem, cmem, PAGE_SIZE); else - ret = lzo1x_decompress_safe(cmem, zram->table[index].size, + ret = lzo1x_decompress_safe(cmem, meta->table[index].size, mem, ); - zs_unmap_object(zram->mem_pool, handle); + zs_unmap_object(meta->mem_pool, handle); /* Should NEVER happen. Return bio error if it does. 
*/ if (unlikely(ret != LZO_E_OK)) { @@ -190,11 +192,11 @@ static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec, int ret; struct page *page; unsigned char *user_mem, *uncmem =
[PATCH 3/3] zram: get rid of lockdep warning
Lockdep complains about recursive deadlock of zram-init_lock. [1] made it false positive because we can't request IO to zram before setting disksize. Anyway, we should shut lockdep up to avoid many reporting from user. This patch allocates zram's metadata out of lock so we can fix it. In addition, this patch replace GFP_KERNEL with GFP_NOIO/GFP_ATOMIC in request handle path for partion I/O. [1] zram: give up lazy initialization of zram metadata Signed-off-by: Minchan Kim minc...@kernel.org --- drivers/staging/zram/zram_drv.c | 139 - drivers/staging/zram/zram_drv.h | 12 +++- drivers/staging/zram/zram_sysfs.c | 13 ++-- 3 files changed, 63 insertions(+), 101 deletions(-) diff --git a/drivers/staging/zram/zram_drv.c b/drivers/staging/zram/zram_drv.c index e04aefc..a19059e 100644 --- a/drivers/staging/zram/zram_drv.c +++ b/drivers/staging/zram/zram_drv.c @@ -71,22 +71,22 @@ static void zram_stat64_inc(struct zram *zram, u64 *v) zram_stat64_add(zram, v, 1); } -static int zram_test_flag(struct zram *zram, u32 index, +static int zram_test_flag(struct zram_meta *meta, u32 index, enum zram_pageflags flag) { - return zram-table[index].flags BIT(flag); + return meta-table[index].flags BIT(flag); } -static void zram_set_flag(struct zram *zram, u32 index, +static void zram_set_flag(struct zram_meta *meta, u32 index, enum zram_pageflags flag) { - zram-table[index].flags |= BIT(flag); + meta-table[index].flags |= BIT(flag); } -static void zram_clear_flag(struct zram *zram, u32 index, +static void zram_clear_flag(struct zram_meta *meta, u32 index, enum zram_pageflags flag) { - zram-table[index].flags = ~BIT(flag); + meta-table[index].flags = ~BIT(flag); } static int page_zero_filled(void *ptr) @@ -106,16 +106,17 @@ static int page_zero_filled(void *ptr) static void zram_free_page(struct zram *zram, size_t index) { - unsigned long handle = zram-table[index].handle; - u16 size = zram-table[index].size; + struct zram_meta *meta = zram-meta; + unsigned long handle = 
meta-table[index].handle; + u16 size = meta-table[index].size; if (unlikely(!handle)) { /* * No memory is allocated for zero filled pages. * Simply clear zero page flag. */ - if (zram_test_flag(zram, index, ZRAM_ZERO)) { - zram_clear_flag(zram, index, ZRAM_ZERO); + if (zram_test_flag(meta, index, ZRAM_ZERO)) { + zram_clear_flag(meta, index, ZRAM_ZERO); zram_stat_dec(zram-stats.pages_zero); } return; @@ -124,17 +125,17 @@ static void zram_free_page(struct zram *zram, size_t index) if (unlikely(size max_zpage_size)) zram_stat_dec(zram-stats.bad_compress); - zs_free(zram-mem_pool, handle); + zs_free(meta-mem_pool, handle); if (size = PAGE_SIZE / 2) zram_stat_dec(zram-stats.good_compress); zram_stat64_sub(zram, zram-stats.compr_size, - zram-table[index].size); + meta-table[index].size); zram_stat_dec(zram-stats.pages_stored); - zram-table[index].handle = 0; - zram-table[index].size = 0; + meta-table[index].handle = 0; + meta-table[index].size = 0; } static void handle_zero_page(struct bio_vec *bvec) @@ -159,20 +160,21 @@ static int zram_decompress_page(struct zram *zram, char *mem, u32 index) int ret = LZO_E_OK; size_t clen = PAGE_SIZE; unsigned char *cmem; - unsigned long handle = zram-table[index].handle; + struct zram_meta *meta = zram-meta; + unsigned long handle = meta-table[index].handle; - if (!handle || zram_test_flag(zram, index, ZRAM_ZERO)) { + if (!handle || zram_test_flag(meta, index, ZRAM_ZERO)) { memset(mem, 0, PAGE_SIZE); return 0; } - cmem = zs_map_object(zram-mem_pool, handle, ZS_MM_RO); - if (zram-table[index].size == PAGE_SIZE) + cmem = zs_map_object(meta-mem_pool, handle, ZS_MM_RO); + if (meta-table[index].size == PAGE_SIZE) memcpy(mem, cmem, PAGE_SIZE); else - ret = lzo1x_decompress_safe(cmem, zram-table[index].size, + ret = lzo1x_decompress_safe(cmem, meta-table[index].size, mem, clen); - zs_unmap_object(zram-mem_pool, handle); + zs_unmap_object(meta-mem_pool, handle); /* Should NEVER happen. Return bio error if it does. 
*/ if (unlikely(ret != LZO_E_OK)) { @@ -190,11 +192,11 @@ static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec, int ret; struct page *page; unsigned char *user_mem,