Re: [PATCH v1 02/10] zsmalloc: decouple handle and object

2015-01-26 Thread Minchan Kim
Hello Ganesh,

On Mon, Jan 26, 2015 at 10:53:57AM +0800, Ganesh Mahendran wrote:
> Hello, Minchan
> 
> 2015-01-21 14:14 GMT+08:00 Minchan Kim :
> > Currently, zram's handle encodes the object's location directly, which
> > makes it hard to support migration/compaction.
> >
> > This patch decouples the handle from the object by adding an indirection
> > layer. For that, it allocates the handle dynamically and returns it to
> > the user. The handle is an address allocated by the slab allocator, so it
> > is unique, and the allocated memory holds the object's position, so we
> > can get the object's position by dereferencing the handle.
> >
> > Signed-off-by: Minchan Kim minc...@kernel.org
> > ---
> >  mm/zsmalloc.c | 90 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++----------------------
> >  1 file changed, 68 insertions(+), 22 deletions(-)
> >
> > diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
> > index 0dec1fa..9436ee8 100644
> > --- a/mm/zsmalloc.c
> > +++ b/mm/zsmalloc.c
> > @@ -110,6 +110,8 @@
> >  #define ZS_MAX_ZSPAGE_ORDER 2
> >  #define ZS_MAX_PAGES_PER_ZSPAGE (_AC(1, UL) << ZS_MAX_ZSPAGE_ORDER)
> >
> > +#define ZS_HANDLE_SIZE (sizeof(unsigned long))
> > +
> >  /*
> >   * Object location (<PFN>, <obj_idx>) is encoded as
> >   * as single (unsigned long) handle value.
> > @@ -241,6 +243,7 @@ struct zs_pool {
> > char *name;
> >
> > struct size_class **size_class;
> > +   struct kmem_cache *handle_cachep;
> >
> > gfp_t flags;/* allocation flags used when growing pool */
> > atomic_long_t pages_allocated;
> > @@ -269,6 +272,34 @@ struct mapping_area {
> > enum zs_mapmode vm_mm; /* mapping mode */
> >  };
> >
> > +static int create_handle_cache(struct zs_pool *pool)
> > +{
> > +   pool->handle_cachep = kmem_cache_create("zs_handle", ZS_HANDLE_SIZE,
> > +   0, 0, NULL);
> > +   return pool->handle_cachep ? 0 : 1;
> > +}
> > +
> > +static void destroy_handle_cache(struct zs_pool *pool)
> > +{
> > +   kmem_cache_destroy(pool->handle_cachep);
> > +}
> > +
> > +static unsigned long alloc_handle(struct zs_pool *pool)
> > +{
> > +   return (unsigned long)kmem_cache_alloc(pool->handle_cachep,
> > +   pool->flags & ~__GFP_HIGHMEM);
> > +}
> > +
> > +static void free_handle(struct zs_pool *pool, unsigned long handle)
> > +{
> > +   kmem_cache_free(pool->handle_cachep, (void *)handle);
> > +}
> > +
> > +static void record_obj(unsigned long handle, unsigned long obj)
> > +{
> > +   *(unsigned long *)handle = obj;
> > +}
> > +
> >  /* zpool driver */
> >
> >  #ifdef CONFIG_ZPOOL
> > @@ -595,13 +626,18 @@ static void *obj_location_to_handle(struct page 
> > *page, unsigned long obj_idx)
> >   * decoded obj_idx back to its original value since it was adjusted in
> >   * obj_location_to_handle().
> >   */
> > -static void obj_handle_to_location(unsigned long handle, struct page 
> > **page,
> > +static void obj_to_location(unsigned long handle, struct page **page,
> > unsigned long *obj_idx)
> >  {
> > *page = pfn_to_page(handle >> OBJ_INDEX_BITS);
> > *obj_idx = (handle & OBJ_INDEX_MASK) - 1;
> >  }
> >
> > +static unsigned long handle_to_obj(unsigned long handle)
> > +{
> > +   return *(unsigned long *)handle;
> > +}
> > +
> >  static unsigned long obj_idx_to_offset(struct page *page,
> > unsigned long obj_idx, int class_size)
> >  {
> > @@ -1153,7 +1189,7 @@ void *zs_map_object(struct zs_pool *pool, unsigned 
> > long handle,
> > enum zs_mapmode mm)
> >  {
> > struct page *page;
> > -   unsigned long obj_idx, off;
> > +   unsigned long obj, obj_idx, off;
> >
> > unsigned int class_idx;
> > enum fullness_group fg;
> > @@ -1170,7 +1206,8 @@ void *zs_map_object(struct zs_pool *pool, unsigned 
> > long handle,
> >  */
> > BUG_ON(in_interrupt());
> >
> > -   obj_handle_to_location(handle, &page, &obj_idx);
> > +   obj = handle_to_obj(handle);
> > +   obj_to_location(obj, &page, &obj_idx);
> > get_zspage_mapping(get_first_page(page), &class_idx, &fg);
> > class = pool->size_class[class_idx];
> > off = obj_idx_to_offset(page, obj_idx, class->size);
> > @@ -1195,7 +1232,7 @@ EXPORT_SYMBOL_GPL(zs_map_object);
> >  void zs_unmap_object(struct zs_pool *pool, unsigned long handle)
> >  {
> > struct page *page;
> > -   unsigned long obj_idx, off;
> > +   unsigned long obj, obj_idx, off;
> >
> > unsigned int class_idx;
> > enum fullness_group fg;
> > @@ -1204,7 +1241,8 @@ void zs_unmap_object(struct zs_pool *pool, unsigned 
> > long handle)
> >
> > BUG_ON(!handle);
> >
> > -   obj_handle_to_location(handle, &page, &obj_idx);
> > +   obj = handle_to_obj(handle);
> > +   obj_to_location(obj, &page, &obj_idx);
> > get_zspage_mapping(get_first_page(page), &class_idx, &fg);
> > class = pool->size_class[class_idx];
> > off = obj_idx_to_offset(page, obj_idx, class->size);
> > @@ -1236,7 +1274,7 @@ EXPORT_SYMBOL_GPL(zs_unmap_object);
> >   */
> >  unsigned long zs_malloc(struct zs_pool *pool, size_t size)
> >  {
> > -   unsigned long obj;
> > +   unsigned long handle, obj;
> > struct 

Re: [PATCH v1 02/10] zsmalloc: decouple handle and object

2015-01-25 Thread Ganesh Mahendran
Hello, Minchan

2015-01-21 14:14 GMT+08:00 Minchan Kim minc...@kernel.org:
> Currently, zram's handle encodes the object's location directly, which
> makes it hard to support migration/compaction.
>
> This patch decouples the handle from the object by adding an indirection
> layer. For that, it allocates the handle dynamically and returns it to
> the user. The handle is an address allocated by the slab allocator, so it
> is unique, and the allocated memory holds the object's position, so we
> can get the object's position by dereferencing the handle.
>
> Signed-off-by: Minchan Kim minc...@kernel.org
> ---
>  mm/zsmalloc.c | 90 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++----------------------
>  1 file changed, 68 insertions(+), 22 deletions(-)
>
> diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
> index 0dec1fa..9436ee8 100644
> --- a/mm/zsmalloc.c
> +++ b/mm/zsmalloc.c
> @@ -110,6 +110,8 @@
>  #define ZS_MAX_ZSPAGE_ORDER 2
>  #define ZS_MAX_PAGES_PER_ZSPAGE (_AC(1, UL) << ZS_MAX_ZSPAGE_ORDER)
>
> +#define ZS_HANDLE_SIZE (sizeof(unsigned long))
> +
>  /*
>   * Object location (<PFN>, <obj_idx>) is encoded as
>   * as single (unsigned long) handle value.
> @@ -241,6 +243,7 @@ struct zs_pool {
> char *name;
>
> struct size_class **size_class;
> +   struct kmem_cache *handle_cachep;
>
> gfp_t flags;/* allocation flags used when growing pool */
> atomic_long_t pages_allocated;
> @@ -269,6 +272,34 @@ struct mapping_area {
> enum zs_mapmode vm_mm; /* mapping mode */
>  };
>
> +static int create_handle_cache(struct zs_pool *pool)
> +{
> +   pool->handle_cachep = kmem_cache_create("zs_handle", ZS_HANDLE_SIZE,
> +   0, 0, NULL);
> +   return pool->handle_cachep ? 0 : 1;
> +}
> +
> +static void destroy_handle_cache(struct zs_pool *pool)
> +{
> +   kmem_cache_destroy(pool->handle_cachep);
> +}
> +
> +static unsigned long alloc_handle(struct zs_pool *pool)
> +{
> +   return (unsigned long)kmem_cache_alloc(pool->handle_cachep,
> +   pool->flags & ~__GFP_HIGHMEM);
> +}
> +
> +static void free_handle(struct zs_pool *pool, unsigned long handle)
> +{
> +   kmem_cache_free(pool->handle_cachep, (void *)handle);
> +}
> +
> +static void record_obj(unsigned long handle, unsigned long obj)
> +{
> +   *(unsigned long *)handle = obj;
> +}
> +
>  /* zpool driver */
>
>  #ifdef CONFIG_ZPOOL
> @@ -595,13 +626,18 @@ static void *obj_location_to_handle(struct page *page, 
> unsigned long obj_idx)
>   * decoded obj_idx back to its original value since it was adjusted in
>   * obj_location_to_handle().
>   */
> -static void obj_handle_to_location(unsigned long handle, struct page **page,
> +static void obj_to_location(unsigned long handle, struct page **page,
> unsigned long *obj_idx)
>  {
> *page = pfn_to_page(handle >> OBJ_INDEX_BITS);
> *obj_idx = (handle & OBJ_INDEX_MASK) - 1;
>  }
>
> +static unsigned long handle_to_obj(unsigned long handle)
> +{
> +   return *(unsigned long *)handle;
> +}
> +
>  static unsigned long obj_idx_to_offset(struct page *page,
> unsigned long obj_idx, int class_size)
>  {
> @@ -1153,7 +1189,7 @@ void *zs_map_object(struct zs_pool *pool, unsigned long 
> handle,
> enum zs_mapmode mm)
>  {
> struct page *page;
> -   unsigned long obj_idx, off;
> +   unsigned long obj, obj_idx, off;
>
> unsigned int class_idx;
> enum fullness_group fg;
> @@ -1170,7 +1206,8 @@ void *zs_map_object(struct zs_pool *pool, unsigned long 
> handle,
>  */
> BUG_ON(in_interrupt());
>
> -   obj_handle_to_location(handle, &page, &obj_idx);
> +   obj = handle_to_obj(handle);
> +   obj_to_location(obj, &page, &obj_idx);
> get_zspage_mapping(get_first_page(page), &class_idx, &fg);
> class = pool->size_class[class_idx];
> off = obj_idx_to_offset(page, obj_idx, class->size);
> @@ -1195,7 +1232,7 @@ EXPORT_SYMBOL_GPL(zs_map_object);
>  void zs_unmap_object(struct zs_pool *pool, unsigned long handle)
>  {
> struct page *page;
> -   unsigned long obj_idx, off;
> +   unsigned long obj, obj_idx, off;
>
> unsigned int class_idx;
> enum fullness_group fg;
> @@ -1204,7 +1241,8 @@ void zs_unmap_object(struct zs_pool *pool, unsigned 
> long handle)
>
> BUG_ON(!handle);
>
> -   obj_handle_to_location(handle, &page, &obj_idx);
> +   obj = handle_to_obj(handle);
> +   obj_to_location(obj, &page, &obj_idx);
> get_zspage_mapping(get_first_page(page), &class_idx, &fg);
> class = pool->size_class[class_idx];
> off = obj_idx_to_offset(page, obj_idx, class->size);
> @@ -1236,7 +1274,7 @@ EXPORT_SYMBOL_GPL(zs_unmap_object);
>   */
>  unsigned long zs_malloc(struct zs_pool *pool, size_t size)
>  {
> -   unsigned long obj;
> +   unsigned long handle, obj;
> struct link_free *link;
> struct size_class *class;
> void *vaddr;
> @@ -1247,6 +1285,10 @@ unsigned long zs_malloc(struct zs_pool *pool, size_t 
> size)
> if (unlikely(!size || size > ZS_MAX_ALLOC_SIZE))

[PATCH v1 02/10] zsmalloc: decouple handle and object

2015-01-20 Thread Minchan Kim
Currently, zram's handle encodes the object's location directly, which
makes it hard to support migration/compaction.

This patch decouples the handle from the object by adding an indirection
layer. For that, it allocates the handle dynamically and returns it to
the user. The handle is an address allocated by the slab allocator, so it
is unique, and the allocated memory holds the object's position, so we
can get the object's position by dereferencing the handle.

Signed-off-by: Minchan Kim minc...@kernel.org
---
 mm/zsmalloc.c | 90 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++----------------------
 1 file changed, 68 insertions(+), 22 deletions(-)
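
To see the indirection in isolation, here is a minimal, hypothetical
userspace sketch (not part of the patch): plain malloc() stands in for
the zs_handle kmem_cache added below, and an arbitrary integer stands in
for the encoded <PFN, obj_idx> value. record_obj()/handle_to_obj() mirror
the helpers the patch introduces.

#include <stdio.h>
#include <stdlib.h>

/* The handle is the address of a pointer-sized slot that stores the
 * object's current (encoded) location. */
static unsigned long alloc_handle(void)
{
	return (unsigned long)malloc(sizeof(unsigned long));
}

static void record_obj(unsigned long handle, unsigned long obj)
{
	*(unsigned long *)handle = obj;
}

static unsigned long handle_to_obj(unsigned long handle)
{
	return *(unsigned long *)handle;
}

int main(void)
{
	unsigned long handle = alloc_handle();

	record_obj(handle, 0xabc0);	/* object currently lives at 0xabc0 */
	printf("obj = %#lx\n", handle_to_obj(handle));

	/* Compaction may move the object and rewrite only the slot;
	 * the handle value the user holds stays valid. */
	record_obj(handle, 0xdef0);
	printf("obj = %#lx\n", handle_to_obj(handle));

	free((void *)handle);
	return 0;
}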

diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index 0dec1fa..9436ee8 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -110,6 +110,8 @@
 #define ZS_MAX_ZSPAGE_ORDER 2
 #define ZS_MAX_PAGES_PER_ZSPAGE (_AC(1, UL) << ZS_MAX_ZSPAGE_ORDER)
 
+#define ZS_HANDLE_SIZE (sizeof(unsigned long))
+
 /*
  * Object location (<PFN>, <obj_idx>) is encoded as
  * as single (unsigned long) handle value.
@@ -241,6 +243,7 @@ struct zs_pool {
char *name;
 
struct size_class **size_class;
+   struct kmem_cache *handle_cachep;
 
gfp_t flags;/* allocation flags used when growing pool */
atomic_long_t pages_allocated;
@@ -269,6 +272,34 @@ struct mapping_area {
enum zs_mapmode vm_mm; /* mapping mode */
 };
 
+static int create_handle_cache(struct zs_pool *pool)
+{
+   pool->handle_cachep = kmem_cache_create("zs_handle", ZS_HANDLE_SIZE,
+   0, 0, NULL);
+   return pool->handle_cachep ? 0 : 1;
+}
+
+static void destroy_handle_cache(struct zs_pool *pool)
+{
+   kmem_cache_destroy(pool->handle_cachep);
+}
+
+static unsigned long alloc_handle(struct zs_pool *pool)
+{
+   return (unsigned long)kmem_cache_alloc(pool->handle_cachep,
+   pool->flags & ~__GFP_HIGHMEM);
+}
+
+static void free_handle(struct zs_pool *pool, unsigned long handle)
+{
+   kmem_cache_free(pool->handle_cachep, (void *)handle);
+}
+
+static void record_obj(unsigned long handle, unsigned long obj)
+{
+   *(unsigned long *)handle = obj;
+}
+
 /* zpool driver */
 
 #ifdef CONFIG_ZPOOL
@@ -595,13 +626,18 @@ static void *obj_location_to_handle(struct page *page, 
unsigned long obj_idx)
  * decoded obj_idx back to its original value since it was adjusted in
  * obj_location_to_handle().
  */
-static void obj_handle_to_location(unsigned long handle, struct page **page,
+static void obj_to_location(unsigned long handle, struct page **page,
unsigned long *obj_idx)
 {
*page = pfn_to_page(handle >> OBJ_INDEX_BITS);
*obj_idx = (handle & OBJ_INDEX_MASK) - 1;
 }
 
+static unsigned long handle_to_obj(unsigned long handle)
+{
+   return *(unsigned long *)handle;
+}
+
 static unsigned long obj_idx_to_offset(struct page *page,
unsigned long obj_idx, int class_size)
 {
@@ -1153,7 +1189,7 @@ void *zs_map_object(struct zs_pool *pool, unsigned long 
handle,
enum zs_mapmode mm)
 {
struct page *page;
-   unsigned long obj_idx, off;
+   unsigned long obj, obj_idx, off;
 
unsigned int class_idx;
enum fullness_group fg;
@@ -1170,7 +1206,8 @@ void *zs_map_object(struct zs_pool *pool, unsigned long 
handle,
 */
BUG_ON(in_interrupt());
 
-   obj_handle_to_location(handle, &page, &obj_idx);
+   obj = handle_to_obj(handle);
+   obj_to_location(obj, &page, &obj_idx);
get_zspage_mapping(get_first_page(page), &class_idx, &fg);
class = pool->size_class[class_idx];
off = obj_idx_to_offset(page, obj_idx, class->size);
@@ -1195,7 +1232,7 @@ EXPORT_SYMBOL_GPL(zs_map_object);
 void zs_unmap_object(struct zs_pool *pool, unsigned long handle)
 {
struct page *page;
-   unsigned long obj_idx, off;
+   unsigned long obj, obj_idx, off;
 
unsigned int class_idx;
enum fullness_group fg;
@@ -1204,7 +1241,8 @@ void zs_unmap_object(struct zs_pool *pool, unsigned long 
handle)
 
BUG_ON(!handle);
 
-   obj_handle_to_location(handle, &page, &obj_idx);
+   obj = handle_to_obj(handle);
+   obj_to_location(obj, &page, &obj_idx);
get_zspage_mapping(get_first_page(page), &class_idx, &fg);
class = pool->size_class[class_idx];
off = obj_idx_to_offset(page, obj_idx, class->size);
@@ -1236,7 +1274,7 @@ EXPORT_SYMBOL_GPL(zs_unmap_object);
  */
 unsigned long zs_malloc(struct zs_pool *pool, size_t size)
 {
-   unsigned long obj;
+   unsigned long handle, obj;
struct link_free *link;
struct size_class *class;
void *vaddr;
@@ -1247,6 +1285,10 @@ unsigned long zs_malloc(struct zs_pool *pool, size_t 
size)
if (unlikely(!size || size > ZS_MAX_ALLOC_SIZE))
return 0;
 
+   handle = alloc_handle(pool);
+   if (!handle)
+   return 0;
+
class = pool->size_class[get_size_class_index(size)];
 
spin_lock(&class->lock);
@@ -1255,8 +1297,10 @@ unsigned long zs_malloc(struct 
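
The <PFN, obj_idx> packing itself is unchanged by this patch. For
reference, a standalone, hypothetical sketch of the bit arithmetic behind
obj_location_to_handle()/obj_to_location(): OBJ_INDEX_BITS is an
illustrative width here (zsmalloc derives it from the zspage geometry),
and the index is stored +1 on encode, matching the "- 1" in
obj_to_location() above, so that an encoded value of 0 never names a
valid object.

#include <assert.h>
#include <stdio.h>

#define OBJ_INDEX_BITS	12	/* illustrative width */
#define OBJ_INDEX_MASK	((1UL << OBJ_INDEX_BITS) - 1)

/* Pack <pfn, obj_idx> into a single word. */
static unsigned long location_to_obj(unsigned long pfn, unsigned long obj_idx)
{
	return (pfn << OBJ_INDEX_BITS) | ((obj_idx + 1) & OBJ_INDEX_MASK);
}

/* Unpack; the "- 1" undoes the "+ 1" applied on encode. */
static void obj_to_location(unsigned long obj, unsigned long *pfn,
			    unsigned long *obj_idx)
{
	*pfn = obj >> OBJ_INDEX_BITS;
	*obj_idx = (obj & OBJ_INDEX_MASK) - 1;
}

int main(void)
{
	unsigned long obj = location_to_obj(0x1234, 7);
	unsigned long pfn, idx;

	obj_to_location(obj, &pfn, &idx);
	assert(pfn == 0x1234 && idx == 7);
	printf("obj = %#lx -> pfn = %#lx, idx = %lu\n", obj, pfn, idx);
	return 0;
}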
