[PATCH] z3fold: add inter-page compaction

2019-10-05 Thread Vitaly Wool
From: Vitaly Wool 

For each page scheduled for compaction (e.g. by z3fold_free()),
try to apply inter-page compaction before running the traditional/
existing intra-page compaction. That means, if the page has only one
buddy, we treat that buddy as a new object that we aim to place into
an existing z3fold page. If such a page is found, that object is
transferred and the old page is freed completely. The transferred
object is named "foreign" and treated slightly differently thereafter.

Namely, we increase the "foreign handle" counter for the new page. Pages
with non-zero "foreign handle" count become unmovable. This patch
implements "foreign handle" detection when a handle is freed to
decrement the foreign handle counter accordingly, so a page may
well become movable again as time goes by.
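
To illustrate, the detection itself is a single comparison: a handle
is foreign when it is stored in a slots object other than the one
owned by the page its object now lives in. A minimal sketch
(is_foreign_handle() is a made-up name here; the real check sits
inline in free_handle()):

	static bool is_foreign_handle(unsigned long handle)
	{
		struct z3fold_header *zhdr = handle_to_z3fold_header(handle);

		/* headless handles aside, a native handle lives in
		 * the slots object of its own page */
		return zhdr->slots != handle_to_slots(handle);
	}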

As a result, we almost always have exactly 3 objects per page and
significantly better average compression ratio.

Signed-off-by: Vitaly Wool 
---
 mm/z3fold.c | 363 +---
 1 file changed, 291 insertions(+), 72 deletions(-)

diff --git a/mm/z3fold.c b/mm/z3fold.c
index 6d3d3f698ebb..25713a4a7186 100644
--- a/mm/z3fold.c
+++ b/mm/z3fold.c
@@ -41,6 +41,7 @@
 #include <linux/workqueue.h>
 #include <linux/slab.h>
 #include <linux/spinlock.h>
+#include <linux/rwlock.h>
 #include <linux/zpool.h>
 #include <linux/magic.h>
 
@@ -90,6 +91,7 @@ struct z3fold_buddy_slots {
 */
unsigned long slot[BUDDY_MASK + 1];
unsigned long pool; /* back link + flags */
+   rwlock_t lock;
 };
 #define HANDLE_FLAG_MASK   (0x03)
 
@@ -124,6 +126,7 @@ struct z3fold_header {
unsigned short start_middle;
unsigned short first_num:2;
unsigned short mapped_count:2;
+   unsigned short foreign_handles:2;
 };
 
 /**
@@ -178,6 +181,19 @@ enum z3fold_page_flags {
PAGE_CLAIMED, /* by either reclaim or free */
 };
 
+/*
+ * handle flags, go under HANDLE_FLAG_MASK
+ */
+enum z3fold_handle_flags {
+   HANDLES_ORPHANED = 0,
+};
+
+/*
+ * Forward declarations
+ */
+static struct z3fold_header *__z3fold_alloc(struct z3fold_pool *, size_t, bool);
+static void compact_page_work(struct work_struct *w);
+
 /*
  * Helpers
 */
@@ -191,8 +207,6 @@ static int size_to_chunks(size_t size)
 #define for_each_unbuddied_list(_iter, _begin) \
for ((_iter) = (_begin); (_iter) < NCHUNKS; (_iter)++)
 
-static void compact_page_work(struct work_struct *w);
-
 static inline struct z3fold_buddy_slots *alloc_slots(struct z3fold_pool *pool,
gfp_t gfp)
 {
@@ -204,6 +218,7 @@ static inline struct z3fold_buddy_slots *alloc_slots(struct z3fold_pool *pool,
if (slots) {
memset(slots->slot, 0, sizeof(slots->slot));
slots->pool = (unsigned long)pool;
+   rwlock_init(&slots->lock);
}
 
return slots;
@@ -219,27 +234,108 @@ static inline struct z3fold_buddy_slots *handle_to_slots(unsigned long handle)
return (struct z3fold_buddy_slots *)(handle & ~(SLOTS_ALIGN - 1));
 }
 
+/* Lock a z3fold page */
+static inline void z3fold_page_lock(struct z3fold_header *zhdr)
+{
+   spin_lock(&zhdr->page_lock);
+}
+
+/* Try to lock a z3fold page */
+static inline int z3fold_page_trylock(struct z3fold_header *zhdr)
+{
+   return spin_trylock(&zhdr->page_lock);
+}
+
+/* Unlock a z3fold page */
+static inline void z3fold_page_unlock(struct z3fold_header *zhdr)
+{
+   spin_unlock(&zhdr->page_lock);
+}
+
+
+static inline struct z3fold_header *__get_z3fold_header(unsigned long handle,
+   bool lock)
+{
+   struct z3fold_buddy_slots *slots;
+   struct z3fold_header *zhdr;
+   int locked = 0;
+
+   if (!(handle & (1 << PAGE_HEADLESS))) {
+   slots = handle_to_slots(handle);
+   do {
+   unsigned long addr;
+
+   read_lock(&slots->lock);
+   addr = *(unsigned long *)handle;
+   zhdr = (struct z3fold_header *)(addr & PAGE_MASK);
+   if (lock)
+   locked = z3fold_page_trylock(zhdr);
+   read_unlock(&slots->lock);
+   if (locked)
+   break;
+   cpu_relax();
+   } while (lock);
+   } else {
+   zhdr = (struct z3fold_header *)(handle & PAGE_MASK);
+   }
+
+   return zhdr;
+}
+
+/* Returns the z3fold page where a given handle is stored */
+static inline struct z3fold_header *handle_to_z3fold_header(unsigned long h)
+{
+   return __get_z3fold_header(h, false);
+}
+
+/* return locked z3fold page if it's not headless */
+static inline struct z3fold_header *get_z3fold_header(unsigned long h)
+{
+   return __get_z3fold_header(h, true);
+}
+
+static inline void put_z3fold_header(struct z3fold_header *zhdr)
+{
+   struct page *page = virt_to_page(zhdr);
+
+   if (!test_bit(PAGE_HEADLESS, &page->private))
+           z3fold_page_unlock(zhdr);
+}

Re: [PATCH] z3fold: add inter-page compaction

2019-05-27 Thread Vitaly Wool
On Sun, May 26, 2019 at 12:09 AM Andrew Morton wrote:


> Forward-declaring inline functions is peculiar, but it does appear to work.
>
> z3fold is quite inline-happy.  Fortunately the compiler will ignore the
> inline hint if it seems a bad idea.  Even then, the below shrinks
> z3fold.o text from 30k to 27k.  Which might even make it faster

It is faster with inlines, I'll try to find a better balance between
size and performance in the next version of the patch though.


> >
> > ...
> >
> > +static inline struct z3fold_header *__get_z3fold_header(unsigned long handle,
> > + bool lock)
> > +{
> > + struct z3fold_buddy_slots *slots;
> > + struct z3fold_header *zhdr;
> > + unsigned int seq;
> > + bool is_valid;
> > +
> > + if (!(handle & (1 << PAGE_HEADLESS))) {
> > + slots = handle_to_slots(handle);
> > + do {
> > + unsigned long addr;
> > +
> > + seq = read_seqbegin(&slots->seqlock);
> > + addr = *(unsigned long *)handle;
> > + zhdr = (struct z3fold_header *)(addr & PAGE_MASK);
> > + preempt_disable();
>
> Why is this done?
>
> > + is_valid = !read_seqretry(&slots->seqlock, seq);
> > + if (!is_valid) {
> > + preempt_enable();
> > + continue;
> > + }
> > + /*
> > +  * if we are here, zhdr is a pointer to a valid z3fold
> > +  * header. Lock it! And then re-check if someone has
> > +  * changed which z3fold page this handle points to
> > +  */
> > + if (lock)
> > + z3fold_page_lock(zhdr);
> > + preempt_enable();
> > + /*
> > +  * we use is_valid as a "cached" value: if it's false,
> > +  * no other checks needed, have to go one more round
> > +  */
> > + } while (!is_valid || (read_seqretry(&slots->seqlock, seq) &&
> > + (lock ? ({ z3fold_page_unlock(zhdr); 1; }) : 1)));
> > + } else {
> > + zhdr = (struct z3fold_header *)(handle & PAGE_MASK);
> > + }
> > +
> > + return zhdr;
> > +}
> >
> > ...
> >
> >  static unsigned short handle_to_chunks(unsigned long handle)
> >  {
> > - unsigned long addr = *(unsigned long *)handle;
> > + unsigned long addr;
> > + struct z3fold_buddy_slots *slots = handle_to_slots(handle);
> > + unsigned int seq;
> > +
> > + do {
> > + seq = read_seqbegin(&slots->seqlock);
> > + addr = *(unsigned long *)handle;
> > + } while (read_seqretry(&slots->seqlock, seq));
>
> It isn't done here (I think).

handle_to_chunks() is always called with the z3fold header locked,
which makes it a lot easier in this case. I'll add some comments in V2.
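
For reference, a commented version might look like the sketch below
(a sketch only, not the actual V2; the body just combines the hunk
quoted above with z3fold's existing return expression):

	/*
	 * The caller holds the z3fold page lock, so the slot value we
	 * read cannot change under us; the seqlock retry loop only
	 * guards against concurrent writers to *other* slots (e.g.
	 * foreign handles) in the same z3fold_buddy_slots object.
	 */
	static unsigned short handle_to_chunks(unsigned long handle)
	{
		struct z3fold_buddy_slots *slots = handle_to_slots(handle);
		unsigned long addr;
		unsigned int seq;

		do {
			seq = read_seqbegin(&slots->seqlock);
			addr = *(unsigned long *)handle;
		} while (read_seqretry(&slots->seqlock, seq));

		return (addr & ~PAGE_MASK) >> BUDDY_SHIFT;
	}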

Thanks,
   Vitaly


Re: [PATCH] z3fold: add inter-page compaction

2019-05-25 Thread Andrew Morton
On Fri, 24 May 2019 17:49:18 +0200 Vitaly Wool wrote:

> For each page scheduled for compaction (e.g. by z3fold_free()),
> try to apply inter-page compaction before running the traditional/
> existing intra-page compaction. That means, if the page has only one
> buddy, we treat that buddy as a new object that we aim to place into
> an existing z3fold page. If such a page is found, that object is
> transferred and the old page is freed completely. The transferred
> object is named "foreign" and treated slightly differently thereafter.
> 
> Namely, we increase the "foreign handle" counter for the new page. Pages
> with non-zero "foreign handle" count become unmovable. This patch
> implements "foreign handle" detection when a handle is freed to
> decrement the foreign handle counter accordingly, so a page may
> well become movable again as time goes by.
> 
> As a result, we almost always have exactly 3 objects per page and
> significantly better average compression ratio.
> 
> ...
>
> +static inline struct z3fold_header *handle_to_z3fold_header(unsigned long);
> +static inline struct z3fold_pool *zhdr_to_pool(struct z3fold_header *);

Forward-declaring inline functions is peculiar, but it does appear to work.

z3fold is quite inline-happy.  Fortunately the compiler will ignore the
inline hint if it seems a bad idea.  Even then, the below shrinks
z3fold.o text from 30k to 27k.  Which might even make it faster

--- a/mm/z3fold.c~a
+++ a/mm/z3fold.c
@@ -185,8 +185,8 @@ enum z3fold_handle_flags {
HANDLES_ORPHANED = 0,
 };
 
-static inline struct z3fold_header *handle_to_z3fold_header(unsigned long);
-static inline struct z3fold_pool *zhdr_to_pool(struct z3fold_header *);
+static struct z3fold_header *handle_to_z3fold_header(unsigned long);
+static struct z3fold_pool *zhdr_to_pool(struct z3fold_header *);
 static struct z3fold_header *__z3fold_alloc(struct z3fold_pool *, size_t, bool);
 static void add_to_unbuddied(struct z3fold_pool *, struct z3fold_header *);
 
@@ -205,7 +205,7 @@ static int size_to_chunks(size_t size)
 
 static void compact_page_work(struct work_struct *w);
 
-static inline struct z3fold_buddy_slots *alloc_slots(struct z3fold_pool *pool,
+static struct z3fold_buddy_slots *alloc_slots(struct z3fold_pool *pool,
gfp_t gfp)
 {
struct z3fold_buddy_slots *slots = kmem_cache_alloc(pool->c_handle,
@@ -220,17 +220,17 @@ static inline struct z3fold_buddy_slots
return slots;
 }
 
-static inline struct z3fold_pool *slots_to_pool(struct z3fold_buddy_slots *s)
+static struct z3fold_pool *slots_to_pool(struct z3fold_buddy_slots *s)
 {
return (struct z3fold_pool *)(s->pool & ~HANDLE_FLAG_MASK);
 }
 
-static inline struct z3fold_buddy_slots *handle_to_slots(unsigned long handle)
+static struct z3fold_buddy_slots *handle_to_slots(unsigned long handle)
 {
return (struct z3fold_buddy_slots *)(handle & ~(SLOTS_ALIGN - 1));
 }
 
-static inline void free_handle(unsigned long handle)
+static void free_handle(unsigned long handle)
 {
struct z3fold_buddy_slots *slots;
struct z3fold_header *zhdr;
@@ -423,7 +423,7 @@ static unsigned long encode_handle(struc
return (unsigned long)&slots->slot[idx];
 }
 
-static inline struct z3fold_header *__get_z3fold_header(unsigned long handle,
+static struct z3fold_header *__get_z3fold_header(unsigned long handle,
bool lock)
 {
struct z3fold_buddy_slots *slots;
@@ -648,7 +648,7 @@ static int num_free_chunks(struct z3fold
 }
 
 /* Add to the appropriate unbuddied list */
-static inline void add_to_unbuddied(struct z3fold_pool *pool,
+static void add_to_unbuddied(struct z3fold_pool *pool,
struct z3fold_header *zhdr)
 {
if (zhdr->first_chunks == 0 || zhdr->last_chunks == 0 ||
@@ -664,7 +664,7 @@ static inline void add_to_unbuddied(stru
}
 }
 
-static inline void *mchunk_memmove(struct z3fold_header *zhdr,
+static void *mchunk_memmove(struct z3fold_header *zhdr,
unsigned short dst_chunk)
 {
void *beg = zhdr;
@@ -673,7 +673,7 @@ static inline void *mchunk_memmove(struc
   zhdr->middle_chunks << CHUNK_SHIFT);
 }
 
-static inline bool buddy_single(struct z3fold_header *zhdr)
+static bool buddy_single(struct z3fold_header *zhdr)
 {
return !((zhdr->first_chunks && zhdr->middle_chunks) ||
(zhdr->first_chunks && zhdr->last_chunks) ||
@@ -884,7 +884,7 @@ static void compact_page_work(struct wor
 }
 
 /* returns _locked_ z3fold page header or NULL */
-static inline struct z3fold_header *__z3fold_alloc(struct z3fold_pool *pool,
+static struct z3fold_header *__z3fold_alloc(struct z3fold_pool *pool,
size_t size, bool can_sleep)
 {
struct z3fold_header *zhdr = NULL;
_


>
> ...
>
> +static inline struct z3fold_header *_

[PATCH] z3fold: add inter-page compaction

2019-05-24 Thread Vitaly Wool
For each page scheduled for compaction (e.g. by z3fold_free()),
try to apply inter-page compaction before running the traditional/
existing intra-page compaction. That means, if the page has only one
buddy, we treat that buddy as a new object that we aim to place into
an existing z3fold page. If such a page is found, that object is
transferred and the old page is freed completely. The transferred
object is named "foreign" and treated slightly differently thereafter.

Namely, we increase the "foreign handle" counter for the new page. Pages
with non-zero "foreign handle" count become unmovable. This patch
implements "foreign handle" detection when a handle is freed to
decrement the foreign handle counter accordingly, so a page may
well become movable again as time goes by.

As a result, we almost always have exactly 3 objects per page and
significantly better average compression ratio.

Signed-off-by: Vitaly Wool 
---
 mm/z3fold.c | 328 +---
 1 file changed, 285 insertions(+), 43 deletions(-)

diff --git a/mm/z3fold.c b/mm/z3fold.c
index 985732c8b025..d82bccc8bc90 100644
--- a/mm/z3fold.c
+++ b/mm/z3fold.c
@@ -41,6 +41,7 @@
 #include <linux/workqueue.h>
 #include <linux/slab.h>
 #include <linux/spinlock.h>
+#include <linux/seqlock.h>
 #include <linux/zpool.h>
 
 /*
@@ -89,6 +90,7 @@ struct z3fold_buddy_slots {
 */
unsigned long slot[BUDDY_MASK + 1];
unsigned long pool; /* back link + flags */
+   seqlock_t seqlock;
 };
 #define HANDLE_FLAG_MASK   (0x03)
 
@@ -121,6 +123,7 @@ struct z3fold_header {
unsigned short start_middle;
unsigned short first_num:2;
unsigned short mapped_count:2;
+   unsigned short foreign_handles:2;
 };
 
 /**
@@ -175,6 +178,18 @@ enum z3fold_page_flags {
PAGE_CLAIMED, /* by either reclaim or free */
 };
 
+/*
+ * handle flags, go under HANDLE_FLAG_MASK
+ */
+enum z3fold_handle_flags {
+   HANDLES_ORPHANED = 0,
+};
+
+static inline struct z3fold_header *handle_to_z3fold_header(unsigned long);
+static inline struct z3fold_pool *zhdr_to_pool(struct z3fold_header *);
+static struct z3fold_header *__z3fold_alloc(struct z3fold_pool *, size_t, bool);
+static void add_to_unbuddied(struct z3fold_pool *, struct z3fold_header *);
+
 /*
  * Helpers
 */
@@ -199,6 +214,7 @@ static inline struct z3fold_buddy_slots *alloc_slots(struct z3fold_pool *pool,
if (slots) {
memset(slots->slot, 0, sizeof(slots->slot));
slots->pool = (unsigned long)pool;
+   seqlock_init(&slots->seqlock);
}
 
return slots;
@@ -217,24 +233,39 @@ static inline struct z3fold_buddy_slots *handle_to_slots(unsigned long handle)
 static inline void free_handle(unsigned long handle)
 {
struct z3fold_buddy_slots *slots;
+   struct z3fold_header *zhdr;
int i;
bool is_free;
+   unsigned int seq;
 
if (handle & (1 << PAGE_HEADLESS))
return;
 
-   WARN_ON(*(unsigned long *)handle == 0);
-   *(unsigned long *)handle = 0;
+   if (WARN_ON(*(unsigned long *)handle == 0))
+   return;
+
+   zhdr = handle_to_z3fold_header(handle);
slots = handle_to_slots(handle);
-   is_free = true;
-   for (i = 0; i <= BUDDY_MASK; i++) {
-   if (slots->slot[i]) {
-   is_free = false;
-   break;
+   write_seqlock(&slots->seqlock);
+   *(unsigned long *)handle = 0;
+   write_sequnlock(&slots->seqlock);
+   if (zhdr->slots == slots)
+   return; /* simple case, nothing else to do */
+
+   /* we are freeing a foreign handle if we are here */
+   zhdr->foreign_handles--;
+   do {
+   is_free = true;
+   seq = read_seqbegin(&slots->seqlock);
+   for (i = 0; i <= BUDDY_MASK; i++) {
+   if (slots->slot[i]) {
+   is_free = false;
+   break;
+   }
}
-   }
+   } while (read_seqretry(&slots->seqlock, seq));
 
-   if (is_free) {
+   if (is_free && test_and_clear_bit(HANDLES_ORPHANED, &slots->pool)) {
struct z3fold_pool *pool = slots_to_pool(slots);
 
kmem_cache_free(pool->c_handle, slots);
@@ -320,6 +351,7 @@ static struct z3fold_header *init_z3fold_page(struct page *page,
zhdr->start_middle = 0;
zhdr->cpu = -1;
zhdr->slots = slots;
+   zhdr->foreign_handles = 0;
INIT_LIST_HEAD(&zhdr->buddy);
INIT_WORK(&zhdr->work, compact_page_work);
return zhdr;
@@ -385,25 +417,87 @@ static unsigned long encode_handle(struct z3fold_header *zhdr, enum buddy bud)
h |= (zhdr->last_chunks << BUDDY_SHIFT);
 
slots = zhdr->slots;
+   write_seqlock(&slots->seqlock);
slots->slot[idx] = h;
+   write_sequnlock(&slots->seqlock);
return (unsigned long)&slots->slot[idx];
 }
 
+stati