[PATCH v1 3/3] kvm tools: Add QCOW level2 caching support

2011-05-18 Thread Prasad Joshi
QCOW uses two kinds of tables: the level-1 (L1)
table and the level-2 (L2) tables. The L1 table
holds the offsets of the L2 tables. When a QCOW
image is probed, the L1 table is cached in memory
to avoid reading it from disk on every reference,
which improves performance. A similar improvement
can be observed when the L2 tables are cached as
well. It is impossible to cache all of the L2
tables because of memory constraints, so the patch
adds caching for up to 128 L2 tables. It uses a
combination of an RB tree and a list to manage the
cached L2 tables: the linked list provides a simple
LRU structure, while the RB tree keeps the lookup
time low during read/write operations.
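
To see why the L2 tables sit directly on the I/O path, here is a minimal
sketch of how QCOW2 splits a guest offset into an L1 index and an L2 index
(cluster_bits comes from the image header; the helper below is illustrative
only and is not part of this patch):

    /* Illustrative only: how a QCOW2 guest offset maps to table indices.
     * Each L2 table holds cluster_size / sizeof(u64) entries, hence
     * l2_bits = cluster_bits - 3. */
    static void qcow2_split_offset(u64 offset, int cluster_bits,
                                   u64 *l1_index, u64 *l2_index)
    {
        int l2_bits = cluster_bits - 3;

        *l1_index = offset >> (cluster_bits + l2_bits);
        *l2_index = (offset >> cluster_bits) & ((1ULL << l2_bits) - 1);
    }

Every read or write therefore needs the L2 table selected by l1_index, which
is exactly what the cache below avoids re-reading from disk.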

Signed-off-by: Prasad Joshi prasadjoshi...@gmail.com
---
 tools/kvm/include/kvm/qcow.h |   18 
 tools/kvm/qcow.c |  230 ++
 2 files changed, 230 insertions(+), 18 deletions(-)

diff --git a/tools/kvm/include/kvm/qcow.h b/tools/kvm/include/kvm/qcow.h
index b6e7493..095566a 100644
--- a/tools/kvm/include/kvm/qcow.h
+++ b/tools/kvm/include/kvm/qcow.h
@@ -3,6 +3,8 @@
 
 #include <linux/types.h>
 #include <stdbool.h>
+#include <linux/rbtree.h>
+#include <linux/list.h>
 
 #define QCOW_MAGIC (('Q' << 24) | ('F' << 16) | ('I' << 8) | 0xfb)
 
@@ -17,6 +19,16 @@
 #define QCOW2_OFLAG_COMPRESSED (1LL << 62)
 #define QCOW2_OFLAG_MASK   (QCOW2_OFLAG_COPIED|QCOW2_OFLAG_COMPRESSED)
 
+#define MAX_CACHE_NODES 128
+
+struct qcow_l2_cache {
+   u64 offset;
+   u64 *table;
+   struct rb_node  node;
+   struct list_head    list;
+   struct qcow *qcow;
+};
+
 struct qcow_table {
u32 table_size;
u64 *l1_table;
@@ -26,6 +38,12 @@ struct qcow {
void    *header;
struct qcow_table   table;
int fd;
+
+   /* Level2 caching data structures */
+   struct qcow_l2_cache    cache;
+   struct rb_root  root;
+   struct list_head    lru_list;
+   int no_cached;
 };
 
 struct qcow_header {
diff --git a/tools/kvm/qcow.c b/tools/kvm/qcow.c
index bb2345c..ca61374 100644
--- a/tools/kvm/qcow.c
+++ b/tools/kvm/qcow.c
@@ -16,6 +16,153 @@
 #include <linux/kernel.h>
 #include <linux/types.h>
 
+static inline int insert(struct rb_root *root, struct qcow_l2_cache *new)
+{
+        struct rb_node **link = &(root->rb_node), *parent = NULL;
+        u64 offset = new->offset;
+
+        /* search the tree */
+        while (*link) {
+                struct qcow_l2_cache *t;
+
+                t = rb_entry(*link, struct qcow_l2_cache, node);
+                if (!t)
+                        goto error;
+
+                parent = *link;
+
+                if (t->offset > offset)
+                        link = &(*link)->rb_left;
+                else if (t->offset < offset)
+                        link = &(*link)->rb_right;
+                else
+                        goto out;
+        }
+
+        /* add new node */
+        rb_link_node(&new->node, parent, link);
+        rb_insert_color(&new->node, root);
+out:
+        return 0;
+error:
+        return -1;
+}
+
+static inline struct qcow_l2_cache *search(struct rb_root *root, u64 offset)
+{
+        struct rb_node *link = root->rb_node;
+
+        while (link) {
+                struct qcow_l2_cache *t;
+
+                t = rb_entry(link, struct qcow_l2_cache, node);
+                if (!t)
+                        goto out;
+
+                if (t->offset > offset)
+                        link = link->rb_left;
+                else if (t->offset < offset)
+                        link = link->rb_right;
+                else
+                        return t;
+        }
+out:
+        return NULL;
+}
+
+static inline void free_cache(struct qcow *q)
+{
+        struct list_head *pos, *n;
+        struct qcow_l2_cache *t;
+        struct rb_root *r = &q->root;
+
+        list_for_each_safe(pos, n, &q->lru_list) {
+                /* Remove cache table from the list and RB tree */
+                list_del(pos);
+                t = list_entry(pos, struct qcow_l2_cache, list);
+                rb_erase(&t->node, r);
+
+                /* Free the cached node */
+                free(t->table);
+                free(t);
+        }
+}
+
+static inline int cache_table(struct qcow *q, u64 *table, u64 offset)
+{
+        struct qcow_l2_cache *n;
+        struct rb_root *r = &q->root;
+        struct qcow_l2_cache *lru;
+
+        n = search(r, offset);
+        if (n) {
+                /* Update the LRU state */
+                list_del_init(&n->list);
+                list_add_tail(&n->list, &q->lru_list);
+                return 0;
+        }
+
+        lru = NULL;
+        if (q->no_cached == MAX_CACHE_NODES) {
+                /* Find the node for LRU replacement */
+                lru = list_first_entry(&q->lru_list, struct qcow_l2_cache, list);
+        }
+
+        n = calloc(1,
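
The archived message is cut off in the middle of cache_table() above, so the
rest of the function cannot be recovered from this thread. Purely for
orientation, here is a hedged sketch of how such an LRU insert-with-eviction
step is commonly completed; it reuses the identifiers visible in the patch,
but the helper name and error handling are assumptions and this is not the
author's actual code:

    /* Hypothetical continuation -- NOT the author's code. */
    static int cache_table_tail_sketch(struct qcow *q, u64 *table, u64 offset)
    {
        struct qcow_l2_cache *lru = NULL;
        struct qcow_l2_cache *n;

        if (q->no_cached == MAX_CACHE_NODES)
            lru = list_first_entry(&q->lru_list,
                                   struct qcow_l2_cache, list);

        n = calloc(1, sizeof(*n));
        if (!n)
            return -1;

        n->offset = offset;
        n->table  = table;
        n->qcow   = q;
        INIT_LIST_HEAD(&n->list);

        if (lru) {
            /* Evict the least recently used entry before inserting. */
            rb_erase(&lru->node, &q->root);
            list_del(&lru->list);
            free(lru->table);
            free(lru);
            q->no_cached--;
        }

        if (insert(&q->root, n) < 0) {
            free(n);
            return -1;
        }

        /* Most recently used entries live at the tail of the LRU list. */
        list_add_tail(&n->list, &q->lru_list);
        q->no_cached++;

        return 0;
    }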

Re: [PATCH v1 3/3] kvm tools: Add QCOW level2 caching support

2011-05-18 Thread Pekka Enberg
On Wed, May 18, 2011 at 1:17 PM, Prasad Joshi prasadjoshi...@gmail.com wrote:
 QCOW uses two kinds of tables: the level-1 (L1)
 table and the level-2 (L2) tables. The L1 table
 holds the offsets of the L2 tables. When a QCOW
 image is probed, the L1 table is cached in memory
 to avoid reading it from disk on every reference,
 which improves performance. A similar improvement
 can be observed when the L2 tables are cached as
 well. It is impossible to cache all of the L2
 tables because of memory constraints, so the patch
 adds caching for up to 128 L2 tables. It uses a
 combination of an RB tree and a list to manage the
 cached L2 tables: the linked list provides a simple
 LRU structure, while the RB tree keeps the lookup
 time low during read/write operations.

Can you please split that text into more readable paragraphs? The line
wrapping column also seems rather aggressive, which contributes to the
unreadability.

 @@ -16,6 +16,153 @@
  #include <linux/kernel.h>
  #include <linux/types.h>

 +static inline int insert(struct rb_root *root, struct qcow_l2_cache *new)
 +{
 +       struct rb_node **link = &(root->rb_node), *parent = NULL;
 +       u64 offset = new->offset;
 +
 +       /* search the tree */
 +       while (*link) {
 +               struct qcow_l2_cache *t;
 +
 +               t = rb_entry(*link, struct qcow_l2_cache, node);
 +               if (!t)
 +                       goto error;
 +

[snip, snip]

This function and others are way too big to be marked as inline.

   Pekka


Re: [PATCH v1 3/3] kvm tools: Add QCOW level2 caching support

2011-05-18 Thread Prasad Joshi
On Wed, May 18, 2011 at 11:27 AM, Pekka Enberg penb...@kernel.org wrote:
 On Wed, May 18, 2011 at 1:17 PM, Prasad Joshi prasadjoshi...@gmail.com 
 wrote:
 QCOW uses two kinds of tables: the level-1 (L1)
 table and the level-2 (L2) tables. The L1 table
 holds the offsets of the L2 tables. When a QCOW
 image is probed, the L1 table is cached in memory
 to avoid reading it from disk on every reference,
 which improves performance. A similar improvement
 can be observed when the L2 tables are cached as
 well. It is impossible to cache all of the L2
 tables because of memory constraints, so the patch
 adds caching for up to 128 L2 tables. It uses a
 combination of an RB tree and a list to manage the
 cached L2 tables: the linked list provides a simple
 LRU structure, while the RB tree keeps the lookup
 time low during read/write operations.

 Can you please split that text into more readable paragraphs?

Okay, will do.

 The line
 wrapping column also seems rather aggressive, which contributes to the
 unreadability.

 @@ -16,6 +16,153 @@
  #include <linux/kernel.h>
  #include <linux/types.h>

 +static inline int insert(struct rb_root *root, struct qcow_l2_cache *new)
 +{
 +       struct rb_node **link = &(root->rb_node), *parent = NULL;
 +       u64 offset = new->offset;
 +
 +       /* search the tree */
 +       while (*link) {
 +               struct qcow_l2_cache *t;
 +
 +               t = rb_entry(*link, struct qcow_l2_cache, node);
 +               if (!t)
 +                       goto error;
 +

 [snip, snip]

 This function and others are way too big to be marked as inline.

                       Pekka



Re: [PATCH v1 3/3] kvm tools: Add QCOW level2 caching support

2011-05-18 Thread Ingo Molnar

* Pekka Enberg penb...@kernel.org wrote:

 On Wed, May 18, 2011 at 1:17 PM, Prasad Joshi prasadjoshi...@gmail.com 
 wrote:
  QCOW uses two kinds of tables: the level-1 (L1)
  table and the level-2 (L2) tables. The L1 table
  holds the offsets of the L2 tables. When a QCOW
  image is probed, the L1 table is cached in memory
  to avoid reading it from disk on every reference,
  which improves performance. A similar improvement
  can be observed when the L2 tables are cached as
  well. It is impossible to cache all of the L2
  tables because of memory constraints, so the patch
  adds caching for up to 128 L2 tables. It uses a
  combination of an RB tree and a list to manage the
  cached L2 tables: the linked list provides a simple
  LRU structure, while the RB tree keeps the lookup
  time low during read/write operations.
 
 Can you please split that text into more readable paragraphs? The line
 wrapping column also seems rather aggressive, which contributes to the
 unreadability.
 
  @@ -16,6 +16,153 @@
   #include <linux/kernel.h>
   #include <linux/types.h>
 
  +static inline int insert(struct rb_root *root, struct qcow_l2_cache *new)
  +{
  +       struct rb_node **link = &(root->rb_node), *parent = NULL;
  +       u64 offset = new->offset;
  +
  +       /* search the tree */
  +       while (*link) {
  +               struct qcow_l2_cache *t;
  +
  +               t = rb_entry(*link, struct qcow_l2_cache, node);
  +               if (!t)
  +                       goto error;
  +
 
 [snip, snip]
 
  This function and others are way too big to be marked as inline.

Yep, just leave it static and the compiler should be able to sort it out.

If not, we'll write a compiler too ;-)
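
In code terms the suggestion amounts to dropping the hint from the patch's
helpers and letting GCC's own heuristics decide (a sketch of the signature
change only, not from the thread and not a full diff):

    /* before */
    static inline int insert(struct rb_root *root, struct qcow_l2_cache *new);
    static inline struct qcow_l2_cache *search(struct rb_root *root, u64 offset);

    /* after: plain static; the compiler still inlines them if it pays off */
    static int insert(struct rb_root *root, struct qcow_l2_cache *new);
    static struct qcow_l2_cache *search(struct rb_root *root, u64 offset);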

Thanks,

Ingo


Re: [PATCH v1 3/3] kvm tools: Add QCOW level2 caching support

2011-05-18 Thread Pekka Enberg
On Wed, May 18, 2011 at 1:41 PM, Prasad Joshi prasadjoshi...@gmail.com wrote:
 On Wed, May 18, 2011 at 11:27 AM, Pekka Enberg penb...@kernel.org wrote:
 On Wed, May 18, 2011 at 1:17 PM, Prasad Joshi prasadjoshi...@gmail.com 
 wrote:
 QCOW uses two kinds of tables: the level-1 (L1)
 table and the level-2 (L2) tables. The L1 table
 holds the offsets of the L2 tables. When a QCOW
 image is probed, the L1 table is cached in memory
 to avoid reading it from disk on every reference,
 which improves performance. A similar improvement
 can be observed when the L2 tables are cached as
 well. It is impossible to cache all of the L2
 tables because of memory constraints, so the patch
 adds caching for up to 128 L2 tables. It uses a
 combination of an RB tree and a list to manage the
 cached L2 tables: the linked list provides a simple
 LRU structure, while the RB tree keeps the lookup
 time low during read/write operations.

 Can you please split that text into more readable paragraphs?

 Okay, will do.

And oh, what happened to the actual benchmark numbers? They're the most
interesting part of this patch.

Pekka


Re: [PATCH v1 3/3] kvm tools: Add QCOW level2 caching support

2011-05-18 Thread Sasha Levin
On Wed, 2011-05-18 at 11:17 +0100, Prasad Joshi wrote:
 QCOW uses two kinds of tables: the level-1 (L1)
 table and the level-2 (L2) tables. The L1 table
 holds the offsets of the L2 tables. When a QCOW
 image is probed, the L1 table is cached in memory
 to avoid reading it from disk on every reference,
 which improves performance. A similar improvement
 can be observed when the L2 tables are cached as
 well. It is impossible to cache all of the L2
 tables because of memory constraints, so the patch
 adds caching for up to 128 L2 tables. It uses a
 combination of an RB tree and a list to manage the
 cached L2 tables: the linked list provides a simple
 LRU structure, while the RB tree keeps the lookup
 time low during read/write operations.
 
 Signed-off-by: Prasad Joshi prasadjoshi...@gmail.com
 ---
  tools/kvm/include/kvm/qcow.h |   18 
  tools/kvm/qcow.c |  230 ++
  2 files changed, 230 insertions(+), 18 deletions(-)
 
 diff --git a/tools/kvm/include/kvm/qcow.h b/tools/kvm/include/kvm/qcow.h
 index b6e7493..095566a 100644
 --- a/tools/kvm/include/kvm/qcow.h
 +++ b/tools/kvm/include/kvm/qcow.h
 @@ -3,6 +3,8 @@
  
  #include <linux/types.h>
  #include <stdbool.h>
 +#include <linux/rbtree.h>
 +#include <linux/list.h>
  
  #define QCOW_MAGIC   (('Q' << 24) | ('F' << 16) | ('I' << 8) | 0xfb)
  
 @@ -17,6 +19,16 @@
  #define QCOW2_OFLAG_COMPRESSED   (1LL << 62)
  #define QCOW2_OFLAG_MASK (QCOW2_OFLAG_COPIED|QCOW2_OFLAG_COMPRESSED)
  
 +#define MAX_CACHE_NODES 128
 +
 +struct qcow_l2_cache {
 + u64 offset;
 + u64 *table;
 + struct rb_node  node;
 + struct list_head    list;
 + struct qcow *qcow;
 +};
 +
  struct qcow_table {
   u32 table_size;
   u64 *l1_table;
 @@ -26,6 +38,12 @@ struct qcow {
  void    *header;
   struct qcow_table   table;
   int fd;
 +
 + /* Level2 caching data structures */
 + struct qcow_l2_cache    cache;

Why is 'cache' here?

 + struct rb_root  root;
 + struct list_head    lru_list;
 + int no_cached;
  };
  
  struct qcow_header {
 [snip, snip]