This CL adds the new KMALLOC_ERROR flag we chatted about a few days ago.

https://github.com/brho/akaros/compare/master...dlibenzi:kmalloc_error


The following changes since commit 2fa42319139e4cc5ca853546363f84443d0ead00:

  Rename 'reallocarray' to 'kreallocarray'. (2015-11-25 18:02:04 -0500)

are available in the git repository at:

  [email protected]:dlibenzi/akaros kmalloc_error

for you to fetch changes up to 989a2b9a7bbd409e4c478dd8e31d738dc6f892fe:

  Added new kmalloc flag KMALLOC_ERROR (2015-12-08 11:58:57 -0800)

----------------------------------------------------------------
Davide Libenzi (1):
      Added new kmalloc flag KMALLOC_ERROR

 kern/include/kmalloc.h |  3 ++-
 kern/src/page_alloc.c  |  8 +++++++-
 kern/src/slab.c        | 31 +++++++++++++++++++++++--------
 3 files changed, 32 insertions(+), 10 deletions(-)

diff --git a/kern/include/kmalloc.h b/kern/include/kmalloc.h
index 268cf29..84c0275 100644
--- a/kern/include/kmalloc.h
+++ b/kern/include/kmalloc.h
@@ -30,7 +30,8 @@ void *debug_canary;

 /* Flags to pass to kmalloc */
 /* Not implemented yet. Block until it is available. */
-#define KMALLOC_WAIT 4
+#define KMALLOC_WAIT (1 << 2)
+#define KMALLOC_ERROR (1 << 3)

 /* Kmalloc tag flags looks like this:
  *
diff --git a/kern/src/page_alloc.c b/kern/src/page_alloc.c
index c2910c7..9e55457 100644
--- a/kern/src/page_alloc.c
+++ b/kern/src/page_alloc.c
@@ -5,10 +5,12 @@
  * Kevin Klues <[email protected]>
  * Barret Rhoden <[email protected]> */

+#include <ros/errno.h>
 #include <sys/queue.h>
 #include <bitmask.h>
 #include <page_alloc.h>
 #include <pmap.h>
+#include <err.h>
 #include <string.h>
 #include <kmalloc.h>
 #include <blockdev.h>
@@ -206,6 +208,8 @@ void *get_cont_pages(size_t order, int flags)
  //If we couldn't find them, return NULL
  if( first == -1 ) {
  spin_unlock_irqsave(&colored_page_free_list_lock);
+ if (flags & KMALLOC_ERROR)
+ error(ENOMEM, NULL);
  return NULL;
  }

@@ -262,7 +266,9 @@ void *get_cont_phys_pages_at(size_t order, physaddr_t at, int flags)
  for (unsigned long i = first_pg_nr; i < first_pg_nr + nr_pgs; i++) {
  if (!page_is_free(i)) {
  spin_unlock_irqsave(&colored_page_free_list_lock);
- return 0;
+ if (flags & KMALLOC_ERROR)
+ error(ENOMEM, NULL);
+ return NULL;
  }
  }
  for (unsigned long i = first_pg_nr; i < first_pg_nr + nr_pgs; i++)
diff --git a/kern/src/slab.c b/kern/src/slab.c
index 91d9137..e7e53ad 100644
--- a/kern/src/slab.c
+++ b/kern/src/slab.c
@@ -15,13 +15,14 @@
 #include <stdio.h>
 #include <assert.h>
 #include <pmap.h>
+#include <kmalloc.h>

 struct kmem_cache_list kmem_caches;
 spinlock_t kmem_caches_lock;

 /* Backend/internal functions, defined later.  Grab the lock before calling
  * these. */
-static void kmem_cache_grow(struct kmem_cache *cp);
+static bool kmem_cache_grow(struct kmem_cache *cp);

 /* Cache of the kmem_cache objects, needed for bootstrapping */
 struct kmem_cache kmem_cache_cache;
@@ -161,9 +162,15 @@ void *kmem_cache_alloc(struct kmem_cache *cp, int flags)
  struct kmem_slab *a_slab = TAILQ_FIRST(&cp->partial_slab_list);
  // if none, go to empty list and get an empty and make it partial
  if (!a_slab) {
- if (TAILQ_EMPTY(&cp->empty_slab_list))
- // TODO: think about non-sleeping flags
- kmem_cache_grow(cp);
+ // TODO: think about non-sleeping flags
+ if (TAILQ_EMPTY(&cp->empty_slab_list) &&
+ !kmem_cache_grow(cp)) {
+ spin_unlock_irqsave(&cp->cache_lock);
+ if (flags & KMALLOC_ERROR)
+ error(ENOMEM, NULL);
+ else
+ panic("[German Accent]: OOM for a small slab growth!!!");
+ }
  // move to partial list
  a_slab = TAILQ_FIRST(&cp->empty_slab_list);
  TAILQ_REMOVE(&cp->empty_slab_list, a_slab, link);
@@ -242,15 +249,16 @@ void kmem_cache_free(struct kmem_cache *cp, void *buf)
  * Grab the cache lock before calling this.
  *
  * TODO: think about page colouring issues with kernel memory allocation. */
-static void kmem_cache_grow(struct kmem_cache *cp)
+static bool kmem_cache_grow(struct kmem_cache *cp)
 {
  struct kmem_slab *a_slab;
  struct kmem_bufctl *a_bufctl;
  if (cp->obj_size <= SLAB_LARGE_CUTOFF) {
  // Just get a single page for small slabs
  page_t *a_page;
+
  if (kpage_alloc(&a_page))
- panic("[German Accent]: OOM for a small slab growth!!!");
+ return FALSE;
  // the slab struct is stored at the end of the page
  a_slab = (struct kmem_slab*)(page2kva(a_page) + PGSIZE -
                              sizeof(struct kmem_slab));
@@ -274,6 +282,8 @@ static void kmem_cache_grow(struct kmem_cache *cp)
  *((uintptr_t**)(buf + cp->obj_size)) = NULL;
  } else {
  a_slab = kmem_cache_alloc(kmem_slab_cache, 0);
+ if (!a_slab)
+ return FALSE;
  // TODO: hash table for back reference (BUF)
  a_slab->obj_size = ROUNDUP(cp->obj_size + sizeof(uintptr_t), cp->align);
  /* Figure out how much memory we want.  We need at least min_pgs.  We'll
@@ -282,8 +292,11 @@ static void kmem_cache_grow(struct kmem_cache *cp)
                          PGSIZE;
  size_t order_pg_alloc = LOG2_UP(min_pgs);
  void *buf = get_cont_pages(order_pg_alloc, 0);
- if (!buf)
- panic("[German Accent]: OOM for a large slab growth!!!");
+
+ if (!buf) {
+ kmem_slab_destroy(kmem_slab_cache, a_slab);
+ return FALSE;
+ }
  a_slab->num_busy_obj = 0;
  /* The number of objects is based on the rounded up amt requested. */
  a_slab->num_total_obj = ((1 << order_pg_alloc) * PGSIZE) /
@@ -305,6 +318,8 @@ static void kmem_cache_grow(struct kmem_cache *cp)
  }
  // add a_slab to the empty_list
  TAILQ_INSERT_HEAD(&cp->empty_slab_list, a_slab, link);
+
+ return TRUE;
 }

 /* This deallocs every slab from the empty list.  TODO: think a bit more about

-- 
You received this message because you are subscribed to the Google Groups 
"Akaros" group.
To unsubscribe from this group and stop receiving emails from it, send an email 
to [email protected].
To post to this group, send email to [email protected].
For more options, visit https://groups.google.com/d/optout.

Reply via email to