Gitweb:     http://git.kernel.org/git/?p=linux/kernel/git/torvalds/linux-2.6.git;a=commit;h=3a8d4642861fb69b62401949e490c0bcb19ceb40
Commit:     3a8d4642861fb69b62401949e490c0bcb19ceb40
Parent:     01f7b309e453dc8499c318f6810f76b606b66134
Author:     Artem Bityutskiy <[EMAIL PROTECTED]>
AuthorDate: Sun Dec 16 12:32:51 2007 +0200
Committer:  Artem Bityutskiy <[EMAIL PROTECTED]>
CommitDate: Wed Dec 26 19:15:14 2007 +0200

    UBI: create ltree_entry slab on initialization
    
    Since the ltree_entry slab cache is a global entity, which is
    used by all UBI devices, it is more logical to create it at
    module initialization time and destroy it at module exit time.
    
    Signed-off-by: Artem Bityutskiy <[EMAIL PROTECTED]>
---
 drivers/mtd/ubi/build.c |   26 ++++++++++++++
 drivers/mtd/ubi/eba.c   |   85 ++++++++++-------------------------------------
 drivers/mtd/ubi/ubi.h   |   23 +++++++++++++
 3 files changed, 67 insertions(+), 67 deletions(-)

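For reference, the create-at-init / destroy-at-exit slab lifecycle this patch
switches to looks roughly as follows as a stand-alone sketch (hypothetical
example_* names; the constructor prototype matches the 2.6.24-era
kmem_cache_create() API used in the diff below):

    #include <linux/module.h>
    #include <linux/slab.h>

    /* Hypothetical object type cached in the slab. */
    struct example_entry {
            int users;
    };

    static struct kmem_cache *example_slab;

    /* Constructor: runs once per object when a new slab page is populated. */
    static void example_ctor(struct kmem_cache *cache, void *obj)
    {
            struct example_entry *e = obj;

            e->users = 0;
    }

    static int __init example_init(void)
    {
            /* Create the global cache once, when the module loads. */
            example_slab = kmem_cache_create("example_slab",
                                             sizeof(struct example_entry),
                                             0, 0, example_ctor);
            if (!example_slab)
                    return -ENOMEM;
            return 0;
    }

    static void __exit example_exit(void)
    {
            /* Destroy the cache once, when the module unloads. */
            kmem_cache_destroy(example_slab);
    }

    module_init(example_init);
    module_exit(example_exit);
    MODULE_LICENSE("GPL");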
diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
index 5490a73..44c8521 100644
--- a/drivers/mtd/ubi/build.c
+++ b/drivers/mtd/ubi/build.c
@@ -67,6 +67,9 @@ struct ubi_device *ubi_devices[UBI_MAX_DEVICES];
 /* Root UBI "class" object (corresponds to '/<sysfs>/class/ubi/') */
 struct class *ubi_class;
 
+/* Slab cache for lock-tree entries */
+struct kmem_cache *ubi_ltree_slab;
+
 /* "Show" method for files in '/<sysfs>/class/ubi/' */
 static ssize_t ubi_version_show(struct class *class, char *buf)
 {
@@ -687,6 +690,20 @@ static void detach_mtd_dev(struct ubi_device *ubi)
        ubi_msg("mtd%d is detached from ubi%d", mtd_num, ubi_num);
 }
 
+/**
+ * ltree_entry_ctor - lock tree entries slab cache constructor.
+ * @obj: the lock-tree entry to construct
+ * @cache: the lock tree entry slab cache
+ * @flags: constructor flags
+ */
+static void ltree_entry_ctor(struct kmem_cache *cache, void *obj)
+{
+       struct ubi_ltree_entry *le = obj;
+
+       le->users = 0;
+       init_rwsem(&le->mutex);
+}
+
 static int __init ubi_init(void)
 {
        int err, i, k;
@@ -709,6 +726,12 @@ static int __init ubi_init(void)
        if (err)
                goto out_class;
 
+       ubi_ltree_slab = kmem_cache_create("ubi_ltree_slab",
+                                          sizeof(struct ubi_ltree_entry), 0,
+                                          0, &ltree_entry_ctor);
+       if (!ubi_ltree_slab)
+               goto out_version;
+
        /* Attach MTD devices */
        for (i = 0; i < mtd_devs; i++) {
                struct mtd_dev_param *p = &mtd_dev_param[i];
@@ -724,6 +747,8 @@ static int __init ubi_init(void)
 out_detach:
        for (k = 0; k < i; k++)
                detach_mtd_dev(ubi_devices[k]);
+       kmem_cache_destroy(ubi_ltree_slab);
+out_version:
        class_remove_file(ubi_class, &ubi_version);
 out_class:
        class_destroy(ubi_class);
@@ -737,6 +762,7 @@ static void __exit ubi_exit(void)
 
        for (i = 0; i < n; i++)
                detach_mtd_dev(ubi_devices[i]);
+       kmem_cache_destroy(ubi_ltree_slab);
        class_remove_file(ubi_class, &ubi_version);
        class_destroy(ubi_class);
 }
diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c
index c87db07..5fdb31b 100644
--- a/drivers/mtd/ubi/eba.c
+++ b/drivers/mtd/ubi/eba.c
@@ -31,7 +31,7 @@
  * logical eraseblock it is locked for reading or writing. The per-logical
  * eraseblock locking is implemented by means of the lock tree. The lock tree
  * is an RB-tree which refers all the currently locked logical eraseblocks. The
- * lock tree elements are &struct ltree_entry objects. They are indexed by
+ * lock tree elements are &struct ubi_ltree_entry objects. They are indexed by
  * (@vol_id, @lnum) pairs.
  *
  * EBA also maintains the global sequence counter which is incremented each
@@ -50,29 +50,6 @@
 #define EBA_RESERVED_PEBS 1
 
 /**
- * struct ltree_entry - an entry in the lock tree.
- * @rb: links RB-tree nodes
- * @vol_id: volume ID of the locked logical eraseblock
- * @lnum: locked logical eraseblock number
- * @users: how many tasks are using this logical eraseblock or wait for it
- * @mutex: read/write mutex to implement read/write access serialization to
- * the (@vol_id, @lnum) logical eraseblock
- *
- * When a logical eraseblock is being locked - corresponding &struct ltree_entry
- * object is inserted to the lock tree (@ubi->ltree).
- */
-struct ltree_entry {
-       struct rb_node rb;
-       int vol_id;
-       int lnum;
-       int users;
-       struct rw_semaphore mutex;
-};
-
-/* Slab cache for lock-tree entries */
-static struct kmem_cache *ltree_slab;
-
-/**
  * next_sqnum - get next sequence number.
  * @ubi: UBI device description object
  *
@@ -112,20 +89,20 @@ static int ubi_get_compat(const struct ubi_device *ubi, int vol_id)
  * @vol_id: volume ID
  * @lnum: logical eraseblock number
  *
- * This function returns a pointer to the corresponding &struct ltree_entry
+ * This function returns a pointer to the corresponding &struct ubi_ltree_entry
  * object if the logical eraseblock is locked and %NULL if it is not.
  * @ubi->ltree_lock has to be locked.
  */
-static struct ltree_entry *ltree_lookup(struct ubi_device *ubi, int vol_id,
-                                       int lnum)
+static struct ubi_ltree_entry *ltree_lookup(struct ubi_device *ubi, int vol_id,
+                                           int lnum)
 {
        struct rb_node *p;
 
        p = ubi->ltree.rb_node;
        while (p) {
-               struct ltree_entry *le;
+               struct ubi_ltree_entry *le;
 
-               le = rb_entry(p, struct ltree_entry, rb);
+               le = rb_entry(p, struct ubi_ltree_entry, rb);
 
                if (vol_id < le->vol_id)
                        p = p->rb_left;
@@ -155,12 +132,12 @@ static struct ltree_entry *ltree_lookup(struct ubi_device *ubi, int vol_id,
  * Returns pointer to the lock tree entry or %-ENOMEM if memory allocation
  * failed.
  */
-static struct ltree_entry *ltree_add_entry(struct ubi_device *ubi, int vol_id,
-                                          int lnum)
+static struct ubi_ltree_entry *ltree_add_entry(struct ubi_device *ubi,
+                                              int vol_id, int lnum)
 {
-       struct ltree_entry *le, *le1, *le_free;
+       struct ubi_ltree_entry *le, *le1, *le_free;
 
-       le = kmem_cache_alloc(ltree_slab, GFP_NOFS);
+       le = kmem_cache_alloc(ubi_ltree_slab, GFP_NOFS);
        if (!le)
                return ERR_PTR(-ENOMEM);
 
@@ -189,7 +166,7 @@ static struct ltree_entry *ltree_add_entry(struct ubi_device *ubi, int vol_id,
                p = &ubi->ltree.rb_node;
                while (*p) {
                        parent = *p;
-                       le1 = rb_entry(parent, struct ltree_entry, rb);
+                       le1 = rb_entry(parent, struct ubi_ltree_entry, rb);
 
                        if (vol_id < le1->vol_id)
                                p = &(*p)->rb_left;
@@ -211,7 +188,7 @@ static struct ltree_entry *ltree_add_entry(struct ubi_device *ubi, int vol_id,
        spin_unlock(&ubi->ltree_lock);
 
        if (le_free)
-               kmem_cache_free(ltree_slab, le_free);
+               kmem_cache_free(ubi_ltree_slab, le_free);
 
        return le;
 }
@@ -227,7 +204,7 @@ static struct ltree_entry *ltree_add_entry(struct ubi_device *ubi, int vol_id,
  */
 static int leb_read_lock(struct ubi_device *ubi, int vol_id, int lnum)
 {
-       struct ltree_entry *le;
+       struct ubi_ltree_entry *le;
 
        le = ltree_add_entry(ubi, vol_id, lnum);
        if (IS_ERR(le))
@@ -245,7 +222,7 @@ static int leb_read_lock(struct ubi_device *ubi, int vol_id, int lnum)
 static void leb_read_unlock(struct ubi_device *ubi, int vol_id, int lnum)
 {
        int free = 0;
-       struct ltree_entry *le;
+       struct ubi_ltree_entry *le;
 
        spin_lock(&ubi->ltree_lock);
        le = ltree_lookup(ubi, vol_id, lnum);
@@ -259,7 +236,7 @@ static void leb_read_unlock(struct ubi_device *ubi, int vol_id, int lnum)
 
        up_read(&le->mutex);
        if (free)
-               kmem_cache_free(ltree_slab, le);
+               kmem_cache_free(ubi_ltree_slab, le);
 }
 
 /**
@@ -273,7 +250,7 @@ static void leb_read_unlock(struct ubi_device *ubi, int vol_id, int lnum)
  */
 static int leb_write_lock(struct ubi_device *ubi, int vol_id, int lnum)
 {
-       struct ltree_entry *le;
+       struct ubi_ltree_entry *le;
 
        le = ltree_add_entry(ubi, vol_id, lnum);
        if (IS_ERR(le))
@@ -291,7 +268,7 @@ static int leb_write_lock(struct ubi_device *ubi, int vol_id, int lnum)
 static void leb_write_unlock(struct ubi_device *ubi, int vol_id, int lnum)
 {
        int free;
-       struct ltree_entry *le;
+       struct ubi_ltree_entry *le;
 
        spin_lock(&ubi->ltree_lock);
        le = ltree_lookup(ubi, vol_id, lnum);
@@ -306,7 +283,7 @@ static void leb_write_unlock(struct ubi_device *ubi, int vol_id, int lnum)
 
        up_write(&le->mutex);
        if (free)
-               kmem_cache_free(ltree_slab, le);
+               kmem_cache_free(ubi_ltree_slab, le);
 }
 
 /**
@@ -931,20 +908,6 @@ write_error:
 }
 
 /**
- * ltree_entry_ctor - lock tree entries slab cache constructor.
- * @obj: the lock-tree entry to construct
- * @cache: the lock tree entry slab cache
- * @flags: constructor flags
- */
-static void ltree_entry_ctor(struct kmem_cache *cache, void *obj)
-{
-       struct ltree_entry *le = obj;
-
-       le->users = 0;
-       init_rwsem(&le->mutex);
-}
-
-/**
  * ubi_eba_copy_leb - copy logical eraseblock.
  * @ubi: UBI device description object
  * @from: physical eraseblock number from where to copy
@@ -1128,14 +1091,6 @@ int ubi_eba_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
        mutex_init(&ubi->alc_mutex);
        ubi->ltree = RB_ROOT;
 
-       if (ubi_devices_cnt == 0) {
-               ltree_slab = kmem_cache_create("ubi_ltree_slab",
-                                              sizeof(struct ltree_entry), 0,
-                                              0, &ltree_entry_ctor);
-               if (!ltree_slab)
-                       return -ENOMEM;
-       }
-
        ubi->global_sqnum = si->max_sqnum + 1;
        num_volumes = ubi->vtbl_slots + UBI_INT_VOL_COUNT;
 
@@ -1205,8 +1160,6 @@ out_free:
                        continue;
                kfree(ubi->volumes[i]->eba_tbl);
        }
-       if (ubi_devices_cnt == 0)
-               kmem_cache_destroy(ltree_slab);
        return err;
 }
 
@@ -1225,6 +1178,4 @@ void ubi_eba_close(const struct ubi_device *ubi)
                        continue;
                kfree(ubi->volumes[i]->eba_tbl);
        }
-       if (ubi_devices_cnt == 1)
-               kmem_cache_destroy(ltree_slab);
 }
diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h
index 318ce25..0f2ea81 100644
--- a/drivers/mtd/ubi/ubi.h
+++ b/drivers/mtd/ubi/ubi.h
@@ -97,6 +97,28 @@ enum {
 extern int ubi_devices_cnt;
 extern struct ubi_device *ubi_devices[];
 
+/**
+ * struct ubi_ltree_entry - an entry in the lock tree.
+ * @rb: links RB-tree nodes
+ * @vol_id: volume ID of the locked logical eraseblock
+ * @lnum: locked logical eraseblock number
+ * @users: how many tasks are using this logical eraseblock or wait for it
+ * @mutex: read/write mutex to implement read/write access serialization to
+ *         the (@vol_id, @lnum) logical eraseblock
+ *
+ * This data structure is used in the EBA unit to implement per-LEB locking.
+ * When a logical eraseblock is being locked - corresponding
+ * &struct ubi_ltree_entry object is inserted to the lock tree (@ubi->ltree).
+ * See EBA unit for details.
+ */
+struct ubi_ltree_entry {
+       struct rb_node rb;
+       int vol_id;
+       int lnum;
+       int users;
+       struct rw_semaphore mutex;
+};
+
 struct ubi_volume_desc;
 
 /**
@@ -359,6 +381,7 @@ struct ubi_device {
 #endif
 };
 
+extern struct kmem_cache *ubi_ltree_slab;
 extern struct file_operations ubi_cdev_operations;
 extern struct file_operations ubi_vol_cdev_operations;
 extern struct class *ubi_class;