# HG changeset patch
# User Andrea Arcangeli <[EMAIL PROTECTED]>
# Date 1209740226 -7200
# Node ID 721c3787cd42043734331e54a42eb20c51766f71
# Parent  0be678c52e540d5f5d5fd9af549b57b9bb018d32
mm_lock-rwsem

Convert mm_lock to take rw_semaphores, following the conversion of
i_mmap_lock and anon_vma_lock from spinlocks to rw_semaphores.

Signed-off-by: Andrea Arcangeli <[EMAIL PROTECTED]>
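
For reviewers, a minimal sketch of how a caller is expected to use the
converted API. This is not part of the patch: example_register() and
its body are hypothetical; only mm_lock/mm_unlock and struct
mm_lock_data come from the code below.

/*
 * Hypothetical caller, e.g. a registration path that must exclude
 * all rmap write-side activity on "mm" while it runs.
 */
static int example_register(struct mm_struct *mm)
{
	struct mm_lock_data data;
	int ret;

	/*
	 * Takes mm->mmap_sem for writing, then down_write()s every
	 * distinct anon_vma->sem and i_mmap_sem used by this mm, in
	 * ascending address order.
	 */
	ret = mm_lock(mm, &data);
	if (ret)
		return ret;	/* -ENOMEM: vmalloc of the arrays failed */

	/* ... critical section: no vma can appear, change or go away ... */

	/* Releases the sems, vfrees the arrays, drops mm->mmap_sem. */
	mm_unlock(mm, &data);
	return 0;
}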

diff --git a/include/linux/mm.h b/include/linux/mm.h
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1084,10 +1084,10 @@ extern int install_special_mapping(struc
                                   unsigned long flags, struct page **pages);
 
 struct mm_lock_data {
-       spinlock_t **i_mmap_locks;
-       spinlock_t **anon_vma_locks;
-       size_t nr_i_mmap_locks;
-       size_t nr_anon_vma_locks;
+       struct rw_semaphore **i_mmap_sems;
+       struct rw_semaphore **anon_vma_sems;
+       size_t nr_i_mmap_sems;
+       size_t nr_anon_vma_sems;
 };
 extern int mm_lock(struct mm_struct *mm, struct mm_lock_data *data);
 extern void mm_unlock(struct mm_struct *mm, struct mm_lock_data *data);
diff --git a/mm/mmap.c b/mm/mmap.c
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -2255,8 +2255,8 @@ int install_special_mapping(struct mm_st
 
 static int mm_lock_cmp(const void *a, const void *b)
 {
-       unsigned long _a = (unsigned long)*(spinlock_t **)a;
-       unsigned long _b = (unsigned long)*(spinlock_t **)b;
+       unsigned long _a = (unsigned long)*(struct rw_semaphore **)a;
+       unsigned long _b = (unsigned long)*(struct rw_semaphore **)b;
 
        cond_resched();
        if (_a < _b)
@@ -2266,7 +2266,7 @@ static int mm_lock_cmp(const void *a, co
        return 0;
 }
 
-static unsigned long mm_lock_sort(struct mm_struct *mm, spinlock_t **locks,
+static unsigned long mm_lock_sort(struct mm_struct *mm, struct rw_semaphore **sems,
                                  int anon)
 {
        struct vm_area_struct *vma;
@@ -2275,59 +2275,59 @@ static unsigned long mm_lock_sort(struct
        for (vma = mm->mmap; vma; vma = vma->vm_next) {
                if (anon) {
                        if (vma->anon_vma)
-                               locks[i++] = &vma->anon_vma->lock;
+                               sems[i++] = &vma->anon_vma->sem;
                } else {
                        if (vma->vm_file && vma->vm_file->f_mapping)
-                               locks[i++] = &vma->vm_file->f_mapping->i_mmap_lock;
+                               sems[i++] = &vma->vm_file->f_mapping->i_mmap_sem;
                }
        }
 
        if (!i)
                goto out;
 
-       sort(locks, i, sizeof(spinlock_t *), mm_lock_cmp, NULL);
+       sort(sems, i, sizeof(struct rw_semaphore *), mm_lock_cmp, NULL);
 
 out:
        return i;
 }
 
 static inline unsigned long mm_lock_sort_anon_vma(struct mm_struct *mm,
-                                                 spinlock_t **locks)
+                                                 struct rw_semaphore **sems)
 {
-       return mm_lock_sort(mm, locks, 1);
+       return mm_lock_sort(mm, sems, 1);
 }
 
 static inline unsigned long mm_lock_sort_i_mmap(struct mm_struct *mm,
-                                               spinlock_t **locks)
+                                               struct rw_semaphore **sems)
 {
-       return mm_lock_sort(mm, locks, 0);
+       return mm_lock_sort(mm, sems, 0);
 }
 
-static void mm_lock_unlock(spinlock_t **locks, size_t nr, int lock)
+static void mm_lock_unlock(struct rw_semaphore **sems, size_t nr, int lock)
 {
-       spinlock_t *last = NULL;
+       struct rw_semaphore *last = NULL;
        size_t i;
 
        for (i = 0; i < nr; i++)
                /*  Multiple vmas may use the same lock. */
-               if (locks[i] != last) {
-                       BUG_ON((unsigned long) last > (unsigned long) locks[i]);
-                       last = locks[i];
+               if (sems[i] != last) {
+                       BUG_ON((unsigned long) last > (unsigned long) sems[i]);
+                       last = sems[i];
                        if (lock)
-                               spin_lock(last);
+                               down_write(last);
                        else
-                               spin_unlock(last);
+                               up_write(last);
                }
 }
 
-static inline void __mm_lock(spinlock_t **locks, size_t nr)
+static inline void __mm_lock(struct rw_semaphore **sems, size_t nr)
 {
-       mm_lock_unlock(locks, nr, 1);
+       mm_lock_unlock(sems, nr, 1);
 }
 
-static inline void __mm_unlock(spinlock_t **locks, size_t nr)
+static inline void __mm_unlock(struct rw_semaphore **sems, size_t nr)
 {
-       mm_lock_unlock(locks, nr, 0);
+       mm_lock_unlock(sems, nr, 0);
 }
 
 /*
@@ -2351,10 +2351,10 @@ static inline void __mm_unlock(spinlock_
  * of vmas is defined in /proc/sys/vm/max_map_count.
  *
  * mm_lock() can fail if memory allocation fails. The worst case
- * vmalloc allocation required is 2*max_map_count*sizeof(spinlock_t *),
- * so around 1Mbyte, but in practice it'll be much less because
- * normally there won't be max_map_count vmas allocated in the task
- * that runs mm_lock().
+ * vmalloc allocation required is 2*max_map_count*sizeof(struct
+ * rw_semaphore *), so around 1Mbyte, but in practice it'll be much
+ * less because normally there won't be max_map_count vmas allocated
+ * in the task that runs mm_lock().
  *
  * The vmalloc memory allocated by mm_lock is stored in the
  * mm_lock_data structure that must be allocated by the caller and it
@@ -2368,20 +2368,20 @@ static inline void __mm_unlock(spinlock_
  */
 int mm_lock(struct mm_struct *mm, struct mm_lock_data *data)
 {
-       spinlock_t **anon_vma_locks, **i_mmap_locks;
+       struct rw_semaphore **anon_vma_sems, **i_mmap_sems;
 
        down_write(&mm->mmap_sem);
        if (mm->map_count) {
-               anon_vma_locks = vmalloc(sizeof(spinlock_t *) * mm->map_count);
-               if (unlikely(!anon_vma_locks)) {
+               anon_vma_sems = vmalloc(sizeof(struct rw_semaphore *) * mm->map_count);
+               if (unlikely(!anon_vma_sems)) {
                        up_write(&mm->mmap_sem);
                        return -ENOMEM;
                }
 
-               i_mmap_locks = vmalloc(sizeof(spinlock_t *) * mm->map_count);
-               if (unlikely(!i_mmap_locks)) {
+               i_mmap_sems = vmalloc(sizeof(struct rw_semaphore *) * mm->map_count);
+               if (unlikely(!i_mmap_sems)) {
                        up_write(&mm->mmap_sem);
-                       vfree(anon_vma_locks);
+                       vfree(anon_vma_sems);
                        return -ENOMEM;
                }
 
@@ -2389,31 +2389,31 @@ int mm_lock(struct mm_struct *mm, struct
                 * When mm_lock_sort_anon_vma/i_mmap returns zero it
                 * means there's no lock to take and so we can free
                 * the array here without waiting mm_unlock. mm_unlock
-                * will do nothing if nr_i_mmap/anon_vma_locks is
+                * will do nothing if nr_i_mmap/anon_vma_sems is
                 * zero.
                 */
-               data->nr_anon_vma_locks = mm_lock_sort_anon_vma(mm, anon_vma_locks);
-               data->nr_i_mmap_locks = mm_lock_sort_i_mmap(mm, i_mmap_locks);
+               data->nr_anon_vma_sems = mm_lock_sort_anon_vma(mm, anon_vma_sems);
+               data->nr_i_mmap_sems = mm_lock_sort_i_mmap(mm, i_mmap_sems);
 
-               if (data->nr_anon_vma_locks) {
-                       __mm_lock(anon_vma_locks, data->nr_anon_vma_locks);
-                       data->anon_vma_locks = anon_vma_locks;
+               if (data->nr_anon_vma_sems) {
+                       __mm_lock(anon_vma_sems, data->nr_anon_vma_sems);
+                       data->anon_vma_sems = anon_vma_sems;
                } else
-                       vfree(anon_vma_locks);
+                       vfree(anon_vma_sems);
 
-               if (data->nr_i_mmap_locks) {
-                       __mm_lock(i_mmap_locks, data->nr_i_mmap_locks);
-                       data->i_mmap_locks = i_mmap_locks;
+               if (data->nr_i_mmap_sems) {
+                       __mm_lock(i_mmap_sems, data->nr_i_mmap_sems);
+                       data->i_mmap_sems = i_mmap_sems;
                } else
-                       vfree(i_mmap_locks);
+                       vfree(i_mmap_sems);
        }
        return 0;
 }
 
-static void mm_unlock_vfree(spinlock_t **locks, size_t nr)
+static void mm_unlock_vfree(struct rw_semaphore **sems, size_t nr)
 {
-       __mm_unlock(locks, nr);
-       vfree(locks);
+       __mm_unlock(sems, nr);
+       vfree(sems);
 }
 
 /*
@@ -2430,12 +2430,12 @@ void mm_unlock(struct mm_struct *mm, str
 void mm_unlock(struct mm_struct *mm, struct mm_lock_data *data)
 {
        if (mm->map_count) {
-               if (data->nr_anon_vma_locks)
-                       mm_unlock_vfree(data->anon_vma_locks,
-                                       data->nr_anon_vma_locks);
-               if (data->nr_i_mmap_locks)
-                       mm_unlock_vfree(data->i_mmap_locks,
-                                       data->nr_i_mmap_locks);
+               if (data->nr_anon_vma_sems)
+                       mm_unlock_vfree(data->anon_vma_sems,
+                                       data->nr_anon_vma_sems);
+               if (data->nr_i_mmap_sems)
+                       mm_unlock_vfree(data->i_mmap_sems,
+                                       data->nr_i_mmap_sems);
        }
        up_write(&mm->mmap_sem);
 }
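
A note for reviewers on why this stays deadlock-free with sleeping
locks: mm_lock_unlock() walks the sems[] array in the sorted order
produced by sort(), skipping duplicates (several vmas can share one
anon_vma or one address_space), and the BUG_ON() asserts that order.
Any two tasks running mm_lock() against overlapping sets of semaphores
therefore down_write() the common ones in the same global (address)
order, so neither can hold one semaphore while waiting for the other,
exactly as with the spinlock version.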
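
A quick check of the "around 1Mbyte" worst case quoted in the comment,
assuming a 64-bit kernel and the default /proc/sys/vm/max_map_count of
65536 (both assumptions, not stated in the patch):

	2 arrays * 65536 entries * sizeof(struct rw_semaphore *)
		= 2 * 65536 * 8 bytes = 1048576 bytes = 1MiB

The bound is unchanged by this patch, since both spinlock_t * and
struct rw_semaphore * are pointer-sized.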
