The range locking framework doesn't yet provide a nested locking
operation, so fall back to a plain down_write() instead of
down_write_nest_lock() when CONFIG_MEM_RANGE_LOCK is enabled.

Once the range locking API provides nested operation support, this
patch will have to be revisited.

Signed-off-by: Laurent Dufour <[email protected]>
---
 mm/mmap.c | 8 ++++++++
 1 file changed, 8 insertions(+)

diff --git a/mm/mmap.c b/mm/mmap.c
index f82741e199c0..87c8625ae91d 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -3284,7 +3284,11 @@ static void vm_lock_anon_vma(struct mm_struct *mm, struct anon_vma *anon_vma)
                 * The LSB of head.next can't change from under us
                 * because we hold the mm_all_locks_mutex.
                 */
+#ifndef CONFIG_MEM_RANGE_LOCK
                down_write_nest_lock(&anon_vma->root->rwsem, &mm->mmap_sem);
+#else
+               down_write(&anon_vma->root->rwsem);
+#endif
                /*
                 * We can safely modify head.next after taking the
                 * anon_vma->root->rwsem. If some other vma in this mm shares
@@ -3314,7 +3318,11 @@ static void vm_lock_mapping(struct mm_struct *mm, struct address_space *mapping)
                 */
                if (test_and_set_bit(AS_MM_ALL_LOCKS, &mapping->flags))
                        BUG();
+#ifndef CONFIG_MEM_RANGE_LOCK
                down_write_nest_lock(&mapping->i_mmap_rwsem, &mm->mmap_sem);
+#else
+               down_write(&mapping->i_mmap_rwsem);
+#endif
        }
 }
 
-- 
2.7.4