Hey guys,

do you have anything against the renaming below?

ldt->size is simply confusing: it really wants to be called ldt->n_entries,
because that is how the field is used throughout the code.
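
Concretely, struct ldt_struct's

        unsigned int size;

member becomes

        unsigned int n_entries;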

Let me know before I go and run it through testing to check that I'm
not breaking anything.

Thanks!

---
... because this is exactly what it is: the number of entries in the
LDT. Calling it "size" is simply confusing and it is actually begging to
be called "n_entries" or somesuch if you see constructs like:

        alloc_size = size * LDT_ENTRY_SIZE;

since LDT_ENTRY_SIZE is the size of a single entry.
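
After the rename (with the matching "num_entries" parameter name used in
the hunks below), the same computation spells out exactly what gets
multiplied by what:

        alloc_size = num_entries * LDT_ENTRY_SIZE;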

There should be no functionality change resulting from this patch.

Signed-off-by: Borislav Petkov <[email protected]>
---
 arch/x86/events/core.c             |  2 +-
 arch/x86/include/asm/mmu_context.h |  4 ++--
 arch/x86/kernel/ldt.c              | 38 +++++++++++++++++++-------------------
 arch/x86/kernel/process_64.c       |  2 +-
 arch/x86/kernel/step.c             |  2 +-
 5 files changed, 24 insertions(+), 24 deletions(-)

diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index 637feed9a594..82c3083f53b3 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -2343,7 +2343,7 @@ static unsigned long get_segment_base(unsigned int segment)
 
                /* IRQs are off, so this synchronizes with smp_store_release */
                ldt = lockless_dereference(current->active_mm->context.ldt);
-               if (!ldt || idx > ldt->size)
+               if (!ldt || idx > ldt->n_entries)
                        return 0;
 
                desc = &ldt->entries[idx];
diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
index 68b329d77b3a..872e94933b9b 100644
--- a/arch/x86/include/asm/mmu_context.h
+++ b/arch/x86/include/asm/mmu_context.h
@@ -47,7 +47,7 @@ struct ldt_struct {
         * allocations, but it's not worth trying to optimize.
         */
        struct desc_struct *entries;
-       unsigned int size;
+       unsigned int n_entries;
 };
 
 /*
@@ -87,7 +87,7 @@ static inline void load_mm_ldt(struct mm_struct *mm)
         */
 
        if (unlikely(ldt))
-               set_ldt(ldt->entries, ldt->size);
+               set_ldt(ldt->entries, ldt->n_entries);
        else
                clear_LDT();
 #else
diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
index d4a15831ac58..da2184f3d100 100644
--- a/arch/x86/kernel/ldt.c
+++ b/arch/x86/kernel/ldt.c
@@ -30,16 +30,16 @@ static void flush_ldt(void *current_mm)
                return;
 
        pc = &current->active_mm->context;
-       set_ldt(pc->ldt->entries, pc->ldt->size);
+       set_ldt(pc->ldt->entries, pc->ldt->n_entries);
 }
 
/* The caller must call finalize_ldt_struct on the result. LDT starts zeroed. */
-static struct ldt_struct *alloc_ldt_struct(unsigned int size)
+static struct ldt_struct *alloc_ldt_struct(unsigned int num_entries)
 {
        struct ldt_struct *new_ldt;
        unsigned int alloc_size;
 
-       if (size > LDT_ENTRIES)
+       if (num_entries > LDT_ENTRIES)
                return NULL;
 
        new_ldt = kmalloc(sizeof(struct ldt_struct), GFP_KERNEL);
@@ -47,7 +47,7 @@ static struct ldt_struct *alloc_ldt_struct(unsigned int size)
                return NULL;
 
        BUILD_BUG_ON(LDT_ENTRY_SIZE != sizeof(struct desc_struct));
-       alloc_size = size * LDT_ENTRY_SIZE;
+       alloc_size = num_entries * LDT_ENTRY_SIZE;
 
        /*
         * Xen is very picky: it requires a page-aligned LDT that has no
@@ -65,14 +65,14 @@ static struct ldt_struct *alloc_ldt_struct(unsigned int size)
                return NULL;
        }
 
-       new_ldt->size = size;
+       new_ldt->n_entries = num_entries;
        return new_ldt;
 }
 
 /* After calling this, the LDT is immutable. */
 static void finalize_ldt_struct(struct ldt_struct *ldt)
 {
-       paravirt_alloc_ldt(ldt->entries, ldt->size);
+       paravirt_alloc_ldt(ldt->entries, ldt->n_entries);
 }
 
 /* context.lock is held */
@@ -91,8 +91,8 @@ static void free_ldt_struct(struct ldt_struct *ldt)
        if (likely(!ldt))
                return;
 
-       paravirt_free_ldt(ldt->entries, ldt->size);
-       if (ldt->size * LDT_ENTRY_SIZE > PAGE_SIZE)
+       paravirt_free_ldt(ldt->entries, ldt->n_entries);
+       if (ldt->n_entries * LDT_ENTRY_SIZE > PAGE_SIZE)
                vfree_atomic(ldt->entries);
        else
                free_page((unsigned long)ldt->entries);
@@ -122,14 +122,14 @@ int init_new_context_ldt(struct task_struct *tsk, struct mm_struct *mm)
                goto out_unlock;
        }
 
-       new_ldt = alloc_ldt_struct(old_mm->context.ldt->size);
+       new_ldt = alloc_ldt_struct(old_mm->context.ldt->n_entries);
        if (!new_ldt) {
                retval = -ENOMEM;
                goto out_unlock;
        }
 
        memcpy(new_ldt->entries, old_mm->context.ldt->entries,
-              new_ldt->size * LDT_ENTRY_SIZE);
+              new_ldt->n_entries * LDT_ENTRY_SIZE);
        finalize_ldt_struct(new_ldt);
 
        mm->context.ldt = new_ldt;
@@ -152,9 +152,9 @@ void destroy_context_ldt(struct mm_struct *mm)
 
static int read_ldt(void __user *ptr, unsigned long bytecount)
{
 {
-       int retval;
-       unsigned long size;
        struct mm_struct *mm = current->mm;
+       unsigned long entries_size;
+       int retval;
 
        mutex_lock(&mm->context.lock);
 
@@ -166,18 +166,18 @@ static int read_ldt(void __user *ptr, unsigned long bytecount)
        if (bytecount > LDT_ENTRY_SIZE * LDT_ENTRIES)
                bytecount = LDT_ENTRY_SIZE * LDT_ENTRIES;
 
-       size = mm->context.ldt->size * LDT_ENTRY_SIZE;
-       if (size > bytecount)
-               size = bytecount;
+       entries_size = mm->context.ldt->n_entries * LDT_ENTRY_SIZE;
+       if (entries_size > bytecount)
+               entries_size = bytecount;
 
-       if (copy_to_user(ptr, mm->context.ldt->entries, size)) {
+       if (copy_to_user(ptr, mm->context.ldt->entries, entries_size)) {
                retval = -EFAULT;
                goto out_unlock;
        }
 
-       if (size != bytecount) {
+       if (entries_size != bytecount) {
                /* Zero-fill the rest and pretend we read bytecount bytes. */
-               if (clear_user(ptr + size, bytecount - size)) {
+               if (clear_user(ptr + entries_size, bytecount - entries_size)) {
                        retval = -EFAULT;
                        goto out_unlock;
                }
@@ -248,7 +248,7 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
        mutex_lock(&mm->context.lock);
 
        old_ldt = mm->context.ldt;
-       oldsize = old_ldt ? old_ldt->size : 0;
+       oldsize = old_ldt ? old_ldt->n_entries : 0;
        newsize = max(ldt_info.entry_number + 1, oldsize);
 
        error = -ENOMEM;
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index b6840bf3940b..95c76edc4587 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -142,7 +142,7 @@ void release_thread(struct task_struct *dead_task)
                        pr_warn("WARNING: dead process %s still has LDT? 
<%p/%d>\n",
                                dead_task->comm,
                                dead_task->mm->context.ldt->entries,
-                               dead_task->mm->context.ldt->size);
+                               dead_task->mm->context.ldt->n_entries);
                        BUG();
                }
 #endif
diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c
index f07f83b3611b..59b8b9470ed6 100644
--- a/arch/x86/kernel/step.c
+++ b/arch/x86/kernel/step.c
@@ -34,7 +34,7 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
 
                mutex_lock(&child->mm->context.lock);
                if (unlikely(!child->mm->context.ldt ||
-                            seg >= child->mm->context.ldt->size))
+                            seg >= child->mm->context.ldt->n_entries))
                        addr = -1L; /* bogus selector, access would fault */
                else {
                        desc = &child->mm->context.ldt->entries[seg];
-- 
2.13.0


-- 
Regards/Gruss,
    Boris.

Good mailing practices for 400: avoid top-posting and trim the reply.
