[PATCH] slub: fix coding style problems

2014-10-02 Thread Min-Hua Chen
Fix the most obvious coding style problems reported by checkpatch.pl -f mm/slub.c.

Signed-off-by: Min-Hua Chen <orca.c...@gmail.com>
---
 mm/slub.c |  121 ++++++++++++++++++++++++++++++++-----------------------------
 1 file changed, 63 insertions(+), 58 deletions(-)

diff --git a/mm/slub.c b/mm/slub.c
index 3e8afcc..7ea162f 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -93,25 +93,25 @@
  *
  * Overloading of page flags that are otherwise used for LRU management.
  *
- * PageActive 		The slab is frozen and exempt from list processing.
- * 			This means that the slab is dedicated to a purpose
- * 			such as satisfying allocations for a specific
- * 			processor. Objects may be freed in the slab while
- * 			it is frozen but slab_free will then skip the usual
- * 			list operations. It is up to the processor holding
- * 			the slab to integrate the slab into the slab lists
- * 			when the slab is no longer needed.
+ * PageActive		The slab is frozen and exempt from list processing.
+ *			This means that the slab is dedicated to a purpose
+ *			such as satisfying allocations for a specific
+ *			processor. Objects may be freed in the slab while
+ *			it is frozen but slab_free will then skip the usual
+ *			list operations. It is up to the processor holding
+ *			the slab to integrate the slab into the slab lists
+ *			when the slab is no longer needed.
 *
- * 			One use of this flag is to mark slabs that are
- * 			used for allocations. Then such a slab becomes a cpu
- * 			slab. The cpu slab may be equipped with an additional
- * 			freelist that allows lockless access to
- * 			free objects in addition to the regular freelist
- * 			that requires the slab lock.
+ *			One use of this flag is to mark slabs that are
+ *			used for allocations. Then such a slab becomes a cpu
+ *			slab. The cpu slab may be equipped with an additional
+ *			freelist that allows lockless access to
+ *			free objects in addition to the regular freelist
+ *			that requires the slab lock.
 *
 * PageError		Slab requires special handling due to debug
- * 			options set. This moves	slab handling out of
- * 			the fast path and disables lockless freelists.
+ *			options set. This moves	slab handling out of
+ *			the fast path and disables lockless freelists.
  */

 static inline int kmem_cache_debug(struct kmem_cache *s)
@@ -230,7 +230,7 @@ static inline void stat(const struct kmem_cache *s, enum stat_item si)
 }

 /********************************************************************
- * 			Core slab cache functions
+ *			Core slab cache functions
 *******************************************************************/

 /* Verify that a pointer has an address that is valid within a slab page */
@@ -355,9 +355,11 @@ static __always_inline void slab_unlock(struct page *page)
 	__bit_spin_unlock(PG_locked, &page->flags);
 }

-static inline void set_page_slub_counters(struct page *page, unsigned long counters_new)
+static inline void set_page_slub_counters(struct page *page,
+					  unsigned long counters_new)
 {
 	struct page tmp;
+
 	tmp.counters = counters_new;
 	/*
 	 * page->counters can cover frozen/inuse/objects as well
@@ -371,14 +373,14 @@ static inline void set_page_slub_counters(struct page *page, unsigned long count
 }

 /* Interrupts must be disabled (for the fallback code to work right) */
-static inline bool __cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
-		void *freelist_old, unsigned long counters_old,
-		void *freelist_new, unsigned long counters_new,
-		const char *n)
+static inline bool __cmpxchg_double_slab(struct kmem_cache *s,
+		struct page *page, void *freelist_old,
+		unsigned long counters_old, void *freelist_new,
+		unsigned long counters_new, const char *n)
 {
 	VM_BUG_ON(!irqs_disabled());
 #if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \
-    defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
+	defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
 	if (s->flags & __CMPXCHG_DOUBLE) {
 		if (cmpxchg_double(&page->freelist, &page->counters,
 				   freelist_old, counters_old,
@@ -414,7 +416,7 @@ static inline bool cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
 		const char *n)
 {
 #if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \
-    defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
+	defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
 	if (s->flags & __CMPXCHG_DOUBLE) {
 		if (cmpxchg_double(&page->freelist, &page->counters,
 				   freelist_old, counters_old,
@@ -550,6 +552,7 @@ static void print_track(const char *s, struct track *t)
 #ifdef CONFIG_STACKTRACE
 	{
 		int i;
+
 		for (i = 0; i < TRACK_ADDRS_COUNT; i++)
 			if 
 if 
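
For readers skimming the hunks above: __cmpxchg_double_slab() updates the two adjacent words page->freelist and page->counters in a single double-word compare-and-exchange, falling back to a locked compare-and-store when CONFIG_HAVE_CMPXCHG_DOUBLE is unavailable. As a rough illustration of that pattern, here is a minimal user-space sketch, assuming GCC/Clang __atomic builtins and __int128 support; struct two_words, fallback_lock, and cmpxchg_double_sketch() are names invented for this sketch, not kernel API.

/*
 * Minimal user-space sketch of the double-word cmpxchg pattern used by
 * __cmpxchg_double_slab(). Build: cc -mcx16 sketch.c -lpthread on x86-64
 * (some toolchains need -latomic instead of -mcx16).
 */
#include <pthread.h>
#include <stdbool.h>
#include <string.h>

/* Two adjacent words updated together, like page->freelist/page->counters.
 * 16-byte alignment is required for the hardware double-word CAS. */
struct two_words {
	void *freelist;
	unsigned long counters;
} __attribute__((aligned(2 * sizeof(void *))));

static pthread_mutex_t fallback_lock = PTHREAD_MUTEX_INITIALIZER;

static bool cmpxchg_double_sketch(struct two_words *w,
		void *freelist_old, unsigned long counters_old,
		void *freelist_new, unsigned long counters_new)
{
#ifdef __SIZEOF_INT128__
	/* Fast path: pack both words and issue one 16-byte CAS
	 * (cmpxchg16b on x86-64). The pointer cast is a type-pun
	 * kept simple for illustration. */
	unsigned __int128 old, new;
	struct two_words o = { freelist_old, counters_old };
	struct two_words n = { freelist_new, counters_new };

	_Static_assert(sizeof(struct two_words) == sizeof(unsigned __int128),
		       "sketch assumes two pointer-size words fill an __int128");
	memcpy(&old, &o, sizeof(old));
	memcpy(&new, &n, sizeof(new));
	return __atomic_compare_exchange_n((unsigned __int128 *)w, &old, new,
			false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
#else
	/* Fallback, shaped like the kernel's: take a lock, compare both
	 * words, and only then store the new pair. */
	bool ok = false;

	pthread_mutex_lock(&fallback_lock);
	if (w->freelist == freelist_old && w->counters == counters_old) {
		w->freelist = freelist_new;
		w->counters = counters_new;
		ok = true;
	}
	pthread_mutex_unlock(&fallback_lock);
	return ok;
#endif
}

The comment in the diff ("Interrupts must be disabled (for the fallback code to work right)") is the kernel-side analogue of taking fallback_lock here: the locked path only stays atomic if nothing can interleave with it on the same CPU.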
