[PATCH 02/10] percpu: Move arch XX_PER_CPU_XX definitions into linux/percpu.h

2008-01-08 Thread travis
The arch definitions are all the same. So move them into linux/percpu.h.

We cannot move DECLARE_PER_CPU since some include files just include
asm/percpu.h to avoid include recursion problems.
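
(Illustration, not part of the patch: the macros being consolidated are used
identically on every arch; my_counter and bump() below are hypothetical.)

	/* in a header: make the per cpu variable visible to other files */
	DECLARE_PER_CPU(int, my_counter);

	/* in one .c file: create one copy per cpu and export it to modules */
	DEFINE_PER_CPU(int, my_counter);
	EXPORT_PER_CPU_SYMBOL(my_counter);

	/* access the local cpu's copy (preemption must be disabled) */
	static void bump(void)
	{
		__get_cpu_var(my_counter)++;
	}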

Cc: Rusty Russell <[EMAIL PROTECTED]>
Cc: Andi Kleen <[EMAIL PROTECTED]>
Signed-off-by: Christoph Lameter <[EMAIL PROTECTED]>
Signed-off-by: Mike Travis <[EMAIL PROTECTED]>
---

V1->V2:
- Special consideration for IA64: Add the ability to specify
  arch specific per cpu flags

V2->V3:
- remove .data.percpu attribute from DEFINE_PER_CPU for non-smp case.
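
  (Minimal sketch of the !SMP case, assuming the consolidated definition keeps
  the shape of the old asm-generic one removed below; foo is hypothetical.
  With a single copy per variable there is nothing to place in .data.percpu:)

	#define DEFINE_PER_CPU(type, name)	__typeof__(type) per_cpu__##name

	DEFINE_PER_CPU(int, foo);	/* expands to: __typeof__(int) per_cpu__foo; */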
---
 include/asm-generic/percpu.h |   18 --
 include/asm-ia64/percpu.h|   24 ++--
 include/asm-powerpc/percpu.h |   17 -
 include/asm-s390/percpu.h|   18 --
 include/asm-sparc64/percpu.h |   16 
 include/asm-x86/percpu_32.h  |   12 
 include/asm-x86/percpu_64.h  |   17 -
 include/linux/percpu.h   |   24 
 8 files changed, 26 insertions(+), 120 deletions(-)

--- a/include/asm-generic/percpu.h
+++ b/include/asm-generic/percpu.h
@@ -9,15 +9,6 @@ extern unsigned long __per_cpu_offset[NR
 
 #define per_cpu_offset(x) (__per_cpu_offset[x])
 
-/* Separate out the type, so (int[3], foo) works. */
-#define DEFINE_PER_CPU(type, name) \
-__attribute__((__section__(".data.percpu"))) __typeof__(type) 
per_cpu__##name
-
-#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name)  \
-__attribute__((__section__(".data.percpu.shared_aligned"))) \
-__typeof__(type) per_cpu__##name   \
-cacheline_aligned_in_smp
-
 /* var is in discarded region: offset to particular copy we want */
 #define per_cpu(var, cpu) (*({ \
extern int simple_identifier_##var(void);   \
@@ -27,12 +18,6 @@ extern unsigned long __per_cpu_offset[NR
 
 #else /* ! SMP */
 
-#define DEFINE_PER_CPU(type, name) \
-__typeof__(type) per_cpu__##name
-
-#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name)  \
-DEFINE_PER_CPU(type, name)
-
 #define per_cpu(var, cpu)  (*((void)(cpu), &per_cpu__##var))
 #define __get_cpu_var(var) per_cpu__##var
 #define __raw_get_cpu_var(var) per_cpu__##var
@@ -41,7 +26,4 @@ extern unsigned long __per_cpu_offset[NR
 
 #define DECLARE_PER_CPU(type, name) extern __typeof__(type) per_cpu__##name
 
-#define EXPORT_PER_CPU_SYMBOL(var) EXPORT_SYMBOL(per_cpu__##var)
-#define EXPORT_PER_CPU_SYMBOL_GPL(var) EXPORT_SYMBOL_GPL(per_cpu__##var)
-
 #endif /* _ASM_GENERIC_PERCPU_H_ */
--- a/include/asm-ia64/percpu.h
+++ b/include/asm-ia64/percpu.h
@@ -16,28 +16,11 @@
 #include <linux/threads.h>
 
 #ifdef HAVE_MODEL_SMALL_ATTRIBUTE
-# define __SMALL_ADDR_AREA __attribute__((__model__ (__small__)))
-#else
-# define __SMALL_ADDR_AREA
+# define PER_CPU_ATTRIBUTES	__attribute__((__model__ (__small__)))
 #endif
 
 #define DECLARE_PER_CPU(type, name)\
-   extern __SMALL_ADDR_AREA __typeof__(type) per_cpu__##name
-
-/* Separate out the type, so (int[3], foo) works. */
-#define DEFINE_PER_CPU(type, name) \
-   __attribute__((__section__(".data.percpu")))\
-   __SMALL_ADDR_AREA __typeof__(type) per_cpu__##name
-
-#ifdef CONFIG_SMP
-#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name)  \
-   __attribute__((__section__(".data.percpu.shared_aligned"))) \
-   __SMALL_ADDR_AREA __typeof__(type) per_cpu__##name  \
-   cacheline_aligned_in_smp
-#else
-#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name)  \
-   DEFINE_PER_CPU(type, name)
-#endif
+   extern PER_CPU_ATTRIBUTES __typeof__(type) per_cpu__##name
 
 #ifdef CONFIG_SMP
 
@@ -63,9 +46,6 @@ extern void *per_cpu_init(void);
 
 #endif /* SMP */
 
-#define EXPORT_PER_CPU_SYMBOL(var) EXPORT_SYMBOL(per_cpu__##var)
-#define EXPORT_PER_CPU_SYMBOL_GPL(var) EXPORT_SYMBOL_GPL(per_cpu__##var)
-
 /*
  * Be extremely careful when taking the address of this variable!  Due to virtual
  * remapping, it is different from the canonical address returned by __get_cpu_var(var)!
--- a/include/asm-powerpc/percpu.h
+++ b/include/asm-powerpc/percpu.h
@@ -16,15 +16,6 @@
 #define __my_cpu_offset() get_paca()->data_offset
 #define per_cpu_offset(x) (__per_cpu_offset(x))
 
-/* Separate out the type, so (int[3], foo) works. */
-#define DEFINE_PER_CPU(type, name) \
-__attribute__((__section__(".data.percpu"))) __typeof__(type) 
per_cpu__##name
-
-#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name)  \
-__attribute__((__section__(".data.percpu.shared_aligned"))) \
-__typeof__(type) per_cpu__##name   \
-cacheline_aligned_in_smp
-
 /* var is in discarded region: offset to particular copy we want */
 #define per_cpu(var, cpu) (*RELOC_HIDE(&per_cpu__##var, __per_cpu_offset(cpu)))
 #define __get_cpu_var(var) (*RELOC_HIDE(&per_cpu__##var, __my_cpu_offset()))

[PATCH 02/10] percpu: Move arch XX_PER_CPU_XX definitions into linux/percpu.h

2008-01-07 Thread travis
V1->V2:
- Special consideration for IA64: Add the ability to specify
  arch specific per cpu flags

V2->V3:
- remove .data.percpu attribute from DEFINE_PER_CPU for non-smp case.

The arch definitions are all the same. So move them into linux/percpu.h.

We cannot move DECLARE_PER_CPU since some include files just include
asm/percpu.h to avoid include recursion problems.

Cc: Rusty Russell <[EMAIL PROTECTED]>
Cc: Andi Kleen <[EMAIL PROTECTED]>
Signed-off-by: Christoph Lameter <[EMAIL PROTECTED]>
Signed-off-by: Mike Travis <[EMAIL PROTECTED]>

---
 include/asm-generic/percpu.h |   18 --
 include/asm-ia64/percpu.h|   24 ++--
 include/asm-powerpc/percpu.h |   17 -
 include/asm-s390/percpu.h|   18 --
 include/asm-sparc64/percpu.h |   16 
 include/asm-x86/percpu_32.h  |   12 
 include/asm-x86/percpu_64.h  |   17 -
 include/linux/percpu.h   |   24 
 8 files changed, 26 insertions(+), 120 deletions(-)

--- a/include/asm-generic/percpu.h
+++ b/include/asm-generic/percpu.h
@@ -9,15 +9,6 @@ extern unsigned long __per_cpu_offset[NR
 
 #define per_cpu_offset(x) (__per_cpu_offset[x])
 
-/* Separate out the type, so (int[3], foo) works. */
-#define DEFINE_PER_CPU(type, name) \
-__attribute__((__section__(".data.percpu"))) __typeof__(type) 
per_cpu__##name
-
-#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name)  \
-__attribute__((__section__(".data.percpu.shared_aligned"))) \
-__typeof__(type) per_cpu__##name   \
-cacheline_aligned_in_smp
-
 /* var is in discarded region: offset to particular copy we want */
 #define per_cpu(var, cpu) (*({ \
extern int simple_identifier_##var(void);   \
@@ -35,12 +26,6 @@ do {							\
 } while (0)
 #else /* ! SMP */
 
-#define DEFINE_PER_CPU(type, name) \
-__typeof__(type) per_cpu__##name
-
-#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name)  \
-DEFINE_PER_CPU(type, name)
-
 #define per_cpu(var, cpu)  (*((void)(cpu), &per_cpu__##var))
 #define __get_cpu_var(var) per_cpu__##var
 #define __raw_get_cpu_var(var) per_cpu__##var
@@ -49,7 +34,4 @@ do {							\
 
 #define DECLARE_PER_CPU(type, name) extern __typeof__(type) per_cpu__##name
 
-#define EXPORT_PER_CPU_SYMBOL(var) EXPORT_SYMBOL(per_cpu__##var)
-#define EXPORT_PER_CPU_SYMBOL_GPL(var) EXPORT_SYMBOL_GPL(per_cpu__##var)
-
 #endif /* _ASM_GENERIC_PERCPU_H_ */
--- a/include/asm-ia64/percpu.h
+++ b/include/asm-ia64/percpu.h
@@ -16,28 +16,11 @@
 #include <linux/threads.h>
 
 #ifdef HAVE_MODEL_SMALL_ATTRIBUTE
-# define __SMALL_ADDR_AREA __attribute__((__model__ (__small__)))
-#else
-# define __SMALL_ADDR_AREA
+# define PER_CPU_ATTRIBUTES	__attribute__((__model__ (__small__)))
 #endif
 
 #define DECLARE_PER_CPU(type, name)\
-   extern __SMALL_ADDR_AREA __typeof__(type) per_cpu__##name
-
-/* Separate out the type, so (int[3], foo) works. */
-#define DEFINE_PER_CPU(type, name) \
-   __attribute__((__section__(".data.percpu")))\
-   __SMALL_ADDR_AREA __typeof__(type) per_cpu__##name
-
-#ifdef CONFIG_SMP
-#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name)  \
-   __attribute__((__section__(".data.percpu.shared_aligned"))) \
-   __SMALL_ADDR_AREA __typeof__(type) per_cpu__##name  \
-   cacheline_aligned_in_smp
-#else
-#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name)  \
-   DEFINE_PER_CPU(type, name)
-#endif
+   extern PER_CPU_ATTRIBUTES __typeof__(type) per_cpu__##name
 
 /*
  * Pretty much a literal copy of asm-generic/percpu.h, except that percpu_modcopy() is an
@@ -68,9 +51,6 @@ extern void *per_cpu_init(void);
 
 #endif /* SMP */
 
-#define EXPORT_PER_CPU_SYMBOL(var) EXPORT_SYMBOL(per_cpu__##var)
-#define EXPORT_PER_CPU_SYMBOL_GPL(var) EXPORT_SYMBOL_GPL(per_cpu__##var)
-
 /*
  * Be extremely careful when taking the address of this variable!  Due to virtual
  * remapping, it is different from the canonical address returned by __get_cpu_var(var)!
--- a/include/asm-powerpc/percpu.h
+++ b/include/asm-powerpc/percpu.h
@@ -16,15 +16,6 @@
 #define __my_cpu_offset() get_paca()->data_offset
 #define per_cpu_offset(x) (__per_cpu_offset(x))
 
-/* Separate out the type, so (int[3], foo) works. */
-#define DEFINE_PER_CPU(type, name) \
-__attribute__((__section__(".data.percpu"))) __typeof__(type) 
per_cpu__##name
-
-#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name)  \
-__attribute__((__section__(".data.percpu.shared_aligned"))) \
-__typeof__(type) per_cpu__##name   \
-cacheline_aligned_in_smp
-
 /* var is in discarded region: offset to particular copy we want */

[PATCH 02/10] percpu: Move arch XX_PER_CPU_XX definitions into linux/percpu.h

2007-12-27 Thread travis
V1->V2:
- Special consideration for IA64: Add the ability to specify
  arch specific per cpu flags

The arch definitions are all the same. So move them into linux/percpu.h.

We cannot move DECLARE_PER_CPU since some include files just include
asm/percpu.h to avoid include recursion problems.

Cc: Rusty Russell <[EMAIL PROTECTED]>
Cc: Andi Kleen <[EMAIL PROTECTED]>
Signed-off-by: Christoph Lameter <[EMAIL PROTECTED]>
Signed-off-by: Mike Travis <[EMAIL PROTECTED]>

---
 include/asm-generic/percpu.h |   18 --
 include/asm-ia64/percpu.h|   24 ++--
 include/asm-powerpc/percpu.h |   17 -
 include/asm-s390/percpu.h|   18 --
 include/asm-sparc64/percpu.h |   16 
 include/asm-x86/percpu_32.h  |   12 
 include/asm-x86/percpu_64.h  |   17 -
 include/linux/percpu.h   |   21 +
 8 files changed, 23 insertions(+), 120 deletions(-)

--- a/include/asm-generic/percpu.h
+++ b/include/asm-generic/percpu.h
@@ -9,15 +9,6 @@ extern unsigned long __per_cpu_offset[NR
 
 #define per_cpu_offset(x) (__per_cpu_offset[x])
 
-/* Separate out the type, so (int[3], foo) works. */
-#define DEFINE_PER_CPU(type, name) \
-__attribute__((__section__(".data.percpu"))) __typeof__(type) 
per_cpu__##name
-
-#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name)  \
-__attribute__((__section__(".data.percpu.shared_aligned"))) \
-__typeof__(type) per_cpu__##name   \
-cacheline_aligned_in_smp
-
 /* var is in discarded region: offset to particular copy we want */
 #define per_cpu(var, cpu) (*({ \
extern int simple_identifier_##var(void);   \
@@ -27,12 +18,6 @@ extern unsigned long __per_cpu_offset[NR
 
 #else /* ! SMP */
 
-#define DEFINE_PER_CPU(type, name) \
-__typeof__(type) per_cpu__##name
-
-#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name)  \
-DEFINE_PER_CPU(type, name)
-
 #define per_cpu(var, cpu)  (*((void)(cpu), &per_cpu__##var))
 #define __get_cpu_var(var) per_cpu__##var
 #define __raw_get_cpu_var(var) per_cpu__##var
@@ -41,7 +26,4 @@ extern unsigned long __per_cpu_offset[NR
 
 #define DECLARE_PER_CPU(type, name) extern __typeof__(type) per_cpu__##name
 
-#define EXPORT_PER_CPU_SYMBOL(var) EXPORT_SYMBOL(per_cpu__##var)
-#define EXPORT_PER_CPU_SYMBOL_GPL(var) EXPORT_SYMBOL_GPL(per_cpu__##var)
-
 #endif /* _ASM_GENERIC_PERCPU_H_ */
--- a/include/asm-ia64/percpu.h
+++ b/include/asm-ia64/percpu.h
@@ -16,28 +16,11 @@
 #include <linux/threads.h>
 
 #ifdef HAVE_MODEL_SMALL_ATTRIBUTE
-# define __SMALL_ADDR_AREA __attribute__((__model__ (__small__)))
-#else
-# define __SMALL_ADDR_AREA
+# define PER_CPU_ATTRIBUTES	__attribute__((__model__ (__small__)))
 #endif
 
 #define DECLARE_PER_CPU(type, name)\
-   extern __SMALL_ADDR_AREA __typeof__(type) per_cpu__##name
-
-/* Separate out the type, so (int[3], foo) works. */
-#define DEFINE_PER_CPU(type, name) \
-   __attribute__((__section__(".data.percpu")))\
-   __SMALL_ADDR_AREA __typeof__(type) per_cpu__##name
-
-#ifdef CONFIG_SMP
-#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name)  \
-   __attribute__((__section__(".data.percpu.shared_aligned"))) \
-   __SMALL_ADDR_AREA __typeof__(type) per_cpu__##name  \
-   cacheline_aligned_in_smp
-#else
-#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name)  \
-   DEFINE_PER_CPU(type, name)
-#endif
+   extern PER_CPU_ATTRIBUTES __typeof__(type) per_cpu__##name
 
 #ifdef CONFIG_SMP
 
@@ -63,9 +46,6 @@ extern void *per_cpu_init(void);
 
 #endif /* SMP */
 
-#define EXPORT_PER_CPU_SYMBOL(var) EXPORT_SYMBOL(per_cpu__##var)
-#define EXPORT_PER_CPU_SYMBOL_GPL(var) EXPORT_SYMBOL_GPL(per_cpu__##var)
-
 /*
  * Be extremely careful when taking the address of this variable!  Due to virtual
  * remapping, it is different from the canonical address returned by __get_cpu_var(var)!
--- a/include/asm-powerpc/percpu.h
+++ b/include/asm-powerpc/percpu.h
@@ -16,15 +16,6 @@
 #define __my_cpu_offset() get_paca()->data_offset
 #define per_cpu_offset(x) (__per_cpu_offset(x))
 
-/* Separate out the type, so (int[3], foo) works. */
-#define DEFINE_PER_CPU(type, name) \
-__attribute__((__section__(".data.percpu"))) __typeof__(type) 
per_cpu__##name
-
-#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name)  \
-__attribute__((__section__(".data.percpu.shared_aligned"))) \
-__typeof__(type) per_cpu__##name   \
-cacheline_aligned_in_smp
-
 /* var is in discarded region: offset to particular copy we want */
 #define per_cpu(var, cpu) (*RELOC_HIDE(&per_cpu__##var, __per_cpu_offset(cpu)))
 #define __get_cpu_var(var) (*RELOC_HIDE(&per_cpu__##var, __my_cpu_offset()))
@@ -34,11 +25,6 @@ extern void 

Re: [PATCH 02/10] percpu: Move arch XX_PER_CPU_XX definitions into linux/percpu.h

2007-12-27 Thread David Miller
From: [EMAIL PROTECTED]
Date: Thu, 27 Dec 2007 16:10:48 -0800

> V1->V2:
> - Special consideration for IA64: Add the ability to specify
>   arch specific per cpu flags
> 
> The arch definitions are all the same. So move them into linux/percpu.h.
> 
> We cannot move DECLARE_PER_CPU since some include files just include
> asm/percpu.h to avoid include recursion problems.
> 
> Cc: Rusty Russell <[EMAIL PROTECTED]>
> Cc: Andi Kleen <[EMAIL PROTECTED]>
> Signed-off-by: Christoph Lameter <[EMAIL PROTECTED]>
> Signed-off-by: Mike Travis <[EMAIL PROTECTED]>

Acked-by: David S. Miller <[EMAIL PROTECTED]>


[patch 02/10] percpu: Move arch XX_PER_CPU_XX definitions into linux/percpu.h

2007-11-28 Thread Christoph Lameter
V1->V2:
- Special consideration for IA64: Add the ability to specify
  arch specific per cpu flags

The arch definitions are all the same. So move them into linux/percpu.h.

We cannot move DECLARE_PER_CPU since some include files just include
asm/percpu.h to avoid include recursion problems.

Cc: Rusty Russell <[EMAIL PROTECTED]>
Cc: Andi Kleen <[EMAIL PROTECTED]>
Signed-off-by: Christoph Lameter <[EMAIL PROTECTED]>

---
 include/asm-generic/percpu.h |   18 --
 include/asm-ia64/percpu.h|   24 ++--
 include/asm-powerpc/percpu.h |   17 -
 include/asm-s390/percpu.h|   18 --
 include/asm-sparc64/percpu.h |   16 
 include/asm-x86/percpu_32.h  |   12 
 include/asm-x86/percpu_64.h  |   17 -
 include/linux/percpu.h   |   21 +
 8 files changed, 23 insertions(+), 120 deletions(-)

Index: linux-2.6.24-rc3-mm2/include/linux/percpu.h
===
--- linux-2.6.24-rc3-mm2.orig/include/linux/percpu.h	2007-11-28 12:51:21.231963697 -0800
+++ linux-2.6.24-rc3-mm2/include/linux/percpu.h	2007-11-28 12:51:42.448213150 -0800
@@ -9,6 +9,27 @@
 
 #include <asm/percpu.h>
 
+#ifndef PER_CPU_ATTRIBUTES
+#define PER_CPU_ATTRIBUTES
+#endif
+
+#define DEFINE_PER_CPU(type, name) \
+   __attribute__((__section__(".data.percpu")))\
+   PER_CPU_ATTRIBUTES __typeof__(type) per_cpu__##name
+
+#ifdef CONFIG_SMP
+#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name)  \
+   __attribute__((__section__(".data.percpu.shared_aligned"))) \
+   PER_CPU_ATTRIBUTES __typeof__(type) per_cpu__##name \
+   cacheline_aligned_in_smp
+#else
+#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name)\
+   DEFINE_PER_CPU(type, name)
+#endif
+
+#define EXPORT_PER_CPU_SYMBOL(var) EXPORT_SYMBOL(per_cpu__##var)
+#define EXPORT_PER_CPU_SYMBOL_GPL(var) EXPORT_SYMBOL_GPL(per_cpu__##var)
+
 /* Enough to cover all DEFINE_PER_CPUs in kernel, including modules. */
 #ifndef PERCPU_ENOUGH_ROOM
 #ifdef CONFIG_MODULES
Index: linux-2.6.24-rc3-mm2/include/asm-generic/percpu.h
===
--- linux-2.6.24-rc3-mm2.orig/include/asm-generic/percpu.h	2007-11-28 12:51:38.782051096 -0800
+++ linux-2.6.24-rc3-mm2/include/asm-generic/percpu.h	2007-11-28 12:51:42.448213150 -0800
@@ -9,15 +9,6 @@ extern unsigned long __per_cpu_offset[NR
 
 #define per_cpu_offset(x) (__per_cpu_offset[x])
 
-/* Separate out the type, so (int[3], foo) works. */
-#define DEFINE_PER_CPU(type, name) \
-__attribute__((__section__(".data.percpu"))) __typeof__(type) 
per_cpu__##name
-
-#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name)  \
-__attribute__((__section__(".data.percpu.shared_aligned"))) \
-__typeof__(type) per_cpu__##name   \
-cacheline_aligned_in_smp
-
 /* var is in discarded region: offset to particular copy we want */
 #define per_cpu(var, cpu) (*({ \
extern int simple_identifier_##var(void);   \
@@ -27,12 +18,6 @@ extern unsigned long __per_cpu_offset[NR
 
 #else /* ! SMP */
 
-#define DEFINE_PER_CPU(type, name) \
-__typeof__(type) per_cpu__##name
-
-#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name)  \
-DEFINE_PER_CPU(type, name)
-
 #define per_cpu(var, cpu)  (*((void)(cpu), &per_cpu__##var))
 #define __get_cpu_var(var) per_cpu__##var
 #define __raw_get_cpu_var(var) per_cpu__##var
@@ -41,7 +26,4 @@ extern unsigned long __per_cpu_offset[NR
 
 #define DECLARE_PER_CPU(type, name) extern __typeof__(type) per_cpu__##name
 
-#define EXPORT_PER_CPU_SYMBOL(var) EXPORT_SYMBOL(per_cpu__##var)
-#define EXPORT_PER_CPU_SYMBOL_GPL(var) EXPORT_SYMBOL_GPL(per_cpu__##var)
-
 #endif /* _ASM_GENERIC_PERCPU_H_ */
Index: linux-2.6.24-rc3-mm2/include/asm-ia64/percpu.h
===
--- linux-2.6.24-rc3-mm2.orig/include/asm-ia64/percpu.h	2007-11-28 12:51:21.255962978 -0800
+++ linux-2.6.24-rc3-mm2/include/asm-ia64/percpu.h	2007-11-28 12:51:42.448213150 -0800
@@ -16,28 +16,11 @@
 #include <linux/threads.h>
 
 #ifdef HAVE_MODEL_SMALL_ATTRIBUTE
-# define __SMALL_ADDR_AREA __attribute__((__model__ (__small__)))
-#else
-# define __SMALL_ADDR_AREA
+# define PER_CPU_ATTRIBUTES	__attribute__((__model__ (__small__)))
 #endif
 
 #define DECLARE_PER_CPU(type, name)\
-   extern __SMALL_ADDR_AREA __typeof__(type) per_cpu__##name
-
-/* Separate out the type, so (int[3], foo) works. */
-#define DEFINE_PER_CPU(type, name) \
-   __attribute__((__section__(".data.percpu")))\
-   __SMALL_ADDR_AREA __typeof__(type) per_cpu__##name
-
-#ifdef CONFIG_SMP
-#define 
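
(Illustrative expansion, not part of the patch, for a hypothetical variable foo:
PER_CPU_ATTRIBUTES defaults to empty in linux/percpu.h, and ia64 overrides it
with the small-model attribute, so the consolidated DEFINE_PER_CPU expands as:)

	/* generic arch, PER_CPU_ATTRIBUTES empty: */
	DEFINE_PER_CPU(int, foo);
	/* -> __attribute__((__section__(".data.percpu"))) __typeof__(int) per_cpu__foo; */

	/* ia64 with HAVE_MODEL_SMALL_ATTRIBUTE: */
	DEFINE_PER_CPU(int, foo);
	/* -> __attribute__((__section__(".data.percpu")))
	 *    __attribute__((__model__ (__small__))) __typeof__(int) per_cpu__foo; */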
