Re: [PATCH 53/74] x86, lto, paravirt: Make paravirt thunks global

2012-08-19, Andi Kleen
On Sun, Aug 19, 2012 at 01:27:00AM -0700, Jeremy Fitzhardinge wrote:
> On 08/18/2012 07:56 PM, Andi Kleen wrote:
> > From: Andi Kleen 
> >
> > The paravirt thunks use a hack: a static reference to a static
> > function, which lets that function be referenced from the top-level
> > asm statement.
> >
> > This assumes that gcc always generates static function names in a specific
> > format, which is not necessarily true.
> >
> > Simply make these functions global and asmlinkage. This way the
> > static __used variables are not needed and everything works.
> 
> I'm not a huge fan of unstaticing all this stuff, but it doesn't
> surprise me that the current code is brittle in the face of gcc changes.

Hmm, actually reading my own patch again, it may be wrong. You need
regparm(3) here, right? asmlinkage forces it to regparm(0). I'll change it
to __visible. I think I did that earlier for all the 32-bit code, but
missed this one.

-Andi
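
For illustration, a minimal sketch of the calling-convention point above,
using simplified stand-in definitions (my_asmlinkage, my_visible and
example_restore_fl are hypothetical; the real macros live in the kernel's
linkage/compiler headers):

/* Sketch only: simplified stand-ins for the kernel attributes. */
#define my_asmlinkage __attribute__((regparm(0)))          /* 32-bit: all args on the stack */
#define my_visible    __attribute__((externally_visible))  /* keep the symbol, keep the default ABI */

/*
 * The callee-save thunks pass the argument in registers (the kernel's
 * default regparm(3) convention on 32-bit), so forcing regparm(0) via
 * asmlinkage would break them; __visible only keeps the symbol from
 * being dropped or localized, without changing the calling convention.
 */
my_visible void example_restore_fl(unsigned long flags);   /* hypothetical function */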


Re: [PATCH 53/74] x86, lto, paravirt: Make paravirt thunks global

2012-08-19, Jeremy Fitzhardinge
On 08/18/2012 07:56 PM, Andi Kleen wrote:
> From: Andi Kleen 
>
> The paravirt thunks use a hack: a static reference to a static
> function, which lets that function be referenced from the top-level
> asm statement.
>
> This assumes that gcc always generates static function names in a specific
> format, which is not necessarily true.
>
> Simply make these functions global and asmlinkage. This way the
> static __used variables are not needed and everything works.

I'm not a huge fan of unstaticing all this stuff, but it doesn't
surprise me that the current code is brittle in the face of gcc changes.

J

>
> Changed in paravirt and in all users (Xen and vsmp)
>
> Cc: jer...@goop.org
> Signed-off-by: Andi Kleen 
> ---
>  arch/x86/include/asm/paravirt.h |2 +-
>  arch/x86/kernel/vsmp_64.c   |8 
>  arch/x86/xen/irq.c  |8 
>  arch/x86/xen/mmu.c  |   16 
>  4 files changed, 17 insertions(+), 17 deletions(-)
>
> diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
> index a0facf3..cc733a6 100644
> --- a/arch/x86/include/asm/paravirt.h
> +++ b/arch/x86/include/asm/paravirt.h
> @@ -804,9 +804,9 @@ static __always_inline void arch_spin_unlock(struct 
> arch_spinlock *lock)
>   */
>  #define PV_CALLEE_SAVE_REGS_THUNK(func)  
> \
>   extern typeof(func) __raw_callee_save_##func;   \
> - static void *__##func##__ __used = func;\
>   \
>   asm(".pushsection .text;"   \
> + ".globl __raw_callee_save_" #func " ; " \
>   "__raw_callee_save_" #func ": " \
>   PV_SAVE_ALL_CALLER_REGS \
>   "call " #func ";"   \
> diff --git a/arch/x86/kernel/vsmp_64.c b/arch/x86/kernel/vsmp_64.c
> index 992f890..f393d6d 100644
> --- a/arch/x86/kernel/vsmp_64.c
> +++ b/arch/x86/kernel/vsmp_64.c
> @@ -33,7 +33,7 @@
>   * and vice versa.
>   */
>  
> -static unsigned long vsmp_save_fl(void)
> +asmlinkage unsigned long vsmp_save_fl(void)
>  {
>   unsigned long flags = native_save_fl();
>  
> @@ -43,7 +43,7 @@ static unsigned long vsmp_save_fl(void)
>  }
>  PV_CALLEE_SAVE_REGS_THUNK(vsmp_save_fl);
>  
> -static void vsmp_restore_fl(unsigned long flags)
> +asmlinkage void vsmp_restore_fl(unsigned long flags)
>  {
>   if (flags & X86_EFLAGS_IF)
>   flags &= ~X86_EFLAGS_AC;
> @@ -53,7 +53,7 @@ static void vsmp_restore_fl(unsigned long flags)
>  }
>  PV_CALLEE_SAVE_REGS_THUNK(vsmp_restore_fl);
>  
> -static void vsmp_irq_disable(void)
> +asmlinkage void vsmp_irq_disable(void)
>  {
>   unsigned long flags = native_save_fl();
>  
> @@ -61,7 +61,7 @@ static void vsmp_irq_disable(void)
>  }
>  PV_CALLEE_SAVE_REGS_THUNK(vsmp_irq_disable);
>  
> -static void vsmp_irq_enable(void)
> +asmlinkage void vsmp_irq_enable(void)
>  {
>   unsigned long flags = native_save_fl();
>  
> diff --git a/arch/x86/xen/irq.c b/arch/x86/xen/irq.c
> index 1573376..3dd8831 100644
> --- a/arch/x86/xen/irq.c
> +++ b/arch/x86/xen/irq.c
> @@ -21,7 +21,7 @@ void xen_force_evtchn_callback(void)
>   (void)HYPERVISOR_xen_version(0, NULL);
>  }
>  
> -static unsigned long xen_save_fl(void)
> +asmlinkage unsigned long xen_save_fl(void)
>  {
>   struct vcpu_info *vcpu;
>   unsigned long flags;
> @@ -39,7 +39,7 @@ static unsigned long xen_save_fl(void)
>  }
>  PV_CALLEE_SAVE_REGS_THUNK(xen_save_fl);
>  
> -static void xen_restore_fl(unsigned long flags)
> +asmlinkage void xen_restore_fl(unsigned long flags)
>  {
>   struct vcpu_info *vcpu;
>  
> @@ -66,7 +66,7 @@ static void xen_restore_fl(unsigned long flags)
>  }
>  PV_CALLEE_SAVE_REGS_THUNK(xen_restore_fl);
>  
> -static void xen_irq_disable(void)
> +asmlinkage void xen_irq_disable(void)
>  {
>   /* There's a one instruction preempt window here.  We need to
>  make sure we're don't switch CPUs between getting the vcpu
> @@ -77,7 +77,7 @@ static void xen_irq_disable(void)
>  }
>  PV_CALLEE_SAVE_REGS_THUNK(xen_irq_disable);
>  
> -static void xen_irq_enable(void)
> +asmlinkage void xen_irq_enable(void)
>  {
>   struct vcpu_info *vcpu;
>  
> diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
> index b65a761..9f82443 100644
> --- a/arch/x86/xen/mmu.c
> +++ b/arch/x86/xen/mmu.c
> @@ -429,7 +429,7 @@ static pteval_t iomap_pte(pteval_t val)
>   return val;
>  }
>  
> -static pteval_t xen_pte_val(pte_t pte)
> +asmlinkage pteval_t xen_pte_val(pte_t pte)
>  {
>   pteval_t pteval = pte.pte;
>  #if 0
> @@ -446,7 +446,7 @@ static pteval_t xen_pte_val(pte_t pte)
>  }
>  PV_CALLEE_SAVE_REGS_THUNK(xen_pte_val);
>  
> -static pgdval_t xen_pgd_val(pgd_t pgd)
> +asmlinkage pgdval_t xen_pgd_val(pgd_t pgd)
>  {
>   return pte_mfn_to_pfn(pgd.pgd);
>  }
> @@ 
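
For illustration, a hypothetical sketch of why the old pattern is fragile
(my_save_fl and the surrounding names are made up; register save/restore
is elided):

/* Old pattern (sketch): a top-level asm names a static C function directly. */
static unsigned long my_save_fl(void)
{
	return 0;
}
static void *__my_save_fl__ __attribute__((used)) = my_save_fl;

asm(".pushsection .text;"
    "__raw_callee_save_my_save_fl: "
    "call my_save_fl;"          /* assumes the assembler symbol is literally my_save_fl */
    "ret;"
    ".popsection");

/*
 * Under LTO or -fwhole-program, gcc may localize or rename the static
 * symbol (e.g. to something like my_save_fl.lto_priv.0), and the call
 * above then no longer resolves.  A global function keeps its name.
 */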


[PATCH 53/74] x86, lto, paravirt: Make paravirt thunks global

2012-08-18, Andi Kleen
From: Andi Kleen 

The paravirt thunks use a hack: a static reference to a static
function, which lets that function be referenced from the top-level
asm statement.

This assumes that gcc always generates static function names in a specific
format, which is not necessarily true.

Simply make these functions global and asmlinkage. This way the
static __used variables are not needed and everything works.

Changed in paravirt and in all users (Xen and vsmp)

Cc: jer...@goop.org
Signed-off-by: Andi Kleen 
---
 arch/x86/include/asm/paravirt.h |2 +-
 arch/x86/kernel/vsmp_64.c   |8 
 arch/x86/xen/irq.c  |8 
 arch/x86/xen/mmu.c  |   16 
 4 files changed, 17 insertions(+), 17 deletions(-)

diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index a0facf3..cc733a6 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -804,9 +804,9 @@ static __always_inline void arch_spin_unlock(struct 
arch_spinlock *lock)
  */
 #define PV_CALLEE_SAVE_REGS_THUNK(func)
\
extern typeof(func) __raw_callee_save_##func;   \
-   static void *__##func##__ __used = func;\
\
asm(".pushsection .text;"   \
+   ".globl __raw_callee_save_" #func " ; " \
"__raw_callee_save_" #func ": " \
PV_SAVE_ALL_CALLER_REGS \
"call " #func ";"   \
diff --git a/arch/x86/kernel/vsmp_64.c b/arch/x86/kernel/vsmp_64.c
index 992f890..f393d6d 100644
--- a/arch/x86/kernel/vsmp_64.c
+++ b/arch/x86/kernel/vsmp_64.c
@@ -33,7 +33,7 @@
  * and vice versa.
  */
 
-static unsigned long vsmp_save_fl(void)
+asmlinkage unsigned long vsmp_save_fl(void)
 {
unsigned long flags = native_save_fl();
 
@@ -43,7 +43,7 @@ static unsigned long vsmp_save_fl(void)
 }
 PV_CALLEE_SAVE_REGS_THUNK(vsmp_save_fl);
 
-static void vsmp_restore_fl(unsigned long flags)
+asmlinkage void vsmp_restore_fl(unsigned long flags)
 {
if (flags & X86_EFLAGS_IF)
flags &= ~X86_EFLAGS_AC;
@@ -53,7 +53,7 @@ static void vsmp_restore_fl(unsigned long flags)
 }
 PV_CALLEE_SAVE_REGS_THUNK(vsmp_restore_fl);
 
-static void vsmp_irq_disable(void)
+asmlinkage void vsmp_irq_disable(void)
 {
unsigned long flags = native_save_fl();
 
@@ -61,7 +61,7 @@ static void vsmp_irq_disable(void)
 }
 PV_CALLEE_SAVE_REGS_THUNK(vsmp_irq_disable);
 
-static void vsmp_irq_enable(void)
+asmlinkage void vsmp_irq_enable(void)
 {
unsigned long flags = native_save_fl();
 
diff --git a/arch/x86/xen/irq.c b/arch/x86/xen/irq.c
index 1573376..3dd8831 100644
--- a/arch/x86/xen/irq.c
+++ b/arch/x86/xen/irq.c
@@ -21,7 +21,7 @@ void xen_force_evtchn_callback(void)
(void)HYPERVISOR_xen_version(0, NULL);
 }
 
-static unsigned long xen_save_fl(void)
+asmlinkage unsigned long xen_save_fl(void)
 {
struct vcpu_info *vcpu;
unsigned long flags;
@@ -39,7 +39,7 @@ static unsigned long xen_save_fl(void)
 }
 PV_CALLEE_SAVE_REGS_THUNK(xen_save_fl);
 
-static void xen_restore_fl(unsigned long flags)
+asmlinkage void xen_restore_fl(unsigned long flags)
 {
struct vcpu_info *vcpu;
 
@@ -66,7 +66,7 @@ static void xen_restore_fl(unsigned long flags)
 }
 PV_CALLEE_SAVE_REGS_THUNK(xen_restore_fl);
 
-static void xen_irq_disable(void)
+asmlinkage void xen_irq_disable(void)
 {
/* There's a one instruction preempt window here.  We need to
   make sure we're don't switch CPUs between getting the vcpu
@@ -77,7 +77,7 @@ static void xen_irq_disable(void)
 }
 PV_CALLEE_SAVE_REGS_THUNK(xen_irq_disable);
 
-static void xen_irq_enable(void)
+asmlinkage void xen_irq_enable(void)
 {
struct vcpu_info *vcpu;
 
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index b65a761..9f82443 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -429,7 +429,7 @@ static pteval_t iomap_pte(pteval_t val)
return val;
 }
 
-static pteval_t xen_pte_val(pte_t pte)
+asmlinkage pteval_t xen_pte_val(pte_t pte)
 {
pteval_t pteval = pte.pte;
 #if 0
@@ -446,7 +446,7 @@ static pteval_t xen_pte_val(pte_t pte)
 }
 PV_CALLEE_SAVE_REGS_THUNK(xen_pte_val);
 
-static pgdval_t xen_pgd_val(pgd_t pgd)
+asmlinkage pgdval_t xen_pgd_val(pgd_t pgd)
 {
return pte_mfn_to_pfn(pgd.pgd);
 }
@@ -477,7 +477,7 @@ void xen_set_pat(u64 pat)
WARN_ON(pat != 0x0007010600070106ull);
 }
 
-static pte_t xen_make_pte(pteval_t pte)
+asmlinkage pte_t xen_make_pte(pteval_t pte)
 {
phys_addr_t addr = (pte & PTE_PFN_MASK);
 #if 0
@@ -512,14 +512,14 @@ static pte_t xen_make_pte(pteval_t pte)
 }
 PV_CALLEE_SAVE_REGS_THUNK(xen_make_pte);
 
-static pgd_t xen_make_pgd(pgdval_t pgd)
+asmlinkage pgd_t 
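
For reference, roughly what PV_CALLEE_SAVE_REGS_THUNK() expands to before
and after this change, for a hypothetical my_irq_disable() (the __used and
asmlinkage macros are assumed from the kernel headers; the register
save/restore and the tail of the asm are elided, as in the hunk above):

#if 0	/* before: the function stays static */
static void my_irq_disable(void) { }
extern typeof(my_irq_disable) __raw_callee_save_my_irq_disable;
static void *__my_irq_disable__ __used = my_irq_disable;	/* keeps the static function emitted */
asm(".pushsection .text;"
    "__raw_callee_save_my_irq_disable: "	/* local label */
    /* save caller-clobbered regs ... */
    "call my_irq_disable;"	/* assumes the symbol is named exactly like the C function */
    /* ... restore, ret, .popsection */);
#else	/* after: the function is global, no helper variable needed */
asmlinkage void my_irq_disable(void) { }
extern typeof(my_irq_disable) __raw_callee_save_my_irq_disable;
asm(".pushsection .text;"
    ".globl __raw_callee_save_my_irq_disable ; "
    "__raw_callee_save_my_irq_disable: "
    /* save caller-clobbered regs ... */
    "call my_irq_disable;"	/* global symbol, name is stable */
    /* ... restore, ret, .popsection */);
#endif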
