[Xen-ia64-devel] [PATCH 5/5] ia64/pv_ops/bp/xen: implemented binary patchable pv_cpu_ops.

2009-03-04 Thread Isaku Yamahata
implemented xen binary patch for pv_cpu_ops.

Signed-off-by: Isaku Yamahata yamah...@valinux.co.jp
---
 arch/ia64/include/asm/xen/privop.h |4 +
 arch/ia64/xen/hypercall.S  |2 +
 arch/ia64/xen/xen_pv_ops.c |  665 
 3 files changed, 671 insertions(+), 0 deletions(-)

diff --git a/arch/ia64/include/asm/xen/privop.h 
b/arch/ia64/include/asm/xen/privop.h
index 2261dda..e5fbaee 100644
--- a/arch/ia64/include/asm/xen/privop.h
+++ b/arch/ia64/include/asm/xen/privop.h
@@ -82,8 +82,10 @@ extern unsigned long xen_thash(unsigned long addr);
 extern unsigned long xen_get_cpuid(int index);
 extern unsigned long xen_get_pmd(int index);
 
+#ifndef ASM_SUPPORTED
 extern unsigned long xen_get_eflag(void);  /* see xen_ia64_getreg */
 extern void xen_set_eflag(unsigned long);  /* see xen_ia64_setreg */
+#endif
 
 //
 /* Instructions paravirtualized for performance */
@@ -108,6 +110,7 @@ extern void xen_set_eflag(unsigned long);   /* see 
xen_ia64_setreg */
 #define xen_get_virtual_pend() \
	(*(((uint8_t *)XEN_MAPPEDREGS->interrupt_mask_addr) - 1))
 
+#ifndef ASM_SUPPORTED
 /* Although all privileged operations can be left to trap and will
  * be properly handled by Xen, some are frequent enough that we use
  * hyperprivops for performance. */
@@ -125,6 +128,7 @@ extern void xen_set_rr0_to_rr4(unsigned long val0, unsigned 
long val1,
   unsigned long val4);
 extern void xen_set_kr(unsigned long index, unsigned long val);
 extern void xen_ptcga(unsigned long addr, unsigned long size);
+#endif /* !ASM_SUPPORTED */
 
 #endif /* !__ASSEMBLY__ */
 
diff --git a/arch/ia64/xen/hypercall.S b/arch/ia64/xen/hypercall.S
index 45e02bb..e32dae4 100644
--- a/arch/ia64/xen/hypercall.S
+++ b/arch/ia64/xen/hypercall.S
@@ -9,6 +9,7 @@
 #include <asm/intrinsics.h>
 #include <asm/xen/privop.h>
 
+#ifdef __INTEL_COMPILER
 /*
  * Hypercalls without parameter.
  */
@@ -72,6 +73,7 @@ GLOBAL_ENTRY(xen_set_rr0_to_rr4)
br.ret.sptk.many rp
;;
 END(xen_set_rr0_to_rr4)
+#endif
 
 GLOBAL_ENTRY(xen_send_ipi)
mov r14=r32
diff --git a/arch/ia64/xen/xen_pv_ops.c b/arch/ia64/xen/xen_pv_ops.c
index bdf1acb..6c44225 100644
--- a/arch/ia64/xen/xen_pv_ops.c
+++ b/arch/ia64/xen/xen_pv_ops.c
@@ -154,6 +154,13 @@ xen_post_smp_prepare_boot_cpu(void)
xen_setup_vcpu_info_placement();
 }
 
+#ifdef ASM_SUPPORTED
+static unsigned long __init_or_module
+xen_patch_bundle(void *sbundle, void *ebundle, unsigned long type);
+#endif
+static void __init
+xen_patch_branch(unsigned long tag, unsigned long type);
+
 static const struct pv_init_ops xen_init_ops __initconst = {
.banner = xen_banner,
 
@@ -164,6 +171,10 @@ static const struct pv_init_ops xen_init_ops __initconst = 
{
.arch_setup_nomca = xen_arch_setup_nomca,
 
.post_smp_prepare_boot_cpu = xen_post_smp_prepare_boot_cpu,
+#ifdef ASM_SUPPORTED
+   .patch_bundle = xen_patch_bundle,
+#endif
+   .patch_branch = xen_patch_branch,
 };
 
 /***
@@ -214,6 +225,7 @@ static struct pv_patchdata xen_patchdata __initdata = {
  * intrinsics hooks.
  */
 
+#ifndef ASM_SUPPORTED
 static void
 xen_set_itm_with_offset(unsigned long val)
 {
@@ -381,6 +393,410 @@ xen_intrin_local_irq_restore(unsigned long mask)
else
xen_rsm_i();
 }
+#else
+#define __DEFINE_FUNC(name, code)  \
+   extern const char xen_ ## name ## _direct_start[];  \
+   extern const char xen_ ## name ## _direct_end[];\
+   asm (".align 32\n"  \
+".proc xen_" #name "\n"\
+"xen_" #name ":\n" \
+"xen_" #name "_direct_start:\n"\
+code   \
+"xen_" #name "_direct_end:\n"  \
+"br.cond.sptk.many b6\n"   \
+".endp xen_" #name "\n")
+
+#define DEFINE_VOID_FUNC0(name, code)  \
+   extern void \
+   xen_ ## name (void);\
+   __DEFINE_FUNC(name, code)
+
+#define DEFINE_VOID_FUNC1(name, code)  \
+   extern void \
+   xen_ ## name (unsigned long arg);   \
+   __DEFINE_FUNC(name, code)
+
+#define DEFINE_VOID_FUNC2(name, code)  \
+   extern void \
+   xen_ ## name (unsigned long arg0,   \
+ unsigned long arg1);  \
+   __DEFINE_FUNC(name, code)
+
+#define DEFINE_FUNC0(name, code)   \
+   extern unsigned long\
+   xen_ ## name (void);\
+   

[Xen-ia64-devel] [PATCH 5/5] ia64/pv_ops/bp/xen: implemented binary patchable pv_cpu_ops.

2008-12-21 Thread Isaku Yamahata
implemented xen binary patch for pv_cpu_ops.

Signed-off-by: Isaku Yamahata yamah...@valinux.co.jp
---
 arch/ia64/include/asm/xen/privop.h |4 +
 arch/ia64/xen/hypercall.S  |2 +
 arch/ia64/xen/xen_pv_ops.c |  665 
 3 files changed, 671 insertions(+), 0 deletions(-)

diff --git a/arch/ia64/include/asm/xen/privop.h 
b/arch/ia64/include/asm/xen/privop.h
index 2261dda..e5fbaee 100644
--- a/arch/ia64/include/asm/xen/privop.h
+++ b/arch/ia64/include/asm/xen/privop.h
@@ -82,8 +82,10 @@ extern unsigned long xen_thash(unsigned long addr);
 extern unsigned long xen_get_cpuid(int index);
 extern unsigned long xen_get_pmd(int index);
 
+#ifndef ASM_SUPPORTED
 extern unsigned long xen_get_eflag(void);  /* see xen_ia64_getreg */
 extern void xen_set_eflag(unsigned long);  /* see xen_ia64_setreg */
+#endif
 
 //
 /* Instructions paravirtualized for performance */
@@ -108,6 +110,7 @@ extern void xen_set_eflag(unsigned long);   /* see 
xen_ia64_setreg */
 #define xen_get_virtual_pend() \
	(*(((uint8_t *)XEN_MAPPEDREGS->interrupt_mask_addr) - 1))
 
+#ifndef ASM_SUPPORTED
 /* Although all privileged operations can be left to trap and will
  * be properly handled by Xen, some are frequent enough that we use
  * hyperprivops for performance. */
@@ -125,6 +128,7 @@ extern void xen_set_rr0_to_rr4(unsigned long val0, unsigned 
long val1,
   unsigned long val4);
 extern void xen_set_kr(unsigned long index, unsigned long val);
 extern void xen_ptcga(unsigned long addr, unsigned long size);
+#endif /* !ASM_SUPPORTED */
 
 #endif /* !__ASSEMBLY__ */
 
diff --git a/arch/ia64/xen/hypercall.S b/arch/ia64/xen/hypercall.S
index 45e02bb..e32dae4 100644
--- a/arch/ia64/xen/hypercall.S
+++ b/arch/ia64/xen/hypercall.S
@@ -9,6 +9,7 @@
 #include <asm/intrinsics.h>
 #include <asm/xen/privop.h>
 
+#ifdef __INTEL_COMPILER
 /*
  * Hypercalls without parameter.
  */
@@ -72,6 +73,7 @@ GLOBAL_ENTRY(xen_set_rr0_to_rr4)
br.ret.sptk.many rp
;;
 END(xen_set_rr0_to_rr4)
+#endif
 
 GLOBAL_ENTRY(xen_send_ipi)
mov r14=r32
diff --git a/arch/ia64/xen/xen_pv_ops.c b/arch/ia64/xen/xen_pv_ops.c
index eda13a8..7833226 100644
--- a/arch/ia64/xen/xen_pv_ops.c
+++ b/arch/ia64/xen/xen_pv_ops.c
@@ -154,6 +154,13 @@ xen_post_smp_prepare_boot_cpu(void)
xen_setup_vcpu_info_placement();
 }
 
+#ifdef ASM_SUPPORTED
+static unsigned long __init_or_module
+xen_patch_bundle(void *sbundle, void *ebundle, unsigned long type);
+#endif
+static void __init
+xen_patch_branch(unsigned long tag, unsigned long type);
+
 static struct pv_init_ops xen_init_ops __initdata = {
.banner = xen_banner,
 
@@ -164,6 +171,10 @@ static struct pv_init_ops xen_init_ops __initdata = {
.arch_setup_nomca = xen_arch_setup_nomca,
 
.post_smp_prepare_boot_cpu = xen_post_smp_prepare_boot_cpu,
+#ifdef ASM_SUPPORTED
+   .patch_bundle = xen_patch_bundle,
+#endif
+   .patch_branch = xen_patch_branch,
 };
 
 /***
@@ -214,6 +225,7 @@ static struct pv_patchdata xen_patchdata __initdata = {
  * intrinsics hooks.
  */
 
+#ifndef ASM_SUPPORTED
 static void
 xen_set_itm_with_offset(unsigned long val)
 {
@@ -381,6 +393,410 @@ xen_intrin_local_irq_restore(unsigned long mask)
else
xen_rsm_i();
 }
+#else
+#define __DEFINE_FUNC(name, code)  \
+   extern const char xen_ ## name ## _direct_start[];  \
+   extern const char xen_ ## name ## _direct_end[];\
+   asm (".align 32\n"  \
+".proc xen_" #name "\n"\
+"xen_" #name ":\n" \
+"xen_" #name "_direct_start:\n"\
+code   \
+"xen_" #name "_direct_end:\n"  \
+"br.cond.sptk.many b6\n"   \
+".endp xen_" #name "\n")
+
+#define DEFINE_VOID_FUNC0(name, code)  \
+   extern void \
+   xen_ ## name (void);\
+   __DEFINE_FUNC(name, code)
+
+#define DEFINE_VOID_FUNC1(name, code)  \
+   extern void \
+   xen_ ## name (unsigned long arg);   \
+   __DEFINE_FUNC(name, code)
+
+#define DEFINE_VOID_FUNC2(name, code)  \
+   extern void \
+   xen_ ## name (unsigned long arg0,   \
+ unsigned long arg1);  \
+   __DEFINE_FUNC(name, code)
+
+#define DEFINE_FUNC0(name, code)   \
+   extern unsigned long\
+   xen_ ## name (void);\
+   

[Xen-ia64-devel] [PATCH 5/5] ia64/pv_ops/bp/xen: implemented binary patchable pv_cpu_ops.

2008-12-11 Thread Isaku Yamahata
implemented xen binary patch for pv_cpu_ops.

Signed-off-by: Isaku Yamahata yamah...@valinux.co.jp
---
 arch/ia64/include/asm/xen/privop.h |4 +
 arch/ia64/xen/hypercall.S  |2 +
 arch/ia64/xen/xen_pv_ops.c |  665 
 3 files changed, 671 insertions(+), 0 deletions(-)

diff --git a/arch/ia64/include/asm/xen/privop.h 
b/arch/ia64/include/asm/xen/privop.h
index 2261dda..e5fbaee 100644
--- a/arch/ia64/include/asm/xen/privop.h
+++ b/arch/ia64/include/asm/xen/privop.h
@@ -82,8 +82,10 @@ extern unsigned long xen_thash(unsigned long addr);
 extern unsigned long xen_get_cpuid(int index);
 extern unsigned long xen_get_pmd(int index);
 
+#ifndef ASM_SUPPORTED
 extern unsigned long xen_get_eflag(void);  /* see xen_ia64_getreg */
 extern void xen_set_eflag(unsigned long);  /* see xen_ia64_setreg */
+#endif
 
 //
 /* Instructions paravirtualized for performance */
@@ -108,6 +110,7 @@ extern void xen_set_eflag(unsigned long);   /* see 
xen_ia64_setreg */
 #define xen_get_virtual_pend() \
	(*(((uint8_t *)XEN_MAPPEDREGS->interrupt_mask_addr) - 1))
 
+#ifndef ASM_SUPPORTED
 /* Although all privileged operations can be left to trap and will
  * be properly handled by Xen, some are frequent enough that we use
  * hyperprivops for performance. */
@@ -125,6 +128,7 @@ extern void xen_set_rr0_to_rr4(unsigned long val0, unsigned 
long val1,
   unsigned long val4);
 extern void xen_set_kr(unsigned long index, unsigned long val);
 extern void xen_ptcga(unsigned long addr, unsigned long size);
+#endif /* !ASM_SUPPORTED */
 
 #endif /* !__ASSEMBLY__ */
 
diff --git a/arch/ia64/xen/hypercall.S b/arch/ia64/xen/hypercall.S
index 45e02bb..e32dae4 100644
--- a/arch/ia64/xen/hypercall.S
+++ b/arch/ia64/xen/hypercall.S
@@ -9,6 +9,7 @@
 #include <asm/intrinsics.h>
 #include <asm/xen/privop.h>
 
+#ifdef __INTEL_COMPILER
 /*
  * Hypercalls without parameter.
  */
@@ -72,6 +73,7 @@ GLOBAL_ENTRY(xen_set_rr0_to_rr4)
br.ret.sptk.many rp
;;
 END(xen_set_rr0_to_rr4)
+#endif
 
 GLOBAL_ENTRY(xen_send_ipi)
mov r14=r32
diff --git a/arch/ia64/xen/xen_pv_ops.c b/arch/ia64/xen/xen_pv_ops.c
index eda13a8..7833226 100644
--- a/arch/ia64/xen/xen_pv_ops.c
+++ b/arch/ia64/xen/xen_pv_ops.c
@@ -154,6 +154,13 @@ xen_post_smp_prepare_boot_cpu(void)
xen_setup_vcpu_info_placement();
 }
 
+#ifdef ASM_SUPPORTED
+static unsigned long __init_or_module
+xen_patch_bundle(void *sbundle, void *ebundle, unsigned long type);
+#endif
+static void __init
+xen_patch_branch(unsigned long tag, unsigned long type);
+
 static struct pv_init_ops xen_init_ops __initdata = {
.banner = xen_banner,
 
@@ -164,6 +171,10 @@ static struct pv_init_ops xen_init_ops __initdata = {
.arch_setup_nomca = xen_arch_setup_nomca,
 
.post_smp_prepare_boot_cpu = xen_post_smp_prepare_boot_cpu,
+#ifdef ASM_SUPPORTED
+   .patch_bundle = xen_patch_bundle,
+#endif
+   .patch_branch = xen_patch_branch,
 };
 
 /***
@@ -214,6 +225,7 @@ static struct pv_patchdata xen_patchdata __initdata = {
  * intrinsics hooks.
  */
 
+#ifndef ASM_SUPPORTED
 static void
 xen_set_itm_with_offset(unsigned long val)
 {
@@ -381,6 +393,410 @@ xen_intrin_local_irq_restore(unsigned long mask)
else
xen_rsm_i();
 }
+#else
+#define __DEFINE_FUNC(name, code)  \
+   extern const char xen_ ## name ## _direct_start[];  \
+   extern const char xen_ ## name ## _direct_end[];\
+   asm (".align 32\n"  \
+".proc xen_" #name "\n"\
+"xen_" #name ":\n" \
+"xen_" #name "_direct_start:\n"\
+code   \
+"xen_" #name "_direct_end:\n"  \
+"br.cond.sptk.many b6\n"   \
+".endp xen_" #name "\n")
+
+#define DEFINE_VOID_FUNC0(name, code)  \
+   extern void \
+   xen_ ## name (void);\
+   __DEFINE_FUNC(name, code)
+
+#define DEFINE_VOID_FUNC1(name, code)  \
+   extern void \
+   xen_ ## name (unsigned long arg);   \
+   __DEFINE_FUNC(name, code)
+
+#define DEFINE_VOID_FUNC2(name, code)  \
+   extern void \
+   xen_ ## name (unsigned long arg0,   \
+ unsigned long arg1);  \
+   __DEFINE_FUNC(name, code)
+
+#define DEFINE_FUNC0(name, code)   \
+   extern unsigned long\
+   xen_ ## name (void);\
+   

[Xen-ia64-devel] [PATCH 5/5] ia64/pv_ops/bp/xen: implemented binary patchable pv_cpu_ops.

2008-11-25 Thread Isaku Yamahata
implemented xen binary patch for pv_cpu_ops.

Signed-off-by: Isaku Yamahata [EMAIL PROTECTED]
---
 arch/ia64/include/asm/xen/privop.h |4 +
 arch/ia64/xen/hypercall.S  |2 +
 arch/ia64/xen/xen_pv_ops.c |  667 
 3 files changed, 673 insertions(+), 0 deletions(-)

diff --git a/arch/ia64/include/asm/xen/privop.h 
b/arch/ia64/include/asm/xen/privop.h
index 2261dda..e5fbaee 100644
--- a/arch/ia64/include/asm/xen/privop.h
+++ b/arch/ia64/include/asm/xen/privop.h
@@ -82,8 +82,10 @@ extern unsigned long xen_thash(unsigned long addr);
 extern unsigned long xen_get_cpuid(int index);
 extern unsigned long xen_get_pmd(int index);
 
+#ifndef ASM_SUPPORTED
 extern unsigned long xen_get_eflag(void);  /* see xen_ia64_getreg */
 extern void xen_set_eflag(unsigned long);  /* see xen_ia64_setreg */
+#endif
 
 //
 /* Instructions paravirtualized for performance */
@@ -108,6 +110,7 @@ extern void xen_set_eflag(unsigned long);   /* see 
xen_ia64_setreg */
 #define xen_get_virtual_pend() \
	(*(((uint8_t *)XEN_MAPPEDREGS->interrupt_mask_addr) - 1))
 
+#ifndef ASM_SUPPORTED
 /* Although all privileged operations can be left to trap and will
  * be properly handled by Xen, some are frequent enough that we use
  * hyperprivops for performance. */
@@ -125,6 +128,7 @@ extern void xen_set_rr0_to_rr4(unsigned long val0, unsigned 
long val1,
   unsigned long val4);
 extern void xen_set_kr(unsigned long index, unsigned long val);
 extern void xen_ptcga(unsigned long addr, unsigned long size);
+#endif /* !ASM_SUPPORTED */
 
 #endif /* !__ASSEMBLY__ */
 
diff --git a/arch/ia64/xen/hypercall.S b/arch/ia64/xen/hypercall.S
index 45e02bb..e32dae4 100644
--- a/arch/ia64/xen/hypercall.S
+++ b/arch/ia64/xen/hypercall.S
@@ -9,6 +9,7 @@
 #include <asm/intrinsics.h>
 #include <asm/xen/privop.h>
 
+#ifdef __INTEL_COMPILER
 /*
  * Hypercalls without parameter.
  */
@@ -72,6 +73,7 @@ GLOBAL_ENTRY(xen_set_rr0_to_rr4)
br.ret.sptk.many rp
;;
 END(xen_set_rr0_to_rr4)
+#endif
 
 GLOBAL_ENTRY(xen_send_ipi)
mov r14=r32
diff --git a/arch/ia64/xen/xen_pv_ops.c b/arch/ia64/xen/xen_pv_ops.c
index eda13a8..a2825fa 100644
--- a/arch/ia64/xen/xen_pv_ops.c
+++ b/arch/ia64/xen/xen_pv_ops.c
@@ -154,6 +154,13 @@ xen_post_smp_prepare_boot_cpu(void)
xen_setup_vcpu_info_placement();
 }
 
+#ifdef ASM_SUPPORTED
+static unsigned long __init_or_module
+xen_patch_bundle(void *sbundle, void *ebundle, unsigned long type);
+#endif
+static void __init
+xen_patch_branch(unsigned long tag, unsigned long type);
+
 static struct pv_init_ops xen_init_ops __initdata = {
.banner = xen_banner,
 
@@ -164,6 +171,10 @@ static struct pv_init_ops xen_init_ops __initdata = {
.arch_setup_nomca = xen_arch_setup_nomca,
 
.post_smp_prepare_boot_cpu = xen_post_smp_prepare_boot_cpu,
+#ifdef ASM_SUPPORTED
+   .patch_bundle = xen_patch_bundle,
+#endif
+   .patch_branch = xen_patch_branch,
 };
 
 /***
@@ -214,6 +225,7 @@ static struct pv_patchdata xen_patchdata __initdata = {
  * intrinsics hooks.
  */
 
+#ifndef ASM_SUPPORTED
 static void
 xen_set_itm_with_offset(unsigned long val)
 {
@@ -381,6 +393,412 @@ xen_intrin_local_irq_restore(unsigned long mask)
else
xen_rsm_i();
 }
+#else
+#define __DEFINE_FUNC(name, code)  \
+   extern const char xen_ ## name ## _direct_start[];  \
+   extern const char xen_ ## name ## _direct_end[];\
+   asm (".align 32\n"  \
+".proc xen_" #name "\n"\
+"xen_" #name ":\n" \
+"xen_" #name "_direct_start:\n"\
+code   \
+"xen_" #name "_direct_end:\n"  \
+"br.cond.sptk.many b6\n"   \
+".endp xen_" #name "\n")
+
+#define DEFINE_VOID_FUNC0(name, code)  \
+   extern void \
+   xen_ ## name (void);\
+   __DEFINE_FUNC(name, code)
+
+#define DEFINE_VOID_FUNC1(name, code)  \
+   extern void \
+   xen_ ## name (unsigned long arg);   \
+   __DEFINE_FUNC(name, code)
+
+#define DEFINE_VOID_FUNC2(name, code)  \
+   extern void \
+   xen_ ## name (unsigned long arg0,   \
+ unsigned long arg1);  \
+   __DEFINE_FUNC(name, code)
+
+#define DEFINE_FUNC0(name, code)   \
+   extern unsigned long\
+   xen_ ## name (void);\
+   __DEFINE_FUNC(name,