diff -Naur linux-2.6.13/arch/ia64/Kconfig linux-2.6.13-Xenpatch/arch/ia64/Kconfig
--- linux-2.6.13/arch/ia64/Kconfig	2005-08-28 17:41:01.000000000 -0600
+++ linux-2.6.13-Xenpatch/arch/ia64/Kconfig	2005-08-29 10:39:15.000000000 -0600
@@ -46,6 +46,13 @@
 	bool
 	default y
 
+config XEN
+	bool
+	default n
+	help
+	  Enable Xen hypervisor support.  The resulting kernel runs both
+	  as a guest OS on Xen and natively on hardware.
+
 config SCHED_NO_NO_OMIT_FRAME_POINTER
 	bool
 	default y
diff -Naur linux-2.6.13/arch/ia64/kernel/entry.S linux-2.6.13-Xenpatch/arch/ia64/kernel/entry.S
--- linux-2.6.13/arch/ia64/kernel/entry.S	2005-08-28 17:41:01.000000000 -0600
+++ linux-2.6.13-Xenpatch/arch/ia64/kernel/entry.S	2005-08-29 10:41:55.000000000 -0600
@@ -181,7 +181,7 @@
  *	called.  The code starting at .map relies on this.  The rest of the code
  *	doesn't care about the interrupt masking status.
  */
-GLOBAL_ENTRY(ia64_switch_to)
+GLOBAL_ENTRY(__ia64_switch_to)
 	.prologue
 	alloc r16=ar.pfs,1,0,0,0
 	DO_SAVE_SWITCH_STACK
@@ -235,7 +235,7 @@
 	;;
 	itr.d dtr[r25]=r23		// wire in new mapping...
 	br.cond.sptk .done
-END(ia64_switch_to)
+END(__ia64_switch_to)
 
 /*
  * Note that interrupts are enabled during save_switch_stack and load_switch_stack.  This
@@ -376,7 +376,7 @@
  *	- b7 holds address to return to
  *	- must not touch r8-r11
  */
-ENTRY(load_switch_stack)
+GLOBAL_ENTRY(load_switch_stack)
 	.prologue
 	.altrp b7
 
@@ -488,7 +488,7 @@
 	 * because some system calls (such as ia64_execve) directly
 	 * manipulate ar.pfs.
 	 */
-GLOBAL_ENTRY(ia64_trace_syscall)
+GLOBAL_ENTRY(__ia64_trace_syscall)
 	PT_REGS_UNWIND_INFO(0)
 	/*
 	 * We need to preserve the scratch registers f6-f11 in case the system
@@ -558,7 +558,7 @@
 (p6)	mov r10=-1
 (p6)	mov r8=r9
 	br.cond.sptk .strace_save_retval
-END(ia64_trace_syscall)
+END(__ia64_trace_syscall)
 
 	/*
 	 * When traced and returning from sigreturn, we invoke syscall_trace but then
@@ -611,8 +611,11 @@
 	adds r2=PT(R8)+16,sp			// r2 = &pt_regs.r8
 	mov r10=r0				// clear error indication in r10
 (p7)	br.cond.spnt handle_syscall_error	// handle potential syscall failure
+	;;
+	// don't fall through, ia64_leave_syscall may be #define'd
+	br.cond.sptk.few ia64_leave_syscall
+	;;
 END(ia64_ret_from_syscall)
-	// fall through
 /*
  * ia64_leave_syscall(): Same as ia64_leave_kernel, except that it doesn't
  *	need to switch to bank 0 and doesn't restore the scratch registers.
@@ -657,7 +660,7 @@
  *	      ar.csd: cleared
  *	      ar.ssd: cleared
  */
-ENTRY(ia64_leave_syscall)
+GLOBAL_ENTRY(__ia64_leave_syscall)
 	PT_REGS_UNWIND_INFO(0)
 	/*
 	 * work.need_resched etc. mustn't get changed by this CPU before it returns to
@@ -765,7 +768,7 @@
 	mov.m ar.ssd=r0			// M2   clear ar.ssd
 	mov f11=f0			// F    clear f11
 	br.cond.sptk.many rbs_switch	// B
-END(ia64_leave_syscall)
+END(__ia64_leave_syscall)
 
 #ifdef CONFIG_IA32_SUPPORT
 GLOBAL_ENTRY(ia64_ret_from_ia32_execve)
@@ -777,10 +780,13 @@
 	st8.spill [r2]=r8	// store return value in slot for r8 and set unat bit
 	.mem.offset 8,0
 	st8.spill [r3]=r0	// clear error indication in slot for r10 and set unat bit
+	;;
+	// don't fall through, ia64_leave_kernel may be #define'd
+	br.cond.sptk.few ia64_leave_kernel
+	;;
 END(ia64_ret_from_ia32_execve)
-	// fall through
 #endif /* CONFIG_IA32_SUPPORT */
-GLOBAL_ENTRY(ia64_leave_kernel)
+GLOBAL_ENTRY(__ia64_leave_kernel)
 	PT_REGS_UNWIND_INFO(0)
 	/*
 	 * work.need_resched etc. mustn't get changed by this CPU before it returns to
@@ -1125,7 +1131,7 @@
 	ld8 r10=[r3]
 	br.cond.sptk.many .work_processed_syscall	// re-check
 
-END(ia64_leave_kernel)
+END(__ia64_leave_kernel)
 
 ENTRY(handle_syscall_error)
 	/*
@@ -1165,7 +1171,7 @@
 	 * be set up by the caller.  We declare 8 input registers so the system call
 	 * args get preserved, in case we need to restart a system call.
 	 */
-ENTRY(notify_resume_user)
+GLOBAL_ENTRY(notify_resume_user)
 	.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8)
 	alloc loc1=ar.pfs,8,2,3,0 // preserve all eight input regs in case of syscall restart!
 	mov r9=ar.unat
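
The entry.S changes above all follow one pattern: routines that a paravirtualized
kernel needs to intercept (ia64_switch_to, ia64_trace_syscall, ia64_leave_syscall,
ia64_leave_kernel) gain a __ prefix, and code that used to fall through into them
now branches explicitly, because the unprefixed name may be #define'd to something
else at preprocessing time.  On a native build, the alias added later in this patch
in include/asm-ia64/privop.h maps the name straight back to the renamed routine, so
the branch lands exactly where the old fall-through did:

    /* From include/asm-ia64/privop.h (added below).  With this alias in effect,
     * the explicit branch
     *     br.cond.sptk.few ia64_leave_syscall
     * preprocesses into
     *     br.cond.sptk.few __ia64_leave_syscall
     * on a native build; a Xen build can point the same name elsewhere. */
    #define ia64_leave_syscall    __ia64_leave_syscall
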
diff -Naur linux-2.6.13/arch/ia64/kernel/head.S linux-2.6.13-Xenpatch/arch/ia64/kernel/head.S
--- linux-2.6.13/arch/ia64/kernel/head.S	2005-08-28 17:41:01.000000000 -0600
+++ linux-2.6.13-Xenpatch/arch/ia64/kernel/head.S	2005-08-29 10:39:15.000000000 -0600
@@ -370,6 +370,10 @@
 
 	// This is executed by the bootstrap processor (bsp) only:
 
+#ifdef CONFIG_XEN
+	br.call.sptk.many rp=early_xen_setup
+	;;
+#endif
 #ifdef CONFIG_IA64_FW_EMU
 	// initialize PAL & SAL emulator:
 	br.call.sptk.many rp=sys_fw_init
diff -Naur linux-2.6.13/arch/ia64/kernel/pal.S linux-2.6.13-Xenpatch/arch/ia64/kernel/pal.S
--- linux-2.6.13/arch/ia64/kernel/pal.S	2005-08-28 17:41:01.000000000 -0600
+++ linux-2.6.13-Xenpatch/arch/ia64/kernel/pal.S	2005-08-29 10:39:15.000000000 -0600
@@ -16,6 +16,7 @@
 #include <asm/processor.h>
 
 	.data
+	.globl pal_entry_point
 pal_entry_point:
 	data8 ia64_pal_default_handler
 	.text
@@ -53,7 +54,7 @@
  * in4	       1 ==> clear psr.ic,  0 ==> don't clear psr.ic
  *
  */
-GLOBAL_ENTRY(ia64_pal_call_static)
+GLOBAL_ENTRY(__ia64_pal_call_static)
 	.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(5)
 	alloc loc1 = ar.pfs,5,5,0,0
 	movl loc2 = pal_entry_point
@@ -90,7 +91,7 @@
 	;;
 	srlz.d				// seralize restoration of psr.l
 	br.ret.sptk.many b0
-END(ia64_pal_call_static)
+END(__ia64_pal_call_static)
 
 /*
  * Make a PAL call using the stacked registers calling convention.
diff -Naur linux-2.6.13/arch/ia64/kernel/setup.c linux-2.6.13-Xenpatch/arch/ia64/kernel/setup.c
--- linux-2.6.13/arch/ia64/kernel/setup.c	2005-08-28 17:41:01.000000000 -0600
+++ linux-2.6.13-Xenpatch/arch/ia64/kernel/setup.c	2005-08-29 10:42:39.000000000 -0600
@@ -287,6 +287,10 @@
 {
 	int earlycons = 0;
 
+#ifdef CONFIG_XEN
+	if (!early_xen_console_setup(cmdline))
+		earlycons++;
+#endif
 #ifdef CONFIG_SERIAL_SGI_L1_CONSOLE
 	{
 		extern int sn_serial_console_early_setup(void);
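
In early_console_setup() above, the Xen hook follows the same convention as the
other early-console probes in this function: a return value of 0 means the console
was claimed, so earlycons is incremented on success.  A sketch of the assumed
prototype only; the implementation itself would live under arch/ia64/xen/, which
this patch does not add:

    /* Assumed interface: returns 0 if a Xen early console was set up from the
     * kernel command line, non-zero otherwise (e.g. when running natively). */
    extern int early_xen_console_setup(char *cmdline);
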
diff -Naur linux-2.6.13/arch/ia64/Makefile linux-2.6.13-Xenpatch/arch/ia64/Makefile
--- linux-2.6.13/arch/ia64/Makefile	2005-08-28 17:41:01.000000000 -0600
+++ linux-2.6.13-Xenpatch/arch/ia64/Makefile	2005-08-29 10:39:15.000000000 -0600
@@ -57,6 +57,7 @@
 core-$(CONFIG_IA64_HP_ZX1)	+= arch/ia64/dig/
 core-$(CONFIG_IA64_HP_ZX1_SWIOTLB) += arch/ia64/dig/
 core-$(CONFIG_IA64_SGI_SN2)	+= arch/ia64/sn/
+core-$(CONFIG_XEN)		+= arch/ia64/xen/
 
 drivers-$(CONFIG_PCI)		+= arch/ia64/pci/
 drivers-$(CONFIG_IA64_HP_SIM)	+= arch/ia64/hp/sim/
diff -Naur linux-2.6.13/include/asm-ia64/gcc_intrin.h linux-2.6.13-Xenpatch/include/asm-ia64/gcc_intrin.h
--- linux-2.6.13/include/asm-ia64/gcc_intrin.h	2005-08-28 17:41:01.000000000 -0600
+++ linux-2.6.13-Xenpatch/include/asm-ia64/gcc_intrin.h	2005-08-29 10:39:15.000000000 -0600
@@ -26,7 +26,7 @@
 
 register unsigned long ia64_r13 asm ("r13") __attribute_used__;
 
-#define ia64_setreg(regnum, val)						\
+#define __ia64_setreg(regnum, val)						\
 ({										\
 	switch (regnum) {							\
 	    case _IA64_REG_PSR_L:						\
@@ -55,7 +55,7 @@
 	}									\
 })
 
-#define ia64_getreg(regnum)							\
+#define __ia64_getreg(regnum)							\
 ({										\
 	__u64 ia64_intri_res;							\
 										\
@@ -92,7 +92,7 @@
 
 #define ia64_hint_pause 0
 
-#define ia64_hint(mode)						\
+#define __ia64_hint(mode)						\
 ({								\
 	switch (mode) {						\
 	case ia64_hint_pause:					\
@@ -374,7 +374,7 @@
 
 #define ia64_invala() asm volatile ("invala" ::: "memory")
 
-#define ia64_thash(addr)							\
+#define __ia64_thash(addr)							\
 ({										\
 	__u64 ia64_intri_res;							\
 	asm volatile ("thash %0=%1" : "=r"(ia64_intri_res) : "r" (addr));	\
@@ -394,18 +394,18 @@
 
 #define ia64_nop(x)	asm volatile ("nop %0"::"i"(x));
 
-#define ia64_itci(addr)	asm volatile ("itc.i %0;;" :: "r"(addr) : "memory")
+#define __ia64_itci(addr)	asm volatile ("itc.i %0;;" :: "r"(addr) : "memory")
 
-#define ia64_itcd(addr)	asm volatile ("itc.d %0;;" :: "r"(addr) : "memory")
+#define __ia64_itcd(addr)	asm volatile ("itc.d %0;;" :: "r"(addr) : "memory")
 
 
-#define ia64_itri(trnum, addr) asm volatile ("itr.i itr[%0]=%1"				\
+#define __ia64_itri(trnum, addr) asm volatile ("itr.i itr[%0]=%1"			\
 					     :: "r"(trnum), "r"(addr) : "memory")
 
-#define ia64_itrd(trnum, addr) asm volatile ("itr.d dtr[%0]=%1"				\
+#define __ia64_itrd(trnum, addr) asm volatile ("itr.d dtr[%0]=%1"			\
 					     :: "r"(trnum), "r"(addr) : "memory")
 
-#define ia64_tpa(addr)								\
+#define __ia64_tpa(addr)							\
 ({										\
 	__u64 ia64_pa;								\
 	asm volatile ("tpa %0 = %1" : "=r"(ia64_pa) : "r"(addr) : "memory");	\
@@ -415,22 +415,22 @@
 #define __ia64_set_dbr(index, val)						\
 	asm volatile ("mov dbr[%0]=%1" :: "r"(index), "r"(val) : "memory")
 
-#define ia64_set_ibr(index, val)						\
+#define __ia64_set_ibr(index, val)						\
 	asm volatile ("mov ibr[%0]=%1" :: "r"(index), "r"(val) : "memory")
 
-#define ia64_set_pkr(index, val)						\
+#define __ia64_set_pkr(index, val)						\
 	asm volatile ("mov pkr[%0]=%1" :: "r"(index), "r"(val) : "memory")
 
-#define ia64_set_pmc(index, val)						\
+#define __ia64_set_pmc(index, val)						\
 	asm volatile ("mov pmc[%0]=%1" :: "r"(index), "r"(val) : "memory")
 
-#define ia64_set_pmd(index, val)						\
+#define __ia64_set_pmd(index, val)						\
 	asm volatile ("mov pmd[%0]=%1" :: "r"(index), "r"(val) : "memory")
 
-#define ia64_set_rr(index, val)							\
+#define __ia64_set_rr(index, val)							\
 	asm volatile ("mov rr[%0]=%1" :: "r"(index), "r"(val) : "memory");
 
-#define ia64_get_cpuid(index)								\
+#define __ia64_get_cpuid(index)								\
 ({											\
 	__u64 ia64_intri_res;								\
 	asm volatile ("mov %0=cpuid[%r1]" : "=r"(ia64_intri_res) : "rO"(index));	\
@@ -444,21 +444,21 @@
 	ia64_intri_res;								\
 })
 
-#define ia64_get_ibr(index)							\
+#define __ia64_get_ibr(index)							\
 ({										\
 	__u64 ia64_intri_res;							\
 	asm volatile ("mov %0=ibr[%1]" : "=r"(ia64_intri_res) : "r"(index));	\
 	ia64_intri_res;								\
 })
 
-#define ia64_get_pkr(index)							\
+#define __ia64_get_pkr(index)							\
 ({										\
 	__u64 ia64_intri_res;							\
 	asm volatile ("mov %0=pkr[%1]" : "=r"(ia64_intri_res) : "r"(index));	\
 	ia64_intri_res;								\
 })
 
-#define ia64_get_pmc(index)							\
+#define __ia64_get_pmc(index)							\
 ({										\
 	__u64 ia64_intri_res;							\
 	asm volatile ("mov %0=pmc[%1]" : "=r"(ia64_intri_res) : "r"(index));	\
@@ -466,48 +466,48 @@
 })
 
 
-#define ia64_get_pmd(index)							\
+#define __ia64_get_pmd(index)							\
 ({										\
 	__u64 ia64_intri_res;							\
 	asm volatile ("mov %0=pmd[%1]" : "=r"(ia64_intri_res) : "r"(index));	\
 	ia64_intri_res;								\
 })
 
-#define ia64_get_rr(index)							\
+#define __ia64_get_rr(index)							\
 ({										\
 	__u64 ia64_intri_res;							\
 	asm volatile ("mov %0=rr[%1]" : "=r"(ia64_intri_res) : "r" (index));	\
 	ia64_intri_res;								\
 })
 
-#define ia64_fc(addr)	asm volatile ("fc %0" :: "r"(addr) : "memory")
+#define __ia64_fc(addr)	asm volatile ("fc %0" :: "r"(addr) : "memory")
 
 
 #define ia64_sync_i()	asm volatile (";; sync.i" ::: "memory")
 
-#define ia64_ssm(mask)	asm volatile ("ssm %0":: "i"((mask)) : "memory")
-#define ia64_rsm(mask)	asm volatile ("rsm %0":: "i"((mask)) : "memory")
+#define __ia64_ssm(mask)	asm volatile ("ssm %0":: "i"((mask)) : "memory")
+#define __ia64_rsm(mask)	asm volatile ("rsm %0":: "i"((mask)) : "memory")
 #define ia64_sum(mask)	asm volatile ("sum %0":: "i"((mask)) : "memory")
 #define ia64_rum(mask)	asm volatile ("rum %0":: "i"((mask)) : "memory")
 
-#define ia64_ptce(addr)	asm volatile ("ptc.e %0" :: "r"(addr))
+#define __ia64_ptce(addr)	asm volatile ("ptc.e %0" :: "r"(addr))
 
-#define ia64_ptcga(addr, size)							\
+#define __ia64_ptcga(addr, size)							\
 do {										\
 	asm volatile ("ptc.ga %0,%1" :: "r"(addr), "r"(size) : "memory");	\
 	ia64_dv_serialize_data();						\
 } while (0)
 
-#define ia64_ptcl(addr, size)							\
+#define __ia64_ptcl(addr, size)							\
 do {										\
 	asm volatile ("ptc.l %0,%1" :: "r"(addr), "r"(size) : "memory");	\
 	ia64_dv_serialize_data();						\
 } while (0)
 
-#define ia64_ptri(addr, size)						\
+#define __ia64_ptri(addr, size)						\
 	asm volatile ("ptr.i %0,%1" :: "r"(addr), "r"(size) : "memory")
 
-#define ia64_ptrd(addr, size)						\
+#define __ia64_ptrd(addr, size)						\
 	asm volatile ("ptr.d %0,%1" :: "r"(addr), "r"(size) : "memory")
 
 /* Values for lfhint in ia64_lfetch and ia64_lfetch_fault */
@@ -589,7 +589,7 @@
         }								\
 })
 
-#define ia64_intrin_local_irq_restore(x)			\
+#define __ia64_intrin_local_irq_restore(x)			\
 do {								\
 	asm volatile (";;   cmp.ne p6,p7=%0,r0;;"		\
 		      "(p6) ssm psr.i;"				\
@@ -598,4 +598,6 @@
 		      :: "r"((x)) : "p6", "p7", "memory");	\
 } while (0)
 
+#define __ia64_get_psr_i()	(__ia64_getreg(_IA64_REG_PSR) & 0x4000UL)
+
 #endif /* _ASM_IA64_GCC_INTRIN_H */
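
The new __ia64_get_psr_i() isolates bit 14 of the PSR, the interrupt-enable bit:
0x4000UL is the same value as IA64_PSR_I (1UL << IA64_PSR_I_BIT, with
IA64_PSR_I_BIT == 14) from <asm/kregs.h>.  An equivalent spelling, shown only for
illustration:

    /* Same test written with the kregs.h constant; the result is non-zero
     * exactly when interrupts are enabled. */
    #define __ia64_get_psr_i()  (__ia64_getreg(_IA64_REG_PSR) & IA64_PSR_I)
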
diff -Naur linux-2.6.13/include/asm-ia64/intel_intrin.h linux-2.6.13-Xenpatch/include/asm-ia64/intel_intrin.h
--- linux-2.6.13/include/asm-ia64/intel_intrin.h	2005-08-28 17:41:01.000000000 -0600
+++ linux-2.6.13-Xenpatch/include/asm-ia64/intel_intrin.h	2005-08-29 10:39:15.000000000 -0600
@@ -119,10 +119,10 @@
 		 	 * intrinsic
 		 	 */
 
-#define ia64_getreg		__getReg
-#define ia64_setreg		__setReg
+#define __ia64_getreg		__getReg
+#define __ia64_setreg		__setReg
 
-#define ia64_hint(x)
+#define __ia64_hint(x)
 
 #define ia64_mux1_brcst	 0
 #define ia64_mux1_mix		 8
@@ -135,16 +135,16 @@
 #define ia64_getf_exp		__getf_exp
 #define ia64_shrp		_m64_shrp
 
-#define ia64_tpa		__tpa
+#define __ia64_tpa		__tpa
 #define ia64_invala		__invala
 #define ia64_invala_gr		__invala_gr
 #define ia64_invala_fr		__invala_fr
 #define ia64_nop		__nop
 #define ia64_sum		__sum
-#define ia64_ssm		__ssm
+#define __ia64_ssm		__ssm
 #define ia64_rum		__rum
-#define ia64_rsm		__rsm
-#define ia64_fc 		__fc
+#define __ia64_rsm		__rsm
+#define __ia64_fc 		__fc
 
 #define ia64_ldfs		__ldfs
 #define ia64_ldfd		__ldfd
@@ -182,24 +182,24 @@
 
 #define __ia64_set_dbr(index, val)	\
 		__setIndReg(_IA64_REG_INDR_DBR, index, val)
-#define ia64_set_ibr(index, val)	\
+#define __ia64_set_ibr(index, val)	\
 		__setIndReg(_IA64_REG_INDR_IBR, index, val)
-#define ia64_set_pkr(index, val)	\
+#define __ia64_set_pkr(index, val)	\
 		__setIndReg(_IA64_REG_INDR_PKR, index, val)
-#define ia64_set_pmc(index, val)	\
+#define __ia64_set_pmc(index, val)	\
 		__setIndReg(_IA64_REG_INDR_PMC, index, val)
-#define ia64_set_pmd(index, val)	\
+#define __ia64_set_pmd(index, val)	\
 		__setIndReg(_IA64_REG_INDR_PMD, index, val)
-#define ia64_set_rr(index, val)	\
+#define __ia64_set_rr(index, val)	\
 		__setIndReg(_IA64_REG_INDR_RR, index, val)
 
-#define ia64_get_cpuid(index) 	__getIndReg(_IA64_REG_INDR_CPUID, index)
+#define __ia64_get_cpuid(index) 	__getIndReg(_IA64_REG_INDR_CPUID, index)
 #define __ia64_get_dbr(index) 	__getIndReg(_IA64_REG_INDR_DBR, index)
-#define ia64_get_ibr(index) 	__getIndReg(_IA64_REG_INDR_IBR, index)
-#define ia64_get_pkr(index) 	__getIndReg(_IA64_REG_INDR_PKR, index)
-#define ia64_get_pmc(index) 	__getIndReg(_IA64_REG_INDR_PMC, index)
-#define ia64_get_pmd(index)  	__getIndReg(_IA64_REG_INDR_PMD, index)
-#define ia64_get_rr(index) 	__getIndReg(_IA64_REG_INDR_RR, index)
+#define __ia64_get_ibr(index) 	__getIndReg(_IA64_REG_INDR_IBR, index)
+#define __ia64_get_pkr(index) 	__getIndReg(_IA64_REG_INDR_PKR, index)
+#define __ia64_get_pmc(index) 	__getIndReg(_IA64_REG_INDR_PMC, index)
+#define __ia64_get_pmd(index)  	__getIndReg(_IA64_REG_INDR_PMD, index)
+#define __ia64_get_rr(index) 	__getIndReg(_IA64_REG_INDR_RR, index)
 
 #define ia64_srlz_d		__dsrlz
 #define ia64_srlz_i		__isrlz
@@ -218,18 +218,18 @@
 #define ia64_ld8_acq		__ld8_acq
 
 #define ia64_sync_i		__synci
-#define ia64_thash		__thash
-#define ia64_ttag		__ttag
-#define ia64_itcd		__itcd
-#define ia64_itci		__itci
-#define ia64_itrd		__itrd
-#define ia64_itri		__itri
-#define ia64_ptce		__ptce
-#define ia64_ptcl		__ptcl
-#define ia64_ptcg		__ptcg
-#define ia64_ptcga		__ptcga
-#define ia64_ptri		__ptri
-#define ia64_ptrd		__ptrd
+#define __ia64_thash		__thash
+#define __ia64_ttag		__ttag
+#define __ia64_itcd		__itcd
+#define __ia64_itci		__itci
+#define __ia64_itrd		__itrd
+#define __ia64_itri		__itri
+#define __ia64_ptce		__ptce
+#define __ia64_ptcl		__ptcl
+#define __ia64_ptcg		__ptcg
+#define __ia64_ptcga		__ptcga
+#define __ia64_ptri		__ptri
+#define __ia64_ptrd		__ptrd
 #define ia64_dep_mi		_m64_dep_mi
 
 /* Values for lfhint in __lfetch and __lfetch_fault */
@@ -244,14 +244,16 @@
 #define ia64_lfetch_fault	__lfetch_fault
 #define ia64_lfetch_fault_excl	__lfetch_fault_excl
 
-#define ia64_intrin_local_irq_restore(x)		\
+#define __ia64_intrin_local_irq_restore(x)		\
 do {							\
 	if ((x) != 0) {					\
-		ia64_ssm(IA64_PSR_I);			\
+		__ia64_ssm(IA64_PSR_I);			\
 		ia64_srlz_d();				\
 	} else {					\
-		ia64_rsm(IA64_PSR_I);			\
+		__ia64_rsm(IA64_PSR_I);			\
 	}						\
 } while (0)
 
+#define __ia64_get_psr_i()	(__ia64_getreg(_IA64_REG_PSR) & 0x4000UL)
+
 #endif /* _ASM_IA64_INTEL_INTRIN_H */
diff -Naur linux-2.6.13/include/asm-ia64/pal.h linux-2.6.13-Xenpatch/include/asm-ia64/pal.h
--- linux-2.6.13/include/asm-ia64/pal.h	2005-08-28 17:41:01.000000000 -0600
+++ linux-2.6.13-Xenpatch/include/asm-ia64/pal.h	2005-08-29 10:39:15.000000000 -0600
@@ -79,6 +79,7 @@
 #ifndef __ASSEMBLY__
 
 #include <linux/types.h>
+#include <asm/processor.h>
 #include <asm/fpu.h>
 
 /*
diff -Naur linux-2.6.13/include/asm-ia64/privop.h linux-2.6.13-Xenpatch/include/asm-ia64/privop.h
--- linux-2.6.13/include/asm-ia64/privop.h	1969-12-31 17:00:00.000000000 -0700
+++ linux-2.6.13-Xenpatch/include/asm-ia64/privop.h	2005-08-29 10:39:15.000000000 -0600
@@ -0,0 +1,60 @@
+#ifndef _ASM_IA64_PRIVOP_H
+#define _ASM_IA64_PRIVOP_H
+
+/*
+ * Copyright (C) 2005 Hewlett-Packard Co
+ *	Dan Magenheimer <dan.magenheimer@hp.com>
+ *
+ */
+
+#include <linux/config.h>
+#ifdef CONFIG_XEN
+#include <asm/xen/privop.h>
+#endif
+
+#ifndef __ASSEMBLY
+
+#ifndef IA64_PARAVIRTUALIZED
+
+#define ia64_getreg			__ia64_getreg
+#define ia64_setreg			__ia64_setreg
+#define ia64_hint			__ia64_hint
+#define ia64_thash			__ia64_thash
+#define ia64_itci			__ia64_itci
+#define ia64_itcd			__ia64_itcd
+#define ia64_itri			__ia64_itri
+#define ia64_itrd			__ia64_itrd
+#define ia64_tpa			__ia64_tpa
+#define ia64_set_ibr			__ia64_set_ibr
+#define ia64_set_pkr			__ia64_set_pkr
+#define ia64_set_pmc			__ia64_set_pmc
+#define ia64_set_pmd			__ia64_set_pmd
+#define ia64_set_rr			__ia64_set_rr
+#define ia64_get_cpuid			__ia64_get_cpuid
+#define ia64_get_ibr			__ia64_get_ibr
+#define ia64_get_pkr			__ia64_get_pkr
+#define ia64_get_pmc			__ia64_get_pmc
+#define ia64_get_pmd			__ia64_get_pmd
+#define ia64_get_rr			__ia64_get_rr
+#define ia64_fc				__ia64_fc
+#define ia64_ssm			__ia64_ssm
+#define ia64_rsm			__ia64_rsm
+#define ia64_ptce			__ia64_ptce
+#define ia64_ptcga			__ia64_ptcga
+#define ia64_ptcl			__ia64_ptcl
+#define ia64_ptri			__ia64_ptri
+#define ia64_ptrd			__ia64_ptrd
+#define	ia64_get_psr_i			__ia64_get_psr_i
+#define ia64_intrin_local_irq_restore	__ia64_intrin_local_irq_restore
+#define ia64_pal_halt_light		__ia64_pal_halt_light
+#define	ia64_leave_kernel		__ia64_leave_kernel
+#define	ia64_leave_syscall		__ia64_leave_syscall
+#define	ia64_trace_syscall		__ia64_trace_syscall
+#define	ia64_switch_to			__ia64_switch_to
+#define	ia64_pal_call_static		__ia64_pal_call_static
+
+#endif /* !IA64_PARAVIRTUALIZED */
+
+#endif /* !__ASSEMBLY */
+
+#endif /* _ASM_IA64_PRIVOP_H */
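
privop.h is the pivot of the patch: when IA64_PARAVIRTUALIZED is not defined, every
privileged-operation name is aliased straight back to the native __ia64_*
implementation renamed above, so a non-Xen kernel compiles to exactly the code it
had before.  With CONFIG_XEN enabled, <asm/xen/privop.h> is included first and is
expected to define IA64_PARAVIRTUALIZED and supply its own mappings.  That header
is not part of this patch; the sketch below only illustrates the contract, and all
xen_* names in it are hypothetical:

    /* include/asm-ia64/xen/privop.h -- illustrative sketch, not the real header */
    #define IA64_PARAVIRTUALIZED        /* suppress the native aliases in privop.h */

    /* Entry points: branches such as "br.cond.sptk.few ia64_leave_kernel" in
     * entry.S now reach hypervisor-aware replacements. */
    #define ia64_leave_kernel       xen_leave_kernel
    #define ia64_leave_syscall      xen_leave_syscall
    #define ia64_switch_to          xen_switch_to

    /* Privileged operations: one redirection per name listed in privop.h, e.g. */
    #define ia64_get_psr_i()        xen_get_psr_i()
    #define ia64_ssm(mask)          xen_ssm(mask)
    /* ... ia64_getreg, ia64_setreg, ia64_thash, ia64_ptcga, and so on ... */
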
diff -Naur linux-2.6.13/include/asm-ia64/processor.h linux-2.6.13-Xenpatch/include/asm-ia64/processor.h
--- linux-2.6.13/include/asm-ia64/processor.h	2005-08-28 17:41:01.000000000 -0600
+++ linux-2.6.13-Xenpatch/include/asm-ia64/processor.h	2005-08-29 10:39:15.000000000 -0600
@@ -19,6 +19,7 @@
 #include <asm/kregs.h>
 #include <asm/ptrace.h>
 #include <asm/ustack.h>
+#include <asm/privop.h>
 
 /* Our arch specific arch_init_sched_domain is in arch/ia64/kernel/domain.c */
 #define ARCH_HAS_SCHED_DOMAIN
diff -Naur linux-2.6.13/include/asm-ia64/system.h linux-2.6.13-Xenpatch/include/asm-ia64/system.h
--- linux-2.6.13/include/asm-ia64/system.h	2005-08-28 17:41:01.000000000 -0600
+++ linux-2.6.13-Xenpatch/include/asm-ia64/system.h	2005-08-29 10:39:15.000000000 -0600
@@ -124,7 +124,7 @@
 #define __local_irq_save(x)			\
 do {						\
 	ia64_stop();				\
-	(x) = ia64_getreg(_IA64_REG_PSR);	\
+	(x) = ia64_get_psr_i();			\
 	ia64_stop();				\
 	ia64_rsm(IA64_PSR_I);			\
 } while (0)
@@ -172,7 +172,7 @@
 #endif /* !CONFIG_IA64_DEBUG_IRQ */
 
 #define local_irq_enable()	({ ia64_stop(); ia64_ssm(IA64_PSR_I); ia64_srlz_d(); })
-#define local_save_flags(flags)	({ ia64_stop(); (flags) = ia64_getreg(_IA64_REG_PSR); })
+#define local_save_flags(flags)	({ ia64_stop(); (flags) = ia64_get_psr_i(); })
 
 #define irqs_disabled()				\
 ({						\

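With the system.h changes, __local_irq_save() and local_save_flags() now record
only the state of PSR.i (zero or 0x4000UL) rather than the whole PSR word.  That is
all ia64_intrin_local_irq_restore() ever tests, and it means a paravirtualized
kernel only has to expose the interrupt bit instead of synthesizing a complete PSR
image.  What __local_irq_save(x) reduces to on a native build, as a sketch under
the non-paravirtualized aliases:

    /* Native expansion of __local_irq_save(x) after this patch: */
    ia64_stop();
    x = __ia64_getreg(_IA64_REG_PSR) & 0x4000UL;   /* PSR.i only */
    ia64_stop();
    __ia64_rsm(IA64_PSR_I);                        /* interrupts off */
    /* local_irq_restore(x) later re-enables them only if x is non-zero. */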