This patch is not for commit, but for review and comments.

The current itlb/dtlb miss handlers don't handle a tlb miss
in the xen/ia64 identity mapping area.
Such a miss might occur because xen/ia64 enables the vhpt walker
even for region 7. (Please see set_one_rr().)
I haven't actually encountered such a miss yet; I'm inferring this
from reading the code.

The VTI itlb/dtlb miss handlers handle this by checking the psr.vm bit.
This patch is not optimized, in order to keep it simple for review;
optimization should be done later.


# HG changeset patch
# User [EMAIL PROTECTED]
# Node ID 07055de7ed1525110eefe5914847f0fc6002e232
# Parent  cfa3b96b056debee0d86249bd74fb717b43ea84b
Make the itlb/dtlb miss handlers handle the xen/ia64 identity mapping area.
xen/ia64 enables the vhpt walker for all regions, unlike Linux.
So tlb misses on the identity mapping area are caught by the
itlb/dtlb miss handlers, not the alt itlb/dtlb miss handlers.
vmx_ivt.S already has such tweaks by checking psr.vm bit.

TODO: optimization
      tlb miss handlers are performance critical.

Signed-off-by: Isaku Yamahata <[EMAIL PROTECTED]>

diff -r cfa3b96b056d -r 07055de7ed15 xen/arch/ia64/xen/ivt.S
--- a/xen/arch/ia64/xen/ivt.S   Fri Jan 13 14:58:41 2006 -0600
+++ b/xen/arch/ia64/xen/ivt.S   Thu Jan 26 21:48:35 2006 +0900
@@ -245,12 +245,47 @@
        DBG_FAULT(1)
 #ifdef XEN
        VHPT_CCHAIN_LOOKUP(itlb_miss,i)
+#if VHPT_ENABLED       
+       // XXX TODO optimization
+       mov r31=pr                              // save predicates
+       mov r30=cr.ipsr
+       mov r28=cr.iip
+       mov r16=cr.ifa                          // get virtual address
+       mov r17=cr.isr                          // get isr
+       ;;
+       extr.u r18 = r30, IA64_PSR_CPL0_BIT, 2  // extract psr.cpl
+       ;; 
+       cmp.ne p6, p0 = r0, r18                 // cpl == 0?
+(p6)   br.cond.sptk 1f
+
+       // Is the faulted iip in vmm area?
+       // check [59:58] bit
+       // 00, 11: guest
+       // 01, 10: vmm
+       extr.u r19 = r28, 58, 2
+       ;; 
+       cmp.eq p7, p0 = 0x0, r19
+(p7)   br.cond.sptk 1f
+       cmp.eq p8, p0 = 0x3, r19
+(p8)   br.cond.sptk 1f
+
+       // Is the faulted address in the identity mapping area?
+       // 0xf000... or 0xe8000...
+       extr.u r20 = r16, 59, 5
+       ;; 
+       cmp.eq p9, p0 = 0x1e, r20 // (0xf0 >> 3) = 0x1e
+(p9)   br.cond.spnt late_alt_itlb_miss
+       cmp.eq p10, p0 = 0x1d, r20 // (0xe8 >> 3) = 0x1d
+(p10)  br.cond.spnt late_alt_itlb_miss
+1:     
+#endif
 #ifdef VHPT_GLOBAL
 //     br.cond.sptk page_fault
        br.cond.sptk fast_tlb_miss_reflect
        ;;
 #endif
-#endif
+       mov r29=b0                              // save b0
+#else
        /*
         * The ITLB handler accesses the L3 PTE via the virtually mapped linear
         * page table.  If a nested TLB miss occurs, we switch into physical
@@ -260,6 +295,7 @@
        mov r16=cr.ifa                          // get virtual address
        mov r29=b0                              // save b0
        mov r31=pr                              // save predicates
+#endif
 .itlb_fault:
        mov r17=cr.iha                          // get virtual address of L3 PTE
        movl r30=1f                             // load nested fault 
continuation point
@@ -297,12 +333,47 @@
        DBG_FAULT(2)
 #ifdef XEN
        VHPT_CCHAIN_LOOKUP(dtlb_miss,d)
+#if VHPT_ENABLED
+       // XXX TODO optimization
+       mov r31=pr                              // save predicates
+       mov r30=cr.ipsr
+       mov r28=cr.iip
+       mov r16=cr.ifa                          // get virtual address
+       mov r17=cr.isr                          // get isr
+       ;; 
+       extr.u r18 = r30, IA64_PSR_CPL0_BIT, 2  // extract psr.cpl
+       ;; 
+       cmp.ne p6, p0 = r0, r18                 // cpl == 0?
+(p6)   br.cond.sptk 1f
+
+       // Is the faulted iip in vmm area?
+       // check [59:58] bit
+       // 00, 11: guest
+       // 01, 10: vmm
+       extr.u r19 = r28, 58, 2
+       ;; 
+       cmp.eq p7, p0 = 0x0, r19
+(p7)   br.cond.sptk 1f
+       cmp.eq p8, p0 = 0x3, r19
+(p8)   br.cond.sptk 1f
+
+       // Is the faulted address in the identity mapping area?
+       // 0xf000... or 0xe8000...
+       extr.u r20 = r16, 59, 5
+       ;; 
+       cmp.eq p9, p0 = 0x1e, r20 // (0xf0 >> 3) = 0x1e
+(p9)   br.cond.spnt late_alt_dtlb_miss
+       cmp.eq p10, p0 = 0x1d, r20 // (0xe8 >> 3) = 0x1d
+(p10)  br.cond.spnt late_alt_dtlb_miss
+1:
+#endif 
 #ifdef VHPT_GLOBAL
 //     br.cond.sptk page_fault
        br.cond.sptk fast_tlb_miss_reflect
        ;;
 #endif
-#endif
+       mov r29=b0                              // save b0
+#else  
        /*
         * The DTLB handler accesses the L3 PTE via the virtually mapped linear
         * page table.  If a nested TLB miss occurs, we switch into physical
@@ -312,6 +383,7 @@
        mov r16=cr.ifa                          // get virtual address
        mov r29=b0                              // save b0
        mov r31=pr                              // save predicates
+#endif
 dtlb_fault:
        mov r17=cr.iha                          // get virtual address of L3 PTE
        movl r30=1f                             // load nested fault 
continuation point


-- 
yamahata
# HG changeset patch
# User [EMAIL PROTECTED]
# Node ID 07055de7ed1525110eefe5914847f0fc6002e232
# Parent  cfa3b96b056debee0d86249bd74fb717b43ea84b
Make the itlb/dtlb miss handlers handle the xen/ia64 identity mapping area.
xen/ia64 enables the vhpt walker for all regions, unlike Linux.
So tlb misses on the identity mapping area are caught by the
itlb/dtlb miss handlers, not the alt itlb/dtlb miss handlers.
vmx_ivt.S already has such tweaks by checking psr.vm bit.

TODO: optimization
      tlb miss handlers are performance critical.

Signed-off-by: Isaku Yamahata <[EMAIL PROTECTED]>

diff -r cfa3b96b056d -r 07055de7ed15 xen/arch/ia64/xen/ivt.S
--- a/xen/arch/ia64/xen/ivt.S   Fri Jan 13 14:58:41 2006 -0600
+++ b/xen/arch/ia64/xen/ivt.S   Thu Jan 26 21:48:35 2006 +0900
@@ -245,12 +245,47 @@
        DBG_FAULT(1)
 #ifdef XEN
        VHPT_CCHAIN_LOOKUP(itlb_miss,i)
+#if VHPT_ENABLED       
+       // XXX TODO optimization
+       mov r31=pr                              // save predicates
+       mov r30=cr.ipsr
+       mov r28=cr.iip
+       mov r16=cr.ifa                          // get virtual address
+       mov r17=cr.isr                          // get isr
+       ;;
+       extr.u r18 = r30, IA64_PSR_CPL0_BIT, 2  // extract psr.cpl
+       ;; 
+       cmp.ne p6, p0 = r0, r18                 // cpl == 0?
+(p6)   br.cond.sptk 1f
+
+       // Is the faulted iip in vmm area?
+       // check [59:58] bit
+       // 00, 11: guest
+       // 01, 10: vmm
+       extr.u r19 = r28, 58, 2
+       ;; 
+       cmp.eq p7, p0 = 0x0, r19
+(p7)   br.cond.sptk 1f
+       cmp.eq p8, p0 = 0x3, r19
+(p8)   br.cond.sptk 1f
+
+       // Is the faulted address in the identity mapping area?
+       // 0xf000... or 0xe8000...
+       extr.u r20 = r16, 59, 5
+       ;; 
+       cmp.eq p9, p0 = 0x1e, r20 // (0xf0 >> 3) = 0x1e
+(p9)   br.cond.spnt late_alt_itlb_miss
+       cmp.eq p10, p0 = 0x1d, r20 // (0xe8 >> 3) = 0x1d
+(p10)  br.cond.spnt late_alt_itlb_miss
+1:     
+#endif
 #ifdef VHPT_GLOBAL
 //     br.cond.sptk page_fault
        br.cond.sptk fast_tlb_miss_reflect
        ;;
 #endif
-#endif
+       mov r29=b0                              // save b0
+#else
        /*
         * The ITLB handler accesses the L3 PTE via the virtually mapped linear
         * page table.  If a nested TLB miss occurs, we switch into physical
@@ -260,6 +295,7 @@
        mov r16=cr.ifa                          // get virtual address
        mov r29=b0                              // save b0
        mov r31=pr                              // save predicates
+#endif
 .itlb_fault:
        mov r17=cr.iha                          // get virtual address of L3 PTE
        movl r30=1f                             // load nested fault 
continuation point
@@ -297,12 +333,47 @@
        DBG_FAULT(2)
 #ifdef XEN
        VHPT_CCHAIN_LOOKUP(dtlb_miss,d)
+#if VHPT_ENABLED
+       // XXX TODO optimization
+       mov r31=pr                              // save predicates
+       mov r30=cr.ipsr
+       mov r28=cr.iip
+       mov r16=cr.ifa                          // get virtual address
+       mov r17=cr.isr                          // get isr
+       ;; 
+       extr.u r18 = r30, IA64_PSR_CPL0_BIT, 2  // extract psr.cpl
+       ;; 
+       cmp.ne p6, p0 = r0, r18                 // cpl == 0?
+(p6)   br.cond.sptk 1f
+
+       // Is the faulted iip in vmm area?
+       // check [59:58] bit
+       // 00, 11: guest
+       // 01, 10: vmm
+       extr.u r19 = r28, 58, 2
+       ;; 
+       cmp.eq p7, p0 = 0x0, r19
+(p7)   br.cond.sptk 1f
+       cmp.eq p8, p0 = 0x3, r19
+(p8)   br.cond.sptk 1f
+
+       // Is the faulted address in the identity mapping area?
+       // 0xf000... or 0xe8000...
+       extr.u r20 = r16, 59, 5
+       ;; 
+       cmp.eq p9, p0 = 0x1e, r20 // (0xf0 >> 3) = 0x1e
+(p9)   br.cond.spnt late_alt_dtlb_miss
+       cmp.eq p10, p0 = 0x1d, r20 // (0xe8 >> 3) = 0x1d
+(p10)  br.cond.spnt late_alt_dtlb_miss
+1:
+#endif 
 #ifdef VHPT_GLOBAL
 //     br.cond.sptk page_fault
        br.cond.sptk fast_tlb_miss_reflect
        ;;
 #endif
-#endif
+       mov r29=b0                              // save b0
+#else  
        /*
         * The DTLB handler accesses the L3 PTE via the virtually mapped linear
         * page table.  If a nested TLB miss occurs, we switch into physical
@@ -312,6 +383,7 @@
        mov r16=cr.ifa                          // get virtual address
        mov r29=b0                              // save b0
        mov r31=pr                              // save predicates
+#endif
 dtlb_fault:
        mov r17=cr.iha                          // get virtual address of L3 PTE
        movl r30=1f                             // load nested fault 
continuation point
_______________________________________________
Xen-ia64-devel mailing list
Xen-ia64-devel@lists.xensource.com
http://lists.xensource.com/xen-ia64-devel

Reply via email to