The changeover to per cpu MCA/INIT stacks appears to be working.
However we still treat them as if we had one MCA stack and no INIT
stack, IOW we are still restricted to one MCA or INIT event at a time.
The (incomplete and work in progress) patch below aims to really use
the per cpu MCA/INIT stacks; my targets are :-
* Handle concurrent MCA events on multiple cpus.
* Handle concurrent INIT events on multiple cpus, both monarch and
slave.
* Cope with nested events, to diagnose MCA events in INIT handlers.
* Give decent traceback through MCA and INIT events, for both online
(kdb, gdb) and after the event (lcrash) diagnosis.
* Recover from INIT as well as MCA events. This includes INIT events
that were sent because an MCA hit some disabled cpus.
Although the patch is incomplete, it will compile. It is big enough
and there are enough unanswered questions that I am throwing this open
for abuse^Wreview. Ignore the restore path for returning to the
original stack, that will be filled in once the entry path is
satisfactory.
The patch is bigger than I wanted because I had to remove the static
SAL to OS and OS to SAL states, making them per event instead. That
caused a lot of small changes in mca.c and mca_drv.c, to pass the
structure instead of using a single static version. There is enough
overlap between SAL to OS and OS to SAL to make it worth consolidating
them into a single SAL/OS state structure.
proc_state_dump has gone completely. The MCA/INIT handler should only
need to save the registers that the kernel changes, IOW struct pt_regs
is all that is required. There may be a couple of other variables that
need to be saved, such as previous values of the IA64_KR registers,
those still need to be added.
The rse_ variables and macros have been removed, together with the
separate stackframe. They are replaced by the standard RBS switch
information in struct pt_regs.
Outstanding questions:
* What does "current" mean when we have normal stacks plus MCA and INIT
  stacks?  Does current point to the original stack, or does it point
  to the current MCA/INIT stack?  My preference is to point current at
  the current MCA/INIT stack; if that means copying the current state
  from the previous stack then so be it.
* Does the MCA/INIT stack need to be wired into IA64_KR_CURRENT_STACK
and IA64_TR_CURRENT_STACK? If it does, what happens to access to the
original stack?
* Does the code in ia64_pt_regs_save() look sensible? In particular
are the NaT handling and the RBS switch correct?
Still to do: The INIT path. Unwinding. Finish the restore path.
arch/ia64/kernel/asm-offsets.c | 40 +
arch/ia64/kernel/mca.c | 65 --
arch/ia64/kernel/mca_asm.S | 1045 +++++++++++++++++------------------------
arch/ia64/kernel/mca_drv.c | 37 -
include/asm-ia64/mca.h | 62 +-
include/asm-ia64/mca_asm.h | 128 +----
6 files changed, 571 insertions(+), 806 deletions(-)
Index: linux/include/asm-ia64/mca.h
===================================================================
--- linux.orig/include/asm-ia64/mca.h 2005-02-15 13:27:38.000000000 +1100
+++ linux/include/asm-ia64/mca.h 2005-02-15 13:27:42.000000000 +1100
@@ -11,8 +11,6 @@
#ifndef _ASM_IA64_MCA_H
#define _ASM_IA64_MCA_H
-#define IA64_MCA_STACK_SIZE 8192
-
#if !defined(__ASSEMBLY__)
#include <linux/interrupt.h>
@@ -63,18 +61,26 @@ typedef struct ia64_mc_info_s {
} ia64_mc_info_t;
-typedef struct ia64_mca_sal_to_os_state_s {
- u64 imsto_os_gp; /* GP of the os registered with
the SAL */
- u64 imsto_pal_proc; /* PAL_PROC entry point -
physical addr */
- u64 imsto_sal_proc; /* SAL_PROC entry point -
physical addr */
- u64 imsto_sal_gp; /* GP of the SAL - physical */
- u64 imsto_rendez_state; /* Rendez state information */
- u64 imsto_sal_check_ra; /* Return address in SAL_CHECK
while going
- * back to SAL from OS after
MCA handling.
- */
- u64 pal_min_state; /* from PAL in r17 */
- u64 proc_state_param; /* from PAL in r18. See SDV
2:268 11.3.2.1 */
-} ia64_mca_sal_to_os_state_t;
+/* Handover state from SAL to OS and vice versa, for both MCA and INIT events.
+ * Note: mca_asm.S depends on the precise layout of this structure.
+ */
+
+struct ia64_sal_os_state {
+ /* SAL to OS, must be at offset 0 */
+ u64 os_gp; /* GP of the os
registered with the SAL, physical */
+ u64 pal_proc; /* PAL_PROC entry
point, physical */
+ u64 sal_proc; /* SAL_PROC entry
point, physical */
+ u64 rendez_state; /* Rendez state
information */
+ u64 proc_state_param; /* from R18 */
+ /* common, must follow SAL to OS */
+ u64 sal_ra; /* Return address in
SAL, physical */
+ u64 sal_gp; /* GP of the SAL -
physical */
+ pal_min_state_area_t *pal_min_state; /* from R17. physical
in asm, virtual in C */
+ /* OS to SAL, must follow common */
+ u64 os_status; /* OS status to SAL,
enum below */
+ u64 context; /* 0 if return to same
context
+ 1 if return to new
context */
+};
enum {
IA64_MCA_CORRECTED = 0x0, /* Error has been corrected by
OS_MCA */
@@ -88,29 +94,10 @@ enum {
IA64_MCA_NEW_CONTEXT = -1 /* SAL to return to new context
*/
};
-typedef struct ia64_mca_os_to_sal_state_s {
- u64 imots_os_status; /* OS status to SAL as to
what happened
- * with the MCA handling.
- */
- u64 imots_sal_gp; /* GP of the SAL - physical */
- u64 imots_context; /* 0 if return to same context
- 1 if return to new context */
- u64 *imots_new_min_state; /* Pointer to structure
containing
- * new values of registers in
the min state
- * save area.
- */
- u64 imots_sal_check_ra; /* Return address in SAL_CHECK
while going
- * back to SAL from OS after
MCA handling.
- */
-} ia64_mca_os_to_sal_state_t;
-
/* Per-CPU MCA state that is too big for normal per-CPU variables. */
struct ia64_mca_cpu {
- u64 stack[IA64_MCA_STACK_SIZE/8]; /* MCA memory-stack */
- u64 proc_state_dump[512];
- u64 stackframe[32];
- u64 rbstore[IA64_MCA_STACK_SIZE/8]; /* MCA reg.-backing store */
+ u64 mca_stack[KERNEL_STACK_SIZE/8];
u64 init_stack[KERNEL_STACK_SIZE/8];
} __attribute__ ((aligned(16)));
@@ -121,11 +108,14 @@ extern void ia64_mca_init(void);
extern void ia64_mca_cpu_init(void *);
extern void ia64_os_mca_dispatch(void);
extern void ia64_os_mca_dispatch_end(void);
-extern void ia64_mca_ucmc_handler(void);
+extern void ia64_mca_ucmc_handler(struct ia64_sal_os_state *);
+extern void ia64_init_handler(struct pt_regs *,
+ struct switch_stack *,
+ struct ia64_sal_os_state *);
extern void ia64_monarch_init_handler(void);
extern void ia64_slave_init_handler(void);
extern void ia64_mca_cmc_vector_setup(void);
-extern int ia64_reg_MCA_extension(void*);
+extern int ia64_reg_MCA_extension(int (*fn)(void *, struct ia64_sal_os_state
*));
extern void ia64_unreg_MCA_extension(void);
#endif /* !__ASSEMBLY__ */
Index: linux/arch/ia64/kernel/mca_asm.S
===================================================================
--- linux.orig/arch/ia64/kernel/mca_asm.S 2005-02-15 13:27:38.000000000
+1100
+++ linux/arch/ia64/kernel/mca_asm.S 2005-02-15 13:27:42.000000000 +1100
@@ -16,6 +16,9 @@
// 04/11/12 Russ Anderson <[EMAIL PROTECTED]>
// Added per cpu MCA/INIT stack save areas.
//
+// 05/02/13 Keith Owens <[EMAIL PROTECTED]>
+// Use per cpu MCA/INIT stack save areas for all data.
+//
#include <linux/config.h>
#include <linux/threads.h>
@@ -39,68 +42,6 @@
#include "minstate.h"
-/*
- * SAL_TO_OS_MCA_HANDOFF_STATE (SAL 3.0 spec)
- * 1. GR1 = OS GP
- * 2. GR8 = PAL_PROC physical address
- * 3. GR9 = SAL_PROC physical address
- * 4. GR10 = SAL GP (physical)
- * 5. GR11 = Rendez state
- * 6. GR12 = Return address to location within SAL_CHECK
- */
-#define SAL_TO_OS_MCA_HANDOFF_STATE_SAVE(_tmp) \
- LOAD_PHYSICAL(p0, _tmp, ia64_sal_to_os_handoff_state);; \
- st8 [_tmp]=r1,0x08;; \
- st8 [_tmp]=r8,0x08;; \
- st8 [_tmp]=r9,0x08;; \
- st8 [_tmp]=r10,0x08;; \
- st8 [_tmp]=r11,0x08;; \
- st8 [_tmp]=r12,0x08;; \
- st8 [_tmp]=r17,0x08;; \
- st8 [_tmp]=r18,0x08
-
-/*
- * OS_MCA_TO_SAL_HANDOFF_STATE (SAL 3.0 spec)
- * (p6) is executed if we never entered virtual mode (TLB error)
- * (p7) is executed if we entered virtual mode as expected (normal case)
- * 1. GR8 = OS_MCA return status
- * 2. GR9 = SAL GP (physical)
- * 3. GR10 = 0/1 returning same/new context
- * 4. GR22 = New min state save area pointer
- * returns ptr to SAL rtn save loc in _tmp
- */
-#define OS_MCA_TO_SAL_HANDOFF_STATE_RESTORE(_tmp) \
- movl _tmp=ia64_os_to_sal_handoff_state;; \
- DATA_VA_TO_PA(_tmp);; \
- ld8 r8=[_tmp],0x08;; \
- ld8 r9=[_tmp],0x08;; \
- ld8 r10=[_tmp],0x08;; \
- ld8 r22=[_tmp],0x08;;
- // now _tmp is pointing to SAL rtn save location
-
-/*
- * COLD_BOOT_HANDOFF_STATE() sets ia64_mca_os_to_sal_state
- * imots_os_status=IA64_MCA_COLD_BOOT
- * imots_sal_gp=SAL GP
- * imots_context=IA64_MCA_SAME_CONTEXT
- * imots_new_min_state=Min state save area pointer
- * imots_sal_check_ra=Return address to location within SAL_CHECK
- *
- */
-#define COLD_BOOT_HANDOFF_STATE(sal_to_os_handoff,os_to_sal_handoff,tmp)\
- movl tmp=IA64_MCA_COLD_BOOT; \
- movl sal_to_os_handoff=__pa(ia64_sal_to_os_handoff_state); \
- movl os_to_sal_handoff=__pa(ia64_os_to_sal_handoff_state);; \
- st8 [os_to_sal_handoff]=tmp,8;; \
- ld8 tmp=[sal_to_os_handoff],48;; \
- st8 [os_to_sal_handoff]=tmp,8;; \
- movl tmp=IA64_MCA_SAME_CONTEXT;; \
- st8 [os_to_sal_handoff]=tmp,8;; \
- ld8 tmp=[sal_to_os_handoff],-8;; \
- st8 [os_to_sal_handoff]=tmp,8;; \
- ld8 tmp=[sal_to_os_handoff];; \
- st8 [os_to_sal_handoff]=tmp;;
-
#define GET_IA64_MCA_DATA(reg) \
GET_THIS_PADDR(reg, ia64_mca_data) \
;; \
@@ -108,14 +49,12 @@
.global ia64_os_mca_dispatch
.global ia64_os_mca_dispatch_end
- .global ia64_sal_to_os_handoff_state
- .global ia64_os_to_sal_handoff_state
.text
.align 16
ia64_os_mca_dispatch:
-
+ .prologue
// Serialize all MCA processing
mov r3=1;;
LOAD_PHYSICAL(p0,r2,ia64_mca_serialize);;
@@ -124,24 +63,17 @@ ia64_os_mca_spin:
cmp.ne p6,p0=r4,r0
(p6) br ia64_os_mca_spin
- // Save the SAL to OS MCA handoff state as defined
- // by SAL SPEC 3.0
- // NOTE : The order in which the state gets saved
- // is dependent on the way the C-structure
- // for ia64_mca_sal_to_os_state_t has been
- // defined in include/asm/mca.h
- SAL_TO_OS_MCA_HANDOFF_STATE_SAVE(r2)
- ;;
-
- // LOG PROCESSOR STATE INFO FROM HERE ON..
-begin_os_mca_dump:
- br ia64_os_mca_proc_state_dump;;
-
-ia64_os_mca_done_dump:
+ movl r2=1f // return address
+ mov r3=IA64_MCA_CPU_MCA_STACK_OFFSET // use the MCA stack
+ br.sptk ia64_sal_os_state_save // save the SAL to OS MCA
handoff state
+1:
- LOAD_PHYSICAL(p0,r16,ia64_sal_to_os_handoff_state+56)
+ GET_IA64_MCA_DATA(r2)
+ // Using MCA stack, struct ia64_sal_os_state, variable proc_state_param
+ ;;
+ add
r3=IA64_MCA_CPU_MCA_STACK_OFFSET+MCA_SOS_OFFSET+IA64_SAL_OS_STATE_PROC_STATE_PARAM_OFFSET,
r2
;;
- ld8 r18=[r16] // Get processor state parameter on existing
PALE_CHECK.
+ ld8 r18=[r3] // Get processor state
parameter on existing PALE_CHECK.
;;
tbit.nz p6,p7=r18,60
(p7) br.spnt done_tlb_purge_and_reload
@@ -305,65 +237,56 @@ begin_tlb_purge_and_reload:
itr.d dtr[r20]=r16
;;
srlz.d
- ;;
- br.sptk.many done_tlb_purge_and_reload
-err:
- COLD_BOOT_HANDOFF_STATE(r20,r21,r22)
- br.sptk.many ia64_os_mca_done_restore
done_tlb_purge_and_reload:
- // Setup new stack frame for OS_MCA handling
- GET_IA64_MCA_DATA(r2)
- ;;
- add r3 = IA64_MCA_CPU_STACKFRAME_OFFSET, r2
- add r2 = IA64_MCA_CPU_RBSTORE_OFFSET, r2
- ;;
- rse_switch_context(r6,r3,r2);; // RSC management in this new context
-
- GET_IA64_MCA_DATA(r2)
- ;;
- add r2 = IA64_MCA_CPU_STACK_OFFSET+IA64_MCA_STACK_SIZE-16, r2
- ;;
- mov r12=r2 // establish new stack-pointer
+ // create struct pt_regs from min_state, switch stack
+ movl r2=1f // return address
+ mov r3=IA64_MCA_CPU_MCA_STACK_OFFSET // use the MCA stack
+ br.sptk ia64_pt_regs_save
+1:
+
+ // everything saved, now we can set the kernel registers
+ movl r2=1f // return address
+ mov r3=IA64_MCA_CPU_MCA_STACK_OFFSET // use the MCA stack
+ br.sptk ia64_set_kernel_registers
+1:
// Enter virtual mode from physical mode
VIRTUAL_MODE_ENTER(r2, r3, ia64_os_mca_virtual_begin, r4)
ia64_os_mca_virtual_begin:
- // Call virtual mode handler
- movl r2=ia64_mca_ucmc_handler;;
- mov b6=r2;;
- br.call.sptk.many b0=b6;;
-.ret0:
- // Revert back to physical mode before going back to SAL
- PHYSICAL_MODE_ENTER(r2, r3, ia64_os_mca_virtual_end, r4)
-ia64_os_mca_virtual_end:
+ mov ar.rsc=3 // set eager mode for C handler
- // restore the original stack frame here
+ // Call virtual mode handler
+ alloc r14=ar.pfs,0,0,1,0
+ .body
GET_IA64_MCA_DATA(r2)
;;
- add r2 = IA64_MCA_CPU_STACKFRAME_OFFSET, r2
- ;;
- movl r4=IA64_PSR_MC
+ add out0=IA64_MCA_CPU_MCA_STACK_OFFSET+MCA_SOS_OFFSET, r2
;;
- rse_return_context(r4,r3,r2) // switch from interrupt context for RSE
+ DATA_PA_TO_VA(out0,r2)
+ br.call.sptk.many b0=ia64_mca_ucmc_handler;;
- // let us restore all the registers from our PSI structure
- mov r8=gp
- ;;
-begin_os_mca_restore:
- br ia64_os_mca_proc_state_restore;;
+ // Revert back to physical mode before going back to SAL
+ PHYSICAL_MODE_ENTER(r2, r3, ia64_os_mca_virtual_end, r4)
+ia64_os_mca_virtual_end:
-ia64_os_mca_done_restore:
- OS_MCA_TO_SAL_HANDOFF_STATE_RESTORE(r2);;
- // branch back to SALE_CHECK
- ld8 r3=[r2];;
- mov b0=r3;; // SAL_CHECK return address
+ // restore struct pt_regs (kernel modified registers), switch back to
+ // previous stack
+ movl r2=1f // return address
+ mov r3=IA64_MCA_CPU_MCA_STACK_OFFSET // use the MCA stack
+ br.sptk ia64_pt_regs_restore
+1:
+
+ movl r2=1f // return address
+ mov r3=IA64_MCA_CPU_MCA_STACK_OFFSET // use the MCA stack
+ br.sptk ia64_sal_os_state_restore // restore the SAL to OS MCA
handoff state
+1:
+ mov b0=r12;; // SAL_CHECK return address
// release lock
- movl r3=ia64_mca_serialize;;
- DATA_VA_TO_PA(r3);;
+ LOAD_PHYSICAL(p0,r3,ia64_mca_serialize);;
st8.rel [r3]=r0
br b0
@@ -374,471 +297,403 @@ ia64_os_mca_dispatch_end:
//++
// Name:
-// ia64_os_mca_proc_state_dump()
+// ia64_sal_os_state_save()
//
// Stub Description:
//
-// This stub dumps the processor state during MCHK to a data area
+// Save the SAL/OS state. This is sensitive to the layout of struct
+// ia64_sal_os_state in mca.h.
+//
+// r2 contains the return address, r3 contains either
+// IA64_MCA_CPU_MCA_STACK_OFFSET or IA64_MCA_CPU_INIT_STACK_OFFSET.
+//
+// The OS to SAL section of struct ia64_sal_os_state is set to a default
+// value of cold boot and return to the same context.
//
//--
-ia64_os_mca_proc_state_dump:
-// Save bank 1 GRs 16-31 which will be used by c-language code when we switch
-// to virtual addressing mode.
+ia64_sal_os_state_save:
+ mov b0=r2 // save return address
GET_IA64_MCA_DATA(r2)
+ add r3=MCA_SOS_OFFSET, r3
+ ;;
+ add r3=r2, r3 // struct ia64_sal_os_state on MCA or INIT stack
+ ;;
+ st8 [r3]=r1,8 // OS GP, physical
+ ;;
+ st8 [r3]=r8,8 // PAL_PROC, physical
+ ;;
+ st8 [r3]=r9,8 // SAL_PROC, physical
+ ;;
+ st8 [r3]=r11,8 // rendezvous state
+ ;;
+ st8 [r3]=r18,8 // processor state parameter
+ ;;
+ st8 [r3]=r12,8 // SAL return address, physical
+ ;;
+ st8 [r3]=r10,8 // SAL GP, physical
+ ;;
+ st8 [r3]=r17,8 // PAL min_state, physical in asm, virtual in C
+ mov r6=IA64_MCA_COLD_BOOT
;;
- add r2 = IA64_MCA_CPU_PROC_STATE_DUMP_OFFSET, r2
+ st8 [r3]=r6,8 // set default OS status
+ mov r7=IA64_MCA_SAME_CONTEXT
;;
-// save ar.NaT
- mov r5=ar.unat // ar.unat
+ st8 [r3]=r7,8 // set default OS context
+ br.sptk b0
-// save banked GRs 16-31 along with NaT bits
- bsw.1;;
- st8.spill [r2]=r16,8;;
- st8.spill [r2]=r17,8;;
- st8.spill [r2]=r18,8;;
- st8.spill [r2]=r19,8;;
- st8.spill [r2]=r20,8;;
- st8.spill [r2]=r21,8;;
- st8.spill [r2]=r22,8;;
- st8.spill [r2]=r23,8;;
- st8.spill [r2]=r24,8;;
- st8.spill [r2]=r25,8;;
- st8.spill [r2]=r26,8;;
- st8.spill [r2]=r27,8;;
- st8.spill [r2]=r28,8;;
- st8.spill [r2]=r29,8;;
- st8.spill [r2]=r30,8;;
- st8.spill [r2]=r31,8;;
-
- mov r4=ar.unat;;
- st8 [r2]=r4,8 // save User NaT bits for
r16-r31
- mov ar.unat=r5 // restore original unat
- bsw.0;;
-
-//save BRs
- add r4=8,r2 // duplicate r2 in r4
- add r6=2*8,r2 // duplicate r2 in r4
-
- mov r3=b0
- mov r5=b1
- mov r7=b2;;
- st8 [r2]=r3,3*8
- st8 [r4]=r5,3*8
- st8 [r6]=r7,3*8;;
-
- mov r3=b3
- mov r5=b4
- mov r7=b5;;
- st8 [r2]=r3,3*8
- st8 [r4]=r5,3*8
- st8 [r6]=r7,3*8;;
-
- mov r3=b6
- mov r5=b7;;
- st8 [r2]=r3,2*8
- st8 [r4]=r5,2*8;;
-
-cSaveCRs:
-// save CRs
- add r4=8,r2 // duplicate r2 in r4
- add r6=2*8,r2 // duplicate r2 in r4
-
- mov r3=cr.dcr
- mov r5=cr.itm
- mov r7=cr.iva;;
-
- st8 [r2]=r3,8*8
- st8 [r4]=r5,3*8
- st8 [r6]=r7,3*8;; // 48 byte rements
-
- mov r3=cr.pta;;
- st8 [r2]=r3,8*8;; // 64 byte rements
-
-// if PSR.ic=0, reading interruption registers causes an illegal operation
fault
- mov r3=psr;;
- tbit.nz.unc p6,p0=r3,PSR_IC;; // PSI Valid Log bit pos.
test
-(p6) st8 [r2]=r0,9*8+160 // increment by 232 byte inc.
-begin_skip_intr_regs:
-(p6) br SkipIntrRegs;;
-
- add r4=8,r2 // duplicate r2 in r4
- add r6=2*8,r2 // duplicate r2 in r6
-
- mov r3=cr.ipsr
- mov r5=cr.isr
- mov r7=r0;;
- st8 [r2]=r3,3*8
- st8 [r4]=r5,3*8
- st8 [r6]=r7,3*8;;
-
- mov r3=cr.iip
- mov r5=cr.ifa
- mov r7=cr.itir;;
- st8 [r2]=r3,3*8
- st8 [r4]=r5,3*8
- st8 [r6]=r7,3*8;;
-
- mov r3=cr.iipa
- mov r5=cr.ifs
- mov r7=cr.iim;;
- st8 [r2]=r3,3*8
- st8 [r4]=r5,3*8
- st8 [r6]=r7,3*8;;
-
- mov r3=cr25;; // cr.iha
- st8 [r2]=r3,160;; // 160 byte rement
-
-SkipIntrRegs:
- st8 [r2]=r0,152;; // another 152 byte .
-
- add r4=8,r2 // duplicate r2 in r4
- add r6=2*8,r2 // duplicate r2 in r6
-
- mov r3=cr.lid
-// mov r5=cr.ivr // cr.ivr, don't read it
- mov r7=cr.tpr;;
- st8 [r2]=r3,3*8
- st8 [r4]=r5,3*8
- st8 [r6]=r7,3*8;;
-
- mov r3=r0 // cr.eoi => cr67
- mov r5=r0 // cr.irr0 => cr68
- mov r7=r0;; // cr.irr1 => cr69
- st8 [r2]=r3,3*8
- st8 [r4]=r5,3*8
- st8 [r6]=r7,3*8;;
-
- mov r3=r0 // cr.irr2 => cr70
- mov r5=r0 // cr.irr3 => cr71
- mov r7=cr.itv;;
- st8 [r2]=r3,3*8
- st8 [r4]=r5,3*8
- st8 [r6]=r7,3*8;;
-
- mov r3=cr.pmv
- mov r5=cr.cmcv;;
- st8 [r2]=r3,7*8
- st8 [r4]=r5,7*8;;
-
- mov r3=r0 // cr.lrr0 => cr80
- mov r5=r0;; // cr.lrr1 => cr81
- st8 [r2]=r3,23*8
- st8 [r4]=r5,23*8;;
-
- adds r2=25*8,r2;;
-
-cSaveARs:
-// save ARs
- add r4=8,r2 // duplicate r2 in r4
- add r6=2*8,r2 // duplicate r2 in r6
-
- mov r3=ar.k0
- mov r5=ar.k1
- mov r7=ar.k2;;
- st8 [r2]=r3,3*8
- st8 [r4]=r5,3*8
- st8 [r6]=r7,3*8;;
-
- mov r3=ar.k3
- mov r5=ar.k4
- mov r7=ar.k5;;
- st8 [r2]=r3,3*8
- st8 [r4]=r5,3*8
- st8 [r6]=r7,3*8;;
-
- mov r3=ar.k6
- mov r5=ar.k7
- mov r7=r0;; // ar.kr8
- st8 [r2]=r3,10*8
- st8 [r4]=r5,10*8
- st8 [r6]=r7,10*8;; // rement by 72 bytes
-
- mov r3=ar.rsc
- mov ar.rsc=r0 // put RSE in
enforced lazy mode
- mov r5=ar.bsp
- ;;
- mov r7=ar.bspstore;;
- st8 [r2]=r3,3*8
- st8 [r4]=r5,3*8
- st8 [r6]=r7,3*8;;
-
- mov r3=ar.rnat;;
- st8 [r2]=r3,8*13 // increment by 13x8 bytes
-
- mov r3=ar.ccv;;
- st8 [r2]=r3,8*4
-
- mov r3=ar.unat;;
- st8 [r2]=r3,8*4
-
- mov r3=ar.fpsr;;
- st8 [r2]=r3,8*4
-
- mov r3=ar.itc;;
- st8 [r2]=r3,160 // 160
-
- mov r3=ar.pfs;;
- st8 [r2]=r3,8
-
- mov r3=ar.lc;;
- st8 [r2]=r3,8
-
- mov r3=ar.ec;;
- st8 [r2]=r3
- add r2=8*62,r2 //padding
-
-// save RRs
- mov ar.lc=0x08-1
- movl r4=0x00;;
-
-cStRR:
- dep.z r5=r4,61,3;;
- mov r3=rr[r5];;
- st8 [r2]=r3,8
- add r4=1,r4
- br.cloop.sptk.few cStRR
+//EndStub//////////////////////////////////////////////////////////////////////
+
+
+//++
+// Name:
+// ia64_sal_os_state_restore()
+//
+// Stub Description:
+//
+// Restore the SAL/OS state. This is sensitive to the layout of struct
+// ia64_sal_os_state in mca.h.
+//
+// r2 contains the return address, r3 contains either
+// IA64_MCA_CPU_MCA_STACK_OFFSET or IA64_MCA_CPU_INIT_STACK_OFFSET.
+//
+//--
+
+ia64_sal_os_state_restore:
+ mov b0=r2 // save return address
+ GET_IA64_MCA_DATA(r2)
+ add r3=MCA_SOS_OFFSET+IA64_SAL_OS_STATE_COMMON_OFFSET, r3
+ ;;
+ add r3=r2, r3 // common struct ia64_sal_os_state on MCA or
INIT stack
+ ;;
+ ld8 r12=[r3],8 // SAL return address, physical
+ ;;
+ ld8 r9=[r3],8 // SAL GP, physical
+ ;;
+ ld8 r22=[r3],8 // pal_min_state
+ ;;
+ ld8 r8=[r3],8 // OS status
;;
-end_os_mca_dump:
- br ia64_os_mca_done_dump;;
+ ld8 r10=[r3],8 // OS context
+ br.sptk b0
//EndStub//////////////////////////////////////////////////////////////////////
//++
// Name:
-// ia64_os_mca_proc_state_restore()
+// ia64_pt_regs_save()
//
// Stub Description:
//
-// This is a stub to restore the saved processor state during MCHK
+// Create a struct pt_regs from the current registers plus the contents
+// of pal_min_state.
//
+// r2 contains the return address, r3 contains either
+// IA64_MCA_CPU_MCA_STACK_OFFSET or IA64_MCA_CPU_INIT_STACK_OFFSET.
+//
+// On entry, sos->pal_min_state is physical, on exit it is virtual.
+//
+// On entry RBS is still on the original stack, this routine switches RBS
+// to use the MCA/INIT stack. MCA/INIT handlers are entered in RSE lazy
+// mode, with cover already issued.
+//
//--
-ia64_os_mca_proc_state_restore:
+#define ms r4
+#define regs r5
+#define save_unat r6
+#define temp1 r2 /* careful, it overlaps with input
registers */
+#define temp2 r3 /* careful, it overlaps with input
registers */
+#define temp3 r7
+#define temp4 r8
-// Restore bank1 GR16-31
+ia64_pt_regs_save:
+ add regs=MCA_PT_REGS_OFFSET, r3
+ add temp2=MCA_SOS_OFFSET+IA64_SAL_OS_STATE_PAL_MIN_STATE_OFFSET, r3
+ mov b0=r2 // save return address
+ GET_IA64_MCA_DATA(temp1)
+ ;;
+ add temp2=temp2, temp1 // struct ia64_sal_os_state.pal_min_state on
MCA or INIT stack
+ add regs=regs, temp1 // struct pt_regs on MCA or INIT stack
+ ;;
+ // Address of minstate area provided by PAL is physical, uncacheable.
+ // Convert to Linux virtual address in region 6 for C code.
+ ld8 ms=[temp2] // pal_min_state, physical
+ ;;
+ dep temp1=-1,ms,62,2 // set region 6
+ ;;
+ st8 [temp2]=temp1 // pal_min_state, virtual
+
+ // copy the ar, cr, pr, br entries
+ add temp1=IA64_PMSA_PR_OFFSET, ms
+ add temp2=IA64_PT_REGS_PR_OFFSET, regs
+ ;;
+ ld8 temp3=[temp1],8 // pmsa_pr
+ ;;
+ st8 [temp2]=temp3
+ add temp2=IA64_PT_REGS_B0_OFFSET, regs
+ ld8 temp4=[temp1],8 // pmsa_br0
+ ;;
+ st8 [temp2]=temp4
+ add temp2=IA64_PT_REGS_AR_RSC_OFFSET, regs
+ ld8 temp3=[temp1],8 // pmsa_rsc
+ ;;
+ st8 [temp2]=temp3
+ // if ipsr.ic then use pmsa_{iip,ipsr,ifs}, else use pmsa_{xip,xpsr,xfs}
+ // we just copied pmsa_rsc, so temp1 contains &pmsa_iip.
+ add temp2=IA64_PMSA_IPSR_OFFSET, ms
+ ;;
+ ld8 temp3=[temp2] // pmsa_ipsr
+ ;;
+ tbit.z p6,p0=temp3, 13 // test ipsr.ic
+ ;;
+(p6) add temp1=IA64_PMSA_XIP_OFFSET-IA64_PMSA_IIP_OFFSET, temp1
+ ;;
+ ld8 temp3=[temp1],8 // save iip or xip
+ add temp2=IA64_PT_REGS_CR_IIP_OFFSET, regs
+ ;;
+ st8 [temp2]=temp3
+ ld8 temp4=[temp1],8 // save ispr or xpsr
+ add temp2=IA64_PT_REGS_CR_IPSR_OFFSET, regs
+ ;;
+ st8 [temp2]=temp4
+ ld8 temp3=[temp1],8 // save ifs or xfs
+ add temp2=IA64_PT_REGS_CR_IFS_OFFSET, regs
+ ;;
+ st8 [temp2]=temp3
+ mov temp3=ar.unat
+ add temp2=IA64_PT_REGS_AR_UNAT_OFFSET, regs
+ ;;
+ st8 [temp2]=temp3 // save ar.unat
+ mov temp3=ar.pfs
+ add temp2=IA64_PT_REGS_AR_PFS_OFFSET, regs
+ ;;
+ st8 [temp2]=temp3 // save ar.pfs
+ mov temp3=ar.bspstore
+ add temp2=IA64_PT_REGS_AR_BSPSTORE_OFFSET, regs
+ ;;
+ extr.u temp4=temp3, 61, 3 // region from old ar.bspstore
+ ;;
+ cmp.ne p6,p0=temp4, r0 // is old ar.bspstore a virtual
address?
+(p6) br.cond.dpnt 1f
+ DATA_PA_TO_VA(temp3,temp4) // no, C code needs a virtual
address
+ ;;
+1: st8 [temp2]=temp3 // save ar.bspstore
+ mov temp3=ar.rnat
+ add temp2=IA64_PT_REGS_AR_RNAT_OFFSET, regs
+ ;;
+ st8 [temp2]=temp3 // save ar.rnat
+ mov temp1=-MCA_PT_REGS_OFFSET+IA64_RBS_OFFSET
+ ;;
+ add temp1=temp1, regs // step to RBS offset in
current stack
+ add temp2=IA64_PT_REGS_LOADRS_OFFSET, regs
+ ;;
+ mov ar.bspstore=temp1 // switch RBS to MCA/INIT stack
+ ;;
+ mov temp3=ar.bsp
+ ;;
+ sub temp3=temp3, temp1 // ar.bsp - ar.bspstore
+ ;;
+ shl temp3=temp3,16 // compute ar.rsc to be used
for "loadrs"
+ ;;
+ st8 [temp2]=temp3 // save loadrs
+
+ // copy the general registers that are not done by SAVE_REST
+ mov save_unat=ar.unat // save current unat
+ ld8 temp1=[ms] // pmsa_nat_bits
+ ;;
+ mov ar.unat=temp1 // load NaT bits for
registers in pal_min_state
+ add temp1=IA64_PMSA_GR_OFFSET+(1-1)*8, ms //
&pal_min_state_area.pmsa_gr[1-1], saved GR1
+ add temp2=IA64_PMSA_GR_OFFSET+(2-1)*8, ms //
&pal_min_state_area.pmsa_gr[2-1], saved GR2
+ add temp3=IA64_PMSA_GR_OFFSET+(3-1)*8, ms //
&pal_min_state_area.pmsa_gr[3-1], saved GR3
+ ;;
+ ld8.fill r1=[temp1]
+ ld8.fill r16=[temp2] // copy r2 in r16
+ ld8.fill r17=[temp3] // copy r3 in r17
+ ;;
+ add temp1=IA64_PMSA_GR_OFFSET+(8-1)*8, ms //
&pal_min_state_area.pmsa_gr[8-1], saved GR8
+ add temp2=IA64_PMSA_GR_OFFSET+(9-1)*8, ms //
&pal_min_state_area.pmsa_gr[9-1], saved GR9
+ ;;
+ ld8.fill r8=[temp1],16
+ ld8.fill r9=[temp2],16
+ ;;
+ ld8.fill r10=[temp1],16
+ ld8.fill r11=[temp2],16
+ ;;
+ ld8.fill r12=[temp1],16
+ ld8.fill r13=[temp2],16
+ ;;
+ ld8.fill r14=[temp1]
+ ld8.fill r15=[temp2]
+ ;;
+ mov ar.unat=save_unat // restore current unat
+ add temp1=IA64_PT_REGS_R8_OFFSET, regs
+ add temp2=IA64_PT_REGS_R9_OFFSET, regs
+ ;;
+.mem.offset 0,0; st8.spill [temp1]=r8,16
+.mem.offset 8,0; st8.spill [temp2]=r9,16
+ ;;
+.mem.offset 0,0; st8.spill [temp1]=r10
+.mem.offset 8,0; st8.spill [temp2]=r11
+ ;;
+ add temp1=IA64_PT_REGS_R1_OFFSET, regs
+ add temp2=IA64_PT_REGS_R12_OFFSET, regs
+ ;;
+.mem.offset 0,0; st8.spill [temp1]=r1
+.mem.offset 8,0; st8.spill [temp2]=r12
+ ;;
+ add temp1=IA64_PT_REGS_R13_OFFSET, regs
+ add temp2=IA64_PT_REGS_R14_OFFSET, regs
+ ;;
+.mem.offset 0,0; st8.spill [temp1]=r13
+.mem.offset 8,0; st8.spill [temp2]=r14
+ ;;
+ add temp1=IA64_PT_REGS_R15_OFFSET, regs
+ add temp2=IA64_PT_REGS_R2_OFFSET, regs
+ ;;
+.mem.offset 0,0; st8.spill [temp1]=r15
+.mem.offset 8,0; st8.spill [temp2]=r16,8 // actually r2
+ ;;
+ st8.spill [temp2]=r17 // actually r3
+ ;;
+
+ // restore bank 1 gr16-31 from pal_minstate, including NaT bits.
+ // SAVE_REST assumes that these registers are unchanged from the
+ // interrupt.
+
+ mov save_unat=ar.unat // save current unat
+ ld8 temp1=[ms] // pmsa_nat_bits
+ ;;
+ mov ar.unat=temp1 // load NaT bits for registers
in pal_min_state
+ add temp1=IA64_PMSA_BANK1_GR_OFFSET, ms //
&pal_min_state_area.pmsa_bank1_gr[0], saved GR16
+ add temp2=IA64_PMSA_BANK1_GR_OFFSET+8, ms //
&pal_min_state_area.pmsa_bank1_gr[1], saved GR17
+ ;;
+ ld8.fill r16=[temp1],16
+ ld8.fill r17=[temp2],16
+ ;;
+ ld8.fill r18=[temp1],16
+ ld8.fill r19=[temp2],16
+ ;;
+ ld8.fill r20=[temp1],16
+ ld8.fill r21=[temp2],16
+ ;;
+ ld8.fill r22=[temp1],16
+ ld8.fill r23=[temp2],16
+ ;;
+ ld8.fill r24=[temp1],16
+ ld8.fill r25=[temp2],16
+ ;;
+ ld8.fill r26=[temp1],16
+ ld8.fill r27=[temp2],16
+ ;;
+ ld8.fill r28=[temp1],16
+ ld8.fill r29=[temp2],16
+ ;;
+ ld8.fill r30=[temp1],16
+ ld8.fill r31=[temp2],16
+ ;;
+ mov ar.unat=save_unat // restore current unat
+
+ // get ready for SAVE_REST
+ add temp1=IA64_PT_REGS_R16_OFFSET, regs
+ add temp2=IA64_PT_REGS_R17_OFFSET, regs
+ mov r8=ar.ccv
+ mov r9=ar.csd
+ mov r10=ar.ssd
+ movl r11=FPSR_DEFAULT
+ ;;
+ SAVE_REST
+
+ br.sptk b0
+
+//EndStub//////////////////////////////////////////////////////////////////////
+
+
+//++
+// Name:
+// ia64_pt_regs_restore()
+//
+// Stub Description:
+//
+// Restore the modified kernel registers from the struct pt_regs that was
+// created from the entry state plus pal_min_state.
+//
+// r2 contains the return address, r3 contains either
+// IA64_MCA_CPU_MCA_STACK_OFFSET or IA64_MCA_CPU_INIT_STACK_OFFSET.
+//
+// On entry, pal_min_state is virtual, on exit it is physical.
+//
+// On entry RBS is on the MCA/INIT stack, this routine switches RBS
+// back to the previous stack.
+//
+//--
+
+ia64_pt_regs_restore:
+ mov b0=r2 // save return address
GET_IA64_MCA_DATA(r2)
+ add r3=MCA_SOS_OFFSET+IA64_SAL_OS_STATE_PAL_MIN_STATE_OFFSET, r3
+ ;;
+ add r3=r2, r3 // struct ia64_sal_os_state.pal_min_state on
MCA or INIT stack
+ ;;
+ // Convert Linux virtual address in region 6 back to physical.
+ ld8 ms=[r3] // pal_min_state, virtual
;;
- add r2 = IA64_MCA_CPU_PROC_STATE_DUMP_OFFSET, r2
+ DATA_VA_TO_PA(ms)
+ ;;
+ st8 [r3]=ms
+ // FIXME: restore pt_regs
+ br.sptk b0
+
+//EndStub//////////////////////////////////////////////////////////////////////
-restore_GRs: // restore bank-1 GRs 16-31
- bsw.1;;
- add r3=16*8,r2;; // to get to NaT of GR 16-31
- ld8 r3=[r3];;
- mov ar.unat=r3;; // first restore NaT
-
- ld8.fill r16=[r2],8;;
- ld8.fill r17=[r2],8;;
- ld8.fill r18=[r2],8;;
- ld8.fill r19=[r2],8;;
- ld8.fill r20=[r2],8;;
- ld8.fill r21=[r2],8;;
- ld8.fill r22=[r2],8;;
- ld8.fill r23=[r2],8;;
- ld8.fill r24=[r2],8;;
- ld8.fill r25=[r2],8;;
- ld8.fill r26=[r2],8;;
- ld8.fill r27=[r2],8;;
- ld8.fill r28=[r2],8;;
- ld8.fill r29=[r2],8;;
- ld8.fill r30=[r2],8;;
- ld8.fill r31=[r2],8;;
-
- ld8 r3=[r2],8;; // increment to skip NaT
- bsw.0;;
-
-restore_BRs:
- add r4=8,r2 // duplicate r2 in r4
- add r6=2*8,r2;; // duplicate r2 in r4
-
- ld8 r3=[r2],3*8
- ld8 r5=[r4],3*8
- ld8 r7=[r6],3*8;;
- mov b0=r3
- mov b1=r5
- mov b2=r7;;
-
- ld8 r3=[r2],3*8
- ld8 r5=[r4],3*8
- ld8 r7=[r6],3*8;;
- mov b3=r3
- mov b4=r5
- mov b5=r7;;
-
- ld8 r3=[r2],2*8
- ld8 r5=[r4],2*8;;
- mov b6=r3
- mov b7=r5;;
-
-restore_CRs:
- add r4=8,r2 // duplicate r2 in r4
- add r6=2*8,r2;; // duplicate r2 in r4
-
- ld8 r3=[r2],8*8
- ld8 r5=[r4],3*8
- ld8 r7=[r6],3*8;; // 48 byte increments
- mov cr.dcr=r3
- mov cr.itm=r5
- mov cr.iva=r7;;
-
- ld8 r3=[r2],8*8;; // 64 byte increments
-// mov cr.pta=r3
-
-
-// if PSR.ic=1, reading interruption registers causes an illegal operation
fault
- mov r3=psr;;
- tbit.nz.unc p6,p0=r3,PSR_IC;; // PSI Valid Log bit pos.
test
-(p6) st8 [r2]=r0,9*8+160 // increment by 232 byte inc.
-
-begin_rskip_intr_regs:
-(p6) br rSkipIntrRegs;;
-
- add r4=8,r2 // duplicate r2 in r4
- add r6=2*8,r2;; // duplicate r2 in r4
-
- ld8 r3=[r2],3*8
- ld8 r5=[r4],3*8
- ld8 r7=[r6],3*8;;
- mov cr.ipsr=r3
-// mov cr.isr=r5 // cr.isr is read only
-
- ld8 r3=[r2],3*8
- ld8 r5=[r4],3*8
- ld8 r7=[r6],3*8;;
- mov cr.iip=r3
- mov cr.ifa=r5
- mov cr.itir=r7;;
-
- ld8 r3=[r2],3*8
- ld8 r5=[r4],3*8
- ld8 r7=[r6],3*8;;
- mov cr.iipa=r3
- mov cr.ifs=r5
- mov cr.iim=r7
-
- ld8 r3=[r2],160;; // 160 byte increment
- mov cr.iha=r3
-
-rSkipIntrRegs:
- ld8 r3=[r2],152;; // another 152 byte inc.
-
- add r4=8,r2 // duplicate r2 in r4
- add r6=2*8,r2;; // duplicate r2 in r6
-
- ld8 r3=[r2],8*3
- ld8 r5=[r4],8*3
- ld8 r7=[r6],8*3;;
- mov cr.lid=r3
-// mov cr.ivr=r5 // cr.ivr is read only
- mov cr.tpr=r7;;
-
- ld8 r3=[r2],8*3
- ld8 r5=[r4],8*3
- ld8 r7=[r6],8*3;;
-// mov cr.eoi=r3
-// mov cr.irr0=r5 // cr.irr0 is read only
-// mov cr.irr1=r7;; // cr.irr1 is read only
-
- ld8 r3=[r2],8*3
- ld8 r5=[r4],8*3
- ld8 r7=[r6],8*3;;
-// mov cr.irr2=r3 // cr.irr2 is read only
-// mov cr.irr3=r5 // cr.irr3 is read only
- mov cr.itv=r7;;
-
- ld8 r3=[r2],8*7
- ld8 r5=[r4],8*7;;
- mov cr.pmv=r3
- mov cr.cmcv=r5;;
-
- ld8 r3=[r2],8*23
- ld8 r5=[r4],8*23;;
- adds r2=8*23,r2
- adds r4=8*23,r4;;
-// mov cr.lrr0=r3
-// mov cr.lrr1=r5
-
- adds r2=8*2,r2;;
-
-restore_ARs:
- add r4=8,r2 // duplicate r2 in r4
- add r6=2*8,r2;; // duplicate r2 in r4
-
- ld8 r3=[r2],3*8
- ld8 r5=[r4],3*8
- ld8 r7=[r6],3*8;;
- mov ar.k0=r3
- mov ar.k1=r5
- mov ar.k2=r7;;
-
- ld8 r3=[r2],3*8
- ld8 r5=[r4],3*8
- ld8 r7=[r6],3*8;;
- mov ar.k3=r3
- mov ar.k4=r5
- mov ar.k5=r7;;
-
- ld8 r3=[r2],10*8
- ld8 r5=[r4],10*8
- ld8 r7=[r6],10*8;;
- mov ar.k6=r3
- mov ar.k7=r5
- ;;
-
- ld8 r3=[r2],3*8
- ld8 r5=[r4],3*8
- ld8 r7=[r6],3*8;;
-// mov ar.rsc=r3
-// mov ar.bsp=r5 // ar.bsp is read only
- mov ar.rsc=r0 // make sure that
RSE is in enforced lazy mode
- ;;
- mov ar.bspstore=r7;;
-
- ld8 r9=[r2],8*13;;
- mov ar.rnat=r9
-
- mov ar.rsc=r3
- ld8 r3=[r2],8*4;;
- mov ar.ccv=r3
-
- ld8 r3=[r2],8*4;;
- mov ar.unat=r3
-
- ld8 r3=[r2],8*4;;
- mov ar.fpsr=r3
-
- ld8 r3=[r2],160;; // 160
-// mov ar.itc=r3
-
- ld8 r3=[r2],8;;
- mov ar.pfs=r3
-
- ld8 r3=[r2],8;;
- mov ar.lc=r3
-
- ld8 r3=[r2];;
- mov ar.ec=r3
- add r2=8*62,r2;; // padding
-
-restore_RRs:
- mov r5=ar.lc
- mov ar.lc=0x08-1
- movl r4=0x00;;
-cStRRr:
- dep.z r7=r4,61,3
- ld8 r3=[r2],8;;
- mov rr[r7]=r3 // what are its access
previledges?
- add r4=1,r4
- br.cloop.sptk.few cStRRr
+
+//++
+// Name:
+// ia64_set_kernel_registers()
+//
+// Stub Description:
+//
+// Set the registers that are required by the kernel to run on an MCA/INIT
+// stack.
+//
+// r2 contains the return address, r3 contains either
+// IA64_MCA_CPU_MCA_STACK_OFFSET or IA64_MCA_CPU_INIT_STACK_OFFSET.
+//
+//--
+
+ia64_set_kernel_registers:
+ add temp3=MCA_SP_OFFSET, r3
+ add temp4=MCA_SOS_OFFSET+IA64_SAL_OS_STATE_OS_GP_OFFSET, r3
+ mov b0=r2 // save return address
+ GET_IA64_MCA_DATA(r2)
+ ;;
+ add temp4=temp4, r2 // &struct ia64_sal_os_state.os_gp
+ ;;
+ ld8 r1=[temp4] // OS GP from SAL OS state
+ add r12=r2, temp3 // kernel stack pointer on MCA/INIT stack
+ add r13=r2, r3 // set current to start of MCA/INIT stack
;;
- mov ar.lc=r5
+ DATA_PA_TO_VA(r1,temp1)
+ DATA_PA_TO_VA(r12,temp2)
+ DATA_PA_TO_VA(r13,temp3)
;;
-end_os_mca_restore:
- br ia64_os_mca_done_restore;;
+ mov IA64_KR(CURRENT)=r13
+
+ // FIXME: do I need to wire IA64_KR_CURRENT_STACK and
IA64_TR_CURRENT_STACK?
+
+ br.sptk b0
//EndStub//////////////////////////////////////////////////////////////////////
+#undef ms
+#undef regs
+#undef save_unat
+#undef temp1
+#undef temp2
+#undef temp3
+#undef temp4
+
// ok, the issue here is that we need to save state information so
// it can be useable by the kernel debugger and show regs routines.
@@ -870,51 +725,44 @@ end_os_mca_restore:
GLOBAL_ENTRY(ia64_monarch_init_handler)
.prologue
// stash the information the SAL passed to os
- SAL_TO_OS_MCA_HANDOFF_STATE_SAVE(r2)
- ;;
- SAVE_MIN_WITH_COVER
- ;;
- mov r8=cr.ifa
- mov r9=cr.isr
- adds r3=8,r2 // set up second base pointer
- ;;
- SAVE_REST
+ movl r2=1f // return address
+ mov r3=IA64_MCA_CPU_INIT_STACK_OFFSET // use the INIT stack
+	br.sptk	ia64_sal_os_state_save		// save the SAL to OS INIT handoff state
+1:
+
+	// create struct pt_regs from min_state, switch stack, set kernel registers
+ movl r2=1f // return address
+ mov r3=IA64_MCA_CPU_MCA_STACK_OFFSET // use the MCA stack
+ br.sptk ia64_pt_regs_save
+1:
+ //
+ // Enter virtual mode from physical mode
+ VIRTUAL_MODE_ENTER(r2, r3, ia64_os_init_virtual_begin, r4)
+ia64_os_init_virtual_begin:
-// ok, enough should be saved at this point to be dangerous, and supply
-// information for a dump
-// We need to switch to Virtual mode before hitting the C functions.
+ mov ar.rsc=3 // set eager mode for C handler
-	movl	r2=IA64_PSR_IT|IA64_PSR_IC|IA64_PSR_DT|IA64_PSR_RT|IA64_PSR_DFH|IA64_PSR_BN
- mov r3=psr // get the current psr, minimum enabled at this point
- ;;
- or r2=r2,r3
- ;;
- movl r3=IVirtual_Switch
- ;;
- mov cr.iip=r3 // short return to set the appropriate bits
- mov cr.ipsr=r2 // need to do an rfi to set appropriate bits
- ;;
- rfi
- ;;
-IVirtual_Switch:
- //
- // We should now be running virtual
- //
- // Let's call the C handler to get the rest of the state info
- //
-	alloc r14=ar.pfs,0,0,2,0		// now it's safe (must be first in insn group!)
- ;;
- adds out0=16,sp // out0 = pointer to pt_regs
- ;;
- DO_SAVE_SWITCH_STACK
- .body
-	adds out1=16,sp				// out0 = pointer to switch_stack
+ // Call virtual mode handler
+ br.call.sptk.many b0=ia64_init_handler;;
- br.call.sptk.many rp=ia64_init_handler
-.ret1:
+ // Revert back to physical mode before going back to SAL
+ PHYSICAL_MODE_ENTER(r2, r3, ia64_os_init_virtual_end, r4)
+ia64_os_init_virtual_end:
+
+ // restore struct pt_regs (kernel modified registers), switch back to
+ // previous stack
+ movl r2=1f // return address
+ mov r3=IA64_MCA_CPU_INIT_STACK_OFFSET // use the INIT stack
+ br.sptk ia64_pt_regs_restore
+1:
+
+ movl r2=1f // return address
+ mov r3=IA64_MCA_CPU_INIT_STACK_OFFSET // use the INIT stack
+	br.sptk	ia64_sal_os_state_restore	// restore the SAL to OS INIT handoff state
+1:
+ mov b0=r12;; // SAL_CHECK return address
+ br b0
-return_from_init:
- br.sptk return_from_init
END(ia64_monarch_init_handler)
//
@@ -926,3 +774,4 @@ END(ia64_monarch_init_handler)
GLOBAL_ENTRY(ia64_slave_init_handler)
1: br.sptk 1b
END(ia64_slave_init_handler)
+
Index: linux/arch/ia64/kernel/asm-offsets.c
===================================================================
--- linux.orig/arch/ia64/kernel/asm-offsets.c	2005-02-15 13:27:38.000000000 +1100
+++ linux/arch/ia64/kernel/asm-offsets.c	2005-02-15 13:27:42.000000000 +1100
@@ -211,17 +211,41 @@ void foo(void)
#endif
BLANK();
- DEFINE(IA64_MCA_CPU_PROC_STATE_DUMP_OFFSET,
- offsetof (struct ia64_mca_cpu, proc_state_dump));
- DEFINE(IA64_MCA_CPU_STACK_OFFSET,
- offsetof (struct ia64_mca_cpu, stack));
- DEFINE(IA64_MCA_CPU_STACKFRAME_OFFSET,
- offsetof (struct ia64_mca_cpu, stackframe));
- DEFINE(IA64_MCA_CPU_RBSTORE_OFFSET,
- offsetof (struct ia64_mca_cpu, rbstore));
+ DEFINE(IA64_MCA_CPU_MCA_STACK_OFFSET,
+ offsetof (struct ia64_mca_cpu, mca_stack));
DEFINE(IA64_MCA_CPU_INIT_STACK_OFFSET,
offsetof (struct ia64_mca_cpu, init_stack));
BLANK();
+ DEFINE(IA64_SAL_OS_STATE_COMMON_OFFSET,
+ offsetof (struct ia64_sal_os_state, sal_ra));
+ DEFINE(IA64_SAL_OS_STATE_OS_GP_OFFSET,
+ offsetof (struct ia64_sal_os_state, os_gp));
+ DEFINE(IA64_SAL_OS_STATE_PAL_MIN_STATE_OFFSET,
+ offsetof (struct ia64_sal_os_state, pal_min_state));
+ DEFINE(IA64_SAL_OS_STATE_PROC_STATE_PARAM_OFFSET,
+ offsetof (struct ia64_sal_os_state, proc_state_param));
+ DEFINE(IA64_SAL_OS_STATE_SIZE,
+ sizeof (struct ia64_sal_os_state));
+ DEFINE(IA64_PMSA_GR_OFFSET,
+ offsetof (struct pal_min_state_area_s, pmsa_gr));
+ DEFINE(IA64_PMSA_BANK1_GR_OFFSET,
+ offsetof (struct pal_min_state_area_s, pmsa_bank1_gr));
+ DEFINE(IA64_PMSA_PR_OFFSET,
+ offsetof (struct pal_min_state_area_s, pmsa_pr));
+ DEFINE(IA64_PMSA_BR0_OFFSET,
+ offsetof (struct pal_min_state_area_s, pmsa_br0));
+ DEFINE(IA64_PMSA_RSC_OFFSET,
+ offsetof (struct pal_min_state_area_s, pmsa_rsc));
+ DEFINE(IA64_PMSA_IIP_OFFSET,
+ offsetof (struct pal_min_state_area_s, pmsa_iip));
+ DEFINE(IA64_PMSA_IPSR_OFFSET,
+ offsetof (struct pal_min_state_area_s, pmsa_ipsr));
+ DEFINE(IA64_PMSA_IFS_OFFSET,
+ offsetof (struct pal_min_state_area_s, pmsa_ifs));
+ DEFINE(IA64_PMSA_XIP_OFFSET,
+ offsetof (struct pal_min_state_area_s, pmsa_xip));
+ BLANK();
+
/* used by fsys_gettimeofday in arch/ia64/kernel/fsys.S */
	DEFINE(IA64_TIME_INTERPOLATOR_ADDRESS_OFFSET, offsetof (struct time_interpolator, addr));
	DEFINE(IA64_TIME_INTERPOLATOR_SOURCE_OFFSET, offsetof (struct time_interpolator, source));
Index: linux/include/asm-ia64/mca_asm.h
===================================================================
--- linux.orig/include/asm-ia64/mca_asm.h	2005-02-15 13:27:38.000000000 +1100
+++ linux/include/asm-ia64/mca_asm.h 2005-02-15 13:27:42.000000000 +1100
@@ -8,6 +8,8 @@
* Copyright (C) 2000 David Mosberger-Tang <[EMAIL PROTECTED]>
* Copyright (C) 2002 Intel Corp.
* Copyright (C) 2002 Jenna Hall <[EMAIL PROTECTED]>
+ * Copyright (C) 2004 Silicon Graphics, Inc
+ * Copyright (C) 2004 Keith Owens <[EMAIL PROTECTED]>
*/
#ifndef _ASM_IA64_MCA_ASM_H
#define _ASM_IA64_MCA_ASM_H
@@ -207,106 +209,30 @@
;;
/*
- * The following offsets capture the order in which the
- * RSE related registers from the old context are
- * saved onto the new stack frame.
- *
- * +-----------------------+
- * |NDIRTY [BSP - BSPSTORE]|
- * +-----------------------+
- * | RNAT |
- * +-----------------------+
- * | BSPSTORE |
- * +-----------------------+
- * | IFS |
- * +-----------------------+
- * | PFS |
- * +-----------------------+
- * | RSC |
- * +-----------------------+ <-------- Bottom of new stack frame
- */
-#define rse_rsc_offset 0
-#define rse_pfs_offset (rse_rsc_offset+0x08)
-#define rse_ifs_offset (rse_pfs_offset+0x08)
-#define rse_bspstore_offset (rse_ifs_offset+0x08)
-#define rse_rnat_offset (rse_bspstore_offset+0x08)
-#define rse_ndirty_offset (rse_rnat_offset+0x08)
-
-/*
- * rse_switch_context
- *
- * 1. Save old RSC onto the new stack frame
- * 2. Save PFS onto new stack frame
- * 3. Cover the old frame and start a new frame.
- * 4. Save IFS onto new stack frame
- * 5. Save the old BSPSTORE on the new stack frame
- * 6. Save the old RNAT on the new stack frame
- * 7. Write BSPSTORE with the new backing store pointer
- * 8. Read and save the new BSP to calculate the #dirty registers
- * NOTE: Look at pages 11-10, 11-11 in PRM Vol 2
- */
-#define rse_switch_context(temp,p_stackframe,p_bspstore)	\
-	;;							\
-	mov	temp=ar.rsc;;					\
-	st8	[p_stackframe]=temp,8;;				\
-	mov	temp=ar.pfs;;					\
-	st8	[p_stackframe]=temp,8;				\
-	cover ;;						\
-	mov	temp=cr.ifs;;					\
-	st8	[p_stackframe]=temp,8;;				\
-	mov	temp=ar.bspstore;;				\
-	st8	[p_stackframe]=temp,8;;				\
-	mov	temp=ar.rnat;;					\
-	st8	[p_stackframe]=temp,8;				\
-	mov	ar.bspstore=p_bspstore;;			\
-	mov	temp=ar.bsp;;					\
-	sub	temp=temp,p_bspstore;;				\
-	st8	[p_stackframe]=temp,8;;
-
-/*
- * rse_return_context
- * 1. Allocate a zero-sized frame
- * 2. Store the number of dirty registers RSC.loadrs field
- * 3. Issue a loadrs to insure that any registers from the interrupted
- * context which were saved on the new stack frame have been loaded
- * back into the stacked registers
- * 4. Restore BSPSTORE
- * 5. Restore RNAT
- * 6. Restore PFS
- * 7. Restore IFS
- * 8. Restore RSC
- * 9. Issue an RFI
- */
-#define rse_return_context(psr_mask_reg,temp,p_stackframe)	\
-	;;							\
-	alloc	temp=ar.pfs,0,0,0,0;				\
-	add	p_stackframe=rse_ndirty_offset,p_stackframe;;	\
-	ld8	temp=[p_stackframe];;				\
-	shl	temp=temp,16;;					\
-	mov	ar.rsc=temp;;					\
-	loadrs;;						\
-	add	p_stackframe=-rse_ndirty_offset+rse_bspstore_offset,p_stackframe;;\
-	ld8	temp=[p_stackframe];;				\
-	mov	ar.bspstore=temp;;				\
-	add	p_stackframe=-rse_bspstore_offset+rse_rnat_offset,p_stackframe;;\
-	ld8	temp=[p_stackframe];;				\
-	mov	ar.rnat=temp;;					\
-	add	p_stackframe=-rse_rnat_offset+rse_pfs_offset,p_stackframe;;	\
-	ld8	temp=[p_stackframe];;				\
-	mov	ar.pfs=temp;;					\
-	add	p_stackframe=-rse_pfs_offset+rse_ifs_offset,p_stackframe;;	\
-	ld8	temp=[p_stackframe];;				\
-	mov	cr.ifs=temp;;					\
-	add	p_stackframe=-rse_ifs_offset+rse_rsc_offset,p_stackframe;;	\
-	ld8	temp=[p_stackframe];;				\
-	mov	ar.rsc=temp ;					\
-	mov	temp=psr;;					\
-	or	temp=temp,psr_mask_reg;;			\
-	mov	cr.ipsr=temp;;					\
-	mov	temp=ip;;					\
-	add	temp=0x30,temp;;				\
-	mov	cr.iip=temp;;					\
-	srlz.i;;						\
-	rfi;;
+ * The MCA and INIT stacks in struct ia64_mca_cpu look like normal kernel
+ * stacks, except that the SAL/OS state is stored at the top of the MCA/INIT
+ * stack. To support concurrent entry to MCA or INIT, as well as MCA over
+ * INIT, each event needs its own SAL/OS state. All entries are 16 byte
+ * aligned.
+ *
+ * +---------------------------+
+ * | SAL/OS state |
+ * +---------------------------+
+ * | pt_regs |
+ * +---------------------------+
+ * | 16 byte scratch area |
+ * +---------------------------+ <-------- SP at start of C MCA handler
+ * | ..... |
+ * +---------------------------+
+ * | RBS for MCA/INIT handler |
+ * +---------------------------+
+ * | struct task, not used yet |
+ * +---------------------------+ <-------- Bottom of MCA/INIT stack
+ */
+
+#define ALIGN16(x) ((x)&~15)
+#define MCA_SOS_OFFSET		ALIGN16(KERNEL_STACK_SIZE-IA64_SAL_OS_STATE_SIZE)
+#define MCA_PT_REGS_OFFSET	ALIGN16(MCA_SOS_OFFSET-IA64_PT_REGS_SIZE)
+#define MCA_SP_OFFSET		ALIGN16(MCA_PT_REGS_OFFSET-16)
#endif /* _ASM_IA64_MCA_ASM_H */
Index: linux/arch/ia64/kernel/mca_drv.c
===================================================================
--- linux.orig/arch/ia64/kernel/mca_drv.c	2005-02-15 13:27:38.000000000 +1100
+++ linux/arch/ia64/kernel/mca_drv.c 2005-02-15 13:27:42.000000000 +1100
@@ -4,6 +4,8 @@
*
* Copyright (C) 2004 FUJITSU LIMITED
* Copyright (C) Hidetoshi Seto ([EMAIL PROTECTED])
+ * Copyright (C) 2005 Silicon Graphics, Inc
+ * Copyright (C) Keith Owens <[EMAIL PROTECTED]>
*/
#include <linux/config.h>
#include <linux/types.h>
@@ -38,10 +40,6 @@
/* max size of SAL error record (default) */
static int sal_rec_max = 10000;
-/* from mca.c */
-static ia64_mca_sal_to_os_state_t *sal_to_os_handoff_state;
-static ia64_mca_os_to_sal_state_t *os_to_sal_handoff_state;
-
/* from mca_drv_asm.S */
extern void *mca_handler_bhhook(void);
@@ -317,7 +315,8 @@ init_record_index_pools(void)
*/
static mca_type_t
-is_mca_global(peidx_table_t *peidx, pal_bus_check_info_t *pbci)
+is_mca_global(peidx_table_t *peidx, pal_bus_check_info_t *pbci,
+ struct ia64_sal_os_state *sos)
{
pal_processor_state_info_t *psp =
(pal_processor_state_info_t*)peidx_psp(peidx);
@@ -328,7 +327,7 @@ is_mca_global(peidx_table_t *peidx, pal_
* Therefore it is local MCA when rendezvous has not been requested.
* Failed to rendezvous, the system must be down.
*/
- switch (sal_to_os_handoff_state->imsto_rendez_state) {
+ switch (sos->rendez_state) {
case -1: /* SAL rendezvous unsuccessful */
return MCA_IS_GLOBAL;
case 0: /* SAL rendezvous not required */
@@ -389,7 +388,8 @@ is_mca_global(peidx_table_t *peidx, pal_
*/
static int
-recover_from_read_error(slidx_table_t *slidx, peidx_table_t *peidx, pal_bus_check_info_t *pbci)
+recover_from_read_error(slidx_table_t *slidx, peidx_table_t *peidx, pal_bus_check_info_t *pbci,
+			struct ia64_sal_os_state *sos)
{
sal_log_mod_error_info_t *smei;
pal_min_state_area_t *pmsa;
@@ -427,7 +427,7 @@ recover_from_read_error(slidx_table_t *s
* setup for resume to bottom half of MCA,
* "mca_handler_bhhook"
*/
-		pmsa = (pal_min_state_area_t *)(sal_to_os_handoff_state->pal_min_state | (6ul<<61));
+ pmsa = sos->pal_min_state;
/* pass to bhhook as 1st argument (gr8) */
pmsa->pmsa_gr[8-1] = smei->target_identifier;
/* set interrupted return address (but no use) */
@@ -459,7 +459,8 @@ recover_from_read_error(slidx_table_t *s
*/
static int
-recover_from_platform_error(slidx_table_t *slidx, peidx_table_t *peidx, pal_bus_check_info_t *pbci)
+recover_from_platform_error(slidx_table_t *slidx, peidx_table_t *peidx, pal_bus_check_info_t *pbci,
+			    struct ia64_sal_os_state *sos)
{
int status = 0;
pal_processor_state_info_t *psp =
(pal_processor_state_info_t*)peidx_psp(peidx);
@@ -469,7 +470,7 @@ recover_from_platform_error(slidx_table_
case 1: /* partial read */
case 3: /* full line(cpu) read */
case 9: /* I/O space read */
- status = recover_from_read_error(slidx, peidx, pbci);
+		status = recover_from_read_error(slidx, peidx, pbci, sos);
break;
case 0: /* unknown */
case 2: /* partial write */
@@ -508,7 +509,8 @@ recover_from_platform_error(slidx_table_
*/
static int
-recover_from_processor_error(int platform, slidx_table_t *slidx, peidx_table_t *peidx, pal_bus_check_info_t *pbci)
+recover_from_processor_error(int platform, slidx_table_t *slidx, peidx_table_t *peidx, pal_bus_check_info_t *pbci,
+			     struct ia64_sal_os_state *sos)
{
pal_processor_state_info_t *psp =
(pal_processor_state_info_t*)peidx_psp(peidx);
@@ -545,7 +547,7 @@ recover_from_processor_error(int platfor
* This means "there are some platform errors".
*/
if (platform)
- return recover_from_platform_error(slidx, peidx, pbci);
+ return recover_from_platform_error(slidx, peidx, pbci, sos);
/*
* On account of strange SAL error record, we cannot recover.
*/
@@ -562,8 +564,7 @@ recover_from_processor_error(int platfor
static int
mca_try_to_recover(void *rec,
- ia64_mca_sal_to_os_state_t *sal_to_os_state,
- ia64_mca_os_to_sal_state_t *os_to_sal_state)
+ struct ia64_sal_os_state *sos)
{
int platform_err;
int n_proc_err;
@@ -571,10 +572,6 @@ mca_try_to_recover(void *rec,
peidx_table_t peidx;
pal_bus_check_info_t pbci;
- /* handoff state from/to mca.c */
- sal_to_os_handoff_state = sal_to_os_state;
- os_to_sal_handoff_state = os_to_sal_state;
-
/* Make index of SAL error record */
platform_err = mca_make_slidx(rec, &slidx);
@@ -597,11 +594,11 @@ mca_try_to_recover(void *rec,
*((u64*)&pbci) = peidx_check_info(&peidx, bus_check, 0);
/* Check whether MCA is global or not */
- if (is_mca_global(&peidx, &pbci))
+ if (is_mca_global(&peidx, &pbci, sos))
return 0;
/* Try to recover a processor error */
-	return recover_from_processor_error(platform_err, &slidx, &peidx, &pbci);
+	return recover_from_processor_error(platform_err, &slidx, &peidx, &pbci, sos);
}
/*
Index: linux/arch/ia64/kernel/mca.c
===================================================================
--- linux.orig/arch/ia64/kernel/mca.c 2005-02-15 13:27:38.000000000 +1100
+++ linux/arch/ia64/kernel/mca.c 2005-02-15 13:27:42.000000000 +1100
@@ -48,6 +48,9 @@
* Delete dead variables and functions.
 *	Reorder to remove the need for forward declarations and to consolidate
* related code.
+ *
+ * 2005-02-13 Keith Owens <[EMAIL PROTECTED]>
+ * Convert MCA/INIT handlers to use per event stacks and SAL/OS
state.
*/
#include <linux/config.h>
#include <linux/types.h>
@@ -84,8 +87,6 @@
#endif
/* Used by mca_asm.S */
-ia64_mca_sal_to_os_state_t ia64_sal_to_os_handoff_state;
-ia64_mca_os_to_sal_state_t ia64_os_to_sal_handoff_state;
u64 ia64_mca_serialize;
DEFINE_PER_CPU(u64, ia64_mca_data); /* == __per_cpu_mca[smp_processor_id()] */
DEFINE_PER_CPU(u64, ia64_mca_per_cpu_pte); /* PTE to map per-CPU area */
@@ -804,38 +805,24 @@ ia64_mca_wakeup_int_handler(int wakeup_i
*/
static void
-ia64_return_to_sal_check(int recover)
+ia64_return_to_sal_check(int recover, struct ia64_sal_os_state *sos)
{
-
- /* Copy over some relevant stuff from the sal_to_os_mca_handoff
- * so that it can be used at the time of os_mca_to_sal_handoff
- */
- ia64_os_to_sal_handoff_state.imots_sal_gp =
- ia64_sal_to_os_handoff_state.imsto_sal_gp;
-
- ia64_os_to_sal_handoff_state.imots_sal_check_ra =
- ia64_sal_to_os_handoff_state.imsto_sal_check_ra;
-
if (recover)
-		ia64_os_to_sal_handoff_state.imots_os_status = IA64_MCA_CORRECTED;
+ sos->os_status = IA64_MCA_CORRECTED;
else
-		ia64_os_to_sal_handoff_state.imots_os_status = IA64_MCA_COLD_BOOT;
+ sos->os_status = IA64_MCA_COLD_BOOT;
/* Default = tell SAL to return to same context */
- ia64_os_to_sal_handoff_state.imots_context = IA64_MCA_SAME_CONTEXT;
-
- ia64_os_to_sal_handoff_state.imots_new_min_state =
- (u64 *)ia64_sal_to_os_handoff_state.pal_min_state;
-
+ sos->context = IA64_MCA_SAME_CONTEXT;
}
/* Function pointer for extra MCA recovery */
int (*ia64_mca_ucmc_extension)
- (void*,ia64_mca_sal_to_os_state_t*,ia64_mca_os_to_sal_state_t*)
+ (void*,struct ia64_sal_os_state*)
= NULL;
int
-ia64_reg_MCA_extension(void *fn)
+ia64_reg_MCA_extension(int (*fn)(void *, struct ia64_sal_os_state *))
{
if (ia64_mca_ucmc_extension)
return 1;
@@ -867,15 +854,15 @@ EXPORT_SYMBOL(ia64_unreg_MCA_extension);
* Monarch also has the duty of sending wakeup-IPIs to pull the
* slave processors out of rendezvous spinloop.
*
- * Inputs : None
+ * Inputs : SAL/OS state for this event.
* Outputs : None
*/
void
-ia64_mca_ucmc_handler(void)
+ia64_mca_ucmc_handler(struct ia64_sal_os_state *sos)
{
pal_processor_state_info_t *psp = (pal_processor_state_info_t *)
- &ia64_sal_to_os_handoff_state.proc_state_param;
- int recover;
+ &sos->proc_state_param;
+ int recover;
/* Get the MCA error record and log it */
ia64_mca_log_sal_error_record(SAL_INFO_TYPE_MCA);
@@ -883,11 +870,10 @@ ia64_mca_ucmc_handler(void)
/* TLB error is only exist in this SAL error record */
recover = (psp->tc && !(psp->cc || psp->bc || psp->rc || psp->uc))
/* other error recovery */
- || (ia64_mca_ucmc_extension
+ || (ia64_mca_ucmc_extension
&& ia64_mca_ucmc_extension(
IA64_LOG_CURR_BUFFER(SAL_INFO_TYPE_MCA),
- &ia64_sal_to_os_handoff_state,
- &ia64_os_to_sal_handoff_state));
+ sos));
if (recover) {
sal_log_record_header_t *rh =
IA64_LOG_CURR_BUFFER(SAL_INFO_TYPE_MCA);
@@ -901,7 +887,7 @@ ia64_mca_ucmc_handler(void)
ia64_mca_wakeup_all();
/* Return to SAL */
- ia64_return_to_sal_check(recover);
+ ia64_return_to_sal_check(recover, sos);
}
static DECLARE_WORK(cmc_disable_work, ia64_mca_cmc_vector_disable_keventd,
NULL);
@@ -1127,7 +1113,8 @@ ia64_mca_cpe_poll (unsigned long dummy)
*
* Called from ia64_monarch_init_handler
*
- * Inputs: pointer to pt_regs where processor info was saved.
+ * Inputs: pointer to pt_regs where processor info was saved. SAL/OS state for
+ * this event.
*
* Returns:
* 0 if SAL must warm boot the System
@@ -1135,24 +1122,16 @@ ia64_mca_cpe_poll (unsigned long dummy)
*
*/
void
-ia64_init_handler (struct pt_regs *pt, struct switch_stack *sw)
+ia64_init_handler (struct pt_regs *pt, struct switch_stack *sw,
+ struct ia64_sal_os_state *sos)
{
- pal_min_state_area_t *ms;
-
	oops_in_progress = 1;	/* avoid deadlock in printk, but it makes recovery dodgy */
console_loglevel = 15; /* make sure printks make it to console */
printk(KERN_INFO "Entered OS INIT handler. PSP=%lx\n",
- ia64_sal_to_os_handoff_state.proc_state_param);
-
- /*
- * Address of minstate area provided by PAL is physical,
- * uncacheable (bit 63 set). Convert to Linux virtual
- * address in region 6.
- */
-	ms = (pal_min_state_area_t *)(ia64_sal_to_os_handoff_state.pal_min_state | (6ul<<61));
+ sos->proc_state_param);
-	init_handler_platform(ms, pt, sw);	/* call platform specific routines */
+	init_handler_platform(sos->pal_min_state, pt, sw);	/* call platform specific routines */
}
static int __init
-
To unsubscribe from this list: send the line "unsubscribe linux-ia64" in
the body of a message to [EMAIL PROTECTED]
More majordomo info at http://vger.kernel.org/majordomo-info.html