On Mon, Jun 13, 2016 at 11:29:13AM +1000, David Gwynne wrote:
> this is kind of like the change i just made to sparc64.
> 
> sparc created instances of inline functions for each of the splfoo
> calls. this provides an splraise call and turns splfoo into a macro
> to splraise(IPL_FOO).
> 
> the spl code is quite long so i turned it into functions instead
> of inlines.
> 
> could someone test this? i haven't got a running (walking?) sparc
> anymore.

OK, getting further with this diff[1]. Going to let it run
overnight.

but one more nit below:

> Index: sparc/intr.c
> ===================================================================
> RCS file: /cvs/src/sys/arch/sparc/sparc/intr.c,v
> retrieving revision 1.43
> diff -u -p -r1.43 intr.c
> --- sparc/intr.c      10 Dec 2015 19:48:04 -0000      1.43
> +++ sparc/intr.c      13 Jun 2016 01:27:01 -0000
> @@ -544,3 +544,70 @@ splassert_check(int wantipl, const char 
>       }
>  }
>  #endif
> +
> +int
> +spl0(void)
> +{
> +     int psr, oldipl;
> +
> +     /*
> +      * wrpsr xors two values: we choose old psr and old ipl here,
> +      * which gives us the same value as the old psr but with all
> +      * the old PIL bits turned off.
> +      */
> +     __asm volatile("rd %%psr,%0" : "=r" (psr));
> +     oldipl = psr & PSR_PIL;
> +     __asm volatile("wr %0,%1,%%psr" : : "r" (psr), "r" (oldipl));
> +
> +     /*
> +      * Three instructions must execute before we can depend
> +      * on the bits to be changed.
> +      */
> +     __asm volatile("nop; nop; nop");
> +     return (oldipl);
> +}
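
an aside on the xor trick, since the comment is terse: wrpsr
writes rs1 xor rs2 into %psr, and x ^ (x & m) == x & ~m for any
mask m, so xoring the old psr with its own PIL field clears
exactly those bits and nothing else. a throwaway userland check
of the identity (PSR_PIL value from psl.h, the psr value made up):

#include <assert.h>

#define PSR_PIL 0x00000f00      /* processor interrupt level field */

int
main(void)
{
        int psr = 0x00000ac7;           /* made-up psr with PIL = 0xa */
        int oldipl = psr & PSR_PIL;

        /* what "wr psr,oldipl,%psr" computes */
        assert((psr ^ oldipl) == (psr & ~PSR_PIL));
        return (0);
}
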
> +
> +int
> +splraise(int newipl)
> +{
> +     int psr, oldipl;
> +
> +     newipl <<= 8;
> +
> +     __asm volatile("rd %%psr,%0" : "=r" (psr));
> +     oldipl = psr & PSR_PIL;
> +     if (newipl <= oldipl)
> +             return oldipl;
> +
> +     psr &= ~oldipl;
> +     __asm volatile("wr %0,%1,%%psr" : : "r" (psr), "n" (newipl));
> +     __asm volatile("nop; nop; nop");
> +     __asm volatile("":::"memory");  /* protect from reordering */ \

leftover backslash in the line above :-)


> +
> +     return (oldipl);
> +}
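
fwiw, splraise() only ever raises: a newipl at or below the
current level is a no-op and the old level comes back unchanged,
matching what the old SPLHOLD() macros did. a toy model of just
that comparison (not kernel code; IPLs reduced to plain ints and
the psr to a global):

#include <assert.h>

static int curipl;                      /* stand-in for the PIL field */

static int
mock_splraise(int newipl)
{
        int oldipl = curipl;

        if (newipl > oldipl)            /* raise only, never lower */
                curipl = newipl;
        return (oldipl);
}

int
main(void)
{
        curipl = 9;
        assert(mock_splraise(4) == 9);  /* lowering request: no-op */
        assert(curipl == 9);
        assert(mock_splraise(12) == 9); /* raising works */
        assert(curipl == 12);
        return (0);
}
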
> +
> +int
> +splhigh(void)
> +{
> +     int psr, oldipl;
> +
> +     __asm volatile("rd %%psr,%0" : "=r" (psr));
> +     __asm volatile("wr %0,0,%%psr" : : "r" (psr | PSR_PIL));
> +     __asm volatile("and %1,%2,%0; nop; nop" : "=r" (oldipl) :
> +         "r" (psr), "n" (PSR_PIL));
> +     __asm volatile("":::"memory");  /* protect from reordering */
> +     return (oldipl);
> +}
> +
> +void
> +splx(int newipl)
> +{
> +     int psr;
> +
> +     __asm volatile("":::"memory");  /* protect from reordering */
> +     __asm volatile("rd %%psr,%0" : "=r" (psr));
> +     __asm volatile("wr %0,%1,%%psr" : :
> +         "r" (psr & ~PSR_PIL), "rn" (newipl));
> +     __asm volatile("nop; nop; nop");
> +}
> 


[1] My grep may be defective, but I couldn't find anything
for sparc using:

-SPLHOLD(splausoft, IPL_AUSOFT)
-SPLHOLD(splfdsoft, IPL_FDSOFT)
-SPLHOLD(splaudio, IPL_AUHARD)

so not included.


Index: include/psl.h
===================================================================
RCS file: /cvs/obsd/src/sys/arch/sparc/include/psl.h,v
retrieving revision 1.28
diff -u -p -u -p -r1.28 psl.h
--- include/psl.h       29 Mar 2014 18:09:30 -0000      1.28
+++ include/psl.h       14 Jun 2016 07:51:32 -0000
@@ -106,9 +106,6 @@
 
 static __inline int getpsr(void);
 static __inline void setpsr(int);
-static __inline int spl0(void);
-static __inline int splhigh(void);
-static __inline void splx(int);
 static __inline int getmid(void);
 
 /*
@@ -142,28 +139,6 @@ setpsr(newpsr)
        __asm volatile("nop");
 }
 
-static __inline int
-spl0()
-{
-       int psr, oldipl;
-
-       /*
-        * wrpsr xors two values: we choose old psr and old ipl here,
-        * which gives us the same value as the old psr but with all
-        * the old PIL bits turned off.
-        */
-       __asm volatile("rd %%psr,%0" : "=r" (psr));
-       oldipl = psr & PSR_PIL;
-       __asm volatile("wr %0,%1,%%psr" : : "r" (psr), "r" (oldipl));
-
-       /*
-        * Three instructions must execute before we can depend
-        * on the bits to be changed.
-        */
-       __asm volatile("nop; nop; nop");
-       return (oldipl);
-}
-
 #ifdef DIAGNOSTIC
 /*
  * Although this function is implemented in MI code, it must be in this MD
@@ -183,83 +158,24 @@ void splassert_check(int, const char *);
 #define splsoftassert(wantipl) do { /* nada */ } while (0)
 #endif
 
-/*
- * PIL 1 through 14 can use this macro.
- * (spl0 and splhigh are special since they put all 0s or all 1s
- * into the ipl field.)
- */
-#define        SPL(name, newipl) \
-static __inline int name(void); \
-static __inline int name() \
-{ \
-       int psr, oldipl; \
-       __asm volatile("rd %%psr,%0" : "=r" (psr)); \
-       oldipl = psr & PSR_PIL; \
-       psr &= ~oldipl; \
-       __asm volatile("wr %0,%1,%%psr" : : \
-           "r" (psr), "n" ((newipl) << 8)); \
-       __asm volatile("nop; nop; nop"); \
-       __asm volatile("":::"memory");  /* protect from reordering */ \
-       return (oldipl); \
-}
-/* A non-priority-decreasing version of SPL */
-#define        SPLHOLD(name, newipl) \
-static __inline int name(void); \
-static __inline int name() \
-{ \
-       int psr, oldipl; \
-       __asm volatile("rd %%psr,%0" : "=r" (psr)); \
-       oldipl = psr & PSR_PIL; \
-       if ((newipl << 8) <= oldipl) \
-               return oldipl; \
-       psr &= ~oldipl; \
-       __asm volatile("wr %0,%1,%%psr" : : \
-           "r" (psr), "n" ((newipl) << 8)); \
-       __asm volatile("nop; nop; nop"); \
-       __asm volatile("":::"memory");  /* protect from reordering */ \
-       return (oldipl); \
-}
-
-SPLHOLD(splsoftint, IPL_SOFTINT)
-#define        splsoftclock            splsoftint
-#define        splsoftnet              splsoftint
-SPLHOLD(splausoft, IPL_AUSOFT)
-SPLHOLD(splfdsoft, IPL_FDSOFT)
-SPLHOLD(splbio, IPL_BIO)
-SPLHOLD(splnet, IPL_NET)
-SPLHOLD(spltty, IPL_TTY)
-SPLHOLD(splvm, IPL_VM)
-SPLHOLD(splclock, IPL_CLOCK)
-SPLHOLD(splfd, IPL_FD)
-SPLHOLD(splzs, IPL_ZS)
-SPLHOLD(splaudio, IPL_AUHARD)
-SPLHOLD(splsched, IPL_SCHED)
-SPLHOLD(splstatclock, IPL_STATCLOCK)
-
-static __inline int splhigh()
-{
-       int psr, oldipl;
-
-       __asm volatile("rd %%psr,%0" : "=r" (psr));
-       __asm volatile("wr %0,0,%%psr" : : "r" (psr | PSR_PIL));
-       __asm volatile("and %1,%2,%0; nop; nop" : "=r" (oldipl) : \
-           "r" (psr), "n" (PSR_PIL));
-       __asm volatile("":::"memory");  /* protect from reordering */
-       return (oldipl);
-}
+#define splsoftint()   splraise(IPL_SOFTINT)
+#define splsoftclock   splsoftint
+#define splsoftnet     splsoftint
+#define splbio()       splraise(IPL_BIO)
+#define splnet()       splraise(IPL_NET)
+#define spltty()       splraise(IPL_TTY)
+#define splvm()                splraise(IPL_VM)
+#define splclock()     splraise(IPL_CLOCK)
+#define splfd()                splraise(IPL_FD)
+#define splzs()                splraise(IPL_ZS)
+#define splsched()     splraise(IPL_SCHED)
+#define splstatclock() splraise(IPL_STATCLOCK)
+
+int    spl0(void);
+int    splraise(int);
+int    splhigh(void);
+void   splx(int);
 
-/* splx does not have a return value */
-static __inline void splx(newipl)
-       int newipl;
-{
-       int psr;
-
-       __asm volatile("":::"memory");  /* protect from reordering */
-       __asm volatile("rd %%psr,%0" : "=r" (psr));
-       __asm volatile("wr %0,%1,%%psr" : : \
-           "r" (psr & ~PSR_PIL), "rn" (newipl));
-       __asm volatile("nop; nop; nop");
-}
 #endif /* KERNEL && !_LOCORE */
 
 #endif /* PSR_IMPL */
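
One more note on the alias macros: splsoftclock and splsoftnet
are object-like (no parens), but splsoftclock() still ends up as
a call to splraise() because the preprocessor rescans after
replacement. Quick standalone demo with a stub splraise() and a
made-up IPL_SOFTINT value (the real one lives in the headers):

#include <stdio.h>

#define IPL_SOFTINT 1                   /* made up for the demo */

static int
splraise(int ipl)                       /* stub, just to show the expansion */
{
        printf("splraise(%d)\n", ipl);
        return (0);
}

#define splsoftint()    splraise(IPL_SOFTINT)
#define splsoftclock    splsoftint

int
main(void)
{
        /* splsoftclock() -> splsoftint() -> splraise(IPL_SOFTINT) */
        splsoftclock();
        return (0);
}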
