The location of the ERROR and BUSY status bits for a message depends on
its descriptor index, i.e. the CPU it belongs to. We determine this
location ahead of the wait_completion spin loop so the calculation is not
repeated on every iteration.

Split out the status location calculation into a new routine,
status_mmr_loc, to be used within each uv*_wait_completion routine.
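
For illustration only (not part of this patch), the call pattern each
uv*_wait_completion routine ends up with is roughly the sketch below;
example_read_my_status() is a made-up name, uv_read_local_mmr() comes
from uv_hub.h, and UV_ACT_STATUS_MASK is assumed to be the existing
2-bit status mask from uv_bau.h:

  static unsigned long example_read_my_status(struct bau_control *bcp)
  {
          unsigned long mmr_offset;
          int right_shift;

          /* locate this CPU's 2-bit status field ... */
          status_mmr_loc(&mmr_offset, &right_shift, bcp->uvhub_cpu);

          /* ... then read and mask it out; e.g. desc 5 -> STATUS_0 with
           * shift 10, desc 40 -> STATUS_1 with shift (40 - 32) * 2 = 16
           */
          return (uv_read_local_mmr(mmr_offset) >> right_shift)
                          & UV_ACT_STATUS_MASK;
  }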

Signed-off-by: Andrew Banman <[email protected]>
Acked-by: Mike Travis <[email protected]>
---
 arch/x86/platform/uv/tlb_uv.c | 41 +++++++++++++++++++++++++----------------
 1 file changed, 25 insertions(+), 16 deletions(-)

Index: community/arch/x86/platform/uv/tlb_uv.c
===================================================================
--- community.orig/arch/x86/platform/uv/tlb_uv.c
+++ community/arch/x86/platform/uv/tlb_uv.c
@@ -533,6 +533,22 @@ static inline void end_uvhub_quiesce(str
        atom_asr(-1, (struct atomic_short *)&hmaster->uvhub_quiesce);
 }
 
+/*
+ * The ERROR and BUSY status bits are packed pairwise into the STATUS_0 and
+ * STATUS_1 mmrs, each an array[32] of 2-bit fields. Given a CPU's descriptor
+ * index desc, determine the mmr and bit index holding its message status.
+ */
+static void status_mmr_loc(unsigned long *mmr, int *index, int desc)
+{
+       if (desc < UV_CPUS_PER_AS) {
+               *mmr = UVH_LB_BAU_SB_ACTIVATION_STATUS_0;
+               *index = desc * UV_ACT_STATUS_SIZE;
+       } else {
+               *mmr = UVH_LB_BAU_SB_ACTIVATION_STATUS_1;
+               *index = (desc - UV_CPUS_PER_AS) * UV_ACT_STATUS_SIZE;
+       }
+}
+
 static unsigned long uv1_read_status(unsigned long mmr_offset, int right_shift)
 {
        unsigned long descriptor_status;
@@ -548,13 +564,16 @@ static unsigned long uv1_read_status(uns
  * return COMPLETE, RETRY(PLUGGED or TIMEOUT) or GIVEUP
  */
 static int uv1_wait_completion(struct bau_desc *bau_desc,
-                               unsigned long mmr_offset, int right_shift,
                                struct bau_control *bcp, long try)
 {
        unsigned long descriptor_status;
+       unsigned long mmr_offset;
+       int right_shift;
+       int desc = bcp->uvhub_cpu;
        cycles_t ttm;
        struct ptc_stats *stat = bcp->statp;
 
+       status_mmr_loc(&mmr_offset, &right_shift, desc);
        descriptor_status = uv1_read_status(mmr_offset, right_shift);
        /* spin on the status MMR, waiting for it to go idle */
        while ((descriptor_status != DS_IDLE)) {
@@ -640,15 +659,17 @@ int handle_uv2_busy(struct bau_control *
 }
 
 static int uv2_3_wait_completion(struct bau_desc *bau_desc,
-                               unsigned long mmr_offset, int right_shift,
                                struct bau_control *bcp, long try)
 {
        unsigned long descriptor_stat;
+       unsigned long mmr_offset;
        cycles_t ttm;
        int desc = bcp->uvhub_cpu;
+       int right_shift;
        long busy_reps = 0;
        struct ptc_stats *stat = bcp->statp;
 
+       status_mmr_loc(&mmr_offset, &right_shift, desc);
        descriptor_stat = uv2_3_read_status(mmr_offset, right_shift, desc);
 
        /* spin on the status MMR, waiting for it to go idle */
@@ -712,22 +733,10 @@ static int uv2_3_wait_completion(struct
  */
 static int wait_completion(struct bau_desc *bau_desc, struct bau_control *bcp, long try)
 {
-       int right_shift;
-       unsigned long mmr_offset;
-       int desc = bcp->uvhub_cpu;
-
-       if (desc < UV_CPUS_PER_AS) {
-               mmr_offset = UVH_LB_BAU_SB_ACTIVATION_STATUS_0;
-               right_shift = desc * UV_ACT_STATUS_SIZE;
-       } else {
-               mmr_offset = UVH_LB_BAU_SB_ACTIVATION_STATUS_1;
-               right_shift = ((desc - UV_CPUS_PER_AS) * UV_ACT_STATUS_SIZE);
-       }
-
        if (bcp->uvhub_version == 1)
-               return uv1_wait_completion(bau_desc, mmr_offset, right_shift, bcp, try);
+               return uv1_wait_completion(bau_desc, bcp, try);
        else
-               return uv2_3_wait_completion(bau_desc, mmr_offset, right_shift, bcp, try);
+               return uv2_3_wait_completion(bau_desc, bcp, try);
 }
 
 /*
