[PATCH V2 for-next 08/19] IB/hns: Optimize code of aeq and ceq interrupt handle and fix the bug of qpn

2016-09-20 Thread Salil Mehta
From: "Wei Hu (Xavier)" <xavier.huwei@huawei.com>

This patch optimized the codes of aeq and ceq interrupt handle
and fixed the bug in the calculation of qpn. For the special
qp(GSI or SMI), calculated the qp number according to physical
port and the qpn reported in the event of async event queue.

Signed-off-by: Wei Hu (Xavier) <xavier.huwei@huawei.com>
Signed-off-by: Salil Mehta <salil.mehta@huawei.com>
---
PATCH V2: Addressed Leon Romanovsky's comment
 Link: https://lkml.org/lkml/2016/9/12/303
PATCH V1: Initial patch
---
 drivers/infiniband/hw/hns/hns_roce_eq.c | 146 
 drivers/infiniband/hw/hns/hns_roce_eq.h |   4 +
 2 files changed, 75 insertions(+), 75 deletions(-)

diff --git a/drivers/infiniband/hw/hns/hns_roce_eq.c 
b/drivers/infiniband/hw/hns/hns_roce_eq.c
index 98af7fe..21e21b0 100644
--- a/drivers/infiniband/hw/hns/hns_roce_eq.c
+++ b/drivers/infiniband/hw/hns/hns_roce_eq.c
@@ -66,9 +66,6 @@ static void hns_roce_wq_catas_err_handle(struct hns_roce_dev 
*hr_dev,
 {
struct device *dev = _dev->pdev->dev;
 
-   qpn = roce_get_field(aeqe->event.qp_event.qp,
-HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_M,
-HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_S);
dev_warn(dev, "Local Work Queue Catastrophic Error.\n");
switch (roce_get_field(aeqe->asyn, HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_M,
   HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_S)) {
@@ -96,13 +93,6 @@ static void hns_roce_wq_catas_err_handle(struct hns_roce_dev 
*hr_dev,
default:
break;
}
-
-   hns_roce_qp_event(hr_dev, roce_get_field(aeqe->event.qp_event.qp,
-   HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_M,
-   HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_S),
- roce_get_field(aeqe->asyn,
-   HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M,
-   HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S));
 }
 
 static void hns_roce_local_wq_access_err_handle(struct hns_roce_dev *hr_dev,
@@ -111,9 +101,6 @@ static void hns_roce_local_wq_access_err_handle(struct 
hns_roce_dev *hr_dev,
 {
struct device *dev = _dev->pdev->dev;
 
-   qpn = roce_get_field(aeqe->event.qp_event.qp,
-HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_M,
-HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_S);
dev_warn(dev, "Local Access Violation Work Queue Error.\n");
switch (roce_get_field(aeqe->asyn, HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_M,
   HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_S)) {
@@ -141,13 +128,69 @@ static void hns_roce_local_wq_access_err_handle(struct 
hns_roce_dev *hr_dev,
default:
break;
}
+}
+
+static void hns_roce_qp_err_handle(struct hns_roce_dev *hr_dev,
+  struct hns_roce_aeqe *aeqe,
+  int event_type)
+{
+   struct device *dev = _dev->pdev->dev;
+   int phy_port;
+   int qpn;
+
+   qpn = roce_get_field(aeqe->event.qp_event.qp,
+HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_M,
+HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_S);
+   phy_port = roce_get_field(aeqe->event.qp_event.qp,
+   HNS_ROCE_AEQE_EVENT_QP_EVENT_PORT_NUM_M,
+   HNS_ROCE_AEQE_EVENT_QP_EVENT_PORT_NUM_S);
+   if (qpn <= 1)
+   qpn = HNS_ROCE_MAX_PORTS * qpn + phy_port;
+
+   switch (event_type) {
+   case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
+   dev_warn(dev, "Invalid Req Local Work Queue Error.\n"
+ "QP %d, phy_port %d.\n", qpn, phy_port);
+   break;
+   case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
+   hns_roce_wq_catas_err_handle(hr_dev, aeqe, qpn);
+   break;
+   case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
+   hns_roce_local_wq_access_err_handle(hr_dev, aeqe, qpn);
+   break;
+   default:
+   break;
+   }
+
+   hns_roce_qp_event(hr_dev, qpn, event_type);
+}
+
+static void hns_roce_cq_err_handle(struct hns_roce_dev *hr_dev,
+  struct hns_roce_aeqe *aeqe,
+  int event_type)
+{
+   struct device *dev = _dev->pdev->dev;
+   u32 cqn;
+
+   cqn = le32_to_cpu(roce_get_field(aeqe->event.cq_event.cq,
+   HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_M,
+   HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_S));
+
+   switch (event_type) {
+   case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR:
+   dev_warn(dev, "CQ 0x%x access err.\n", cqn);
+   break;
+   case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW:
+   dev_warn(dev, "CQ 0x%x overflow\n", cqn);
+   break;
+   case HNS_ROCE_EVENT_TYPE_CQ_ID_INVALID:
+   dev_warn(dev, "CQ 0x%x ID invalid.\n", cqn);
+   break;

[PATCH V2 for-next 08/19] IB/hns: Optimize code of aeq and ceq interrupt handle and fix the bug of qpn

2016-09-20 Thread Salil Mehta
From: "Wei Hu (Xavier)" <xavier.huwei@huawei.com>

This patch optimized the codes of aeq and ceq interrupt handle
and fixed the bug in the calculation of qpn. For the special
qp(GSI or SMI), calculated the qp number according to physical
port and the qpn reported in the event of async event queue.

Signed-off-by: Wei Hu (Xavier) <xavier.huwei@huawei.com>
Signed-off-by: Salil Mehta <salil.mehta@huawei.com>
---
PATCH V2: Addressed Leon Romanovsky's comment
 Link: https://lkml.org/lkml/2016/9/12/303
PATCH V1: Initial patch
---
 drivers/infiniband/hw/hns/hns_roce_eq.c | 146 
 drivers/infiniband/hw/hns/hns_roce_eq.h |   4 +
 2 files changed, 75 insertions(+), 75 deletions(-)

diff --git a/drivers/infiniband/hw/hns/hns_roce_eq.c 
b/drivers/infiniband/hw/hns/hns_roce_eq.c
index 98af7fe..21e21b0 100644
--- a/drivers/infiniband/hw/hns/hns_roce_eq.c
+++ b/drivers/infiniband/hw/hns/hns_roce_eq.c
@@ -66,9 +66,6 @@ static void hns_roce_wq_catas_err_handle(struct hns_roce_dev 
*hr_dev,
 {
struct device *dev = _dev->pdev->dev;
 
-   qpn = roce_get_field(aeqe->event.qp_event.qp,
-HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_M,
-HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_S);
dev_warn(dev, "Local Work Queue Catastrophic Error.\n");
switch (roce_get_field(aeqe->asyn, HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_M,
   HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_S)) {
@@ -96,13 +93,6 @@ static void hns_roce_wq_catas_err_handle(struct hns_roce_dev 
*hr_dev,
default:
break;
}
-
-   hns_roce_qp_event(hr_dev, roce_get_field(aeqe->event.qp_event.qp,
-   HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_M,
-   HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_S),
- roce_get_field(aeqe->asyn,
-   HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M,
-   HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S));
 }
 
 static void hns_roce_local_wq_access_err_handle(struct hns_roce_dev *hr_dev,
@@ -111,9 +101,6 @@ static void hns_roce_local_wq_access_err_handle(struct 
hns_roce_dev *hr_dev,
 {
struct device *dev = _dev->pdev->dev;
 
-   qpn = roce_get_field(aeqe->event.qp_event.qp,
-HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_M,
-HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_S);
dev_warn(dev, "Local Access Violation Work Queue Error.\n");
switch (roce_get_field(aeqe->asyn, HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_M,
   HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_S)) {
@@ -141,13 +128,69 @@ static void hns_roce_local_wq_access_err_handle(struct 
hns_roce_dev *hr_dev,
default:
break;
}
+}
+
+static void hns_roce_qp_err_handle(struct hns_roce_dev *hr_dev,
+  struct hns_roce_aeqe *aeqe,
+  int event_type)
+{
+   struct device *dev = _dev->pdev->dev;
+   int phy_port;
+   int qpn;
+
+   qpn = roce_get_field(aeqe->event.qp_event.qp,
+HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_M,
+HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_S);
+   phy_port = roce_get_field(aeqe->event.qp_event.qp,
+   HNS_ROCE_AEQE_EVENT_QP_EVENT_PORT_NUM_M,
+   HNS_ROCE_AEQE_EVENT_QP_EVENT_PORT_NUM_S);
+   if (qpn <= 1)
+   qpn = HNS_ROCE_MAX_PORTS * qpn + phy_port;
+
+   switch (event_type) {
+   case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
+   dev_warn(dev, "Invalid Req Local Work Queue Error.\n"
+ "QP %d, phy_port %d.\n", qpn, phy_port);
+   break;
+   case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
+   hns_roce_wq_catas_err_handle(hr_dev, aeqe, qpn);
+   break;
+   case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
+   hns_roce_local_wq_access_err_handle(hr_dev, aeqe, qpn);
+   break;
+   default:
+   break;
+   }
+
+   hns_roce_qp_event(hr_dev, qpn, event_type);
+}
+
+static void hns_roce_cq_err_handle(struct hns_roce_dev *hr_dev,
+  struct hns_roce_aeqe *aeqe,
+  int event_type)
+{
+   struct device *dev = _dev->pdev->dev;
+   u32 cqn;
+
+   cqn = le32_to_cpu(roce_get_field(aeqe->event.cq_event.cq,
+   HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_M,
+   HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_S));
+
+   switch (event_type) {
+   case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR:
+   dev_warn(dev, "CQ 0x%x access err.\n", cqn);
+   break;
+   case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW:
+   dev_warn(dev, "CQ 0x%x overflow\n", cqn);
+   break;
+   case HNS_ROCE_EVENT_TYPE_CQ_ID_INVALID:
+   dev_warn(dev, "CQ 0x%x ID invalid.\n", cqn);
+   break;