This commit handles the signature error CQE generated by the HW
(if one occurred) and stores it on the QP signature error list.

Once the user gets the completion for the transaction, he must
check for signature errors on the signature memory region using
a new lightweight verb, ib_check_sig_status, and if such an error
exists, he will get the signature error information.

If the user does not check for signature errors, i.e. does not
call ib_check_sig_status, he will not be allowed to use the
memory region for another signature operation (a REG_SIG_MR
work request will fail).

The underlying mlx5 will handle signature error completions
and will mark the relevant memory region as dirty.

Signed-off-by: Sagi Grimberg <sa...@mellanox.com>
---
 drivers/infiniband/hw/mlx5/cq.c      |   53 ++++++++++++++++++++++++++++++++++
 drivers/infiniband/hw/mlx5/main.c    |    1 +
 drivers/infiniband/hw/mlx5/mlx5_ib.h |    7 ++++
 drivers/infiniband/hw/mlx5/mr.c      |   29 ++++++++++++++++++
 drivers/infiniband/hw/mlx5/qp.c      |    8 ++++-
 include/linux/mlx5/cq.h              |    1 +
 include/linux/mlx5/device.h          |   18 +++++++++++
 include/linux/mlx5/driver.h          |    4 ++
 include/linux/mlx5/qp.h              |    5 +++
 9 files changed, 124 insertions(+), 2 deletions(-)

diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
index 344ab03..da7605b 100644
--- a/drivers/infiniband/hw/mlx5/cq.c
+++ b/drivers/infiniband/hw/mlx5/cq.c
@@ -351,6 +351,33 @@ static void handle_atomics(struct mlx5_ib_qp *qp, struct 
mlx5_cqe64 *cqe64,
        qp->sq.last_poll = tail;
 }
 
+static void get_sig_err_item(struct mlx5_sig_err_cqe *cqe,
+                            struct ib_sig_err *item)
+{
+       u16 syndrome = be16_to_cpu(cqe->syndrome);
+
+       switch (syndrome) {
+       case 13:
+               item->err_type = IB_SIG_BAD_CRC;
+               break;
+       case 12:
+               item->err_type = IB_SIG_BAD_APPTAG;
+               break;
+       case 11:
+               item->err_type = IB_SIG_BAD_REFTAG;
+               break;
+       default:
+               break;
+       }
+
+       item->expected_guard = be32_to_cpu(cqe->expected_trans_sig) >> 16;
+       item->actual_guard = be32_to_cpu(cqe->actual_trans_sig) >> 16;
+       item->expected_logical_block = be32_to_cpu(cqe->expected_reftag);
+       item->actual_logical_block = be32_to_cpu(cqe->actual_reftag);
+       item->sig_err_offset = be64_to_cpu(cqe->err_offset);
+       item->key = be32_to_cpu(cqe->mkey);
+}
+
 static int mlx5_poll_one(struct mlx5_ib_cq *cq,
                         struct mlx5_ib_qp **cur_qp,
                         struct ib_wc *wc)
@@ -360,12 +387,16 @@ static int mlx5_poll_one(struct mlx5_ib_cq *cq,
        struct mlx5_cqe64 *cqe64;
        struct mlx5_core_qp *mqp;
        struct mlx5_ib_wq *wq;
+       struct mlx5_sig_err_cqe *sig_err_cqe;
+       struct mlx5_core_mr *mmr;
+       struct mlx5_ib_mr *mr;
        uint8_t opcode;
        uint32_t qpn;
        u16 wqe_ctr;
        void *cqe;
        int idx;
 
+repoll:
        cqe = next_cqe_sw(cq);
        if (!cqe)
                return -EAGAIN;
@@ -449,6 +480,28 @@ static int mlx5_poll_one(struct mlx5_ib_cq *cq,
                        }
                }
                break;
+       case MLX5_CQE_SIG_ERR:
+               sig_err_cqe = (struct mlx5_sig_err_cqe *)cqe64;
+
+               /*
+                * Hold the MR table lock across the lookup AND the use
+                * of the MR, so it cannot be destroyed underneath us.
+                * The original code returned -EINVAL while still
+                * holding the read lock (lock leak) and dropped the
+                * lock before touching mr->sig.
+                */
+               read_lock(&dev->mdev.priv.mr_table.lock);
+               mmr = __mlx5_mr_lookup(&dev->mdev,
+                                      be32_to_cpu(sig_err_cqe->mkey) & 0xffffff00);
+               if (unlikely(!mmr)) {
+                       read_unlock(&dev->mdev.priv.mr_table.lock);
+                       mlx5_ib_warn(dev, "CQE@CQ %06x for unknown MR %6x\n",
+                                    cq->mcq.cqn,
+                                    be32_to_cpu(sig_err_cqe->mkey));
+                       return -EINVAL;
+               }
+
+               mr = to_mibmr(mmr);
+
+               get_sig_err_item(sig_err_cqe, &mr->sig->err_item);
+               mr->sig->sig_err_exists = true;
+
+               mlx5_ib_dbg(dev, "Got SIGERR on key: 0x%x\n",
+                           mr->sig->err_item.key);
+               read_unlock(&dev->mdev.priv.mr_table.lock);
+
+               /* Sig error CQEs are consumed here, not reported in a wc */
+               goto repoll;
        }
 
        return 0;
diff --git a/drivers/infiniband/hw/mlx5/main.c 
b/drivers/infiniband/hw/mlx5/main.c
index 2e67a37..f3c7111 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -1409,6 +1409,7 @@ static int init_one(struct pci_dev *pdev,
        dev->ib_dev.alloc_fast_reg_mr   = mlx5_ib_alloc_fast_reg_mr;
        dev->ib_dev.alloc_fast_reg_page_list = mlx5_ib_alloc_fast_reg_page_list;
        dev->ib_dev.free_fast_reg_page_list  = mlx5_ib_free_fast_reg_page_list;
+       dev->ib_dev.check_sig_status    = mlx5_ib_check_sig_status;
 
        if (mdev->caps.flags & MLX5_DEV_CAP_FLAG_XRC) {
                dev->ib_dev.alloc_xrcd = mlx5_ib_alloc_xrcd;
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h 
b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index 758f0e1..f175fa4 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -394,6 +394,11 @@ static inline struct mlx5_ib_qp *to_mibqp(struct 
mlx5_core_qp *mqp)
        return container_of(mqp, struct mlx5_ib_qp, mqp);
 }
 
+/* Convert the mlx5_core MR representation to the mlx5_ib wrapper */
+static inline struct mlx5_ib_mr *to_mibmr(struct mlx5_core_mr *mmr)
+{
+       return container_of(mmr, struct mlx5_ib_mr, mmr);
+}
+
 static inline struct mlx5_ib_pd *to_mpd(struct ib_pd *ibpd)
 {
        return container_of(ibpd, struct mlx5_ib_pd, ibpd);
@@ -531,6 +536,8 @@ int mlx5_mr_cache_init(struct mlx5_ib_dev *dev);
 int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev);
 int mlx5_mr_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int 
*shift);
 void mlx5_umr_cq_handler(struct ib_cq *cq, void *cq_context);
+int mlx5_ib_check_sig_status(struct ib_mr *sig_mr,
+                            struct ib_sig_err *sig_err);
 
 static inline void init_query_mad(struct ib_smp *mad)
 {
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 44f7e46..d796d60 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -967,6 +967,11 @@ struct ib_mr *mlx5_ib_create_mr(struct ib_pd *pd,
                access_mode = MLX5_ACCESS_MODE_KLM;
                mr->sig->psv_memory.psv_idx = psv_index[0];
                mr->sig->psv_wire.psv_idx = psv_index[1];
+
+               mr->sig->sig_status_checked = true;
+               mr->sig->sig_err_exists = false;
+               /* Next UMR, Arm SIGERR */
+               ++mr->sig->sigerr_count;
        }
 
        in->seg.flags = MLX5_PERM_UMR_EN | access_mode;
@@ -1114,3 +1119,27 @@ void mlx5_ib_free_fast_reg_page_list(struct 
ib_fast_reg_page_list *page_list)
        kfree(mfrpl->ibfrpl.page_list);
        kfree(mfrpl);
 }
+
+/*
+ * mlx5_ib_check_sig_status - report and clear the signature error
+ * status of a signature memory region.
+ *
+ * Returns 0 if no signature error is pending, 1 if one exists
+ * (@sig_err is then filled with its details), or -EINVAL if
+ * @sig_mr is not a signature-enabled MR.  In all valid cases the
+ * MR is marked status-checked so a subsequent REG_SIG_MR work
+ * request may reuse it.
+ */
+int mlx5_ib_check_sig_status(struct ib_mr *sig_mr,
+                            struct ib_sig_err *sig_err)
+{
+       struct mlx5_ib_mr *mmr = to_mmr(sig_mr);
+       int ret = 0;
+
+       /*
+        * This is a device-level verb callable on any MR; reject MRs
+        * that were not created with signature support instead of
+        * dereferencing a NULL sig context.
+        */
+       if (unlikely(!mmr->sig))
+               return -EINVAL;
+
+       if (!mmr->sig->sig_err_exists)
+               goto out;
+
+       if (sig_mr->lkey == mmr->sig->err_item.key) {
+               memcpy(sig_err, &mmr->sig->err_item, sizeof(*sig_err));
+       } else {
+               /*
+                * NOTE(review): the error was recorded on a different
+                * key than this MR's lkey; it is reported as a generic
+                * CRC error with the offending key — confirm intended
+                * semantics.
+                */
+               sig_err->err_type = IB_SIG_BAD_CRC;
+               sig_err->sig_err_offset = 0;
+               sig_err->key = mmr->sig->err_item.key;
+       }
+
+       ret = 1;
+       mmr->sig->sig_err_exists = false;
+       /* Next REG_SIG_MR UMR will re-arm SIGERR (bit toggles) */
+       mmr->sig->sigerr_count++;
+out:
+       mmr->sig->sig_status_checked = true;
+       return ret;
+}
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index d791e41..e7b55338 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -1726,6 +1726,7 @@ static __be64 sig_mkey_mask(void)
        result = MLX5_MKEY_MASK_LEN             |
                MLX5_MKEY_MASK_PAGE_SIZE        |
                MLX5_MKEY_MASK_START_ADDR       |
+               MLX5_MKEY_MASK_EN_SIGERR        |
                MLX5_MKEY_MASK_EN_RINVAL        |
                MLX5_MKEY_MASK_KEY              |
                MLX5_MKEY_MASK_LR               |
@@ -2152,6 +2153,7 @@ static void set_sig_mkey_segment(struct mlx5_mkey_seg 
*seg,
 {
        struct ib_mr *sig_mr = wr->wr.sig_handover.sig_mr;
        u32 sig_key = sig_mr->rkey;
+       u8 sigerr = to_mmr(sig_mr)->sig->sigerr_count & 1;
 
        memset(seg, 0, sizeof(*seg));
 
@@ -2159,7 +2161,7 @@ static void set_sig_mkey_segment(struct mlx5_mkey_seg 
*seg,
        seg->flags = get_umr_flags(wr->wr.sig_handover.access_flags) |
                                   MLX5_ACCESS_MODE_KLM;
        seg->qpn_mkey7_0 = cpu_to_be32((sig_key & 0xff) | 0xffffff00);
-       seg->flags_pd = cpu_to_be32(MLX5_MKEY_REMOTE_INVAL |
+       seg->flags_pd = cpu_to_be32(MLX5_MKEY_REMOTE_INVAL | sigerr << 26 |
                                    MLX5_MKEY_BSF_EN | pdn);
        seg->start_addr = 0;
        seg->len = cpu_to_be64(length);
@@ -2189,7 +2191,8 @@ static int set_sig_umr_wr(struct ib_send_wr *wr, struct 
mlx5_ib_qp *qp,
 
        if (unlikely(wr->wr.sig_handover.access_flags &
                     IB_ACCESS_REMOTE_ATOMIC) ||
-           unlikely(!sig_mr->sig) || unlikely(!qp->signature_en))
+           unlikely(!sig_mr->sig) || unlikely(!qp->signature_en) ||
+           unlikely(!sig_mr->sig->sig_status_checked))
                return -EINVAL;
 
        /* length of the protected region, data + protection */
@@ -2227,6 +2230,7 @@ static int set_sig_umr_wr(struct ib_send_wr *wr, struct 
mlx5_ib_qp *qp,
        if (unlikely((*seg == qp->sq.qend)))
                *seg = mlx5_get_send_wqe(qp, 0);
 
+       sig_mr->sig->sig_status_checked = false;
        return 0;
 }
 
diff --git a/include/linux/mlx5/cq.h b/include/linux/mlx5/cq.h
index 3db67f7..e1974b0 100644
--- a/include/linux/mlx5/cq.h
+++ b/include/linux/mlx5/cq.h
@@ -80,6 +80,7 @@ enum {
        MLX5_CQE_RESP_SEND_IMM  = 3,
        MLX5_CQE_RESP_SEND_INV  = 4,
        MLX5_CQE_RESIZE_CQ      = 0xff, /* TBD */
+       MLX5_CQE_SIG_ERR        = 12,
        MLX5_CQE_REQ_ERR        = 13,
        MLX5_CQE_RESP_ERR       = 14,
 };
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index aef7eed..96b50e8 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -117,6 +117,7 @@ enum {
        MLX5_MKEY_MASK_START_ADDR       = 1ull << 6,
        MLX5_MKEY_MASK_PD               = 1ull << 7,
        MLX5_MKEY_MASK_EN_RINVAL        = 1ull << 8,
+       MLX5_MKEY_MASK_EN_SIGERR        = 1ull << 9,
        MLX5_MKEY_MASK_BSF_EN           = 1ull << 12,
        MLX5_MKEY_MASK_KEY              = 1ull << 13,
        MLX5_MKEY_MASK_QPN              = 1ull << 14,
@@ -544,6 +545,23 @@ struct mlx5_cqe64 {
        u8              op_own;
 };
 
+/*
+ * Signature error CQE layout (64 bytes total; multi-byte fields are
+ * big-endian as delivered by the HW).
+ */
+struct mlx5_sig_err_cqe {
+       u8              rsvd0[16];
+       __be32          expected_trans_sig;
+       __be32          actual_trans_sig;
+       __be32          expected_reftag;
+       __be32          actual_reftag;
+       __be16          syndrome;
+       u8              rsvd22[2];
+       __be32          mkey;
+       __be64          err_offset;
+       u8              rsvd30[8];
+       __be32          qpn;
+       u8              rsvd38[2];
+       u8              signature;
+       u8              opcode;
+};
+
 struct mlx5_wqe_srq_next_seg {
        u8                      rsvd0[2];
        __be16                  next_wqe_index;
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 5fe0690..0c462bb 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -413,6 +413,10 @@ struct mlx5_core_psv {
 struct mlx5_core_sig_ctx {
        struct mlx5_core_psv    psv_memory;
        struct mlx5_core_psv    psv_wire;
+       struct ib_sig_err       err_item;
+       bool                    sig_status_checked;
+       bool                    sig_err_exists;
+       u32                     sigerr_count;
 };
 
 struct mlx5_core_mr {
diff --git a/include/linux/mlx5/qp.h b/include/linux/mlx5/qp.h
index 9ea6cf6..557e0b3 100644
--- a/include/linux/mlx5/qp.h
+++ b/include/linux/mlx5/qp.h
@@ -501,6 +501,11 @@ static inline struct mlx5_core_qp *__mlx5_qp_lookup(struct 
mlx5_core_dev *dev, u
        return radix_tree_lookup(&dev->priv.qp_table.tree, qpn);
 }
 
+/*
+ * Look up a core MR by its base mkey.  Callers serialize against MR
+ * destruction with mr_table.lock (see the cq.c sig-err handler).
+ * (Original patch line was wrapped by the mail client; rejoined.)
+ */
+static inline struct mlx5_core_mr *__mlx5_mr_lookup(struct mlx5_core_dev *dev, u32 key)
+{
+       return radix_tree_lookup(&dev->priv.mr_table.tree, key);
+}
+
 int mlx5_core_create_qp(struct mlx5_core_dev *dev,
                        struct mlx5_core_qp *qp,
                        struct mlx5_create_qp_mbox_in *in,
-- 
1.7.8.2

--
To unsubscribe from this list: send the line "unsubscribe linux-rdma" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html

Reply via email to