Add the mlx5 implementation of the TLS Rx routines that add/del TLS
contexts, and add the tls_dev_resync_rx routine,
to work with the TLS inline Rx crypto offload infrastructure.

Signed-off-by: Boris Pismenny <bor...@mellanox.com>
Signed-off-by: Ilya Lesokhin <il...@mellanox.com>
---
 .../net/ethernet/mellanox/mlx5/core/en_accel/tls.c | 46 +++++++++++++++-------
 .../net/ethernet/mellanox/mlx5/core/en_accel/tls.h | 15 +++++++
 2 files changed, 46 insertions(+), 15 deletions(-)

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c 
b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c
index 7fb9c75..68368c9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c
@@ -110,9 +110,7 @@ static int mlx5e_tls_add(struct net_device *netdev, struct 
sock *sk,
        u32 caps = mlx5_accel_tls_device_caps(mdev);
        int ret = -ENOMEM;
        void *flow;
-
-       if (direction != TLS_OFFLOAD_CTX_DIR_TX)
-               return -EINVAL;
+       u32 swid;
 
        flow = kzalloc(MLX5_ST_SZ_BYTES(tls_flow), GFP_KERNEL);
        if (!flow)
@@ -122,18 +120,23 @@ static int mlx5e_tls_add(struct net_device *netdev, 
struct sock *sk,
        if (ret)
                goto free_flow;
 
+       ret = mlx5_accel_tls_add_flow(mdev, flow, crypto_info,
+                                     start_offload_tcp_sn, &swid,
+                                     direction == TLS_OFFLOAD_CTX_DIR_TX);
+       if (ret < 0)
+               goto free_flow;
+
        if (direction == TLS_OFFLOAD_CTX_DIR_TX) {
                struct mlx5e_tls_offload_context_tx *tx_ctx =
                    mlx5e_get_tls_tx_context(tls_ctx);
-               u32 swid;
-
-               ret = mlx5_accel_tls_add_tx_flow(mdev, flow, crypto_info,
-                                                start_offload_tcp_sn, &swid);
-               if (ret < 0)
-                       goto free_flow;
 
                tx_ctx->swid = htonl(swid);
                tx_ctx->expected_seq = start_offload_tcp_sn;
+       } else {
+               struct mlx5e_tls_offload_context_rx *rx_ctx =
+                   mlx5e_get_tls_rx_context(tls_ctx);
+
+               rx_ctx->handle = htonl(swid);
        }
 
        return 0;
@@ -147,19 +150,32 @@ static void mlx5e_tls_del(struct net_device *netdev,
                          enum tls_offload_ctx_dir direction)
 {
        struct mlx5e_priv *priv = netdev_priv(netdev);
+       unsigned int handle;
 
-       if (direction == TLS_OFFLOAD_CTX_DIR_TX) {
-               u32 swid = ntohl(mlx5e_get_tls_tx_context(tls_ctx)->swid);
+       handle = ntohl((direction == TLS_OFFLOAD_CTX_DIR_TX) ?
+                      mlx5e_get_tls_tx_context(tls_ctx)->swid :
+                      mlx5e_get_tls_rx_context(tls_ctx)->handle);
 
-               mlx5_accel_tls_del_tx_flow(priv->mdev, swid);
-       } else {
-               netdev_err(netdev, "unsupported direction %d\n", direction);
-       }
+       mlx5_accel_tls_del_flow(priv->mdev, handle,
+                               direction == TLS_OFFLOAD_CTX_DIR_TX);
+}
+
+static void mlx5e_tls_resync_rx(struct net_device *netdev, struct sock *sk,
+                               u32 seq, u64 rcd_sn)
+{
+       struct tls_context *tls_ctx = tls_get_ctx(sk);
+       struct mlx5e_priv *priv = netdev_priv(netdev);
+       struct mlx5e_tls_offload_context_rx *rx_ctx;
+
+       rx_ctx = mlx5e_get_tls_rx_context(tls_ctx);
+
+       mlx5_accel_tls_resync_rx(priv->mdev, rx_ctx->handle, seq, rcd_sn);
 }
 
 static const struct tlsdev_ops mlx5e_tls_ops = {
        .tls_dev_add = mlx5e_tls_add,
        .tls_dev_del = mlx5e_tls_del,
+       .tls_dev_resync_rx = mlx5e_tls_resync_rx,
 };
 
 void mlx5e_tls_build_netdev(struct mlx5e_priv *priv)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.h 
b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.h
index e26222a..2d40ede 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.h
@@ -65,6 +65,21 @@ struct mlx5e_tls_offload_context_tx {
                            base);
 }
 
+struct mlx5e_tls_offload_context_rx {
+       struct tls_offload_context_rx base;
+       __be32 handle;
+};
+
+static inline struct mlx5e_tls_offload_context_rx *
+mlx5e_get_tls_rx_context(struct tls_context *tls_ctx)
+{
+       BUILD_BUG_ON(sizeof(struct mlx5e_tls_offload_context_rx) >
+                    TLS_OFFLOAD_CONTEXT_SIZE_RX);
+       return container_of(tls_offload_ctx_rx(tls_ctx),
+                           struct mlx5e_tls_offload_context_rx,
+                           base);
+}
+
 void mlx5e_tls_build_netdev(struct mlx5e_priv *priv);
 int mlx5e_tls_init(struct mlx5e_priv *priv);
 void mlx5e_tls_cleanup(struct mlx5e_priv *priv);
-- 
1.8.3.1

Reply via email to