Re: [PATCH v12 net-next 09/12] crypto: chtls - Inline TLS record Tx

2018-03-19 Thread Stefano Brivio
On Mon, 19 Mar 2018 19:25:42 +0530
Atul Gupta  wrote:

> +static bool is_tls_skb(struct chtls_sock *csk, const struct sk_buff *skb)
> +{
> + return skb_ulp_tls_skb_flags(skb);
> +}

Do you need this function?

> +/* Copy Key to WR */
> +static void tls_copy_tx_key(struct sock *sk, struct sk_buff *skb)
> +{
> + struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
> + struct ulptx_sc_memrd *sc_memrd;
> + struct ulptx_idata *sc;
> + struct chtls_hws *hws = &csk->tlshws;
> + struct chtls_dev *cdev = csk->cdev;
> + u32 immdlen;
> + int kaddr;

Reverse christmas tree comments (that you already received for several
versions) ignored.
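
For the function above, reverse christmas tree ordering (longest declaration
first, with the csk-dependent initialisations kept after csk) would look
roughly like this:

	struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
	struct chtls_hws *hws = &csk->tlshws;
	struct chtls_dev *cdev = csk->cdev;
	struct ulptx_sc_memrd *sc_memrd;
	struct ulptx_idata *sc;
	u32 immdlen;
	int kaddr;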

> [...]
>
> +/*
> + * Returns true if an sk_buff carries urgent data.
> + */
> +static bool skb_urgent(struct sk_buff *skb)
> +{
> + return (ULP_SKB_CB(skb)->flags & ULPCB_FLAG_URG) != 0;
> +}

You silently ignored most of the comment I had about this already on
v10.

> [...]
>
> +static void tls_tx_data_wr(struct sock *sk, struct sk_buff *skb,
> +int dlen, int tls_immd, u32 credits,
> +int expn, int pdus)
> +{
> + struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
> + struct chtls_hws *hws = &csk->tlshws;
> + struct net_device *dev = csk->egress_dev;
> + struct adapter *adap = netdev2adap(dev);
> + struct fw_tlstx_data_wr *req_wr;
> + struct cpl_tx_tls_sfo *req_cpl;
> + int iv_imm = skb_ulp_tls_skb_iv(skb);
> + struct tls_scmd *scmd = &hws->scmd;
> + struct tls_scmd *updated_scmd;
> + unsigned int wr_ulp_mode_force;
> + unsigned char data_type;
> + unsigned char *req;
> + int len = dlen + expn;
> + int immd_len;

Reverse christmas tree.

> +static int chtls_expansion_size(struct sock *sk, int data_len,
> + int fullpdu,
> + unsigned short *pducnt)
> +{
> + struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
> + struct chtls_hws *hws = &csk->tlshws;
> + struct tls_scmd *scmd = &hws->scmd;
> + int hdrlen = TLS_HEADER_LENGTH;
> + int expnsize, frcnt, fraglast, fragsize;
> + int expppdu;

Same here.

> +
> + do {
> + fragsize = hws->mfs;
> +
> + if (SCMD_CIPH_MODE_G(scmd->seqno_numivs) ==
> + SCMD_CIPH_MODE_AES_GCM) {
> + frcnt = (data_len / fragsize);
> + expppdu = GCM_TAG_SIZE + AEAD_EXPLICIT_DATA_SIZE +
> + hdrlen;
> + expnsize =  frcnt * expppdu;
> +
> + if (fullpdu) {
> + *pducnt = data_len / (expppdu + fragsize);
> +
> + if (*pducnt > 32)
> + *pducnt = 32;
> + else if (!*pducnt)
> + *pducnt = 1;
> + expnsize = (*pducnt) * expppdu;
> + break;

Was this supposed to be conditional?

> + }
> + fraglast = data_len % fragsize;
> + if (fraglast > 0) {
> + frcnt += 1;
> + expnsize += expppdu;
> + }
> + break;
> + }
> + pr_info("unsupported cipher\n");
> + expnsize = 0;
> + } while (0);

And why the "do { ... } while (0)" anyway? Copied from a macro?

This series is *still* full of this kind of stuff. Is it such a bizarre
request to ask you to go through your code line by line and check it
carefully? Why should others do it instead?

> +
> + return expnsize;
> +}
> +
> +/* WR with IV, KEY and CPL SFO added */
> +static void make_tlstx_data_wr(struct sock *sk, struct sk_buff *skb,
> +int tls_tx_imm, int tls_len, u32 credits)
> +{
> + struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
> + struct chtls_hws *hws = &csk->tlshws;
> + int pdus = DIV_ROUND_UP(tls_len, hws->mfs);
> + unsigned short pdus_per_ulp = 0;
> + int expn_sz;

Reverse christmas tree comments ignored here.

> [...]
> +int chtls_push_frames(struct chtls_sock *csk, int comp)
> +{
> + struct sock *sk = csk->sk;
> + struct tcp_sock *tp = tcp_sk(sk);
> + struct chtls_hws *hws = &csk->tlshws;
> + struct sk_buff *skb;
> + int total_size = 0;
> + int wr_size = sizeof(struct fw_ofld_tx_data_wr);

And here.

> [...]
> +int chtls_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
> +{
> + struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
> + struct tcp_sock *tp = tcp_sk(sk);
> + struct chtls_dev *cdev = csk->cdev;
> + struct sk_buff *skb;
> + int mss, flags, err;
> + int copied = 0;
> + int hdrlen = 0;
> + int recordsz = 0;
> + long timeo;

And here.

> [...]
> +int chtls_sendpage(struct sock *sk, struct 

[PATCH v12 net-next 09/12] crypto: chtls - Inline TLS record Tx

2018-03-19 Thread Atul Gupta
TLS handler for record transmit.
Create Inline TLS work requests and post them to FW.
Create Inline TLS record CPLs for hardware.

Signed-off-by: Atul Gupta 
---
 drivers/crypto/chelsio/chtls/chtls_io.c | 1252 +++
 1 file changed, 1252 insertions(+)
 create mode 100644 drivers/crypto/chelsio/chtls/chtls_io.c

diff --git a/drivers/crypto/chelsio/chtls/chtls_io.c b/drivers/crypto/chelsio/chtls/chtls_io.c
new file mode 100644
index 0000000..c43072f
--- /dev/null
+++ b/drivers/crypto/chelsio/chtls/chtls_io.c
@@ -0,0 +1,1252 @@
+/*
+ * Copyright (c) 2018 Chelsio Communications, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Written by: Atul Gupta (atul.gu...@chelsio.com)
+ */
+
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+
+#include "chtls.h"
+#include "chtls_cm.h"
+
+static bool is_tls_rx(struct chtls_sock *csk)
+{
+   return csk->tlshws.rxkey >= 0;
+}
+
+static bool is_tls_tx(struct chtls_sock *csk)
+{
+   return csk->tlshws.txkey >= 0;
+}
+
+static bool is_tls_skb(struct chtls_sock *csk, const struct sk_buff *skb)
+{
+   return skb_ulp_tls_skb_flags(skb);
+}
+
+static int data_sgl_len(const struct sk_buff *skb)
+{
+   unsigned int cnt;
+
+   cnt = skb_shinfo(skb)->nr_frags;
+   return sgl_len(cnt) * 8;
+}
+
+static int nos_ivs(struct sock *sk, unsigned int size)
+{
+   struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
+
+   return DIV_ROUND_UP(size, csk->tlshws.mfs);
+}
+
+static int set_ivs_imm(struct sock *sk, const struct sk_buff *skb)
+{
+   int ivs_size = nos_ivs(sk, skb->len) * CIPHER_BLOCK_SIZE;
+   int hlen = TLS_WR_CPL_LEN + data_sgl_len(skb);
+
+   if ((hlen + KEY_ON_MEM_SZ + ivs_size) <
+   MAX_IMM_OFLD_TX_DATA_WR_LEN) {
+   ULP_SKB_CB(skb)->ulp.tls.iv = 1;
+   return 1;
+   }
+   ULP_SKB_CB(skb)->ulp.tls.iv = 0;
+   return 0;
+}
+
+static int max_ivs_size(struct sock *sk, int size)
+{
+   return nos_ivs(sk, size) * CIPHER_BLOCK_SIZE;
+}
+
+static int ivs_size(struct sock *sk, const struct sk_buff *skb)
+{
+   return set_ivs_imm(sk, skb) ? (nos_ivs(sk, skb->len) *
+CIPHER_BLOCK_SIZE) : 0;
+}
+
+static int flowc_wr_credits(int nparams, int *flowclenp)
+{
+   int flowclen16, flowclen;
+
+   flowclen = offsetof(struct fw_flowc_wr, mnemval[nparams]);
+   flowclen16 = DIV_ROUND_UP(flowclen, 16);
+   flowclen = flowclen16 * 16;
+
+   if (flowclenp)
+   *flowclenp = flowclen;
+
+   return flowclen16;
+}
+
+static struct sk_buff *create_flowc_wr_skb(struct sock *sk,
+  struct fw_flowc_wr *flowc,
+  int flowclen)
+{
+   struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
+   struct sk_buff *skb;
+
+   skb = alloc_skb(flowclen, GFP_ATOMIC);
+   if (!skb)
+   return NULL;
+
+   memcpy(__skb_put(skb, flowclen), flowc, flowclen);
+   skb_set_queue_mapping(skb, (csk->txq_idx << 1) | CPL_PRIORITY_DATA);
+
+   return skb;
+}
+
+static int send_flowc_wr(struct sock *sk, struct fw_flowc_wr *flowc,
+int flowclen)
+{
+   struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
+   bool syn_sent = (sk->sk_state == TCP_SYN_SENT);
+   struct tcp_sock *tp = tcp_sk(sk);
+   int flowclen16 = flowclen / 16;
+   struct sk_buff *skb;
+
+   if (csk_flag(sk, CSK_TX_DATA_SENT)) {
+   skb = create_flowc_wr_skb(sk, flowc, flowclen);
+   if (!skb)
+   return -ENOMEM;
+
+   if (syn_sent)
+   __skb_queue_tail(&csk->ooo_queue, skb);
+   else
+   skb_entail(sk, skb,
+  ULPCB_FLAG_NO_HDR | ULPCB_FLAG_NO_APPEND);
+   return 0;
+   }
+
+   if (!syn_sent) {
+   int ret;
+
+   ret = cxgb4_immdata_send(csk->egress_dev,
+csk->txq_idx,
+flowc, flowclen);
+   if (!ret)
+   return flowclen16;
+   }
+   skb = create_flowc_wr_skb(sk, flowc, flowclen);
+   if (!skb)
+   return -ENOMEM;
+   send_or_defer(sk, tp, skb, 0);
+   return flowclen16;
+}
+
+static u8 tcp_state_to_flowc_state(u8 state)
+{
+   u8 ret = FW_FLOWC_MNEM_TCPSTATE_ESTABLISHED;
+
+   switch (state) {
+   case TCP_ESTABLISHED:
+   ret = FW_FLOWC_MNEM_TCPSTATE_ESTABLISHED;
+   break;
+   case TCP_CLOSE_WAIT:
+   ret = FW_FLOWC_MNEM_TCPSTATE_CLOSEWAIT;
+   break;
+   case