[PATCH 0/5] xfrm: ESP Traffic Flow Confidentiality padding

2010-11-30 Thread Martin Willi
The following patchset adds Traffic Flow Confidentiality padding. The
first patch introduces a new Netlink XFRM attribute to configure TFC
from userspace. The second patch removes the existing padlen option in
ESP; it is not used at all, and I currently see neither the purpose of
the field nor how it should interact with TFC padding. Patches three
and four implement the padding logic in IPv4 and IPv6 ESP.

Padding is specified as a length to which the encapsulated data is
padded. Support for TFC padding as specified in RFC4303 must be
negotiated explicitly by the key management protocol, hence the
optional flag. The fallback of expanding the ESP padding field is
limited to 255 padding bytes. If that is insufficient, the padding
length is randomized to hide the real length as well as possible.
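
To make the fallback concrete, the length selection implemented in patches
three and four boils down to the following standalone C sketch (ALIGN(),
rand8() and the espv3_tunnel flag are illustrative stand-ins for the
kernel's macro, (u8)random32() and the mode/flag checks):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

/* illustrative stand-in for the kernel's (u8)random32() */
static uint8_t rand8(void) { return rand() & 0xff; }

/*
 * Length of the to-be-encrypted data: payload + TFC padding + ESP
 * padding + the two pad-length/next-header bytes.
 */
static unsigned int tfc_clen(unsigned int len, unsigned int tfcpadto,
                             unsigned int blksize, int espv3_tunnel)
{
        unsigned int clen;

        if (len >= tfcpadto)            /* already at or above the boundary */
                return ALIGN(len + 2, blksize);
        if (espv3_tunnel)               /* RFC4303 TFC padding, no limit */
                return ALIGN(tfcpadto + 2, blksize);
        clen = ALIGN(tfcpadto + 2, blksize);
        if (clen - len - 2 <= 255)      /* ESP padding field suffices */
                return clen;
        /* boundary unreachable with 255 pad bytes: randomize instead */
        clen = ALIGN(len + rand8() + 2, blksize);
        if (clen - len - 2 > 255)
                clen -= blksize;
        return clen;
}

int main(void)
{
        printf("%u\n", tfc_clen(100, 1000, 16, 1)); /* 1008: padded to boundary */
        printf("%u\n", tfc_clen(100, 1000, 16, 0)); /* randomized, pad <= 255 */
        return 0;
}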

The last patch adds an option to pad all packets to the PMTU. It works
fine for simple scenarios, but I'm not sure whether my PMTU lookup works
in all cases (nested transforms?). Any pointers would be appreciated.

Martin Willi (5):
  xfrm: Add Traffic Flow Confidentiality padding XFRM attribute
  xfrm: Remove unused ESP padlen field
  xfrm: Traffic Flow Confidentiality for IPv4 ESP
  xfrm: Traffic Flow Confidentiality for IPv6 ESP
  xfrm: Add TFC padding option to automatically pad to PMTU

 include/linux/xfrm.h |    8 +++
 include/net/esp.h    |    3 --
 include/net/xfrm.h   |    1 +
 net/ipv4/esp4.c      |   58 +++--
 net/ipv6/esp6.c      |   58 +++--
 net/xfrm/xfrm_user.c |   16 -
 6 files changed, 105 insertions(+), 39 deletions(-)



[PATCH 5/5] xfrm: Add TFC padding option to automatically pad to PMTU

2010-11-30 Thread Martin Willi
Traffic Flow Confidentiality padding is most effective if all packets
have exactly the same size. For SAs carrying mixed traffic, the largest
packet size is usually the PMTU. Instead of requiring userspace to
calculate the PMTU manually, the XFRM_TFC_PMTU flag pads to the PMTU
automatically.

Signed-off-by: Martin Willi 
---
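As a rough worked example of the effect (illustrative numbers, and assuming
my reading of the lookup is right): with tunnel-mode ESP, a 20-byte outer
IPv4 header, an 8-byte ESP header, a 16-byte IV, a 12-byte ICV and a PMTU
of 1500, esp4_get_mtu() computes 1500 - 44 - 12 = 1444, rounds down to the
16-byte block boundary (1440) and returns 1438. Padding every packet to
1438 then yields 1440 bytes of encrypted payload and a 1496-byte packet on
the wire.
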
 include/linux/xfrm.h |    1 +
 net/ipv4/esp4.c      |    7 +++
 net/ipv6/esp6.c      |    7 +++
 3 files changed, 15 insertions(+), 0 deletions(-)

diff --git a/include/linux/xfrm.h b/include/linux/xfrm.h
index b1e5f8a..2a9f0b4 100644
--- a/include/linux/xfrm.h
+++ b/include/linux/xfrm.h
@@ -298,6 +298,7 @@ struct xfrm_tfc {
__u16   pad;
__u16   flags;
 #define XFRM_TFC_ESPV3 1   /* RFC4303 TFC padding, if possible */
+#define XFRM_TFC_PMTU  2   /* ignore pad field, pad to PMTU */
 };
 
 enum xfrm_sadattr_type_t {
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
index a6adfbc..cfb4992 100644
--- a/net/ipv4/esp4.c
+++ b/net/ipv4/esp4.c
@@ -23,6 +23,8 @@ struct esp_skb_cb {
 
 #define ESP_SKB_CB(__skb) ((struct esp_skb_cb *)&((__skb)->cb[0]))
 
+static u32 esp4_get_mtu(struct xfrm_state *x, int mtu);
+
 /*
  * Allocate an AEAD request structure with extra space for SG and IV.
  *
@@ -133,6 +135,11 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
blksize = ALIGN(crypto_aead_blocksize(aead), 4);
tfclen = 0;
tfcpadto = x->tfc.pad;
+   if (x->tfc.flags & XFRM_TFC_PMTU) {
+   struct xfrm_dst *dst = (struct xfrm_dst *)skb_dst(skb);
+
+   tfcpadto = esp4_get_mtu(x, dst->child_mtu_cached);
+   }
 
if (skb->len >= tfcpadto) {
clen = ALIGN(skb->len + 2, blksize);
diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
index 9494cb1..6cb9a02 100644
--- a/net/ipv6/esp6.c
+++ b/net/ipv6/esp6.c
@@ -49,6 +49,8 @@ struct esp_skb_cb {
 
 #define ESP_SKB_CB(__skb) ((struct esp_skb_cb *)&((__skb)->cb[0]))
 
+static u32 esp6_get_mtu(struct xfrm_state *x, int mtu);
+
 /*
  * Allocate an AEAD request structure with extra space for SG and IV.
  *
@@ -157,6 +159,11 @@ static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
blksize = ALIGN(crypto_aead_blocksize(aead), 4);
tfclen = 0;
tfcpadto = x->tfc.pad;
+   if (x->tfc.flags & XFRM_TFC_PMTU) {
+   struct xfrm_dst *dst = (struct xfrm_dst *)skb_dst(skb);
+
+   tfcpadto = esp6_get_mtu(x, dst->child_mtu_cached);
+   }
 
if (skb->len >= tfcpadto) {
clen = ALIGN(skb->len + 2, blksize);
-- 
1.7.1



[PATCH 1/5] xfrm: Add Traffic Flow Confidentiality padding XFRM attribute

2010-11-30 Thread Martin Willi
The XFRMA_TFC attribute for XFRM state installation configures
Traffic Flow Confidentiality by padding ESP packets to a specified
length. To use RFC4303 TFC padding and overcome the 255 byte limit
of the ESP padding field, the XFRM_TFC_ESPV3 flag must be set.

Signed-off-by: Martin Willi 
---
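For illustration, a userspace key manager would attach the attribute when
installing the SA roughly as follows (a sketch using libnl's nla_put();
msg is assumed to be an XFRM_MSG_NEWSA request under construction):

        struct xfrm_tfc tfc = {
                .pad   = 1000,           /* pad encapsulated data to 1000 bytes */
                .flags = XFRM_TFC_ESPV3, /* peer negotiated RFC4303 TFC padding */
        };

        nla_put(msg, XFRMA_TFC, sizeof(tfc), &tfc);
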
 include/linux/xfrm.h |    7 +++
 include/net/xfrm.h   |    1 +
 net/xfrm/xfrm_user.c |   16 ++--
 3 files changed, 22 insertions(+), 2 deletions(-)

diff --git a/include/linux/xfrm.h b/include/linux/xfrm.h
index b971e38..b1e5f8a 100644
--- a/include/linux/xfrm.h
+++ b/include/linux/xfrm.h
@@ -283,6 +283,7 @@ enum xfrm_attr_type_t {
XFRMA_KMADDRESS,/* struct xfrm_user_kmaddress */
XFRMA_ALG_AUTH_TRUNC,   /* struct xfrm_algo_auth */
XFRMA_MARK, /* struct xfrm_mark */
+   XFRMA_TFC,  /* struct xfrm_tfc */
__XFRMA_MAX
 
 #define XFRMA_MAX (__XFRMA_MAX - 1)
@@ -293,6 +294,12 @@ struct xfrm_mark {
__u32   m; /* mask */
 };
 
+struct xfrm_tfc {
+   __u16   pad;
+   __u16   flags;
+#define XFRM_TFC_ESPV3 1   /* RFC4303 TFC padding, if possible */
+};
+
 enum xfrm_sadattr_type_t {
XFRMA_SAD_UNSPEC,
XFRMA_SAD_CNT,
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index bcfb6b2..03468c0 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -143,6 +143,7 @@ struct xfrm_state {
struct xfrm_id  id;
struct xfrm_selectorsel;
struct xfrm_markmark;
+   struct xfrm_tfc tfc;
 
u32 genid;
 
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index 8bae6b2..0b4ec02 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -148,7 +148,8 @@ static int verify_newsa_info(struct xfrm_usersa_info *p,
 !attrs[XFRMA_ALG_AUTH_TRUNC]) ||
attrs[XFRMA_ALG_AEAD]   ||
attrs[XFRMA_ALG_CRYPT]  ||
-   attrs[XFRMA_ALG_COMP])
+   attrs[XFRMA_ALG_COMP]   ||
+   attrs[XFRMA_TFC])
goto out;
break;
 
@@ -172,7 +173,8 @@ static int verify_newsa_info(struct xfrm_usersa_info *p,
attrs[XFRMA_ALG_AEAD]   ||
attrs[XFRMA_ALG_AUTH]   ||
attrs[XFRMA_ALG_AUTH_TRUNC] ||
-   attrs[XFRMA_ALG_CRYPT])
+   attrs[XFRMA_ALG_CRYPT]  ||
+   attrs[XFRMA_TFC])
goto out;
break;
 
@@ -186,6 +188,7 @@ static int verify_newsa_info(struct xfrm_usersa_info *p,
attrs[XFRMA_ALG_CRYPT]  ||
attrs[XFRMA_ENCAP]  ||
attrs[XFRMA_SEC_CTX]||
+   attrs[XFRMA_TFC]||
!attrs[XFRMA_COADDR])
goto out;
break;
@@ -439,6 +442,9 @@ static struct xfrm_state *xfrm_state_construct(struct net *net,
goto error;
}
 
+   if (attrs[XFRMA_TFC])
+   memcpy(&x->tfc, nla_data(attrs[XFRMA_TFC]), sizeof(x->tfc));
+
if (attrs[XFRMA_COADDR]) {
x->coaddr = kmemdup(nla_data(attrs[XFRMA_COADDR]),
sizeof(*x->coaddr), GFP_KERNEL);
@@ -688,6 +694,9 @@ static int copy_to_user_state_extra(struct xfrm_state *x,
if (x->encap)
NLA_PUT(skb, XFRMA_ENCAP, sizeof(*x->encap), x->encap);
 
+   if (x->tfc.pad || x->tfc.flags)
+   NLA_PUT(skb, XFRMA_TFC, sizeof(x->tfc), &x->tfc);
+
if (xfrm_mark_put(skb, &x->mark))
goto nla_put_failure;
 
@@ -2122,6 +2131,7 @@ static const struct nla_policy xfrma_policy[XFRMA_MAX+1] = {
[XFRMA_MIGRATE] = { .len = sizeof(struct xfrm_user_migrate) },
[XFRMA_KMADDRESS]   = { .len = sizeof(struct xfrm_user_kmaddress) },
[XFRMA_MARK]= { .len = sizeof(struct xfrm_mark) },
+   [XFRMA_TFC] = { .len = sizeof(struct xfrm_tfc) },
 };
 
 static struct xfrm_link {
@@ -2301,6 +2311,8 @@ static inline size_t xfrm_sa_len(struct xfrm_state *x)
l += nla_total_size(sizeof(*x->calg));
if (x->encap)
l += nla_total_size(sizeof(*x->encap));
+   if (x->tfc.pad)
+   l += nla_total_size(sizeof(x->tfc));
if (x->security)
l += nla_total_size(sizeof(struct xfrm_user_sec_ctx) +
x->security->ctx_len);
-- 
1.7.1



[PATCH 2/5] xfrm: Remove unused ESP padlen field

2010-11-30 Thread Martin Willi
The padlen field in IPv4/IPv6 ESP is used to align the ESP padding
length to a value larger than the AEAD block size. There is, however,
no option to set this field, so remove it.

Signed-off-by: Martin Willi 
---
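With padlen gone, trailer sizing depends only on the cipher. As a worked
example (illustrative numbers): for a 16-byte block size and a 12-byte ICV,
align = 16 and trailer_len = 16 + 1 + 12 = 29, exactly what the unpatched
code computed whenever padlen was zero -- which it always was.
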
 include/net/esp.h |    3 ---
 net/ipv4/esp4.c   |   11 ++-
 net/ipv6/esp6.c   |   11 ++-
 3 files changed, 4 insertions(+), 21 deletions(-)

diff --git a/include/net/esp.h b/include/net/esp.h
index d584513..6dfb4d0 100644
--- a/include/net/esp.h
+++ b/include/net/esp.h
@@ -6,9 +6,6 @@
 struct crypto_aead;
 
 struct esp_data {
-   /* 0..255 */
-   int padlen;
-
/* Confidentiality & Integrity */
struct crypto_aead *aead;
 };
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
index 14ca1f1..67e4c12 100644
--- a/net/ipv4/esp4.c
+++ b/net/ipv4/esp4.c
@@ -132,8 +132,6 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
 
blksize = ALIGN(crypto_aead_blocksize(aead), 4);
clen = ALIGN(clen + 2, blksize);
-   if (esp->padlen)
-   clen = ALIGN(clen, esp->padlen);
 
if ((err = skb_cow_data(skb, clen - skb->len + alen, &trailer)) < 0)
goto error;
@@ -386,12 +384,11 @@ static u32 esp4_get_mtu(struct xfrm_state *x, int mtu)
 {
struct esp_data *esp = x->data;
u32 blksize = ALIGN(crypto_aead_blocksize(esp->aead), 4);
-   u32 align = max_t(u32, blksize, esp->padlen);
u32 rem;
 
mtu -= x->props.header_len + crypto_aead_authsize(esp->aead);
-   rem = mtu & (align - 1);
-   mtu &= ~(align - 1);
+   rem = mtu & (blksize - 1);
+   mtu &= ~(blksize - 1);
 
switch (x->props.mode) {
case XFRM_MODE_TUNNEL:
@@ -570,8 +567,6 @@ static int esp_init_state(struct xfrm_state *x)
 
aead = esp->aead;
 
-   esp->padlen = 0;
-
x->props.header_len = sizeof(struct ip_esp_hdr) +
  crypto_aead_ivsize(aead);
if (x->props.mode == XFRM_MODE_TUNNEL)
@@ -594,8 +589,6 @@ static int esp_init_state(struct xfrm_state *x)
}
 
align = ALIGN(crypto_aead_blocksize(aead), 4);
-   if (esp->padlen)
-   align = max_t(u32, align, esp->padlen);
x->props.trailer_len = align + 1 + crypto_aead_authsize(esp->aead);
 
 error:
diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
index ee9b93b..e9e6e1c 100644
--- a/net/ipv6/esp6.c
+++ b/net/ipv6/esp6.c
@@ -156,8 +156,6 @@ static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
 
blksize = ALIGN(crypto_aead_blocksize(aead), 4);
clen = ALIGN(clen + 2, blksize);
-   if (esp->padlen)
-   clen = ALIGN(clen, esp->padlen);
 
if ((err = skb_cow_data(skb, clen - skb->len + alen, &trailer)) < 0)
goto error;
@@ -337,12 +335,11 @@ static u32 esp6_get_mtu(struct xfrm_state *x, int mtu)
 {
struct esp_data *esp = x->data;
u32 blksize = ALIGN(crypto_aead_blocksize(esp->aead), 4);
-   u32 align = max_t(u32, blksize, esp->padlen);
u32 rem;
 
mtu -= x->props.header_len + crypto_aead_authsize(esp->aead);
-   rem = mtu & (align - 1);
-   mtu &= ~(align - 1);
+   rem = mtu & (blksize - 1);
+   mtu &= ~(blksize - 1);
 
if (x->props.mode != XFRM_MODE_TUNNEL) {
u32 padsize = ((blksize - 1) & 7) + 1;
@@ -516,8 +513,6 @@ static int esp6_init_state(struct xfrm_state *x)
 
aead = esp->aead;
 
-   esp->padlen = 0;
-
x->props.header_len = sizeof(struct ip_esp_hdr) +
  crypto_aead_ivsize(aead);
switch (x->props.mode) {
@@ -536,8 +531,6 @@ static int esp6_init_state(struct xfrm_state *x)
}
 
align = ALIGN(crypto_aead_blocksize(aead), 4);
-   if (esp->padlen)
-   align = max_t(u32, align, esp->padlen);
x->props.trailer_len = align + 1 + crypto_aead_authsize(esp->aead);
 
 error:
-- 
1.7.1



[PATCH 4/5] xfrm: Traffic Flow Confidentiality for IPv6 ESP

2010-11-30 Thread Martin Willi
If configured on the xfrm state, increase the length of all packets to
a given boundary using TFC padding as specified in RFC4303. In
transport mode, or if the XFRM_TFC_ESPV3 flag is not set, grow the ESP
padding field instead.

Signed-off-by: Martin Willi 
---
 net/ipv6/esp6.c |   42 +-
 1 files changed, 33 insertions(+), 9 deletions(-)

diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
index e9e6e1c..9494cb1 100644
--- a/net/ipv6/esp6.c
+++ b/net/ipv6/esp6.c
@@ -140,6 +140,9 @@ static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
int blksize;
int clen;
int alen;
+   int plen;
+   int tfclen;
+   int tfcpadto;
int nfrags;
u8 *iv;
u8 *tail;
@@ -148,16 +151,33 @@ static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
/* skb is pure payload to encrypt */
err = -ENOMEM;
 
-   /* Round to block size */
-   clen = skb->len;
-
aead = esp->aead;
alen = crypto_aead_authsize(aead);
 
blksize = ALIGN(crypto_aead_blocksize(aead), 4);
-   clen = ALIGN(clen + 2, blksize);
-
-   if ((err = skb_cow_data(skb, clen - skb->len + alen, &trailer)) < 0)
+   tfclen = 0;
+   tfcpadto = x->tfc.pad;
+
+   if (skb->len >= tfcpadto) {
+   clen = ALIGN(skb->len + 2, blksize);
+   } else if (x->tfc.flags & XFRM_TFC_ESPV3 &&
+  x->props.mode == XFRM_MODE_TUNNEL) {
+   /* ESPv3 TFC padding, append bytes to payload */
+   tfclen = tfcpadto - skb->len;
+   clen = ALIGN(skb->len + 2 + tfclen, blksize);
+   } else {
+   /* ESPv2 TFC padding. If we exceed the 255 byte maximum, use
+* random padding to hide payload length as good as possible. */
+   clen = ALIGN(skb->len + 2 + tfcpadto - skb->len, blksize);
+   if (clen - skb->len - 2 > 255) {
+   clen = ALIGN(skb->len + (u8)random32() + 2, blksize);
+   if (clen - skb->len - 2 > 255)
+   clen -= blksize;
+   }
+   }
+   plen = clen - skb->len - tfclen;
+   err = skb_cow_data(skb, tfclen + plen + alen, &trailer);
+   if (err < 0)
goto error;
nfrags = err;
 
@@ -172,13 +192,17 @@ static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
 
/* Fill padding... */
tail = skb_tail_pointer(trailer);
+   if (tfclen) {
+   memset(tail, 0, tfclen);
+   tail += tfclen;
+   }
do {
int i;
-   for (i=0; i<clen-skb->len - 2; i++)
+   for (i = 0; i < plen - 2; i++)
tail[i] = i + 1;
} while (0);
-   tail[clen-skb->len - 2] = (clen - skb->len) - 2;
-   tail[clen - skb->len - 1] = *skb_mac_header(skb);
+   tail[plen - 2] = plen - 2;
+   tail[plen - 1] = *skb_mac_header(skb);
pskb_put(skb, trailer, clen - skb->len + alen);
 
skb_push(skb, -skb_network_offset(skb));
-- 
1.7.1



[PATCH 3/5] xfrm: Traffic Flow Confidentiality for IPv4 ESP

2010-11-30 Thread Martin Willi
If configured on the xfrm state, increase the length of all packets to
a given boundary using TFC padding as specified in RFC4303. In
transport mode, or if the XFRM_TFC_ESPV3 flag is not set, grow the ESP
padding field instead.

Signed-off-by: Martin Willi 
---
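A worked example of the fallback path (illustrative numbers): skb->len =
100, tfcpadto = 1000, blksize = 16. Padding to the boundary would need
ALIGN(1002, 16) - 100 - 2 = 906 ESP pad bytes, far over the 255 limit, so
the length is randomized instead: clen = ALIGN(100 + r + 2, 16) for a
random byte r, reduced by one block in the rare case (r close to 255)
where the pad count would still exceed 255.
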
 net/ipv4/esp4.c |   42 +-
 1 files changed, 33 insertions(+), 9 deletions(-)

diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
index 67e4c12..a6adfbc 100644
--- a/net/ipv4/esp4.c
+++ b/net/ipv4/esp4.c
@@ -117,23 +117,43 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
int blksize;
int clen;
int alen;
+   int plen;
+   int tfclen;
+   int tfcpadto;
int nfrags;
 
/* skb is pure payload to encrypt */
 
err = -ENOMEM;
 
-   /* Round to block size */
-   clen = skb->len;
-
esp = x->data;
aead = esp->aead;
alen = crypto_aead_authsize(aead);
 
blksize = ALIGN(crypto_aead_blocksize(aead), 4);
-   clen = ALIGN(clen + 2, blksize);
-
-   if ((err = skb_cow_data(skb, clen - skb->len + alen, &trailer)) < 0)
+   tfclen = 0;
+   tfcpadto = x->tfc.pad;
+
+   if (skb->len >= tfcpadto) {
+   clen = ALIGN(skb->len + 2, blksize);
+   } else if (x->tfc.flags & XFRM_TFC_ESPV3 &&
+  x->props.mode == XFRM_MODE_TUNNEL) {
+   /* ESPv3 TFC padding, append bytes to payload */
+   tfclen = tfcpadto - skb->len;
+   clen = ALIGN(skb->len + 2 + tfclen, blksize);
+   } else {
+   /* ESPv2 TFC padding. If we exceed the 255 byte maximum, use
+* random padding to hide payload length as good as possible. */
+   clen = ALIGN(skb->len + 2 + tfcpadto - skb->len, blksize);
+   if (clen - skb->len - 2 > 255) {
+   clen = ALIGN(skb->len + (u8)random32() + 2, blksize);
+   if (clen - skb->len - 2 > 255)
+   clen -= blksize;
+   }
+   }
+   plen = clen - skb->len - tfclen;
+   err = skb_cow_data(skb, tfclen + plen + alen, &trailer);
+   if (err < 0)
goto error;
nfrags = err;
 
@@ -148,13 +168,17 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
 
/* Fill padding... */
tail = skb_tail_pointer(trailer);
+   if (tfclen) {
+   memset(tail, 0, tfclen);
+   tail += tfclen;
+   }
do {
int i;
-   for (i=0; i<clen-skb->len - 2; i++)
+   for (i = 0; i < plen - 2; i++)
tail[i] = i + 1;
} while (0);
-   tail[clen - skb->len - 2] = (clen - skb->len) - 2;
-   tail[clen - skb->len - 1] = *skb_mac_header(skb);
+   tail[plen - 2] = plen - 2;
+   tail[plen - 1] = *skb_mac_header(skb);
pskb_put(skb, trailer, clen - skb->len + alen);
 
skb_push(skb, -skb_network_offset(skb));
-- 
1.7.1



Re: [PATCH v1.5 3/5] key: add tpm_send command

2010-11-30 Thread David Safford
On Tue, 2010-11-30 at 14:32 +0000, David Howells wrote:
> Serge Hallyn  wrote:
> 
> > > +int tpm_send(u32 chip_num, void *cmd, size_t buflen)
> > 
> > Hate to nit-pick, but any particular reason you're not following the
> > rest of the file and using 'struct tpm_cmd_t *cmd' here?
> 
> Ummm...  Something else I've just noticed...
> 
>   static ssize_t tpm_transmit(struct tpm_chip *chip, const char *buf,
>  size_t bufsiz)
> 
> would suggest that buf is read-only, but tpm_transmit() keeps casting it away,
> and especially, casts it away before passing it to chip->vendor.recv()...
> This would seem to indicate a logic error somewhere.
> 
> Certainly, tpm_atml_recv() modifies the buffer it is given...

as does tpm_tis_recv(). Per the TCG spec, the response data goes back into
the same buffer that carried the command.

> I suspect the argument and reply buffer pointers should be passed separately.

It seems more like a spurious "const" in tpm_transmit(). This has been in
the code for a long time. Good catch. I'll draft a cleanup for these and
some other nits and send it to Rajiv...

thanks!
dave

> David
> 



Re: [PATCH v1.5 3/5] key: add tpm_send command

2010-11-30 Thread David Howells
Serge Hallyn  wrote:

> > +int tpm_send(u32 chip_num, void *cmd, size_t buflen)
> 
> Hate to nit-pick, but any particular reason you're not following the
> rest of the file and using 'struct tpm_cmd_t *cmd' here?

Ummm...  Something else I've just noticed...

static ssize_t tpm_transmit(struct tpm_chip *chip, const char *buf,
   size_t bufsiz)

would suggest that buf is read-only, but tpm_transmit() keeps casting it away,
and especially, casts it away before passing it to chip->vendor.recv()...
This would seem to indicate a logic error somewhere.

Certainly, tpm_atml_recv() modifies the buffer it is given...

I suspect the argument and reply buffer pointers should be passed separately.

David
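
A minimal sketch of the separation David suggests (hypothetical signature,
not the actual tpm_transmit() API):

        /* Hypothetical: the command buffer stays genuinely read-only and
         * the reply lands in a distinct, writable buffer, so nothing ever
         * needs to cast the const away. */
        static ssize_t tpm_transmit_split(struct tpm_chip *chip,
                                          const u8 *cmd, size_t cmdlen,
                                          u8 *reply, size_t replylen);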


[PATCH] Add missing lockdep class names for AF_ALG

2010-11-30 Thread Miloslav Trmač
---
 net/core/sock.c |    6 +++---
 1 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/net/core/sock.c b/net/core/sock.c
index 3eed542..634d5bc 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -157,7 +157,7 @@ static const char *const af_family_key_strings[AF_MAX+1] = {
   "sk_lock-27"   , "sk_lock-28"  , "sk_lock-AF_CAN"  ,
   "sk_lock-AF_TIPC"  , "sk_lock-AF_BLUETOOTH", "sk_lock-IUCV",
   "sk_lock-AF_RXRPC" , "sk_lock-AF_ISDN" , "sk_lock-AF_PHONET"   ,
-  "sk_lock-AF_IEEE802154", "sk_lock-AF_CAIF" ,
+  "sk_lock-AF_IEEE802154", "sk_lock-AF_CAIF" , "sk_lock-AF_ALG"  ,
   "sk_lock-AF_MAX"
 };
 static const char *const af_family_slock_key_strings[AF_MAX+1] = {
@@ -173,7 +173,7 @@ static const char *const af_family_slock_key_strings[AF_MAX+1] = {
   "slock-27"   , "slock-28"  , "slock-AF_CAN"  ,
   "slock-AF_TIPC"  , "slock-AF_BLUETOOTH", "slock-AF_IUCV" ,
   "slock-AF_RXRPC" , "slock-AF_ISDN" , "slock-AF_PHONET"   ,
-  "slock-AF_IEEE802154", "slock-AF_CAIF" ,
+  "slock-AF_IEEE802154", "slock-AF_CAIF" , "slock-AF_ALG"  ,
   "slock-AF_MAX"
 };
 static const char *const af_family_clock_key_strings[AF_MAX+1] = {
@@ -189,7 +189,7 @@ static const char *const af_family_clock_key_strings[AF_MAX+1] = {
   "clock-27"   , "clock-28"  , "clock-AF_CAN"  ,
   "clock-AF_TIPC"  , "clock-AF_BLUETOOTH", "clock-AF_IUCV" ,
   "clock-AF_RXRPC" , "clock-AF_ISDN" , "clock-AF_PHONET"   ,
-  "clock-AF_IEEE802154", "clock-AF_CAIF" ,
+  "clock-AF_IEEE802154", "clock-AF_CAIF" , "clock-AF_ALG"  ,
   "clock-AF_MAX"
 };
 
-- 
1.7.3.2



Re: crypto: algif_skcipher - Fixed overflow when sndbuf is page aligned

2010-11-30 Thread Herbert Xu
Dmitry Kasatkin  wrote:
> Hi,
> 
> What is the repo with algif patches?

http://git.kernel.org/?p=linux/kernel/git/herbert/cryptodev-2.6.git;a=summary

Cheers,
-- 
Email: Herbert Xu 
Home Page: http://gondor.apana.org.au/~herbert/
PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt


Re: crypto: algif_skcipher - Fixed overflow when sndbuf is page aligned

2010-11-30 Thread Dmitry Kasatkin
Hi,

What is the repo with the algif patches?
I would like to try them.

- Dmitry


On 30/11/10 10:51, ext Herbert Xu wrote:
> Hi:
>
> Just noticed that algif_skcipher fails to apply the sk_sndbuf limit
> correctly unless it is a multiple of PAGE_SIZE.  What happens is
> that the merge path will exceed sk_sndbuf causing the subsequent
> limit comparison to fail as it tries to do an unsigned comparison
> with a negative value.
>
> This patch fixes the problem.
>
> [remainder of the quoted patch trimmed; it is identical to Herbert's
> original message, reproduced in full below]

crypto: algif_skcipher - Handle unaligned receive buffer

2010-11-30 Thread Herbert Xu
Hi:

This patch fixes unexpected EINVAL failures on recvmsg when
encrypting/decrypting due to unaligned receive buffers.

commit bc97e57eb21f8db55bf0e1f182d384e75b2e3c99
Author: Herbert Xu 
Date:   Tue Nov 30 17:04:31 2010 +0800

crypto: algif_skcipher - Handle unaligned receive buffer

As it is, if user-space passes in a receive buffer that's not
aligned to the cipher block size, we'll end up encrypting or
decrypting a partial block, which causes a spurious EINVAL to be
returned.

This patch fixes this by moving the partial block test after the
af_alg_make_sg call.
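
Roughly what goes wrong, with illustrative numbers: with bs = 16 and 100
bytes requested, af_alg_make_sg() may map only, say, 90 bytes because of
how the unaligned user buffer falls across pages. Rounding down after the
call turns that into 80 bytes -- a whole number of blocks -- whereas
rounding before it could leave the cipher a partial block and the caller
a spurious EINVAL.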

Signed-off-by: Herbert Xu 

diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c
index 1f33480..6a6dfc0 100644
--- a/crypto/algif_skcipher.c
+++ b/crypto/algif_skcipher.c
@@ -454,17 +454,17 @@ static int skcipher_recvmsg(struct kiocb *unused, struct socket *sock,
 
used = min_t(unsigned long, used, seglen);
 
+   used = af_alg_make_sg(&ctx->rsgl, from, used, 1);
+   err = used;
+   if (err < 0)
+   goto unlock;
+
if (ctx->more || used < ctx->used)
used -= used % bs;
 
err = -EINVAL;
if (!used)
-   goto unlock;
-
-   used = af_alg_make_sg(&ctx->rsgl, from, used, 1);
-   err = used;
-   if (err < 0)
-   goto unlock;
+   goto free;
 
ablkcipher_request_set_crypt(&ctx->req, sg,
 ctx->rsgl.sg, used,
@@ -476,6 +476,7 @@ static int skcipher_recvmsg(struct kiocb *unused, struct socket *sock,
crypto_ablkcipher_decrypt(&ctx->req),
&ctx->completion);
 
+free:
af_alg_free_sg(&ctx->rsgl);
 
if (err)

Cheers,
-- 
Email: Herbert Xu 
Home Page: http://gondor.apana.org.au/~herbert/
PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt


crypto: algif_skcipher - Fixed overflow when sndbuf is page aligned

2010-11-30 Thread Herbert Xu
Hi:

Just noticed that algif_skcipher fails to apply the sk_sndbuf limit
correctly unless it is a multiple of PAGE_SIZE.  What happens is
that the merge path will exceed sk_sndbuf causing the subsequent
limit comparison to fail as it tries to do an unsigned comparison
with a negative value.

This patch fixes the problem.
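
The promotion bug is easy to reproduce in isolation; a standalone C
illustration (not kernel code):

#include <stdio.h>

int main(void)
{
        int limit = 4096 - 6144;   /* sk_sndbuf - ctx->used: negative */
        unsigned long len = 512;

        /* min_t(unsigned long, len, limit): the negative limit converts
         * to a huge unsigned value, so it never limits anything. */
        unsigned long clamped =
                len < (unsigned long)limit ? len : (unsigned long)limit;

        printf("clamped = %lu\n", clamped); /* 512 -- should have been 0 */
        return 0;
}

Taking max_t(int, limit, 0) before the comparison, as the patch does,
keeps the value in range.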

commit 0f6bb83cb12e4617e696ffa566f3fc6c092686e2
Author: Herbert Xu 
Date:   Tue Nov 30 16:49:02 2010 +0800

crypto: algif_skcipher - Fixed overflow when sndbuf is page aligned

When sk_sndbuf is not a multiple of PAGE_SIZE, the limit tests
in sendmsg fail as the limit variable becomes negative and we're
using an unsigned comparison.

The same thing can happen if sk_sndbuf is lowered after a sendmsg
call.

This patch fixes this by always taking the signed maximum of limit
and 0 before we perform the comparison.

It also rounds the value of sk_sndbuf down to a multiple of PAGE_SIZE
so that we don't end up allocating a page only to use a small number
of bytes in it because we're bound by sk_sndbuf.

Signed-off-by: Herbert Xu 

diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c
index 9b2f440..1f33480 100644
--- a/crypto/algif_skcipher.c
+++ b/crypto/algif_skcipher.c
@@ -52,12 +52,18 @@ struct skcipher_ctx {
 #define MAX_SGL_ENTS ((PAGE_SIZE - sizeof(struct skcipher_sg_list)) / \
  sizeof(struct scatterlist) - 1)
 
-static inline bool skcipher_writable(struct sock *sk)
+static inline int skcipher_sndbuf(struct sock *sk)
 {
struct alg_sock *ask = alg_sk(sk);
struct skcipher_ctx *ctx = ask->private;
 
-   return ctx->used + PAGE_SIZE <= max_t(int, sk->sk_sndbuf, PAGE_SIZE);
+   return max_t(int, max_t(int, sk->sk_sndbuf & PAGE_MASK, PAGE_SIZE) -
+ ctx->used, 0);
+}
+
+static inline bool skcipher_writable(struct sock *sk)
+{
+   return PAGE_SIZE <= skcipher_sndbuf(sk);
 }
 
 static int skcipher_alloc_sgl(struct sock *sk)
@@ -245,7 +251,6 @@ static int skcipher_sendmsg(struct kiocb *unused, struct socket *sock,
struct af_alg_control con = {};
long copied = 0;
bool enc = 0;
-   int limit;
int err;
int i;
 
@@ -281,9 +286,6 @@ static int skcipher_sendmsg(struct kiocb *unused, struct socket *sock,
memcpy(ctx->iv, con.iv->iv, ivsize);
}
 
-   limit = max_t(int, sk->sk_sndbuf, PAGE_SIZE);
-   limit -= ctx->used;
-
while (size) {
struct scatterlist *sg;
unsigned long len = size;
@@ -309,20 +311,16 @@ static int skcipher_sendmsg(struct kiocb *unused, struct socket *sock,
ctx->used += len;
copied += len;
size -= len;
-   limit -= len;
continue;
}
 
-   if (limit < PAGE_SIZE) {
+   if (!skcipher_writable(sk)) {
err = skcipher_wait_for_wmem(sk, msg->msg_flags);
if (err)
goto unlock;
-
-   limit = max_t(int, sk->sk_sndbuf, PAGE_SIZE);
-   limit -= ctx->used;
}
 
-   len = min_t(unsigned long, len, limit);
+   len = min_t(unsigned long, len, skcipher_sndbuf(sk));
 
err = skcipher_alloc_sgl(sk);
if (err)
@@ -352,7 +350,6 @@ static int skcipher_sendmsg(struct kiocb *unused, struct socket *sock,
ctx->used += plen;
copied += plen;
size -= plen;
-   limit -= plen;
sgl->cur++;
} while (len && sgl->cur < MAX_SGL_ENTS);
 
@@ -380,7 +377,6 @@ static ssize_t skcipher_sendpage(struct socket *sock, struct page *page,
struct skcipher_ctx *ctx = ask->private;
struct skcipher_sg_list *sgl;
int err = -EINVAL;
-   int limit;
 
lock_sock(sk);
if (!ctx->more && ctx->used)
@@ -389,16 +385,10 @@ static ssize_t skcipher_sendpage(struct socket *sock, struct page *page,
if (!size)
goto done;
 
-   limit = max_t(int, sk->sk_sndbuf, PAGE_SIZE);
-   limit -= ctx->used;
-
-   if (limit < PAGE_SIZE) {
+   if (!skcipher_writable(sk)) {
err = skcipher_wait_for_wmem(sk, flags);
if (err)
goto unlock;
-
-   limit = max_t(int, sk->sk_sndbuf, PAGE_SIZE);
-   limit -= ctx->used;
}
 
err = skcipher_alloc_sgl(sk);

Cheers,
-- 
Email: Herbert Xu 
Home Page: http://gondor.apana.org.au/~herbert/
PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt

[PATCHv2 4/6] omap-aes: unnecessary code removed

2010-11-30 Thread Dmitry Kasatkin
The key and IV should always be set before an AES operation, so there
is no need to check whether they have changed.

Signed-off-by: Dmitry Kasatkin 
---
 drivers/crypto/omap-aes.c |   70 +++--
 1 files changed, 17 insertions(+), 53 deletions(-)

diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c
index 704cc70..0b21dce 100644
--- a/drivers/crypto/omap-aes.c
+++ b/drivers/crypto/omap-aes.c
@@ -74,11 +74,9 @@
 #define FLAGS_CBC  BIT(1)
 #define FLAGS_GIV  BIT(2)
 
-#define FLAGS_NEW_KEY  BIT(4)
-#define FLAGS_NEW_IV   BIT(5)
-#define FLAGS_INIT BIT(6)
-#define FLAGS_FAST BIT(7)
-#define FLAGS_BUSY BIT(8)
+#define FLAGS_INIT BIT(4)
+#define FLAGS_FAST BIT(5)
+#define FLAGS_BUSY BIT(6)
 
 struct omap_aes_ctx {
struct omap_aes_dev *dd;
@@ -105,9 +103,6 @@ struct omap_aes_dev {
unsigned long   flags;
int err;
 
-   u32 *iv;
-   u32 ctrl;
-
spinlock_t  lock;
struct crypto_queue queue;
 
@@ -209,28 +204,13 @@ static int omap_aes_hw_init(struct omap_aes_dev *dd)
 static int omap_aes_write_ctrl(struct omap_aes_dev *dd)
 {
unsigned int key32;
-   int i, err, init = dd->flags & FLAGS_INIT;
+   int i, err;
u32 val, mask;
 
err = omap_aes_hw_init(dd);
if (err)
return err;
 
-   val = FLD_VAL(((dd->ctx->keylen >> 3) - 1), 4, 3);
-   if (dd->flags & FLAGS_CBC)
-   val |= AES_REG_CTRL_CBC;
-   if (dd->flags & FLAGS_ENCRYPT)
-   val |= AES_REG_CTRL_DIRECTION;
-
-   /* check if hw state & mode have not changed */
-   if (init && dd->ctrl == val && !(dd->flags & FLAGS_NEW_IV) &&
-  !(dd->ctx->flags & FLAGS_NEW_KEY))
-   goto out;
-
-   /* only need to write control registers for new settings */
-
-   dd->ctrl = val;
-
val = 0;
if (dd->dma_lch_out >= 0)
val |= AES_REG_MASK_DMA_OUT_EN;
@@ -241,27 +221,28 @@ static int omap_aes_write_ctrl(struct omap_aes_dev *dd)
 
omap_aes_write_mask(dd, AES_REG_MASK, val, mask);
 
-   pr_debug("Set key\n");
key32 = dd->ctx->keylen / sizeof(u32);
-   /* set a key */
+
+   /* it seems a key should always be set even if it has not changed */
for (i = 0; i < key32; i++) {
omap_aes_write(dd, AES_REG_KEY(i),
__le32_to_cpu(dd->ctx->key[i]));
}
-   dd->ctx->flags &= ~FLAGS_NEW_KEY;
 
-   if (dd->flags & FLAGS_NEW_IV) {
-   pr_debug("Set IV\n");
-   omap_aes_write_n(dd, AES_REG_IV(0), dd->iv, 4);
-   dd->flags &= ~FLAGS_NEW_IV;
-   }
+   if ((dd->flags & FLAGS_CBC) && dd->req->info)
+   omap_aes_write_n(dd, AES_REG_IV(0), dd->req->info, 4);
+
+   val = FLD_VAL(((dd->ctx->keylen >> 3) - 1), 4, 3);
+   if (dd->flags & FLAGS_CBC)
+   val |= AES_REG_CTRL_CBC;
+   if (dd->flags & FLAGS_ENCRYPT)
+   val |= AES_REG_CTRL_DIRECTION;
 
mask = AES_REG_CTRL_CBC | AES_REG_CTRL_DIRECTION |
AES_REG_CTRL_KEY_SIZE;
 
-   omap_aes_write_mask(dd, AES_REG_CTRL, dd->ctrl, mask);
+   omap_aes_write_mask(dd, AES_REG_CTRL, val, mask);
 
-out:
/* start DMA or disable idle mode */
omap_aes_write_mask(dd, AES_REG_MASK, AES_REG_MASK_START,
AES_REG_MASK_START);
@@ -561,16 +542,12 @@ static int omap_aes_crypt_dma_start(struct omap_aes_dev *dd)
 static void omap_aes_finish_req(struct omap_aes_dev *dd, int err)
 {
struct ablkcipher_request *req = dd->req;
-   struct omap_aes_ctx *ctx;
 
pr_debug("err: %d\n", err);
 
dd->flags &= ~FLAGS_BUSY;
 
-   ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req));
-
-   if (req->base.complete)
-   req->base.complete(&req->base, err);
+   req->base.complete(&req->base, err);
 }
 
 static int omap_aes_crypt_dma_stop(struct omap_aes_dev *dd)
@@ -636,8 +613,6 @@ static int omap_aes_handle_queue(struct omap_aes_dev *dd,
 
req = ablkcipher_request_cast(async_req);
 
-   pr_debug("get new req\n");
-
/* assign new request to device */
dd->req = req;
dd->total = req->nbytes;
@@ -651,18 +626,8 @@ static int omap_aes_handle_queue(struct omap_aes_dev *dd,
rctx->mode &= FLAGS_MODE_MASK;
dd->flags = (dd->flags & ~FLAGS_MODE_MASK) | rctx->mode;
 
-   dd->iv = req->info;
-   if ((dd->flags & FLAGS_CBC) && dd->iv)
-   dd->flags |= FLAGS_NEW_IV;
-   else
-   dd->flags &= ~FLAGS_NEW_IV;
-
+   dd->ctx = ctx;
ctx->dd = dd;
-   if (dd->ctx != ctx) {
-   /* assign new context to device */
-   dd->ctx = ctx;
-   ctx->fla

[PATCHv2 2/6] omap-aes: redundant locking is removed

2010-11-30 Thread Dmitry Kasatkin
Submitting a request involved taking the lock twice, once for
enqueuing and once for dequeuing. Now both are done under the same
lock.

FLAGS_BUSY is now handled under the same lock as well.
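
Condensed, the submit path after this patch looks like the following (a
sketch of the hunk below with the invariant annotated, not a verbatim
excerpt):

        spin_lock_irqsave(&dd->lock, flags);
        if (req)
                err = ablkcipher_enqueue_request(&dd->queue, req);
        if (dd->flags & FLAGS_BUSY) {
                /* another context is processing; the request stays queued */
                spin_unlock_irqrestore(&dd->lock, flags);
                return err;
        }
        backlog   = crypto_get_backlog(&dd->queue);
        async_req = crypto_dequeue_request(&dd->queue);
        if (async_req)
                dd->flags |= FLAGS_BUSY;  /* claim the device under the lock */
        spin_unlock_irqrestore(&dd->lock, flags);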

Signed-off-by: Dmitry Kasatkin 
---
 drivers/crypto/omap-aes.c |   70 
 1 files changed, 32 insertions(+), 38 deletions(-)

diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c
index 41c91f3..2d8f72e 100644
--- a/drivers/crypto/omap-aes.c
+++ b/drivers/crypto/omap-aes.c
@@ -78,7 +78,7 @@
 #define FLAGS_NEW_IV   BIT(5)
 #define FLAGS_INIT BIT(6)
 #define FLAGS_FAST BIT(7)
-#define FLAGS_BUSY 8
+#define FLAGS_BUSY BIT(8)
 
 struct omap_aes_ctx {
struct omap_aes_dev *dd;
@@ -179,9 +179,8 @@ static int omap_aes_wait(struct omap_aes_dev *dd, u32 offset, u32 bit)
 
 static int omap_aes_hw_init(struct omap_aes_dev *dd)
 {
-   int err = 0;
-
clk_enable(dd->iclk);
+
if (!(dd->flags & FLAGS_INIT)) {
/* is it necessary to reset before every operation? */
omap_aes_write_mask(dd, AES_REG_MASK, AES_REG_MASK_SOFTRESET,
@@ -193,18 +192,15 @@ static int omap_aes_hw_init(struct omap_aes_dev *dd)
__asm__ __volatile__("nop");
__asm__ __volatile__("nop");
 
-   err = omap_aes_wait(dd, AES_REG_SYSSTATUS,
-   AES_REG_SYSSTATUS_RESETDONE);
-   if (!err)
-   dd->flags |= FLAGS_INIT;
+   if (omap_aes_wait(dd, AES_REG_SYSSTATUS,
+   AES_REG_SYSSTATUS_RESETDONE)) {
+   clk_disable(dd->iclk);
+   return -ETIMEDOUT;
+   }
+   dd->flags |= FLAGS_INIT;
}
 
-   return err;
-}
-
-static void omap_aes_hw_cleanup(struct omap_aes_dev *dd)
-{
-   clk_disable(dd->iclk);
+   return 0;
 }
 
 static void omap_aes_write_ctrl(struct omap_aes_dev *dd)
@@ -538,6 +534,8 @@ static void omap_aes_finish_req(struct omap_aes_dev *dd, int err)
 
pr_debug("err: %d\n", err);
 
+   dd->flags &= ~FLAGS_BUSY;
+
ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(dd->req));
 
if (!dd->total)
@@ -553,7 +551,7 @@ static int omap_aes_crypt_dma_stop(struct omap_aes_dev *dd)
 
omap_aes_write_mask(dd, AES_REG_MASK, 0, AES_REG_MASK_START);
 
-   omap_aes_hw_cleanup(dd);
+   clk_disable(dd->iclk);
 
omap_stop_dma(dd->dma_lch_in);
omap_stop_dma(dd->dma_lch_out);
@@ -580,22 +578,26 @@ static int omap_aes_crypt_dma_stop(struct omap_aes_dev *dd)
return err;
 }
 
-static int omap_aes_handle_req(struct omap_aes_dev *dd)
+static int omap_aes_handle_req(struct omap_aes_dev *dd,
+  struct ablkcipher_request *req)
 {
struct crypto_async_request *async_req, *backlog;
struct omap_aes_ctx *ctx;
struct omap_aes_reqctx *rctx;
-   struct ablkcipher_request *req;
unsigned long flags;
-
-   if (dd->total)
-   goto start;
+   int err = 0;
 
spin_lock_irqsave(&dd->lock, flags);
+   if (req)
+   err = ablkcipher_enqueue_request(&dd->queue, req);
+   if (dd->flags & FLAGS_BUSY) {
+   spin_unlock_irqrestore(&dd->lock, flags);
+   return err;
+   }
backlog = crypto_get_backlog(&dd->queue);
async_req = crypto_dequeue_request(&dd->queue);
-   if (!async_req)
-   clear_bit(FLAGS_BUSY, &dd->flags);
+   if (async_req)
+   dd->flags |= FLAGS_BUSY;
spin_unlock_irqrestore(&dd->lock, flags);
 
if (!async_req)
@@ -637,20 +639,23 @@ static int omap_aes_handle_req(struct omap_aes_dev *dd)
if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE))
pr_err("request size is not exact amount of AES blocks\n");
 
-start:
-   return omap_aes_crypt_dma_start(dd);
+   omap_aes_crypt_dma_start(dd);
+
+   return err;
 }
 
 static void omap_aes_task(unsigned long data)
 {
struct omap_aes_dev *dd = (struct omap_aes_dev *)data;
-   int err;
 
pr_debug("enter\n");
 
-   err = omap_aes_crypt_dma_stop(dd);
+   omap_aes_crypt_dma_stop(dd);
 
-   err = omap_aes_handle_req(dd);
+   if (dd->total)
+   omap_aes_crypt_dma_start(dd);
+   else
+   omap_aes_handle_req(dd, NULL);
 
pr_debug("exit\n");
 }
@@ -661,8 +666,6 @@ static int omap_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
crypto_ablkcipher_reqtfm(req));
struct omap_aes_reqctx *rctx = ablkcipher_request_ctx(req);
struct omap_aes_dev *dd;
-   unsigned long flags;
-   int err;
 
pr_debug("nbytes: %d, enc: %d, cbc: %d\n", req->nbytes,
  !!(mode & FLAGS_ENCRYPT),
@@ -674,16 +677,7 @@ static int omap_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
 
rctx-

[PATCHv2 5/6] omap-aes: initialize aes module once per request

2010-11-30 Thread Dmitry Kasatkin
The AES module was initialized for every DMA transaction, which is
redundant. Now it is initialized once per request.
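
The resulting per-request lifetime, as I read the series (a call-flow
sketch, not verbatim code):

        omap_aes_handle_queue()
            omap_aes_write_ctrl()
                omap_aes_hw_init()     /* clk_enable(); one-time reset */
                /* key, IV and DMA parameters written once per request */
            omap_aes_crypt_dma_start() /* first of possibly several DMA
                                          transactions */
            ...
        omap_aes_finish_req()          /* clk_disable(); complete request */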

Signed-off-by: Dmitry Kasatkin 
---
 drivers/crypto/omap-aes.c |   54 +++-
 1 files changed, 28 insertions(+), 26 deletions(-)

diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c
index 0b21dce..b69da4f 100644
--- a/drivers/crypto/omap-aes.c
+++ b/drivers/crypto/omap-aes.c
@@ -176,6 +176,11 @@ static int omap_aes_wait(struct omap_aes_dev *dd, u32 offset, u32 bit)
 
 static int omap_aes_hw_init(struct omap_aes_dev *dd)
 {
+   /*
+* clocks are enabled when request starts and disabled when finished.
+* It may be long delays between requests.
+* Device might go to off mode to save power.
+*/
clk_enable(dd->iclk);
 
if (!(dd->flags & FLAGS_INIT)) {
@@ -190,10 +195,9 @@ static int omap_aes_hw_init(struct omap_aes_dev *dd)
__asm__ __volatile__("nop");
 
if (omap_aes_wait(dd, AES_REG_SYSSTATUS,
-   AES_REG_SYSSTATUS_RESETDONE)) {
-   clk_disable(dd->iclk);
+   AES_REG_SYSSTATUS_RESETDONE))
return -ETIMEDOUT;
-   }
+
dd->flags |= FLAGS_INIT;
dd->err = 0;
}
@@ -243,9 +247,19 @@ static int omap_aes_write_ctrl(struct omap_aes_dev *dd)
 
omap_aes_write_mask(dd, AES_REG_CTRL, val, mask);
 
-   /* start DMA or disable idle mode */
-   omap_aes_write_mask(dd, AES_REG_MASK, AES_REG_MASK_START,
-   AES_REG_MASK_START);
+   /* IN */
+   omap_set_dma_dest_params(dd->dma_lch_in, 0, OMAP_DMA_AMODE_CONSTANT,
+dd->phys_base + AES_REG_DATA, 0, 4);
+
+   omap_set_dma_dest_burst_mode(dd->dma_lch_in, OMAP_DMA_DATA_BURST_4);
+   omap_set_dma_src_burst_mode(dd->dma_lch_in, OMAP_DMA_DATA_BURST_4);
+
+   /* OUT */
+   omap_set_dma_src_params(dd->dma_lch_out, 0, OMAP_DMA_AMODE_CONSTANT,
+   dd->phys_base + AES_REG_DATA, 0, 4);
+
+   omap_set_dma_src_burst_mode(dd->dma_lch_out, OMAP_DMA_DATA_BURST_4);
+   omap_set_dma_dest_burst_mode(dd->dma_lch_out, OMAP_DMA_DATA_BURST_4);
 
return 0;
 }
@@ -419,7 +433,6 @@ static int omap_aes_crypt_dma(struct crypto_tfm *tfm, dma_addr_t dma_addr_in,
struct omap_aes_ctx *ctx = crypto_tfm_ctx(tfm);
struct omap_aes_dev *dd = ctx->dd;
int len32;
-   int err;
 
pr_debug("len: %d\n", length);
 
@@ -432,12 +445,6 @@ static int omap_aes_crypt_dma(struct crypto_tfm *tfm, dma_addr_t dma_addr_in,
len32 = DIV_ROUND_UP(length, sizeof(u32));
 
/* IN */
-   omap_set_dma_dest_params(dd->dma_lch_in, 0, OMAP_DMA_AMODE_CONSTANT,
-dd->phys_base + AES_REG_DATA, 0, 4);
-
-   omap_set_dma_dest_burst_mode(dd->dma_lch_in, OMAP_DMA_DATA_BURST_4);
-   omap_set_dma_src_burst_mode(dd->dma_lch_in, OMAP_DMA_DATA_BURST_4);
-
omap_set_dma_transfer_params(dd->dma_lch_in, OMAP_DMA_DATA_TYPE_S32,
 len32, 1, OMAP_DMA_SYNC_PACKET, dd->dma_in,
OMAP_DMA_DST_SYNC);
@@ -446,12 +453,6 @@ static int omap_aes_crypt_dma(struct crypto_tfm *tfm, dma_addr_t dma_addr_in,
dma_addr_in, 0, 0);
 
/* OUT */
-   omap_set_dma_src_params(dd->dma_lch_out, 0, OMAP_DMA_AMODE_CONSTANT,
-   dd->phys_base + AES_REG_DATA, 0, 4);
-
-   omap_set_dma_src_burst_mode(dd->dma_lch_out, OMAP_DMA_DATA_BURST_4);
-   omap_set_dma_dest_burst_mode(dd->dma_lch_out, OMAP_DMA_DATA_BURST_4);
-
omap_set_dma_transfer_params(dd->dma_lch_out, OMAP_DMA_DATA_TYPE_S32,
 len32, 1, OMAP_DMA_SYNC_PACKET,
dd->dma_out, OMAP_DMA_SRC_SYNC);
@@ -459,13 +460,13 @@ static int omap_aes_crypt_dma(struct crypto_tfm *tfm, dma_addr_t dma_addr_in,
omap_set_dma_dest_params(dd->dma_lch_out, 0, OMAP_DMA_AMODE_POST_INC,
 dma_addr_out, 0, 0);
 
-   err = omap_aes_write_ctrl(dd);
-   if (err)
-   return err;
-
omap_start_dma(dd->dma_lch_in);
omap_start_dma(dd->dma_lch_out);
 
+   /* start DMA or disable idle mode */
+   omap_aes_write_mask(dd, AES_REG_MASK, AES_REG_MASK_START,
+   AES_REG_MASK_START);
+
return 0;
 }
 
@@ -545,6 +546,7 @@ static void omap_aes_finish_req(struct omap_aes_dev *dd, int err)
 
pr_debug("err: %d\n", err);
 
+   clk_disable(dd->iclk);
dd->flags &= ~FLAGS_BUSY;
 
req->base.complete(&req->base, err);
@@ -562,8 +564,6 @@ static int omap_aes_crypt_dma_stop(struct omap_aes_dev *dd)
omap_stop_dma(dd->dma_lch_in);
omap_stop_dma(dd->dma_lch_out);
 
-   clk_

[PATCHv2 6/6] omap-aes: checkpatch --file warning fixes

2010-11-30 Thread Dmitry Kasatkin
Signed-off-by: Dmitry Kasatkin 
---
 drivers/crypto/omap-aes.c |    6 +++---
 1 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c
index b69da4f..add2a1a 100644
--- a/drivers/crypto/omap-aes.c
+++ b/drivers/crypto/omap-aes.c
@@ -96,7 +96,7 @@ struct omap_aes_reqctx {
 struct omap_aes_dev {
struct list_headlist;
unsigned long   phys_base;
-   void __iomem*io_base;
+   void __iomem*io_base;
struct clk  *iclk;
struct omap_aes_ctx *ctx;
struct device   *dev;
@@ -759,7 +759,7 @@ static struct crypto_alg algs[] = {
.cra_flags  = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
.cra_blocksize  = AES_BLOCK_SIZE,
.cra_ctxsize= sizeof(struct omap_aes_ctx),
-   .cra_alignmask  = 0,
+   .cra_alignmask  = 0,
.cra_type   = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_init   = omap_aes_cra_init,
@@ -779,7 +779,7 @@ static struct crypto_alg algs[] = {
.cra_flags  = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
.cra_blocksize  = AES_BLOCK_SIZE,
.cra_ctxsize= sizeof(struct omap_aes_ctx),
-   .cra_alignmask  = 0,
+   .cra_alignmask  = 0,
.cra_type   = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_init   = omap_aes_cra_init,
-- 
1.7.0.4



[PATCHv2 1/6] omap-aes: DMA initialization fixes for OMAP off mode

2010-11-30 Thread Dmitry Kasatkin
DMA parameters for constant data were initialized during driver probe().
It seems that those settings are sometimes lost when the device goes to
off mode. This patch moves the DMA initialization to just before use,
which solves the off-mode problems.

Signed-off-by: Dmitry Kasatkin 
---
 drivers/crypto/omap-aes.c |   24 
 1 files changed, 12 insertions(+), 12 deletions(-)

diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c
index 799ca51..41c91f3 100644
--- a/drivers/crypto/omap-aes.c
+++ b/drivers/crypto/omap-aes.c
@@ -339,18 +339,6 @@ static int omap_aes_dma_init(struct omap_aes_dev *dd)
goto err_dma_out;
}
 
-   omap_set_dma_dest_params(dd->dma_lch_in, 0, OMAP_DMA_AMODE_CONSTANT,
-dd->phys_base + AES_REG_DATA, 0, 4);
-
-   omap_set_dma_dest_burst_mode(dd->dma_lch_in, OMAP_DMA_DATA_BURST_4);
-   omap_set_dma_src_burst_mode(dd->dma_lch_in, OMAP_DMA_DATA_BURST_4);
-
-   omap_set_dma_src_params(dd->dma_lch_out, 0, OMAP_DMA_AMODE_CONSTANT,
-   dd->phys_base + AES_REG_DATA, 0, 4);
-
-   omap_set_dma_src_burst_mode(dd->dma_lch_out, OMAP_DMA_DATA_BURST_4);
-   omap_set_dma_dest_burst_mode(dd->dma_lch_out, OMAP_DMA_DATA_BURST_4);
-
return 0;
 
 err_dma_out:
@@ -443,6 +431,12 @@ static int omap_aes_crypt_dma(struct crypto_tfm *tfm, dma_addr_t dma_addr_in,
len32 = DIV_ROUND_UP(length, sizeof(u32));
 
/* IN */
+   omap_set_dma_dest_params(dd->dma_lch_in, 0, OMAP_DMA_AMODE_CONSTANT,
+dd->phys_base + AES_REG_DATA, 0, 4);
+
+   omap_set_dma_dest_burst_mode(dd->dma_lch_in, OMAP_DMA_DATA_BURST_4);
+   omap_set_dma_src_burst_mode(dd->dma_lch_in, OMAP_DMA_DATA_BURST_4);
+
omap_set_dma_transfer_params(dd->dma_lch_in, OMAP_DMA_DATA_TYPE_S32,
 len32, 1, OMAP_DMA_SYNC_PACKET, dd->dma_in,
OMAP_DMA_DST_SYNC);
@@ -451,6 +445,12 @@ static int omap_aes_crypt_dma(struct crypto_tfm *tfm, dma_addr_t dma_addr_in,
dma_addr_in, 0, 0);
 
/* OUT */
+   omap_set_dma_src_params(dd->dma_lch_out, 0, OMAP_DMA_AMODE_CONSTANT,
+   dd->phys_base + AES_REG_DATA, 0, 4);
+
+   omap_set_dma_src_burst_mode(dd->dma_lch_out, OMAP_DMA_DATA_BURST_4);
+   omap_set_dma_dest_burst_mode(dd->dma_lch_out, OMAP_DMA_DATA_BURST_4);
+
omap_set_dma_transfer_params(dd->dma_lch_out, OMAP_DMA_DATA_TYPE_S32,
 len32, 1, OMAP_DMA_SYNC_PACKET,
dd->dma_out, OMAP_DMA_SRC_SYNC);
-- 
1.7.0.4



[PATCHv2 3/6] omap-aes: error handling implementation improved

2010-11-30 Thread Dmitry Kasatkin
The previous version had no error handling, so a request could remain
uncompleted.

Also, in the case of a DMA error, FLAGS_INIT is unset and the
accelerator will be re-initialized.

Buffer size alignment is now checked.

Signed-off-by: Dmitry Kasatkin 
---
 drivers/crypto/omap-aes.c |  134 +++--
 1 files changed, 93 insertions(+), 41 deletions(-)

diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c
index 2d8f72e..704cc70 100644
--- a/drivers/crypto/omap-aes.c
+++ b/drivers/crypto/omap-aes.c
@@ -103,14 +103,16 @@ struct omap_aes_dev {
struct omap_aes_ctx *ctx;
struct device   *dev;
unsigned long   flags;
+   int err;
 
u32 *iv;
u32 ctrl;
 
-   spinlock_t  lock;
-   struct crypto_queue queue;
+   spinlock_t  lock;
+   struct crypto_queue queue;
 
-   struct tasklet_struct   task;
+   struct tasklet_struct   done_task;
+   struct tasklet_struct   queue_task;
 
struct ablkcipher_request   *req;
size_t  total;
@@ -198,24 +200,30 @@ static int omap_aes_hw_init(struct omap_aes_dev *dd)
return -ETIMEDOUT;
}
dd->flags |= FLAGS_INIT;
+   dd->err = 0;
}
 
return 0;
 }
 
-static void omap_aes_write_ctrl(struct omap_aes_dev *dd)
+static int omap_aes_write_ctrl(struct omap_aes_dev *dd)
 {
unsigned int key32;
-   int i;
+   int i, err, init = dd->flags & FLAGS_INIT;
u32 val, mask;
 
+   err = omap_aes_hw_init(dd);
+   if (err)
+   return err;
+
val = FLD_VAL(((dd->ctx->keylen >> 3) - 1), 4, 3);
if (dd->flags & FLAGS_CBC)
val |= AES_REG_CTRL_CBC;
if (dd->flags & FLAGS_ENCRYPT)
val |= AES_REG_CTRL_DIRECTION;
 
-   if (dd->ctrl == val && !(dd->flags & FLAGS_NEW_IV) &&
+   /* check if hw state & mode have not changed */
+   if (init && dd->ctrl == val && !(dd->flags & FLAGS_NEW_IV) &&
   !(dd->ctx->flags & FLAGS_NEW_KEY))
goto out;
 
@@ -257,6 +265,8 @@ out:
/* start DMA or disable idle mode */
omap_aes_write_mask(dd, AES_REG_MASK, AES_REG_MASK_START,
AES_REG_MASK_START);
+
+   return 0;
 }
 
 static struct omap_aes_dev *omap_aes_find_dev(struct omap_aes_ctx *ctx)
@@ -284,8 +294,16 @@ static void omap_aes_dma_callback(int lch, u16 ch_status, void *data)
 {
struct omap_aes_dev *dd = data;
 
-   if (lch == dd->dma_lch_out)
-   tasklet_schedule(&dd->task);
+   if (ch_status != OMAP_DMA_BLOCK_IRQ) {
+   pr_err("omap-aes DMA error status: 0x%hx\n", ch_status);
+   dd->err = -EIO;
+   dd->flags &= ~FLAGS_INIT; /* request to re-initialize */
+   } else if (lch == dd->dma_lch_in) {
+   return;
+   }
+
+   /* dma_lch_out - completed */
+   tasklet_schedule(&dd->done_task);
 }
 
 static int omap_aes_dma_init(struct omap_aes_dev *dd)
@@ -390,6 +408,11 @@ static int sg_copy(struct scatterlist **sg, size_t *offset, void *buf,
if (!count)
return off;
 
+   /*
+* buflen and total are AES_BLOCK_SIZE size aligned,
+* so count should be also aligned
+*/
+
sg_copy_buf(buf + off, *sg, *offset, count, out);
 
off += count;
@@ -415,6 +438,7 @@ static int omap_aes_crypt_dma(struct crypto_tfm *tfm, dma_addr_t dma_addr_in,
struct omap_aes_ctx *ctx = crypto_tfm_ctx(tfm);
struct omap_aes_dev *dd = ctx->dd;
int len32;
+   int err;
 
pr_debug("len: %d\n", length);
 
@@ -454,11 +478,13 @@ static int omap_aes_crypt_dma(struct crypto_tfm *tfm, dma_addr_t dma_addr_in,
omap_set_dma_dest_params(dd->dma_lch_out, 0, OMAP_DMA_AMODE_POST_INC,
 dma_addr_out, 0, 0);
 
+   err = omap_aes_write_ctrl(dd);
+   if (err)
+   return err;
+
omap_start_dma(dd->dma_lch_in);
omap_start_dma(dd->dma_lch_out);
 
-   omap_aes_write_ctrl(dd);
-
return 0;
 }
 
@@ -484,8 +510,10 @@ static int omap_aes_crypt_dma_start(struct omap_aes_dev *dd)
count = min(dd->total, sg_dma_len(dd->in_sg));
count = min(count, sg_dma_len(dd->out_sg));
 
-   if (count != dd->total)
+   if (count != dd->total) {
+   pr_err("request length != buffer length\n");
return -EINVAL;
+   }
 
pr_debug("fast\n");
 
@@ -521,25 +549,28 @@ static int omap_aes_crypt_dma_start(struct omap_aes_dev *dd)
 
dd->total -= count;
 
-   err = omap_aes_hw_init(dd);
-
err = omap_

[PATCHv2 0/6] omap-aes off mode and error handling fixes

2010-11-30 Thread Dmitry Kasatkin
Changes since v1:
- OMAP type specific handling removed
- fixed a backlog handling bug in "redundant locking is removed"
- AES module initialized once per request instead of once per DMA
  transaction, which is more efficient and the right way to do it
- checkpatch fixes moved to a separate patch


Dmitry Kasatkin (6):
  omap-aes: DMA initialization fixes for OMAP off mode
  omap-aes: redundant locking is removed
  omap-aes: error handling implementation improved
  omap-aes: unnecessary code removed
  omap-aes: initialize aes module once per request
  omap-aes: checkpatch --file warning fixes

 drivers/crypto/omap-aes.c |  260 +++-
 1 files changed, 136 insertions(+), 124 deletions(-)
