[PATCH] crypto: ccp: Check for NULL PSP pointer at module unload

2018-07-26 Thread Tom Lendacky
Should the PSP initialization fail, the PSP data structure is freed
and the psp_data pointer in the sp_device struct is set to NULL. At
module unload, psp_dev_destroy() does not check whether the pointer
is NULL and ends up dereferencing a NULL pointer.

Add a pointer check of the psp_data field in the sp_device struct
in psp_dev_destroy() and return immediately if it is NULL.

Cc:  # 4.16.x-
Fixes: 2a6170dfe755 ("crypto: ccp: Add Platform Security Processor (PSP) device support")
Signed-off-by: Tom Lendacky 
---
 drivers/crypto/ccp/psp-dev.c |3 +++
 1 file changed, 3 insertions(+)

diff --git a/drivers/crypto/ccp/psp-dev.c b/drivers/crypto/ccp/psp-dev.c
index 9b59638..218739b 100644
--- a/drivers/crypto/ccp/psp-dev.c
+++ b/drivers/crypto/ccp/psp-dev.c
@@ -858,6 +858,9 @@ void psp_dev_destroy(struct sp_device *sp)
 {
struct psp_device *psp = sp->psp_data;
 
+   if (!psp)
+   return;
+
if (psp->sev_misc)
		kref_put(&misc_dev->refcount, sev_exit);
 



[PATCH v1 5/5] crypto: ccp: Add support for new CCP/PSP device ID

2018-07-03 Thread Tom Lendacky
Add a new CCP/PSP PCI device ID and new PSP register offsets.

Signed-off-by: Tom Lendacky 
---
 drivers/crypto/ccp/sp-pci.c |   29 -
 1 file changed, 24 insertions(+), 5 deletions(-)

diff --git a/drivers/crypto/ccp/sp-pci.c b/drivers/crypto/ccp/sp-pci.c
index 78c1e9d..7da93e9 100644
--- a/drivers/crypto/ccp/sp-pci.c
+++ b/drivers/crypto/ccp/sp-pci.c
@@ -269,7 +269,7 @@ static int sp_pci_resume(struct pci_dev *pdev)
 #endif
 
 #ifdef CONFIG_CRYPTO_DEV_SP_PSP
-static const struct psp_vdata psp_entry = {
+static const struct psp_vdata pspv1 = {
.cmdresp_reg= 0x10580,
.cmdbuff_addr_lo_reg= 0x105e0,
.cmdbuff_addr_hi_reg= 0x105e4,
@@ -277,35 +277,54 @@ static int sp_pci_resume(struct pci_dev *pdev)
.inten_reg  = 0x10610,
.intsts_reg = 0x10614,
 };
+
+static const struct psp_vdata pspv2 = {
+   .cmdresp_reg= 0x10980,
+   .cmdbuff_addr_lo_reg= 0x109e0,
+   .cmdbuff_addr_hi_reg= 0x109e4,
+   .feature_reg= 0x109fc,
+   .inten_reg  = 0x10690,
+   .intsts_reg = 0x10694,
+};
 #endif
 
 static const struct sp_dev_vdata dev_vdata[] = {
-   {
+   {   /* 0 */
.bar = 2,
 #ifdef CONFIG_CRYPTO_DEV_SP_CCP
		.ccp_vdata = &ccpv3,
 #endif
},
-   {
+   {   /* 1 */
.bar = 2,
 #ifdef CONFIG_CRYPTO_DEV_SP_CCP
		.ccp_vdata = &ccpv5a,
 #endif
 #ifdef CONFIG_CRYPTO_DEV_SP_PSP
-   .psp_vdata = &psp_entry
+   .psp_vdata = &pspv1,
 #endif
},
-   {
+   {   /* 2 */
.bar = 2,
 #ifdef CONFIG_CRYPTO_DEV_SP_CCP
		.ccp_vdata = &ccpv5b,
 #endif
},
+   {   /* 3 */
+   .bar = 2,
+#ifdef CONFIG_CRYPTO_DEV_SP_CCP
+   .ccp_vdata = &ccpv5a,
+#endif
+#ifdef CONFIG_CRYPTO_DEV_SP_PSP
+   .psp_vdata = &pspv2,
+#endif
+   },
 };
 static const struct pci_device_id sp_pci_table[] = {
	{ PCI_VDEVICE(AMD, 0x1537), (kernel_ulong_t)&dev_vdata[0] },
	{ PCI_VDEVICE(AMD, 0x1456), (kernel_ulong_t)&dev_vdata[1] },
	{ PCI_VDEVICE(AMD, 0x1468), (kernel_ulong_t)&dev_vdata[2] },
+	{ PCI_VDEVICE(AMD, 0x1486), (kernel_ulong_t)&dev_vdata[3] },
/* Last entry must be zero */
{ 0, }
 };



[PATCH v1 4/5] crypto: ccp: Support register differences between PSP devices

2018-07-03 Thread Tom Lendacky
In preparation for adding a new PSP device ID that uses different register
offsets, add support to the PSP version data for register offset values.
And then update the code to use these new register offset values.
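
For readers of the archive: the sp-dev.h hunk is truncated below, but
from the pspv1/pspv2 initializers in the next patch the version data
boils down to a table of register offsets along these lines (a sketch;
the field names match the patches, the exact types are assumed):

	struct psp_vdata {
		const unsigned int cmdresp_reg;		/* command/response register */
		const unsigned int cmdbuff_addr_lo_reg;	/* command buffer address, low */
		const unsigned int cmdbuff_addr_hi_reg;	/* command buffer address, high */
		const unsigned int feature_reg;		/* feature bits (e.g. SEV) */
		const unsigned int inten_reg;		/* interrupt enable */
		const unsigned int intsts_reg;		/* interrupt status */
	};

psp_dev_init() then reads every register offset from psp->vdata instead
of fixed #defines, so supporting a new device only needs a new table entry.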

Signed-off-by: Tom Lendacky 
---
 drivers/crypto/ccp/psp-dev.c |   24 
 drivers/crypto/ccp/psp-dev.h |9 -
 drivers/crypto/ccp/sp-dev.h  |7 ++-
 drivers/crypto/ccp/sp-pci.c  |7 ++-
 4 files changed, 24 insertions(+), 23 deletions(-)

diff --git a/drivers/crypto/ccp/psp-dev.c b/drivers/crypto/ccp/psp-dev.c
index 875756d..9b59638 100644
--- a/drivers/crypto/ccp/psp-dev.c
+++ b/drivers/crypto/ccp/psp-dev.c
@@ -62,14 +62,14 @@ static irqreturn_t psp_irq_handler(int irq, void *data)
int reg;
 
/* Read the interrupt status: */
-   status = ioread32(psp->io_regs + PSP_P2CMSG_INTSTS);
+   status = ioread32(psp->io_regs + psp->vdata->intsts_reg);
 
/* Check if it is command completion: */
if (!(status & PSP_CMD_COMPLETE))
goto done;
 
/* Check if it is SEV command completion: */
-   reg = ioread32(psp->io_regs + PSP_CMDRESP);
+   reg = ioread32(psp->io_regs + psp->vdata->cmdresp_reg);
if (reg & PSP_CMDRESP_RESP) {
psp->sev_int_rcvd = 1;
		wake_up(&psp->sev_int_queue);
@@ -77,7 +77,7 @@ static irqreturn_t psp_irq_handler(int irq, void *data)
 
 done:
/* Clear the interrupt status by writing the same value we read. */
-   iowrite32(status, psp->io_regs + PSP_P2CMSG_INTSTS);
+   iowrite32(status, psp->io_regs + psp->vdata->intsts_reg);
 
return IRQ_HANDLED;
 }
@@ -85,7 +85,7 @@ static irqreturn_t psp_irq_handler(int irq, void *data)
 static void sev_wait_cmd_ioc(struct psp_device *psp, unsigned int *reg)
 {
wait_event(psp->sev_int_queue, psp->sev_int_rcvd);
-   *reg = ioread32(psp->io_regs + PSP_CMDRESP);
+   *reg = ioread32(psp->io_regs + psp->vdata->cmdresp_reg);
 }
 
 static int sev_cmd_buffer_len(int cmd)
@@ -143,15 +143,15 @@ static int __sev_do_cmd_locked(int cmd, void *data, int *psp_ret)
print_hex_dump_debug("(in):  ", DUMP_PREFIX_OFFSET, 16, 2, data,
 sev_cmd_buffer_len(cmd), false);
 
-   iowrite32(phys_lsb, psp->io_regs + PSP_CMDBUFF_ADDR_LO);
-   iowrite32(phys_msb, psp->io_regs + PSP_CMDBUFF_ADDR_HI);
+   iowrite32(phys_lsb, psp->io_regs + psp->vdata->cmdbuff_addr_lo_reg);
+   iowrite32(phys_msb, psp->io_regs + psp->vdata->cmdbuff_addr_hi_reg);
 
psp->sev_int_rcvd = 0;
 
reg = cmd;
reg <<= PSP_CMDRESP_CMD_SHIFT;
reg |= PSP_CMDRESP_IOC;
-   iowrite32(reg, psp->io_regs + PSP_CMDRESP);
+   iowrite32(reg, psp->io_regs + psp->vdata->cmdresp_reg);
 
/* wait for command completion */
	sev_wait_cmd_ioc(psp, &reg);
@@ -789,7 +789,7 @@ static int sev_misc_init(struct psp_device *psp)
 static int sev_init(struct psp_device *psp)
 {
/* Check if device supports SEV feature */
-   if (!(ioread32(psp->io_regs + PSP_FEATURE_REG) & 1)) {
+   if (!(ioread32(psp->io_regs + psp->vdata->feature_reg) & 1)) {
dev_dbg(psp->dev, "device does not support SEV\n");
return 1;
}
@@ -817,11 +817,11 @@ int psp_dev_init(struct sp_device *sp)
goto e_err;
}
 
-   psp->io_regs = sp->io_map + psp->vdata->offset;
+   psp->io_regs = sp->io_map;
 
/* Disable and clear interrupts until ready */
-   iowrite32(0, psp->io_regs + PSP_P2CMSG_INTEN);
-   iowrite32(-1, psp->io_regs + PSP_P2CMSG_INTSTS);
+   iowrite32(0, psp->io_regs + psp->vdata->inten_reg);
+   iowrite32(-1, psp->io_regs + psp->vdata->intsts_reg);
 
/* Request an irq */
ret = sp_request_psp_irq(psp->sp, psp_irq_handler, psp->name, psp);
@@ -838,7 +838,7 @@ int psp_dev_init(struct sp_device *sp)
sp->set_psp_master_device(sp);
 
/* Enable interrupt */
-   iowrite32(-1, psp->io_regs + PSP_P2CMSG_INTEN);
+   iowrite32(-1, psp->io_regs + psp->vdata->inten_reg);
 
dev_notice(dev, "psp enabled\n");
 
diff --git a/drivers/crypto/ccp/psp-dev.h b/drivers/crypto/ccp/psp-dev.h
index 5d46a2b..8b53a96 100644
--- a/drivers/crypto/ccp/psp-dev.h
+++ b/drivers/crypto/ccp/psp-dev.h
@@ -30,17 +30,8 @@
 
 #include "sp-dev.h"
 
-#define PSP_C2PMSG(_num)		((_num) << 2)
-#define PSP_CMDRESP			PSP_C2PMSG(32)
-#define PSP_CMDBUFF_ADDR_LO		PSP_C2PMSG(56)
-#define PSP_CMDBUFF_ADDR_HI		PSP_C2PMSG(57)
-#define PSP_FEATURE_REG			PSP_C2PMSG(63)
-
 #define PSP_CMD_COMPLETE   BIT(1)
 
-#define PSP_P2CMSG_INTEN  

[PATCH v1 3/5] crypto: ccp: Remove unused #defines

2018-07-03 Thread Tom Lendacky
Remove some unused #defines for register offsets. This will lessen the
changes required when register offsets change between versions of the
device.

Signed-off-by: Tom Lendacky 
---
 drivers/crypto/ccp/psp-dev.c |2 +-
 drivers/crypto/ccp/psp-dev.h |   10 +-
 2 files changed, 2 insertions(+), 10 deletions(-)

diff --git a/drivers/crypto/ccp/psp-dev.c b/drivers/crypto/ccp/psp-dev.c
index 91ef6ed..875756d 100644
--- a/drivers/crypto/ccp/psp-dev.c
+++ b/drivers/crypto/ccp/psp-dev.c
@@ -65,7 +65,7 @@ static irqreturn_t psp_irq_handler(int irq, void *data)
status = ioread32(psp->io_regs + PSP_P2CMSG_INTSTS);
 
/* Check if it is command completion: */
-   if (!(status & BIT(PSP_CMD_COMPLETE_REG)))
+   if (!(status & PSP_CMD_COMPLETE))
goto done;
 
/* Check if it is SEV command completion: */
diff --git a/drivers/crypto/ccp/psp-dev.h b/drivers/crypto/ccp/psp-dev.h
index c7e9098a..5d46a2b 100644
--- a/drivers/crypto/ccp/psp-dev.h
+++ b/drivers/crypto/ccp/psp-dev.h
@@ -36,19 +36,11 @@
 #define PSP_CMDBUFF_ADDR_HI PSP_C2PMSG(57)
 #define PSP_FEATURE_REG		PSP_C2PMSG(63)
 
-#define PSP_P2CMSG(_num)   ((_num) << 2)
-#define PSP_CMD_COMPLETE_REG   1
-#define PSP_CMD_COMPLETE   PSP_P2CMSG(PSP_CMD_COMPLETE_REG)
+#define PSP_CMD_COMPLETE   BIT(1)
 
 #define PSP_P2CMSG_INTEN   0x0110
 #define PSP_P2CMSG_INTSTS  0x0114
 
-#define PSP_C2PMSG_ATTR_0  0x0118
-#define PSP_C2PMSG_ATTR_1  0x011c
-#define PSP_C2PMSG_ATTR_2  0x0120
-#define PSP_C2PMSG_ATTR_3  0x0124
-#define PSP_P2CMSG_ATTR_0  0x0128
-
 #define PSP_CMDRESP_CMD_SHIFT  16
 #define PSP_CMDRESP_IOCBIT(0)
 #define PSP_CMDRESP_RESP   BIT(31)



[PATCH v1 1/5] crypto: ccp: Fix command completion detection race

2018-07-03 Thread Tom Lendacky
The wait_event() function is used to detect command completion.  The
interrupt handler will set the wait condition variable when the interrupt
is triggered.  However, the variable used for wait_event() is initialized
after the command has been submitted, which can create a race condition
with the interrupt handler and result in the wait_event() never returning.
Move the initialization of the wait condition variable to just before
command submission.
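
To make the race concrete, here is the window the patch closes
(simplified; the names match the code below, the timeline is
illustrative):

	/*
	 *   CPU (submit path)               PSP interrupt handler
	 *   -----------------               ---------------------
	 *   iowrite32(reg, ...CMDRESP);
	 *                                   psp->sev_int_rcvd = 1;
	 *                                   wake_up(&psp->sev_int_queue);
	 *   psp->sev_int_rcvd = 0;          <-- completion lost
	 *   wait_event(..., sev_int_rcvd);  <-- never returns
	 */

Clearing sev_int_rcvd before the command is issued means an immediate
completion can no longer be overwritten.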

Fixes: 200664d5237f ("crypto: ccp: Add Secure Encrypted Virtualization (SEV) command support")
Cc:  # 4.16.x-
Signed-off-by: Tom Lendacky 
---
 drivers/crypto/ccp/psp-dev.c |4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/crypto/ccp/psp-dev.c b/drivers/crypto/ccp/psp-dev.c
index ff478d8..973d683 100644
--- a/drivers/crypto/ccp/psp-dev.c
+++ b/drivers/crypto/ccp/psp-dev.c
@@ -84,8 +84,6 @@ static irqreturn_t psp_irq_handler(int irq, void *data)
 
 static void sev_wait_cmd_ioc(struct psp_device *psp, unsigned int *reg)
 {
-   psp->sev_int_rcvd = 0;
-
wait_event(psp->sev_int_queue, psp->sev_int_rcvd);
*reg = ioread32(psp->io_regs + PSP_CMDRESP);
 }
@@ -148,6 +146,8 @@ static int __sev_do_cmd_locked(int cmd, void *data, int 
*psp_ret)
iowrite32(phys_lsb, psp->io_regs + PSP_CMDBUFF_ADDR_LO);
iowrite32(phys_msb, psp->io_regs + PSP_CMDBUFF_ADDR_HI);
 
+   psp->sev_int_rcvd = 0;
+
reg = cmd;
reg <<= PSP_CMDRESP_CMD_SHIFT;
reg |= PSP_CMDRESP_IOC;



[PATCH v1 2/5] crypto: ccp: Add psp enabled message when initialization succeeds

2018-07-03 Thread Tom Lendacky
Add a dev_notice() message to the PSP initialization to report when the
PSP initialization has succeeded and the PSP is enabled.

Signed-off-by: Tom Lendacky 
---
 drivers/crypto/ccp/psp-dev.c |2 ++
 1 file changed, 2 insertions(+)

diff --git a/drivers/crypto/ccp/psp-dev.c b/drivers/crypto/ccp/psp-dev.c
index 973d683..91ef6ed 100644
--- a/drivers/crypto/ccp/psp-dev.c
+++ b/drivers/crypto/ccp/psp-dev.c
@@ -840,6 +840,8 @@ int psp_dev_init(struct sp_device *sp)
/* Enable interrupt */
iowrite32(-1, psp->io_regs + PSP_P2CMSG_INTEN);
 
+   dev_notice(dev, "psp enabled\n");
+
return 0;
 
 e_irq:



[PATCH v1 0/5] crypto: ccp - Driver updates 2018-07-03

2018-07-03 Thread Tom Lendacky
The following patches for the PSP support within the CCP driver are
included in this driver update series:

- Fix a possible race condition when checking for command completion
- Add a message to indicate if the PSP function has been enabled
- In preparation for moving register offsets into the version data,
  remove unused register definitions
- Add support for putting register offsets in the version data. This
  is needed as a new device (to be added in the next patch) has
  different offsets for the registers.
- Add support for a new CCP/PSP device ID
 
This patch series is based on cryptodev-2.6.

---

Tom Lendacky (5):
  crypto: ccp: Fix command completion detection race
  crypto: ccp: Add psp enabled message when initialization succeeds
  crypto: ccp: Remove unused #defines
  crypto: ccp: Support register differences between PSP devices
  crypto: ccp: Add support for new CCP/PSP device ID


 drivers/crypto/ccp/psp-dev.c |   32 +---
 drivers/crypto/ccp/psp-dev.h |   19 +--
 drivers/crypto/ccp/sp-dev.h  |7 ++-
 drivers/crypto/ccp/sp-pci.c  |   36 ++--
 4 files changed, 54 insertions(+), 40 deletions(-)

-- 
Tom Lendacky


Re: [PATCH] crypto: hash.h: Prevent use of req->digest in ahash update

2018-03-06 Thread Tom Lendacky
On 3/6/2018 5:45 AM, Kamil Konieczny wrote:
> Prevent improper use of req->digest field in ahash update, init, export and

Shouldn't that be req->result (here and below)?

Thanks,
Tom

> import functions in drivers code. A driver should use ahash request context
> if it needs to save internal state.
> 
> Signed-off-by: Kamil Konieczny 
> ---
>  include/crypto/hash.h | 11 +++
>  1 file changed, 7 insertions(+), 4 deletions(-)
> 
> diff --git a/include/crypto/hash.h b/include/crypto/hash.h
> index 2d1849dffb80..e97c2e662d6a 100644
> --- a/include/crypto/hash.h
> +++ b/include/crypto/hash.h
> @@ -74,7 +74,8 @@ struct ahash_request {
>   * @init: **[mandatory]** Initialize the transformation context. Intended 
> only to initialize the
>   * state of the HASH transformation at the beginning. This shall fill in
>   * the internal structures used during the entire duration of the whole
> - * transformation. No data processing happens at this point.
> + * transformation. No data processing happens at this point. Driver code
> + * implementation must not use req->digest.
>   * @update: **[mandatory]** Push a chunk of data into the driver for 
> transformation. This
>   *  function actually pushes blocks of data from upper layers into the
>   *  driver, which then passes those to the hardware as seen fit. This
> @@ -83,7 +84,8 @@ struct ahash_request {
>   *  transformation. This function shall not modify the transformation
>   *  context, as this function may be called in parallel with the same
>   *  transformation object. Data processing can happen synchronously
> - *  [SHASH] or asynchronously [AHASH] at this point.
> + *  [SHASH] or asynchronously [AHASH] at this point. Driver must not use
> + *  req->digest.
>   * @final: **[mandatory]** Retrieve result from the driver. This function 
> finalizes the
>   *  transformation and retrieves the resulting hash from the driver and
>   *  pushes it back to upper layers. No data processing happens at this
> @@ -120,11 +122,12 @@ struct ahash_request {
>   *   you want to save partial result of the transformation after
>   *   processing certain amount of data and reload this partial result
>   *   multiple times later on for multiple re-use. No data processing
> - *   happens at this point.
> + *   happens at this point. Driver must not use req->digest.
>   * @import: Import partial state of the transformation. This function loads 
> the
>   *   entire state of the ongoing transformation from a provided block of
>   *   data so the transformation can continue from this point onward. No
> - *   data processing happens at this point.
> + *   data processing happens at this point. Driver must not use
> + *   req->digest.
>   * @halg: see struct hash_alg_common
>   */
>  struct ahash_alg {
> 


Re: Why are we testing an intermediate result in ahash?

2018-03-05 Thread Tom Lendacky
On 3/5/2018 12:31 PM, Kamil Konieczny wrote:
> 
> 
> On 05.03.2018 18:47, Gary R Hook wrote:
>> On 03/05/2018 03:57 AM, Kamil Konieczny wrote:
>>>
>>>
>>> On 02.03.2018 22:11, Gary R Hook wrote:
 Commit 466d7b9f6 (cryptodev-2.6) added code to testmgr to populate, for 
 async hash operations,
 the result buffer with a known value and to test the buffer against that 
 value at intermediate
 steps. If the result buffer changes the operation is failed.

 My question is: why?

 What problem does this solve? Has this requirement existed all along, or 
 is it new?

 I'm now seeing complaints for AES/CMAC and SHA in my driver. I have no 
 problem updating the driver,
 of course, but I'd like to better understand the precipitating issue for 
 the commit.

 Mar  2 12:30:56 sosxen2 kernel: [   60.919198] alg: No test for cfb(aes) 
 (cfb-aes-ccp)
 Mar  2 12:30:56 sosxen2 kernel: [   60.924787] 367: alg: hash: update 
 failed on test 3 for cmac-aes-ccp: used req->result
 Mar  2 12:30:56 sosxen2 kernel: [   60.946571] 367: alg: hash: update 
 failed on test 4 for sha1-ccp: used req->result
 Mar  2 12:30:56 sosxen2 kernel: [   60.956461] 367: alg: hash: update 
 failed on test 1 for hmac-sha1-ccp: used req->result
 Mar  2 12:30:56 sosxen2 kernel: [   60.966117] 367: alg: hash: update 
 failed on test 4 for sha224-ccp: used req->result
>>>
>>> ahash req->result can be used in digest, final and finup hash operations.
>>> It should not be used in init and update (nor in export and import).
>>
>> Where is this documented, please? I'm not seeing it in Documentation/crypto. 
>> Of course, I could be looking for the wrong thing.
> 
> It was recent addition, and you are right, the doc needs update.
> 
>>> There were some bugs in past, when drivers try to use req->result
>>> as theirs temporary storage.
>>> The bug comes up in some scenarios when caller reused ahash request
>>> and leaves in req->result undefined value, it can be NULL or 
>>> container_of(NULL)
>>> or whatever was on stack
>>>
>>
>> As I mention in my other post, our driver vets the pointer before 
>> dereference. 
> 
> Problem will not happen with NULL, but only because there is code like 
> (pseudocode)
> in ahash_update:
> 
> 1: if (req->result == NULL)
> 2:   // use as temp memory ahash request context
> 3: else
> 4:  // use as temp memory req->result
> 
> The point is - if we need temporary storage for keeping some state between 
> updates,

Maybe other drivers are using req->result for temporary storage, but the
CCP driver isn't using this as temporary storage.  It was assumed that if
req->result is non-zero then the caller wanted the intermediate result of
the hash operation.  In that case, the hash value up to that point is
copied to the caller's buffer.

> we should use ahash request context, and get rid of the code lines 1,3,4

Which the CCP driver does.  All storage needed by the driver is part of
the request context.

> 
> The line 4 can bite us when req->result will be neither NULL nor valid memory 
> pointer.

That sounds like a caller problem to me.  Similar to someone allocating
memory and passing it to a function without clearing it.  If the caller
has left garbage in it, it's the caller's issue.

> The second argument is: why do we bother to check with 1: when we should
> already do 2: only?

As mentioned above, the driver already is doing the proper thing by
allocating space in the request context.  It isn't using req->result as
temporary storage, just returning the intermediate result of the hash
operation back to the caller.

If the requirements are now to only use req->result for a final operation,
that's ok, and it's simple enough for the CCP driver to be updated to do
that.
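
For reference, the pattern being described, with all intermediate state
in the request context and req->result written only on a final
operation, looks roughly like this (a sketch with hypothetical
do_hw_update()/do_hw_final() helpers, not the CCP driver's code):

	struct my_hash_req_ctx {
		u8 partial_digest[64];	/* running hash state lives here */
	};

	static int my_ahash_update(struct ahash_request *req)
	{
		struct my_hash_req_ctx *rctx = ahash_request_ctx(req);

		/* req->result may be NULL or stale here; never touch it */
		return do_hw_update(rctx, req->src, req->nbytes);
	}

	static int my_ahash_final(struct ahash_request *req)
	{
		struct my_hash_req_ctx *rctx = ahash_request_ctx(req);

		/* only a final/finup/digest writes the caller's buffer */
		return do_hw_final(rctx, req->result);
	}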

In the future, I would expect that any change that can impact a driver
like this (testmgr failures where there were previously none) should be
better communicated to the crypto driver maintainers so they can be a bit
more proactive in making any required changes.

Thanks,
Tom

> 
>> And I don't have a problem with a clear definition of what should and should 
>> not happen to 
>> buffers offered by a caller. I simply want to know where this behavior is 
>> defined, and is it a change from the past?
> 
> This came up after some code review.
> 


Re: [PATCH] crypto/ccp: don't disable interrupts while setting up debugfs

2018-02-26 Thread Tom Lendacky
On 2/25/2018 8:04 PM, Hook, Gary wrote:
> On 2/23/2018 5:33 PM, Sebastian Andrzej Siewior wrote:
>> I don't why we need take a single write lock and disable interrupts
>> while setting up debugfs. This is what what happens when we try anyway:
> 
> There is more than one CCP on some processors. The lock is intended to
> serialize attempts to initialize the new directory, but a R/W lock isn't
> required.
> 
> My testing on  an EPYC (8 CCPs) didn't expose this problem. May I ask what
> hardware you used here?

Probably not a hardware issue so much as a kernel configuration
difference. Try enabling CONFIG_DEBUG_ATOMIC_SLEEP and see if you can
recreate it.  And if irqs
are disabled, then you're probably looking at having to use a spinlock to
serialize creation of the directory.

Thanks,
Tom

> 
>> |ccp :03:00.2: enabling device ( -> 0002)
>> |BUG: sleeping function called from invalid context at
>> kernel/locking/rwsem.c:69
>> |in_atomic(): 1, irqs_disabled(): 1, pid: 3, name: kworker/0:0
>> |irq event stamp: 17150
>> |hardirqs last  enabled at (17149): [<97a18c49>]
>> restore_regs_and_return_to_kernel+0x0/0x23
>> |hardirqs last disabled at (17150): [<0773b3a9>]
>> _raw_write_lock_irqsave+0x1b/0x50
>> |softirqs last  enabled at (17148): [<64d56155>]
>> __do_softirq+0x3b8/0x4c1
>> |softirqs last disabled at (17125): [<92633c18>] irq_exit+0xb1/0xc0
>> |CPU: 0 PID: 3 Comm: kworker/0:0 Not tainted 4.16.0-rc2+ #30
>> |Workqueue: events work_for_cpu_fn
>> |Call Trace:
>> | dump_stack+0x7d/0xb6
>> | ___might_sleep+0x1eb/0x250
>> | down_write+0x17/0x60
>> | start_creating+0x4c/0xe0
>> | debugfs_create_dir+0x9/0x100
>> | ccp5_debugfs_setup+0x191/0x1b0
>> | ccp5_init+0x8a7/0x8c0
>> | ccp_dev_init+0xb8/0xe0
>> | sp_init+0x6c/0x90
>> | sp_pci_probe+0x26e/0x590
>> | local_pci_probe+0x3f/0x90
>> | work_for_cpu_fn+0x11/0x20
>> | process_one_work+0x1ff/0x650
>> | worker_thread+0x1d4/0x3a0
>> | kthread+0xfe/0x130
>> | ret_from_fork+0x27/0x50
>>
>> If any locking is required, a simple mutex will do it.
>>
>> Cc: Gary R Hook 
>> Signed-off-by: Sebastian Andrzej Siewior 
>> ---
>>   drivers/crypto/ccp/ccp-debugfs.c | 7 +++
>>   1 file changed, 3 insertions(+), 4 deletions(-)
>>
>> diff --git a/drivers/crypto/ccp/ccp-debugfs.c b/drivers/crypto/ccp/ccp-debugfs.c
>> index 59d4ca4e72d8..1a734bd2070a 100644
>> --- a/drivers/crypto/ccp/ccp-debugfs.c
>> +++ b/drivers/crypto/ccp/ccp-debugfs.c
>> @@ -278,7 +278,7 @@ static const struct file_operations
>> ccp_debugfs_stats_ops = {
>>   };
>>     static struct dentry *ccp_debugfs_dir;
>> -static DEFINE_RWLOCK(ccp_debugfs_lock);
>> +static DEFINE_MUTEX(ccp_debugfs_lock);
>>     #define    MAX_NAME_LEN    20
>>   @@ -290,16 +290,15 @@ void ccp5_debugfs_setup(struct ccp_device *ccp)
>>   struct dentry *debugfs_stats;
>>   struct dentry *debugfs_q_instance;
>>   struct dentry *debugfs_q_stats;
>> -    unsigned long flags;
>>   int i;
>>     if (!debugfs_initialized())
>>   return;
>> -    write_lock_irqsave(&ccp_debugfs_lock, flags);
>> +    mutex_lock(&ccp_debugfs_lock);
>>   if (!ccp_debugfs_dir)
>>   ccp_debugfs_dir = debugfs_create_dir(KBUILD_MODNAME, NULL);
>> -    write_unlock_irqrestore(&ccp_debugfs_lock, flags);
>> +    mutex_unlock(&ccp_debugfs_lock);
>>   if (!ccp_debugfs_dir)
>>   return;
>>  
> 


Re: [Part2 PATCH v5.1 12.1/31] crypto: ccp: Add Secure Encrypted Virtualization (SEV) command support

2017-10-10 Thread Tom Lendacky

On 10/10/2017 10:00 AM, Brijesh Singh wrote:



On 10/09/2017 10:21 AM, Borislav Petkov wrote:
...




03:00.1 Encryption controller: Advanced Micro Devices, Inc. [AMD] Device
1468
13:00.2 Encryption controller: Advanced Micro Devices, Inc. [AMD] Device
1456


Btw, what do those PCI functions each do? Public PPR doesn't have them
documented.



Looking at the pci_device_id table (sp-pci.c), device id 0x1468 
provides CCP support directly on the x86 side and device id 0x1456 
provides support for both the CCP and PSP features through the AMD 
Secure Processor (AMD-SP).





Sure, and if you manage all the devices in a single driver, you can
simply keep them all in a linked list or in an array and iterating over
them is trivial.

Because right now you have

1. sp-pci.c::sp_pci_probe() execute upon the PCI device detection

2. at some point, it does sp-dev.c::sp_init() which decides whether CCP 
or PSP


3. If PSP, it calls psp-dev.c::psp_dev_init() which gets that
sp->dev_vdata->psp_vdata, which is nothing more than a simple offset
0x10500 which is where the PSP io regs are. For example, if this offset
is hardcoded, why are we even passing that vdata? Just set psp->io_regs =
0x10500. No need for all that passing of structs around.


Maybe for the very first implementation we could do that, and that was
what was originally done for the CCP.  But as you can see, the CCP does
not have a fixed register offset across the various iterations of the
device, and the same can be expected to hold true for the PSP.  This
just makes it easier to change things in the future to support newer
devices.



4. and finally, after that *whole* init has been done, you get to do
->set_psp_master_device(sp);

Or, you can save yourself all that jumping through hoops, merge sp-pci.c
and sp-dev.c into a single sp.c and put *everything* sp-related into
it. And then do the whole work of picking hw apart, detection and
configuration in sp_pci_probe() and have helper functions preparing and
configuring the device.

At the end, it adds it to the list of devices sp.c manages and done. You
actually have that list already:

static LIST_HEAD(sp_units);

in sp-dev.c.

You don't need the set_master thing either - you simply set the
sp_dev_master pointer inside sp.c




I was trying to avoid putting PSP/SEV specific changes in sp-dev.* files. 
But if sp.c approach is acceptable to the maintainer then I can work 
towards merging sp-dev.c and sp-pci.c into sp.c and then add the PSP/SEV 
support.


I would prefer to keep things separated as they are.  The common code is
in one place and the pci/platform specific code resides in unique files. For
sp-pci.c, it can be excluded from compilation if CONFIG_PCI is not defined
vs. adding #ifdefs into sp-dev.c or sp.c.  The code is working nicely and,
at least to me, seems easily maintainable this way.  If we really want to
avoid the extra calls during probing, etc. then we can take a closer look
afterwards and determine what is the best approach taking into account
the CCP and some of the other PSP functionality that is coming.

Thanks,
Tom





sp_init() can then go and you can replace it with its function body,
deciding whether it is a CCP or PSP and then call the respective
function which is also in sp.c or ccp-dev.c

And then all those separate compilation units and the interfaces between
them disappear - you have only the calls into the PSP and that's it.

Btw, the CCP thing could remain separate initially, I guess, with all
that ccp-* stuff in there.



Yep, if we decide to go with your recommended approach then we should 
leave the CCP as-is for now.




I was trying to follow the CCP  model -- in which sp-dev.c simply
forwards the call to ccp-dev.c which does the real work.


And you don't really need that - you can do the real work directly in
sp-dev.c or sp.c or whatever.

Currently, sev-dev.c contains barebone common code. IMO, keeping all
the PSP private functions and data structure outside the sp-dev.c/.h
is the right thing.


By this model probably, but it causes all that init and registration
jump-through-hoops for no real reason. It is basically wasting cycles
and energy.

I'm all for splitting if it makes sense. But right now I don't see much
sense in this - it is basically a bunch of small compilation units
calling each other. And they could be merged into a single sp.c which
does it all in one go, without a lot of blabla.



Additionally, I would like to highlight that if we decide to go with
moving all the PSP functionality into sp-dev.c then we have to add #ifdef
CONFIG_CRYPTO_DEV_SP_PSP because the PSP feature depends on X86_64,
whereas sp-dev.c gets compiled for all architectures (including aarch64,
i386 and x86_64).


That's fine. You can build it on 32-bit but add to the init function

if (IS_ENABLED(CONFIG_X86_32))
    return -ENODEV;

and be done with it. No need for the ifdeffery.



OK, i will use IS_ENABLED where applicable.


Re: [PATCH v2 3/4] crypto: ccp - Rework the unit-size check for XTS-AES

2017-07-24 Thread Tom Lendacky

On 7/21/2017 2:05 PM, Gary R Hook wrote:

The CCP supports a limited set of unit-size values. Change the check
for this parameter such that acceptable values match the enumeration.
Then clarify the conditions under which we must use the fallback
implementation.

Signed-off-by: Gary R Hook <gary.h...@amd.com>
---
  drivers/crypto/ccp/ccp-crypto-aes-xts.c |   75 ++-
  1 file changed, 35 insertions(+), 40 deletions(-)

diff --git a/drivers/crypto/ccp/ccp-crypto-aes-xts.c b/drivers/crypto/ccp/ccp-crypto-aes-xts.c
index 4a313f62dbea..3c37794ffe2d 100644
--- a/drivers/crypto/ccp/ccp-crypto-aes-xts.c
+++ b/drivers/crypto/ccp/ccp-crypto-aes-xts.c
@@ -1,8 +1,9 @@
  /*
   * AMD Cryptographic Coprocessor (CCP) AES XTS crypto API support
   *
- * Copyright (C) 2013 Advanced Micro Devices, Inc.
+ * Copyright (C) 2013,2017 Advanced Micro Devices, Inc.
   *
+ * Author: Gary R Hook <gary.h...@amd.com>
   * Author: Tom Lendacky <thomas.lenda...@amd.com>
   *
   * This program is free software; you can redistribute it and/or modify
@@ -38,46 +39,26 @@ struct ccp_unit_size_map {
u32 value;
  };
  
-static struct ccp_unit_size_map unit_size_map[] = {

+static struct ccp_unit_size_map xts_unit_sizes[] = {
{
-   .size   = 4096,
-   .value  = CCP_XTS_AES_UNIT_SIZE_4096,
-   },
-   {
-   .size   = 2048,
-   .value  = CCP_XTS_AES_UNIT_SIZE_2048,
-   },
-   {
-   .size   = 1024,
-   .value  = CCP_XTS_AES_UNIT_SIZE_1024,
+   .size   = 16,
+   .value  = CCP_XTS_AES_UNIT_SIZE_16,
},
{
-   .size   = 512,
+   .size   = 512,
.value  = CCP_XTS_AES_UNIT_SIZE_512,
},
{
-   .size   = 256,
-   .value  = CCP_XTS_AES_UNIT_SIZE__LAST,
-   },
-   {
-   .size   = 128,
-   .value  = CCP_XTS_AES_UNIT_SIZE__LAST,
-   },
-   {
-   .size   = 64,
-   .value  = CCP_XTS_AES_UNIT_SIZE__LAST,
-   },
-   {
-   .size   = 32,
-   .value  = CCP_XTS_AES_UNIT_SIZE__LAST,
+   .size   = 1024,
+   .value  = CCP_XTS_AES_UNIT_SIZE_1024,
},
{
-   .size   = 16,
-   .value  = CCP_XTS_AES_UNIT_SIZE_16,
+   .size   = 2048,
+   .value  = CCP_XTS_AES_UNIT_SIZE_2048,
},
{
-   .size   = 1,
-   .value  = CCP_XTS_AES_UNIT_SIZE__LAST,
+   .size   = 4096,
+   .value  = CCP_XTS_AES_UNIT_SIZE_4096,
},
  };
  
@@ -124,7 +105,9 @@ static int ccp_aes_xts_crypt(struct ablkcipher_request *req,

  {
struct ccp_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
struct ccp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);
+   unsigned int fallback = 0;
unsigned int unit;
+   u32 block_size;


I don't see this variable used anywhere. It should be deleted.


u32 unit_size;
int ret;
  
@@ -137,18 +120,30 @@ static int ccp_aes_xts_crypt(struct ablkcipher_request *req,

if (!req->info)
return -EINVAL;
  
+	/* Check conditions under which the CCP can fulfill a request. The

+* device can handle input plaintext of a length that is a multiple
+* of the unit_size, but the crypto implementation only supports
+* the unit_size being equal to the input length. This limits the
+* number of scenarios we can handle. Also validate the key length.


Remove the "Also validate the key length." since that happens below and
is covered by a different comment.


+*/
+   block_size = 0;
unit_size = CCP_XTS_AES_UNIT_SIZE__LAST;
-   if (req->nbytes <= unit_size_map[0].size) {
-   for (unit = 0; unit < ARRAY_SIZE(unit_size_map); unit++) {
-   if (!(req->nbytes & (unit_size_map[unit].size - 1))) {
-   unit_size = unit_size_map[unit].value;
-   break;
-   }
+   for (unit = 0; unit < ARRAY_SIZE(xts_unit_sizes); unit++) {
+   if (req->nbytes == xts_unit_sizes[unit].size) {
+   unit_size = unit;
+   block_size = xts_unit_sizes[unit].size;
+   break;
}
}
-
-   if ((unit_size == CCP_XTS_AES_UNIT_SIZE__LAST) ||
-   (ctx->u.aes.key_len != AES_KEYSIZE_128)) {
+   /* The CCP has restrictions on block sizes. Also, a version 3 device
+* only supports AES-128 operations; version 5 CCPs support both
+* AES-128 and -256 operations.


The 256-bit XTS support isn't here yet, so shouldn't mention it now.


+*/
+   if (unit_size == CCP_XTS_AES_UNIT_SIZE__LAST)
+   fallback = 1;
+   if (ctx->

Re: [PATCH v2 2/4] crypto: ccp - Enable XTS-AES-128 support on all CCPs

2017-07-24 Thread Tom Lendacky

On 7/21/2017 2:04 PM, Gary R Hook wrote:

Version 5 CCPs have some new requirements for XTS-AES: the type field
must be specified, and the key requires 512 bits, with each part
occupying 256 bits and padded with zeroes.


This appears to be a fix and not a feature. You need to send this as
a separate patch through the fix process and back through to the stable
releases.



Signed-off-by: Gary R Hook 
---
  drivers/crypto/ccp/ccp-dev-v5.c |2 ++
  drivers/crypto/ccp/ccp-dev.h|2 ++
  drivers/crypto/ccp/ccp-ops.c|   52 ---
  3 files changed, 47 insertions(+), 9 deletions(-)

diff --git a/drivers/crypto/ccp/ccp-dev-v5.c b/drivers/crypto/ccp/ccp-dev-v5.c
index b3526336d608..0fb4519c5194 100644
--- a/drivers/crypto/ccp/ccp-dev-v5.c
+++ b/drivers/crypto/ccp/ccp-dev-v5.c
@@ -145,6 +145,7 @@ union ccp_function {
  #define   CCP_AES_MODE(p) ((p)->aes.mode)
  #define   CCP_AES_TYPE(p) ((p)->aes.type)
  #define   CCP_XTS_SIZE(p) ((p)->aes_xts.size)
+#define   CCP_XTS_TYPE(p) ((p)->aes_xts.type)
  #define   CCP_XTS_ENCRYPT(p)  ((p)->aes_xts.encrypt)
  #define   CCP_DES3_SIZE(p)((p)->des3.size)
  #define   CCP_DES3_ENCRYPT(p) ((p)->des3.encrypt)
@@ -346,6 +347,7 @@ static int ccp5_perform_xts_aes(struct ccp_op *op)
function.raw = 0;
	CCP_XTS_ENCRYPT(&function) = op->u.xts.action;
	CCP_XTS_SIZE(&function) = op->u.xts.unit_size;
+   CCP_XTS_TYPE(&function) = op->u.xts.type;
	CCP5_CMD_FUNCTION(&desc) = function.raw;
  
  	CCP5_CMD_LEN(&desc) = op->src.u.dma.length;

diff --git a/drivers/crypto/ccp/ccp-dev.h b/drivers/crypto/ccp/ccp-dev.h
index 9320931d89da..3d51180199ac 100644
--- a/drivers/crypto/ccp/ccp-dev.h
+++ b/drivers/crypto/ccp/ccp-dev.h
@@ -194,6 +194,7 @@
  #define CCP_AES_CTX_SB_COUNT  1
  
  #define CCP_XTS_AES_KEY_SB_COUNT	1

+#define CCP5_XTS_AES_KEY_SB_COUNT  2
  #define CCP_XTS_AES_CTX_SB_COUNT  1
  
  #define CCP_DES3_KEY_SB_COUNT		1

@@ -497,6 +498,7 @@ struct ccp_aes_op {
  };
  
  struct ccp_xts_aes_op {

+   enum ccp_aes_type type;
enum ccp_aes_action action;
enum ccp_xts_aes_unit_size unit_size;
  };
diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c
index e23d138fc1ce..8113355151d2 100644
--- a/drivers/crypto/ccp/ccp-ops.c
+++ b/drivers/crypto/ccp/ccp-ops.c
@@ -1038,6 +1038,8 @@ static int ccp_run_xts_aes_cmd(struct ccp_cmd_queue *cmd_q,
struct ccp_op op;
unsigned int unit_size, dm_offset;
bool in_place = false;
+   unsigned int sb_count = 0;


No need to initialize to zero here.


+   enum ccp_aes_type aestype;
int ret;
  
  	switch (xts->unit_size) {

@@ -1061,9 +1063,12 @@ static int ccp_run_xts_aes_cmd(struct ccp_cmd_queue *cmd_q,
return -EINVAL;
}
  
-	if (xts->key_len != AES_KEYSIZE_128)

+   if (xts->key_len == AES_KEYSIZE_128)
+   aestype = CCP_AES_TYPE_128;
+   else
return -EINVAL;
  
+


Remove extra blank line.


if (!xts->final && (xts->src_len & (AES_BLOCK_SIZE - 1)))
return -EINVAL;
  
@@ -1086,20 +1091,47 @@ static int ccp_run_xts_aes_cmd(struct ccp_cmd_queue *cmd_q,

op.u.xts.action = xts->action;
op.u.xts.unit_size = xts->unit_size;
  
-	/* All supported key sizes fit in a single (32-byte) SB entry

-* and must be in little endian format. Use the 256-bit byte
-* swap passthru option to convert from big endian to little
-* endian.
+   /* A version 3 device only supports 128-bit keys, which fits into a
+* single SB entry. A version 5 device uses a 512-bit vector, so two
+* SB entries.
 */
+   if (cmd_q->ccp->vdata->version == CCP_VERSION(3, 0))
+   sb_count = CCP_XTS_AES_KEY_SB_COUNT;
+   else
+   sb_count = CCP5_XTS_AES_KEY_SB_COUNT;
	ret = ccp_init_dm_workarea(&key, cmd_q,
-  CCP_XTS_AES_KEY_SB_COUNT * CCP_SB_BYTES,
+  sb_count * CCP_SB_BYTES,
   DMA_TO_DEVICE);
if (ret)
return ret;
  
-	dm_offset = CCP_SB_BYTES - AES_KEYSIZE_128;

-   ccp_set_dm_area(&key, dm_offset, xts->key, 0, xts->key_len);
-   ccp_set_dm_area(&key, 0, xts->key, dm_offset, xts->key_len);
+   if (cmd_q->ccp->vdata->version == CCP_VERSION(3, 0)) {
+   /* All supported key sizes must be in little endian format.
+* Use the 256-bit byte swap passthru option to convert from
+* big endian to little endian.
+*/
+   dm_offset = CCP_SB_BYTES - AES_KEYSIZE_128;
+   ccp_set_dm_area(&key, dm_offset, xts->key, 0, xts->key_len);
+   ccp_set_dm_area(&key, 0, xts->key, xts->key_len, xts->key_len);
+   } else {
+   /* The AES key is at the little end and the tweak key is
+   

Re: [PATCH] crypto: ccp - Fix XTS-AES support on a version 5 CCP

2017-07-17 Thread Tom Lendacky

On 7/17/2017 3:08 PM, Gary R Hook wrote:

Version 5 CCPs have differing requirements for XTS-AES: key components
are stored in a 512-bit vector. The context must be little-endian
justified. AES-256 is supported now, so propagate the cipher size to
the command descriptor.

Signed-off-by: Gary R Hook <gary.h...@amd.com>
---
  drivers/crypto/ccp/ccp-crypto-aes-xts.c |   79 ---
  drivers/crypto/ccp/ccp-dev-v5.c |2 +
  drivers/crypto/ccp/ccp-dev.h|2 +
  drivers/crypto/ccp/ccp-ops.c|   56 ++
  4 files changed, 89 insertions(+), 50 deletions(-)

diff --git a/drivers/crypto/ccp/ccp-crypto-aes-xts.c b/drivers/crypto/ccp/ccp-crypto-aes-xts.c
index 58a4244b4752..8d248b198e22 100644
--- a/drivers/crypto/ccp/ccp-crypto-aes-xts.c
+++ b/drivers/crypto/ccp/ccp-crypto-aes-xts.c
@@ -1,7 +1,7 @@
  /*
   * AMD Cryptographic Coprocessor (CCP) AES XTS crypto API support
   *
- * Copyright (C) 2013 Advanced Micro Devices, Inc.
+ * Copyright (C) 2013,2017 Advanced Micro Devices, Inc.
   *
   * Author: Tom Lendacky <thomas.lenda...@amd.com>
   *
@@ -37,46 +37,26 @@ struct ccp_unit_size_map {
u32 value;
  };
  
-static struct ccp_unit_size_map unit_size_map[] = {

+static struct ccp_unit_size_map xts_unit_sizes[] = {
{
-   .size   = 4096,
-   .value  = CCP_XTS_AES_UNIT_SIZE_4096,
-   },
-   {
-   .size   = 2048,
-   .value  = CCP_XTS_AES_UNIT_SIZE_2048,
-   },
-   {
-   .size   = 1024,
-   .value  = CCP_XTS_AES_UNIT_SIZE_1024,
+   .size   = 16,
+   .value  = CCP_XTS_AES_UNIT_SIZE_16,
},
{
-   .size   = 512,
+   .size   = 512,
.value  = CCP_XTS_AES_UNIT_SIZE_512,
},
{
-   .size   = 256,
-   .value  = CCP_XTS_AES_UNIT_SIZE__LAST,
-   },
-   {
-   .size   = 128,
-   .value  = CCP_XTS_AES_UNIT_SIZE__LAST,
-   },
-   {
-   .size   = 64,
-   .value  = CCP_XTS_AES_UNIT_SIZE__LAST,
-   },
-   {
-   .size   = 32,
-   .value  = CCP_XTS_AES_UNIT_SIZE__LAST,
+   .size   = 1024,
+   .value  = CCP_XTS_AES_UNIT_SIZE_1024,
},
{
-   .size   = 16,
-   .value  = CCP_XTS_AES_UNIT_SIZE_16,
+   .size   = 2048,
+   .value  = CCP_XTS_AES_UNIT_SIZE_2048,
},
{
-   .size   = 1,
-   .value  = CCP_XTS_AES_UNIT_SIZE__LAST,
+   .size   = 4096,
+   .value  = CCP_XTS_AES_UNIT_SIZE_4096,
},
  };


Because of the way the unit size check is performed, you can't delete
the intermediate size checks.  Those must remain so that unit sizes
that aren't supported by the CCP are sent to the fallback mechanism.

Also, re-arranging the order should be a separate patch if that doesn't
really fix anything.
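
For context, the check being defended is the original loop quoted
further down; condensed (not verbatim), it routes any length that does
not divide a supported unit size to the fallback cipher:

	unit_size = CCP_XTS_AES_UNIT_SIZE__LAST;
	if (req->nbytes <= unit_size_map[0].size) {
		for (unit = 0; unit < ARRAY_SIZE(unit_size_map); unit++) {
			/* the 256/128/64/32/1 entries map to __LAST, so
			 * requests matching only those sizes still fall
			 * back instead of being sent to the CCP */
			if (!(req->nbytes & (unit_size_map[unit].size - 1))) {
				unit_size = unit_size_map[unit].value;
				break;
			}
		}
	}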

  
@@ -97,14 +77,20 @@ static int ccp_aes_xts_setkey(struct crypto_ablkcipher *tfm, const u8 *key,

  unsigned int key_len)
  {
struct ccp_ctx *ctx = crypto_tfm_ctx(crypto_ablkcipher_tfm(tfm));
+   unsigned int ccpversion = ccp_version();
  
  	/* Only support 128-bit AES key with a 128-bit Tweak key,

 * otherwise use the fallback
 */
+


Remove the addition of the blank line and update the above comment to
indicate the new supported key size added below.


switch (key_len) {
case AES_KEYSIZE_128 * 2:
memcpy(ctx->u.aes.key, key, key_len);
break;
+   case AES_KEYSIZE_256 * 2:
+   if (ccpversion > CCP_VERSION(3, 0))
+   memcpy(ctx->u.aes.key, key, key_len);
+   break;


Isn't u.aes.key defined with a maximum buffer size of AES_MAX_KEY_SIZE
(which is 32)?  I think this will cause a buffer overrun on memcpy.
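
A minimal sketch of the kind of guard being asked for (hypothetical,
not the actual fix):

	case AES_KEYSIZE_256 * 2:
		/* key_len is 64 bytes here; unless ctx->u.aes.key is sized
		 * for two 256-bit halves, this memcpy overruns it */
		if (ccpversion > CCP_VERSION(3, 0) &&
		    key_len <= sizeof(ctx->u.aes.key))
			memcpy(ctx->u.aes.key, key, key_len);
		break;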


}
ctx->u.aes.key_len = key_len / 2;
	sg_init_one(&ctx->u.aes.key_sg, ctx->u.aes.key, key_len);
@@ -117,7 +103,10 @@ static int ccp_aes_xts_crypt(struct ablkcipher_request 
*req,
  {
struct ccp_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
struct ccp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);
+   unsigned int ccpversion = ccp_version();
+   unsigned int fallback = 0;
unsigned int unit;
+   u32 block_size = 0;
u32 unit_size;
int ret;
  
@@ -131,17 +120,29 @@ static int ccp_aes_xts_crypt(struct ablkcipher_request *req,

return -EINVAL;
  
  	unit_size = CCP_XTS_AES_UNIT_SIZE__LAST;

-   if (req->nbytes <= unit_size_map[0].size) {


This check can't be deleted.  It was added specifically to catch cases
where the size was greater than 4096.


-   for (unit = 0; unit < ARRAY_SIZE(unit_size_map); unit++) {
-

Re: [PATCH v2 2/3] crypto: ccp - Introduce the AMD Secure Processor device

2017-06-28 Thread Tom Lendacky

On 6/28/2017 3:26 PM, Brijesh Singh wrote:



On 06/28/2017 02:53 PM, Tom Lendacky wrote:


In this I am leaving the top level config as-is and adding
CONFIG_CRYPTO_DEV_SP_CCP to enable the CCP device support inside the 
SP device driver.


[*] Support for AMD Secure Processor
  Secure Processor device driver
Encryption and hashing offload support
-*-Cryptographic Coprocessor device


I think the "Encryption and hashing offload support" should be indented
under "Cryptographic Coprocessor device" since it is a function of the
CCP and not the SP. Not sure if we can remove a level of menu somehow,
something to explore.


Good point, the updated Kconfig looks like this and let me know it works.


I think that looks better.

Thanks,
Tom



[*] Support for AMD Secure Processor
   Secure Processor device driver
[*] Cryptographic Coprocessor device
   Encryption and hashing offload support

# cat drivers/crypto/ccp/Kconfig

config CRYPTO_DEV_CCP_DD
  tristate "Secure Processor device driver"
  default m
  help
Provides AMD Secure Processor device driver.
If you choose 'M' here, this module will be called ccp.

  config CRYPTO_DEV_SP_CCP
  bool "Cryptographic Coprocessor device"
  default y
  depends on CRYPTO_DEV_CCP_DD
  select HW_RANDOM
  select DMA_ENGINE
  select DMADEVICES
  select CRYPTO_SHA1
  select CRYPTO_SHA256
  help
Provides the support for AMD Cryptographic Coprocessor (CCP) 
device
which can be used to offload encryption operations such as 
SHA, AES

and more.

  config CRYPTO_DEV_CCP_CRYPTO
  tristate "Encryption and hashing offload support"
  default m
  depends on CRYPTO_DEV_CCP_DD
  depends on CRYPTO_DEV_SP_CCP
  select CRYPTO_HASH
  select CRYPTO_BLKCIPHER
  select CRYPTO_AUTHENC
  help
Support for using the cryptographic API with the AMD 
Cryptographic
Coprocessor. This module supports offload of SHA and AES 
algorithms.

If you choose 'M' here, this module will be called ccp_crypto.


Re: [PATCH v2 2/3] crypto: ccp - Introduce the AMD Secure Processor device

2017-06-28 Thread Tom Lendacky

On 6/28/2017 2:39 PM, Brijesh Singh wrote:



On 06/28/2017 12:47 PM, Tom Lendacky wrote:


diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 0528a62..418f991 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -512,14 +512,14 @@ config CRYPTO_DEV_ATMEL_SHA
To compile this driver as a module, choose M here: the module
will be called atmel-sha.
-config CRYPTO_DEV_CCP
-bool "Support for AMD Cryptographic Coprocessor"
+config CRYPTO_DEV_SP
+bool "Support for AMD Secure Processor"
  depends on ((X86 && PCI) || (ARM64 && (OF_ADDRESS || ACPI))) && HAS_IOMEM

  help
-  The AMD Cryptographic Coprocessor provides hardware offload 
support

-  for encryption, hashing and related operations.
+  The AMD Secure Processor provides hardware offload support for 
memory
+  encryption in virtualization and cryptographic hashing and 
related operations.

-if CRYPTO_DEV_CCP
+if CRYPTO_DEV_SP
  source "drivers/crypto/ccp/Kconfig"
  endif
diff --git a/drivers/crypto/ccp/Kconfig b/drivers/crypto/ccp/Kconfig
index 2238f77..bc08f03 100644
--- a/drivers/crypto/ccp/Kconfig
+++ b/drivers/crypto/ccp/Kconfig
@@ -1,26 +1,37 @@
-config CRYPTO_DEV_CCP_DD
-tristate "Cryptographic Coprocessor device driver"
-depends on CRYPTO_DEV_CCP
-default m
-select HW_RANDOM
-select DMA_ENGINE
-select DMADEVICES
-select CRYPTO_SHA1
-select CRYPTO_SHA256
-help
-  Provides the interface to use the AMD Cryptographic Coprocessor
-  which can be used to offload encryption operations such as SHA,
-  AES and more. If you choose 'M' here, this module will be called
-  ccp.
-
  config CRYPTO_DEV_CCP_CRYPTO
  tristate "Encryption and hashing offload support"
-depends on CRYPTO_DEV_CCP_DD
+depends on CRYPTO_DEV_SP_DD
  default m
  select CRYPTO_HASH
  select CRYPTO_BLKCIPHER
  select CRYPTO_AUTHENC
+select CRYPTO_DEV_CCP
  help
Support for using the cryptographic API with the AMD 
Cryptographic
Coprocessor. This module supports offload of SHA and AES 
algorithms.

If you choose 'M' here, this module will be called ccp_crypto.
+
+config CRYPTO_DEV_SP_DD
+tristate "Secure Processor device driver"
+depends on CRYPTO_DEV_SP
+default m
+help
+  Provides the interface to use the AMD Secure Processor. The
+  AMD Secure Processor supports the Platform Security Processor (PSP)

+  and Cryptographic Coprocessor (CCP). If you choose 'M' here, this
+  module will be called ccp.
+
+if CRYPTO_DEV_SP_DD
+config CRYPTO_DEV_CCP
+bool "Cryptographic Coprocessor interface"
+default y
+select HW_RANDOM
+select DMA_ENGINE
+select DMADEVICES
+select CRYPTO_SHA1
+select CRYPTO_SHA256
+help
+  Provides the interface to use the AMD Cryptographic Coprocessor
+  which can be used to offload encryption operations such as SHA,
+  AES and more.
+endif


I think the Kconfig changes need to be looked at a bit closer. The
hierarchy of the original version is changed and the number of entries
might be able to be reduced.



Thanks Tom, how about the below patch?

In this I am leaving the top level config as-is and adding
CONFIG_CRYPTO_DEV_SP_CCP to enable the CCP device support inside the SP 
device driver.


[*] Support for AMD Secure Processor
  Secure Processor device driver
Encryption and hashing offload support
-*-Cryptographic Coprocessor device


I think the "Encryption and hashing offload support" should be indented
under "Cryptographic Coprocessor device" since it is a function of the
CCP and not the SP. Not sure if we can remove a level of menu somehow,
something to explore.

Thanks,
Tom




diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 0528a62..148b516 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -513,11 +513,11 @@ config CRYPTO_DEV_ATMEL_SHA
   will be called atmel-sha.

  config CRYPTO_DEV_CCP
-   bool "Support for AMD Cryptographic Coprocessor"
+   bool "Support for AMD Secure Processor"
 depends on ((X86 && PCI) || (ARM64 && (OF_ADDRESS || ACPI))) && HAS_IOMEM

 help
- The AMD Cryptographic Coprocessor provides hardware offload support
- for encryption, hashing and related operations.
+ The AMD Secure Processor provides hardware offload support for memory
+ encryption in virtualization and cryptographic hashing and related operations.


  if CRYPTO_DEV_CCP
 source "drivers/crypto/ccp/Kconfig"
diff --git a/drivers/crypto/ccp/Kconfig b/drivers/crypto/ccp/Kconfig
index 2238f77..ef3a5fb 100644
--- a/drivers/crypto/ccp/Kconfig
+++ b/drivers/crypto/ccp/Kconfig
@@ -1,26 +1,34 @@
  config CRYPTO_DEV_CCP_DD
-   tristate &

Re: [PATCH v2 2/3] crypto: ccp - Introduce the AMD Secure Processor device

2017-06-28 Thread Tom Lendacky

On 6/23/2017 11:06 AM, Brijesh Singh wrote:

The CCP device is part of the AMD Secure Processor. In order to expand
the usage of the AMD Secure Processor, create a framework that allows
functional components of the AMD Secure Processor to be initialized and
handled appropriately.

Signed-off-by: Brijesh Singh 
---
  drivers/crypto/Kconfig|  10 +--
  drivers/crypto/ccp/Kconfig|  43 +
  drivers/crypto/ccp/Makefile   |   6 +-
  drivers/crypto/ccp/ccp-dev-v3.c   |   5 +-
  drivers/crypto/ccp/ccp-dev-v5.c   |   5 +-
  drivers/crypto/ccp/ccp-dev.c  | 106 +-
  drivers/crypto/ccp/ccp-dev.h  |  21 +
  drivers/crypto/ccp/ccp-pci.c  |  81 +++--
  drivers/crypto/ccp/ccp-platform.c |  70 ---
  drivers/crypto/ccp/sp-dev.c   | 180 ++
  drivers/crypto/ccp/sp-dev.h   | 120 +
  include/linux/ccp.h   |   3 +-
  12 files changed, 475 insertions(+), 175 deletions(-)
  create mode 100644 drivers/crypto/ccp/sp-dev.c
  create mode 100644 drivers/crypto/ccp/sp-dev.h

diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 0528a62..418f991 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -512,14 +512,14 @@ config CRYPTO_DEV_ATMEL_SHA
  To compile this driver as a module, choose M here: the module
  will be called atmel-sha.
  
-config CRYPTO_DEV_CCP

-   bool "Support for AMD Cryptographic Coprocessor"
+config CRYPTO_DEV_SP
+   bool "Support for AMD Secure Processor"
	depends on ((X86 && PCI) || (ARM64 && (OF_ADDRESS || ACPI))) && HAS_IOMEM
help
- The AMD Cryptographic Coprocessor provides hardware offload support
- for encryption, hashing and related operations.
+ The AMD Secure Processor provides hardware offload support for memory
+ encryption in virtualization and cryptographic hashing and related operations.
  
-if CRYPTO_DEV_CCP

+if CRYPTO_DEV_SP
source "drivers/crypto/ccp/Kconfig"
  endif
  
diff --git a/drivers/crypto/ccp/Kconfig b/drivers/crypto/ccp/Kconfig

index 2238f77..bc08f03 100644
--- a/drivers/crypto/ccp/Kconfig
+++ b/drivers/crypto/ccp/Kconfig
@@ -1,26 +1,37 @@
-config CRYPTO_DEV_CCP_DD
-   tristate "Cryptographic Coprocessor device driver"
-   depends on CRYPTO_DEV_CCP
-   default m
-   select HW_RANDOM
-   select DMA_ENGINE
-   select DMADEVICES
-   select CRYPTO_SHA1
-   select CRYPTO_SHA256
-   help
- Provides the interface to use the AMD Cryptographic Coprocessor
- which can be used to offload encryption operations such as SHA,
- AES and more. If you choose 'M' here, this module will be called
- ccp.
-
  config CRYPTO_DEV_CCP_CRYPTO
tristate "Encryption and hashing offload support"
-   depends on CRYPTO_DEV_CCP_DD
+   depends on CRYPTO_DEV_SP_DD
default m
select CRYPTO_HASH
select CRYPTO_BLKCIPHER
select CRYPTO_AUTHENC
+   select CRYPTO_DEV_CCP
help
  Support for using the cryptographic API with the AMD Cryptographic
  Coprocessor. This module supports offload of SHA and AES algorithms.
  If you choose 'M' here, this module will be called ccp_crypto.
+
+config CRYPTO_DEV_SP_DD
+   tristate "Secure Processor device driver"
+   depends on CRYPTO_DEV_SP
+   default m
+   help
+ Provides the interface to use the AMD Secure Processor. The
+ AMD Secure Processor supports the Platform Security Processor (PSP)
+ and Cryptographic Coprocessor (CCP). If you choose 'M' here, this
+ module will be called ccp.
+
+if CRYPTO_DEV_SP_DD
+config CRYPTO_DEV_CCP
+   bool "Cryptographic Coprocessor interface"
+   default y
+   select HW_RANDOM
+   select DMA_ENGINE
+   select DMADEVICES
+   select CRYPTO_SHA1
+   select CRYPTO_SHA256
+   help
+ Provides the interface to use the AMD Cryptographic Coprocessor
+ which can be used to offload encryption operations such as SHA,
+ AES and more.
+endif


I think the Kconfig changes need to be looked at a bit closer. The
hierarchy of the original version is changed and the number of entries
might be able to be reduced.

Thanks,
Tom



Re: [PATCH 2] crypto: ccp - Provide a roll-back method for debugfs setup

2017-06-27 Thread Tom Lendacky

On 6/27/2017 8:57 AM, Gary R Hook wrote:

Changes since v1:
  - Remove unneeded local variable

Signed-off-by: Gary R Hook 
---
  drivers/crypto/ccp/ccp-debugfs.c |   17 -
  1 file changed, 12 insertions(+), 5 deletions(-)

diff --git a/drivers/crypto/ccp/ccp-debugfs.c b/drivers/crypto/ccp/ccp-debugfs.c
index 3cd6c83754e0..88191c45ca7d 100644
--- a/drivers/crypto/ccp/ccp-debugfs.c
+++ b/drivers/crypto/ccp/ccp-debugfs.c
@@ -305,19 +305,19 @@ void ccp5_debugfs_setup(struct ccp_device *ccp)
  
  	ccp->debugfs_instance = debugfs_create_dir(ccp->name, ccp_debugfs_dir);

if (!ccp->debugfs_instance)
-   return;
+   goto err;
  
  	debugfs_info = debugfs_create_file("info", 0400,

   ccp->debugfs_instance, ccp,
   &ccp_debugfs_info_ops);
if (!debugfs_info)
-   return;
+   goto err;
  
  	debugfs_stats = debugfs_create_file("stats", 0600,

ccp->debugfs_instance, ccp,
&ccp_debugfs_stats_ops);
if (!debugfs_stats)
-   return;
+   goto err;
  
  	for (i = 0; i < ccp->cmd_q_count; i++) {

cmd_q = &ccp->cmd_q[i];
@@ -327,15 +327,22 @@ void ccp5_debugfs_setup(struct ccp_device *ccp)
debugfs_q_instance =
debugfs_create_dir(name, ccp->debugfs_instance);
if (!debugfs_q_instance)
-   return;
+   goto err;
  
  		debugfs_q_stats =

debugfs_create_file("stats", 0600,
debugfs_q_instance, cmd_q,
&ccp_debugfs_queue_ops);
if (!debugfs_q_stats)
-   return;
+   goto err;
}
+   return;
+
+err:
+   write_lock_irqsave(&ccp_debugfs_lock, flags);
+   debugfs_remove_recursive(ccp_debugfs_dir);


This is removing the whole debugfs directory structure. Did you want to
do that or just the directory entry for this instance?  If you want the
whole directory structure you should probably hold the debugfs lock the
whole time you're creating entries.

Thanks,
Tom
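
If only this instance's entries were meant to go away, the error path
would look more like the following (a sketch, not the actual patch):

err:
	debugfs_remove_recursive(ccp->debugfs_instance);
	ccp->debugfs_instance = NULL;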


+   ccp_debugfs_dir = NULL;
+   write_unlock_irqrestore(&ccp_debugfs_lock, flags);
  }
  
  void ccp5_debugfs_destroy(void)




Re: [PATCH v2 1/3] crypto: ccp - Use devres interface to allocate PCI/iomap and cleanup

2017-06-26 Thread Tom Lendacky

On 6/23/2017 11:06 AM, Brijesh Singh wrote:

Update the pci and platform files to use the devres interface to allocate
the PCI and iomap resources. Also add helper functions to consolidate the
duplicated module init, exit and power management code.

Signed-off-by: Brijesh Singh 
---
  drivers/crypto/ccp/ccp-dev-v3.c   |   8 +++
  drivers/crypto/ccp/ccp-dev.c  |  61 
  drivers/crypto/ccp/ccp-dev.h  |   6 ++
  drivers/crypto/ccp/ccp-pci.c  | 114 +-
  drivers/crypto/ccp/ccp-platform.c |  56 ++-
  5 files changed, 107 insertions(+), 138 deletions(-)

diff --git a/drivers/crypto/ccp/ccp-dev-v3.c b/drivers/crypto/ccp/ccp-dev-v3.c
index 367c2e3..1cae5a3 100644
--- a/drivers/crypto/ccp/ccp-dev-v3.c
+++ b/drivers/crypto/ccp/ccp-dev-v3.c
@@ -586,6 +586,14 @@ static const struct ccp_actions ccp3_actions = {
.irqhandler = ccp_irq_handler,
  };
  
+const struct ccp_vdata ccpv3_platform = {

+   .version = CCP_VERSION(3, 0),
+   .setup = NULL,
+   .perform = &ccp3_actions,
+   .bar = 2,


Platform devices don't use BARs so should probably delete this (unless
you want to make it more generic and then use this value for the
IORESOURCE_MEM entry).


+   .offset = 0,
+};
+
  const struct ccp_vdata ccpv3 = {
.version = CCP_VERSION(3, 0),
.setup = NULL,
diff --git a/drivers/crypto/ccp/ccp-dev.c b/drivers/crypto/ccp/ccp-dev.c
index 2506b50..ce35e43 100644
--- a/drivers/crypto/ccp/ccp-dev.c
+++ b/drivers/crypto/ccp/ccp-dev.c
@@ -538,8 +538,69 @@ bool ccp_queues_suspended(struct ccp_device *ccp)
  
  	return ccp->cmd_q_count == suspended;

  }
+
+int ccp_dev_suspend(struct ccp_device *ccp, pm_message_t state)
+{
+   unsigned long flags;
+   unsigned int i;
+
+   spin_lock_irqsave(&ccp->cmd_lock, flags);
+
+   ccp->suspending = 1;
+
+   /* Wake all the queue kthreads to prepare for suspend */
+   for (i = 0; i < ccp->cmd_q_count; i++)
+   wake_up_process(ccp->cmd_q[i].kthread);
+
+   spin_unlock_irqrestore(&ccp->cmd_lock, flags);
+
+   /* Wait for all queue kthreads to say they're done */
+   while (!ccp_queues_suspended(ccp))
+   wait_event_interruptible(ccp->suspend_queue,
+ccp_queues_suspended(ccp));
+
+   return 0;
+}
+
+int ccp_dev_resume(struct ccp_device *ccp)
+{
+   unsigned long flags;
+   unsigned int i;
+
+   spin_lock_irqsave(&ccp->cmd_lock, flags);
+
+   ccp->suspending = 0;
+
+   /* Wake up all the kthreads */
+   for (i = 0; i < ccp->cmd_q_count; i++) {
+   ccp->cmd_q[i].suspended = 0;
+   wake_up_process(ccp->cmd_q[i].kthread);
+   }
+
+   spin_unlock_irqrestore(&ccp->cmd_lock, flags);
+
+   return 0;
+}
  #endif
  
+int ccp_dev_init(struct ccp_device *ccp)

+{
+   if (ccp->vdata->setup)
+   ccp->vdata->setup(ccp);
+
+   ccp->io_regs = ccp->io_map + ccp->vdata->offset;


This should be before the above call to setup().

Thanks,
Tom
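
That is, roughly:

	ccp->io_regs = ccp->io_map + ccp->vdata->offset;

	if (ccp->vdata->setup)
		ccp->vdata->setup(ccp);

presumably so that setup() can already rely on io_regs being valid.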


+
+   return ccp->vdata->perform->init(ccp);
+}
+
+void ccp_dev_destroy(struct ccp_device *ccp)
+{
+   if (!ccp)
+   return;
+
+   ccp->vdata->perform->destroy(ccp);
+}
+
  static int __init ccp_mod_init(void)
  {
  #ifdef CONFIG_X86


Re: [PATCH 4/4] crypto: ccp - Expand RSA support for a v5 ccp

2017-06-22 Thread Tom Lendacky

On 6/21/2017 5:48 PM, Gary R Hook wrote:

A V5 device can accommodate larger keys, as well as read the keys
directly from memory instead of requiring them to be in a local
storage block.


The previous patch already reads them from memory so just the first
part of this sentence is needed.




Signed-off-by: Gary R Hook 
---
  drivers/crypto/ccp/ccp-crypto-rsa.c |5 -
  drivers/crypto/ccp/ccp-crypto.h |1 +
  drivers/crypto/ccp/ccp-dev-v3.c |1 +
  drivers/crypto/ccp/ccp-dev-v5.c |2 ++
  drivers/crypto/ccp/ccp-dev.h|2 ++
  drivers/crypto/ccp/ccp-ops.c|3 ++-
  6 files changed, 12 insertions(+), 2 deletions(-)

diff --git a/drivers/crypto/ccp/ccp-crypto-rsa.c 
b/drivers/crypto/ccp/ccp-crypto-rsa.c
index 4a2a71463594..93e6b00ce34d 100644
--- a/drivers/crypto/ccp/ccp-crypto-rsa.c
+++ b/drivers/crypto/ccp/ccp-crypto-rsa.c
@@ -43,7 +43,10 @@ static int ccp_rsa_complete(struct crypto_async_request 
*async_req, int ret)
  
  static unsigned int ccp_rsa_maxsize(struct crypto_akcipher *tfm)

  {
-   return CCP_RSA_MAXMOD;
+   if (ccp_version() > CCP_VERSION(3, 0))
+   return CCP5_RSA_MAXMOD;
+   else
+   return CCP_RSA_MAXMOD;


The ccp_check_key_length() function in this file has a hardcoded 4096
that should be changed to use vdata value.

Thanks,
Tom


  }
  
  static int ccp_rsa_crypt(struct akcipher_request *req, bool encrypt)

diff --git a/drivers/crypto/ccp/ccp-crypto.h b/drivers/crypto/ccp/ccp-crypto.h
index 5d592ecc9af5..40598894113b 100644
--- a/drivers/crypto/ccp/ccp-crypto.h
+++ b/drivers/crypto/ccp/ccp-crypto.h
@@ -255,6 +255,7 @@ struct ccp_rsa_req_ctx {
  };
  
  #define	CCP_RSA_MAXMOD	(4 * 1024 / 8)

+#define	CCP5_RSA_MAXMOD	(16 * 1024 / 8)
  
  /* Common Context Structure */

  struct ccp_ctx {
diff --git a/drivers/crypto/ccp/ccp-dev-v3.c b/drivers/crypto/ccp/ccp-dev-v3.c
index 367c2e30656f..9b159b0a891e 100644
--- a/drivers/crypto/ccp/ccp-dev-v3.c
+++ b/drivers/crypto/ccp/ccp-dev-v3.c
@@ -592,4 +592,5 @@ static void ccp_destroy(struct ccp_device *ccp)
	.perform = &ccp3_actions,
.bar = 2,
.offset = 0x2,
+   .rsamax = CCP_RSA_MAX_WIDTH,
  };
diff --git a/drivers/crypto/ccp/ccp-dev-v5.c b/drivers/crypto/ccp/ccp-dev-v5.c
index 632518efd685..6043552322fd 100644
--- a/drivers/crypto/ccp/ccp-dev-v5.c
+++ b/drivers/crypto/ccp/ccp-dev-v5.c
@@ -1115,6 +1115,7 @@ static void ccp5other_config(struct ccp_device *ccp)
	.perform = &ccp5_actions,
.bar = 2,
.offset = 0x0,
+   .rsamax = CCP5_RSA_MAX_WIDTH,
  };
  
  const struct ccp_vdata ccpv5b = {

@@ -1124,4 +1125,5 @@ static void ccp5other_config(struct ccp_device *ccp)
	.perform = &ccp5_actions,
.bar = 2,
.offset = 0x0,
+   .rsamax = CCP5_RSA_MAX_WIDTH,
  };
diff --git a/drivers/crypto/ccp/ccp-dev.h b/drivers/crypto/ccp/ccp-dev.h
index a70154ac7405..8242cf54d90f 100644
--- a/drivers/crypto/ccp/ccp-dev.h
+++ b/drivers/crypto/ccp/ccp-dev.h
@@ -200,6 +200,7 @@
  #define CCP_SHA_SB_COUNT  1
  
  #define CCP_RSA_MAX_WIDTH		4096

+#define CCP5_RSA_MAX_WIDTH 16384
  
  #define CCP_PASSTHRU_BLOCKSIZE		256

  #define CCP_PASSTHRU_MASKSIZE 32
@@ -677,6 +678,7 @@ struct ccp_vdata {
const struct ccp_actions *perform;
const unsigned int bar;
const unsigned int offset;
+   const unsigned int rsamax;
  };
  
  extern const struct ccp_vdata ccpv3;

diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c
index 2cdd15a92178..ea5e4ede1eed 100644
--- a/drivers/crypto/ccp/ccp-ops.c
+++ b/drivers/crypto/ccp/ccp-ops.c
@@ -1737,7 +1737,8 @@ static int ccp_run_rsa_cmd(struct ccp_cmd_queue *cmd_q, 
struct ccp_cmd *cmd)
unsigned int key_size_bytes;
int ret;
  
-	if (rsa->key_size > CCP_RSA_MAX_WIDTH)

+   /* Check against the maximum allowable size, in bits */
+   if (rsa->key_size > cmd_q->ccp->vdata->rsamax)
return -EINVAL;
  
  	if (!rsa->exp || !rsa->mod || !rsa->src || !rsa->dst)
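A hedged sketch of the ccp_check_key_length() change requested earlier in
this review; the helper's exact signature here is an assumption, but the
point is that the limit comes from the version data (vdata->rsamax, in
bits) instead of a hardcoded 4096:

/* Sketch only: "max" would be the version-dependent vdata->rsamax value */
static inline bool ccp_check_key_length(unsigned int len, unsigned int max)
{
	/* len is the RSA key size in bits; 8 is an illustrative lower bound */
	return (len >= 8) && (len <= max);
}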




Re: [PATCH 3/4] crypto: ccp - Add support for RSA on the CCP

2017-06-22 Thread Tom Lendacky

On 6/21/2017 5:48 PM, Gary R Hook wrote:

Wire up the v3 CCP as a cipher provider.


The V5 support will be invoked through this also.  Maybe something like:

Wire up the CCP as an RSA cipher provider.



Signed-off-by: Gary R Hook 
---
  drivers/crypto/ccp/Makefile  |1
  drivers/crypto/ccp/ccp-crypto-main.c |   21 ++
  drivers/crypto/ccp/ccp-crypto-rsa.c  |  286 ++
  drivers/crypto/ccp/ccp-crypto.h  |   31 
  drivers/crypto/ccp/ccp-debugfs.c |1
  drivers/crypto/ccp/ccp-dev.c |1
  drivers/crypto/ccp/ccp-ops.c |2
  include/linux/ccp.h  |1
  8 files changed, 341 insertions(+), 3 deletions(-)
  create mode 100644 drivers/crypto/ccp/ccp-crypto-rsa.c

diff --git a/drivers/crypto/ccp/Makefile b/drivers/crypto/ccp/Makefile
index 59493fd3a751..439bc2fcb464 100644
--- a/drivers/crypto/ccp/Makefile
+++ b/drivers/crypto/ccp/Makefile
@@ -15,4 +15,5 @@ ccp-crypto-objs := ccp-crypto-main.o \
   ccp-crypto-aes-xts.o \
   ccp-crypto-aes-galois.o \
   ccp-crypto-des3.o \
+  ccp-crypto-rsa.o \
   ccp-crypto-sha.o
diff --git a/drivers/crypto/ccp/ccp-crypto-main.c 
b/drivers/crypto/ccp/ccp-crypto-main.c
index 8dccbddabef1..dd7d00c680e7 100644
--- a/drivers/crypto/ccp/ccp-crypto-main.c
+++ b/drivers/crypto/ccp/ccp-crypto-main.c
@@ -17,6 +17,7 @@
  #include 
  #include 
  #include 
+#include 
  
  #include "ccp-crypto.h"
  
@@ -37,10 +38,15 @@

  module_param(des3_disable, uint, 0444);
  MODULE_PARM_DESC(des3_disable, "Disable use of 3DES - any non-zero value");
  
+static unsigned int rsa_disable;

+module_param(rsa_disable, uint, 0444);
+MODULE_PARM_DESC(rsa_disable, "Disable use of RSA - any non-zero value");
+
  /* List heads for the supported algorithms */
  static LIST_HEAD(hash_algs);
  static LIST_HEAD(cipher_algs);
  static LIST_HEAD(aead_algs);
+static LIST_HEAD(akcipher_algs);
  
  /* For any tfm, requests for that tfm must be returned on the order

   * received.  With multiple queues available, the CCP can process more
@@ -358,6 +364,14 @@ static int ccp_register_algs(void)
return ret;
}
  
+	if (!rsa_disable) {

+   ret = ccp_register_rsa_algs(&akcipher_algs);
+   if (ret) {
+   rsa_disable = 1;


Not sure what this does...  The return of the error code will cause the
init to fail and unregister everything. This path won't be taken again
to make use of the change in value.


+   return ret;
+   }
+   }
+
return 0;
  }
  
@@ -366,6 +380,7 @@ static void ccp_unregister_algs(void)

struct ccp_crypto_ahash_alg *ahash_alg, *ahash_tmp;
struct ccp_crypto_ablkcipher_alg *ablk_alg, *ablk_tmp;
struct ccp_crypto_aead *aead_alg, *aead_tmp;
+   struct ccp_crypto_akcipher_alg *akc_alg, *akc_tmp;
  
  	list_for_each_entry_safe(ahash_alg, ahash_tmp, &hash_algs, entry) {

crypto_unregister_ahash(&ahash_alg->alg);
@@ -384,6 +399,12 @@ static void ccp_unregister_algs(void)
list_del(&aead_alg->entry);
kfree(aead_alg);
}
+
+   list_for_each_entry_safe(akc_alg, akc_tmp, &akcipher_algs, entry) {
+   crypto_unregister_akcipher(&akc_alg->alg);
+   list_del(&akc_alg->entry);
+   kfree(akc_alg);
+   }
  }
  
  static int ccp_crypto_init(void)

diff --git a/drivers/crypto/ccp/ccp-crypto-rsa.c 
b/drivers/crypto/ccp/ccp-crypto-rsa.c
new file mode 100644
index ..4a2a71463594
--- /dev/null
+++ b/drivers/crypto/ccp/ccp-crypto-rsa.c
@@ -0,0 +1,286 @@
+/*
+ * AMD Cryptographic Coprocessor (CCP) RSA crypto API support
+ *
+ * Copyright (C) 2016 Advanced Micro Devices, Inc.
+ *
+ * Author: Gary R Hook 
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+
+#include "ccp-crypto.h"
+
+static inline struct akcipher_request *akcipher_request_cast(
+   struct crypto_async_request *req)
+{
+   return container_of(req, struct akcipher_request, base);
+}
+
+static int ccp_rsa_complete(struct crypto_async_request *async_req, int ret)
+{
+   struct akcipher_request *req = akcipher_request_cast(async_req);
+   struct ccp_rsa_req_ctx *rctx = akcipher_request_ctx(req);
+
+   if (!ret)
+   req->dst_len = rctx->cmd.u.rsa.key_size >> 3;
+
+   ret = 0;
+
+   return ret;


This seems odd.  You should probably make this similar to the other CCP
complete functions:

if (ret)
return ret;

req->dst_len = ...

return 0;


+}
+
+static unsigned int ccp_rsa_maxsize(struct crypto_akcipher *tfm)
+{
+   return 

Re: [PATCH 1/4] crypto: ccp - Fix base RSA function for version 5 CCPs

2017-06-22 Thread Tom Lendacky

On 6/21/2017 5:47 PM, Gary R Hook wrote:

Version 5 devices have requirements for buffer lengths, as well as
parameter format (e.g. bits vs. bytes). Fix the base CCP driver
code to meet the requirements of all supported versions.

Signed-off-by: Gary R Hook 
---
  drivers/crypto/ccp/ccp-dev-v5.c |   10 ++--
  drivers/crypto/ccp/ccp-ops.c|   95 ---
  2 files changed, 64 insertions(+), 41 deletions(-)

diff --git a/drivers/crypto/ccp/ccp-dev-v5.c b/drivers/crypto/ccp/ccp-dev-v5.c
index b10d2d2075cb..632518efd685 100644
--- a/drivers/crypto/ccp/ccp-dev-v5.c
+++ b/drivers/crypto/ccp/ccp-dev-v5.c
@@ -469,7 +469,7 @@ static int ccp5_perform_rsa(struct ccp_op *op)
CCP5_CMD_PROT(&desc) = 0;
  
  	function.raw = 0;

-   CCP_RSA_SIZE(&function) = op->u.rsa.mod_size >> 3;
+   CCP_RSA_SIZE(&function) = (op->u.rsa.mod_size + 7) >> 3;
CCP5_CMD_FUNCTION(&desc) = function.raw;
  
  	CCP5_CMD_LEN(&desc) = op->u.rsa.input_len;

@@ -484,10 +484,10 @@ static int ccp5_perform_rsa(struct ccp_op *op)
CCP5_CMD_DST_HI(&desc) = ccp_addr_hi(&op->dst.u.dma);
CCP5_CMD_DST_MEM(&desc) = CCP_MEMTYPE_SYSTEM;
  
-	/* Exponent is in LSB memory */

-   CCP5_CMD_KEY_LO(&desc) = op->sb_key * LSB_ITEM_SIZE;
-   CCP5_CMD_KEY_HI(&desc) = 0;
-   CCP5_CMD_KEY_MEM(&desc) = CCP_MEMTYPE_SB;
+   /* Key (Exponent) is in external memory */
+   CCP5_CMD_KEY_LO(&desc) = ccp_addr_lo(&op->exp.u.dma);
+   CCP5_CMD_KEY_HI(&desc) = ccp_addr_hi(&op->exp.u.dma);
+   CCP5_CMD_KEY_MEM(&desc) = CCP_MEMTYPE_SYSTEM;
  
  	return ccp5_do_cmd(&desc, op->cmd_q);

  }
diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c
index c0dfdacbdff5..11155e52c52c 100644
--- a/drivers/crypto/ccp/ccp-ops.c
+++ b/drivers/crypto/ccp/ccp-ops.c
@@ -1731,10 +1731,10 @@ static int ccp_run_sha_cmd(struct ccp_cmd_queue *cmd_q, 
struct ccp_cmd *cmd)
  static int ccp_run_rsa_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
  {
struct ccp_rsa_engine *rsa = >u.rsa;
-   struct ccp_dm_workarea exp, src;
-   struct ccp_data dst;
+   struct ccp_dm_workarea exp, src, dst;
struct ccp_op op;
unsigned int sb_count, i_len, o_len;
+   unsigned int key_size_bytes;
int ret;
  
  	if (rsa->key_size > CCP_RSA_MAX_WIDTH)

@@ -1743,31 +1743,41 @@ static int ccp_run_rsa_cmd(struct ccp_cmd_queue *cmd_q, 
struct ccp_cmd *cmd)
if (!rsa->exp || !rsa->mod || !rsa->src || !rsa->dst)
return -EINVAL;
  
-	/* The RSA modulus must precede the message being acted upon, so

-* it must be copied to a DMA area where the message and the
-* modulus can be concatenated.  Therefore the input buffer
-* length required is twice the output buffer length (which
-* must be a multiple of 256-bits).
-*/
-   o_len = ((rsa->key_size + 255) / 256) * 32;
-   i_len = o_len * 2;
-
-   sb_count = o_len / CCP_SB_BYTES;
-
memset(&op, 0, sizeof(op));
op.cmd_q = cmd_q;
-   op.jobid = ccp_gen_jobid(cmd_q->ccp);
-   op.sb_key = cmd_q->ccp->vdata->perform->sballoc(cmd_q, sb_count);
+   op.jobid = CCP_NEW_JOBID(cmd_q->ccp);


This change isn't related to RSA support, should be a separate patch.

  
-	if (!op.sb_key)

-   return -EIO;
+   /* Compute o_len, i_len in bytes. */
+   if (cmd_q->ccp->vdata->version < CCP_VERSION(5, 0)) {
+   /* The RSA modulus must precede the message being acted upon, so
+* it must be copied to a DMA area where the message and the
+* modulus can be concatenated.  Therefore the input buffer
+* length required is twice the output buffer length (which
+* must be a multiple of 256-bits). sb_count is the
+* number of storage block slots required for the modulus
+*/
+   key_size_bytes = (rsa->key_size + 7) >> 3;
+   o_len = ((rsa->key_size + 255) / 256) * CCP_SB_BYTES;


This calculation shouldn't change the "32" to CCP_SB_BYTES.  This is
purely to get the 256-bit alignment.


+   i_len = key_size_bytes * 2;


This violates the comment above: key_size_bytes is byte-aligned vs. the
required 256-bit (32-byte) alignment.  i_len should stay as o_len * 2.
Should key_size_bytes be moved down and set to o_len for this path?


+
+   sb_count = o_len / CCP_SB_BYTES;
+
+   op.sb_key = cmd_q->ccp->vdata->perform->sballoc(cmd_q,
+   sb_count);
+   if (!op.sb_key)
+   return -EIO;
+   } else {
+   /* A version 5 device allows a modulus size that will not fit
+* in the LSB, so the command will transfer it from memory.
+* But more importantly, the buffer sizes must be a multiple
+* of 32 bytes; rounding up may be required.
+*/
+   key_size_bytes = 32 * ((rsa->key_size + 255) / 
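Putting the two review comments together, a hedged sketch of how the v3
branch might end up (keeping the bare 32 for the 256-bit alignment and
deriving key_size_bytes from o_len):

	/* Output length must be a multiple of 256 bits (32 bytes) */
	o_len = ((rsa->key_size + 255) / 256) * 32;
	/* Input buffer holds modulus + message: twice the output length */
	i_len = o_len * 2;
	/* The key copy uses the same 256-bit-aligned length on this path */
	key_size_bytes = o_len;

	sb_count = o_len / CCP_SB_BYTES;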

Re: [RFC PATCH v2 08/32] x86: Use PAGE_KERNEL protection for ioremap of memory page

2017-03-17 Thread Tom Lendacky

On 3/16/2017 3:04 PM, Tom Lendacky wrote:

On 3/7/2017 8:59 AM, Borislav Petkov wrote:

On Thu, Mar 02, 2017 at 10:13:32AM -0500, Brijesh Singh wrote:

From: Tom Lendacky <thomas.lenda...@amd.com>

In order for memory pages to be properly mapped when SEV is active, we
need to use the PAGE_KERNEL protection attribute as the base protection.
This will ensure that memory mapping of, e.g. ACPI tables, receives the
proper mapping attributes.

Signed-off-by: Tom Lendacky <thomas.lenda...@amd.com>
---



diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index c400ab5..481c999 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -151,7 +151,15 @@ static void __iomem
*__ioremap_caller(resource_size_t phys_addr,
pcm = new_pcm;
}

+   /*
+* If the page being mapped is in memory and SEV is active then
+* make sure the memory encryption attribute is enabled in the
+* resulting mapping.
+*/
prot = PAGE_KERNEL_IO;
+   if (sev_active() && page_is_mem(pfn))


Hmm, a resource tree walk per ioremap call. This could get expensive for
ioremap-heavy workloads.

__ioremap_caller() gets called here during boot 55 times so not a whole
lot but I wouldn't be surprised if there were some nasty use cases which
ioremap a lot.

...


diff --git a/kernel/resource.c b/kernel/resource.c
index 9b5f044..db56ba3 100644
--- a/kernel/resource.c
+++ b/kernel/resource.c
@@ -518,6 +518,46 @@ int __weak page_is_ram(unsigned long pfn)
 }
 EXPORT_SYMBOL_GPL(page_is_ram);

+/*
+ * This function returns true if the target memory is marked as
+ * IORESOURCE_MEM and IORESOURCE_BUSY and described as other than
+ * IORES_DESC_NONE (e.g. IORES_DESC_ACPI_TABLES).
+ */
+static int walk_mem_range(unsigned long start_pfn, unsigned long
nr_pages)
+{
+struct resource res;
+unsigned long pfn, end_pfn;
+u64 orig_end;
+int ret = -1;
+
+res.start = (u64) start_pfn << PAGE_SHIFT;
+res.end = ((u64)(start_pfn + nr_pages) << PAGE_SHIFT) - 1;
+res.flags = IORESOURCE_MEM | IORESOURCE_BUSY;
+orig_end = res.end;
+while ((res.start < res.end) &&
+(find_next_iomem_res(&res, IORES_DESC_NONE, true) >= 0)) {
+pfn = (res.start + PAGE_SIZE - 1) >> PAGE_SHIFT;
+end_pfn = (res.end + 1) >> PAGE_SHIFT;
+if (end_pfn > pfn)
+ret = (res.desc != IORES_DESC_NONE) ? 1 : 0;
+if (ret)
+break;
+res.start = res.end + 1;
+res.end = orig_end;
+}
+return ret;
+}


So the relevant difference between this one and walk_system_ram_range()
is this:

-ret = (*func)(pfn, end_pfn - pfn, arg);
+ret = (res.desc != IORES_DESC_NONE) ? 1 : 0;

so it seems to me you can have your own *func() pointer which does that
IORES_DESC_NONE comparison. And then you can define your own workhorse
__walk_memory_range() which gets called by both walk_mem_range() and
walk_system_ram_range() instead of almost duplicating them.

And looking at walk_system_ram_res(), that one looks similar too except
the pfn computation. But AFAICT the pfn/end_pfn things are computed from
res.start and res.end so it looks to me like all those three functions
are crying for unification...


I'll take a look at what it takes to consolidate these with a pre-patch.
Then I'll add the new support.


It looks pretty straight forward to combine walk_iomem_res_desc() and
walk_system_ram_res(). The walk_system_ram_range() function would fit
easily into this, also, except for the fact that the callback function
takes unsigned longs vs the u64s of the other functions.  Is it worth
modifying all of the callers of walk_system_ram_range() (which are only
about 8 locations) to change the callback functions to accept u64s in
order to consolidate the walk_system_ram_range() function, too?

Thanks,
Tom



Thanks,
Tom
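For reference, a rough sketch of the unification being suggested (the name
and signature are illustrative, not an existing kernel API): one workhorse
that walks matching iomem resources and applies a caller-supplied callback,
with walk_mem_range() and walk_system_ram_res() reduced to thin wrappers.

static int __walk_iomem_res(u64 start, u64 end, unsigned long flags,
			    unsigned long desc, void *arg,
			    int (*func)(struct resource *res, void *arg))
{
	struct resource res;
	int ret = -1;

	res.start = start;
	res.end = end;
	res.flags = flags;

	while ((res.start < res.end) &&
	       (find_next_iomem_res(&res, desc, true) >= 0)) {
		/* Let the caller decide what each matching range means */
		ret = (*func)(&res, arg);
		if (ret)
			break;
		res.start = res.end + 1;
		res.end = end;
	}

	return ret;
}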





Re: [RFC PATCH v2 08/32] x86: Use PAGE_KERNEL protection for ioremap of memory page

2017-03-17 Thread Tom Lendacky

On 3/17/2017 9:32 AM, Tom Lendacky wrote:

On 3/16/2017 3:04 PM, Tom Lendacky wrote:

On 3/7/2017 8:59 AM, Borislav Petkov wrote:

On Thu, Mar 02, 2017 at 10:13:32AM -0500, Brijesh Singh wrote:

From: Tom Lendacky <thomas.lenda...@amd.com>

In order for memory pages to be properly mapped when SEV is active, we
need to use the PAGE_KERNEL protection attribute as the base
protection.
This will ensure that memory mapping of, e.g. ACPI tables, receives the
proper mapping attributes.

Signed-off-by: Tom Lendacky <thomas.lenda...@amd.com>
---



diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index c400ab5..481c999 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -151,7 +151,15 @@ static void __iomem
*__ioremap_caller(resource_size_t phys_addr,
pcm = new_pcm;
}

+   /*
+* If the page being mapped is in memory and SEV is active then
+* make sure the memory encryption attribute is enabled in the
+* resulting mapping.
+*/
prot = PAGE_KERNEL_IO;
+   if (sev_active() && page_is_mem(pfn))


Hmm, a resource tree walk per ioremap call. This could get expensive for
ioremap-heavy workloads.

__ioremap_caller() gets called here during boot 55 times so not a whole
lot but I wouldn't be surprised if there were some nasty use cases which
ioremap a lot.

...


diff --git a/kernel/resource.c b/kernel/resource.c
index 9b5f044..db56ba3 100644
--- a/kernel/resource.c
+++ b/kernel/resource.c
@@ -518,6 +518,46 @@ int __weak page_is_ram(unsigned long pfn)
 }
 EXPORT_SYMBOL_GPL(page_is_ram);

+/*
+ * This function returns true if the target memory is marked as
+ * IORESOURCE_MEM and IORESOURCE_BUSY and described as other than
+ * IORES_DESC_NONE (e.g. IORES_DESC_ACPI_TABLES).
+ */
+static int walk_mem_range(unsigned long start_pfn, unsigned long
nr_pages)
+{
+struct resource res;
+unsigned long pfn, end_pfn;
+u64 orig_end;
+int ret = -1;
+
+res.start = (u64) start_pfn << PAGE_SHIFT;
+res.end = ((u64)(start_pfn + nr_pages) << PAGE_SHIFT) - 1;
+res.flags = IORESOURCE_MEM | IORESOURCE_BUSY;
+orig_end = res.end;
+while ((res.start < res.end) &&
+(find_next_iomem_res(&res, IORES_DESC_NONE, true) >= 0)) {
+pfn = (res.start + PAGE_SIZE - 1) >> PAGE_SHIFT;
+end_pfn = (res.end + 1) >> PAGE_SHIFT;
+if (end_pfn > pfn)
+ret = (res.desc != IORES_DESC_NONE) ? 1 : 0;
+if (ret)
+break;
+res.start = res.end + 1;
+res.end = orig_end;
+}
+return ret;
+}


So the relevant difference between this one and walk_system_ram_range()
is this:

-ret = (*func)(pfn, end_pfn - pfn, arg);
+ret = (res.desc != IORES_DESC_NONE) ? 1 : 0;

so it seems to me you can have your own *func() pointer which does that
IORES_DESC_NONE comparison. And then you can define your own workhorse
__walk_memory_range() which gets called by both walk_mem_range() and
walk_system_ram_range() instead of almost duplicating them.

And looking at walk_system_ram_res(), that one looks similar too except
the pfn computation. But AFAICT the pfn/end_pfn things are computed from
res.start and res.end so it looks to me like all those three functions
are crying for unification...


I'll take a look at what it takes to consolidate these with a pre-patch.
Then I'll add the new support.


It looks pretty straightforward to combine walk_iomem_res_desc() and
walk_system_ram_res(). The walk_system_ram_range() function would fit
easily into this, also, except for the fact that the callback function
takes unsigned longs vs the u64s of the other functions.  Is it worth
modifying all of the callers of walk_system_ram_range() (which are only
about 8 locations) to change the callback functions to accept u64s in
order to consolidate the walk_system_ram_range() function, too?


The more I dig, the more I find that the changes keep expanding. I'll
leave walk_system_ram_range() out of the consolidation for now.

Thanks,
Tom



Thanks,
Tom



Thanks,
Tom





Re: [RFC PATCH v2 08/32] x86: Use PAGE_KERNEL protection for ioremap of memory page

2017-03-16 Thread Tom Lendacky

On 3/7/2017 8:59 AM, Borislav Petkov wrote:

On Thu, Mar 02, 2017 at 10:13:32AM -0500, Brijesh Singh wrote:

From: Tom Lendacky <thomas.lenda...@amd.com>

In order for memory pages to be properly mapped when SEV is active, we
need to use the PAGE_KERNEL protection attribute as the base protection.
This will ensure that memory mapping of, e.g. ACPI tables, receives the
proper mapping attributes.

Signed-off-by: Tom Lendacky <thomas.lenda...@amd.com>
---



diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index c400ab5..481c999 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -151,7 +151,15 @@ static void __iomem *__ioremap_caller(resource_size_t 
phys_addr,
pcm = new_pcm;
}

+   /*
+* If the page being mapped is in memory and SEV is active then
+* make sure the memory encryption attribute is enabled in the
+* resulting mapping.
+*/
prot = PAGE_KERNEL_IO;
+   if (sev_active() && page_is_mem(pfn))


Hmm, a resource tree walk per ioremap call. This could get expensive for
ioremap-heavy workloads.

__ioremap_caller() gets called here during boot 55 times so not a whole
lot but I wouldn't be surprised if there were some nasty use cases which
ioremap a lot.

...


diff --git a/kernel/resource.c b/kernel/resource.c
index 9b5f044..db56ba3 100644
--- a/kernel/resource.c
+++ b/kernel/resource.c
@@ -518,6 +518,46 @@ int __weak page_is_ram(unsigned long pfn)
 }
 EXPORT_SYMBOL_GPL(page_is_ram);

+/*
+ * This function returns true if the target memory is marked as
+ * IORESOURCE_MEM and IORESOURCE_BUSY and described as other than
+ * IORES_DESC_NONE (e.g. IORES_DESC_ACPI_TABLES).
+ */
+static int walk_mem_range(unsigned long start_pfn, unsigned long nr_pages)
+{
+   struct resource res;
+   unsigned long pfn, end_pfn;
+   u64 orig_end;
+   int ret = -1;
+
+   res.start = (u64) start_pfn << PAGE_SHIFT;
+   res.end = ((u64)(start_pfn + nr_pages) << PAGE_SHIFT) - 1;
+   res.flags = IORESOURCE_MEM | IORESOURCE_BUSY;
+   orig_end = res.end;
+   while ((res.start < res.end) &&
+   (find_next_iomem_res(&res, IORES_DESC_NONE, true) >= 0)) {
+   pfn = (res.start + PAGE_SIZE - 1) >> PAGE_SHIFT;
+   end_pfn = (res.end + 1) >> PAGE_SHIFT;
+   if (end_pfn > pfn)
+   ret = (res.desc != IORES_DESC_NONE) ? 1 : 0;
+   if (ret)
+   break;
+   res.start = res.end + 1;
+   res.end = orig_end;
+   }
+   return ret;
+}


So the relevant difference between this one and walk_system_ram_range()
is this:

-   ret = (*func)(pfn, end_pfn - pfn, arg);
+   ret = (res.desc != IORES_DESC_NONE) ? 1 : 0;

so it seems to me you can have your own *func() pointer which does that
IORES_DESC_NONE comparison. And then you can define your own workhorse
__walk_memory_range() which gets called by both walk_mem_range() and
walk_system_ram_range() instead of almost duplicating them.

And looking at walk_system_ram_res(), that one looks similar too except
the pfn computation. But AFAICT the pfn/end_pfn things are computed from
res.start and res.end so it looks to me like all those three functions
are crying for unification...


I'll take a look at what it takes to consolidate these with a pre-patch. 
Then I'll add the new support.


Thanks,
Tom





Re: [RFC PATCH v2 05/32] x86: Use encrypted access of BOOT related data with SEV

2017-03-16 Thread Tom Lendacky

On 3/7/2017 5:09 AM, Borislav Petkov wrote:

On Thu, Mar 02, 2017 at 10:12:59AM -0500, Brijesh Singh wrote:

From: Tom Lendacky <thomas.lenda...@amd.com>

When Secure Encrypted Virtualization (SEV) is active, BOOT data (such as
EFI related data, setup data) is encrypted and needs to be accessed as
such when mapped. Update the architecture override in early_memremap to
keep the encryption attribute when mapping this data.


This could also explain why persistent memory needs to be accessed
decrypted with SEV.


I'll add some comments about why persistent memory needs to be accessed
decrypted (because the encryption key changes across reboots) for both
SME and SEV.



In general, say what the difference in that aspect is with respect to SME. And
I'd write that in the comment over the function. And not say "E820 areas
are checked in making this determination." because that is visible but
say *why* we need to check those ranges and determine access depending
on their type.


Will do.

Thanks,
Tom





Re: [RFC PATCH v2 12/32] x86: Add early boot support when running with SEV active

2017-03-16 Thread Tom Lendacky

On 3/16/2017 10:09 AM, Borislav Petkov wrote:

On Thu, Mar 16, 2017 at 09:28:58AM -0500, Tom Lendacky wrote:

Because there are differences between how SME and SEV behave
(instruction fetches are always decrypted under SEV, DMA to an
encrypted location is not supported under SEV, etc.) we need to
determine which mode we are in so that things can be setup properly
during boot. For example, if SEV is active the kernel will already
be encrypted and so we don't perform that step or the trampoline area
for bringing up an AP must be decrypted for SME but encrypted for SEV.


So with SEV enabled, it seems to me a guest doesn't know anything about
encryption and can run as if SME is disabled. So sme_active() will be
false. And then the kernel can bypass all that code dealing with SME.

So a guest should simply run like on baremetal with no SME, IMHO.



Not quite. The guest still needs to understand the encryption mask
so that it can protect memory by setting the encryption mask in the
pagetable entries.  It can also decide when to share memory with the
hypervisor by not setting the encryption mask in the pagetable entries.


But then there's that part: "instruction fetches are always decrypted
under SEV". What does that mean exactly? And how much of that code can


"Instruction fetches are always decrypted under SEV" means that,
regardless of how a virtual address is mapped, encrypted or decrypted,
if an instruction fetch is performed by the CPU from that address it
will always be decrypted. This is to prevent the hypervisor from
injecting executable code into the guest, since the injected code would
have to consist of valid encrypted instructions.


be reused so that

* SME on baremetal
* SEV on guest

use the same logic?


There are many areas that use the same logic, but there are certain
situations where we need to check between SME vs SEV (e.g. DMA operation
setup or decrypting the trampoline area) and act accordingly.

Thanks,
Tom



Having the larger SEV preparation part on the kvm host side is perfectly
fine. But I'd like to keep kernel initialization paths clean.

Thanks.



Re: [RFC PATCH v2 12/32] x86: Add early boot support when running with SEV active

2017-03-16 Thread Tom Lendacky

On 3/16/2017 5:16 AM, Borislav Petkov wrote:

On Fri, Mar 10, 2017 at 10:35:30AM -0600, Brijesh Singh wrote:

We could update this patch to use the below logic:

 * CPUID(0) - Check for AuthenticAMD
 * CPUID(1) - Check if under hypervisor
 * CPUID(0x8000_0000) - Check for highest supported leaf
 * CPUID(0x8000_001F).EAX - Check for SME and SEV support
 * rdmsr (MSR_K8_SYSCFG)[MemEncryptionModeEnc] - Check if SMEE is set


Actually, it is still not clear to me *why* we need to do anything
special wrt SEV in the guest.

Lemme clarify: why can't the guest boot just like a normal Linux on
baremetal and use the SME(!) detection code to set sme_enable and so
on? IOW, I'd like to avoid all those checks whether we're running under
hypervisor and handle all that like we're running on baremetal.


Because there are differences between how SME and SEV behave
(instruction fetches are always decrypted under SEV, DMA to an
encrypted location is not supported under SEV, etc.) we need to
determine which mode we are in so that things can be setup properly
during boot. For example, if SEV is active the kernel will already
be encrypted and so we don't perform that step or the trampoline area
for bringing up an AP must be decrypted for SME but encrypted for SEV.
The hypervisor check will provide that ability to determine how we
handle things.

Thanks,
Tom





Re: [RFC PATCH v2 06/32] x86/pci: Use memremap when walking setup data

2017-03-13 Thread Tom Lendacky

On 3/6/2017 6:03 PM, Bjorn Helgaas wrote:

On Fri, Mar 03, 2017 at 03:15:34PM -0600, Tom Lendacky wrote:

On 3/3/2017 2:42 PM, Bjorn Helgaas wrote:

On Thu, Mar 02, 2017 at 10:13:10AM -0500, Brijesh Singh wrote:

From: Tom Lendacky <thomas.lenda...@amd.com>

The use of ioremap will force the setup data to be mapped decrypted even
though setup data is encrypted.  Switch to using memremap which will be
able to perform the proper mapping.


How should callers decide whether to use ioremap() or memremap()?

memremap() existed before SME and SEV, and this code is used even if
SME and SEV aren't supported, so the rationale for this change should
not need the decryption argument.


When SME or SEV is active an ioremap() will remove the encryption bit
from the pagetable entry when it is mapped.  This allows MMIO, which
doesn't support SME/SEV, to be performed successfully.  So my take is
that ioremap() should be used for MMIO and memremap() for pages in RAM.


OK, thanks.  The commit message should say something like "this is
RAM, not MMIO, so we should map it with memremap(), not ioremap()".
That's the part that determines whether the change is correct.

You can mention the encryption part, too, but it's definitely
secondary because the change has to make sense on its own, without
SME/SEV.



Ok, that makes sense, will do.


The following commits (from https://github.com/codomania/tip/branches)
all do basically the same thing so the changelogs (and summaries)
should all be basically the same:

  cb0d0d1eb0a6 x86: Change early_ioremap to early_memremap for BOOT data
  91acb68b8333 x86/pci: Use memremap when walking setup data
  4f687503e23f x86: Access the setup data through sysfs decrypted
  e90246b8c229 x86: Access the setup data through debugfs decrypted

I would collect them all together and move them to the beginning of
your series, since they don't depend on anything else.


I'll do that.



Also, change "x86/pci: " to "x86/PCI" so it matches the previous
convention.


Will do.

Thanks,
Tom




Signed-off-by: Tom Lendacky <thomas.lenda...@amd.com>


Acked-by: Bjorn Helgaas <bhelg...@google.com>


---
arch/x86/pci/common.c |4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/arch/x86/pci/common.c b/arch/x86/pci/common.c
index a4fdfa7..0b06670 100644
--- a/arch/x86/pci/common.c
+++ b/arch/x86/pci/common.c
@@ -691,7 +691,7 @@ int pcibios_add_device(struct pci_dev *dev)

pa_data = boot_params.hdr.setup_data;
while (pa_data) {
-   data = ioremap(pa_data, sizeof(*rom));
+   data = memremap(pa_data, sizeof(*rom), MEMREMAP_WB);


I can't quite connect the dots here.  ioremap() on x86 would do
ioremap_nocache().  memremap(MEMREMAP_WB) would do arch_memremap_wb(),
which is ioremap_cache().  Is making a cacheable mapping the important
difference?


The memremap(MEMREMAP_WB) will actually check to see if it can perform
a __va(pa_data) in try_ram_remap() and then fall back to the
arch_memremap_wb().  So it's actually the __va() vs the ioremap_cache()
that is the difference.

Thanks,
Tom




if (!data)
return -ENOMEM;

@@ -710,7 +710,7 @@ int pcibios_add_device(struct pci_dev *dev)
}
}
pa_data = data->next;
-   iounmap(data);
+   memunmap(data);
}
set_dma_domain_ops(dev);
set_dev_domain_options(dev);



Re: [RFC PATCH v2 06/32] x86/pci: Use memremap when walking setup data

2017-03-03 Thread Tom Lendacky

On 3/3/2017 2:42 PM, Bjorn Helgaas wrote:

On Thu, Mar 02, 2017 at 10:13:10AM -0500, Brijesh Singh wrote:

From: Tom Lendacky <thomas.lenda...@amd.com>

The use of ioremap will force the setup data to be mapped decrypted even
though setup data is encrypted.  Switch to using memremap which will be
able to perform the proper mapping.


How should callers decide whether to use ioremap() or memremap()?

memremap() existed before SME and SEV, and this code is used even if
SME and SEV aren't supported, so the rationale for this change should
not need the decryption argument.


When SME or SEV is active an ioremap() will remove the encryption bit
from the pagetable entry when it is mapped.  This allows MMIO, which
doesn't support SME/SEV, to be performed successfully.  So my take is
that ioremap() should be used for MMIO and memremap() for pages in RAM.




Signed-off-by: Tom Lendacky <thomas.lenda...@amd.com>
---
 arch/x86/pci/common.c |4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/arch/x86/pci/common.c b/arch/x86/pci/common.c
index a4fdfa7..0b06670 100644
--- a/arch/x86/pci/common.c
+++ b/arch/x86/pci/common.c
@@ -691,7 +691,7 @@ int pcibios_add_device(struct pci_dev *dev)

pa_data = boot_params.hdr.setup_data;
while (pa_data) {
-   data = ioremap(pa_data, sizeof(*rom));
+   data = memremap(pa_data, sizeof(*rom), MEMREMAP_WB);


I can't quite connect the dots here.  ioremap() on x86 would do
ioremap_nocache().  memremap(MEMREMAP_WB) would do arch_memremap_wb(),
which is ioremap_cache().  Is making a cacheable mapping the important
difference?


The memremap(MEMREMAP_WB) will actually check to see if it can perform
a __va(pa_data) in try_ram_remap() and then fall back to the
arch_memremap_wb().  So it's actually the __va() vs the ioremap_cache()
that is the difference.

Thanks,
Tom




if (!data)
return -ENOMEM;

@@ -710,7 +710,7 @@ int pcibios_add_device(struct pci_dev *dev)
}
}
pa_data = data->next;
-   iounmap(data);
+   memunmap(data);
}
set_dma_domain_ops(dev);
set_dev_domain_options(dev);



Re: [PATCH 2/6] crypto: ccp - Remove unneeded sign-extension support

2016-10-13 Thread Tom Lendacky
On 10/13/2016 09:53 AM, Gary R Hook wrote:
> The reverse-get/set functions can be simplified by
> eliminating unused code.
> 
> 
> Signed-off-by: Gary R Hook 
> ---
>  drivers/crypto/ccp/ccp-ops.c |  145 
> +-
>  1 file changed, 59 insertions(+), 86 deletions(-)
> 
> diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c
> index 8fedb14..82cc637 100644
> --- a/drivers/crypto/ccp/ccp-ops.c
> +++ b/drivers/crypto/ccp/ccp-ops.c
> @@ -198,62 +198,46 @@ static void ccp_get_dm_area(struct ccp_dm_workarea *wa, 
> unsigned int wa_offset,
>  }
>  
>  static int ccp_reverse_set_dm_area(struct ccp_dm_workarea *wa,
> +unsigned int wa_offset,
>  struct scatterlist *sg,
> -unsigned int len, unsigned int se_len,
> -bool sign_extend)
> +unsigned int sg_offset,
> +unsigned int len)
>  {
> - unsigned int nbytes, sg_offset, dm_offset, sb_len, i;
> - u8 buffer[CCP_REVERSE_BUF_SIZE];
> -
> - if (WARN_ON(se_len > sizeof(buffer)))
> - return -EINVAL;
> -
> - sg_offset = len;
> - dm_offset = 0;
> - nbytes = len;
> - while (nbytes) {
> - sb_len = min_t(unsigned int, nbytes, se_len);
> - sg_offset -= sb_len;
> -
> - scatterwalk_map_and_copy(buffer, sg, sg_offset, sb_len, 0);
> - for (i = 0; i < sb_len; i++)
> - wa->address[dm_offset + i] = buffer[sb_len - i - 1];
> -
> - dm_offset += sb_len;
> - nbytes -= sb_len;
> -
> - if ((sb_len != se_len) && sign_extend) {
> - /* Must sign-extend to nearest sign-extend length */
> - if (wa->address[dm_offset - 1] & 0x80)
> - memset(wa->address + dm_offset, 0xff,
> -se_len - sb_len);
> - }
> + u8 *p, *q;
> +
> + ccp_set_dm_area(wa, wa_offset, sg, sg_offset, len);
> +
> + p = wa->address + wa_offset;
> + q = p + len - 1;
> + while (p < q) {
> + *p = *p ^ *q;
> + *q = *p ^ *q;
> + *p = *p ^ *q;
> + p++;
> + q--;
>   }
> -
>   return 0;
>  }
>  
>  static void ccp_reverse_get_dm_area(struct ccp_dm_workarea *wa,
> + unsigned int wa_offset,
>   struct scatterlist *sg,
> + unsigned int sg_offset,
>   unsigned int len)
>  {
> - unsigned int nbytes, sg_offset, dm_offset, sb_len, i;
> - u8 buffer[CCP_REVERSE_BUF_SIZE];
> -
> - sg_offset = 0;
> - dm_offset = len;
> - nbytes = len;
> - while (nbytes) {
> - sb_len = min_t(unsigned int, nbytes, sizeof(buffer));
> - dm_offset -= sb_len;
> -
> - for (i = 0; i < sb_len; i++)
> - buffer[sb_len - i - 1] = wa->address[dm_offset + i];
> - scatterwalk_map_and_copy(buffer, sg, sg_offset, sb_len, 1);
> -
> - sg_offset += sb_len;
> - nbytes -= sb_len;
> + u8 *p, *q;
> +
> + p = wa->address + wa_offset;
> + q = p + len - 1;
> + while (p < q) {
> + *p = *p ^ *q;
> + *q = *p ^ *q;
> + *p = *p ^ *q;
> + p++;
> + q--;
>   }
> +
> + ccp_get_dm_area(wa, wa_offset, sg, sg_offset, len);
>  }
>  
>  static void ccp_free_data(struct ccp_data *data, struct ccp_cmd_queue *cmd_q)
> @@ -1294,7 +1278,9 @@ static int ccp_run_rsa_cmd(struct ccp_cmd_queue *cmd_q, 
> struct ccp_cmd *cmd)
>   struct ccp_data dst;
>   struct ccp_op op;
>   unsigned int sb_count, i_len, o_len;
> - int ret;
> + unsigned int dm_offset;
> + int i = 0;

Is "dm_offset" and "i" used anywhere?  I don't see them used in this
function...

> + int ret = 0;

No need to change this, is there?

Thanks,
Tom



Re: [PATCH 6/6] crypto: ccp - Enable 3DES function on v5 CCPs

2016-10-13 Thread Tom Lendacky
On 10/13/2016 09:53 AM, Gary R Hook wrote:
> Wire up support for Triple DES in ECB mode.
> 
> Signed-off-by: Gary R Hook 
> ---
>  drivers/crypto/ccp/Makefile  |1 
>  drivers/crypto/ccp/ccp-crypto-des3.c |  254 
> ++
>  drivers/crypto/ccp/ccp-crypto-main.c |   10 +
>  drivers/crypto/ccp/ccp-crypto.h  |   25 +++
>  drivers/crypto/ccp/ccp-dev-v3.c  |1 
>  drivers/crypto/ccp/ccp-dev-v5.c  |   65 -
>  drivers/crypto/ccp/ccp-dev.h |   18 ++
>  drivers/crypto/ccp/ccp-ops.c |  201 +++
>  drivers/crypto/ccp/ccp-pci.c |2 
>  include/linux/ccp.h  |   57 +++-
>  10 files changed, 624 insertions(+), 10 deletions(-)
>  create mode 100644 drivers/crypto/ccp/ccp-crypto-des3.c
> 

...  ...

> --- a/drivers/crypto/ccp/ccp-crypto.h
> +++ b/drivers/crypto/ccp/ccp-crypto.h
> @@ -26,6 +26,8 @@
>  #include 
>  #include 
>  
> +#define  CCP_LOG_LEVEL   KERN_INFO
> +

Not used anywhere that I can tell.

>  #define CCP_CRA_PRIORITY 300
>  
>  struct ccp_crypto_ablkcipher_alg {
> @@ -151,7 +153,26 @@ struct ccp_aes_cmac_exp_ctx {
>   u8 buf[AES_BLOCK_SIZE];
>  };
>  
> -/* SHA-related defines
> +/* 3DES related defines */
> +struct ccp_des3_ctx {
> + enum ccp_engine engine;
> + enum ccp_des3_type type;
> + enum ccp_des3_mode mode;
> +
> + struct scatterlist key_sg;
> + unsigned int key_len;
> + u8 key[AES_MAX_KEY_SIZE];
> +};
> +
> +struct ccp_des3_req_ctx {
> + struct scatterlist iv_sg;
> + u8 iv[AES_BLOCK_SIZE];
> +
> + struct ccp_cmd cmd;
> +};
> +
> +/*
> + * SHA-related defines
>   * These values must be large enough to accommodate any variant
>   */
>  #define MAX_SHA_CONTEXT_SIZE SHA512_DIGEST_SIZE
> @@ -236,6 +257,7 @@ struct ccp_ctx {
>   struct ccp_aes_ctx aes;
>   struct ccp_rsa_ctx rsa;
>   struct ccp_sha_ctx sha;
> + struct ccp_des3_ctx des3;
>   } u;
>  };
>  
> @@ -251,5 +273,6 @@ int ccp_register_aes_aeads(struct list_head *head);
>  int ccp_register_sha_algs(struct list_head *head);
>  int ccp_register_rsa_algs(void);
>  void ccp_unregister_rsa_algs(void);
> +int ccp_register_des3_algs(struct list_head *head);
>  
>  #endif
> diff --git a/drivers/crypto/ccp/ccp-dev-v3.c b/drivers/crypto/ccp/ccp-dev-v3.c
> index 75a0978..fccca16 100644
> --- a/drivers/crypto/ccp/ccp-dev-v3.c
> +++ b/drivers/crypto/ccp/ccp-dev-v3.c
> @@ -595,6 +595,7 @@ static irqreturn_t ccp_irq_handler(int irq, void *data)
>  static const struct ccp_actions ccp3_actions = {
>   .aes = ccp_perform_aes,
>   .xts_aes = ccp_perform_xts_aes,
> + .des3 = NULL,
>   .sha = ccp_perform_sha,
>   .rsa = ccp_perform_rsa,
>   .passthru = ccp_perform_passthru,
> diff --git a/drivers/crypto/ccp/ccp-dev-v5.c b/drivers/crypto/ccp/ccp-dev-v5.c
> index dcae391..85387dc 100644
> --- a/drivers/crypto/ccp/ccp-dev-v5.c
> +++ b/drivers/crypto/ccp/ccp-dev-v5.c
> @@ -101,6 +101,12 @@ union ccp_function {
>   u16 type:2;
>   } aes_xts;
>   struct {
> + u16 size:7;
> + u16 encrypt:1;
> + u16 mode:5;
> + u16 type:2;
> + } des3;
> + struct {
>   u16 rsvd1:10;
>   u16 type:4;
>   u16 rsvd2:1;
> @@ -132,6 +138,10 @@ union ccp_function {
>  #define  CCP_AES_TYPE(p) ((p)->aes.type)
>  #define  CCP_XTS_SIZE(p) ((p)->aes_xts.size)
>  #define  CCP_XTS_ENCRYPT(p)  ((p)->aes_xts.encrypt)
> +#define  CCP_DES3_SIZE(p)	((p)->des3.size)
> +#define  CCP_DES3_ENCRYPT(p)	((p)->des3.encrypt)
> +#define  CCP_DES3_MODE(p)	((p)->des3.mode)
> +#define  CCP_DES3_TYPE(p)	((p)->des3.type)
>  #define  CCP_SHA_TYPE(p) ((p)->sha.type)
>  #define  CCP_RSA_SIZE(p) ((p)->rsa.size)
>  #define  CCP_PT_BYTESWAP(p)  ((p)->pt.byteswap)
> @@ -242,13 +252,16 @@ static int ccp5_do_cmd(struct ccp5_desc *desc,
>   /* Wait for the job to complete */
>   ret = wait_event_interruptible(cmd_q->int_queue,
>  cmd_q->int_rcvd);
> - if (ret || cmd_q->cmd_error) {
> + if (cmd_q->cmd_error) {
> + /*
> +  * Log the error and flush the queue by
> +  * moving the head pointer
> +  */

I don't think you wanted to remove the check for ret in the if
statement above.

>   if (cmd_q->cmd_error)
>   ccp_log_error(cmd_q->ccp,
> cmd_q->cmd_error);
> - /* A version 5 device doesn't use Job IDs... */
> - if (!ret)
> - ret = -EIO;
> + iowrite32(tail, cmd_q->reg_head_lo);
> + 
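A hedged sketch of what keeping the interrupted-wait check might look like,
while still flushing the queue on a command error:

	ret = wait_event_interruptible(cmd_q->int_queue, cmd_q->int_rcvd);
	if (ret || cmd_q->cmd_error) {
		if (cmd_q->cmd_error)
			ccp_log_error(cmd_q->ccp, cmd_q->cmd_error);
		/* Flush the queue by moving the head pointer */
		iowrite32(tail, cmd_q->reg_head_lo);
		if (!ret)
			ret = -EIO;
	}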

Re: [PATCH 5/6] crypto: ccp - Enable support for AES GCM on v5 CCPs

2016-10-13 Thread Tom Lendacky
On 10/13/2016 09:53 AM, Gary R Hook wrote:
> A version 5 device provides the primitive commands
> required for AES GCM. This patch adds support for
> en/decryption.
> 
> Signed-off-by: Gary R Hook <gary.h...@amd.com>
> ---
>  drivers/crypto/ccp/Makefile|1 
>  drivers/crypto/ccp/ccp-crypto-aes-galois.c |  252 +++
>  drivers/crypto/ccp/ccp-crypto-main.c   |   12 +
>  drivers/crypto/ccp/ccp-crypto.h|   14 +
>  drivers/crypto/ccp/ccp-dev-v5.c|2 
>  drivers/crypto/ccp/ccp-dev.h   |1 
>  drivers/crypto/ccp/ccp-ops.c   |  262 
> 
>  include/linux/ccp.h|9 +
>  8 files changed, 553 insertions(+)
>  create mode 100644 drivers/crypto/ccp/ccp-crypto-aes-galois.c
> 
> diff --git a/drivers/crypto/ccp/Makefile b/drivers/crypto/ccp/Makefile
> index 23f89b7..fd77225 100644
> --- a/drivers/crypto/ccp/Makefile
> +++ b/drivers/crypto/ccp/Makefile
> @@ -13,4 +13,5 @@ ccp-crypto-objs := ccp-crypto-main.o \
>  ccp-crypto-aes-cmac.o \
>  ccp-crypto-aes-xts.o \
>  ccp-crypto-rsa.o \
> +ccp-crypto-aes-galois.o \
>  ccp-crypto-sha.o
> diff --git a/drivers/crypto/ccp/ccp-crypto-aes-galois.c 
> b/drivers/crypto/ccp/ccp-crypto-aes-galois.c
> new file mode 100644
> index 000..5da324f
> --- /dev/null
> +++ b/drivers/crypto/ccp/ccp-crypto-aes-galois.c
> @@ -0,0 +1,252 @@
> +/*
> + * AMD Cryptographic Coprocessor (CCP) AES crypto API support
> + *
> + * Copyright (C) 2013,2016 Advanced Micro Devices, Inc.
> + *
> + * Author: Tom Lendacky <thomas.lenda...@amd.com>

Maybe put your name here...

> + *
> + * This program is free software; you can redistribute it and/or modify
> + * it under the terms of the GNU General Public License version 2 as
> + * published by the Free Software Foundation.
> + */
> +
> +#include 
> +#include 
> +#include 
> +#include 
> +#include 
> +#include 
> +#include 
> +#include 
> +#include 
> +#include 
> +#include 
> +
> +#include "ccp-crypto.h"
> +
> +#define  AES_GCM_IVSIZE  12
> +
> +static int ccp_aes_gcm_complete(struct crypto_async_request *async_req, int 
> ret)
> +{
> + return ret;
> +}
> +
> +static int ccp_aes_gcm_setkey(struct crypto_aead *tfm, const u8 *key,
> +   unsigned int key_len)
> +{
> + struct ccp_ctx *ctx = crypto_aead_ctx(tfm);
> +
> + switch (key_len) {
> + case AES_KEYSIZE_128:
> + ctx->u.aes.type = CCP_AES_TYPE_128;
> + break;
> + case AES_KEYSIZE_192:
> + ctx->u.aes.type = CCP_AES_TYPE_192;
> + break;
> + case AES_KEYSIZE_256:
> + ctx->u.aes.type = CCP_AES_TYPE_256;
> + break;
> + default:
> + crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
> + return -EINVAL;
> + }
> +
> + ctx->u.aes.mode = CCP_AES_MODE_GCM;
> + ctx->u.aes.key_len = key_len;
> +
> + memcpy(ctx->u.aes.key, key, key_len);
> + sg_init_one(&ctx->u.aes.key_sg, ctx->u.aes.key, key_len);
> +
> + return 0;
> +}
> +
> +static int ccp_aes_gcm_setauthsize(struct crypto_aead *tfm,
> +unsigned int authsize)
> +{
> + return 0;
> +}
> +
> +static int ccp_aes_gcm_crypt(struct aead_request *req, bool encrypt)
> +{
> + struct crypto_aead *tfm = crypto_aead_reqtfm(req);
> + struct ccp_ctx *ctx = crypto_aead_ctx(tfm);
> + struct ccp_aes_req_ctx *rctx = aead_request_ctx(req);
> + struct scatterlist *iv_sg = NULL;
> + unsigned int iv_len = 0;
> + int i;
> + int ret = 0;
> +
> + if (!ctx->u.aes.key_len)
> + return -EINVAL;
> +
> + if (ctx->u.aes.mode != CCP_AES_MODE_GCM)
> + return -EINVAL;
> +
> + if (!req->iv)
> + return -EINVAL;
> +
> + /*
> +  * 5 parts:
> +  *   plaintext/ciphertext input
> +  *   AAD
> +  *   key
> +  *   IV
> +  *   Destination+tag buffer
> +  */
> +
> + /* Copy the IV and initialize a scatterlist */
> + memset(rctx->iv, 0, AES_BLOCK_SIZE);
> + memcpy(rctx->iv, req->iv, AES_GCM_IVSIZE);
> + for (i = 0; i < 3; i++)
> + rctx->iv[i + AES_GCM_IVSIZE] = 0;

Is this needed if you did the memset to zero above?

> + rctx->iv[AES_BLOCK_SIZE - 1] = 1;
> + iv_sg = &rctx->iv_sg;
> + iv_len = AES_BLOCK_SIZE;
> + sg_init_one(iv_sg, rctx
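A sketch of the simplification suggested above: the memset() already zeroes
the bytes past the 12-byte IV, so the explicit loop can simply be dropped.

	/* Copy the IV and initialize a scatterlist */
	memset(rctx->iv, 0, AES_BLOCK_SIZE);	/* zeroes the tail, too */
	memcpy(rctx->iv, req->iv, AES_GCM_IVSIZE);
	rctx->iv[AES_BLOCK_SIZE - 1] = 1;	/* initial counter value */
	sg_init_one(&rctx->iv_sg, rctx->iv, AES_BLOCK_SIZE);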

Re: [PATCH 3/6] crypto: ccp - Add support for RSA on the CCP

2016-10-13 Thread Tom Lendacky
On 10/13/2016 09:53 AM, Gary R Hook wrote:
> Wire up the v3 CCP as a cipher provider.
> 
> Signed-off-by: Gary R Hook 
> ---
>  drivers/crypto/ccp/Makefile  |1 
>  drivers/crypto/ccp/ccp-crypto-main.c |   15 ++
>  drivers/crypto/ccp/ccp-crypto-rsa.c  |  258 
> ++
>  drivers/crypto/ccp/ccp-crypto.h  |   24 +++
>  drivers/crypto/ccp/ccp-dev-v3.c  |   38 +
>  drivers/crypto/ccp/ccp-ops.c |1 
>  include/linux/ccp.h  |   34 
>  7 files changed, 370 insertions(+), 1 deletion(-)
>  create mode 100644 drivers/crypto/ccp/ccp-crypto-rsa.c
> 
> diff --git a/drivers/crypto/ccp/Makefile b/drivers/crypto/ccp/Makefile
> index 346ceb8..23f89b7 100644
> --- a/drivers/crypto/ccp/Makefile
> +++ b/drivers/crypto/ccp/Makefile
> @@ -12,4 +12,5 @@ ccp-crypto-objs := ccp-crypto-main.o \
>  ccp-crypto-aes.o \
>  ccp-crypto-aes-cmac.o \
>  ccp-crypto-aes-xts.o \
> +ccp-crypto-rsa.o \
>  ccp-crypto-sha.o
> diff --git a/drivers/crypto/ccp/ccp-crypto-main.c 
> b/drivers/crypto/ccp/ccp-crypto-main.c
> index e0380e5..f3c4c25 100644
> --- a/drivers/crypto/ccp/ccp-crypto-main.c
> +++ b/drivers/crypto/ccp/ccp-crypto-main.c
> @@ -33,6 +33,10 @@ static unsigned int sha_disable;
>  module_param(sha_disable, uint, 0444);
>  MODULE_PARM_DESC(sha_disable, "Disable use of SHA - any non-zero value");
>  
> +static unsigned int rsa_disable;
> +module_param(rsa_disable, uint, 0444);
> +MODULE_PARM_DESC(rsa_disable, "Disable use of RSA - any non-zero value");
> +
>  /* List heads for the supported algorithms */
>  static LIST_HEAD(hash_algs);
>  static LIST_HEAD(cipher_algs);
> @@ -343,6 +347,14 @@ static int ccp_register_algs(void)
>   return ret;
>   }
>  
> + if (!rsa_disable) {
> + ret = ccp_register_rsa_algs();
> + if (ret) {
> + rsa_disable = 1;
> + return ret;
> + }
> + }
> +
>   return 0;
>  }
>  
> @@ -362,6 +374,9 @@ static void ccp_unregister_algs(void)
>   list_del(&ablk_alg->entry);
>   kfree(ablk_alg);
>   }
> +
> + if (!rsa_disable)
> + ccp_unregister_rsa_algs();
>  }
>  
>  static int ccp_crypto_init(void)
> diff --git a/drivers/crypto/ccp/ccp-crypto-rsa.c 
> b/drivers/crypto/ccp/ccp-crypto-rsa.c
> new file mode 100644
> index 000..7dab43b
> --- /dev/null
> +++ b/drivers/crypto/ccp/ccp-crypto-rsa.c
> @@ -0,0 +1,258 @@
> +/*
> + * AMD Cryptographic Coprocessor (CCP) RSA crypto API support
> + *
> + * Copyright (C) 2016 Advanced Micro Devices, Inc.
> + *
> + * Author: Gary R Hook 
> + *
> + * This program is free software; you can redistribute it and/or modify
> + * it under the terms of the GNU General Public License version 2 as
> + * published by the Free Software Foundation.
> + */
> +
> +#include 
> +#include 
> +#include 
> +#include 
> +#include 
> +#include 
> +#include 
> +#include 
> +#include 
> +
> +#include "ccp-crypto.h"
> +
> +static inline struct akcipher_request *akcipher_request_cast(
> + struct crypto_async_request *req)
> +{
> + return container_of(req, struct akcipher_request, base);
> +}
> +
> +static int ccp_rsa_complete(struct crypto_async_request *async_req, int ret)
> +{
> + struct akcipher_request *req = akcipher_request_cast(async_req);
> + struct ccp_rsa_req_ctx *rctx = akcipher_request_ctx(req);
> +
> + if (!ret)
> + req->dst_len = rctx->cmd.u.rsa.d_len;
> +
> + ret = 0;
> +
> + return ret;
> +}
> +
> +static int ccp_rsa_maxsize(struct crypto_akcipher *tfm)
> +{
> + return CCP_RSA_MAXMOD;
> +}
> +
> +static int ccp_rsa_crypt(struct akcipher_request *req, bool encrypt)
> +{
> + struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
> + struct ccp_ctx *ctx = akcipher_tfm_ctx(tfm);
> + struct ccp_rsa_req_ctx *rctx = akcipher_request_ctx(req);
> + int ret = 0;
> +
> + if (!ctx->u.rsa.pkey.d && !ctx->u.rsa.pkey.e)
> + return -EINVAL;
> +
> + memset(&rctx->cmd, 0, sizeof(rctx->cmd));
> + INIT_LIST_HEAD(&rctx->cmd.entry);
> + rctx->cmd.engine = CCP_ENGINE_RSA;
> + rctx->cmd.u.rsa.mode = encrypt ? CCP_RSA_ENCRYPT : CCP_RSA_DECRYPT;
> +
> + rctx->cmd.u.rsa.pkey = ctx->u.rsa.pkey;
> + rctx->cmd.u.rsa.key_size = ctx->u.rsa.key_len;

The existing interface expects the key_size to be in bits, so you'll
need to multiply this by 8.

> + rctx->cmd.u.rsa.exp = &ctx->u.rsa.e_sg;
> + rctx->cmd.u.rsa.exp_len = ctx->u.rsa.e_len;
> + rctx->cmd.u.rsa.mod = &ctx->u.rsa.n_sg;
> + rctx->cmd.u.rsa.mod_len = ctx->u.rsa.n_len;
> + if (ctx->u.rsa.pkey.d) {
> + rctx->cmd.u.rsa.d_sg = &ctx->u.rsa.d_sg;
> + rctx->cmd.u.rsa.d_len = ctx->u.rsa.d_len;
> + }
> +
> + rctx->cmd.u.rsa.src = req->src;
> + rctx->cmd.u.rsa.src_len = req->src_len;
> + 
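A one-line sketch of the fix implied above, converting the byte length kept
in the context to the bit count the ccp_cmd interface expects:

	rctx->cmd.u.rsa.key_size = ctx->u.rsa.key_len << 3;	/* bytes -> bits */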

Re: [PATCH 1/6] crypto: ccp - Add SHA-2 support

2016-10-13 Thread Tom Lendacky
On 10/13/2016 09:52 AM, Gary R Hook wrote:
> Incorporate 384-bit and 512-bit hashing for a version 5 CCP
> device
> 
> 
> Signed-off-by: Gary R Hook 
> ---
>  drivers/crypto/ccp/ccp-crypto-sha.c |   22 +++
>  drivers/crypto/ccp/ccp-crypto.h |9 +++--
>  drivers/crypto/ccp/ccp-ops.c|   70 
> +++
>  include/linux/ccp.h |3 ++
>  4 files changed, 101 insertions(+), 3 deletions(-)
> 
> diff --git a/drivers/crypto/ccp/ccp-crypto-sha.c 
> b/drivers/crypto/ccp/ccp-crypto-sha.c
> index 84a652b..6b46eea 100644
> --- a/drivers/crypto/ccp/ccp-crypto-sha.c
> +++ b/drivers/crypto/ccp/ccp-crypto-sha.c
> @@ -146,6 +146,12 @@ static int ccp_do_sha_update(struct ahash_request *req, 
> unsigned int nbytes,
>   case CCP_SHA_TYPE_256:
>   rctx->cmd.u.sha.ctx_len = SHA256_DIGEST_SIZE;
>   break;
> + case CCP_SHA_TYPE_384:
> + rctx->cmd.u.sha.ctx_len = SHA384_DIGEST_SIZE;
> + break;
> + case CCP_SHA_TYPE_512:
> + rctx->cmd.u.sha.ctx_len = SHA512_DIGEST_SIZE;
> + break;
>   default:
>   /* Should never get here */
>   break;
> @@ -393,6 +399,22 @@ static struct ccp_sha_def sha_algs[] = {
>   .digest_size= SHA256_DIGEST_SIZE,
>   .block_size = SHA256_BLOCK_SIZE,
>   },
> + {
> + .version= CCP_VERSION(5, 0),
> + .name   = "sha384",
> + .drv_name   = "sha384-ccp",
> + .type   = CCP_SHA_TYPE_384,
> + .digest_size= SHA384_DIGEST_SIZE,
> + .block_size = SHA384_BLOCK_SIZE,
> + },
> + {
> + .version= CCP_VERSION(5, 0),
> + .name   = "sha512",
> + .drv_name   = "sha512-ccp",
> + .type   = CCP_SHA_TYPE_512,
> + .digest_size= SHA512_DIGEST_SIZE,
> + .block_size = SHA512_BLOCK_SIZE,
> + },
>  };
>  
>  static int ccp_register_hmac_alg(struct list_head *head,
> diff --git a/drivers/crypto/ccp/ccp-crypto.h b/drivers/crypto/ccp/ccp-crypto.h
> index 8335b32..ae442ac 100644
> --- a/drivers/crypto/ccp/ccp-crypto.h
> +++ b/drivers/crypto/ccp/ccp-crypto.h
> @@ -137,9 +137,12 @@ struct ccp_aes_cmac_exp_ctx {
>   u8 buf[AES_BLOCK_SIZE];
>  };
>  
> -/* SHA related defines */
> -#define MAX_SHA_CONTEXT_SIZE SHA256_DIGEST_SIZE
> -#define MAX_SHA_BLOCK_SIZE   SHA256_BLOCK_SIZE
> +/*
> + * SHA-related defines
> + * These values must be large enough to accommodate any variant
> + */
> +#define MAX_SHA_CONTEXT_SIZE SHA512_DIGEST_SIZE
> +#define MAX_SHA_BLOCK_SIZE   SHA512_BLOCK_SIZE
>  
>  struct ccp_sha_ctx {
>   struct scatterlist opad_sg;
> diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c
> index 50fae44..8fedb14 100644
> --- a/drivers/crypto/ccp/ccp-ops.c
> +++ b/drivers/crypto/ccp/ccp-ops.c
> @@ -41,6 +41,20 @@ static const __be32 ccp_sha256_init[SHA256_DIGEST_SIZE / 
> sizeof(__be32)] = {
>   cpu_to_be32(SHA256_H6), cpu_to_be32(SHA256_H7),
>  };
>  
> +static const __be64 ccp_sha384_init[SHA512_DIGEST_SIZE / sizeof(__be64)] = {
> + cpu_to_be64(SHA384_H0), cpu_to_be64(SHA384_H1),
> + cpu_to_be64(SHA384_H2), cpu_to_be64(SHA384_H3),
> + cpu_to_be64(SHA384_H4), cpu_to_be64(SHA384_H5),
> + cpu_to_be64(SHA384_H6), cpu_to_be64(SHA384_H7),
> +};
> +
> +static const __be64 ccp_sha512_init[SHA512_DIGEST_SIZE / sizeof(__be64)] = {
> + cpu_to_be64(SHA512_H0), cpu_to_be64(SHA512_H1),
> + cpu_to_be64(SHA512_H2), cpu_to_be64(SHA512_H3),
> + cpu_to_be64(SHA512_H4), cpu_to_be64(SHA512_H5),
> + cpu_to_be64(SHA512_H6), cpu_to_be64(SHA512_H7),
> +};
> +
>  #define  CCP_NEW_JOBID(ccp)  ((ccp->vdata->version == CCP_VERSION(3, 
> 0)) ? \
>   ccp_gen_jobid(ccp) : 0)
>  
> @@ -963,6 +977,16 @@ static int ccp_run_sha_cmd(struct ccp_cmd_queue *cmd_q, 
> struct ccp_cmd *cmd)
>   return -EINVAL;
>   block_size = SHA256_BLOCK_SIZE;
>   break;
> + case CCP_SHA_TYPE_384:
> + if (sha->ctx_len < SHA384_DIGEST_SIZE)
> + return -EINVAL;
> + block_size = SHA384_BLOCK_SIZE;
> + break;
> + case CCP_SHA_TYPE_512:
> + if (sha->ctx_len < SHA512_DIGEST_SIZE)
> + return -EINVAL;
> + block_size = SHA512_BLOCK_SIZE;
> + break;

A version 3 CCP won't support these new sizes.  You should add a version
check and return an error if v3.

>   default:
>   return -EINVAL;
>   }
> @@ -1050,6 +1074,21 @@ static int ccp_run_sha_cmd(struct ccp_cmd_queue 
> *cmd_q, struct ccp_cmd *cmd)
>   sb_count = 1;
>   ooffset = ioffset = 0;
>   break;
> + case CCP_SHA_TYPE_384:
> + digest_size = 
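A hedged sketch of the version gate being requested, inside the switch in
ccp_run_sha_cmd():

	case CCP_SHA_TYPE_384:
		if (cmd_q->ccp->vdata->version < CCP_VERSION(5, 0))
			return -EINVAL;	/* SHA-384 needs a v5 CCP */
		if (sha->ctx_len < SHA384_DIGEST_SIZE)
			return -EINVAL;
		block_size = SHA384_BLOCK_SIZE;
		break;
	case CCP_SHA_TYPE_512:
		if (cmd_q->ccp->vdata->version < CCP_VERSION(5, 0))
			return -EINVAL;	/* SHA-512 needs a v5 CCP */
		if (sha->ctx_len < SHA512_DIGEST_SIZE)
			return -EINVAL;
		block_size = SHA512_BLOCK_SIZE;
		break;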

Re: [PATCH 1/2] crypto: ccp - data structure cleanup

2016-09-28 Thread Tom Lendacky
On 09/28/2016 10:49 AM, Gary R Hook wrote:
> Change names of data structure instances; add const
> keyword where appropriate.
> 
> Signed-off-by: Gary R Hook 
> ---
>  drivers/crypto/ccp/ccp-dev-v3.c |2 +-
>  drivers/crypto/ccp/ccp-dev-v5.c |7 +--
>  drivers/crypto/ccp/ccp-dev.h|6 +++---
>  drivers/crypto/ccp/ccp-pci.c|4 ++--
>  4 files changed, 11 insertions(+), 8 deletions(-)
> 
> diff --git a/drivers/crypto/ccp/ccp-dev-v3.c b/drivers/crypto/ccp/ccp-dev-v3.c
> index 578522d..b6615b1 100644
> --- a/drivers/crypto/ccp/ccp-dev-v3.c
> +++ b/drivers/crypto/ccp/ccp-dev-v3.c
> @@ -566,7 +566,7 @@ static const struct ccp_actions ccp3_actions = {
>   .irqhandler = ccp_irq_handler,
>  };
>  
> -struct ccp_vdata ccpv3 = {
> +const struct ccp_vdata ccpv3 = {
>   .version = CCP_VERSION(3, 0),
>   .setup = NULL,
>   .perform = &ccp3_actions,
> diff --git a/drivers/crypto/ccp/ccp-dev-v5.c b/drivers/crypto/ccp/ccp-dev-v5.c
> index 9df1526..a90ca9e 100644
> --- a/drivers/crypto/ccp/ccp-dev-v5.c
> +++ b/drivers/crypto/ccp/ccp-dev-v5.c
> @@ -839,6 +839,9 @@ static int ccp5_init(struct ccp_device *ccp)
>  
>   return 0;
>  
> +e_hwrng:
> + ccp_unregister_rng(ccp);
> +

This label has been added but is never used.  I assume that you wanted
to put a goto e_hwrng if ccp_dmaengine_register() fails.

Thanks,
Tom

>  e_kthread:
>   for (i = 0; i < ccp->cmd_q_count; i++)
>   if (ccp->cmd_q[i].kthread)
> @@ -994,7 +997,7 @@ static const struct ccp_actions ccp5_actions = {
>   .irqhandler = ccp5_irq_handler,
>  };
>  
> -struct ccp_vdata ccpv5 = {
> +const struct ccp_vdata ccpv5a = {
>   .version = CCP_VERSION(5, 0),
>   .setup = ccp5_config,
>   .perform = &ccp5_actions,
> @@ -1002,7 +1005,7 @@ struct ccp_vdata ccpv5 = {
>   .offset = 0x0,
>  };
>  
> -struct ccp_vdata ccpv5other = {
> +const struct ccp_vdata ccpv5b = {
>   .version = CCP_VERSION(5, 0),
>   .setup = ccp5other_config,
>   .perform = &ccp5_actions,
> diff --git a/drivers/crypto/ccp/ccp-dev.h b/drivers/crypto/ccp/ccp-dev.h
> index ebc9365..08f58b0 100644
> --- a/drivers/crypto/ccp/ccp-dev.h
> +++ b/drivers/crypto/ccp/ccp-dev.h
> @@ -639,8 +639,8 @@ struct ccp_vdata {
>   const unsigned int offset;
>  };
>  
> -extern   struct ccp_vdata ccpv3;
> -extern   struct ccp_vdata ccpv5;
> -extern   struct ccp_vdata ccpv5other;
> +extern const struct ccp_vdata ccpv3;
> +extern const struct ccp_vdata ccpv5a;
> +extern const struct ccp_vdata ccpv5b;
>  
>  #endif
> diff --git a/drivers/crypto/ccp/ccp-pci.c b/drivers/crypto/ccp/ccp-pci.c
> index 239cbf2..28a9996 100644
> --- a/drivers/crypto/ccp/ccp-pci.c
> +++ b/drivers/crypto/ccp/ccp-pci.c
> @@ -325,8 +325,8 @@ static int ccp_pci_resume(struct pci_dev *pdev)
>  
>  static const struct pci_device_id ccp_pci_table[] = {
>   { PCI_VDEVICE(AMD, 0x1537), (kernel_ulong_t)&ccpv3 },
> - { PCI_VDEVICE(AMD, 0x1456), (kernel_ulong_t)&ccpv5 },
> - { PCI_VDEVICE(AMD, 0x1468), (kernel_ulong_t)&ccpv5other },
> + { PCI_VDEVICE(AMD, 0x1456), (kernel_ulong_t)&ccpv5a },
> + { PCI_VDEVICE(AMD, 0x1468), (kernel_ulong_t)&ccpv5b },
>   /* Last entry must be zero */
>   { 0, }
>  };
> 


Re: [RFC PATCH v1 09/28] x86/efi: Access EFI data as encrypted when SEV is active

2016-09-22 Thread Tom Lendacky
On 09/22/2016 12:07 PM, Borislav Petkov wrote:
> On Thu, Sep 22, 2016 at 05:05:54PM +0200, Paolo Bonzini wrote:
>> Which paragraph?
> 
> "Linux relies on BIOS to set this bit if BIOS has determined that the
> reduction in the physical address space as a result of enabling memory
> encryption..."
> 
> Basically, you can enable SME in the BIOS and you're all set.

That's not what I mean here.  If the BIOS sets the SMEE bit in the
SYS_CFG msr then, even if the encryption bit is never used, there is
still a reduction in physical address space.

Transparent SME (TSME) will be a BIOS option that will result in the
memory controller performing encryption no matter what. In this case
all data will be encrypted without a reduction in physical address
space.

Thanks,
Tom

> 


Re: [RFC PATCH v1 09/28] x86/efi: Access EFI data as encrypted when SEV is active

2016-09-22 Thread Tom Lendacky
On 09/22/2016 02:11 PM, Borislav Petkov wrote:
> On Thu, Sep 22, 2016 at 02:04:27PM -0500, Tom Lendacky wrote:
>> That's not what I mean here.  If the BIOS sets the SMEE bit in the
>> SYS_CFG msr then, even if the encryption bit is never used, there is
>> still a reduction in physical address space.
> 
> I thought that reduction is the reservation of bits for the SME mask.
> 
> What other reduction is there?

There is a reduction in physical address space for the SME mask and the
bits used to aid in identifying the ASID associated with the memory
request. This allows for the memory controller to determine the key to
be used for the encryption operation (host/hypervisor key vs. an SEV
guest key).
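
(As a purely illustrative example with hypothetical numbers: on a part
with a 48-bit physical address space, reserving the top bits for the
encryption bit plus a key/ASID index could leave roughly 43 usable
physical address bits.  That reservation applies as soon as SMEE is
set, whether or not the encryption bit is ever used in a page table.)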

Thanks,
Tom

> 
>> Transparent SME (TSME) will be a BIOS option that will result in the
>> memory controller performing encryption no matter what. In this case
>> all data will be encrypted without a reduction in physical address
>> space.
> 
> Now I'm confused: aren't we reducing the address space with the SME
> mask?
> 
> Or what reduction do you mean?
> 


Re: [RFC PATCH v1 09/28] x86/efi: Access EFI data as encrypted when SEV is active

2016-09-22 Thread Tom Lendacky
On 09/22/2016 09:45 AM, Paolo Bonzini wrote:
> 
> 
> On 22/09/2016 16:35, Borislav Petkov wrote:
 @@ -230,6 +230,10 @@ int __init efi_setup_page_tables(unsigned long 
 pa_memmap, unsigned num_pages)
efi_scratch.efi_pgt = (pgd_t *)__sme_pa(efi_pgd);
pgd = efi_pgd;
  
 +  flags = _PAGE_NX | _PAGE_RW;
 +  if (sev_active)
 +  flags |= _PAGE_ENC;
>> So this is confusing me. There's this patch which says EFI data is
>> accessed in the clear:
>>
>> https://lkml.kernel.org/r/2016083738.29880.6909.st...@tlendack-t1.amdoffice.net
>>
>> but now here it is encrypted when SEV is enabled.
>>
>> Do you mean, it is encrypted here because we're in the guest kernel?
> 
> I suspect this patch is untested, and also wrong. :)

Yes, it is untested, but I'm not sure that it is wrong...  It all depends on
how we add SEV support to the guest UEFI BIOS.  My take would be to have
the EFI data and ACPI tables encrypted.

> 
> The main difference between the SME and SEV encryption, from the point
> of view of the kernel, is that real-mode always writes unencrypted in
> SME and always writes encrypted in SEV.  But UEFI can run in 64-bit mode
> and learn about the C bit, so EFI boot data should be unprotected in SEV
> guests.
> 
> Because the firmware volume is written to high memory in encrypted form,
> and because the PEI phase runs in 32-bit mode, the firmware code will be
> encrypted; on the other hand, data that is placed in low memory for the
> kernel can be unencrypted, thus limiting differences between SME and SEV.

I like the idea of limiting the differences but it would leave the EFI
data and ACPI tables exposed and able to be manipulated.

> 
>   Important: I don't know what you guys are doing for SEV and
>   Windows guests, but if you are doing something I would really
>   appreciate doing things in the open.  If Linux and Windows end
>   up doing different things with EFI boot data, ACPI tables, etc.
>   it will be a huge pain.  On the other hand, if we can enjoy
>   being first, that's great.

We haven't discussed Windows guests under SEV yet, but as you say, we
need to do things the same.

Thanks,
Tom

> 
> In fact, I have suggested in the QEMU list that SEV guests should always
> use UEFI; because BIOS runs in real-mode or 32-bit non-paging protected
> mode, BIOS must always write encrypted data, which becomes painful in
> the kernel.
> 
> And regarding the above "important" point, all I know is that Microsoft
> for sure will be happy to restrict SEV to UEFI guests. :)
> 
> There are still some differences, mostly around the real mode trampoline
> executed by the kernel, but they should be much smaller.
> 
> Paolo
> 


Re: [RFC PATCH v1 18/28] crypto: add AMD Platform Security Processor driver

2016-08-24 Thread Tom Lendacky


On 08/23/2016 02:14 AM, Herbert Xu wrote:
> On Mon, Aug 22, 2016 at 07:27:22PM -0400, Brijesh Singh wrote:
>> The driver to communicate with Secure Encrypted Virtualization (SEV)
>> firmware running within the AMD secure processor providing a secure key
>> management interface for SEV guests.
>>
>> Signed-off-by: Tom Lendacky <thomas.lenda...@amd.com>
>> Signed-off-by: Brijesh Singh <brijesh.si...@amd.com>
> 
> This driver doesn't seem to hook into the Crypto API at all, is
> there any reason why it should be in drivers/crypto?

Yes, this needs to be cleaned up.  The PSP and the CCP share the same
PCI id, so this has to be integrated with the CCP. It could either
be moved into the drivers/crypto/ccp directory or both the psp and
ccp device specific support can be moved somewhere else leaving just
the ccp crypto API related files in drivers/crypto/ccp.

Thanks,
Tom

> 
> Thanks,
> 


Re: [PATCH] crypto: ccp - Fix AES XTS error for request sizes above 4096

2016-05-23 Thread Tom Lendacky
On 05/20/2016 06:35 PM, Herbert Xu wrote:
> On Fri, May 20, 2016 at 05:33:03PM -0500, Tom Lendacky wrote:
>> The ccp-crypto module for AES XTS support has a bug that can allow requests
>> greater than 4096 bytes in size to be passed to the CCP hardware. The CCP
>> hardware does not support request sizes larger than 4096, resulting in
>> incorrect output. The request should actually be handled by the fallback
>> mechanism instantiated by the ccp-crypto module.
>>
>> Add a check to ensure the request size is less than or equal to the maximum
>> supported size and use the fallback mechanism if it is not.
>>
>> Cc: <sta...@vger.kernel.org> # 3.14.x-
>> Signed-off-by: Tom Lendacky <thomas.lenda...@amd.com>
> 
> I'm OK with this patch but I think it doesn't always need to go into
> the fallback.  I made a test vector split as 4064 bytes + 48 bytes
> and ccp handled it just fine.  It appears that the bug is actually
> in the handling of a single SG entry that's longer than a page,
> presumably because sg_next is used unconditionally instead of
> checking whether there is more in the current SG entry.

I'll take a closer look at this. Something obviously isn't right but
the code doesn't do anything related to PAGE size checks and works
on the length specified in the SG entry.
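
If that is indeed the problem, the fix would be along these lines
(rough sketch, variable names hypothetical): consume the current SG
entry in chunks and only advance when it is exhausted:

	while (len) {
		n = min(len, sg->length - sg_off);
		/* ... process n bytes at offset sg_off of sg ... */
		sg_off += n;
		len -= n;
		if (sg_off == sg->length) {
			sg = sg_next(sg);
			sg_off = 0;
		}
	}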

> 
> But I'll merge your fix as it fixes a real problem.

Thanks Herbert.

Tom

> 
> Thanks,
> 


[PATCH] crypto: ccp - Fix AES XTS error for request sizes above 4096

2016-05-20 Thread Tom Lendacky
The ccp-crypto module for AES XTS support has a bug that can allow requests
greater than 4096 bytes in size to be passed to the CCP hardware. The CCP
hardware does not support request sizes larger than 4096, resulting in
incorrect output. The request should actually be handled by the fallback
mechanism instantiated by the ccp-crypto module.

Add a check to ensure the request size is less than or equal to the maximum
supported size and use the fallback mechanism if it is not.

Cc: <sta...@vger.kernel.org> # 3.14.x-
Signed-off-by: Tom Lendacky <thomas.lenda...@amd.com>
---
 drivers/crypto/ccp/ccp-crypto-aes-xts.c |   17 -
 1 file changed, 12 insertions(+), 5 deletions(-)

diff --git a/drivers/crypto/ccp/ccp-crypto-aes-xts.c 
b/drivers/crypto/ccp/ccp-crypto-aes-xts.c
index 52c7395..0d0d452 100644
--- a/drivers/crypto/ccp/ccp-crypto-aes-xts.c
+++ b/drivers/crypto/ccp/ccp-crypto-aes-xts.c
@@ -122,6 +122,7 @@ static int ccp_aes_xts_crypt(struct ablkcipher_request *req,
struct ccp_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
struct ccp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);
unsigned int unit;
+   u32 unit_size;
int ret;
 
if (!ctx->u.aes.key_len)
@@ -133,11 +134,17 @@ static int ccp_aes_xts_crypt(struct ablkcipher_request 
*req,
if (!req->info)
return -EINVAL;
 
-   for (unit = 0; unit < ARRAY_SIZE(unit_size_map); unit++)
-   if (!(req->nbytes & (unit_size_map[unit].size - 1)))
-   break;
+   unit_size = CCP_XTS_AES_UNIT_SIZE__LAST;
+   if (req->nbytes <= unit_size_map[0].size) {
+   for (unit = 0; unit < ARRAY_SIZE(unit_size_map); unit++) {
+   if (!(req->nbytes & (unit_size_map[unit].size - 1))) {
+   unit_size = unit_size_map[unit].value;
+   break;
+   }
+   }
+   }
 
-   if ((unit_size_map[unit].value == CCP_XTS_AES_UNIT_SIZE__LAST) ||
+   if ((unit_size == CCP_XTS_AES_UNIT_SIZE__LAST) ||
(ctx->u.aes.key_len != AES_KEYSIZE_128)) {
/* Use the fallback to process the request for any
 * unsupported unit sizes or key sizes
@@ -158,7 +165,7 @@ static int ccp_aes_xts_crypt(struct ablkcipher_request *req,
rctx->cmd.engine = CCP_ENGINE_XTS_AES_128;
rctx->cmd.u.xts.action = (encrypt) ? CCP_AES_ACTION_ENCRYPT
   : CCP_AES_ACTION_DECRYPT;
-   rctx->cmd.u.xts.unit_size = unit_size_map[unit].value;
+   rctx->cmd.u.xts.unit_size = unit_size;
rctx->cmd.u.xts.key = &ctx->u.aes.key_sg;
rctx->cmd.u.xts.key_len = ctx->u.aes.key_len;
rctx->cmd.u.xts.iv = &rctx->iv_sg;



Re: [PATCH] crypto/ccp: remove rwlocks_types.h

2016-05-11 Thread Tom Lendacky
On 05/11/2016 05:06 AM, Sebastian Andrzej Siewior wrote:
> Users of rwlocks should include spinlock.h instead of including this
> header file. The current users of rwlocks_types.h are internal.
> 
> Signed-off-by: Sebastian Andrzej Siewior 

There's already been a patch submitted and accepted for this
in the cryptodev tree:

commit 7587c4075400 ("crypto: ccp - Fix RT breaking #include <linux/rwlock_types.h>")

Thanks,
Tom

> ---
>  drivers/crypto/ccp/ccp-dev.c | 1 -
>  1 file changed, 1 deletion(-)
> 
> diff --git a/drivers/crypto/ccp/ccp-dev.c b/drivers/crypto/ccp/ccp-dev.c
> index 4dbc18727235..2a8ad712a5f2 100644
> --- a/drivers/crypto/ccp/ccp-dev.c
> +++ b/drivers/crypto/ccp/ccp-dev.c
> @@ -16,7 +16,6 @@
>  #include 
>  #include 
>  #include 
> -#include <linux/rwlock_types.h>
>  #include 
>  #include 
>  #include 
> 


[PATCH] crypto: ccp - Prevent information leakage on export

2016-04-13 Thread Tom Lendacky
Prevent information from leaking to userspace by doing a memset to 0 of
the export state structure before setting the structure values and copying
it. This prevents un-initialized padding areas from being copied into the
export area.

Cc: <sta...@vger.kernel.org> # 3.14.x-
Reported-by: Ben Hutchings <b...@decadent.org.uk>
Signed-off-by: Tom Lendacky <thomas.lenda...@amd.com>
---
 drivers/crypto/ccp/ccp-crypto-aes-cmac.c |3 +++
 drivers/crypto/ccp/ccp-crypto-sha.c  |3 +++
 2 files changed, 6 insertions(+)

diff --git a/drivers/crypto/ccp/ccp-crypto-aes-cmac.c 
b/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
index 3d9acc5..60fc0fa 100644
--- a/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
+++ b/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
@@ -225,6 +225,9 @@ static int ccp_aes_cmac_export(struct ahash_request *req, 
void *out)
struct ccp_aes_cmac_req_ctx *rctx = ahash_request_ctx(req);
struct ccp_aes_cmac_exp_ctx state;
 
+   /* Don't let anything leak to 'out' */
+   memset(&state, 0, sizeof(state));
+
state.null_msg = rctx->null_msg;
memcpy(state.iv, rctx->iv, sizeof(state.iv));
state.buf_count = rctx->buf_count;
diff --git a/drivers/crypto/ccp/ccp-crypto-sha.c 
b/drivers/crypto/ccp/ccp-crypto-sha.c
index b5ad728..8f36af6 100644
--- a/drivers/crypto/ccp/ccp-crypto-sha.c
+++ b/drivers/crypto/ccp/ccp-crypto-sha.c
@@ -212,6 +212,9 @@ static int ccp_sha_export(struct ahash_request *req, void 
*out)
struct ccp_sha_req_ctx *rctx = ahash_request_ctx(req);
struct ccp_sha_exp_ctx state;
 
+   /* Don't let anything leak to 'out' */
+   memset(&state, 0, sizeof(state));
+
state.type = rctx->type;
state.msg_bits = rctx->msg_bits;
state.first = rctx->first;



Re: [PATCH] crypto: ccp - Register the CCP as a DMA resource

2016-04-04 Thread Tom Lendacky
status;
> + struct dma_chan dma_chan;
> +};
> +
>  struct ccp_cmd_queue {
>   struct ccp_device *ccp;
>  
> @@ -261,6 +297,14 @@ struct ccp_device {
>   unsigned int hwrng_retries;
>  
>   /*
> +  * Support for the CCP DMA capabilities
> +  */
> + struct dma_device dma_dev;
> + struct ccp_dma_chan *ccp_dma_chan;
> + struct kmem_cache *dma_cmd_cache;
> + struct kmem_cache *dma_desc_cache;
> +
> + /*
>* A counter used to generate job-ids for cmds submitted to the CCP
>*/
>   atomic_t current_id ____cacheline_aligned;
> @@ -418,4 +462,7 @@ int ccp_cmd_queue_thread(void *data);
>  
>  int ccp_run_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd);
>  
> +int ccp_dmaengine_register(struct ccp_device *ccp);
> +void ccp_dmaengine_unregister(struct ccp_device *ccp);
> +
>  #endif
> diff --git a/drivers/crypto/ccp/ccp-dmaengine.c 
> b/drivers/crypto/ccp/ccp-dmaengine.c
> new file mode 100644
> index 000..241ad8a
> --- /dev/null
> +++ b/drivers/crypto/ccp/ccp-dmaengine.c
> @@ -0,0 +1,718 @@
> +/*
> + * AMD Cryptographic Coprocessor (CCP) driver
> + *
> + * Copyright (C) 2015 Advanced Micro Devices, Inc.

2016.

> + *
> + * Author: Tom Lendacky <thomas.lenda...@amd.com>

This should be your name.

> + *
> + * This program is free software; you can redistribute it and/or modify
> + * it under the terms of the GNU General Public License version 2 as
> + * published by the Free Software Foundation.
> + */
> +

...

> +int ccp_dmaengine_register(struct ccp_device *ccp)
> +{
> + struct ccp_dma_chan *chan;
> + struct dma_device *dma_dev = &ccp->dma_dev;
> + struct dma_chan *dma_chan;
> + char dma_cache_name[MAX_DMA_NAME_LEN];

This can't be a local function variable.  You'll need to allocate
memory for the cache names and track them (or use devm_kasprintf).
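
E.g. (untested sketch):

	char *name;

	name = devm_kasprintf(ccp->dev, GFP_KERNEL,
			      "%s-dmaengine-cmd-cache", ccp->name);
	if (!name)
		return -ENOMEM;

	ccp->dma_cmd_cache = kmem_cache_create(name,
					       sizeof(struct ccp_dma_cmd),
					       sizeof(void *),
					       SLAB_HWCACHE_ALIGN, NULL);

The devm allocation ties the string's lifetime to the device, so the
name stays valid for as long as the cache can exist.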

> + unsigned int i;
> + int ret;
> +
> + ccp->ccp_dma_chan = devm_kcalloc(ccp->dev, ccp->cmd_q_count,
> +  sizeof(*(ccp->ccp_dma_chan)),
> +  GFP_KERNEL);
> + if (!ccp->ccp_dma_chan)
> + return -ENOMEM;
> +
> + snprintf(dma_cache_name, sizeof(dma_cache_name),
> +  "%s-dmaengine-cmd-cache", ccp->name);
> + ccp->dma_cmd_cache = kmem_cache_create(dma_cache_name,
> +sizeof(struct ccp_dma_cmd),
> +sizeof(void *),
> +SLAB_HWCACHE_ALIGN, NULL);
> + if (!ccp->dma_cmd_cache)
> + return -ENOMEM;
> +
> + snprintf(dma_cache_name, sizeof(dma_cache_name),
> +  "%s-dmaengine-desc-cache", ccp->name);
> + ccp->dma_desc_cache = kmem_cache_create(dma_cache_name,
> + sizeof(struct ccp_dma_desc),
> + sizeof(void *),
> + SLAB_HWCACHE_ALIGN, NULL);
> + if (!ccp->dma_desc_cache) {
> + ret = -ENOMEM;
> + goto err_cache;
> + }
> +
> + dma_dev->dev = ccp->dev;
> + dma_dev->src_addr_widths = CCP_DMA_WIDTH(dma_get_mask(ccp->dev));
> + dma_dev->dst_addr_widths = CCP_DMA_WIDTH(dma_get_mask(ccp->dev));
> + dma_dev->directions = DMA_MEM_TO_MEM;
> + dma_dev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
> + dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
> + dma_cap_set(DMA_SG, dma_dev->cap_mask);
> + dma_cap_set(DMA_INTERRUPT, dma_dev->cap_mask);
> +
> + INIT_LIST_HEAD(&dma_dev->channels);
> + for (i = 0; i < ccp->cmd_q_count; i++) {
> + chan = ccp->ccp_dma_chan + i;
> + dma_chan = &chan->dma_chan;
> +
> + chan->ccp = ccp;
> +
> + spin_lock_init(&chan->lock);
> + INIT_LIST_HEAD(&chan->pending);
> + INIT_LIST_HEAD(&chan->active);
> + INIT_LIST_HEAD(&chan->complete);
> +
> + tasklet_init(&chan->cleanup_tasklet, ccp_do_cleanup,
> +  (unsigned long)chan);
> +
> + dma_chan->device = dma_dev;
> + dma_cookie_init(dma_chan);
> +
> + list_add_tail(&dma_chan->device_node, &dma_dev->channels);
> + }
> +
> + dma_dev->device_free_chan_resources = ccp_free_chan_resources;
> + dma_dev->device_prep_dma_memcpy = ccp_prep_dma_memcpy;
> + dma_dev->device_prep_dma_sg = ccp_prep_dma_sg;
> + dma_dev->device_prep_dma_interrupt = ccp_prep_dma_interrupt;

[PATCH] MAINTAINERS: Add a new maintainer for the CCP driver

2016-03-21 Thread Tom Lendacky
Gary will be taking over future development of the CCP driver, so add
him as a co-maintainer of the driver.

Signed-off-by: Tom Lendacky <thomas.lenda...@amd.com>
---
 MAINTAINERS |1 +
 1 file changed, 1 insertion(+)

diff --git a/MAINTAINERS b/MAINTAINERS
index 30aca4a..8c42c07 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -623,6 +623,7 @@ F:  include/linux/altera_jtaguart.h
 
 AMD CRYPTOGRAPHIC COPROCESSOR (CCP) DRIVER
 M:     Tom Lendacky <thomas.lenda...@amd.com>
+M: Gary Hook <gary.h...@amd.com>
 L: linux-crypto@vger.kernel.org
 S: Supported
 F: drivers/crypto/ccp/



Re: [PATCH] crypto: ccp - Use different flag vars for nested locks

2016-03-11 Thread Tom Lendacky
On 03/11/2016 10:40 AM, Gary R Hook wrote:
> This patch fixes a coccinelle warning about reusing a flags
> variable in nested lock acquisition.
> 
> Signed-off-by: Gary R Hook <gary.h...@amd.com>

Acked-by: Tom Lendacky <thomas.lenda...@amd.com>

> ---
>  drivers/crypto/ccp/ccp-dev.c |6 +++---
>  1 file changed, 3 insertions(+), 3 deletions(-)
> 
> diff --git a/drivers/crypto/ccp/ccp-dev.c b/drivers/crypto/ccp/ccp-dev.c
> index 336e5b7..9c7bce8 100644
> --- a/drivers/crypto/ccp/ccp-dev.c
> +++ b/drivers/crypto/ccp/ccp-dev.c
> @@ -120,7 +120,7 @@ void ccp_del_device(struct ccp_device *ccp)
>  
>  static struct ccp_device *ccp_get_device(void)
>  {
> - unsigned long flags;
> + unsigned long flags, rrflags;
>   struct ccp_device *dp = NULL;
>  
>   /* We round-robin through the unit list.
> @@ -128,14 +128,14 @@ static struct ccp_device *ccp_get_device(void)
>*/
>   read_lock_irqsave(&ccp_unit_lock, flags);
>   if (!list_empty(&ccp_units)) {
> - write_lock_irqsave(&ccp_rr_lock, flags);
> + write_lock_irqsave(&ccp_rr_lock, rrflags);
>   dp = ccp_rr;
>   if (list_is_last(&ccp_rr->entry, &ccp_units))
>   ccp_rr = list_first_entry(&ccp_units, struct ccp_device,
> entry);
>   else
>   ccp_rr = list_next_entry(ccp_rr, entry);
> - write_unlock_irqrestore(&ccp_rr_lock, flags);
> + write_unlock_irqrestore(&ccp_rr_lock, rrflags);
>   }
>   read_unlock_irqrestore(&ccp_unit_lock, flags);
>  
> 


Re: [PATCH 3/4] crypto: ccp - CCP versioning support

2016-03-03 Thread Tom Lendacky
On 03/01/2016 01:49 PM, Gary R Hook wrote:
> Future hardware may introduce new algorithms wherein the
> driver will need to manage resources for different versions
> of the cryptographic coprocessor. This precursor patch
> determines the version of the available device, and marks
> and registers algorithms accordingly. A structure is added
> which manages the version-specific data.
> 
> Signed-off-by: Gary R Hook <gary.h...@amd.com>

Acked-by: Tom Lendacky <thomas.lenda...@amd.com>

> ---
>  drivers/crypto/ccp/ccp-crypto-aes.c |   12 ++-
>  drivers/crypto/ccp/ccp-crypto-sha.c |9 +++-
>  drivers/crypto/ccp/ccp-dev.c|   27 
>  drivers/crypto/ccp/ccp-dev.h|8 +++
>  drivers/crypto/ccp/ccp-pci.c|8 ++-
>  drivers/crypto/ccp/ccp-platform.c   |   39 
> +--
>  include/linux/ccp.h |   17 +++
>  7 files changed, 115 insertions(+), 5 deletions(-)


Re: [PATCH 1/4] crypto: ccp - Remove check for x86 family and model

2016-03-03 Thread Tom Lendacky
On 03/01/2016 01:48 PM, Gary R Hook wrote:
> Each x86 SoC will make use of a unique PCI ID for the CCP
> device so it is not necessary to check for the CPU family
> and model.
> 
> Signed-off-by: Gary R Hook <gary.h...@amd.com>

Acked-by: Tom Lendacky <thomas.lenda...@amd.com>

> ---
>  drivers/crypto/ccp/ccp-dev.c |   47 
> ++
>  1 file changed, 11 insertions(+), 36 deletions(-)


[PATCH] crypto: ccp - memset request context to zero during import

2016-02-25 Thread Tom Lendacky
Since a crypto_ahash_import() can be called against a request context
that has not had a crypto_ahash_init() performed, the request context
needs to be cleared to ensure there is no random data present. If not,
the random data can result in a kernel oops during crypto_ahash_update().

Cc: <sta...@vger.kernel.org> # 3.14.x-
Signed-off-by: Tom Lendacky <thomas.lenda...@amd.com>
---
 drivers/crypto/ccp/ccp-crypto-aes-cmac.c |1 +
 drivers/crypto/ccp/ccp-crypto-sha.c  |1 +
 2 files changed, 2 insertions(+)

diff --git a/drivers/crypto/ccp/ccp-crypto-aes-cmac.c 
b/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
index d095452..3d9acc5 100644
--- a/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
+++ b/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
@@ -244,6 +244,7 @@ static int ccp_aes_cmac_import(struct ahash_request *req, 
const void *in)
/* 'in' may not be aligned so memcpy to local variable */
memcpy(&state, in, sizeof(state));
 
+   memset(rctx, 0, sizeof(*rctx));
rctx->null_msg = state.null_msg;
memcpy(rctx->iv, state.iv, sizeof(rctx->iv));
rctx->buf_count = state.buf_count;
diff --git a/drivers/crypto/ccp/ccp-crypto-sha.c 
b/drivers/crypto/ccp/ccp-crypto-sha.c
index 7002c6b..8ef06fa 100644
--- a/drivers/crypto/ccp/ccp-crypto-sha.c
+++ b/drivers/crypto/ccp/ccp-crypto-sha.c
@@ -233,6 +233,7 @@ static int ccp_sha_import(struct ahash_request *req, const 
void *in)
/* 'in' may not be aligned so memcpy to local variable */
memcpy(&state, in, sizeof(state));
 
+   memset(rctx, 0, sizeof(*rctx));
rctx->type = state.type;
rctx->msg_bits = state.msg_bits;
rctx->first = state.first;



Re: Is a crypto_ahash_init required before invoking crypto_ahash_import?

2016-02-25 Thread Tom Lendacky
On 02/25/2016 04:11 PM, Herbert Xu wrote:
> On Thu, Feb 25, 2016 at 03:56:31PM -0600, Tom Lendacky wrote:
>>
>> I can fix this in the driver by doing a memset to zero of the request
>> context area during the import. But I guess I'm also wondering if there
>> is an expectation/requirement that crypto_ahash_init() be called before
>> doing an import?  If there is, then I can add that to the testmgr code
>> instead.
> 
> No init means wiping out the hash state so that a new hash can be
> computed.  Import means importing the hash state so that computation
> can be continued from that point onwards.
> 
> So they're independent of each other and you must be able to handle
> an import without an init and vice versa.

Thanks for the clarification Herbert.  I'll send in a patch to perform
the memset during the import.  I know it's late in the cycle, but will
you be able to apply it to the current cryptodev-2.6 tree which contains
the ccp import/export patches?

Thanks,
Tom

> 
> Cheers,
> 
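
In other words, a driver has to survive a sequence like this (sketch;
assumes 'state' is at least crypto_ahash_statesize() bytes):

	crypto_ahash_export(req1, state);  /* save the partial hash state */
	/* req2 was allocated but crypto_ahash_init() was never called */
	crypto_ahash_import(req2, state);  /* must still succeed */
	crypto_ahash_update(req2);         /* and continue from 'state' */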


Is a crypto_ahash_init required before invoking crypto_ahash_import?

2016-02-25 Thread Tom Lendacky
I'm seeing an issue on one system that I wasn't seeing on another
system. It turns out that the testmgr sha testing exports an ahash
request context, allocates a new ahash request context and then imports
into that new ahash request context. Since crypto_ahash_init() is not
performed the driver request context could have random data in it,
which ends up causing an error. As part of the import/export support
that I added for the ccp driver I reduced the amount of data that was
exported, but I guess I always assumed that crypto_ahash_init() would
have been called before doing a crypto_ahash_import().

I can fix this in the driver by doing a memset to zero of the request
context area during the import. But I guess I'm also wondering if there
is an expectation/requirement that crypto_ahash_init() be called before
doing an import?  If there is, then I can add that to the testmgr code
instead.

Thanks,
Tom


[PATCH v1] crypto: ccp - Don't assume export/import areas are aligned

2016-02-02 Thread Tom Lendacky
Use a local variable for the exported and imported state so that
alignment is not an issue. On export, set a local variable from the
request context and then memcpy the contents of the local variable to
the export memory area. On import, memcpy the import memory area into
a local variable and then use the local variable to set the request
context.

Cc: <sta...@vger.kernel.org> # 3.14.x-
Signed-off-by: Tom Lendacky <thomas.lenda...@amd.com>
---
 drivers/crypto/ccp/ccp-crypto-aes-cmac.c |   26 +-
 drivers/crypto/ccp/ccp-crypto-sha.c  |   36 ++
 2 files changed, 37 insertions(+), 25 deletions(-)

diff --git a/drivers/crypto/ccp/ccp-crypto-aes-cmac.c 
b/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
index 6a2d836..d095452 100644
--- a/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
+++ b/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
@@ -223,12 +223,15 @@ static int ccp_aes_cmac_digest(struct ahash_request *req)
 static int ccp_aes_cmac_export(struct ahash_request *req, void *out)
 {
struct ccp_aes_cmac_req_ctx *rctx = ahash_request_ctx(req);
-   struct ccp_aes_cmac_exp_ctx *state = out;
+   struct ccp_aes_cmac_exp_ctx state;
 
-   state->null_msg = rctx->null_msg;
-   memcpy(state->iv, rctx->iv, sizeof(state->iv));
-   state->buf_count = rctx->buf_count;
-   memcpy(state->buf, rctx->buf, sizeof(state->buf));
+   state.null_msg = rctx->null_msg;
+   memcpy(state.iv, rctx->iv, sizeof(state.iv));
+   state.buf_count = rctx->buf_count;
+   memcpy(state.buf, rctx->buf, sizeof(state.buf));
+
+   /* 'out' may not be aligned so memcpy from local variable */
+   memcpy(out, &state, sizeof(state));
 
return 0;
 }
@@ -236,12 +239,15 @@ static int ccp_aes_cmac_export(struct ahash_request *req, 
void *out)
 static int ccp_aes_cmac_import(struct ahash_request *req, const void *in)
 {
struct ccp_aes_cmac_req_ctx *rctx = ahash_request_ctx(req);
-   const struct ccp_aes_cmac_exp_ctx *state = in;
+   struct ccp_aes_cmac_exp_ctx state;
+
+   /* 'in' may not be aligned so memcpy to local variable */
+   memcpy(&state, in, sizeof(state));
 
-   rctx->null_msg = state->null_msg;
-   memcpy(rctx->iv, state->iv, sizeof(rctx->iv));
-   rctx->buf_count = state->buf_count;
-   memcpy(rctx->buf, state->buf, sizeof(rctx->buf));
+   rctx->null_msg = state.null_msg;
+   memcpy(rctx->iv, state.iv, sizeof(rctx->iv));
+   rctx->buf_count = state.buf_count;
+   memcpy(rctx->buf, state.buf, sizeof(rctx->buf));
 
return 0;
 }
diff --git a/drivers/crypto/ccp/ccp-crypto-sha.c 
b/drivers/crypto/ccp/ccp-crypto-sha.c
index a67128a..7002c6b 100644
--- a/drivers/crypto/ccp/ccp-crypto-sha.c
+++ b/drivers/crypto/ccp/ccp-crypto-sha.c
@@ -210,14 +210,17 @@ static int ccp_sha_digest(struct ahash_request *req)
 static int ccp_sha_export(struct ahash_request *req, void *out)
 {
struct ccp_sha_req_ctx *rctx = ahash_request_ctx(req);
-   struct ccp_sha_exp_ctx *state = out;
+   struct ccp_sha_exp_ctx state;
 
-   state->type = rctx->type;
-   state->msg_bits = rctx->msg_bits;
-   state->first = rctx->first;
-   memcpy(state->ctx, rctx->ctx, sizeof(state->ctx));
-   state->buf_count = rctx->buf_count;
-   memcpy(state->buf, rctx->buf, sizeof(state->buf));
+   state.type = rctx->type;
+   state.msg_bits = rctx->msg_bits;
+   state.first = rctx->first;
+   memcpy(state.ctx, rctx->ctx, sizeof(state.ctx));
+   state.buf_count = rctx->buf_count;
+   memcpy(state.buf, rctx->buf, sizeof(state.buf));
+
+   /* 'out' may not be aligned so memcpy from local variable */
+   memcpy(out, &state, sizeof(state));
 
return 0;
 }
@@ -225,14 +228,17 @@ static int ccp_sha_export(struct ahash_request *req, void 
*out)
 static int ccp_sha_import(struct ahash_request *req, const void *in)
 {
struct ccp_sha_req_ctx *rctx = ahash_request_ctx(req);
-   const struct ccp_sha_exp_ctx *state = in;
-
-   rctx->type = state->type;
-   rctx->msg_bits = state->msg_bits;
-   rctx->first = state->first;
-   memcpy(rctx->ctx, state->ctx, sizeof(rctx->ctx));
-   rctx->buf_count = state->buf_count;
-   memcpy(rctx->buf, state->buf, sizeof(rctx->buf));
+   struct ccp_sha_exp_ctx state;
+
+   /* 'in' may not be aligned so memcpy to local variable */
+   memcpy(&state, in, sizeof(state));
+
+   rctx->type = state.type;
+   rctx->msg_bits = state.msg_bits;
+   rctx->first = state.first;
+   memcpy(rctx->ctx, state.ctx, sizeof(rctx->ctx));
+   rctx->buf_count = state.buf_count;
+   memcpy(rctx->buf, state.buf, sizeof(rctx->buf));
 
return 0;
 }



Re: [PATCH v1] crypto: ccp - Limit the amount of information exported

2016-02-01 Thread Tom Lendacky
On 02/01/2016 08:35 AM, Herbert Xu wrote:
> On Fri, Jan 29, 2016 at 12:45:14PM -0600, Tom Lendacky wrote:
>> Since the exported information can be exposed to user-space, instead of
>> exporting the entire request context, only export the minimum information
>> needed.
>>
>> Cc: <sta...@vger.kernel.org> # 3.14.x-
>> Signed-off-by: Tom Lendacky <thomas.lenda...@amd.com>
>> ---
>>  drivers/crypto/ccp/ccp-crypto-aes-cmac.c |   16 +++-
>>  drivers/crypto/ccp/ccp-crypto-sha.c  |   20 +++-
>>  drivers/crypto/ccp/ccp-crypto.h  |   22 ++
>>  3 files changed, 48 insertions(+), 10 deletions(-)
>>
>> diff --git a/drivers/crypto/ccp/ccp-crypto-aes-cmac.c 
>> b/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
>> index 00207cf..6a2d836 100644
>> --- a/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
>> +++ b/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
>> @@ -223,9 +223,12 @@ static int ccp_aes_cmac_digest(struct ahash_request 
>> *req)
>>  static int ccp_aes_cmac_export(struct ahash_request *req, void *out)
>>  {
>>  struct ccp_aes_cmac_req_ctx *rctx = ahash_request_ctx(req);
>> -struct ccp_aes_cmac_req_ctx *state = out;
>> +struct ccp_aes_cmac_exp_ctx *state = out;
>>  
>> -*state = *rctx;
>> +state->null_msg = rctx->null_msg;
>> +memcpy(state->iv, rctx->iv, sizeof(state->iv));
>> +state->buf_count = rctx->buf_count;
>> +memcpy(state->buf, rctx->buf, sizeof(state->buf));
>>  
>>  return 0;
>>  }
> 
> BTW this code needs to be fixed to not assume that in/out are
> aligned.
> 

Ugh, yeah I missed that.  I'll follow up with (yet) another patch
to be alignment safe.

Thanks,
Tom

> Cheers,
> 


[PATCH v1] crypto: ccp - Limit the amount of information exported

2016-01-29 Thread Tom Lendacky
Since the exported information can be exposed to user-space, instead of
exporting the entire request context, only export the minimum information
needed.

Cc: <sta...@vger.kernel.org> # 3.14.x-
Signed-off-by: Tom Lendacky <thomas.lenda...@amd.com>
---
 drivers/crypto/ccp/ccp-crypto-aes-cmac.c |   16 +++-
 drivers/crypto/ccp/ccp-crypto-sha.c  |   20 +++-
 drivers/crypto/ccp/ccp-crypto.h  |   22 ++
 3 files changed, 48 insertions(+), 10 deletions(-)

diff --git a/drivers/crypto/ccp/ccp-crypto-aes-cmac.c 
b/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
index 00207cf..6a2d836 100644
--- a/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
+++ b/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
@@ -223,9 +223,12 @@ static int ccp_aes_cmac_digest(struct ahash_request *req)
 static int ccp_aes_cmac_export(struct ahash_request *req, void *out)
 {
struct ccp_aes_cmac_req_ctx *rctx = ahash_request_ctx(req);
-   struct ccp_aes_cmac_req_ctx *state = out;
+   struct ccp_aes_cmac_exp_ctx *state = out;
 
-   *state = *rctx;
+   state->null_msg = rctx->null_msg;
+   memcpy(state->iv, rctx->iv, sizeof(state->iv));
+   state->buf_count = rctx->buf_count;
+   memcpy(state->buf, rctx->buf, sizeof(state->buf));
 
return 0;
 }
@@ -233,9 +236,12 @@ static int ccp_aes_cmac_export(struct ahash_request *req, 
void *out)
 static int ccp_aes_cmac_import(struct ahash_request *req, const void *in)
 {
struct ccp_aes_cmac_req_ctx *rctx = ahash_request_ctx(req);
-   const struct ccp_aes_cmac_req_ctx *state = in;
+   const struct ccp_aes_cmac_exp_ctx *state = in;
 
-   *rctx = *state;
+   rctx->null_msg = state->null_msg;
+   memcpy(rctx->iv, state->iv, sizeof(rctx->iv));
+   rctx->buf_count = state->buf_count;
+   memcpy(rctx->buf, state->buf, sizeof(rctx->buf));
 
return 0;
 }
@@ -378,7 +384,7 @@ int ccp_register_aes_cmac_algs(struct list_head *head)
 
halg = &alg->halg;
halg->digestsize = AES_BLOCK_SIZE;
-   halg->statesize = sizeof(struct ccp_aes_cmac_req_ctx);
+   halg->statesize = sizeof(struct ccp_aes_cmac_exp_ctx);
 
base = &halg->base;
snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "cmac(aes)");
diff --git a/drivers/crypto/ccp/ccp-crypto-sha.c 
b/drivers/crypto/ccp/ccp-crypto-sha.c
index 3aae58d..a67128a 100644
--- a/drivers/crypto/ccp/ccp-crypto-sha.c
+++ b/drivers/crypto/ccp/ccp-crypto-sha.c
@@ -210,9 +210,14 @@ static int ccp_sha_digest(struct ahash_request *req)
 static int ccp_sha_export(struct ahash_request *req, void *out)
 {
struct ccp_sha_req_ctx *rctx = ahash_request_ctx(req);
-   struct ccp_sha_req_ctx *state = out;
+   struct ccp_sha_exp_ctx *state = out;
 
-   *state = *rctx;
+   state->type = rctx->type;
+   state->msg_bits = rctx->msg_bits;
+   state->first = rctx->first;
+   memcpy(state->ctx, rctx->ctx, sizeof(state->ctx));
+   state->buf_count = rctx->buf_count;
+   memcpy(state->buf, rctx->buf, sizeof(state->buf));
 
return 0;
 }
@@ -220,9 +225,14 @@ static int ccp_sha_export(struct ahash_request *req, void 
*out)
 static int ccp_sha_import(struct ahash_request *req, const void *in)
 {
struct ccp_sha_req_ctx *rctx = ahash_request_ctx(req);
-   const struct ccp_sha_req_ctx *state = in;
+   const struct ccp_sha_exp_ctx *state = in;
 
-   *rctx = *state;
+   rctx->type = state->type;
+   rctx->msg_bits = state->msg_bits;
+   rctx->first = state->first;
+   memcpy(rctx->ctx, state->ctx, sizeof(rctx->ctx));
+   rctx->buf_count = state->buf_count;
+   memcpy(rctx->buf, state->buf, sizeof(rctx->buf));
 
return 0;
 }
@@ -428,7 +438,7 @@ static int ccp_register_sha_alg(struct list_head *head,
 
halg = &alg->halg;
halg->digestsize = def->digest_size;
-   halg->statesize = sizeof(struct ccp_sha_req_ctx);
+   halg->statesize = sizeof(struct ccp_sha_exp_ctx);
 
base = &halg->base;
snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
diff --git a/drivers/crypto/ccp/ccp-crypto.h b/drivers/crypto/ccp/ccp-crypto.h
index 76a96f0..a326ec2 100644
--- a/drivers/crypto/ccp/ccp-crypto.h
+++ b/drivers/crypto/ccp/ccp-crypto.h
@@ -129,6 +129,15 @@ struct ccp_aes_cmac_req_ctx {
struct ccp_cmd cmd;
 };
 
+struct ccp_aes_cmac_exp_ctx {
+   unsigned int null_msg;
+
+   u8 iv[AES_BLOCK_SIZE];
+
+   unsigned int buf_count;
+   u8 buf[AES_BLOCK_SIZE];
+};
+
 /* SHA related defines */
 #define MAX_SHA_CONTEXT_SIZE   SHA256_DIGEST_SIZE
 #define MAX_SHA_BLOCK_SIZE SHA256_BLOCK_SIZE
@@ -171,6 +180,19 @@ struct ccp_sha_req_ctx {
struct ccp_cmd cmd;
 };
 
+struct ccp_sha_exp_ctx {
+   enum ccp

Re: [PATCH v1] crypto: ccp - Add hash state import and export support

2016-01-22 Thread Tom Lendacky
On 01/12/2016 11:17 AM, Tom Lendacky wrote:
> Commit 8996eafdcbad ("crypto: ahash - ensure statesize is non-zero")
> added a check to prevent ahash algorithms from successfully registering
> if the import and export functions were not implemented. This prevents
> an oops in the hash_accept function of algif_hash. This commit causes
> the ccp-crypto module SHA support and AES CMAC support from successfully
> registering and causing the ccp-crypto module load to fail because the
> ahash import and export functions are not implemented.
> 
> Update the CCP Crypto API support to provide import and export support
> for ahash algorithms.
> 
> Cc: <sta...@vger.kernel.org> # 3.14.x-
> Signed-off-by: Tom Lendacky <thomas.lenda...@amd.com>

Herbert, is it possible this patch can be part of Crypto Fixes for 4.5?

Thanks,
Tom

> ---
>  drivers/crypto/ccp/ccp-crypto-aes-cmac.c |   23 +++
>  drivers/crypto/ccp/ccp-crypto-sha.c  |   23 +++
>  2 files changed, 46 insertions(+)
> 
> diff --git a/drivers/crypto/ccp/ccp-crypto-aes-cmac.c 
> b/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
> index d89f20c..00207cf 100644
> --- a/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
> +++ b/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
> @@ -220,6 +220,26 @@ static int ccp_aes_cmac_digest(struct ahash_request *req)
>   return ccp_aes_cmac_finup(req);
>  }
>  
> +static int ccp_aes_cmac_export(struct ahash_request *req, void *out)
> +{
> + struct ccp_aes_cmac_req_ctx *rctx = ahash_request_ctx(req);
> + struct ccp_aes_cmac_req_ctx *state = out;
> +
> + *state = *rctx;
> +
> + return 0;
> +}
> +
> +static int ccp_aes_cmac_import(struct ahash_request *req, const void *in)
> +{
> + struct ccp_aes_cmac_req_ctx *rctx = ahash_request_ctx(req);
> + const struct ccp_aes_cmac_req_ctx *state = in;
> +
> + *rctx = *state;
> +
> + return 0;
> +}
> +
>  static int ccp_aes_cmac_setkey(struct crypto_ahash *tfm, const u8 *key,
>  unsigned int key_len)
>  {
> @@ -352,10 +372,13 @@ int ccp_register_aes_cmac_algs(struct list_head *head)
>   alg->final = ccp_aes_cmac_final;
>   alg->finup = ccp_aes_cmac_finup;
>   alg->digest = ccp_aes_cmac_digest;
> + alg->export = ccp_aes_cmac_export;
> + alg->import = ccp_aes_cmac_import;
>   alg->setkey = ccp_aes_cmac_setkey;
>  
>   halg = &alg->halg;
>   halg->digestsize = AES_BLOCK_SIZE;
> + halg->statesize = sizeof(struct ccp_aes_cmac_req_ctx);
>  
>   base = &halg->base;
>   snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "cmac(aes)");
> diff --git a/drivers/crypto/ccp/ccp-crypto-sha.c 
> b/drivers/crypto/ccp/ccp-crypto-sha.c
> index d14b3f2..3aae58d 100644
> --- a/drivers/crypto/ccp/ccp-crypto-sha.c
> +++ b/drivers/crypto/ccp/ccp-crypto-sha.c
> @@ -207,6 +207,26 @@ static int ccp_sha_digest(struct ahash_request *req)
>   return ccp_sha_finup(req);
>  }
>  
> +static int ccp_sha_export(struct ahash_request *req, void *out)
> +{
> + struct ccp_sha_req_ctx *rctx = ahash_request_ctx(req);
> + struct ccp_sha_req_ctx *state = out;
> +
> + *state = *rctx;
> +
> + return 0;
> +}
> +
> +static int ccp_sha_import(struct ahash_request *req, const void *in)
> +{
> + struct ccp_sha_req_ctx *rctx = ahash_request_ctx(req);
> + const struct ccp_sha_req_ctx *state = in;
> +
> + *rctx = *state;
> +
> + return 0;
> +}
> +
>  static int ccp_sha_setkey(struct crypto_ahash *tfm, const u8 *key,
> unsigned int key_len)
>  {
> @@ -403,9 +423,12 @@ static int ccp_register_sha_alg(struct list_head *head,
>   alg->final = ccp_sha_final;
>   alg->finup = ccp_sha_finup;
>   alg->digest = ccp_sha_digest;
> + alg->export = ccp_sha_export;
> + alg->import = ccp_sha_import;
>  
>   halg = &alg->halg;
>   halg->digestsize = def->digest_size;
> + halg->statesize = sizeof(struct ccp_sha_req_ctx);
>  
>   base = &halg->base;
>   snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
> 


Re: [PATCH 3/8] crypto: ccp: Use precalculated hash from headers

2015-10-22 Thread Tom Lendacky

On 10/20/2015 02:33 AM, LABBE Corentin wrote:

Precalculated hashes for empty messages are now present in the hash headers.
This patch just uses them.

Signed-off-by: LABBE Corentin <clabbe.montj...@gmail.com>


Tested-by: Tom Lendacky <thomas.lenda...@amd.com>
Acked-by: Tom Lendacky <thomas.lenda...@amd.com>


---
  drivers/crypto/ccp/ccp-ops.c | 39 ---
  1 file changed, 8 insertions(+), 31 deletions(-)

diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c
index d09c6c4..64fac2b 100644
--- a/drivers/crypto/ccp/ccp-ops.c
+++ b/drivers/crypto/ccp/ccp-ops.c
@@ -152,32 +152,6 @@ static const __be32 ccp_sha256_init[CCP_SHA_CTXSIZE / 
sizeof(__be32)] = {
cpu_to_be32(SHA256_H6), cpu_to_be32(SHA256_H7),
  };

-/* The CCP cannot perform zero-length sha operations so the caller
- * is required to buffer data for the final operation.  However, a
- * sha operation for a message with a total length of zero is valid
- * so known values are required to supply the result.
- */
-static const u8 ccp_sha1_zero[CCP_SHA_CTXSIZE] = {
-   0xda, 0x39, 0xa3, 0xee, 0x5e, 0x6b, 0x4b, 0x0d,
-   0x32, 0x55, 0xbf, 0xef, 0x95, 0x60, 0x18, 0x90,
-   0xaf, 0xd8, 0x07, 0x09, 0x00, 0x00, 0x00, 0x00,
-   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-};
-
-static const u8 ccp_sha224_zero[CCP_SHA_CTXSIZE] = {
-   0xd1, 0x4a, 0x02, 0x8c, 0x2a, 0x3a, 0x2b, 0xc9,
-   0x47, 0x61, 0x02, 0xbb, 0x28, 0x82, 0x34, 0xc4,
-   0x15, 0xa2, 0xb0, 0x1f, 0x82, 0x8e, 0xa6, 0x2a,
-   0xc5, 0xb3, 0xe4, 0x2f, 0x00, 0x00, 0x00, 0x00,
-};
-
-static const u8 ccp_sha256_zero[CCP_SHA_CTXSIZE] = {
-   0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14,
-   0x9a, 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24,
-   0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c,
-   0xa4, 0x95, 0x99, 0x1b, 0x78, 0x52, 0xb8, 0x55,
-};
-
  static u32 ccp_addr_lo(struct ccp_dma_info *info)
  {
return lower_32_bits(info->address + info->offset);
@@ -1388,18 +1362,21 @@ static int ccp_run_sha_cmd(struct ccp_cmd_queue *cmd_q, 
struct ccp_cmd *cmd)
if (sha->msg_bits)
return -EINVAL;

-   /* A sha operation for a message with a total length of zero,
-* return known result.
+   /* The CCP cannot perform zero-length sha operations so the
+* caller is required to buffer data for the final operation.
+* However, a sha operation for a message with a total length
+* of zero is valid so known values are required to supply
+* the result.
 */
switch (sha->type) {
case CCP_SHA_TYPE_1:
-   sha_zero = ccp_sha1_zero;
+   sha_zero = sha1_zero_message_hash;
break;
case CCP_SHA_TYPE_224:
-   sha_zero = ccp_sha224_zero;
+   sha_zero = sha224_zero_message_hash;
break;
case CCP_SHA_TYPE_256:
-   sha_zero = ccp_sha256_zero;
+   sha_zero = sha256_zero_message_hash;
break;
default:
return -EINVAL;




Re: [PATCH 3/8] crypto: ccp: Use precalculated hash from headers

2015-10-12 Thread Tom Lendacky

On 10/12/2015 11:53 AM, LABBE Corentin wrote:

Precalculated hashes for empty messages are now present in the hash headers.
This patch just uses them.

Signed-off-by: LABBE Corentin <clabbe.montj...@gmail.com>


Just a minor comment below.

Tested-by: Tom Lendacky <thomas.lenda...@amd.com>
Acked-by: Tom Lendacky <thomas.lenda...@amd.com>


---
  drivers/crypto/ccp/ccp-ops.c | 40 
  1 file changed, 8 insertions(+), 32 deletions(-)

diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c
index d09c6c4..3002b418 100644
--- a/drivers/crypto/ccp/ccp-ops.c
+++ b/drivers/crypto/ccp/ccp-ops.c
@@ -152,32 +152,6 @@ static const __be32 ccp_sha256_init[CCP_SHA_CTXSIZE / 
sizeof(__be32)] = {
cpu_to_be32(SHA256_H6), cpu_to_be32(SHA256_H7),
  };

-/* The CCP cannot perform zero-length sha operations so the caller
- * is required to buffer data for the final operation.  However, a
- * sha operation for a message with a total length of zero is valid
- * so known values are required to supply the result.
- */
-static const u8 ccp_sha1_zero[CCP_SHA_CTXSIZE] = {
-   0xda, 0x39, 0xa3, 0xee, 0x5e, 0x6b, 0x4b, 0x0d,
-   0x32, 0x55, 0xbf, 0xef, 0x95, 0x60, 0x18, 0x90,
-   0xaf, 0xd8, 0x07, 0x09, 0x00, 0x00, 0x00, 0x00,
-   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-};
-
-static const u8 ccp_sha224_zero[CCP_SHA_CTXSIZE] = {
-   0xd1, 0x4a, 0x02, 0x8c, 0x2a, 0x3a, 0x2b, 0xc9,
-   0x47, 0x61, 0x02, 0xbb, 0x28, 0x82, 0x34, 0xc4,
-   0x15, 0xa2, 0xb0, 0x1f, 0x82, 0x8e, 0xa6, 0x2a,
-   0xc5, 0xb3, 0xe4, 0x2f, 0x00, 0x00, 0x00, 0x00,
-};
-
-static const u8 ccp_sha256_zero[CCP_SHA_CTXSIZE] = {
-   0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14,
-   0x9a, 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24,
-   0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c,
-   0xa4, 0x95, 0x99, 0x1b, 0x78, 0x52, 0xb8, 0x55,
-};
-
  static u32 ccp_addr_lo(struct ccp_dma_info *info)
  {
return lower_32_bits(info->address + info->offset);
@@ -1388,18 +1362,20 @@ static int ccp_run_sha_cmd(struct ccp_cmd_queue *cmd_q, 
struct ccp_cmd *cmd)
if (sha->msg_bits)
return -EINVAL;

-   /* A sha operation for a message with a total length of zero,
-* return known result.
-*/
+/* The CCP cannot perform zero-length sha operations so the caller
+ * is required to buffer data for the final operation.  However, a
+ * sha operation for a message with a total length of zero is valid
+ * so known values are required to supply the result.
+ */


This comment should be indented and re-flowed to be consistent with
previous comments in this same section.

Thanks,
Tom
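
I.e. re-flowed to match, something like:

	/* The CCP cannot perform zero-length sha operations so the
	 * caller is required to buffer data for the final operation.
	 * However, a sha operation for a message with a total length
	 * of zero is valid so known values are required to supply
	 * the result.
	 */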


switch (sha->type) {
case CCP_SHA_TYPE_1:
-   sha_zero = ccp_sha1_zero;
+   sha_zero = sha1_zero_message_hash;
break;
case CCP_SHA_TYPE_224:
-   sha_zero = ccp_sha224_zero;
+   sha_zero = sha224_zero_message_hash;
break;
case CCP_SHA_TYPE_256:
-   sha_zero = ccp_sha256_zero;
+   sha_zero = sha256_zero_message_hash;
break;
default:
return -EINVAL;




[PATCH v1 0/4] crypto: ccp - CCP driver updates 2015-10-01

2015-10-01 Thread Tom Lendacky
The following patches are included in this driver update series:

- Remove the usage of BUG_ON and replace with WARN_ON and an error
  return code
- Remove unused variable
- Change references to accelerator to offload
- Use the module name in the driver structure instead of a
  descriptive name
 
This patch series is based on cryptodev-2.6.

---

Tom Lendacky (4):
  crypto: ccp - Replace BUG_ON with WARN_ON and a return code
  crypto: ccp - Remove use ACPI field
  crypto: ccp - Change references to accelerator to offload
  crypto: ccp - Use module name in driver structures


 drivers/crypto/Kconfig   |2 -
 drivers/crypto/ccp/Kconfig   |   13 ++--
 drivers/crypto/ccp/ccp-crypto-aes-cmac.c |   20 +-
 drivers/crypto/ccp/ccp-crypto-main.c |6 +-
 drivers/crypto/ccp/ccp-crypto-sha.c  |   13 
 drivers/crypto/ccp/ccp-ops.c |  108 +++---
 drivers/crypto/ccp/ccp-pci.c |2 -
 drivers/crypto/ccp/ccp-platform.c|6 --
 8 files changed, 114 insertions(+), 56 deletions(-)

-- 
Tom Lendacky


[PATCH v1 3/4] crypto: ccp - Change references to accelerator to offload

2015-10-01 Thread Tom Lendacky
The CCP is meant to be more of an offload engine than an accelerator
engine. To avoid any confusion, change references to accelerator to
offload.

Signed-off-by: Tom Lendacky <thomas.lenda...@amd.com>
---
 drivers/crypto/Kconfig |2 +-
 drivers/crypto/ccp/Kconfig |   13 ++---
 2 files changed, 7 insertions(+), 8 deletions(-)

diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index e7f24a8..ab7e3b6 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -420,7 +420,7 @@ config CRYPTO_DEV_CCP
bool "Support for AMD Cryptographic Coprocessor"
depends on ((X86 && PCI) || (ARM64 && (OF_ADDRESS || ACPI))) && 
HAS_IOMEM
help
- The AMD Cryptographic Coprocessor provides hardware support
+ The AMD Cryptographic Coprocessor provides hardware offload support
  for encryption, hashing and related operations.
 
 if CRYPTO_DEV_CCP
diff --git a/drivers/crypto/ccp/Kconfig b/drivers/crypto/ccp/Kconfig
index ae38f6b..3cd8481 100644
--- a/drivers/crypto/ccp/Kconfig
+++ b/drivers/crypto/ccp/Kconfig
@@ -5,12 +5,12 @@ config CRYPTO_DEV_CCP_DD
select HW_RANDOM
help
  Provides the interface to use the AMD Cryptographic Coprocessor
- which can be used to accelerate or offload encryption operations
- such as SHA, AES and more. If you choose 'M' here, this module
- will be called ccp.
+ which can be used to offload encryption operations such as SHA,
+ AES and more. If you choose 'M' here, this module will be called
+ ccp.
 
 config CRYPTO_DEV_CCP_CRYPTO
-   tristate "Encryption and hashing acceleration support"
+   tristate "Encryption and hashing offload support"
depends on CRYPTO_DEV_CCP_DD
default m
select CRYPTO_HASH
@@ -18,6 +18,5 @@ config CRYPTO_DEV_CCP_CRYPTO
select CRYPTO_AUTHENC
help
  Support for using the cryptographic API with the AMD Cryptographic
- Coprocessor. This module supports acceleration and offload of SHA
- and AES algorithms.  If you choose 'M' here, this module will be
- called ccp_crypto.
+ Coprocessor. This module supports offload of SHA and AES algorithms.
+ If you choose 'M' here, this module will be called ccp_crypto.



[PATCH v1 4/4] crypto: ccp - Use module name in driver structures

2015-10-01 Thread Tom Lendacky
The convention is to use the name of the module in the driver structures
that are used for registering the device. The CCP module is currently
using a descriptive name. Replace the descriptive name with the module name.

Signed-off-by: Tom Lendacky <thomas.lenda...@amd.com>
---
 drivers/crypto/ccp/ccp-pci.c  |2 +-
 drivers/crypto/ccp/ccp-platform.c |2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/crypto/ccp/ccp-pci.c b/drivers/crypto/ccp/ccp-pci.c
index af190d4..6ade02f 100644
--- a/drivers/crypto/ccp/ccp-pci.c
+++ b/drivers/crypto/ccp/ccp-pci.c
@@ -319,7 +319,7 @@ static const struct pci_device_id ccp_pci_table[] = {
 MODULE_DEVICE_TABLE(pci, ccp_pci_table);
 
 static struct pci_driver ccp_pci_driver = {
-   .name = "AMD Cryptographic Coprocessor",
+   .name = "ccp",
.id_table = ccp_pci_table,
.probe = ccp_pci_probe,
.remove = ccp_pci_remove,
diff --git a/drivers/crypto/ccp/ccp-platform.c 
b/drivers/crypto/ccp/ccp-platform.c
index 4bc73d4..8b923b7 100644
--- a/drivers/crypto/ccp/ccp-platform.c
+++ b/drivers/crypto/ccp/ccp-platform.c
@@ -225,7 +225,7 @@ MODULE_DEVICE_TABLE(of, ccp_of_match);
 
 static struct platform_driver ccp_platform_driver = {
.driver = {
-   .name = "AMD Cryptographic Coprocessor",
+   .name = "ccp",
 #ifdef CONFIG_ACPI
.acpi_match_table = ccp_acpi_match,
 #endif



[PATCH v1 1/4] crypto: ccp - Replace BUG_ON with WARN_ON and a return code

2015-10-01 Thread Tom Lendacky
Replace the usage of BUG_ON with WARN_ON and return an error.

Signed-off-by: Tom Lendacky <thomas.lenda...@amd.com>
---
 drivers/crypto/ccp/ccp-crypto-aes-cmac.c |   20 +-
 drivers/crypto/ccp/ccp-crypto-main.c |6 +-
 drivers/crypto/ccp/ccp-crypto-sha.c  |   13 
 drivers/crypto/ccp/ccp-ops.c |  108 +++---
 4 files changed, 105 insertions(+), 42 deletions(-)

diff --git a/drivers/crypto/ccp/ccp-crypto-aes-cmac.c 
b/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
index ea7e844..d89f20c 100644
--- a/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
+++ b/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
@@ -118,10 +118,19 @@ static int ccp_do_cmac_update(struct ahash_request *req, 
unsigned int nbytes,
if (rctx->buf_count) {
sg_init_one(&rctx->buf_sg, rctx->buf, rctx->buf_count);
sg = ccp_crypto_sg_table_add(&rctx->data_sg, &rctx->buf_sg);
+   if (!sg) {
+   ret = -EINVAL;
+   goto e_free;
+   }
}
 
-   if (nbytes)
+   if (nbytes) {
sg = ccp_crypto_sg_table_add(&rctx->data_sg, req->src);
+   if (!sg) {
+   ret = -EINVAL;
+   goto e_free;
+   }
+   }
 
if (need_pad) {
int pad_length = block_size - (len & (block_size - 1));
@@ -132,6 +141,10 @@ static int ccp_do_cmac_update(struct ahash_request *req, 
unsigned int nbytes,
rctx->pad[0] = 0x80;
sg_init_one(&rctx->pad_sg, rctx->pad, pad_length);
sg = ccp_crypto_sg_table_add(&rctx->data_sg, &rctx->pad_sg);
+   if (!sg) {
+   ret = -EINVAL;
+   goto e_free;
+   }
}
if (sg) {
sg_mark_end(sg);
@@ -163,6 +176,11 @@ static int ccp_do_cmac_update(struct ahash_request *req, 
unsigned int nbytes,
ret = ccp_crypto_enqueue_request(&req->base, &rctx->cmd);
 
return ret;
+
+e_free:
sg_free_table(&rctx->data_sg);
+
+   return ret;
 }
 
 static int ccp_aes_cmac_init(struct ahash_request *req)
diff --git a/drivers/crypto/ccp/ccp-crypto-main.c 
b/drivers/crypto/ccp/ccp-crypto-main.c
index bdec01e..e0380e5 100644
--- a/drivers/crypto/ccp/ccp-crypto-main.c
+++ b/drivers/crypto/ccp/ccp-crypto-main.c
@@ -305,14 +305,16 @@ struct scatterlist *ccp_crypto_sg_table_add(struct 
sg_table *table,
for (sg = table->sgl; sg; sg = sg_next(sg))
if (!sg_page(sg))
break;
-   BUG_ON(!sg);
+   if (WARN_ON(!sg))
+   return NULL;
 
for (; sg && sg_add; sg = sg_next(sg), sg_add = sg_next(sg_add)) {
sg_set_page(sg, sg_page(sg_add), sg_add->length,
sg_add->offset);
sg_last = sg;
}
-   BUG_ON(sg_add);
+   if (WARN_ON(sg_add))
+   return NULL;
 
return sg_last;
 }
diff --git a/drivers/crypto/ccp/ccp-crypto-sha.c 
b/drivers/crypto/ccp/ccp-crypto-sha.c
index 507b34e..d14b3f2 100644
--- a/drivers/crypto/ccp/ccp-crypto-sha.c
+++ b/drivers/crypto/ccp/ccp-crypto-sha.c
@@ -107,7 +107,15 @@ static int ccp_do_sha_update(struct ahash_request *req, 
unsigned int nbytes,
 
sg_init_one(&rctx->buf_sg, rctx->buf, rctx->buf_count);
sg = ccp_crypto_sg_table_add(&rctx->data_sg, &rctx->buf_sg);
+   if (!sg) {
+   ret = -EINVAL;
+   goto e_free;
+   }
sg = ccp_crypto_sg_table_add(&rctx->data_sg, req->src);
+   if (!sg) {
+   ret = -EINVAL;
+   goto e_free;
+   }
sg_mark_end(sg);
 
sg = rctx->data_sg.sgl;
@@ -142,6 +150,11 @@ static int ccp_do_sha_update(struct ahash_request *req, 
unsigned int nbytes,
	ret = ccp_crypto_enqueue_request(&req->base, &rctx->cmd);
 
return ret;
+
+e_free:
+   sg_free_table(&rctx->data_sg);
+
+   return ret;
 }
 
 static int ccp_sha_init(struct ahash_request *req)
diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c
index d09c6c4..c6e883b 100644
--- a/drivers/crypto/ccp/ccp-ops.c
+++ b/drivers/crypto/ccp/ccp-ops.c
@@ -611,15 +611,16 @@ static void ccp_get_dm_area(struct ccp_dm_workarea *wa, 
unsigned int wa_offset,
 1);
 }
 
-static void ccp_reverse_set_dm_area(struct ccp_dm_workarea *wa,
-   struct scatterlist *sg,
-   unsigned int len, unsigned int se_len,
-   bool sign_extend)
+static int ccp_reverse_set_dm_area(struct ccp_dm_workarea *wa,
+  struct scatterlist *sg,
+  unsigned int len, unsigned int se_len,
+  bool sign_extend)

Re: [PATCH v2 5/8] lib: introduce sg_nents_len_chained

2015-09-18 Thread Tom Lendacky

On 09/18/2015 07:57 AM, LABBE Corentin wrote:

Some drivers use a modified version of sg_nents_for_len with an
additional parameter bool *chained for knowing if the scatterlist is
chained or not.

So, to remove duplicate code, add sg_nents_len_chained to
lib/scatterlist.c

Signed-off-by: LABBE Corentin 
---
  include/linux/scatterlist.h |  1 +
  lib/scatterlist.c   | 40 
  2 files changed, 41 insertions(+)

diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h
index 556ec1e..594cdb0 100644
--- a/include/linux/scatterlist.h
+++ b/include/linux/scatterlist.h
@@ -243,6 +243,7 @@ static inline void *sg_virt(struct scatterlist *sg)

  int sg_nents(struct scatterlist *sg);
  int sg_nents_for_len(struct scatterlist *sg, u64 len);
+int sg_nents_len_chained(struct scatterlist *sg, u64 len, bool *chained);
  struct scatterlist *sg_next(struct scatterlist *);
  struct scatterlist *sg_last(struct scatterlist *s, unsigned int);
  void sg_init_table(struct scatterlist *, unsigned int);
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index bafa993..070e396 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -90,6 +90,46 @@ int sg_nents_for_len(struct scatterlist *sg, u64 len)
  EXPORT_SYMBOL(sg_nents_for_len);

  /**
+ * sg_nents_len_chained - return total count of entries in scatterlist
+ *needed to satisfy the supplied length
+ * @sg:		The scatterlist
+ * @len:	The total required length
+ * @chained:	A pointer where to store whether the SG is chained or not
+ *
+ * Description:
+ * Determines the number of entries in sg that are required to meet
+ * the supplied length, taking into account chaining as well.
+ * If the scatterlist is chained, set *chained to true.
+ *
+ * Returns:
+ *   the number of sg entries needed, negative error on failure
+ *
+ **/
+int sg_nents_len_chained(struct scatterlist *sg, u64 len, bool *chained)
+{
+   int nents;
+   u64 total;
+
+   if (chained)
+   *chained = false;
+
+   if (!len)
+   return 0;
+
+   for (nents = 0, total = 0; sg; sg = sg_next(sg)) {
+   nents++;
+   total += sg->length;
+   if (!sg_is_last(sg) && (sg + 1)->length == 0 && chained)


Wouldn't it be better to use the sg_is_chain macro to determine if
the entry is chained instead of checking the length?
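
For reference, a rough sketch of how the loop body might read with that
macro (adapted from the patch above, illustrative only):

	for (nents = 0, total = 0; sg; sg = sg_next(sg)) {
		nents++;
		total += sg->length;
		/* sg_next() follows chain links transparently, so peek at
		 * the raw next slot to see whether it is a chain entry. */
		if (!sg_is_last(sg) && sg_is_chain(sg + 1) && chained)
			*chained = true;
		if (total >= len)
			return nents;
	}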

Thanks,
Tom


+   *chained = true;
+   if (total >= len)
+   return nents;
+   }
+
+   return -EINVAL;
+}
+EXPORT_SYMBOL(sg_nents_len_chained);
+
+/**
   * sg_last - return the last scatterlist entry in a list
   * @sgl:  First entry in the scatterlist
   * @nents:Number of entries in the scatterlist




[PATCH v1 1/2] scatterlist: introduce sg_nents_for_len

2015-06-01 Thread Tom Lendacky
When performing a dma_map_sg() call, the number of sg entries to map is
required. Using sg_nents to retrieve the number of sg entries will
return the total number of entries in the sg list up to the entry marked
as the end. If there happen to be unused entries in the list, these will
still be counted. Some dma_map_sg() implementations will not handle the
unused entries correctly (lib/swiotlb.c) and execute a BUG_ON.

The sg_nents_for_len() function will traverse the sg list and return the
number of entries required to satisfy the supplied length argument. This
can then be supplied to the dma_map_sg() call to successfully map the
sg.
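
As a minimal sketch of the intended call pattern (the device and list
variables here are assumed to come from the driver's own context):

	/* Count only the entries needed for len, then map exactly those. */
	int nents = sg_nents_for_len(sg, len);

	if (nents < 0)
		return nents;		/* list shorter than len */

	nents = dma_map_sg(dev, sg, nents, DMA_TO_DEVICE);
	if (!nents)
		return -ENOMEM;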

Signed-off-by: Tom Lendacky thomas.lenda...@amd.com
---
 include/linux/scatterlist.h |1 +
 lib/scatterlist.c   |   32 
 2 files changed, 33 insertions(+)

diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h
index ed8f9e7..a0edb99 100644
--- a/include/linux/scatterlist.h
+++ b/include/linux/scatterlist.h
@@ -221,6 +221,7 @@ static inline void *sg_virt(struct scatterlist *sg)
 }
 
 int sg_nents(struct scatterlist *sg);
+int sg_nents_for_len(struct scatterlist *sg, u64 len);
 struct scatterlist *sg_next(struct scatterlist *);
 struct scatterlist *sg_last(struct scatterlist *s, unsigned int);
 void sg_init_table(struct scatterlist *, unsigned int);
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index c9f2e8c..99fbc2f 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -56,6 +56,38 @@ int sg_nents(struct scatterlist *sg)
 }
 EXPORT_SYMBOL(sg_nents);
 
+/**
+ * sg_nents_for_len - return total count of entries in scatterlist
+ *needed to satisfy the supplied length
+ * @sg:		The scatterlist
+ * @len:	The total required length
+ *
+ * Description:
+ * Determines the number of entries in sg that are required to meet
+ * the supplied length, taking into account chaining as well.
+ *
+ * Returns:
+ *   the number of sg entries needed, negative error on failure
+ *
+ **/
+int sg_nents_for_len(struct scatterlist *sg, u64 len)
+{
+   int nents;
+   u64 total;
+
+   if (!len)
+   return 0;
+
+   for (nents = 0, total = 0; sg; sg = sg_next(sg)) {
+   nents++;
+   total += sg->length;
+   if (total >= len)
+   return nents;
+   }
+
+   return -EINVAL;
+}
+EXPORT_SYMBOL(sg_nents_for_len);
 
 /**
  * sg_last - return the last scatterlist entry in a list



[PATCH v1 2/2] crypto: ccp - Protect against poorly marked end of sg list

2015-06-01 Thread Tom Lendacky
Scatter gather lists can be created with more available entries than are
actually used (e.g. using sg_init_table() to reserve a specific number
of sg entries, but in actuality using something less than that based on
the data length).  The caller sometimes fails to mark the last entry
with sg_mark_end().  In these cases, sg_nents() will return the original
size of the sg list as opposed to the actual number of sg entries that
contain valid data.

On arm64, if the sg_nents() value is used in a call to dma_map_sg() in
this situation, then it causes a BUG_ON in lib/swiotlb.c because an
empty sg list entry results in dma_capable() returning false and
swiotlb trying to create a bounce buffer of size 0. This occurred in
the userspace crypto interface before being fixed by

0f477b655a52 (crypto: algif - Mark sgl end at the end of data)

Protect against this by using the new sg_nents_for_len() function which
returns only the number of sg entries required to meet the desired
length and supplying that value to dma_map_sg().
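
To make the failure mode concrete, here is a hedged sketch of how such a
list arises (the buffers buf0/buf1 are hypothetical):

	struct scatterlist sg[16];

	sg_init_table(sg, 16);		/* end marker lands on sg[15] */
	sg_set_buf(&sg[0], buf0, 4096);
	sg_set_buf(&sg[1], buf1, 512);
	/* missing: sg_mark_end(&sg[1]); so sg_nents() still returns 16
	 * and the 14 empty entries reach dma_map_sg()/swiotlb. */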

Signed-off-by: Tom Lendacky thomas.lenda...@amd.com
---
 drivers/crypto/ccp/ccp-ops.c |7 +--
 1 file changed, 5 insertions(+), 2 deletions(-)

diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c
index 542453c..d09c6c4 100644
--- a/drivers/crypto/ccp/ccp-ops.c
+++ b/drivers/crypto/ccp/ccp-ops.c
@@ -52,7 +52,7 @@ struct ccp_dm_workarea {
 
 struct ccp_sg_workarea {
struct scatterlist *sg;
-   unsigned int nents;
+   int nents;
 
struct scatterlist *dma_sg;
struct device *dma_dev;
@@ -495,7 +495,10 @@ static int ccp_init_sg_workarea(struct ccp_sg_workarea 
*wa, struct device *dev,
if (!sg)
return 0;
 
-   wa->nents = sg_nents(sg);
+   wa->nents = sg_nents_for_len(sg, len);
+   if (wa->nents < 0)
+   return wa->nents;
+
	wa->bytes_left = len;
	wa->sg_used = 0;
 



Re: [PATCH v1 3/3] crypto: ccp - Protect against poorly marked end of sg list

2015-05-28 Thread Tom Lendacky

On 05/27/2015 07:36 PM, Herbert Xu wrote:

On Wed, May 27, 2015 at 09:12:02AM -0500, Tom Lendacky wrote:



The reason I'm asking is because while this patch fixes your driver
everybody else will still crash and burn should something like this
happen again.


A number of other drivers already have similar sg-count functions in
them.


Perhaps you can help abstract this into a helper that everybody can
call?


I can do that.  Something like an sg_nents_for_len() function that takes
an sg pointer and a u64 length as arguments. The function should also
return an error if the length requirement isn't satisfied.
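
Sketched as a prototype, the proposal amounts to something like this
(illustrative, not the posted patch):

	/* Return the number of sg entries needed to cover len,
	 * or a negative error if the list is too short. */
	int sg_nents_for_len(struct scatterlist *sg, u64 len);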

Thanks,
Tom



Cheers,




Re: [PATCH v1 3/3] crypto: ccp - Protect against poorly marked end of sg list

2015-05-27 Thread Tom Lendacky

On 05/27/2015 04:43 AM, Herbert Xu wrote:

Tom Lendacky thomas.lenda...@amd.com wrote:

Scatter gather lists can be created with more available entries than are
actually used (e.g. using sg_init_table() to reserve a specific number
of sg entries, but in actuality using something less than that based on
the data length).  The caller sometimes fails to mark the last entry
with sg_mark_end().  In these cases, sg_nents() will return the original
size of the sg list as opposed to the actual number of sg entries that
contain valid data.

On arm64, if the sg_nents() value is used in a call to dma_map_sg() in
this situation, then it causes a BUG_ON in lib/swiotlb.c because an
empty sg list entry results in dma_capable() returning false and
swiotlb trying to create a bounce buffer of size 0. This occurred in
the userspace crypto interface before being fixed by

0f477b655a52 (crypto: algif - Mark sgl end at the end of data)

Protect against this in the future by counting the number of sg entries
needed to meet the length requirement and supplying that value to
dma_map_sg().


Is this needed for any reason other than this bug that's already
been fixed?



I added this just to protect against any other users of the API that
may do something similar in the future (or if the user should re-use
an sg list and leave leftover sg entries in it). Since software
crypto implementations walk the sg list based on length and do not use
DMA mappings it is possible for this bug to pop up again in another
location since it is likely that the testing won't be done with
hardware crypto devices.


The reason I'm asking is because while this patch fixes your driver
everybody else will still crash and burn should something like this
happen again.


A number of other drivers already have similar sg-count functions in
them.

I'm ok if you decide that this patch shouldn't be applied. It's just
that this is typically an issue that won't be found until after the
release of a kernel rather than during the development stages.

Thanks,
Tom



Cheers,




Re: [PATCH v1 3/3] crypto: ccp - Protect against poorly marked end of sg list

2015-05-27 Thread Tom Lendacky

On 05/27/2015 04:45 AM, Herbert Xu wrote:

On Wed, May 27, 2015 at 05:43:05PM +0800, Herbert Xu wrote:

Tom Lendacky thomas.lenda...@amd.com wrote:

Scatter gather lists can be created with more available entries than are
actually used (e.g. using sg_init_table() to reserve a specific number
of sg entries, but in actuality using something less than that based on
the data length).  The caller sometimes fails to mark the last entry
with sg_mark_end().  In these cases, sg_nents() will return the original
size of the sg list as opposed to the actual number of sg entries that
contain valid data.

On arm64, if the sg_nents() value is used in a call to dma_map_sg() in
this situation, then it causes a BUG_ON in lib/swiotlb.c because an
empty sg list entry results in dma_capable() returning false and
swiotlb trying to create a bounce buffer of size 0. This occurred in
the userspace crypto interface before being fixed by

0f477b655a52 (crypto: algif - Mark sgl end at the end of data)

Protect against this in the future by counting the number of sg entries
needed to meet the length requirement and supplying that value to
dma_map_sg().


Is this needed for any reason other than this bug that's already
been fixed?


Could this be needed if you have a properly marked SG list say of
100 bytes but len is only 10 bytes?


I don't think that situation matters because the DMA mapping should
succeed just fine at 100 bytes even if only needing/using 10 bytes.

Thanks,
Tom



Cheers,




[PATCH v1 0/3] crypto: ccp - CCP driver updates 2015-05-26

2015-05-26 Thread Tom Lendacky
The following patches are included in this driver update series:

- Remove the checking and setting of the device dma_mask field
- Remove an unused field from a structure to help avoid any confusion
- Protect against poorly marked end of scatter-gather list
 
This patch series is based on cryptodev-2.6.

---

Tom Lendacky (3):
  crypto: ccp - Remove manual check and set of dma_mask pointer
  crypto: ccp - Remove unused structure field
  crypto: ccp - Protect against poorly marked end of sg list


 drivers/crypto/ccp/ccp-ops.c  |   20 +---
 drivers/crypto/ccp/ccp-platform.c |2 --
 2 files changed, 17 insertions(+), 5 deletions(-)

-- 
Tom Lendacky


[PATCH v1 2/3] crypto: ccp - Remove unused structure field

2015-05-26 Thread Tom Lendacky
Remove the length field from the ccp_sg_workarea since it is unused.

Signed-off-by: Tom Lendacky thomas.lenda...@amd.com
---
 drivers/crypto/ccp/ccp-ops.c |2 --
 1 file changed, 2 deletions(-)

diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c
index 71f2e3c..542453c 100644
--- a/drivers/crypto/ccp/ccp-ops.c
+++ b/drivers/crypto/ccp/ccp-ops.c
@@ -53,7 +53,6 @@ struct ccp_dm_workarea {
 struct ccp_sg_workarea {
struct scatterlist *sg;
unsigned int nents;
-   unsigned int length;
 
struct scatterlist *dma_sg;
struct device *dma_dev;
@@ -497,7 +496,6 @@ static int ccp_init_sg_workarea(struct ccp_sg_workarea *wa, 
struct device *dev,
return 0;
 
	wa->nents = sg_nents(sg);
-   wa->length = sg->length;
	wa->bytes_left = len;
	wa->sg_used = 0;
 



[PATCH v1 1/3] crypto: ccp - Remove manual check and set of dma_mask pointer

2015-05-26 Thread Tom Lendacky
The underlying device support will set the device dma_mask pointer
if DMA is set up properly for the device.  Remove the check for and
assignment of dma_mask when it is null. Instead, just error out if
the dma_set_mask_and_coherent function fails because dma_mask is null.

Signed-off-by: Tom Lendacky thomas.lenda...@amd.com
---
 drivers/crypto/ccp/ccp-platform.c |2 --
 1 file changed, 2 deletions(-)

diff --git a/drivers/crypto/ccp/ccp-platform.c 
b/drivers/crypto/ccp/ccp-platform.c
index b1c20b2..c0aa5c5 100644
--- a/drivers/crypto/ccp/ccp-platform.c
+++ b/drivers/crypto/ccp/ccp-platform.c
@@ -174,8 +174,6 @@ static int ccp_platform_probe(struct platform_device *pdev)
}
	ccp->io_regs = ccp->io_map;
 
-   if (!dev->dma_mask)
-   dev->dma_mask = &dev->coherent_dma_mask;
	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
	if (ret) {
		dev_err(dev, "dma_set_mask_and_coherent failed (%d)\n", ret);



[PATCH v1 3/3] crypto: ccp - Protect against poorly marked end of sg list

2015-05-26 Thread Tom Lendacky
Scatter gather lists can be created with more available entries than are
actually used (e.g. using sg_init_table() to reserve a specific number
of sg entries, but in actuality using something less than that based on
the data length).  The caller sometimes fails to mark the last entry
with sg_mark_end().  In these cases, sg_nents() will return the original
size of the sg list as opposed to the actual number of sg entries that
contain valid data.

On arm64, if the sg_nents() value is used in a call to dma_map_sg() in
this situation, then it causes a BUG_ON in lib/swiotlb.c because an
empty sg list entry results in dma_capable() returning false and
swiotlb trying to create a bounce buffer of size 0. This occurred in
the userspace crypto interface before being fixed by

0f477b655a52 (crypto: algif - Mark sgl end at the end of data)

Protect against this in the future by counting the number of sg entries
needed to meet the length requirement and supplying that value to
dma_map_sg().

Signed-off-by: Tom Lendacky thomas.lenda...@amd.com
---
 drivers/crypto/ccp/ccp-ops.c |   18 +-
 1 file changed, 17 insertions(+), 1 deletion(-)

diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c
index 542453c..8377ed6 100644
--- a/drivers/crypto/ccp/ccp-ops.c
+++ b/drivers/crypto/ccp/ccp-ops.c
@@ -477,6 +477,22 @@ static u32 ccp_gen_jobid(struct ccp_device *ccp)
	return atomic_inc_return(&ccp->current_id) & CCP_JOBID_MASK;
 }
 
+static int ccp_sg_nents(struct scatterlist *sg, u64 len)
+{
+   int nents = 0;
+
+   while (sg && len) {
+   nents++;
+   if (sg->length > len)
+   break;
+
+   len -= sg->length;
+   sg = sg_next(sg);
+   }
+
+   return nents;
+}
+
 static void ccp_sg_free(struct ccp_sg_workarea *wa)
 {
	if (wa->dma_count)
@@ -495,7 +511,7 @@ static int ccp_init_sg_workarea(struct ccp_sg_workarea *wa, 
struct device *dev,
if (!sg)
return 0;
 
-   wa->nents = sg_nents(sg);
+   wa->nents = ccp_sg_nents(sg, len);
	wa->bytes_left = len;
	wa->sg_used = 0;
 



Re: [Linaro-acpi] [V2 PATCH 2/5] arm64 : Introduce support for ACPI _CCA object

2015-05-05 Thread Tom Lendacky

On 05/05/2015 11:13 AM, Suravee Suthikulanit wrote:

On 5/5/2015 11:12 AM, Arnd Bergmann wrote:

On Tuesday 05 May 2015 11:09:38 Suravee Suthikulanit wrote:


However, code in several places makes use of dma_map_ops without
checking if the ops are NULL (i.e.
include/asm-generic/dma-mapping-common.h and in arch-specific
implementation). If setting it to NULL is what we are planning to
support, we would need to scrub the current code to put NULL check.
Also, would you consider if that is safe to do going forward?




I mean the dma_mask pointer, not dma_map_ops.


Except a lot of drivers will actually set the dma_mask pointer during
probe (usually by setting dev->dma_mask = &dev->coherent_dma_mask or by
calling dma_coerce_mask_and_coherent).  So I think the dummy_dma_ops
might be the safest way to go.
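
For instance, the probe-time idiom being described looks roughly like
this (a generic sketch, not tied to any one driver):

	/* Point dma_mask at the coherent mask if the bus code left it
	 * NULL, then negotiate the actual mask. */
	if (!dev->dma_mask)
		dev->dma_mask = &dev->coherent_dma_mask;

	if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48)))
		dev_warn(dev, "no suitable DMA available\n");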

Thanks,
Tom



Arnd



Ah, got it. Sorry for confusion.

Suravee



Re: [PATCH 4/4] crypto: talitos - add software backlog queue handling

2015-03-13 Thread Tom Lendacky

On 03/13/2015 12:16 PM, Horia Geanta wrote:

I was running into situations where the hardware FIFO was filling up, and
the code was returning EAGAIN to dm-crypt and just dropping the submitted
crypto request.

This adds support in talitos for a software backlog queue. When requests
can't be queued to the hardware immediately EBUSY is returned. The queued
requests are dispatched to the hardware in received order as hardware FIFO
slots become available.
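
For context, a hedged sketch of what a caller sees under the new
semantics ("done" and "encrypt_done" are hypothetical names):

	/* With MAY_BACKLOG set, a full FIFO now returns -EBUSY (request
	 * queued in software) instead of dropping it with -EAGAIN. */
	ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
					encrypt_done, &done);
	ret = crypto_ablkcipher_encrypt(req);
	if (ret == -EBUSY)
		wait_for_completion(&done);	/* backlogged, runs later */
	else if (ret && ret != -EINPROGRESS)
		pr_err("encrypt failed: %d\n", ret);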

Signed-off-by: Martin Hicks m...@bork.org
Signed-off-by: Horia Geanta horia.gea...@freescale.com
---
  drivers/crypto/talitos.c | 107 +--
  drivers/crypto/talitos.h |   2 +
  2 files changed, 97 insertions(+), 12 deletions(-)

diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index c184987dfcc7..d4679030d23c 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -197,23 +197,41 @@ static struct talitos_request *to_talitos_req(struct 
crypto_async_request *areq)
}
  }

-int talitos_submit(struct device *dev, int ch,
-  struct crypto_async_request *areq)
+/*
+ * Enqueue to HW queue a request, coming either from upper layer or taken from
+ * SW queue. When drawing from SW queue, check if there are backlogged requests
+ * and notify their producers.
+ */
+int __talitos_handle_queue(struct device *dev, int ch,
+  struct crypto_async_request *areq,
+  unsigned long *irq_flags)
  {
struct talitos_private *priv = dev_get_drvdata(dev);
struct talitos_request *request;
-   unsigned long flags;
int head;

-   spin_lock_irqsave(&priv->chan[ch].head_lock, flags);
-
	if (!atomic_inc_not_zero(&priv->chan[ch].submit_count)) {
		/* h/w fifo is full */
-   spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);
-   return -EAGAIN;
+   if (!areq)
+   return -EBUSY;
+
+   /* Try to backlog request (if allowed) */
+   return crypto_enqueue_request(&priv->chan[ch].queue, areq);


I'd remembered something about how hardware drivers should use their
own list element for queuing, searched back and found this:

http://marc.info/?l=linux-crypto-vgerm=137609769605139w=2

Thanks,
Tom


}

-   head = priv->chan[ch].head;
+   if (!areq) {
+   struct crypto_async_request *backlog =
+   crypto_get_backlog(&priv->chan[ch].queue);
+
+   /* Dequeue the oldest request */
+   areq = crypto_dequeue_request(&priv->chan[ch].queue);
+   if (!areq)
+   return 0;
+
+   /* Mark a backlogged request as in-progress */
+   if (backlog)
+   backlog->complete(backlog, -EINPROGRESS);
+   }

request = to_talitos_req(areq);
if (IS_ERR(request))
@@ -224,6 +242,7 @@ int talitos_submit(struct device *dev, int ch,
   DMA_BIDIRECTIONAL);

/* increment fifo head */
+   head = priv->chan[ch].head;
	priv->chan[ch].head = (priv->chan[ch].head + 1) & (priv->fifo_len - 1);

smp_wmb();
@@ -236,14 +255,66 @@ int talitos_submit(struct device *dev, int ch,
	out_be32(priv->chan[ch].reg + TALITOS_FF_LO,
		 lower_32_bits(request->dma_desc));

+   return -EINPROGRESS;
+}
+
+int talitos_submit(struct device *dev, int ch,
+  struct crypto_async_request *areq)
+{
+   struct talitos_private *priv = dev_get_drvdata(dev);
+   unsigned long flags;
+   int ret;
+
+   spin_lock_irqsave(&priv->chan[ch].head_lock, flags);
+
+   /*
+* Hidden assumption: we maintain submission order separately for
+* requests that may be backlogged and those that may not. For e.g. even
+* if SW queue has some requests, we won't drop an incoming request that
+* may not be backlogged, but enqueue it in the HW queue (in front of
+* pending ones).
+*/
+   if (areq->flags & CRYPTO_TFM_REQ_MAY_BACKLOG &&
+   priv->chan[ch].queue.qlen) {
+   /*
+* There are pending requests in the SW queue. Since we want to
+* maintain the order of requests, we cannot enqueue in the HW
+* queue. Thus put this new request in SW queue and dispatch
+* the oldest backlogged request to the hardware.
+*/
+   ret = crypto_enqueue_request(&priv->chan[ch].queue, areq);
+   __talitos_handle_queue(dev, ch, NULL, &flags);
+   } else {
+   ret = __talitos_handle_queue(dev, ch, areq, flags);
+   }
+
	spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);

-   return -EINPROGRESS;
+   return ret;
  }
  EXPORT_SYMBOL(talitos_submit);

+static void talitos_handle_queue(struct device *dev, int ch)
+{
+   struct talitos_private *priv = dev_get_drvdata(dev);

Re: AF_ALG interface not marking the end of the scatter-gather list

2015-02-13 Thread Tom Lendacky

On 02/13/2015 05:43 AM, Stephan Mueller wrote:

On Thursday, 12 February 2015, 17:41:59, Tom Lendacky wrote:

Hi Tom,


I was doing some testing of the CCP driver using the AF_ALG interface
and encountered a BUG_ON statement during scatter-gather DMA mapping.

In algif_skcipher.c, before submitting a request to the Crypto API
the input sg list is not updated to mark the last valid sg entry of the
input data. So even if there is only a single valid sg entry, sg_nents
returns 127 (the initial value used when creating the sg table).

In the CCP driver, when making a call to dma_map_sg I supply the number
of entries as returned by sg_nents. During this call, the sg elements
that are not valid cause a BUG_ON statement to be hit.

I've worked around the issue in skcipher_recvmsg by marking the last
valid sg entry (sg_mark_end(sgl->sg + sgl->cur - 1)) just before the
call to ablkcipher_request_set_crypt and then unmarking the entry after
the return from af_alg_wait_for_completion (using sg_unmark_end).

Is this an appropriate/valid solution for this issue?  If so, I can
submit a patch with the fix in algif_skcipher and algif_hash.


There has been a patch around this issue -- see patch
0f477b655a524515ec9a263d70d51f460c05a161


Thanks for the pointer Stephan.  I had been working with the main
kernel tree where this patch hasn't been merged yet.

Thanks,
Tom



Thanks,
Tom






AF_ALG interface not marking the end of the scatter-gather list

2015-02-12 Thread Tom Lendacky

I was doing some testing of the CCP driver using the AF_ALG interface
and encountered a BUG_ON statement during scatter-gather DMA mapping.

In algif_skcipher.c, before submitting a request to the Crypto API
the input sg list is not updated to mark the last valid sg entry of the
input data. So even if there is only a single valid sg entry, sg_nents
returns 127 (the initial value used when creating the sg table).

In the CCP driver, when making a call to dma_map_sg I supply the number
of entries as returned by sg_nents. During this call, the sg elements
that are not valid cause a BUG_ON statement to be hit.

I've worked around the issue in skcipher_recvmsg by marking the last
valid sg entry (sg_mark_end(sgl->sg + sgl->cur - 1)) just before the
call to ablkcipher_request_set_crypt and then unmarking the entry after
the return from af_alg_wait_for_completion (using sg_unmark_end).
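
In code, the workaround reads roughly as follows (a sketch against the
algif_skcipher of that era; the surrounding context is elided and the
field names are shown illustratively):

	/* Terminate the list for the duration of the operation,
	 * then undo it so the sgl can keep being appended to. */
	sg_mark_end(sgl->sg + sgl->cur - 1);

	ablkcipher_request_set_crypt(&ctx->req, sgl->sg, ctx->rsgl.sg,
				     used, ctx->iv);
	err = af_alg_wait_for_completion(
			ctx->enc ? crypto_ablkcipher_encrypt(&ctx->req)
				 : crypto_ablkcipher_decrypt(&ctx->req),
			&ctx->completion);

	sg_unmark_end(sgl->sg + sgl->cur - 1);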

Is this an appropriate/valid solution for this issue?  If so, I can
submit a patch with the fix in algif_skcipher and algif_hash.

Thanks,
Tom


[PATCH v1 4/5] crypto: ccp - Convert calls to their devm_ counterparts

2015-02-03 Thread Tom Lendacky
Where applicable, convert calls to their devm_ counterparts, e.g. kzalloc
to devm_kzalloc.

Signed-off-by: Tom Lendacky thomas.lenda...@amd.com
---
 drivers/crypto/ccp/ccp-dev.c  |2 +-
 drivers/crypto/ccp/ccp-pci.c  |   19 +--
 drivers/crypto/ccp/ccp-platform.c |   11 +++
 3 files changed, 9 insertions(+), 23 deletions(-)

diff --git a/drivers/crypto/ccp/ccp-dev.c b/drivers/crypto/ccp/ccp-dev.c
index 68c637a..861bacc 100644
--- a/drivers/crypto/ccp/ccp-dev.c
+++ b/drivers/crypto/ccp/ccp-dev.c
@@ -295,7 +295,7 @@ struct ccp_device *ccp_alloc_struct(struct device *dev)
 {
struct ccp_device *ccp;
 
-   ccp = kzalloc(sizeof(*ccp), GFP_KERNEL);
+   ccp = devm_kzalloc(dev, sizeof(*ccp), GFP_KERNEL);
if (!ccp)
return NULL;
	ccp->dev = dev;
diff --git a/drivers/crypto/ccp/ccp-pci.c b/drivers/crypto/ccp/ccp-pci.c
index 1980f77..af190d4 100644
--- a/drivers/crypto/ccp/ccp-pci.c
+++ b/drivers/crypto/ccp/ccp-pci.c
@@ -174,11 +174,10 @@ static int ccp_pci_probe(struct pci_dev *pdev, const 
struct pci_device_id *id)
if (!ccp)
goto e_err;
 
-   ccp_pci = kzalloc(sizeof(*ccp_pci), GFP_KERNEL);
-   if (!ccp_pci) {
-   ret = -ENOMEM;
-   goto e_free1;
-   }
+   ccp_pci = devm_kzalloc(dev, sizeof(*ccp_pci), GFP_KERNEL);
+   if (!ccp_pci)
+   goto e_err;
+
	ccp->dev_specific = ccp_pci;
	ccp->get_irq = ccp_get_irqs;
	ccp->free_irq = ccp_free_irqs;
@@ -186,7 +185,7 @@ static int ccp_pci_probe(struct pci_dev *pdev, const struct 
pci_device_id *id)
	ret = pci_request_regions(pdev, "ccp");
	if (ret) {
		dev_err(dev, "pci_request_regions failed (%d)\n", ret);
-   goto e_free2;
+   goto e_err;
}
 
ret = pci_enable_device(pdev);
@@ -239,12 +238,6 @@ e_device:
 e_regions:
pci_release_regions(pdev);
 
-e_free2:
-   kfree(ccp_pci);
-
-e_free1:
-   kfree(ccp);
-
 e_err:
	dev_notice(dev, "initialization failed\n");
return ret;
@@ -266,8 +259,6 @@ static void ccp_pci_remove(struct pci_dev *pdev)
 
pci_release_regions(pdev);
 
-   kfree(ccp);
-
	dev_notice(dev, "disabled\n");
 }
 
diff --git a/drivers/crypto/ccp/ccp-platform.c 
b/drivers/crypto/ccp/ccp-platform.c
index 04265a3..20661f0 100644
--- a/drivers/crypto/ccp/ccp-platform.c
+++ b/drivers/crypto/ccp/ccp-platform.c
@@ -103,7 +103,7 @@ static int ccp_platform_probe(struct platform_device *pdev)
	ccp->io_map = devm_ioremap_resource(dev, ior);
	if (IS_ERR(ccp->io_map)) {
		ret = PTR_ERR(ccp->io_map);
-   goto e_free;
+   goto e_err;
	}
	ccp->io_regs = ccp->io_map;
 
@@ -112,7 +112,7 @@ static int ccp_platform_probe(struct platform_device *pdev)
ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
if (ret) {
		dev_err(dev, "dma_set_mask_and_coherent failed (%d)\n", ret);
-   goto e_free;
+   goto e_err;
}
 
	if (of_property_read_bool(dev->of_node, "dma-coherent"))
@@ -124,15 +124,12 @@ static int ccp_platform_probe(struct platform_device 
*pdev)
 
ret = ccp_init(ccp);
if (ret)
-   goto e_free;
+   goto e_err;
 
	dev_notice(dev, "enabled\n");
 
return 0;
 
-e_free:
-   kfree(ccp);
-
 e_err:
	dev_notice(dev, "initialization failed\n");
return ret;
@@ -145,8 +142,6 @@ static int ccp_platform_remove(struct platform_device *pdev)
 
ccp_destroy(ccp);
 
-   kfree(ccp);
-
	dev_notice(dev, "disabled\n");
 
return 0;



[PATCH v1 1/5] crypto: ccp - Updates for checkpatch warnings/errors

2015-02-03 Thread Tom Lendacky
Changes to address warnings and errors reported by the checkpatch
script.

Signed-off-by: Tom Lendacky thomas.lenda...@amd.com
---
 drivers/crypto/ccp/ccp-crypto-aes-cmac.c |   12 +++-
 drivers/crypto/ccp/ccp-crypto-aes-xts.c  |4 +---
 drivers/crypto/ccp/ccp-crypto-aes.c  |3 +--
 drivers/crypto/ccp/ccp-crypto-main.c |5 ++---
 drivers/crypto/ccp/ccp-crypto-sha.c  |   12 +++-
 drivers/crypto/ccp/ccp-crypto.h  |3 ---
 drivers/crypto/ccp/ccp-dev.c |5 +
 drivers/crypto/ccp/ccp-dev.h |   12 
 drivers/crypto/ccp/ccp-ops.c |   24 
 drivers/crypto/ccp/ccp-pci.c |2 +-
 drivers/crypto/ccp/ccp-platform.c|1 -
 11 files changed, 36 insertions(+), 47 deletions(-)

diff --git a/drivers/crypto/ccp/ccp-crypto-aes-cmac.c 
b/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
index 8e162ad..ea7e844 100644
--- a/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
+++ b/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
@@ -23,7 +23,6 @@
 
 #include "ccp-crypto.h"
 
-
 static int ccp_aes_cmac_complete(struct crypto_async_request *async_req,
 int ret)
 {
@@ -38,11 +37,13 @@ static int ccp_aes_cmac_complete(struct 
crypto_async_request *async_req,
	if (rctx->hash_rem) {
		/* Save remaining data to buffer */
		unsigned int offset = rctx->nbytes - rctx->hash_rem;
+
		scatterwalk_map_and_copy(rctx->buf, rctx->src,
					 offset, rctx->hash_rem, 0);
		rctx->buf_count = rctx->hash_rem;
-   } else
+   } else {
		rctx->buf_count = 0;
+   }
 
	/* Update result area if supplied */
	if (req->result)
@@ -202,7 +203,7 @@ static int ccp_aes_cmac_digest(struct ahash_request *req)
 }
 
 static int ccp_aes_cmac_setkey(struct crypto_ahash *tfm, const u8 *key,
-  unsigned int key_len)
+  unsigned int key_len)
 {
struct ccp_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
struct ccp_crypto_ahash_alg *alg =
@@ -292,7 +293,8 @@ static int ccp_aes_cmac_cra_init(struct crypto_tfm *tfm)
crypto_ahash_set_reqsize(ahash, sizeof(struct ccp_aes_cmac_req_ctx));
 
	cipher_tfm = crypto_alloc_cipher("aes", 0,
-   CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
+CRYPTO_ALG_ASYNC |
+CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(cipher_tfm)) {
		pr_warn("could not load aes cipher driver\n");
return PTR_ERR(cipher_tfm);
@@ -354,7 +356,7 @@ int ccp_register_aes_cmac_algs(struct list_head *head)
ret = crypto_register_ahash(alg);
if (ret) {
		pr_err("%s ahash algorithm registration error (%d)\n",
-   base->cra_name, ret);
+  base->cra_name, ret);
kfree(ccp_alg);
return ret;
}
diff --git a/drivers/crypto/ccp/ccp-crypto-aes-xts.c 
b/drivers/crypto/ccp/ccp-crypto-aes-xts.c
index 0cc5594..52c7395 100644
--- a/drivers/crypto/ccp/ccp-crypto-aes-xts.c
+++ b/drivers/crypto/ccp/ccp-crypto-aes-xts.c
@@ -21,7 +21,6 @@
 
 #include "ccp-crypto.h"
 
-
 struct ccp_aes_xts_def {
const char *name;
const char *drv_name;
@@ -216,7 +215,6 @@ static void ccp_aes_xts_cra_exit(struct crypto_tfm *tfm)
	ctx->u.aes.tfm_ablkcipher = NULL;
 }
 
-
 static int ccp_register_aes_xts_alg(struct list_head *head,
const struct ccp_aes_xts_def *def)
 {
@@ -255,7 +253,7 @@ static int ccp_register_aes_xts_alg(struct list_head *head,
ret = crypto_register_alg(alg);
if (ret) {
		pr_err("%s ablkcipher algorithm registration error (%d)\n",
-   alg->cra_name, ret);
+  alg->cra_name, ret);
kfree(ccp_alg);
return ret;
}
diff --git a/drivers/crypto/ccp/ccp-crypto-aes.c 
b/drivers/crypto/ccp/ccp-crypto-aes.c
index e46490d..7984f91 100644
--- a/drivers/crypto/ccp/ccp-crypto-aes.c
+++ b/drivers/crypto/ccp/ccp-crypto-aes.c
@@ -22,7 +22,6 @@
 
 #include "ccp-crypto.h"
 
-
 static int ccp_aes_complete(struct crypto_async_request *async_req, int ret)
 {
struct ablkcipher_request *req = ablkcipher_request_cast(async_req);
@@ -345,7 +344,7 @@ static int ccp_register_aes_alg(struct list_head *head,
ret = crypto_register_alg(alg);
if (ret) {
		pr_err("%s ablkcipher algorithm registration error (%d)\n",
-   alg->cra_name, ret);
+  alg->cra_name, ret);
kfree(ccp_alg);
return ret;
}
diff --git a/drivers/crypto/ccp/ccp-crypto-main.c 
b/drivers/crypto/ccp/ccp-crypto-main.c
index 4d4e016..bdec01e 100644
--- a/drivers/crypto/ccp/ccp-crypto-main.c
+++ b/drivers/crypto/ccp/ccp

[PATCH v1 2/5] crypto: ccp - Update CCP build support

2015-02-03 Thread Tom Lendacky
Add HAS_IOMEM as a Kconfig dependency. Always include ccp-platform.c
in the CCP build and conditionally include ccp-pci.c.

Signed-off-by: Tom Lendacky thomas.lenda...@amd.com
---
 drivers/crypto/Kconfig  |2 +-
 drivers/crypto/ccp/Makefile |9 ++---
 2 files changed, 3 insertions(+), 8 deletions(-)

diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 2fb0fdf..b840b79 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -391,7 +391,7 @@ config CRYPTO_DEV_ATMEL_SHA
 
 config CRYPTO_DEV_CCP
	bool "Support for AMD Cryptographic Coprocessor"
-   depends on (X86 && PCI) || ARM64
+   depends on ((X86 && PCI) || ARM64) && HAS_IOMEM
default n
help
  The AMD Cryptographic Coprocessor provides hardware support
diff --git a/drivers/crypto/ccp/Makefile b/drivers/crypto/ccp/Makefile
index 7f592d8..55a1f39 100644
--- a/drivers/crypto/ccp/Makefile
+++ b/drivers/crypto/ccp/Makefile
@@ -1,11 +1,6 @@
 obj-$(CONFIG_CRYPTO_DEV_CCP_DD) += ccp.o
-ccp-objs := ccp-dev.o ccp-ops.o
-ifdef CONFIG_X86
-ccp-objs += ccp-pci.o
-endif
-ifdef CONFIG_ARM64
-ccp-objs += ccp-platform.o
-endif
+ccp-objs := ccp-dev.o ccp-ops.o ccp-platform.o
+ccp-$(CONFIG_PCI) += ccp-pci.o
 
 obj-$(CONFIG_CRYPTO_DEV_CCP_CRYPTO) += ccp-crypto.o
 ccp-crypto-objs := ccp-crypto-main.o \



[PATCH v1 0/5] crypto: ccp - CCP driver updates 2015-01-28

2015-02-03 Thread Tom Lendacky
For some reason this series never reached the mailing list... resending.

The following series of patches includes functional updates to the
driver as well as some trivial changes.

- Fix checks/warnings from checkpatch
- Update how the CCP is built (Kconfig and Makefile)
- Use dma_set_mask_and_coherent to set the DMA mask
- Use devm_ calls where appropriate
- Add ACPI support
  
This patch series is based on cryptodev-2.6.

---

Tom Lendacky (5):
  crypto: ccp - Updates for checkpatch warnings/errors
  crypto: ccp - Update CCP build support
  crypto: ccp - Use dma_set_mask_and_coherent to set DMA mask
  crypto: ccp - Convert calls to their devm_ counterparts
  crypto: ccp - Add ACPI support


 drivers/crypto/Kconfig   |2 -
 drivers/crypto/ccp/Makefile  |9 +-
 drivers/crypto/ccp/ccp-crypto-aes-cmac.c |   12 ++-
 drivers/crypto/ccp/ccp-crypto-aes-xts.c  |4 -
 drivers/crypto/ccp/ccp-crypto-aes.c  |3 -
 drivers/crypto/ccp/ccp-crypto-main.c |5 +
 drivers/crypto/ccp/ccp-crypto-sha.c  |   12 ++-
 drivers/crypto/ccp/ccp-crypto.h  |3 -
 drivers/crypto/ccp/ccp-dev.c |7 +-
 drivers/crypto/ccp/ccp-dev.h |   12 +--
 drivers/crypto/ccp/ccp-ops.c |   24 +++---
 drivers/crypto/ccp/ccp-pci.c |   21 ++
 drivers/crypto/ccp/ccp-platform.c|  111 ++
 13 files changed, 143 insertions(+), 82 deletions(-)

-- 
Tom Lendacky


[PATCH v1 3/5] crypto: ccp - Use dma_set_mask_and_coherent to set DMA mask

2015-02-03 Thread Tom Lendacky
Replace the setting of the DMA masks with the dma_set_mask_and_coherent
function call.

Signed-off-by: Tom Lendacky thomas.lenda...@amd.com
---
 drivers/crypto/ccp/ccp-platform.c |7 +--
 1 file changed, 5 insertions(+), 2 deletions(-)

diff --git a/drivers/crypto/ccp/ccp-platform.c 
b/drivers/crypto/ccp/ccp-platform.c
index 9e09c50..04265a3 100644
--- a/drivers/crypto/ccp/ccp-platform.c
+++ b/drivers/crypto/ccp/ccp-platform.c
@@ -109,8 +109,11 @@ static int ccp_platform_probe(struct platform_device *pdev)
 
	if (!dev->dma_mask)
		dev->dma_mask = &dev->coherent_dma_mask;
-   *(dev->dma_mask) = DMA_BIT_MASK(48);
-   dev->coherent_dma_mask = DMA_BIT_MASK(48);
+   ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
+   if (ret) {
+   dev_err(dev, "dma_set_mask_and_coherent failed (%d)\n", ret);
+   goto e_free;
+   }
 
	if (of_property_read_bool(dev->of_node, "dma-coherent"))
		ccp->axcache = CACHE_WB_NO_ALLOC;



[PATCH v1 5/5] crypto: ccp - Add ACPI support

2015-02-03 Thread Tom Lendacky
Add support for ACPI to the CCP platform driver.

Signed-off-by: Tom Lendacky thomas.lenda...@amd.com
---
 drivers/crypto/Kconfig|2 -
 drivers/crypto/ccp/ccp-platform.c |   96 +++--
 2 files changed, 93 insertions(+), 5 deletions(-)

diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index b840b79..7e94413 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -391,7 +391,7 @@ config CRYPTO_DEV_ATMEL_SHA
 
 config CRYPTO_DEV_CCP
	bool "Support for AMD Cryptographic Coprocessor"
-   depends on ((X86 && PCI) || ARM64) && HAS_IOMEM
+   depends on ((X86 && PCI) || (ARM64 && (OF_ADDRESS || ACPI))) && HAS_IOMEM
default n
help
  The AMD Cryptographic Coprocessor provides hardware support
diff --git a/drivers/crypto/ccp/ccp-platform.c 
b/drivers/crypto/ccp/ccp-platform.c
index 20661f0..b1c20b2 100644
--- a/drivers/crypto/ccp/ccp-platform.c
+++ b/drivers/crypto/ccp/ccp-platform.c
@@ -23,9 +23,16 @@
 #include <linux/delay.h>
 #include <linux/ccp.h>
 #include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/acpi.h>
 
 #include "ccp-dev.h"
 
+struct ccp_platform {
+   int use_acpi;
+   int coherent;
+};
+
 static int ccp_get_irq(struct ccp_device *ccp)
 {
	struct device *dev = ccp->dev;
@@ -83,10 +90,64 @@ static struct resource *ccp_find_mmio_area(struct 
ccp_device *ccp)
return NULL;
 }
 
+#ifdef CONFIG_ACPI
+static int ccp_acpi_support(struct ccp_device *ccp)
+{
+   struct ccp_platform *ccp_platform = ccp->dev_specific;
+   struct acpi_device *adev = ACPI_COMPANION(ccp->dev);
+   acpi_handle handle;
+   acpi_status status;
+   unsigned long long data;
+   int cca;
+
+   /* Retrieve the device cache coherency value */
+   handle = adev->handle;
+   do {
+   status = acpi_evaluate_integer(handle, "_CCA", NULL, &data);
+   if (!ACPI_FAILURE(status)) {
+   cca = data;
+   break;
+   }
+   } while (!ACPI_FAILURE(status));
+
+   if (ACPI_FAILURE(status)) {
+   dev_err(ccp->dev, "error obtaining acpi coherency value\n");
+   return -EINVAL;
+   }
+
+   ccp_platform->coherent = !!cca;
+
+   return 0;
+}
+#else  /* CONFIG_ACPI */
+static int ccp_acpi_support(struct ccp_device *ccp)
+{
+   return -EINVAL;
+}
+#endif
+
+#ifdef CONFIG_OF
+static int ccp_of_support(struct ccp_device *ccp)
+{
+   struct ccp_platform *ccp_platform = ccp->dev_specific;
+
+   ccp_platform->coherent = of_dma_is_coherent(ccp->dev->of_node);
+
+   return 0;
+}
+#else
+static int ccp_of_support(struct ccp_device *ccp)
+{
+   return -EINVAL;
+}
+#endif
+
 static int ccp_platform_probe(struct platform_device *pdev)
 {
struct ccp_device *ccp;
+   struct ccp_platform *ccp_platform;
	struct device *dev = &pdev->dev;
+   struct acpi_device *adev = ACPI_COMPANION(dev);
struct resource *ior;
int ret;
 
@@ -95,10 +156,16 @@ static int ccp_platform_probe(struct platform_device *pdev)
if (!ccp)
goto e_err;
 
-   ccp->dev_specific = NULL;
+   ccp_platform = devm_kzalloc(dev, sizeof(*ccp_platform), GFP_KERNEL);
+   if (!ccp_platform)
+   goto e_err;
+
+   ccp->dev_specific = ccp_platform;
	ccp->get_irq = ccp_get_irqs;
	ccp->free_irq = ccp_free_irqs;
 
+   ccp_platform->use_acpi = (!adev || acpi_disabled) ? 0 : 1;
+
ior = ccp_find_mmio_area(ccp);
	ccp->io_map = devm_ioremap_resource(dev, ior);
	if (IS_ERR(ccp->io_map)) {
@@ -115,7 +182,14 @@ static int ccp_platform_probe(struct platform_device *pdev)
goto e_err;
}
 
-   if (of_property_read_bool(dev->of_node, "dma-coherent"))
+   if (ccp_platform->use_acpi)
+   ret = ccp_acpi_support(ccp);
+   else
+   ret = ccp_of_support(ccp);
+   if (ret)
+   goto e_err;
+
+   if (ccp_platform->coherent)
		ccp->axcache = CACHE_WB_NO_ALLOC;
	else
		ccp->axcache = CACHE_NONE;
@@ -197,15 +271,29 @@ static int ccp_platform_resume(struct platform_device 
*pdev)
 }
 #endif
 
-static const struct of_device_id ccp_platform_ids[] = {
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id ccp_acpi_match[] = {
+   { "AMDI0C00", 0 },
+   { },
+};
+#endif
+
+#ifdef CONFIG_OF
+static const struct of_device_id ccp_of_match[] = {
	{ .compatible = "amd,ccp-seattle-v1a" },
{ },
 };
+#endif
 
 static struct platform_driver ccp_platform_driver = {
.driver = {
-   .name = "AMD Cryptographic Coprocessor",
-   .of_match_table = ccp_platform_ids,
+#ifdef CONFIG_ACPI
+   .acpi_match_table = ccp_acpi_match,
+#endif
+#ifdef CONFIG_OF
+   .of_match_table = ccp_of_match,
+#endif
},
.probe = ccp_platform_probe,
.remove = ccp_platform_remove

Re: [PATCH] crypto: ccp: terminate ccp_support array with empty element

2015-01-23 Thread Tom Lendacky

On 01/21/2015 09:06 AM, Andrey Ryabinin wrote:

x86_match_cpu() expects array of x86_cpu_ids terminated
with empty element.

Signed-off-by: Andrey Ryabinin a.ryabi...@samsung.com


Acked-by: Tom Lendacky thomas.lenda...@amd.com


---
  drivers/crypto/ccp/ccp-dev.c | 1 +
  1 file changed, 1 insertion(+)

diff --git a/drivers/crypto/ccp/ccp-dev.c b/drivers/crypto/ccp/ccp-dev.c
index c6e6171..ca29c12 100644
--- a/drivers/crypto/ccp/ccp-dev.c
+++ b/drivers/crypto/ccp/ccp-dev.c
@@ -583,6 +583,7 @@ bool ccp_queues_suspended(struct ccp_device *ccp)
  #ifdef CONFIG_X86
  static const struct x86_cpu_id ccp_support[] = {
{ X86_VENDOR_AMD, 22, },
+   { },
  };
  #endif





Asynchronous usage of PCOMPRESS

2014-11-10 Thread Tom Lendacky

Hi Herbert,

Is the PCOMPRESS algorithm supposed to support asynchronous
implementations? In other words, are callers expected to handle the
-EINPROGRESS or -EAGAIN return codes that can be returned by an
asynchronous implementation?

Or is it assumed that if the CRYPTO_ALG_ASYNC flag is not set then the
code path must be synchronous?  If that's the case, should the pcompress
support be updated to look for synchronous implementations if the
CRYPTO_ALG_ASYNC flag isn't set or should asynchronous implementations
provide a synchronous fallback?

Thanks,
Tom


Re: [PATCH v2 01/11] crypto: Documentation - crypto API high level spec

2014-11-03 Thread Tom Lendacky

On 11/03/2014 08:49 AM, Herbert Xu wrote:

On Mon, Nov 03, 2014 at 03:18:29PM +0100, Stephan Mueller wrote:



+ * CRYPTO_ALG_TYPE_DIGEST  Raw message digest
+ * CRYPTO_ALG_TYPE_HASHAlias for CRYPTO_ALG_TYPE_DIGEST
+ * CRYPTO_ALG_TYPE_SHASH   Synchronous multi-block hash
+ * CRYPTO_ALG_TYPE_AHASH   Asynchronous multi-block hash
+ * CRYPTO_ALG_TYPE_RNG Random Number Generation
+ * CRYPTO_ALG_TYPE_PCOMPRESS


What's that last one?


Same here.


pcompress is an enhanced version of compress allowing for piece-meal
compression/decompression rather than having to shove everything in
all at once.

Eventually pcompress should replace the compress interface once
everything is converted across.


Herbert, I was looking at adding async support for ALG_TYPE_COMPRESS
since the CCP device will support compression/decompression but only
as an everything at once invocation.  Given what you're saying about
pcompress replacing compress, would this be something you'd even
consider though?

Thanks,
Tom



Thanks,




Re: [PATCH] crypto: ccp - Check for CCP before registering crypto algs

2014-09-22 Thread Tom Lendacky

On 09/15/2014 06:47 AM, Herbert Xu wrote:

On Fri, Sep 05, 2014 at 11:49:38PM +, Scot Doyle wrote:


On Fri, 5 Sep 2014, Tom Lendacky wrote:


If the ccp is built as a built-in module, then ccp-crypto (whether
built as a module or a built-in module) will be able to load and
it will register its crypto algorithms.  If the system does not have
a CCP this will result in -ENODEV being returned whenever a command
is attempted to be queued by the registered crypto algorithms.

Add an API, ccp_present(), that checks for the presence of a CCP
on the system.  The ccp-crypto module can use this to determine if it
should register its crypto algorithms.

Reported-by: Scot Doyle lkm...@scotdoyle.com
Signed-off-by: Tom Lendacky thomas.lenda...@amd.com


Tested-by: Scot Doyle lkm...@scotdoyle.com


Patch applied.  Thanks!



Hi Herbert,

Can you push this patch into the 3.17 release?

Also, it should probably go into to the stable releases.  Is this
something that you request or should I take care of that?

Thanks,
Tom


Re: AF_ALG inadvertently disabled

2014-09-05 Thread Tom Lendacky

On 09/04/2014 07:43 PM, Scot Doyle wrote:

On a laptop without AMD's CCP, compiling 3.17-rc3 with
   # CONFIG_MODULES is not set
   CONFIG_CRYPTO_MANAGER_DISABLE_TESTS=y
   CONFIG_CRYPTO_DEV_CCP=y
   CONFIG_CRYPTO_DEV_CCP_DD=y
   # CONFIG_CRYPTO_DEV_CCP_CRYPTO is not set
the strace from a test program is
   socket(PF_ALG, SOCK_SEQPACKET, 0)   = 3
   bind(3, {sa_family=AF_ALG, sa_data=skcipher\0\0\0\0\0\0}, 88) = 0
   setsockopt(3, 0x117 /* SOL_?? */, 1, n) 
\21\220\25-\364\356\5\2019\336\366\20\273, 16) = 0
   accept(3, 0, NULL)  = 4
   sendmsg(4, {msg_name(0)=NULL, 
msg_iov(1)=[{\27\27\27\27\27\27\27\27\27\27\27\27\27\27\27\27\27\27\27\27\27\27\27\27\27\27\27\27\27\27\27\27...,
 512}], msg_controllen=64, {cmsg_len=20, cmsg_level=0x117 /* SOL_??? */, cmsg_type=, 
...}, msg_flags=0}, 0) = 512
   read(4, 
\322\322\22\25\3\3159\2052Q\356\256lA\336\245\230a\36!\343\366\26=J\231\254\211xG...,
 512) = 512


However, when compiling with
   # CONFIG_MODULES is not set
   CONFIG_CRYPTO_MANAGER_DISABLE_TESTS=y
   CONFIG_CRYPTO_DEV_CCP=y
   CONFIG_CRYPTO_DEV_CCP_DD=y
   CONFIG_CRYPTO_DEV_CCP_CRYPTO=y
the strace from the same test program is
   socket(PF_ALG, SOCK_SEQPACKET, 0)   = 3
   bind(3, {sa_family=AF_ALG, sa_data=skcipher\0\0\0\0\0\0}, 88) = 0
   setsockopt(3, 0x117 /* SOL_?? */, 1, n) 
\21\220\25-\364\356\5\2019\336\366\20\273, 16) = 0
   accept(3, 0, NULL)  = 4
   sendmsg(4, {msg_name(0)=NULL, 
msg_iov(1)=[{\27\27\27\27\27\27\27\27\27\27\27\27\27\27\27\27\27\27\27\27\27\27\27\27\27\27\27\27\27\27\27\27...,
 512}], msg_controllen=64, {cmsg_len=20, cmsg_level=0x117 /* SOL_??? */, cmsg_type=, 
...}, msg_flags=0}, 0) = 512
   read(4, 0x1f48000, 512) = -1 ENODEV (No such device)



Because ccp-crypto isn't built as a module it will register the
algorithms even if a CCP device isn't there. I'll work up a patch
that checks for the presence of the CCP and only register the
algorithms if a CCP is there.

Thanks,
Tom



cryptsetup exhibits the same behavior as the test program.




[PATCH] crypto: ccp - Check for CCP before registering crypto algs

2014-09-05 Thread Tom Lendacky
If the ccp is built as a built-in module, then ccp-crypto (whether
built as a module or a built-in module) will be able to load and
it will register its crypto algorithms.  If the system does not have
a CCP this will result in -ENODEV being returned whenever a command
is attempted to be queued by the registered crypto algorithms.

Add an API, ccp_present(), that checks for the presence of a CCP
on the system.  The ccp-crypto module can use this to determine if it
should register its crypto algorithms.

Reported-by: Scot Doyle lkm...@scotdoyle.com
Signed-off-by: Tom Lendacky thomas.lenda...@amd.com
---
 drivers/crypto/ccp/ccp-crypto-main.c |4 
 drivers/crypto/ccp/ccp-dev.c |   14 ++
 include/linux/ccp.h  |   12 
 3 files changed, 30 insertions(+)

diff --git a/drivers/crypto/ccp/ccp-crypto-main.c 
b/drivers/crypto/ccp/ccp-crypto-main.c
index 20dc848..4d4e016 100644
--- a/drivers/crypto/ccp/ccp-crypto-main.c
+++ b/drivers/crypto/ccp/ccp-crypto-main.c
@@ -367,6 +367,10 @@ static int ccp_crypto_init(void)
 {
int ret;
 
+   ret = ccp_present();
+   if (ret)
+   return ret;
+
	spin_lock_init(&req_queue_lock);
	INIT_LIST_HEAD(&req_queue.cmds);
	req_queue.backlog = &req_queue.cmds;
diff --git a/drivers/crypto/ccp/ccp-dev.c b/drivers/crypto/ccp/ccp-dev.c
index a7d1106..c6e6171 100644
--- a/drivers/crypto/ccp/ccp-dev.c
+++ b/drivers/crypto/ccp/ccp-dev.c
@@ -55,6 +55,20 @@ static inline void ccp_del_device(struct ccp_device *ccp)
 }
 
 /**
+ * ccp_present - check if a CCP device is present
+ *
+ * Returns zero if a CCP device is present, -ENODEV otherwise.
+ */
+int ccp_present(void)
+{
+   if (ccp_get_device())
+   return 0;
+
+   return -ENODEV;
+}
+EXPORT_SYMBOL_GPL(ccp_present);
+
+/**
  * ccp_enqueue_cmd - queue an operation for processing by the CCP
  *
  * @cmd: ccp_cmd struct to be processed
diff --git a/include/linux/ccp.h b/include/linux/ccp.h
index ebcc9d1..7f43703 100644
--- a/include/linux/ccp.h
+++ b/include/linux/ccp.h
@@ -27,6 +27,13 @@ struct ccp_cmd;
defined(CONFIG_CRYPTO_DEV_CCP_DD_MODULE)
 
 /**
+ * ccp_present - check if a CCP device is present
+ *
+ * Returns zero if a CCP device is present, -ENODEV otherwise.
+ */
+int ccp_present(void);
+
+/**
  * ccp_enqueue_cmd - queue an operation for processing by the CCP
  *
  * @cmd: ccp_cmd struct to be processed
@@ -53,6 +60,11 @@ int ccp_enqueue_cmd(struct ccp_cmd *cmd);
 
 #else /* CONFIG_CRYPTO_DEV_CCP_DD is not enabled */
 
+static inline int ccp_present(void)
+{
+   return -ENODEV;
+}
+
 static inline int ccp_enqueue_cmd(struct ccp_cmd *cmd)
 {
return -ENODEV;



[PATCH] crypto: ccp - Do not sign extend input data to CCP

2014-07-30 Thread Tom Lendacky
The CCP hardware interprets all numbers as unsigned numbers, therefore
sign extending input data is not valid.  Modify the function calls
for RSA and ECC to not perform sign extending.

This patch is based on the cryptodev-2.6 kernel tree.
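
To illustrate why sign extension corrupts unsigned operands, consider
widening a one-byte value (a standalone example, not driver code):

	unsigned char op = 0x80;	  /* operand byte: 128 unsigned     */
	long long sext = (signed char)op; /* sign-extended: ...FF80 == -128 */
	long long zext = op;		  /* zero-extended: ...0080 == 128  */

The CCP reads whatever the driver places in the work area as an unsigned
number, so only the zero-extended form preserves the operand's value.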

Signed-off-by: Tom Lendacky thomas.lenda...@amd.com
---
 drivers/crypto/ccp/ccp-ops.c |   26 +-
 1 file changed, 13 insertions(+), 13 deletions(-)

diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c
index 9ae006d..8729364 100644
--- a/drivers/crypto/ccp/ccp-ops.c
+++ b/drivers/crypto/ccp/ccp-ops.c
@@ -1606,7 +1606,7 @@ static int ccp_run_rsa_cmd(struct ccp_cmd_queue *cmd_q, 
struct ccp_cmd *cmd)
goto e_ksb;
 
	ccp_reverse_set_dm_area(&exp, rsa->exp, rsa->exp_len, CCP_KSB_BYTES,
-   true);
+   false);
	ret = ccp_copy_to_ksb(cmd_q, &exp, op.jobid, op.ksb_key,
  CCP_PASSTHRU_BYTESWAP_NOOP);
if (ret) {
@@ -1623,10 +1623,10 @@ static int ccp_run_rsa_cmd(struct ccp_cmd_queue *cmd_q, 
struct ccp_cmd *cmd)
goto e_exp;
 
	ccp_reverse_set_dm_area(&src, rsa->mod, rsa->mod_len, CCP_KSB_BYTES,
-   true);
+   false);
src.address += o_len;   /* Adjust the address for the copy operation */
	ccp_reverse_set_dm_area(&src, rsa->src, rsa->src_len, CCP_KSB_BYTES,
-   true);
+   false);
src.address -= o_len;   /* Reset the address to original value */
 
/* Prepare the output area for the operation */
@@ -1841,20 +1841,20 @@ static int ccp_run_ecc_mm_cmd(struct ccp_cmd_queue 
*cmd_q, struct ccp_cmd *cmd)
 
/* Copy the ECC modulus */
	ccp_reverse_set_dm_area(&src, ecc->mod, ecc->mod_len,
-   CCP_ECC_OPERAND_SIZE, true);
+   CCP_ECC_OPERAND_SIZE, false);
src.address += CCP_ECC_OPERAND_SIZE;
 
/* Copy the first operand */
	ccp_reverse_set_dm_area(&src, ecc->u.mm.operand_1,
				ecc->u.mm.operand_1_len,
-   CCP_ECC_OPERAND_SIZE, true);
+   CCP_ECC_OPERAND_SIZE, false);
src.address += CCP_ECC_OPERAND_SIZE;
 
	if (ecc->function != CCP_ECC_FUNCTION_MINV_384BIT) {
/* Copy the second operand */
		ccp_reverse_set_dm_area(&src, ecc->u.mm.operand_2,
					ecc->u.mm.operand_2_len,
-   CCP_ECC_OPERAND_SIZE, true);
+   CCP_ECC_OPERAND_SIZE, false);
src.address += CCP_ECC_OPERAND_SIZE;
}
 
@@ -1960,17 +1960,17 @@ static int ccp_run_ecc_pm_cmd(struct ccp_cmd_queue 
*cmd_q, struct ccp_cmd *cmd)
 
/* Copy the ECC modulus */
	ccp_reverse_set_dm_area(&src, ecc->mod, ecc->mod_len,
-   CCP_ECC_OPERAND_SIZE, true);
+   CCP_ECC_OPERAND_SIZE, false);
src.address += CCP_ECC_OPERAND_SIZE;
 
/* Copy the first point X and Y coordinate */
	ccp_reverse_set_dm_area(&src, ecc->u.pm.point_1.x,
				ecc->u.pm.point_1.x_len,
-   CCP_ECC_OPERAND_SIZE, true);
+   CCP_ECC_OPERAND_SIZE, false);
src.address += CCP_ECC_OPERAND_SIZE;
	ccp_reverse_set_dm_area(&src, ecc->u.pm.point_1.y,
				ecc->u.pm.point_1.y_len,
-   CCP_ECC_OPERAND_SIZE, true);
+   CCP_ECC_OPERAND_SIZE, false);
src.address += CCP_ECC_OPERAND_SIZE;
 
	/* Set the first point Z coordinate to 1 */
@@ -1981,11 +1981,11 @@ static int ccp_run_ecc_pm_cmd(struct ccp_cmd_queue 
*cmd_q, struct ccp_cmd *cmd)
/* Copy the second point X and Y coordinate */
ccp_reverse_set_dm_area(src, ecc-u.pm.point_2.x,
ecc-u.pm.point_2.x_len,
-   CCP_ECC_OPERAND_SIZE, true);
+   CCP_ECC_OPERAND_SIZE, false);
src.address += CCP_ECC_OPERAND_SIZE;
ccp_reverse_set_dm_area(src, ecc-u.pm.point_2.y,
ecc-u.pm.point_2.y_len,
-   CCP_ECC_OPERAND_SIZE, true);
+   CCP_ECC_OPERAND_SIZE, false);
src.address += CCP_ECC_OPERAND_SIZE;
 
/* Set the second point Z coordianate to 1 */
@@ -1995,14 +1995,14 @@ static int ccp_run_ecc_pm_cmd(struct ccp_cmd_queue 
*cmd_q, struct ccp_cmd *cmd)
/* Copy the Domain a parameter */
ccp_reverse_set_dm_area(src, ecc-u.pm.domain_a,
ecc-u.pm.domain_a_len

[PATCH] crypto: ccp - Remove select OF from Kconfig

2014-07-23 Thread Tom Lendacky
The addition of "select OF if ARM64" has led to a Kconfig
recursive dependency error when "make ARCH=sh rsk7269_defconfig"
was run.  Since OF is selected by ARM64 and of_property_read_bool()
is defined regardless, delete the Kconfig line that selects OF.

Reported-by: kbuild test robot <fengguang...@intel.com>
Signed-off-by: Tom Lendacky <thomas.lenda...@amd.com>
---
 drivers/crypto/ccp/Kconfig |1 -
 1 file changed, 1 deletion(-)

diff --git a/drivers/crypto/ccp/Kconfig b/drivers/crypto/ccp/Kconfig
index 474382d..7639ffc 100644
--- a/drivers/crypto/ccp/Kconfig
+++ b/drivers/crypto/ccp/Kconfig
@@ -3,7 +3,6 @@ config CRYPTO_DEV_CCP_DD
depends on CRYPTO_DEV_CCP
default m
select HW_RANDOM
-   select OF if ARM64
help
  Provides the interface to use the AMD Cryptographic Coprocessor
  which can be used to accelerate or offload encryption operations



[PATCH] crypto: ccp - Base AXI DMA cache settings on device tree

2014-07-10 Thread Tom Lendacky
The default cache operations for ARM64 were changed during 3.15.
To use coherent operations, a "dma-coherent" device tree property
is required.  If that property is not present in the device tree
node, then non-coherent operations are assigned to the device.

Add support to the ccp driver to assign the AXI DMA cache settings
based on whether the dma-coherent property is present in the device
node.  If present, use settings that work with the caches.  If not
present, use settings that do not look at the caches.
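
As a user-space model of the selection logic being added (illustrative
only; the CACHE_* values match the ones this patch defines in
ccp-dev.h, and has_dma_coherent stands in for the
of_property_read_bool() check):

#include <stdio.h>
#include <stdbool.h>

#define CACHE_NONE		0x00	/* does not look at the caches */
#define CACHE_WB_NO_ALLOC	0xb7	/* works with the caches */

static unsigned int pick_axcache(bool has_dma_coherent)
{
	return has_dma_coherent ? CACHE_WB_NO_ALLOC : CACHE_NONE;
}

int main(void)
{
	printf("dma-coherent present: axcache = 0x%02x\n", pick_axcache(true));
	printf("dma-coherent absent:  axcache = 0x%02x\n", pick_axcache(false));
	return 0;
}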

Signed-off-by: Tom Lendacky <thomas.lenda...@amd.com>
---
 .../devicetree/bindings/crypto/amd-ccp.txt |3 +++
 drivers/crypto/ccp/Kconfig |1 +
 drivers/crypto/ccp/ccp-dev.c   |2 +-
 drivers/crypto/ccp/ccp-dev.h   |4 
 drivers/crypto/ccp/ccp-platform.c  |6 ++
 5 files changed, 15 insertions(+), 1 deletion(-)

diff --git a/Documentation/devicetree/bindings/crypto/amd-ccp.txt b/Documentation/devicetree/bindings/crypto/amd-ccp.txt
index 6e0b11a..8c61183 100644
--- a/Documentation/devicetree/bindings/crypto/amd-ccp.txt
+++ b/Documentation/devicetree/bindings/crypto/amd-ccp.txt
@@ -7,6 +7,9 @@ Required properties:
   that services interrupts for this device
 - interrupts: Should contain the CCP interrupt
 
+Optional properties:
+- dma-coherent: Present if dma operations are coherent
+
 Example:
	ccp@e0100000 {
		compatible = "amd,ccp-seattle-v1a";
diff --git a/drivers/crypto/ccp/Kconfig b/drivers/crypto/ccp/Kconfig
index 7639ffc..474382d 100644
--- a/drivers/crypto/ccp/Kconfig
+++ b/drivers/crypto/ccp/Kconfig
@@ -3,6 +3,7 @@ config CRYPTO_DEV_CCP_DD
depends on CRYPTO_DEV_CCP
default m
select HW_RANDOM
+   select OF if ARM64
help
  Provides the interface to use the AMD Cryptographic Coprocessor
  which can be used to accelerate or offload encryption operations
diff --git a/drivers/crypto/ccp/ccp-dev.c b/drivers/crypto/ccp/ccp-dev.c
index fa1ab10..a7d1106 100644
--- a/drivers/crypto/ccp/ccp-dev.c
+++ b/drivers/crypto/ccp/ccp-dev.c
@@ -364,7 +364,7 @@ int ccp_init(struct ccp_device *ccp)
 
 #ifdef CONFIG_ARM64
/* For arm64 set the recommended queue cache settings */
-	iowrite32(CACHE_WB_NO_ALLOC, ccp->io_regs + CMD_Q_CACHE_BASE +
+	iowrite32(ccp->axcache, ccp->io_regs + CMD_Q_CACHE_BASE +
  (CMD_Q_CACHE_INC * i));
 #endif
 
diff --git a/drivers/crypto/ccp/ccp-dev.h b/drivers/crypto/ccp/ccp-dev.h
index 1c5651b..62ff35a 100644
--- a/drivers/crypto/ccp/ccp-dev.h
+++ b/drivers/crypto/ccp/ccp-dev.h
@@ -30,6 +30,7 @@
 
 #define TRNG_RETRIES   10
 
+#define CACHE_NONE 0x00
 #define CACHE_WB_NO_ALLOC  0xb7
 
 
@@ -255,6 +256,9 @@ struct ccp_device {
/* Suspend support */
unsigned int suspending;
wait_queue_head_t suspend_queue;
+
+   /* DMA caching attribute support */
+   unsigned int axcache;
 };
 
 
diff --git a/drivers/crypto/ccp/ccp-platform.c b/drivers/crypto/ccp/ccp-platform.c
index 65e5829..b0a2806 100644
--- a/drivers/crypto/ccp/ccp-platform.c
+++ b/drivers/crypto/ccp/ccp-platform.c
@@ -22,6 +22,7 @@
 #include <linux/spinlock.h>
 #include <linux/delay.h>
 #include <linux/ccp.h>
+#include <linux/of.h>
 
 #include "ccp-dev.h"
 
@@ -112,6 +113,11 @@ static int ccp_platform_probe(struct platform_device *pdev)
	*(dev->dma_mask) = DMA_BIT_MASK(48);
	dev->coherent_dma_mask = DMA_BIT_MASK(48);
 
+	if (of_property_read_bool(dev->of_node, "dma-coherent"))
+		ccp->axcache = CACHE_WB_NO_ALLOC;
+	else
+		ccp->axcache = CACHE_NONE;
+
dev_set_drvdata(dev, ccp);
 
ret = ccp_init(ccp);



[PATCH V1 2/3] crypto: ccp - CCP device bindings documentation

2014-06-05 Thread Tom Lendacky
This patch provides the documentation of the device bindings
for the AMD Cryptographic Coprocessor driver.

Signed-off-by: Tom Lendacky <thomas.lenda...@amd.com>
---
 .../devicetree/bindings/crypto/amd-ccp.txt |   16 
 1 file changed, 16 insertions(+)
 create mode 100644 Documentation/devicetree/bindings/crypto/amd-ccp.txt

diff --git a/Documentation/devicetree/bindings/crypto/amd-ccp.txt b/Documentation/devicetree/bindings/crypto/amd-ccp.txt
new file mode 100644
index 0000000..6e0b11a
--- /dev/null
+++ b/Documentation/devicetree/bindings/crypto/amd-ccp.txt
@@ -0,0 +1,16 @@
+* AMD Cryptographic Coprocessor driver (ccp)
+
+Required properties:
+- compatible: Should be "amd,ccp-seattle-v1a"
+- reg: Address and length of the register set for the device
+- interrupt-parent: Should be the phandle for the interrupt controller
+  that services interrupts for this device
+- interrupts: Should contain the CCP interrupt
+
+Example:
+	ccp@e0100000 {
+		compatible = "amd,ccp-seattle-v1a";
+		reg = <0 0xe0100000 0 0x10000>;
+		interrupt-parent = <&gic>;
+		interrupts = <0 3 4>;
+   };



[PATCH V1 0/3] crypto: ccp - arm64 platform support

2014-06-05 Thread Tom Lendacky
The following series implements support for the CCP as a platform
driver on ARM64.

This patch series is based on the cryptodev-2.6 kernel tree.

---

Tom Lendacky (3):
  crypto: ccp - Modify PCI support in prep for arm64 support
  crypto: ccp - CCP device bindings documentation
  crypto: ccp - Add platform device support for arm64


 .../devicetree/bindings/crypto/amd-ccp.txt |   16 +
 drivers/crypto/Kconfig |2 
 drivers/crypto/ccp/Makefile|5 
 drivers/crypto/ccp/ccp-dev.c   |   34 +++
 drivers/crypto/ccp/ccp-dev.h   |   10 +
 drivers/crypto/ccp/ccp-pci.c   |   39 +--
 drivers/crypto/ccp/ccp-platform.c  |  224 
 7 files changed, 301 insertions(+), 29 deletions(-)
 create mode 100644 Documentation/devicetree/bindings/crypto/amd-ccp.txt
 create mode 100644 drivers/crypto/ccp/ccp-platform.c

-- 
Tom Lendacky


[PATCH V1 3/3] crypto: ccp - Add platform device support for arm64

2014-06-05 Thread Tom Lendacky
Add support for the CCP on arm64 as a platform device.

Signed-off-by: Tom Lendacky <thomas.lenda...@amd.com>
---
 drivers/crypto/Kconfig|2 
 drivers/crypto/ccp/Makefile   |5 +
 drivers/crypto/ccp/ccp-dev.c  |   34 ++
 drivers/crypto/ccp/ccp-dev.h  |7 +
 drivers/crypto/ccp/ccp-platform.c |  224 +
 5 files changed, 270 insertions(+), 2 deletions(-)
 create mode 100644 drivers/crypto/ccp/ccp-platform.c

diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index f066fa2..09ae35c 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -391,7 +391,7 @@ config CRYPTO_DEV_ATMEL_SHA
 
 config CRYPTO_DEV_CCP
	bool "Support for AMD Cryptographic Coprocessor"
-	depends on X86 && PCI
+	depends on (X86 && PCI) || ARM64
default n
help
  The AMD Cryptographic Coprocessor provides hardware support
diff --git a/drivers/crypto/ccp/Makefile b/drivers/crypto/ccp/Makefile
index d3505a0..7f592d8 100644
--- a/drivers/crypto/ccp/Makefile
+++ b/drivers/crypto/ccp/Makefile
@@ -1,6 +1,11 @@
 obj-$(CONFIG_CRYPTO_DEV_CCP_DD) += ccp.o
 ccp-objs := ccp-dev.o ccp-ops.o
+ifdef CONFIG_X86
 ccp-objs += ccp-pci.o
+endif
+ifdef CONFIG_ARM64
+ccp-objs += ccp-platform.o
+endif
 
 obj-$(CONFIG_CRYPTO_DEV_CCP_CRYPTO) += ccp-crypto.o
 ccp-crypto-objs := ccp-crypto-main.o \
diff --git a/drivers/crypto/ccp/ccp-dev.c b/drivers/crypto/ccp/ccp-dev.c
index 2c78161..fa1ab10 100644
--- a/drivers/crypto/ccp/ccp-dev.c
+++ b/drivers/crypto/ccp/ccp-dev.c
@@ -20,7 +20,9 @@
 #include <linux/delay.h>
 #include <linux/hw_random.h>
 #include <linux/cpu.h>
+#ifdef CONFIG_X86
 #include <asm/cpu_device_id.h>
+#endif
 #include <linux/ccp.h>
 
 #include "ccp-dev.h"
@@ -360,6 +362,12 @@ int ccp_init(struct ccp_device *ccp)
/* Build queue interrupt mask (two interrupts per queue) */
		qim |= cmd_q->int_ok | cmd_q->int_err;
 
+#ifdef CONFIG_ARM64
+   /* For arm64 set the recommended queue cache settings */
+	iowrite32(CACHE_WB_NO_ALLOC, ccp->io_regs + CMD_Q_CACHE_BASE +
+ (CMD_Q_CACHE_INC * i));
+#endif
+
		dev_dbg(dev, "queue #%u available\n", i);
}
	if (ccp->cmd_q_count == 0) {
@@ -558,12 +566,15 @@ bool ccp_queues_suspended(struct ccp_device *ccp)
 }
 #endif
 
+#ifdef CONFIG_X86
 static const struct x86_cpu_id ccp_support[] = {
{ X86_VENDOR_AMD, 22, },
 };
+#endif
 
 static int __init ccp_mod_init(void)
 {
+#ifdef CONFIG_X86
	struct cpuinfo_x86 *cpuinfo = &boot_cpu_data;
int ret;
 
@@ -589,12 +600,30 @@ static int __init ccp_mod_init(void)
 
break;
}
+#endif
+
+#ifdef CONFIG_ARM64
+   int ret;
+
+   ret = ccp_platform_init();
+   if (ret)
+   return ret;
+
+   /* Don't leave the driver loaded if init failed */
+   if (!ccp_get_device()) {
+   ccp_platform_exit();
+   return -ENODEV;
+   }
+
+   return 0;
+#endif
 
return -ENODEV;
 }
 
 static void __exit ccp_mod_exit(void)
 {
+#ifdef CONFIG_X86
	struct cpuinfo_x86 *cpuinfo = &boot_cpu_data;
 
	switch (cpuinfo->x86) {
@@ -602,6 +631,11 @@ static void __exit ccp_mod_exit(void)
ccp_pci_exit();
break;
}
+#endif
+
+#ifdef CONFIG_ARM64
+   ccp_platform_exit();
+#endif
 }
 
 module_init(ccp_mod_init);
diff --git a/drivers/crypto/ccp/ccp-dev.h b/drivers/crypto/ccp/ccp-dev.h
index 72bf153..1c5651b 100644
--- a/drivers/crypto/ccp/ccp-dev.h
+++ b/drivers/crypto/ccp/ccp-dev.h
@@ -30,6 +30,8 @@
 
 #define TRNG_RETRIES   10
 
+#define CACHE_WB_NO_ALLOC  0xb7
+
 
 /** Register Mappings **/
 #define Q_MASK_REG 0x000
@@ -48,7 +50,7 @@
 #define CMD_Q_INT_STATUS_BASE  0x214
 #define CMD_Q_STATUS_INCR  0x20
 
-#define CMD_Q_CACHE0x228
+#define CMD_Q_CACHE_BASE   0x228
 #define CMD_Q_CACHE_INC0x20
 
 #define CMD_Q_ERROR(__qs)  ((__qs) & 0x0000003f);
@@ -259,6 +261,9 @@ struct ccp_device {
 int ccp_pci_init(void);
 void ccp_pci_exit(void);
 
+int ccp_platform_init(void);
+void ccp_platform_exit(void);
+
 struct ccp_device *ccp_alloc_struct(struct device *dev);
 int ccp_init(struct ccp_device *ccp);
 void ccp_destroy(struct ccp_device *ccp);
diff --git a/drivers/crypto/ccp/ccp-platform.c b/drivers/crypto/ccp/ccp-platform.c
new file mode 100644
index 0000000..65e5829
--- /dev/null
+++ b/drivers/crypto/ccp/ccp-platform.c
@@ -0,0 +1,224 @@
+/*
+ * AMD Cryptographic Coprocessor (CCP) driver
+ *
+ * Copyright (C) 2014 Advanced Micro Devices, Inc.
+ *
+ * Author: Tom Lendacky thomas.lenda...@amd.com
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include linux

[PATCH V1 1/3] crypto: ccp - Modify PCI support in prep for arm64 support

2014-06-05 Thread Tom Lendacky
Modify the PCI device support in prep for supporting the
CCP as a platform device for arm64.

Signed-off-by: Tom Lendacky <thomas.lenda...@amd.com>
---
 drivers/crypto/ccp/ccp-dev.h |3 +--
 drivers/crypto/ccp/ccp-pci.c |   39 ++-
 2 files changed, 15 insertions(+), 27 deletions(-)

diff --git a/drivers/crypto/ccp/ccp-dev.h b/drivers/crypto/ccp/ccp-dev.h
index 7ec536e..72bf153 100644
--- a/drivers/crypto/ccp/ccp-dev.h
+++ b/drivers/crypto/ccp/ccp-dev.h
@@ -23,8 +23,6 @@
 #include <linux/hw_random.h>
 
 
-#define IO_OFFSET  0x20000
-
 #define MAX_DMAPOOL_NAME_LEN   32
 
 #define MAX_HW_QUEUES  5
@@ -194,6 +192,7 @@ struct ccp_device {
void *dev_specific;
int (*get_irq)(struct ccp_device *ccp);
void (*free_irq)(struct ccp_device *ccp);
+   unsigned int irq;
 
/*
 * I/O area used for device communication. The register mapping
diff --git a/drivers/crypto/ccp/ccp-pci.c b/drivers/crypto/ccp/ccp-pci.c
index 0d74623..180cc87 100644
--- a/drivers/crypto/ccp/ccp-pci.c
+++ b/drivers/crypto/ccp/ccp-pci.c
@@ -12,8 +12,10 @@
 
 #include <linux/module.h>
 #include <linux/kernel.h>
+#include <linux/device.h>
 #include <linux/pci.h>
 #include <linux/pci_ids.h>
+#include <linux/dma-mapping.h>
 #include <linux/kthread.h>
 #include <linux/sched.h>
 #include <linux/interrupt.h>
@@ -24,6 +26,8 @@
 #include "ccp-dev.h"
 
 #define IO_BAR 2
+#define IO_OFFSET  0x20000
+
 #define MSIX_VECTORS   2
 
 struct ccp_msix {
@@ -89,7 +93,8 @@ static int ccp_get_msi_irq(struct ccp_device *ccp)
if (ret)
return ret;
 
-	ret = request_irq(pdev->irq, ccp_irq_handler, 0, "ccp", dev);
+	ccp->irq = pdev->irq;
+	ret = request_irq(ccp->irq, ccp_irq_handler, 0, "ccp", dev);
if (ret) {
		dev_notice(dev, "unable to allocate MSI IRQ (%d)\n", ret);
goto e_msi;
@@ -136,7 +141,7 @@ static void ccp_free_irqs(struct ccp_device *ccp)
 dev);
pci_disable_msix(pdev);
} else {
-		free_irq(pdev->irq, dev);
+		free_irq(ccp->irq, dev);
pci_disable_msi(pdev);
}
 }
@@ -147,21 +152,12 @@ static int ccp_find_mmio_area(struct ccp_device *ccp)
struct pci_dev *pdev = container_of(dev, struct pci_dev, dev);
resource_size_t io_len;
unsigned long io_flags;
-   int bar;
 
io_flags = pci_resource_flags(pdev, IO_BAR);
io_len = pci_resource_len(pdev, IO_BAR);
	if ((io_flags & IORESOURCE_MEM) && (io_len >= (IO_OFFSET + 0x800)))
return IO_BAR;
 
-	for (bar = 0; bar < PCI_STD_RESOURCE_END; bar++) {
-		io_flags = pci_resource_flags(pdev, bar);
-		io_len = pci_resource_len(pdev, bar);
-		if ((io_flags & IORESOURCE_MEM) &&
-		    (io_len >= (IO_OFFSET + 0x800)))
-   return bar;
-   }
-
return -EIO;
 }
 
@@ -214,20 +210,13 @@ static int ccp_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
}
	ccp->io_regs = ccp->io_map + IO_OFFSET;
 
-   ret = dma_set_mask(dev, DMA_BIT_MASK(48));
-   if (ret == 0) {
-   ret = dma_set_coherent_mask(dev, DMA_BIT_MASK(48));
+   ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
+   if (ret) {
+   ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
if (ret) {
-   dev_err(dev,
-				"pci_set_consistent_dma_mask failed (%d)\n",
+		dev_err(dev, "dma_set_mask_and_coherent failed (%d)\n",
ret);
-   goto e_bar0;
-   }
-   } else {
-   ret = dma_set_mask(dev, DMA_BIT_MASK(32));
-   if (ret) {
-			dev_err(dev, "pci_set_dma_mask failed (%d)\n", ret);
-   goto e_bar0;
+   goto e_iomap;
}
}
 
@@ -235,13 +224,13 @@ static int ccp_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 
ret = ccp_init(ccp);
if (ret)
-   goto e_bar0;
+   goto e_iomap;
 
	dev_notice(dev, "enabled\n");
 
return 0;
 
-e_bar0:
+e_iomap:
pci_iounmap(pdev, ccp-io_map);
 
 e_device:



[PATCH 0/3] crypto: ccp - minor code fixes

2014-02-24 Thread Tom Lendacky
The following series implements fixes to some code paths executed
during crypto API request processing.  These fixes address the
processing of requests when the CCP driver returns -EBUSY, and the
freeing of memory in error paths.

This patch series is based on the cryptodev-2.6 kernel tree.

---

Tom Lendacky (3):
  crypto: ccp - Prevent a possible lost CCP command request
  crypto: ccp - Invoke context callback when there is a backlog error
  crypto: ccp - Account for CCP backlog processing


 drivers/crypto/ccp/ccp-crypto-main.c |   25 ++---
 1 file changed, 18 insertions(+), 7 deletions(-)

-- 
Tom Lendacky



[PATCH 3/3] crypto: ccp - Account for CCP backlog processing

2014-02-24 Thread Tom Lendacky
When the crypto layer is able to queue up a command for processing
by the CCP on the initial call to ccp_crypto_enqueue_request() and
the CCP returns -EBUSY, then, if the backlog flag is not set, the
command needs to be freed and not added to the active command list.
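
The ownership rule being enforced can be modeled in standalone C
(illustrative only; accepted() approximates ccp_crypto_success() plus
the new backlog test, and malloc()/free() stand in for the kernel
allocators):

#include <errno.h>
#include <stdbool.h>
#include <stdlib.h>

#define CCP_CMD_MAY_BACKLOG 0x1		/* illustrative flag value */

struct crypto_cmd { unsigned int flags; };

/* A submit result leaves the cmd owned by the queue only on success,
 * -EINPROGRESS, or -EBUSY with backlogging allowed. */
static bool accepted(const struct crypto_cmd *cmd, int ret)
{
	if (ret == 0 || ret == -EINPROGRESS)
		return true;
	return ret == -EBUSY && (cmd->flags & CCP_CMD_MAY_BACKLOG);
}

int main(void)
{
	struct crypto_cmd *cmd = malloc(sizeof(*cmd));
	int ret = -EBUSY;		/* pretend the CCP is busy */

	if (!cmd)
		return 1;
	cmd->flags = 0;			/* backlogging not allowed */
	if (!accepted(cmd, ret))
		free(cmd);		/* rejected: the caller still owns it */
	return 0;
}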

Signed-off-by: Tom Lendacky <thomas.lenda...@amd.com>
---
 drivers/crypto/ccp/ccp-crypto-main.c |   18 +++---
 1 file changed, 11 insertions(+), 7 deletions(-)

diff --git a/drivers/crypto/ccp/ccp-crypto-main.c b/drivers/crypto/ccp/ccp-crypto-main.c
index 7d98635..20dc848 100644
--- a/drivers/crypto/ccp/ccp-crypto-main.c
+++ b/drivers/crypto/ccp/ccp-crypto-main.c
@@ -205,6 +205,7 @@ static int ccp_crypto_enqueue_cmd(struct ccp_crypto_cmd *crypto_cmd)
 {
struct ccp_crypto_cmd *active = NULL, *tmp;
unsigned long flags;
+   bool free_cmd = true;
int ret;
 
	spin_lock_irqsave(&req_queue_lock, flags);
@@ -231,7 +232,10 @@ static int ccp_crypto_enqueue_cmd(struct ccp_crypto_cmd *crypto_cmd)
if (!active) {
		ret = ccp_enqueue_cmd(crypto_cmd->cmd);
if (!ccp_crypto_success(ret))
-   goto e_lock;
+			goto e_lock;	/* Error, don't queue it */
+		if ((ret == -EBUSY) &&
+		    !(crypto_cmd->cmd->flags & CCP_CMD_MAY_BACKLOG))
+			goto e_lock;	/* Not backlogging, don't queue it */
}
 
	if (req_queue.cmd_count >= CCP_CRYPTO_MAX_QLEN) {
@@ -244,9 +248,14 @@ static int ccp_crypto_enqueue_cmd(struct ccp_crypto_cmd *crypto_cmd)
req_queue.cmd_count++;
	list_add_tail(&crypto_cmd->entry, &req_queue.cmds);
 
+   free_cmd = false;
+
 e_lock:
	spin_unlock_irqrestore(&req_queue_lock, flags);
 
+   if (free_cmd)
+   kfree(crypto_cmd);
+
return ret;
 }
 
@@ -262,7 +271,6 @@ int ccp_crypto_enqueue_request(struct crypto_async_request *req,
 {
struct ccp_crypto_cmd *crypto_cmd;
gfp_t gfp;
-   int ret;
 
	gfp = req->flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : GFP_ATOMIC;
 
@@ -287,11 +295,7 @@ int ccp_crypto_enqueue_request(struct crypto_async_request *req,
else
		cmd->flags &= ~CCP_CMD_MAY_BACKLOG;
 
-   ret = ccp_crypto_enqueue_cmd(crypto_cmd);
-   if (!ccp_crypto_success(ret))
-   kfree(crypto_cmd);
-
-   return ret;
+   return ccp_crypto_enqueue_cmd(crypto_cmd);
 }
 
 struct scatterlist *ccp_crypto_sg_table_add(struct sg_table *table,




[PATCH 1/3] crypto: ccp - Prevent a possible lost CCP command request

2014-02-24 Thread Tom Lendacky
If a CCP command has been queued for processing at the
crypto layer then, when dequeueing it for processing, the
"can backlog" flag must be set so that the request isn't
lost if the CCP backlog queue limit is reached.
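
A minimal sketch of that rule (illustrative only; the flag value and
resubmit() helper are stand-ins, not the driver's actual code):

#include <stdio.h>

#define CCP_CMD_MAY_BACKLOG 0x1		/* illustrative flag value */

struct cmd { unsigned int flags; };

/* A command that was already queued must be allowed to backlog when it
 * is resubmitted, otherwise a full device queue would drop it. */
static void resubmit(struct cmd *c)
{
	c->flags |= CCP_CMD_MAY_BACKLOG;
	/* ...then hand the command back to the device queue */
}

int main(void)
{
	struct cmd c = { .flags = 0 };

	resubmit(&c);
	printf("may backlog: %u\n", c.flags & CCP_CMD_MAY_BACKLOG);
	return 0;
}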

Signed-off-by: Tom Lendacky <thomas.lenda...@amd.com>
---
 drivers/crypto/ccp/ccp-crypto-main.c |4 
 1 file changed, 4 insertions(+)

diff --git a/drivers/crypto/ccp/ccp-crypto-main.c b/drivers/crypto/ccp/ccp-crypto-main.c
index 010fded..9d30d6f 100644
--- a/drivers/crypto/ccp/ccp-crypto-main.c
+++ b/drivers/crypto/ccp/ccp-crypto-main.c
@@ -174,6 +174,10 @@ static void ccp_crypto_complete(void *data, int err)
 
/* Submit the next cmd */
while (held) {
+   /* Since we have already queued the cmd, we must indicate that
+* we can backlog so as not to lose this request.
+*/
+		held->cmd->flags |= CCP_CMD_MAY_BACKLOG;
		ret = ccp_enqueue_cmd(held->cmd);
if (ccp_crypto_success(ret))
break;




Re: Fix ccp_run_passthru_cmd dma variable assignments

2014-01-24 Thread Tom Lendacky
On 01/24/2014 12:39 PM, Dave Jones wrote:
> There are some suspicious looking lines of code in the new ccp driver,
> including one that assigns a variable to itself, and another that
> overwrites a previous assignment.
>
> This may have been a cut-and-paste error where 'src' was forgotten to be
> changed to 'dst'.
> I have no hardware to test this, so this is untested.

Yes, this was a cut-and-paste error that was not discovered with my tests. I've
updated my testcases and tested/verified this fix.
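
For reference, the bug class is easy to model (a hedged sketch; the
struct names are invented, and clang's -Wself-assign-field is one way
such a silent no-op gets flagged):

#include <stdio.h>

struct dma_area { unsigned int offset, length; };
struct operation { struct dma_area src, dst; };

static void set_dst_params(struct operation *op, unsigned int used)
{
	/* Not: op->dst.length = op->dst.length; (a silent no-op) */
	op->dst.offset = used;
	op->dst.length = op->src.length;
}

int main(void)
{
	struct operation op = { .src = { .offset = 0, .length = 128 } };

	set_dst_params(&op, 64);
	printf("dst: offset=%u length=%u\n", op.dst.offset, op.dst.length);
	return 0;
}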

Herbert, this should probably go through the cryptodev-2.6 tree, right?

Acked-by: Tom Lendacky <thomas.lenda...@amd.com>

Thanks,
Tom

 
> Signed-off-by: Dave Jones <da...@fedoraproject.org>
> 
> diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c
> index 71ed3ade7e12..c266a7b154bb 100644
> --- a/drivers/crypto/ccp/ccp-ops.c
> +++ b/drivers/crypto/ccp/ccp-ops.c
> @@ -1666,8 +1666,8 @@ static int ccp_run_passthru_cmd(struct ccp_cmd_queue *cmd_q,
>  
>  	op.dst.type = CCP_MEMTYPE_SYSTEM;
>  	op.dst.u.dma.address = sg_dma_address(dst.sg_wa.sg);
> -	op.src.u.dma.offset = dst.sg_wa.sg_used;
> -	op.src.u.dma.length = op.src.u.dma.length;
> +	op.dst.u.dma.offset = dst.sg_wa.sg_used;
> +	op.dst.u.dma.length = op.src.u.dma.length;
>  
>  	ret = ccp_perform_passthru(op);
>  	if (ret) {



[PATCH 1/4] crypto: ccp - Allow for selective disablement of crypto API algorithms

2014-01-24 Thread Tom Lendacky
Introduce module parameters that allow a crypto algorithm to be
disabled by not registering that algorithm with the crypto API.
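
A standalone sketch of the gating pattern (the variable names mirror
the new aes_disable/sha_disable module parameters; the printf() calls
stand in for the actual registration functions):

#include <stdio.h>

static unsigned int aes_disable;	/* e.g. aes_disable=1 at modprobe time */
static unsigned int sha_disable;	/* e.g. sha_disable=1 at modprobe time */

static int register_algs(void)
{
	if (!aes_disable)
		printf("registering AES, AES-CMAC and AES-XTS algorithms\n");

	if (!sha_disable)
		printf("registering SHA algorithms\n");

	return 0;
}

int main(void)
{
	sha_disable = 1;	/* leave SHA to other providers */
	return register_algs();
}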

Signed-off-by: Tom Lendacky <thomas.lenda...@amd.com>
---
 drivers/crypto/ccp/ccp-crypto-main.c |   37 +++---
 1 file changed, 25 insertions(+), 12 deletions(-)

diff --git a/drivers/crypto/ccp/ccp-crypto-main.c b/drivers/crypto/ccp/ccp-crypto-main.c
index 2636f04..b3f22b0 100644
--- a/drivers/crypto/ccp/ccp-crypto-main.c
+++ b/drivers/crypto/ccp/ccp-crypto-main.c
@@ -11,6 +11,7 @@
  */
 
 #include <linux/module.h>
+#include <linux/moduleparam.h>
 #include <linux/kernel.h>
 #include <linux/list.h>
 #include <linux/ccp.h>
@@ -24,6 +25,14 @@ MODULE_LICENSE("GPL");
 MODULE_VERSION("1.0.0");
 MODULE_DESCRIPTION("AMD Cryptographic Coprocessor crypto API support");
 
+static unsigned int aes_disable;
+module_param(aes_disable, uint, 0444);
+MODULE_PARM_DESC(aes_disable, "Disable use of AES - any non-zero value");
+
+static unsigned int sha_disable;
+module_param(sha_disable, uint, 0444);
+MODULE_PARM_DESC(sha_disable, "Disable use of SHA - any non-zero value");
+
 
 /* List heads for the supported algorithms */
 static LIST_HEAD(hash_algs);
@@ -337,21 +346,25 @@ static int ccp_register_algs(void)
 {
int ret;
 
-	ret = ccp_register_aes_algs(&cipher_algs);
-	if (ret)
-		return ret;
+	if (!aes_disable) {
+		ret = ccp_register_aes_algs(&cipher_algs);
+		if (ret)
+			return ret;
 
-	ret = ccp_register_aes_cmac_algs(&hash_algs);
-	if (ret)
-		return ret;
+		ret = ccp_register_aes_cmac_algs(&hash_algs);
+		if (ret)
+			return ret;
 
-	ret = ccp_register_aes_xts_algs(&cipher_algs);
-	if (ret)
-		return ret;
+		ret = ccp_register_aes_xts_algs(&cipher_algs);
+		if (ret)
+			return ret;
+	}
 
-	ret = ccp_register_sha_algs(&hash_algs);
-	if (ret)
-		return ret;
+	if (!sha_disable) {
+		ret = ccp_register_sha_algs(&hash_algs);
+		if (ret)
+			return ret;
+	}
 
return 0;
 }



