1) Rebased patches:
qat16_2.3.0-34-qat-remove-local-path-from-makefile.patch
qat17_0.9.0-4-qat-add-install-target-and-add-folder.patch

2) Added patches:
qat17_4.7.0-00006-Link-driver-with-object-files.patch
qat17_4.7.0-00006-Switch-to-skcipher-API.patch
qat17_4.7.0-00006-Drop-pr_warning-definition.patch

3) Remove the duplicate firmware installation, since linux-firmware.bb
in OE-Core already installs the firmware.

4) Remove qat17-src from PACKAGES to avoid duplicate packaging.

5) Install firmware under ${nonarch_base_libdir} instead of
${base_libdir}.

Signed-off-by: Yongxin Liu <[email protected]>
---
 ....0-34-qat-remove-local-path-from-makefile.patch |   15 +-
 ...0-4-qat-add-install-target-and-add-folder.patch |   16 +-
 ...17_4.7.0-00006-Drop-pr_warning-definition.patch |   31 +
 ...4.7.0-00006-Link-driver-with-object-files.patch |   91 ++
 .../qat17_4.7.0-00006-Switch-to-skcipher-API.patch | 1161 ++++++++++++++++++++
 .../{qat17_4.2.0-00012.bb => qat17_4.7.0-00006.bb} |   67 +-
 6 files changed, 1332 insertions(+), 49 deletions(-)
 create mode 100644 
recipes-extended/qat/files/qat17_4.7.0-00006-Drop-pr_warning-definition.patch
 create mode 100644 
recipes-extended/qat/files/qat17_4.7.0-00006-Link-driver-with-object-files.patch
 create mode 100644 
recipes-extended/qat/files/qat17_4.7.0-00006-Switch-to-skcipher-API.patch
 rename recipes-extended/qat/{qat17_4.2.0-00012.bb => qat17_4.7.0-00006.bb} 
(73%)

diff --git 
a/recipes-extended/qat/files/qat16_2.3.0-34-qat-remove-local-path-from-makefile.patch
 
b/recipes-extended/qat/files/qat16_2.3.0-34-qat-remove-local-path-from-makefile.patch
index da40e9f..96791cb 100644
--- 
a/recipes-extended/qat/files/qat16_2.3.0-34-qat-remove-local-path-from-makefile.patch
+++ 
b/recipes-extended/qat/files/qat16_2.3.0-34-qat-remove-local-path-from-makefile.patch
@@ -1,4 +1,4 @@
-From 5044a14a6b4192b771f16aa834f688c1fd1287dd Mon Sep 17 00:00:00 2001
+From 1e29afc0e69fb9118cb0dcb924cdffa9db730572 Mon Sep 17 00:00:00 2001
 From: Anuj Mittal <[email protected]>
 Date: Wed, 8 Jul 2015 11:11:32 +0800
 Subject: [PATCH] qat: remove local path from makefile
@@ -9,22 +9,23 @@ Remove the host machine /usr/include path from makefile.
 
 Signed-off-by: Anuj Mittal <[email protected]>
 ---
- .../build_files/env_files/linux_2.6_user_space.mk  |    3 +--
+ quickassist/build_system/build_files/env_files/linux_2.6_user_space.mk | 3 +--
  1 file changed, 1 insertion(+), 2 deletions(-)
 
 diff --git 
a/quickassist/build_system/build_files/env_files/linux_2.6_user_space.mk 
b/quickassist/build_system/build_files/env_files/linux_2.6_user_space.mk
-index 1451f4a..719d1bb 100755
+index f7f18a5..e8c9f18 100644
 --- a/quickassist/build_system/build_files/env_files/linux_2.6_user_space.mk
 +++ b/quickassist/build_system/build_files/env_files/linux_2.6_user_space.mk
-@@ -72,8 +72,7 @@
+@@ -46,8 +46,7 @@
  #
  #-------------------------------------------------------------
-
+ 
 -INCLUDES+=-I/usr/include \
 -          -I$(API_DIR)   \
 +INCLUDES+=-I$(API_DIR)   \
+           -I$(ADF_CMN_DIR) \
            -I$(OSAL_DIR)/include \
            -I$(OSAL_DIR)/src/linux/user_space/include
+-- 
+2.14.4
 
---
-1.7.9.5
diff --git 
a/recipes-extended/qat/files/qat17_0.9.0-4-qat-add-install-target-and-add-folder.patch
 
b/recipes-extended/qat/files/qat17_0.9.0-4-qat-add-install-target-and-add-folder.patch
index 6b6dfa9..a810cfc 100644
--- 
a/recipes-extended/qat/files/qat17_0.9.0-4-qat-add-install-target-and-add-folder.patch
+++ 
b/recipes-extended/qat/files/qat17_0.9.0-4-qat-add-install-target-and-add-folder.patch
@@ -1,4 +1,4 @@
-From 22963fed4e9017ca05855bd2373e2467f45ebe30 Mon Sep 17 00:00:00 2001
+From a94af9df0fa6f2c41efaf7ef6c17d0e5bb8aa80d Mon Sep 17 00:00:00 2001
 From: "Tan, Raymond" <[email protected]>
 Date: Mon, 4 Jun 2018 09:26:33 +0800
 Subject: [PATCH] qat-add-install-target-and-add-folder
@@ -8,23 +8,24 @@ Upstream-Status: Inappropriate [Configuration]
 Modify Makefile to add install target and add folder
 
 Signed-off-by: Tan, Raymond <[email protected]>
+
 ---
  quickassist/Makefile | 25 ++++++++++++++++++++++---
  1 file changed, 22 insertions(+), 3 deletions(-)
 
 diff --git a/quickassist/Makefile b/quickassist/Makefile
-index 3e08241..1647d9e 100644
+index 93990f2..70a4353 100644
 --- a/quickassist/Makefile
 +++ b/quickassist/Makefile
-@@ -64,6 +64,7 @@ ICP_BUILD_OUTPUT?=build_$(DATE)
+@@ -97,6 +97,7 @@ ICP_BUILD_OUTPUT?=build_$(DATE)
  ICP_TOP_ENV=$(ICP_BUILDSYSTEM_PATH)/build_files/env_files/
  export ICP_ACCEL_INC=YES
  LAC_LIB_DIR=$(LAC_PATH)/build/libs
 
+MODULE_INSTALLPATH=$(SAMPLE_BUILD_OUTPUT)/lib/modules/$(QAT_KERNEL_VER)/updates/drivers/crypto/qat
  
  #Release Package build steps
- ALL_TARGETS =  clean lac_lib_dir qat_direct libosal_user lac_user
-@@ -80,10 +81,14 @@ all: $(ALL_TARGETS)
+ ALL_TARGETS = lac_user lac_kernel
+@@ -114,10 +115,14 @@ all: $(ALL_TARGETS)
  
  user: lac_lib_dir libosal_user lac_user
  
@@ -41,7 +42,7 @@ index 3e08241..1647d9e 100644
  
  #
  # Common memory driver
-@@ -143,8 +148,22 @@ ifeq ($(ICP_NO_CLEAN),)
+@@ -200,8 +205,22 @@ ifeq ($(ICP_NO_CLEAN),)
  endif
  
  
@@ -65,6 +66,3 @@ index 3e08241..1647d9e 100644
  
  lac_lib_dir: clean
        test -d $(LAC_LIB_DIR) || mkdir -p $(LAC_LIB_DIR);
--- 
-1.9.1
-
diff --git 
a/recipes-extended/qat/files/qat17_4.7.0-00006-Drop-pr_warning-definition.patch 
b/recipes-extended/qat/files/qat17_4.7.0-00006-Drop-pr_warning-definition.patch
new file mode 100644
index 0000000..6b816df
--- /dev/null
+++ 
b/recipes-extended/qat/files/qat17_4.7.0-00006-Drop-pr_warning-definition.patch
@@ -0,0 +1,31 @@
+From 058673d6798b835dce7f27fe172b7727bbaf30cf Mon Sep 17 00:00:00 2001
+From: Yongxin Liu <[email protected]>
+Date: Wed, 15 Jan 2020 15:25:15 +0000
+Subject: [PATCH] qat: Drop pr_warning definition
+
+In mainline kernel commit 61ff72f40168 ("printk: Drop pr_warning
+definition"), pr_warning was dropped.
+
+Upstream-Status: Inappropriate [Code released in tarball form only]
+
+Signed-off-by: Yongxin Liu <[email protected]>
+---
+ quickassist/utilities/libusdm_drv/linux/include/qae_mem_utils.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/quickassist/utilities/libusdm_drv/linux/include/qae_mem_utils.h 
b/quickassist/utilities/libusdm_drv/linux/include/qae_mem_utils.h
+index f4a56dc..d88e762 100644
+--- a/quickassist/utilities/libusdm_drv/linux/include/qae_mem_utils.h
++++ b/quickassist/utilities/libusdm_drv/linux/include/qae_mem_utils.h
+@@ -93,7 +93,7 @@ MALLOC_DECLARE(M_QAE_MEM);
+ 
+ #define mm_info(...) pr_info(USDM_MOD __VA_ARGS__)
+ 
+-#define mm_warning(...) pr_warning(USDM_MOD __VA_ARGS__)
++#define mm_warning(...) pr_warn(USDM_MOD __VA_ARGS__)
+ 
+ /*define types which need to vary between 32 and 64 bit*/
+ #define QAE_PAGE_SHIFT 12
+-- 
+2.24.1
+
diff --git 
a/recipes-extended/qat/files/qat17_4.7.0-00006-Link-driver-with-object-files.patch
 
b/recipes-extended/qat/files/qat17_4.7.0-00006-Link-driver-with-object-files.patch
new file mode 100644
index 0000000..0780426
--- /dev/null
+++ 
b/recipes-extended/qat/files/qat17_4.7.0-00006-Link-driver-with-object-files.patch
@@ -0,0 +1,91 @@
+From 555a4b3605e983e492f8c67e38a094933bc7efcd Mon Sep 17 00:00:00 2001
+From: Yongxin Liu <[email protected]>
+Date: Mon, 6 Jan 2020 09:26:39 +0800
+Subject: [PATCH] qat: Link driver with object files instead of archived files
+
+Due to mainline kernel commit 69ea912fda7 ("kbuild: remove unneeded
+link_multi_deps"), modules cannot link *.a archives. So change .a to
+.o files.
+
+Upstream-Status: Inappropriate [Temporary workaround for kernel later than
+v4.19-rc3]
+
+Signed-off-by: Yongxin Liu <[email protected]>
+
+---
+ quickassist/Makefile                          |  2 ++
+ .../lookaside/access_layer/src/Makefile       | 21 +++++++++----------
+ 2 files changed, 12 insertions(+), 11 deletions(-)
+
+diff --git a/quickassist/Makefile b/quickassist/Makefile
+index 70a4353..5f6ee46 100644
+--- a/quickassist/Makefile
++++ b/quickassist/Makefile
+@@ -154,6 +154,7 @@ libosal_kernel: clean output_dir lac_lib_dir
+       echo ; echo 'Copying OSAL library';
+       cp 
$(OSAL_PATH)/src/linux/kernel_space/build/linux_2.6/kernel_space/libosal.a 
$(ICP_BUILD_OUTPUT)/libosal_kernel.a;
+       cp 
$(OSAL_PATH)/src/linux/kernel_space/build/linux_2.6/kernel_space/libosal.a 
$(LAC_LIB_DIR)/;
++      cp $(OSAL_PATH)/src/linux/kernel_space/build/linux_2.6/kernel_space/*.o 
$(LAC_LIB_DIR)/;
+ 
+ 
+ #build linux qat_direct layer
+@@ -169,6 +170,7 @@ qat_kernel: clean output_dir lac_lib_dir libosal_kernel 
cmn_ko
+       echo ; echo 'Copying qat_kernel library';
+       cp $(KERNEL_PATH)/src/build/linux_2.6/kernel_space/libadf_kernel.a 
$(ICP_BUILD_OUTPUT)/;
+       cp $(KERNEL_PATH)/src/build/linux_2.6/kernel_space/libadf_kernel.a 
$(LAC_LIB_DIR)/;
++      cp $(KERNEL_PATH)/src/build/linux_2.6/kernel_space/*.o $(LAC_LIB_DIR)/;
+ 
+ 
+ lac_user: clean output_dir qat_direct libosal_user cmn_user cmn_ko
+diff --git a/quickassist/lookaside/access_layer/src/Makefile 
b/quickassist/lookaside/access_layer/src/Makefile
+index cc8cf2f..b8ec93c 100644
+--- a/quickassist/lookaside/access_layer/src/Makefile
++++ b/quickassist/lookaside/access_layer/src/Makefile
+@@ -112,13 +112,13 @@ LIB_STATIC=$(OUTPUT_NAME).a
+ LIB_SHARED=$(OUTPUT_NAME).so
+ 
+ # add the path and list of source libraries,
+-ADDITIONAL_KERNEL_LIBS= common/utils/$(ICP_BUILD_OUTPUT_DIR)/utils.a \
+-                      common/ctrl/$(ICP_BUILD_OUTPUT_DIR)/init.a \
+-                        
common/compression/$(ICP_BUILD_OUTPUT_DIR)/compression.a
++ADDITIONAL_KERNEL_LIBS= common/utils/$(ICP_BUILD_OUTPUT_DIR)/*.o \
++                      common/ctrl/$(ICP_BUILD_OUTPUT_DIR)/*.o \
++                        common/compression/$(ICP_BUILD_OUTPUT_DIR)/*.o
+ ifndef ICP_DC_ONLY
+-ADDITIONAL_KERNEL_LIBS += common/crypto/sym/$(ICP_BUILD_OUTPUT_DIR)/sym.a \
+-                        
common/crypto/sym/qat/$(ICP_BUILD_OUTPUT_DIR)/sym_qat.a \
+-                        
common/crypto/sym/key/$(ICP_BUILD_OUTPUT_DIR)/sym_key.a
++ADDITIONAL_KERNEL_LIBS += common/crypto/sym/$(ICP_BUILD_OUTPUT_DIR)/*.o \
++                        common/crypto/sym/qat/$(ICP_BUILD_OUTPUT_DIR)/*.o \
++                        common/crypto/sym/key/$(ICP_BUILD_OUTPUT_DIR)/*.o
+ ifeq ($(ICP_OS_LEVEL), user_space)
+ ADDITIONAL_KERNEL_LIBS += 
common/crypto/asym/pke_common/$(ICP_BUILD_OUTPUT_DIR)/pke_common.a \
+                         
common/crypto/asym/diffie_hellman/$(ICP_BUILD_OUTPUT_DIR)/diffie_hellman.a \
+@@ -128,14 +128,14 @@ ADDITIONAL_KERNEL_LIBS += 
common/crypto/asym/pke_common/$(ICP_BUILD_OUTPUT_DIR)/
+                         
common/crypto/asym/large_number/$(ICP_BUILD_OUTPUT_DIR)/ln.a \
+                         
common/crypto/asym/ecc/$(ICP_BUILD_OUTPUT_DIR)/elliptic_curve.a
+ else
+-ADDITIONAL_KERNEL_LIBS += common/stubs/$(ICP_BUILD_OUTPUT_DIR)/lib_lac_stubs.a
++ADDITIONAL_KERNEL_LIBS += common/stubs/$(ICP_BUILD_OUTPUT_DIR)/*.o
+ endif
+ else
+ ifeq ($(ICP_OS_LEVEL), kernel_space)
+-ADDITIONAL_KERNEL_LIBS += common/stubs/$(ICP_BUILD_OUTPUT_DIR)/lib_lac_stubs.a
++ADDITIONAL_KERNEL_LIBS += common/stubs/$(ICP_BUILD_OUTPUT_DIR)/*.o
+ endif
+ endif
+-ADDITIONAL_KERNEL_LIBS += common/qat_comms/$(ICP_BUILD_OUTPUT_DIR)/qat_comms.a
++ADDITIONAL_KERNEL_LIBS += common/qat_comms/$(ICP_BUILD_OUTPUT_DIR)/*.o
+ 
+ ifeq ($(ICP_OS_LEVEL), user_space)
+ ifdef KPT
+@@ -145,8 +145,7 @@ endif
+ endif
+ 
+ ifeq ($(ICP_OS_LEVEL), kernel_space)
+-    ADDITIONAL_OBJECTS =  ../build/libs/libadf_kernel.a
+-    ADDITIONAL_OBJECTS += ../build/libs/libosal.a
++    ADDITIONAL_OBJECTS += ../build/libs/*.o
+ endif
+ 
+ ifeq ($(ICP_OS_LEVEL), user_space)
diff --git 
a/recipes-extended/qat/files/qat17_4.7.0-00006-Switch-to-skcipher-API.patch 
b/recipes-extended/qat/files/qat17_4.7.0-00006-Switch-to-skcipher-API.patch
new file mode 100644
index 0000000..96e949c
--- /dev/null
+++ b/recipes-extended/qat/files/qat17_4.7.0-00006-Switch-to-skcipher-API.patch
@@ -0,0 +1,1161 @@
+From b19449e3c11ffd477a3db60f21e14930ed07f251 Mon Sep 17 00:00:00 2001
+From: Yongxin Liu <[email protected]>
+Date: Wed, 15 Jan 2020 13:50:38 +0000
+Subject: [PATCH] qat: Switch to skcipher API
+
+The patch is derived from mainline kernel commit 7fe948a52287
+("crypto: qat - switch to skcipher API").
+
+Upstream-Status: Inappropriate [Code released in tarball form only]
+
+Signed-off-by: Yongxin Liu <[email protected]>
+---
+ .../drivers/crypto/qat/qat_common/qat_algs.c  | 676 ++++++++++--------
+ .../crypto/qat/qat_common/qat_crypto.h        |   6 +-
+ 2 files changed, 394 insertions(+), 288 deletions(-)
+
+diff --git a/quickassist/qat/drivers/crypto/qat/qat_common/qat_algs.c 
b/quickassist/qat/drivers/crypto/qat/qat_common/qat_algs.c
+index c4edb3c..35bca76 100644
+--- a/quickassist/qat/drivers/crypto/qat/qat_common/qat_algs.c
++++ b/quickassist/qat/drivers/crypto/qat/qat_common/qat_algs.c
+@@ -44,14 +44,15 @@
+   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+-#ifndef QAT_AEAD_OLD_SUPPORTED
+ #include <linux/module.h>
+ #include <linux/slab.h>
+ #include <linux/crypto.h>
+ #include <crypto/internal/aead.h>
++#include <crypto/internal/skcipher.h>
+ #include <crypto/aes.h>
+ #include <crypto/sha.h>
+ #include <crypto/hash.h>
++#include <crypto/hmac.h>
+ #include <crypto/algapi.h>
+ #include <crypto/authenc.h>
+ #include <linux/dma-mapping.h>
+@@ -113,11 +114,16 @@ struct qat_alg_aead_ctx {
+       struct crypto_shash *hash_tfm;
+       enum icp_qat_hw_auth_algo qat_hash_alg;
+       struct qat_crypto_instance *inst;
+-      char ipad[SHA512_BLOCK_SIZE];
++      union {
++              struct sha1_state sha1;
++              struct sha256_state sha256;
++              struct sha512_state sha512;
++      };
++      char ipad[SHA512_BLOCK_SIZE]; /* sufficient for SHA-1/SHA-256 as well */
+       char opad[SHA512_BLOCK_SIZE];
+ };
+ 
+-struct qat_alg_ablkcipher_ctx {
++struct qat_alg_skcipher_ctx {
+       struct icp_qat_hw_cipher_algo_blk *enc_cd;
+       struct icp_qat_hw_cipher_algo_blk *dec_cd;
+       dma_addr_t enc_cd_paddr;
+@@ -125,7 +131,7 @@ struct qat_alg_ablkcipher_ctx {
+       struct icp_qat_fw_la_bulk_req enc_fw_req;
+       struct icp_qat_fw_la_bulk_req dec_fw_req;
+       struct qat_crypto_instance *inst;
+-      struct crypto_tfm *tfm;
++      struct crypto_skcipher *tfm;
+ };
+ 
+ static int qat_get_inter_state_size(enum icp_qat_hw_auth_algo qat_hash_alg)
+@@ -149,9 +155,6 @@ static int qat_alg_do_precomputes(struct 
icp_qat_hw_auth_algo_blk *hash,
+                                 unsigned int auth_keylen)
+ {
+       SHASH_DESC_ON_STACK(shash, ctx->hash_tfm);
+-      struct sha1_state sha1;
+-      struct sha256_state sha256;
+-      struct sha512_state sha512;
+       int block_size = crypto_shash_blocksize(ctx->hash_tfm);
+       int digest_size = crypto_shash_digestsize(ctx->hash_tfm);
+       __be32 *hash_state_out;
+@@ -160,7 +163,6 @@ static int qat_alg_do_precomputes(struct 
icp_qat_hw_auth_algo_blk *hash,
+ 
+       memset(ctx->ipad, 0, block_size);
+       memset(ctx->opad, 0, block_size);
+-      memset(shash, 0, sizeof(struct shash_desc));
+       shash->tfm = ctx->hash_tfm;
+ 
+       if (auth_keylen > block_size) {
+@@ -178,8 +180,8 @@ static int qat_alg_do_precomputes(struct 
icp_qat_hw_auth_algo_blk *hash,
+       for (i = 0; i < block_size; i++) {
+               char *ipad_ptr = ctx->ipad + i;
+               char *opad_ptr = ctx->opad + i;
+-              *ipad_ptr ^= 0x36;
+-              *opad_ptr ^= 0x5C;
++              *ipad_ptr ^= HMAC_IPAD_VALUE;
++              *opad_ptr ^= HMAC_OPAD_VALUE;
+       }
+ 
+       if (crypto_shash_init(shash))
+@@ -193,22 +195,22 @@ static int qat_alg_do_precomputes(struct 
icp_qat_hw_auth_algo_blk *hash,
+ 
+       switch (ctx->qat_hash_alg) {
+       case ICP_QAT_HW_AUTH_ALGO_SHA1:
+-              if (crypto_shash_export(shash, &sha1))
++              if (crypto_shash_export(shash, &ctx->sha1))
+                       return -EFAULT;
+               for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
+-                      *hash_state_out = cpu_to_be32(*(sha1.state + i));
++                      *hash_state_out = cpu_to_be32(ctx->sha1.state[i]);
+               break;
+       case ICP_QAT_HW_AUTH_ALGO_SHA256:
+-              if (crypto_shash_export(shash, &sha256))
++              if (crypto_shash_export(shash, &ctx->sha256))
+                       return -EFAULT;
+               for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
+-                      *hash_state_out = cpu_to_be32(*(sha256.state + i));
++                      *hash_state_out = cpu_to_be32(ctx->sha256.state[i]);
+               break;
+       case ICP_QAT_HW_AUTH_ALGO_SHA512:
+-              if (crypto_shash_export(shash, &sha512))
++              if (crypto_shash_export(shash, &ctx->sha512))
+                       return -EFAULT;
+               for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
+-                      *hash512_state_out = cpu_to_be64(*(sha512.state + i));
++                      *hash512_state_out = cpu_to_be64(ctx->sha512.state[i]);
+               break;
+       default:
+               return -EFAULT;
+@@ -229,22 +231,22 @@ static int qat_alg_do_precomputes(struct 
icp_qat_hw_auth_algo_blk *hash,
+ 
+       switch (ctx->qat_hash_alg) {
+       case ICP_QAT_HW_AUTH_ALGO_SHA1:
+-              if (crypto_shash_export(shash, &sha1))
++              if (crypto_shash_export(shash, &ctx->sha1))
+                       return -EFAULT;
+               for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
+-                      *hash_state_out = cpu_to_be32(*(sha1.state + i));
++                      *hash_state_out = cpu_to_be32(ctx->sha1.state[i]);
+               break;
+       case ICP_QAT_HW_AUTH_ALGO_SHA256:
+-              if (crypto_shash_export(shash, &sha256))
++              if (crypto_shash_export(shash, &ctx->sha256))
+                       return -EFAULT;
+               for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
+-                      *hash_state_out = cpu_to_be32(*(sha256.state + i));
++                      *hash_state_out = cpu_to_be32(ctx->sha256.state[i]);
+               break;
+       case ICP_QAT_HW_AUTH_ALGO_SHA512:
+-              if (crypto_shash_export(shash, &sha512))
++              if (crypto_shash_export(shash, &ctx->sha512))
+                       return -EFAULT;
+               for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
+-                      *hash512_state_out = cpu_to_be64(*(sha512.state + i));
++                      *hash512_state_out = cpu_to_be64(ctx->sha512.state[i]);
+               break;
+       default:
+               return -EFAULT;
+@@ -254,7 +256,24 @@ static int qat_alg_do_precomputes(struct 
icp_qat_hw_auth_algo_blk *hash,
+       return 0;
+ }
+ 
+-static void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header)
++static void qat_alg_init_hdr_iv_updt(struct icp_qat_fw_comn_req_hdr *header)
++{
++      ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags,
++                                         ICP_QAT_FW_CIPH_IV_64BIT_PTR);
++      ICP_QAT_FW_LA_UPDATE_STATE_SET(header->serv_specif_flags,
++                                     ICP_QAT_FW_LA_UPDATE_STATE);
++}
++
++static void qat_alg_init_hdr_no_iv_updt(struct icp_qat_fw_comn_req_hdr 
*header)
++{
++      ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags,
++                                         ICP_QAT_FW_CIPH_IV_16BYTE_DATA);
++      ICP_QAT_FW_LA_UPDATE_STATE_SET(header->serv_specif_flags,
++                                     ICP_QAT_FW_LA_NO_UPDATE_STATE);
++}
++
++static void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header,
++                                  int aead)
+ {
+       header->hdr_flags =
+               ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);
+@@ -264,12 +283,12 @@ static void qat_alg_init_common_hdr(struct 
icp_qat_fw_comn_req_hdr *header)
+                                           QAT_COMN_PTR_TYPE_SGL);
+       ICP_QAT_FW_LA_PARTIAL_SET(header->serv_specif_flags,
+                                 ICP_QAT_FW_LA_PARTIAL_NONE);
+-      ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags,
+-                                         ICP_QAT_FW_CIPH_IV_16BYTE_DATA);
++      if (aead)
++              qat_alg_init_hdr_no_iv_updt(header);
++      else
++              qat_alg_init_hdr_iv_updt(header);
+       ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
+                               ICP_QAT_FW_LA_NO_PROTO);
+-      ICP_QAT_FW_LA_UPDATE_STATE_SET(header->serv_specif_flags,
+-                                     ICP_QAT_FW_LA_NO_UPDATE_STATE);
+ }
+ 
+ static int qat_alg_aead_init_enc_session(struct crypto_aead *aead_tfm,
+@@ -304,7 +323,7 @@ static int qat_alg_aead_init_enc_session(struct 
crypto_aead *aead_tfm,
+               return -EFAULT;
+ 
+       /* Request setup */
+-      qat_alg_init_common_hdr(header);
++      qat_alg_init_common_hdr(header, 1);
+       header->service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER_HASH;
+       ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
+                                          ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
+@@ -391,7 +410,7 @@ static int qat_alg_aead_init_dec_session(struct 
crypto_aead *aead_tfm,
+               return -EFAULT;
+ 
+       /* Request setup */
+-      qat_alg_init_common_hdr(header);
++      qat_alg_init_common_hdr(header, 1);
+       header->service_cmd_id = ICP_QAT_FW_LA_CMD_HASH_CIPHER;
+       ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
+                                          ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
+@@ -445,17 +464,17 @@ static int qat_alg_aead_init_dec_session(struct 
crypto_aead *aead_tfm,
+       return 0;
+ }
+ 
+-static void qat_alg_ablkcipher_init_com(struct qat_alg_ablkcipher_ctx *ctx,
+-                                      struct icp_qat_fw_la_bulk_req *req,
+-                                      struct icp_qat_hw_cipher_algo_blk *cd,
+-                                      const uint8_t *key, unsigned int keylen)
++static void qat_alg_skcipher_init_com(struct qat_alg_skcipher_ctx *ctx,
++                                    struct icp_qat_fw_la_bulk_req *req,
++                                    struct icp_qat_hw_cipher_algo_blk *cd,
++                                    const uint8_t *key, unsigned int keylen)
+ {
+       struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
+       struct icp_qat_fw_comn_req_hdr *header = &req->comn_hdr;
+       struct icp_qat_fw_cipher_cd_ctrl_hdr *cd_ctrl = (void *)&req->cd_ctrl;
+ 
+       memcpy(cd->aes.key, key, keylen);
+-      qat_alg_init_common_hdr(header);
++      qat_alg_init_common_hdr(header, 0);
+       header->service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER;
+       cd_pars->u.s.content_desc_params_sz =
+                               sizeof(struct icp_qat_hw_cipher_algo_blk) >> 3;
+@@ -467,28 +486,28 @@ static void qat_alg_ablkcipher_init_com(struct 
qat_alg_ablkcipher_ctx *ctx,
+       ICP_QAT_FW_COMN_NEXT_ID_SET(cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
+ }
+ 
+-static void qat_alg_ablkcipher_init_enc(struct qat_alg_ablkcipher_ctx *ctx,
+-                                      int alg, const uint8_t *key,
+-                                      unsigned int keylen, int mode)
++static void qat_alg_skcipher_init_enc(struct qat_alg_skcipher_ctx *ctx,
++                                    int alg, const uint8_t *key,
++                                    unsigned int keylen, int mode)
+ {
+       struct icp_qat_hw_cipher_algo_blk *enc_cd = ctx->enc_cd;
+       struct icp_qat_fw_la_bulk_req *req = &ctx->enc_fw_req;
+       struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
+ 
+-      qat_alg_ablkcipher_init_com(ctx, req, enc_cd, key, keylen);
++      qat_alg_skcipher_init_com(ctx, req, enc_cd, key, keylen);
+       cd_pars->u.s.content_desc_addr = ctx->enc_cd_paddr;
+       enc_cd->aes.cipher_config.val = QAT_AES_HW_CONFIG_ENC(alg, mode);
+ }
+ 
+-static void qat_alg_ablkcipher_init_dec(struct qat_alg_ablkcipher_ctx *ctx,
+-                                      int alg, const uint8_t *key,
+-                                      unsigned int keylen, int mode)
++static void qat_alg_skcipher_init_dec(struct qat_alg_skcipher_ctx *ctx,
++                                    int alg, const uint8_t *key,
++                                    unsigned int keylen, int mode)
+ {
+       struct icp_qat_hw_cipher_algo_blk *dec_cd = ctx->dec_cd;
+       struct icp_qat_fw_la_bulk_req *req = &ctx->dec_fw_req;
+       struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
+ 
+-      qat_alg_ablkcipher_init_com(ctx, req, dec_cd, key, keylen);
++      qat_alg_skcipher_init_com(ctx, req, dec_cd, key, keylen);
+       cd_pars->u.s.content_desc_addr = ctx->dec_cd_paddr;
+ 
+       if (mode != ICP_QAT_HW_CIPHER_CTR_MODE)
+@@ -548,86 +567,110 @@ static int qat_alg_aead_init_sessions(struct 
crypto_aead *tfm, const u8 *key,
+       if (qat_alg_aead_init_dec_session(tfm, alg, &keys, mode))
+               goto error;
+ 
++      memzero_explicit(&keys, sizeof(keys));
+       return 0;
+ bad_key:
+       crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
++      memzero_explicit(&keys, sizeof(keys));
+       return -EINVAL;
+ error:
++      memzero_explicit(&keys, sizeof(keys));
+       return -EFAULT;
+ }
+ 
+-static int qat_alg_ablkcipher_init_sessions(struct qat_alg_ablkcipher_ctx 
*ctx,
+-                                          const uint8_t *key,
+-                                          unsigned int keylen,
+-                                          int mode)
++static int qat_alg_skcipher_init_sessions(struct qat_alg_skcipher_ctx *ctx,
++                                        const uint8_t *key,
++                                        unsigned int keylen,
++                                        int mode)
+ {
+       int alg;
+ 
+       if (qat_alg_validate_key(keylen, &alg, mode))
+               goto bad_key;
+ 
+-      qat_alg_ablkcipher_init_enc(ctx, alg, key, keylen, mode);
+-      qat_alg_ablkcipher_init_dec(ctx, alg, key, keylen, mode);
++      qat_alg_skcipher_init_enc(ctx, alg, key, keylen, mode);
++      qat_alg_skcipher_init_dec(ctx, alg, key, keylen, mode);
+       return 0;
+ bad_key:
+-      crypto_tfm_set_flags(ctx->tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
++      crypto_skcipher_set_flags(ctx->tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+       return -EINVAL;
+ }
+ 
+-static int qat_alg_aead_setkey(struct crypto_aead *tfm, const uint8_t *key,
++static int qat_alg_aead_rekey(struct crypto_aead *tfm, const uint8_t *key,
++                            unsigned int keylen)
++{
++      struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
++
++      memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
++      memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
++      memset(&ctx->enc_fw_req, 0, sizeof(ctx->enc_fw_req));
++      memset(&ctx->dec_fw_req, 0, sizeof(ctx->dec_fw_req));
++
++      return qat_alg_aead_init_sessions(tfm, key, keylen,
++                                        ICP_QAT_HW_CIPHER_CBC_MODE);
++}
++
++static int qat_alg_aead_newkey(struct crypto_aead *tfm, const uint8_t *key,
+                              unsigned int keylen)
+ {
+       struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
++      struct qat_crypto_instance *inst = NULL;
++      int node = get_current_node();
+       struct device *dev;
++      int ret;
+ 
+-      if (ctx->enc_cd) {
+-              /* rekeying */
+-              dev = &GET_DEV(ctx->inst->accel_dev);
+-              memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
+-              memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
+-              memset(&ctx->enc_fw_req, 0, sizeof(ctx->enc_fw_req));
+-              memset(&ctx->dec_fw_req, 0, sizeof(ctx->dec_fw_req));
+-      } else {
+-              /* new key */
+-              int node = get_current_node();
+-              struct qat_crypto_instance *inst =
+-                              qat_crypto_get_instance_node(node);
+-              if (!inst) {
+-                      return -EINVAL;
+-              }
+-
+-              dev = &GET_DEV(inst->accel_dev);
+-              ctx->inst = inst;
+-              ctx->enc_cd = dma_alloc_coherent(dev, sizeof(*ctx->enc_cd),
+-                                               &ctx->enc_cd_paddr,
+-                                               GFP_ATOMIC);
+-              if (!ctx->enc_cd) {
+-                      return -ENOMEM;
+-              }
+-              ctx->dec_cd = dma_alloc_coherent(dev, sizeof(*ctx->dec_cd),
+-                                               &ctx->dec_cd_paddr,
+-                                               GFP_ATOMIC);
+-              if (!ctx->dec_cd) {
+-                      goto out_free_enc;
+-              }
++      inst = qat_crypto_get_instance_node(node);
++      if (!inst)
++              return -EINVAL;
++      dev = &GET_DEV(inst->accel_dev);
++      ctx->inst = inst;
++      ctx->enc_cd = dma_alloc_coherent(dev, sizeof(*ctx->enc_cd),
++                                       &ctx->enc_cd_paddr,
++                                       GFP_ATOMIC);
++      if (!ctx->enc_cd) {
++              ret = -ENOMEM;
++              goto out_free_inst;
+       }
+-      if (qat_alg_aead_init_sessions(tfm, key, keylen,
+-                                     ICP_QAT_HW_CIPHER_CBC_MODE))
++      ctx->dec_cd = dma_alloc_coherent(dev, sizeof(*ctx->dec_cd),
++                                       &ctx->dec_cd_paddr,
++                                       GFP_ATOMIC);
++      if (!ctx->dec_cd) {
++              ret = -ENOMEM;
++              goto out_free_enc;
++      }
++
++      ret = qat_alg_aead_init_sessions(tfm, key, keylen,
++                                       ICP_QAT_HW_CIPHER_CBC_MODE);
++      if (ret)
+               goto out_free_all;
+ 
+       return 0;
+ 
+ out_free_all:
+-      memzero_explicit(ctx->dec_cd, sizeof(struct qat_alg_cd));
++      memset(ctx->dec_cd, 0, sizeof(struct qat_alg_cd));
+       dma_free_coherent(dev, sizeof(struct qat_alg_cd),
+                         ctx->dec_cd, ctx->dec_cd_paddr);
+       ctx->dec_cd = NULL;
+ out_free_enc:
+-      memzero_explicit(ctx->enc_cd, sizeof(struct qat_alg_cd));
++      memset(ctx->enc_cd, 0, sizeof(struct qat_alg_cd));
+       dma_free_coherent(dev, sizeof(struct qat_alg_cd),
+                         ctx->enc_cd, ctx->enc_cd_paddr);
+       ctx->enc_cd = NULL;
+-      return -ENOMEM;
++out_free_inst:
++      ctx->inst = NULL;
++      qat_crypto_put_instance(inst);
++      return ret;
++}
++
++static int qat_alg_aead_setkey(struct crypto_aead *tfm, const uint8_t *key,
++                             unsigned int keylen)
++{
++      struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
++
++      if (ctx->enc_cd)
++              return qat_alg_aead_rekey(tfm, key, keylen);
++      else
++              return qat_alg_aead_newkey(tfm, key, keylen);
+ }
+ 
+ static void qat_alg_free_bufl(struct qat_crypto_instance *inst,
+@@ -675,8 +718,7 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance 
*inst,
+       dma_addr_t blp;
+       dma_addr_t bloutp = 0;
+       struct scatterlist *sg;
+-      size_t sz_out, sz = sizeof(struct qat_alg_buf_list) +
+-                      ((1 + n) * sizeof(struct qat_alg_buf));
++      size_t sz_out, sz = struct_size(bufl, bufers, n + 1);
+ 
+       if (unlikely(!n))
+               return -EINVAL;
+@@ -688,7 +730,7 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance 
*inst,
+ 
+       blp = dma_map_single(dev, bufl, sz, DMA_TO_DEVICE);
+       if (unlikely(dma_mapping_error(dev, blp)))
+-              goto err;
++              goto err_in;
+ 
+       for_each_sg(sgl, sg, n, i) {
+               int y = sg_nctr;
+@@ -701,7 +743,7 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance 
*inst,
+                                                     DMA_BIDIRECTIONAL);
+               bufl->bufers[y].len = sg->length;
+               if (unlikely(dma_mapping_error(dev, bufl->bufers[y].addr)))
+-                      goto err;
++                      goto err_in;
+               sg_nctr++;
+       }
+       bufl->num_bufs = sg_nctr;
+@@ -713,16 +755,15 @@ static int qat_alg_sgl_to_bufl(struct 
qat_crypto_instance *inst,
+               struct qat_alg_buf *bufers;
+ 
+               n = sg_nents(sglout);
+-              sz_out = sizeof(struct qat_alg_buf_list) +
+-                      ((1 + n) * sizeof(struct qat_alg_buf));
++              sz_out = struct_size(buflout, bufers, n + 1);
+               sg_nctr = 0;
+               buflout = kzalloc_node(sz_out, GFP_ATOMIC,
+                                      dev_to_node(&GET_DEV(inst->accel_dev)));
+               if (unlikely(!buflout))
+-                      goto err;
++                      goto err_in;
+               bloutp = dma_map_single(dev, buflout, sz_out, DMA_TO_DEVICE);
+               if (unlikely(dma_mapping_error(dev, bloutp)))
+-                      goto err;
++                      goto err_out;
+               bufers = buflout->bufers;
+               for_each_sg(sglout, sg, n, i) {
+                       int y = sg_nctr;
+@@ -734,7 +775,7 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance 
*inst,
+                                                       sg->length,
+                                                       DMA_BIDIRECTIONAL);
+                       if (unlikely(dma_mapping_error(dev, bufers[y].addr)))
+-                              goto err;
++                              goto err_out;
+                       bufers[y].len = sg->length;
+                       sg_nctr++;
+               }
+@@ -749,8 +790,20 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance 
*inst,
+               qat_req->buf.sz_out = 0;
+       }
+       return 0;
+-err:
+-      dev_err(dev, "Failed to map buf for dma\n");
++
++err_out:
++      n = sg_nents(sglout);
++      for (i = 0; i < n; i++)
++              if (!dma_mapping_error(dev, buflout->bufers[i].addr))
++                      dma_unmap_single(dev, buflout->bufers[i].addr,
++                                       buflout->bufers[i].len,
++                                       DMA_BIDIRECTIONAL);
++      if (!dma_mapping_error(dev, bloutp))
++              dma_unmap_single(dev, bloutp, sz_out, DMA_TO_DEVICE);
++      kfree(buflout);
++
++err_in:
++      n = sg_nents(sgl);
+       for (i = 0; i < n; i++)
+               if (!dma_mapping_error(dev, bufl->bufers[i].addr))
+                       dma_unmap_single(dev, bufl->bufers[i].addr,
+@@ -760,17 +813,8 @@ err:
+       if (!dma_mapping_error(dev, blp))
+               dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
+       kfree(bufl);
+-      if (sgl != sglout && buflout) {
+-              n = sg_nents(sglout);
+-              for (i = 0; i < n; i++)
+-                      if (!dma_mapping_error(dev, buflout->bufers[i].addr))
+-                              dma_unmap_single(dev, buflout->bufers[i].addr,
+-                                               buflout->bufers[i].len,
+-                                               DMA_BIDIRECTIONAL);
+-              if (!dma_mapping_error(dev, bloutp))
+-                      dma_unmap_single(dev, bloutp, sz_out, DMA_TO_DEVICE);
+-              kfree(buflout);
+-      }
++
++      dev_err(dev, "Failed to map buf for dma\n");
+       return -ENOMEM;
+ }
+ 
+@@ -789,19 +833,25 @@ static void qat_aead_alg_callback(struct 
icp_qat_fw_la_resp *qat_resp,
+       areq->base.complete(&areq->base, res);
+ }
+ 
+-static void qat_ablkcipher_alg_callback(struct icp_qat_fw_la_resp *qat_resp,
+-                                      struct qat_crypto_request *qat_req)
++static void qat_skcipher_alg_callback(struct icp_qat_fw_la_resp *qat_resp,
++                                    struct qat_crypto_request *qat_req)
+ {
+-      struct qat_alg_ablkcipher_ctx *ctx = qat_req->ablkcipher_ctx;
++      struct qat_alg_skcipher_ctx *ctx = qat_req->skcipher_ctx;
+       struct qat_crypto_instance *inst = ctx->inst;
+-      struct ablkcipher_request *areq = qat_req->ablkcipher_req;
++      struct skcipher_request *sreq = qat_req->skcipher_req;
+       uint8_t stat_filed = qat_resp->comn_resp.comn_status;
++      struct device *dev = &GET_DEV(ctx->inst->accel_dev);
+       int res = 0, qat_res = ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_filed);
+ 
+       qat_alg_free_bufl(inst, qat_req);
+       if (unlikely(qat_res != ICP_QAT_FW_COMN_STATUS_FLAG_OK))
+               res = -EINVAL;
+-      areq->base.complete(&areq->base, res);
++
++      memcpy(sreq->iv, qat_req->iv, AES_BLOCK_SIZE);
++      dma_free_coherent(dev, AES_BLOCK_SIZE, qat_req->iv,
++                        qat_req->iv_paddr);
++
++      sreq->base.complete(&sreq->base, res);
+ }
+ 
+ void qat_alg_callback(void *resp)
+@@ -823,7 +873,7 @@ static int qat_alg_aead_dec(struct aead_request *areq)
+       struct icp_qat_fw_la_auth_req_params *auth_param;
+       struct icp_qat_fw_la_bulk_req *msg;
+       int digst_size = crypto_aead_authsize(aead_tfm);
+-      int ret;
++      int ret, ctr = 0;
+ 
+       ret = qat_alg_sgl_to_bufl(ctx->inst, areq->src, areq->dst, qat_req);
+       if (unlikely(ret))
+@@ -844,13 +894,14 @@ static int qat_alg_aead_dec(struct aead_request *areq)
+       auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param));
+       auth_param->auth_off = 0;
+       auth_param->auth_len = areq->assoclen + cipher_param->cipher_length;
+-
+       do {
+               ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
+-              if (ret)
+-                      cond_resched();
+-      } while (ret == -EAGAIN);
++      } while (ret == -EAGAIN && ctr++ < 10);
+ 
++      if (ret == -EAGAIN) {
++              qat_alg_free_bufl(ctx->inst, qat_req);
++              return -EBUSY;
++      }
+       return -EINPROGRESS;
+ }
+ 
+@@ -864,7 +915,7 @@ static int qat_alg_aead_enc(struct aead_request *areq)
+       struct icp_qat_fw_la_auth_req_params *auth_param;
+       struct icp_qat_fw_la_bulk_req *msg;
+       uint8_t *iv = areq->iv;
+-      int ret;
++      int ret, ctr = 0;
+ 
+       ret = qat_alg_sgl_to_bufl(ctx->inst, areq->src, areq->dst, qat_req);
+       if (unlikely(ret))
+@@ -890,159 +941,230 @@ static int qat_alg_aead_enc(struct aead_request *areq)
+ 
+       do {
+               ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
+-              if (ret)
+-                      cond_resched();
+-      } while (ret == -EAGAIN);
++      } while (ret == -EAGAIN && ctr++ < 10);
+ 
++      if (ret == -EAGAIN) {
++              qat_alg_free_bufl(ctx->inst, qat_req);
++              return -EBUSY;
++      }
+       return -EINPROGRESS;
+ }
+ 
+-static int qat_alg_ablkcipher_setkey(struct crypto_ablkcipher *tfm,
+-                                   const u8 *key, unsigned int keylen,
+-                                   int mode)
++static int qat_alg_skcipher_rekey(struct qat_alg_skcipher_ctx *ctx,
++                                const u8 *key, unsigned int keylen,
++                                int mode)
+ {
+-      struct qat_alg_ablkcipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
+-      struct device *dev;
++      memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
++      memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
++      memset(&ctx->enc_fw_req, 0, sizeof(ctx->enc_fw_req));
++      memset(&ctx->dec_fw_req, 0, sizeof(ctx->dec_fw_req));
+ 
+-      if (ctx->enc_cd) {
+-              /* rekeying */
+-              dev = &GET_DEV(ctx->inst->accel_dev);
+-              memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
+-              memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
+-              memset(&ctx->enc_fw_req, 0, sizeof(ctx->enc_fw_req));
+-              memset(&ctx->dec_fw_req, 0, sizeof(ctx->dec_fw_req));
+-      } else {
+-              /* new key */
+-              int node = get_current_node();
+-              struct qat_crypto_instance *inst =
+-                              qat_crypto_get_instance_node(node);
+-              if (!inst)
+-                      return -EINVAL;
++      return qat_alg_skcipher_init_sessions(ctx, key, keylen, mode);
++}
++
++static int qat_alg_skcipher_newkey(struct qat_alg_skcipher_ctx *ctx,
++                                 const u8 *key, unsigned int keylen,
++                                 int mode)
++{
++      struct qat_crypto_instance *inst = NULL;
++      struct device *dev;
++      int node = get_current_node();
++      int ret;
+ 
+-              dev = &GET_DEV(inst->accel_dev);
+-              ctx->inst = inst;
+-              ctx->enc_cd = dma_alloc_coherent(dev, sizeof(*ctx->enc_cd),
+-                                               &ctx->enc_cd_paddr,
+-                                               GFP_ATOMIC);
+-              if (!ctx->enc_cd)
+-                      return -ENOMEM;
+-              ctx->dec_cd = dma_alloc_coherent(dev, sizeof(*ctx->dec_cd),
+-                                               &ctx->dec_cd_paddr,
+-                                               GFP_ATOMIC);
+-              if (!ctx->dec_cd)
+-                      goto out_free_enc;
++      inst = qat_crypto_get_instance_node(node);
++      if (!inst)
++              return -EINVAL;
++      dev = &GET_DEV(inst->accel_dev);
++      ctx->inst = inst;
++      ctx->enc_cd = dma_alloc_coherent(dev, sizeof(*ctx->enc_cd),
++                                       &ctx->enc_cd_paddr,
++                                       GFP_ATOMIC);
++      if (!ctx->enc_cd) {
++              ret = -ENOMEM;
++              goto out_free_instance;
++      }
++      ctx->dec_cd = dma_alloc_coherent(dev, sizeof(*ctx->dec_cd),
++                                       &ctx->dec_cd_paddr,
++                                       GFP_ATOMIC);
++      if (!ctx->dec_cd) {
++              ret = -ENOMEM;
++              goto out_free_enc;
+       }
+-      if (qat_alg_ablkcipher_init_sessions(ctx, key, keylen, mode))
++
++      ret = qat_alg_skcipher_init_sessions(ctx, key, keylen, mode);
++      if (ret)
+               goto out_free_all;
+ 
+       return 0;
+ 
+ out_free_all:
+-      memzero_explicit(ctx->dec_cd, sizeof(*ctx->dec_cd));
++      memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
+       dma_free_coherent(dev, sizeof(*ctx->dec_cd),
+                         ctx->dec_cd, ctx->dec_cd_paddr);
+       ctx->dec_cd = NULL;
+ out_free_enc:
+-      memzero_explicit(ctx->enc_cd, sizeof(*ctx->enc_cd));
++      memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
+       dma_free_coherent(dev, sizeof(*ctx->enc_cd),
+                         ctx->enc_cd, ctx->enc_cd_paddr);
+       ctx->enc_cd = NULL;
+-      return -ENOMEM;
++out_free_instance:
++      ctx->inst = NULL;
++      qat_crypto_put_instance(inst);
++      return ret;
+ }
+ 
+-static int qat_alg_ablkcipher_cbc_setkey(struct crypto_ablkcipher *tfm,
+-                                       const u8 *key, unsigned int keylen)
++static int qat_alg_skcipher_setkey(struct crypto_skcipher *tfm,
++                                 const u8 *key, unsigned int keylen,
++                                 int mode)
+ {
+-      return qat_alg_ablkcipher_setkey(tfm, key, keylen,
+-                                       ICP_QAT_HW_CIPHER_CBC_MODE);
++      struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
++
++      if (ctx->enc_cd)
++              return qat_alg_skcipher_rekey(ctx, key, keylen, mode);
++      else
++              return qat_alg_skcipher_newkey(ctx, key, keylen, mode);
++}
++
++static int qat_alg_skcipher_cbc_setkey(struct crypto_skcipher *tfm,
++                                     const u8 *key, unsigned int keylen)
++{
++      return qat_alg_skcipher_setkey(tfm, key, keylen,
++                                     ICP_QAT_HW_CIPHER_CBC_MODE);
+ }
+ 
+-static int qat_alg_ablkcipher_ctr_setkey(struct crypto_ablkcipher *tfm,
+-                                       const u8 *key, unsigned int keylen)
++static int qat_alg_skcipher_ctr_setkey(struct crypto_skcipher *tfm,
++                                     const u8 *key, unsigned int keylen)
+ {
+-      return qat_alg_ablkcipher_setkey(tfm, key, keylen,
+-                                       ICP_QAT_HW_CIPHER_CTR_MODE);
++      return qat_alg_skcipher_setkey(tfm, key, keylen,
++                                     ICP_QAT_HW_CIPHER_CTR_MODE);
+ }
+ 
+-static int qat_alg_ablkcipher_xts_setkey(struct crypto_ablkcipher *tfm,
+-                                       const u8 *key, unsigned int keylen)
++static int qat_alg_skcipher_xts_setkey(struct crypto_skcipher *tfm,
++                                     const u8 *key, unsigned int keylen)
+ {
+-      return qat_alg_ablkcipher_setkey(tfm, key, keylen,
+-                                       ICP_QAT_HW_CIPHER_XTS_MODE);
++      return qat_alg_skcipher_setkey(tfm, key, keylen,
++                                     ICP_QAT_HW_CIPHER_XTS_MODE);
+ }
+ 
+-static int qat_alg_ablkcipher_encrypt(struct ablkcipher_request *req)
++static int qat_alg_skcipher_encrypt(struct skcipher_request *req)
+ {
+-      struct crypto_ablkcipher *atfm = crypto_ablkcipher_reqtfm(req);
+-      struct crypto_tfm *tfm = crypto_ablkcipher_tfm(atfm);
+-      struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
+-      struct qat_crypto_request *qat_req = ablkcipher_request_ctx(req);
++      struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
++      struct crypto_tfm *tfm = crypto_skcipher_tfm(stfm);
++      struct qat_alg_skcipher_ctx *ctx = crypto_tfm_ctx(tfm);
++      struct qat_crypto_request *qat_req = skcipher_request_ctx(req);
+       struct icp_qat_fw_la_cipher_req_params *cipher_param;
+       struct icp_qat_fw_la_bulk_req *msg;
+-      int ret;
++      struct device *dev = &GET_DEV(ctx->inst->accel_dev);
++      int ret, ctr = 0;
++
++      if (req->cryptlen == 0)
++              return 0;
++
++      qat_req->iv = dma_alloc_coherent(dev, AES_BLOCK_SIZE,
++                                       &qat_req->iv_paddr, GFP_ATOMIC);
++      if (!qat_req->iv)
++              return -ENOMEM;
+ 
+       ret = qat_alg_sgl_to_bufl(ctx->inst, req->src, req->dst, qat_req);
+-      if (unlikely(ret))
++      if (unlikely(ret)) {
++              dma_free_coherent(dev, AES_BLOCK_SIZE, qat_req->iv,
++                                qat_req->iv_paddr);
+               return ret;
++      }
+ 
+       msg = &qat_req->req;
+       *msg = ctx->enc_fw_req;
+-      qat_req->ablkcipher_ctx = ctx;
+-      qat_req->ablkcipher_req = req;
+-      qat_req->cb = qat_ablkcipher_alg_callback;
++      qat_req->skcipher_ctx = ctx;
++      qat_req->skcipher_req = req;
++      qat_req->cb = qat_skcipher_alg_callback;
+       qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
+       qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
+       qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
+       cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
+-      cipher_param->cipher_length = req->nbytes;
++      cipher_param->cipher_length = req->cryptlen;
+       cipher_param->cipher_offset = 0;
+-      memcpy(cipher_param->u.cipher_IV_array, req->info, AES_BLOCK_SIZE);
+-
++      cipher_param->u.s.cipher_IV_ptr = qat_req->iv_paddr;
++      memcpy(qat_req->iv, req->iv, AES_BLOCK_SIZE);
+       do {
+               ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
+-              if (ret)
+-                      cond_resched();
+-      } while (ret == -EAGAIN);
++      } while (ret == -EAGAIN && ctr++ < 10);
+ 
++      if (ret == -EAGAIN) {
++              qat_alg_free_bufl(ctx->inst, qat_req);
++              dma_free_coherent(dev, AES_BLOCK_SIZE, qat_req->iv,
++                                qat_req->iv_paddr);
++              return -EBUSY;
++      }
+       return -EINPROGRESS;
+ }
+ 
+-static int qat_alg_ablkcipher_decrypt(struct ablkcipher_request *req)
++static int qat_alg_skcipher_blk_encrypt(struct skcipher_request *req)
++{
++      if (req->cryptlen % AES_BLOCK_SIZE != 0)
++              return -EINVAL;
++
++      return qat_alg_skcipher_encrypt(req);
++}
++
++static int qat_alg_skcipher_decrypt(struct skcipher_request *req)
+ {
+-      struct crypto_ablkcipher *atfm = crypto_ablkcipher_reqtfm(req);
+-      struct crypto_tfm *tfm = crypto_ablkcipher_tfm(atfm);
+-      struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
+-      struct qat_crypto_request *qat_req = ablkcipher_request_ctx(req);
++      struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
++      struct crypto_tfm *tfm = crypto_skcipher_tfm(stfm);
++      struct qat_alg_skcipher_ctx *ctx = crypto_tfm_ctx(tfm);
++      struct qat_crypto_request *qat_req = skcipher_request_ctx(req);
+       struct icp_qat_fw_la_cipher_req_params *cipher_param;
+       struct icp_qat_fw_la_bulk_req *msg;
+-      int ret;
++      struct device *dev = &GET_DEV(ctx->inst->accel_dev);
++      int ret, ctr = 0;
++
++      if (req->cryptlen == 0)
++              return 0;
++
++      qat_req->iv = dma_alloc_coherent(dev, AES_BLOCK_SIZE,
++                                       &qat_req->iv_paddr, GFP_ATOMIC);
++      if (!qat_req->iv)
++              return -ENOMEM;
+ 
+       ret = qat_alg_sgl_to_bufl(ctx->inst, req->src, req->dst, qat_req);
+-      if (unlikely(ret))
++      if (unlikely(ret)) {
++              dma_free_coherent(dev, AES_BLOCK_SIZE, qat_req->iv,
++                                qat_req->iv_paddr);
+               return ret;
++      }
+ 
+       msg = &qat_req->req;
+       *msg = ctx->dec_fw_req;
+-      qat_req->ablkcipher_ctx = ctx;
+-      qat_req->ablkcipher_req = req;
+-      qat_req->cb = qat_ablkcipher_alg_callback;
++      qat_req->skcipher_ctx = ctx;
++      qat_req->skcipher_req = req;
++      qat_req->cb = qat_skcipher_alg_callback;
+       qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
+       qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
+       qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
+       cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
+-      cipher_param->cipher_length = req->nbytes;
++      cipher_param->cipher_length = req->cryptlen;
+       cipher_param->cipher_offset = 0;
+-      memcpy(cipher_param->u.cipher_IV_array, req->info, AES_BLOCK_SIZE);
+-
++      cipher_param->u.s.cipher_IV_ptr = qat_req->iv_paddr;
++      memcpy(qat_req->iv, req->iv, AES_BLOCK_SIZE);
+       do {
+               ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
+-              if (ret)
+-                      cond_resched();
+-      } while (ret == -EAGAIN);
++      } while (ret == -EAGAIN && ctr++ < 10);
+ 
++      if (ret == -EAGAIN) {
++              qat_alg_free_bufl(ctx->inst, qat_req);
++              dma_free_coherent(dev, AES_BLOCK_SIZE, qat_req->iv,
++                                qat_req->iv_paddr);
++              return -EBUSY;
++      }
+       return -EINPROGRESS;
+ }
+ 
++static int qat_alg_skcipher_blk_decrypt(struct skcipher_request *req)
++{
++      if (req->cryptlen % AES_BLOCK_SIZE != 0)
++              return -EINVAL;
++
++      return qat_alg_skcipher_decrypt(req);
++}
+ static int qat_alg_aead_init(struct crypto_aead *tfm,
+                            enum icp_qat_hw_auth_algo hash,
+                            const char *hash_name)
+@@ -1085,30 +1207,30 @@ static void qat_alg_aead_exit(struct crypto_aead *tfm)
+ 
+       dev = &GET_DEV(inst->accel_dev);
+       if (ctx->enc_cd) {
+-              memzero_explicit(ctx->enc_cd, sizeof(struct qat_alg_cd));
++              memset(ctx->enc_cd, 0, sizeof(struct qat_alg_cd));
+               dma_free_coherent(dev, sizeof(struct qat_alg_cd),
+                                 ctx->enc_cd, ctx->enc_cd_paddr);
+       }
+       if (ctx->dec_cd) {
+-              memzero_explicit(ctx->dec_cd, sizeof(struct qat_alg_cd));
++              memset(ctx->dec_cd, 0, sizeof(struct qat_alg_cd));
+               dma_free_coherent(dev, sizeof(struct qat_alg_cd),
+                                 ctx->dec_cd, ctx->dec_cd_paddr);
+       }
+       qat_crypto_put_instance(inst);
+ }
+ 
+-static int qat_alg_ablkcipher_init(struct crypto_tfm *tfm)
++static int qat_alg_skcipher_init_tfm(struct crypto_skcipher *tfm)
+ {
+-      struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
++      struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+ 
+-      tfm->crt_ablkcipher.reqsize = sizeof(struct qat_crypto_request);
++      crypto_skcipher_set_reqsize(tfm, sizeof(struct qat_crypto_request));
+       ctx->tfm = tfm;
+       return 0;
+ }
+ 
+-static void qat_alg_ablkcipher_exit(struct crypto_tfm *tfm)
++static void qat_alg_skcipher_exit_tfm(struct crypto_skcipher *tfm)
+ {
+-      struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
++      struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+       struct qat_crypto_instance *inst = ctx->inst;
+       struct device *dev;
+ 
+@@ -1117,15 +1239,15 @@ static void qat_alg_ablkcipher_exit(struct crypto_tfm 
*tfm)
+ 
+       dev = &GET_DEV(inst->accel_dev);
+       if (ctx->enc_cd) {
+-              memzero_explicit(ctx->enc_cd,
+-                               sizeof(struct icp_qat_hw_cipher_algo_blk));
++              memset(ctx->enc_cd, 0,
++                     sizeof(struct icp_qat_hw_cipher_algo_blk));
+               dma_free_coherent(dev,
+                                 sizeof(struct icp_qat_hw_cipher_algo_blk),
+                                 ctx->enc_cd, ctx->enc_cd_paddr);
+       }
+       if (ctx->dec_cd) {
+-              memzero_explicit(ctx->dec_cd,
+-                               sizeof(struct icp_qat_hw_cipher_algo_blk));
++              memset(ctx->dec_cd, 0,
++                     sizeof(struct icp_qat_hw_cipher_algo_blk));
+               dma_free_coherent(dev,
+                                 sizeof(struct icp_qat_hw_cipher_algo_blk),
+                                 ctx->dec_cd, ctx->dec_cd_paddr);
+@@ -1187,92 +1309,75 @@ static struct aead_alg qat_aeads[] = { {
+       .maxauthsize = SHA512_DIGEST_SIZE,
+ } };
+ 
+-static struct crypto_alg qat_algs[] = { {
+-      .cra_name = "cbc(aes)",
+-      .cra_driver_name = "qat_aes_cbc",
+-      .cra_priority = 4001,
+-      .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+-      .cra_blocksize = AES_BLOCK_SIZE,
+-      .cra_ctxsize = sizeof(struct qat_alg_ablkcipher_ctx),
+-      .cra_alignmask = 0,
+-      .cra_type = &crypto_ablkcipher_type,
+-      .cra_module = THIS_MODULE,
+-      .cra_init = qat_alg_ablkcipher_init,
+-      .cra_exit = qat_alg_ablkcipher_exit,
+-      .cra_u = {
+-              .ablkcipher = {
+-                      .setkey = qat_alg_ablkcipher_cbc_setkey,
+-                      .decrypt = qat_alg_ablkcipher_decrypt,
+-                      .encrypt = qat_alg_ablkcipher_encrypt,
+-                      .min_keysize = AES_MIN_KEY_SIZE,
+-                      .max_keysize = AES_MAX_KEY_SIZE,
+-                      .ivsize = AES_BLOCK_SIZE,
+-              },
+-      },
++static struct skcipher_alg qat_skciphers[] = { {
++      .base.cra_name = "cbc(aes)",
++      .base.cra_driver_name = "qat_aes_cbc",
++      .base.cra_priority = 4001,
++      .base.cra_flags = CRYPTO_ALG_ASYNC,
++      .base.cra_blocksize = AES_BLOCK_SIZE,
++      .base.cra_ctxsize = sizeof(struct qat_alg_skcipher_ctx),
++      .base.cra_alignmask = 0,
++      .base.cra_module = THIS_MODULE,
++
++      .init = qat_alg_skcipher_init_tfm,
++      .exit = qat_alg_skcipher_exit_tfm,
++      .setkey = qat_alg_skcipher_cbc_setkey,
++      .decrypt = qat_alg_skcipher_blk_decrypt,
++      .encrypt = qat_alg_skcipher_blk_encrypt,
++      .min_keysize = AES_MIN_KEY_SIZE,
++      .max_keysize = AES_MAX_KEY_SIZE,
++      .ivsize = AES_BLOCK_SIZE,
+ }, {
+-      .cra_name = "ctr(aes)",
+-      .cra_driver_name = "qat_aes_ctr",
+-      .cra_priority = 4001,
+-      .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+-      .cra_blocksize = AES_BLOCK_SIZE,
+-      .cra_ctxsize = sizeof(struct qat_alg_ablkcipher_ctx),
+-      .cra_alignmask = 0,
+-      .cra_type = &crypto_ablkcipher_type,
+-      .cra_module = THIS_MODULE,
+-      .cra_init = qat_alg_ablkcipher_init,
+-      .cra_exit = qat_alg_ablkcipher_exit,
+-      .cra_u = {
+-              .ablkcipher = {
+-                      .setkey = qat_alg_ablkcipher_ctr_setkey,
+-                      .decrypt = qat_alg_ablkcipher_decrypt,
+-                      .encrypt = qat_alg_ablkcipher_encrypt,
+-                      .min_keysize = AES_MIN_KEY_SIZE,
+-                      .max_keysize = AES_MAX_KEY_SIZE,
+-                      .ivsize = AES_BLOCK_SIZE,
+-              },
+-      },
++      .base.cra_name = "ctr(aes)",
++      .base.cra_driver_name = "qat_aes_ctr",
++      .base.cra_priority = 4001,
++      .base.cra_flags = CRYPTO_ALG_ASYNC,
++      .base.cra_blocksize = 1,
++      .base.cra_ctxsize = sizeof(struct qat_alg_skcipher_ctx),
++      .base.cra_alignmask = 0,
++      .base.cra_module = THIS_MODULE,
++
++      .init = qat_alg_skcipher_init_tfm,
++      .exit = qat_alg_skcipher_exit_tfm,
++      .setkey = qat_alg_skcipher_ctr_setkey,
++      .decrypt = qat_alg_skcipher_decrypt,
++      .encrypt = qat_alg_skcipher_encrypt,
++      .min_keysize = AES_MIN_KEY_SIZE,
++      .max_keysize = AES_MAX_KEY_SIZE,
++      .ivsize = AES_BLOCK_SIZE,
+ }, {
+-      .cra_name = "xts(aes)",
+-      .cra_driver_name = "qat_aes_xts",
+-      .cra_priority = 4001,
+-      .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+-      .cra_blocksize = AES_BLOCK_SIZE,
+-      .cra_ctxsize = sizeof(struct qat_alg_ablkcipher_ctx),
+-      .cra_alignmask = 0,
+-      .cra_type = &crypto_ablkcipher_type,
+-      .cra_module = THIS_MODULE,
+-      .cra_init = qat_alg_ablkcipher_init,
+-      .cra_exit = qat_alg_ablkcipher_exit,
+-      .cra_u = {
+-              .ablkcipher = {
+-                      .setkey = qat_alg_ablkcipher_xts_setkey,
+-                      .decrypt = qat_alg_ablkcipher_decrypt,
+-                      .encrypt = qat_alg_ablkcipher_encrypt,
+-                      .min_keysize = 2 * AES_MIN_KEY_SIZE,
+-                      .max_keysize = 2 * AES_MAX_KEY_SIZE,
+-                      .ivsize = AES_BLOCK_SIZE,
+-              },
+-      },
++      .base.cra_name = "xts(aes)",
++      .base.cra_driver_name = "qat_aes_xts",
++      .base.cra_priority = 4001,
++      .base.cra_flags = CRYPTO_ALG_ASYNC,
++      .base.cra_blocksize = AES_BLOCK_SIZE,
++      .base.cra_ctxsize = sizeof(struct qat_alg_skcipher_ctx),
++      .base.cra_alignmask = 0,
++      .base.cra_module = THIS_MODULE,
++
++      .init = qat_alg_skcipher_init_tfm,
++      .exit = qat_alg_skcipher_exit_tfm,
++      .setkey = qat_alg_skcipher_xts_setkey,
++      .decrypt = qat_alg_skcipher_blk_decrypt,
++      .encrypt = qat_alg_skcipher_blk_encrypt,
++      .min_keysize = 2 * AES_MIN_KEY_SIZE,
++      .max_keysize = 2 * AES_MAX_KEY_SIZE,
++      .ivsize = AES_BLOCK_SIZE,
+ } };
+ 
+ int qat_algs_register(void)
+ {
+-      int ret = 0, i;
++      int ret = 0;
+ 
+       mutex_lock(&algs_lock);
+       if (++active_devs != 1)
+               goto unlock;
+ 
+-      for (i = 0; i < ARRAY_SIZE(qat_algs); i++)
+-              qat_algs[i].cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | 
CRYPTO_ALG_ASYNC;
+-
+-      ret = crypto_register_algs(qat_algs, ARRAY_SIZE(qat_algs));
++      ret = crypto_register_skciphers(qat_skciphers,
++                                      ARRAY_SIZE(qat_skciphers));
+       if (ret)
+               goto unlock;
+ 
+-      for (i = 0; i < ARRAY_SIZE(qat_aeads); i++)
+-              qat_aeads[i].base.cra_flags = CRYPTO_ALG_ASYNC;
+-
+       ret = crypto_register_aeads(qat_aeads, ARRAY_SIZE(qat_aeads));
+       if (ret)
+               goto unreg_algs;
+@@ -1282,7 +1387,7 @@ unlock:
+       return ret;
+ 
+ unreg_algs:
+-      crypto_unregister_algs(qat_algs, ARRAY_SIZE(qat_algs));
++      crypto_unregister_skciphers(qat_skciphers, ARRAY_SIZE(qat_skciphers));
+       goto unlock;
+ }
+ 
+@@ -1293,9 +1398,8 @@ void qat_algs_unregister(void)
+               goto unlock;
+ 
+       crypto_unregister_aeads(qat_aeads, ARRAY_SIZE(qat_aeads));
+-      crypto_unregister_algs(qat_algs, ARRAY_SIZE(qat_algs));
++      crypto_unregister_skciphers(qat_skciphers, ARRAY_SIZE(qat_skciphers));
+ 
+ unlock:
+       mutex_unlock(&algs_lock);
+ }
+-#endif
+diff --git a/quickassist/qat/drivers/crypto/qat/qat_common/qat_crypto.h 
b/quickassist/qat/drivers/crypto/qat/qat_common/qat_crypto.h
+index dc0273f..300bb91 100644
+--- a/quickassist/qat/drivers/crypto/qat/qat_common/qat_crypto.h
++++ b/quickassist/qat/drivers/crypto/qat/qat_common/qat_crypto.h
+@@ -79,15 +79,17 @@ struct qat_crypto_request {
+       struct icp_qat_fw_la_bulk_req req;
+       union {
+               struct qat_alg_aead_ctx *aead_ctx;
+-              struct qat_alg_ablkcipher_ctx *ablkcipher_ctx;
++              struct qat_alg_skcipher_ctx *skcipher_ctx;
+       };
+       union {
+               struct aead_request *aead_req;
+-              struct ablkcipher_request *ablkcipher_req;
++              struct skcipher_request *skcipher_req;
+       };
+       struct qat_crypto_request_buffs buf;
+       void (*cb)(struct icp_qat_fw_la_resp *resp,
+                  struct qat_crypto_request *req);
++      void *iv;
++      dma_addr_t iv_paddr;
+ };
+ 
+ #endif
+-- 
+2.24.1
+
diff --git a/recipes-extended/qat/qat17_4.2.0-00012.bb 
b/recipes-extended/qat/qat17_4.7.0-00006.bb
similarity index 73%
rename from recipes-extended/qat/qat17_4.2.0-00012.bb
rename to recipes-extended/qat/qat17_4.7.0-00006.bb
index a5902dc..693a2f8 100644
--- a/recipes-extended/qat/qat17_4.2.0-00012.bb
+++ b/recipes-extended/qat/qat17_4.7.0-00006.bb
@@ -12,17 +12,24 @@ PROVIDES += "virtual/qat"
 
 TARGET_CC_ARCH += "${LDFLAGS}"
 
-SRC_URI="https://01.org/sites/default/files/downloads/intelr-quickassist-technology/qat1.7.l.${PV}.tar.gz;subdir=qat17
 \
-         file://qat16_2.3.0-34-qat-remove-local-path-from-makefile.patch \
-         
file://qat16_2.6.0-65-qat-override-CC-LD-AR-only-when-it-is-not-define.patch \
-         file://qat17_0.6.0-1-qat-update-KDIR-for-cross-compilation.patch \
-         file://qat17_0.8.0-37-qat-added-include-dir-path.patch \
-         file://qat17_0.9.0-4-qat-add-install-target-and-add-folder.patch \
-         file://qat17_4.1.0-00022-qat-use-static-lib-for-linking.patch \
-         "
-
-SRC_URI[md5sum] = "2fe81587e8b85747d5461b031241beb2"
-SRC_URI[sha256sum] = 
"47990b3283ded748799dba42d4b0e1bdc0be3cf3978bd587533cd12788b03856"
+SRC_URI = 
"https://01.org/sites/default/files/downloads/qat1.7.l.${PV}.tar.gz;subdir=qat17
 \
+           file://qat16_2.3.0-34-qat-remove-local-path-from-makefile.patch \
+           
file://qat16_2.6.0-65-qat-override-CC-LD-AR-only-when-it-is-not-define.patch \
+           file://qat17_0.6.0-1-qat-update-KDIR-for-cross-compilation.patch \
+           file://qat17_0.8.0-37-qat-added-include-dir-path.patch \
+           file://qat17_0.9.0-4-qat-add-install-target-and-add-folder.patch \
+           file://qat17_4.1.0-00022-qat-use-static-lib-for-linking.patch \
+           file://qat17_4.7.0-00006-Link-driver-with-object-files.patch \
+           file://qat17_4.7.0-00006-Drop-pr_warning-definition.patch \
+          "
+
+python __anonymous () {
+    if bb.utils.vercmp_string(d.getVar("KERNEL_VERSION") or "0", "5.5") >= 0:
+        d.appendVar('SRC_URI', 
"file://qat17_4.7.0-00006-Switch-to-skcipher-API.patch")
+}
+
+SRC_URI[md5sum] = "ac939b51cc8836c182e31e309c065002"
+SRC_URI[sha256sum] = 
"5c8bdc35fd7a42f212f1f87eb9e3d8584df7af56dae366debc487981e531fa5c"
 
 COMPATIBLE_MACHINE = "null"
 COMPATIBLE_HOST_x86-x32 = 'null'
@@ -65,13 +72,13 @@ do_compile () {
   export LD="${LD} --hash-style=gnu"
   export MACHINE="${TARGET_ARCH}"
 
-  cd ${S}/quickassist
-  oe_runmake
-
   cd ${S}/quickassist/qat
-  oe_runmake 'clean'
+  oe_runmake
   oe_runmake 'modules_install'
 
+  cd ${S}/quickassist
+  oe_runmake
+
   cd ${S}/quickassist/utilities/adf_ctl
   oe_runmake
 
@@ -109,27 +116,22 @@ do_install() {
   echo 'KERNEL=="uio*" MODE="0660" GROUP="qat"' >> 
${D}/etc/udev/rules.d/00-qat.rules
   echo 'KERNEL=="hugepages" MODE="0660" GROUP="qat"' >> 
${D}/etc/udev/rules.d/00-qat.rules
 
+  mkdir -p ${D}${base_libdir}
+
   install -D -m 0755 
${S}/quickassist/lookaside/access_layer/src/build/linux_2.6/user_space/libqat_s.so
 ${D}${base_libdir}
   install -D -m 0755 
${S}/quickassist/lookaside/access_layer/src/build/linux_2.6/user_space/libqat.a 
${D}${base_libdir}
   install -D -m 0755 
${S}/quickassist/utilities/osal/src/build/linux_2.6/user_space/libosal_s.so 
${D}${base_libdir}
   install -D -m 0755 
${S}/quickassist/utilities/osal/src/build/linux_2.6/user_space/libosal.a 
${D}${base_libdir}
-  install -D -m 0755 
${S}/quickassist/lookaside/access_layer/src/qat_direct/src/build/linux_2.6/user_space/libadf.a
 ${D}${base_libdir}
+  install -D -m 0755 
${S}/quickassist/lookaside/access_layer/src/qat_direct/src/build/linux_2.6/user_space/libadf_user.a
 ${D}${base_libdir}/libadf.a
   install -D -m 0755 ${S}/quickassist/utilities/libusdm_drv/libusdm_drv_s.so 
${D}${base_libdir}
   install -D -m 0755 ${S}/quickassist/utilities/libusdm_drv/libusdm_drv.a 
${D}${base_libdir}
   install -D -m 0750 ${S}/quickassist/utilities/adf_ctl/adf_ctl ${D}${sbindir}
 
-  install -D -m 640 
${S}/quickassist/utilities/adf_ctl/conf_files/c3xxx_dev0.conf  ${D}${sysconfdir}
   install -D -m 640 ${S}/quickassist/utilities/adf_ctl/conf_files/*.conf  
${D}${sysconfdir}/conf_files
   install -D -m 640 ${S}/quickassist/utilities/adf_ctl/conf_files/*.conf.vm  
${D}${sysconfdir}/conf_files
 
-  install -m 0755 ${S}/quickassist/qat/fw/qat_c3xxx.bin  
${D}${base_libdir}/firmware
-  install -m 0755 ${S}/quickassist/qat/fw/qat_c3xxx_mmp.bin  
${D}${base_libdir}/firmware
-  install -m 0755 ${S}/quickassist/qat/fw/qat_c62x.bin  
${D}${base_libdir}/firmware
-  install -m 0755 ${S}/quickassist/qat/fw/qat_c62x_mmp.bin  
${D}${base_libdir}/firmware
-  install -m 0755 ${S}/quickassist/qat/fw/qat_895xcc.bin  
${D}${base_libdir}/firmware
-  install -m 0755 ${S}/quickassist/qat/fw/qat_895xcc_mmp.bin  
${D}${base_libdir}/firmware
-  install -m 0755 ${S}/quickassist/qat/fw/qat_d15xx.bin  
${D}${base_libdir}/firmware
-  install -m 0755 ${S}/quickassist/qat/fw/qat_d15xx_mmp.bin  
${D}${base_libdir}/firmware
+  install -m 0755 ${S}/quickassist/qat/fw/qat_d15xx.bin  
${D}${nonarch_base_libdir}/firmware
+  install -m 0755 ${S}/quickassist/qat/fw/qat_d15xx_mmp.bin  
${D}${nonarch_base_libdir}/firmware
 
   install -m 640 ${S}/quickassist/include/*.h  ${D}${includedir}
   install -m 640 ${S}/quickassist/include/dc/*.h  ${D}${includedir}/dc/
@@ -137,25 +139,27 @@ do_install() {
   install -m 640 ${S}/quickassist/lookaside/access_layer/include/*.h  
${D}${includedir}
   install -m 640 ${S}/quickassist/utilities/libusdm_drv/*.h  ${D}${includedir}
 
-  install -m 0755 
${S}/quickassist/lookaside/access_layer/src/sample_code/performance/compression/calgary
  ${D}${base_libdir}/firmware
-  install -m 0755 
${S}/quickassist/lookaside/access_layer/src/sample_code/performance/compression/calgary32
  ${D}${base_libdir}/firmware
-  install -m 0755 
${S}/quickassist/lookaside/access_layer/src/sample_code/performance/compression/canterbury
  ${D}${base_libdir}/firmware
+  install -m 0755 
${S}/quickassist/lookaside/access_layer/src/sample_code/performance/compression/calgary
  ${D}${nonarch_base_libdir}/firmware
+  install -m 0755 
${S}/quickassist/lookaside/access_layer/src/sample_code/performance/compression/calgary32
  ${D}${nonarch_base_libdir}/firmware
+  install -m 0755 
${S}/quickassist/lookaside/access_layer/src/sample_code/performance/compression/canterbury
  ${D}${nonarch_base_libdir}/firmware
 
   #install qat source
   cp ${DL_DIR}/qat1.7.l.${PV}.tar.gz ${D}${prefix}/src/qat/
 }
 
-PACKAGES += "${PN}-app ${PN}-src"
+PACKAGES += "${PN}-app"
 
 FILES_${PN}-dev = "${includedir}/ \
+                   ${nonarch_base_libdir}/*.a \
                    "
 
 FILES_${PN} += "\
                 ${libdir}/ \
-                ${base_libdir}/firmware \
+                ${nonarch_base_libdir}/firmware \
                 ${sysconfdir}/ \
                 ${sbindir}/ \
                 ${base_libdir}/*.so \
+                ${prefix}/src/qat \
                 "
 
 FILES_${PN}-dbg += "${sysconfdir}/init.d/.debug/ \
@@ -164,6 +168,3 @@ FILES_${PN}-dbg += "${sysconfdir}/init.d/.debug/ \
 FILES_${PN}-app += "${bindir}/* \
                     ${prefix}/qat \
                     "
-
-FILES_${PN}-src += "${prefix}/src/* \
-                   "
-- 
2.14.4

-=-=-=-=-=-=-=-=-=-=-=-
Links: You receive all messages sent to this group.

View/Reply Online (#6332): 
https://lists.yoctoproject.org/g/meta-intel/message/6332
Mute This Topic: https://lists.yoctoproject.org/mt/69739825/21656
Group Owner: [email protected]
Unsubscribe: https://lists.yoctoproject.org/g/meta-intel/unsub  
[[email protected]]
-=-=-=-=-=-=-=-=-=-=-=-

Reply via email to