commit:     70a60381716c31741c8126bf7c4589a781c3d23b
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sat Jun 16 15:46:12 2018 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sat Jun 16 15:46:12 2018 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=70a60381

Linux patch 4.17.2

 0000_README             |    4 +
 1001_linux-4.17.2.patch | 2863 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 2867 insertions(+)

diff --git a/0000_README b/0000_README
index de4fd96..df97765 100644
--- a/0000_README
+++ b/0000_README
@@ -47,6 +47,10 @@ Patch:  1000_linux-4.17.1.patch
 From:   http://www.kernel.org
 Desc:   Linux 4.17.1
 
+Patch:  1001_linux-4.17.2.patch
+From:   http://www.kernel.org
+Desc:   Linux 4.17.2
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1001_linux-4.17.2.patch b/1001_linux-4.17.2.patch
new file mode 100644
index 0000000..a2da995
--- /dev/null
+++ b/1001_linux-4.17.2.patch
@@ -0,0 +1,2863 @@
+diff --git a/Documentation/ABI/stable/sysfs-bus-vmbus b/Documentation/ABI/stable/sysfs-bus-vmbus
+index 0c9d9dcd2151..3eaffbb2d468 100644
+--- a/Documentation/ABI/stable/sysfs-bus-vmbus
++++ b/Documentation/ABI/stable/sysfs-bus-vmbus
+@@ -1,25 +1,25 @@
+-What:         /sys/bus/vmbus/devices/vmbus_*/id
++What:         /sys/bus/vmbus/devices/<UUID>/id
+ Date:         Jul 2009
+ KernelVersion:        2.6.31
+ Contact:      K. Y. Srinivasan <[email protected]>
+ Description:  The VMBus child_relid of the device's primary channel
+ Users:                tools/hv/lsvmbus
+ 
+-What:         /sys/bus/vmbus/devices/vmbus_*/class_id
++What:         /sys/bus/vmbus/devices/<UUID>/class_id
+ Date:         Jul 2009
+ KernelVersion:        2.6.31
+ Contact:      K. Y. Srinivasan <[email protected]>
+ Description:  The VMBus interface type GUID of the device
+ Users:                tools/hv/lsvmbus
+ 
+-What:         /sys/bus/vmbus/devices/vmbus_*/device_id
++What:         /sys/bus/vmbus/devices/<UUID>/device_id
+ Date:         Jul 2009
+ KernelVersion:        2.6.31
+ Contact:      K. Y. Srinivasan <[email protected]>
+ Description:  The VMBus interface instance GUID of the device
+ Users:                tools/hv/lsvmbus
+ 
+-What:         /sys/bus/vmbus/devices/vmbus_*/channel_vp_mapping
++What:         /sys/bus/vmbus/devices/<UUID>/channel_vp_mapping
+ Date:         Jul 2015
+ KernelVersion:        4.2.0
+ Contact:      K. Y. Srinivasan <[email protected]>
+@@ -28,112 +28,112 @@ Description:     The mapping of which primary/sub channels are bound to which
+               Format: <channel's child_relid:the bound cpu's number>
+ Users:                tools/hv/lsvmbus
+ 
+-What:         /sys/bus/vmbus/devices/vmbus_*/device
++What:         /sys/bus/vmbus/devices/<UUID>/device
+ Date:         Dec. 2015
+ KernelVersion:        4.5
+ Contact:      K. Y. Srinivasan <[email protected]>
+ Description:  The 16 bit device ID of the device
+ Users:                tools/hv/lsvmbus and user level RDMA libraries
+ 
+-What:         /sys/bus/vmbus/devices/vmbus_*/vendor
++What:         /sys/bus/vmbus/devices/<UUID>/vendor
+ Date:         Dec. 2015
+ KernelVersion:        4.5
+ Contact:      K. Y. Srinivasan <[email protected]>
+ Description:  The 16 bit vendor ID of the device
+ Users:                tools/hv/lsvmbus and user level RDMA libraries
+ 
+-What:         /sys/bus/vmbus/devices/vmbus_*/channels/NN
++What:         /sys/bus/vmbus/devices/<UUID>/channels/<N>
+ Date:         September. 2017
+ KernelVersion:        4.14
+ Contact:      Stephen Hemminger <[email protected]>
+ Description:  Directory for per-channel information
+               NN is the VMBUS relid associated with the channel.
+ 
+-What:         /sys/bus/vmbus/devices/vmbus_*/channels/NN/cpu
++What:         /sys/bus/vmbus/devices/<UUID>/channels/<N>/cpu
+ Date:         September. 2017
+ KernelVersion:        4.14
+ Contact:      Stephen Hemminger <[email protected]>
+ Description:  VCPU (sub)channel is affinitized to
+ Users:                tools/hv/lsvmbus and other debugging tools
+ 
+-What:         /sys/bus/vmbus/devices/vmbus_*/channels/NN/cpu
++What:         /sys/bus/vmbus/devices/<UUID>/channels/<N>/cpu
+ Date:         September. 2017
+ KernelVersion:        4.14
+ Contact:      Stephen Hemminger <[email protected]>
+ Description:  VCPU (sub)channel is affinitized to
+ Users:                tools/hv/lsvmbus and other debugging tools
+ 
+-What:         /sys/bus/vmbus/devices/vmbus_*/channels/NN/in_mask
++What:         /sys/bus/vmbus/devices/<UUID>/channels/<N>/in_mask
+ Date:         September. 2017
+ KernelVersion:        4.14
+ Contact:      Stephen Hemminger <[email protected]>
+ Description:  Host to guest channel interrupt mask
+ Users:                Debugging tools
+ 
+-What:         /sys/bus/vmbus/devices/vmbus_*/channels/NN/latency
++What:         /sys/bus/vmbus/devices/<UUID>/channels/<N>/latency
+ Date:         September. 2017
+ KernelVersion:        4.14
+ Contact:      Stephen Hemminger <[email protected]>
+ Description:  Channel signaling latency
+ Users:                Debugging tools
+ 
+-What:         /sys/bus/vmbus/devices/vmbus_*/channels/NN/out_mask
++What:         /sys/bus/vmbus/devices/<UUID>/channels/<N>/out_mask
+ Date:         September. 2017
+ KernelVersion:        4.14
+ Contact:      Stephen Hemminger <[email protected]>
+ Description:  Guest to host channel interrupt mask
+ Users:                Debugging tools
+ 
+-What:         /sys/bus/vmbus/devices/vmbus_*/channels/NN/pending
++What:         /sys/bus/vmbus/devices/<UUID>/channels/<N>/pending
+ Date:         September. 2017
+ KernelVersion:        4.14
+ Contact:      Stephen Hemminger <[email protected]>
+ Description:  Channel interrupt pending state
+ Users:                Debugging tools
+ 
+-What:         /sys/bus/vmbus/devices/vmbus_*/channels/NN/read_avail
++What:         /sys/bus/vmbus/devices/<UUID>/channels/<N>/read_avail
+ Date:         September. 2017
+ KernelVersion:        4.14
+ Contact:      Stephen Hemminger <[email protected]>
+ Description:  Bytes available to read
+ Users:                Debugging tools
+ 
+-What:         /sys/bus/vmbus/devices/vmbus_*/channels/NN/write_avail
++What:         /sys/bus/vmbus/devices/<UUID>/channels/<N>/write_avail
+ Date:         September. 2017
+ KernelVersion:        4.14
+ Contact:      Stephen Hemminger <[email protected]>
+ Description:  Bytes available to write
+ Users:                Debugging tools
+ 
+-What:         /sys/bus/vmbus/devices/vmbus_*/channels/NN/events
++What:         /sys/bus/vmbus/devices/<UUID>/channels/<N>/events
+ Date:         September. 2017
+ KernelVersion:        4.14
+ Contact:      Stephen Hemminger <[email protected]>
+ Description:  Number of times we have signaled the host
+ Users:                Debugging tools
+ 
+-What:         /sys/bus/vmbus/devices/vmbus_*/channels/NN/interrupts
++What:         /sys/bus/vmbus/devices/<UUID>/channels/<N>/interrupts
+ Date:         September. 2017
+ KernelVersion:        4.14
+ Contact:      Stephen Hemminger <[email protected]>
+ Description:  Number of times we have taken an interrupt (incoming)
+ Users:                Debugging tools
+ 
+-What:         /sys/bus/vmbus/devices/vmbus_*/channels/NN/subchannel_id
++What:         /sys/bus/vmbus/devices/<UUID>/channels/<N>/subchannel_id
+ Date:         January. 2018
+ KernelVersion:        4.16
+ Contact:      Stephen Hemminger <[email protected]>
+ Description:  Subchannel ID associated with VMBUS channel
+ Users:                Debugging tools and userspace drivers
+ 
+-What:         /sys/bus/vmbus/devices/vmbus_*/channels/NN/monitor_id
++What:         /sys/bus/vmbus/devices/<UUID>/channels/<N>/monitor_id
+ Date:         January. 2018
+ KernelVersion:        4.16
+ Contact:      Stephen Hemminger <[email protected]>
+ Description:  Monitor bit associated with channel
+ Users:                Debugging tools and userspace drivers
+ 
+-What:         /sys/bus/vmbus/devices/vmbus_*/channels/NN/ring
++What:         /sys/bus/vmbus/devices/<UUID>/channels/<N>/ring
+ Date:         January. 2018
+ KernelVersion:        4.16
+ Contact:      Stephen Hemminger <[email protected]>
+diff --git a/Makefile b/Makefile
+index e551c9af6a06..f43cd522b175 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 4
+ PATCHLEVEL = 17
+-SUBLEVEL = 1
++SUBLEVEL = 2
+ EXTRAVERSION =
+ NAME = Merciless Moray
+ 
+diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig
+index ecf613761e78..fe005df02ed3 100644
+--- a/arch/arm64/configs/defconfig
++++ b/arch/arm64/configs/defconfig
+@@ -320,6 +320,7 @@ CONFIG_PINCTRL_MAX77620=y
+ CONFIG_PINCTRL_MSM8916=y
+ CONFIG_PINCTRL_MSM8994=y
+ CONFIG_PINCTRL_MSM8996=y
++CONFIG_PINCTRL_MT7622=y
+ CONFIG_PINCTRL_QDF2XXX=y
+ CONFIG_PINCTRL_QCOM_SPMI_PMIC=y
+ CONFIG_GPIO_DWAPB=y
+diff --git a/arch/x86/include/asm/kvm_emulate.h b/arch/x86/include/asm/kvm_emulate.h
+index b24b1c8b3979..0f82cd91cd3c 100644
+--- a/arch/x86/include/asm/kvm_emulate.h
++++ b/arch/x86/include/asm/kvm_emulate.h
+@@ -107,11 +107,12 @@ struct x86_emulate_ops {
+        *  @addr:  [IN ] Linear address from which to read.
+        *  @val:   [OUT] Value read from memory, zero-extended to 'u_long'.
+        *  @bytes: [IN ] Number of bytes to read from memory.
++       *  @system:[IN ] Whether the access is forced to be at CPL0.
+        */
+       int (*read_std)(struct x86_emulate_ctxt *ctxt,
+                       unsigned long addr, void *val,
+                       unsigned int bytes,
+-                      struct x86_exception *fault);
++                      struct x86_exception *fault, bool system);
+ 
+       /*
+        * read_phys: Read bytes of standard (non-emulated/special) memory.
+@@ -129,10 +130,11 @@ struct x86_emulate_ops {
+        *  @addr:  [IN ] Linear address to which to write.
+        *  @val:   [OUT] Value write to memory, zero-extended to 'u_long'.
+        *  @bytes: [IN ] Number of bytes to write to memory.
++       *  @system:[IN ] Whether the access is forced to be at CPL0.
+        */
+       int (*write_std)(struct x86_emulate_ctxt *ctxt,
+                        unsigned long addr, void *val, unsigned int bytes,
+-                       struct x86_exception *fault);
++                       struct x86_exception *fault, bool system);
+       /*
+        * fetch: Read bytes of standard (non-emulated/special) memory.
+        *        Used for instruction fetch.
+diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
+index b3705ae52824..4c4f4263420c 100644
+--- a/arch/x86/kvm/emulate.c
++++ b/arch/x86/kvm/emulate.c
+@@ -812,6 +812,19 @@ static inline int jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
+       return assign_eip_near(ctxt, ctxt->_eip + rel);
+ }
+ 
++static int linear_read_system(struct x86_emulate_ctxt *ctxt, ulong linear,
++                            void *data, unsigned size)
++{
++      return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception, true);
++}
++
++static int linear_write_system(struct x86_emulate_ctxt *ctxt,
++                             ulong linear, void *data,
++                             unsigned int size)
++{
++      return ctxt->ops->write_std(ctxt, linear, data, size, &ctxt->exception, true);
++}
++
+ static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
+                             struct segmented_address addr,
+                             void *data,
+@@ -823,7 +836,7 @@ static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
+       rc = linearize(ctxt, addr, size, false, &linear);
+       if (rc != X86EMUL_CONTINUE)
+               return rc;
+-      return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception);
++      return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception, false);
+ }
+ 
+ static int segmented_write_std(struct x86_emulate_ctxt *ctxt,
+@@ -837,7 +850,7 @@ static int segmented_write_std(struct x86_emulate_ctxt *ctxt,
+       rc = linearize(ctxt, addr, size, true, &linear);
+       if (rc != X86EMUL_CONTINUE)
+               return rc;
+-      return ctxt->ops->write_std(ctxt, linear, data, size, &ctxt->exception);
++      return ctxt->ops->write_std(ctxt, linear, data, size, &ctxt->exception, false);
+ }
+ 
+ /*
+@@ -1496,8 +1509,7 @@ static int read_interrupt_descriptor(struct x86_emulate_ctxt *ctxt,
+               return emulate_gp(ctxt, index << 3 | 0x2);
+ 
+       addr = dt.address + index * 8;
+-      return ctxt->ops->read_std(ctxt, addr, desc, sizeof *desc,
+-                                 &ctxt->exception);
++      return linear_read_system(ctxt, addr, desc, sizeof *desc);
+ }
+ 
+ static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt,
+@@ -1560,8 +1572,7 @@ static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt,
+       if (rc != X86EMUL_CONTINUE)
+               return rc;
+ 
+-      return ctxt->ops->read_std(ctxt, *desc_addr_p, desc, sizeof(*desc),
+-                                 &ctxt->exception);
++      return linear_read_system(ctxt, *desc_addr_p, desc, sizeof(*desc));
+ }
+ 
+ /* allowed just for 8 bytes segments */
+@@ -1575,8 +1586,7 @@ static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
+       if (rc != X86EMUL_CONTINUE)
+               return rc;
+ 
+-      return ctxt->ops->write_std(ctxt, addr, desc, sizeof *desc,
+-                                  &ctxt->exception);
++      return linear_write_system(ctxt, addr, desc, sizeof *desc);
+ }
+ 
+ static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
+@@ -1737,8 +1747,7 @@ static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
+                               return ret;
+               }
+       } else if (ctxt->mode == X86EMUL_MODE_PROT64) {
+-              ret = ctxt->ops->read_std(ctxt, desc_addr+8, &base3,
+-                              sizeof(base3), &ctxt->exception);
++              ret = linear_read_system(ctxt, desc_addr+8, &base3, sizeof(base3));
+               if (ret != X86EMUL_CONTINUE)
+                       return ret;
+               if (emul_is_noncanonical_address(get_desc_base(&seg_desc) |
+@@ -2051,11 +2060,11 @@ static int __emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
+       eip_addr = dt.address + (irq << 2);
+       cs_addr = dt.address + (irq << 2) + 2;
+ 
+-      rc = ops->read_std(ctxt, cs_addr, &cs, 2, &ctxt->exception);
++      rc = linear_read_system(ctxt, cs_addr, &cs, 2);
+       if (rc != X86EMUL_CONTINUE)
+               return rc;
+ 
+-      rc = ops->read_std(ctxt, eip_addr, &eip, 2, &ctxt->exception);
++      rc = linear_read_system(ctxt, eip_addr, &eip, 2);
+       if (rc != X86EMUL_CONTINUE)
+               return rc;
+ 
+@@ -2919,12 +2928,12 @@ static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt,
+ #ifdef CONFIG_X86_64
+       base |= ((u64)base3) << 32;
+ #endif
+-      r = ops->read_std(ctxt, base + 102, &io_bitmap_ptr, 2, NULL);
++      r = ops->read_std(ctxt, base + 102, &io_bitmap_ptr, 2, NULL, true);
+       if (r != X86EMUL_CONTINUE)
+               return false;
+       if (io_bitmap_ptr + port/8 > desc_limit_scaled(&tr_seg))
+               return false;
+-      r = ops->read_std(ctxt, base + io_bitmap_ptr + port/8, &perm, 2, NULL);
++      r = ops->read_std(ctxt, base + io_bitmap_ptr + port/8, &perm, 2, NULL, true);
+       if (r != X86EMUL_CONTINUE)
+               return false;
+       if ((perm >> bit_idx) & mask)
+@@ -3053,35 +3062,30 @@ static int task_switch_16(struct x86_emulate_ctxt *ctxt,
+                         u16 tss_selector, u16 old_tss_sel,
+                         ulong old_tss_base, struct desc_struct *new_desc)
+ {
+-      const struct x86_emulate_ops *ops = ctxt->ops;
+       struct tss_segment_16 tss_seg;
+       int ret;
+       u32 new_tss_base = get_desc_base(new_desc);
+ 
+-      ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
+-                          &ctxt->exception);
++      ret = linear_read_system(ctxt, old_tss_base, &tss_seg, sizeof tss_seg);
+       if (ret != X86EMUL_CONTINUE)
+               return ret;
+ 
+       save_state_to_tss16(ctxt, &tss_seg);
+ 
+-      ret = ops->write_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
+-                           &ctxt->exception);
++      ret = linear_write_system(ctxt, old_tss_base, &tss_seg, sizeof tss_seg);
+       if (ret != X86EMUL_CONTINUE)
+               return ret;
+ 
+-      ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
+-                          &ctxt->exception);
++      ret = linear_read_system(ctxt, new_tss_base, &tss_seg, sizeof tss_seg);
+       if (ret != X86EMUL_CONTINUE)
+               return ret;
+ 
+       if (old_tss_sel != 0xffff) {
+               tss_seg.prev_task_link = old_tss_sel;
+ 
+-              ret = ops->write_std(ctxt, new_tss_base,
+-                                   &tss_seg.prev_task_link,
+-                                   sizeof tss_seg.prev_task_link,
+-                                   &ctxt->exception);
++              ret = linear_write_system(ctxt, new_tss_base,
++                                        &tss_seg.prev_task_link,
++                                        sizeof tss_seg.prev_task_link);
+               if (ret != X86EMUL_CONTINUE)
+                       return ret;
+       }
+@@ -3197,38 +3201,34 @@ static int task_switch_32(struct x86_emulate_ctxt *ctxt,
+                         u16 tss_selector, u16 old_tss_sel,
+                         ulong old_tss_base, struct desc_struct *new_desc)
+ {
+-      const struct x86_emulate_ops *ops = ctxt->ops;
+       struct tss_segment_32 tss_seg;
+       int ret;
+       u32 new_tss_base = get_desc_base(new_desc);
+       u32 eip_offset = offsetof(struct tss_segment_32, eip);
+       u32 ldt_sel_offset = offsetof(struct tss_segment_32, ldt_selector);
+ 
+-      ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
+-                          &ctxt->exception);
++      ret = linear_read_system(ctxt, old_tss_base, &tss_seg, sizeof tss_seg);
+       if (ret != X86EMUL_CONTINUE)
+               return ret;
+ 
+       save_state_to_tss32(ctxt, &tss_seg);
+ 
+       /* Only GP registers and segment selectors are saved */
+-      ret = ops->write_std(ctxt, old_tss_base + eip_offset, &tss_seg.eip,
+-                           ldt_sel_offset - eip_offset, &ctxt->exception);
++      ret = linear_write_system(ctxt, old_tss_base + eip_offset, &tss_seg.eip,
++                                ldt_sel_offset - eip_offset);
+       if (ret != X86EMUL_CONTINUE)
+               return ret;
+ 
+-      ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
+-                          &ctxt->exception);
++      ret = linear_read_system(ctxt, new_tss_base, &tss_seg, sizeof tss_seg);
+       if (ret != X86EMUL_CONTINUE)
+               return ret;
+ 
+       if (old_tss_sel != 0xffff) {
+               tss_seg.prev_task_link = old_tss_sel;
+ 
+-              ret = ops->write_std(ctxt, new_tss_base,
+-                                   &tss_seg.prev_task_link,
+-                                   sizeof tss_seg.prev_task_link,
+-                                   &ctxt->exception);
++              ret = linear_write_system(ctxt, new_tss_base,
++                                        &tss_seg.prev_task_link,
++                                        sizeof tss_seg.prev_task_link);
+               if (ret != X86EMUL_CONTINUE)
+                       return ret;
+       }
+@@ -4189,7 +4189,9 @@ static int check_cr_write(struct x86_emulate_ctxt *ctxt)
+                               maxphyaddr = eax & 0xff;
+                       else
+                               maxphyaddr = 36;
+-                      rsvd = rsvd_bits(maxphyaddr, 62);
++                      rsvd = rsvd_bits(maxphyaddr, 63);
++                      if (ctxt->ops->get_cr(ctxt, 4) & X86_CR4_PCIDE)
++                              rsvd &= ~CR3_PCID_INVD;
+               }
+ 
+               if (new_val & rsvd)
+diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
+index 40aa29204baf..82f5e915e568 100644
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -7588,8 +7588,7 @@ static int nested_vmx_get_vmptr(struct kvm_vcpu *vcpu, gpa_t *vmpointer)
+                       vmcs_read32(VMX_INSTRUCTION_INFO), false, &gva))
+               return 1;
+ 
+-      if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, vmpointer,
+-                              sizeof(*vmpointer), &e)) {
++      if (kvm_read_guest_virt(vcpu, gva, vmpointer, sizeof(*vmpointer), &e)) {
+               kvm_inject_page_fault(vcpu, &e);
+               return 1;
+       }
+@@ -7670,6 +7669,12 @@ static int handle_vmon(struct kvm_vcpu *vcpu)
+               return 1;
+       }
+ 
++      /* CPL=0 must be checked manually. */
++      if (vmx_get_cpl(vcpu)) {
++              kvm_queue_exception(vcpu, UD_VECTOR);
++              return 1;
++      }
++
+       if (vmx->nested.vmxon) {
+               nested_vmx_failValid(vcpu, VMXERR_VMXON_IN_VMX_ROOT_OPERATION);
+               return kvm_skip_emulated_instruction(vcpu);
+@@ -7729,6 +7734,11 @@ static int handle_vmon(struct kvm_vcpu *vcpu)
+  */
+ static int nested_vmx_check_permission(struct kvm_vcpu *vcpu)
+ {
++      if (vmx_get_cpl(vcpu)) {
++              kvm_queue_exception(vcpu, UD_VECTOR);
++              return 0;
++      }
++
+       if (!to_vmx(vcpu)->nested.vmxon) {
+               kvm_queue_exception(vcpu, UD_VECTOR);
+               return 0;
+@@ -8029,9 +8039,9 @@ static int handle_vmread(struct kvm_vcpu *vcpu)
+               if (get_vmx_mem_address(vcpu, exit_qualification,
+                               vmx_instruction_info, true, &gva))
+                       return 1;
+-              /* _system ok, as hardware has verified cpl=0 */
+-              kvm_write_guest_virt_system(&vcpu->arch.emulate_ctxt, gva,
+-                           &field_value, (is_long_mode(vcpu) ? 8 : 4), NULL);
++              /* _system ok, nested_vmx_check_permission has verified cpl=0 */
++              kvm_write_guest_virt_system(vcpu, gva, &field_value,
++                                          (is_long_mode(vcpu) ? 8 : 4), NULL);
+       }
+ 
+       nested_vmx_succeed(vcpu);
+@@ -8069,8 +8079,8 @@ static int handle_vmwrite(struct kvm_vcpu *vcpu)
+               if (get_vmx_mem_address(vcpu, exit_qualification,
+                               vmx_instruction_info, false, &gva))
+                       return 1;
+-              if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva,
+-                         &field_value, (is_64_bit_mode(vcpu) ? 8 : 4), &e)) {
++              if (kvm_read_guest_virt(vcpu, gva, &field_value,
++                                      (is_64_bit_mode(vcpu) ? 8 : 4), &e)) {
+                       kvm_inject_page_fault(vcpu, &e);
+                       return 1;
+               }
+@@ -8189,10 +8199,10 @@ static int handle_vmptrst(struct kvm_vcpu *vcpu)
+       if (get_vmx_mem_address(vcpu, exit_qualification,
+                       vmx_instruction_info, true, &vmcs_gva))
+               return 1;
+-      /* ok to use *_system, as hardware has verified cpl=0 */
+-      if (kvm_write_guest_virt_system(&vcpu->arch.emulate_ctxt, vmcs_gva,
+-                               (void *)&to_vmx(vcpu)->nested.current_vmptr,
+-                               sizeof(u64), &e)) {
++      /* *_system ok, nested_vmx_check_permission has verified cpl=0 */
++      if (kvm_write_guest_virt_system(vcpu, vmcs_gva,
++                                      (void *)&to_vmx(vcpu)->nested.current_vmptr,
++                                      sizeof(u64), &e)) {
+               kvm_inject_page_fault(vcpu, &e);
+               return 1;
+       }
+@@ -8239,8 +8249,7 @@ static int handle_invept(struct kvm_vcpu *vcpu)
+       if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
+                       vmx_instruction_info, false, &gva))
+               return 1;
+-      if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, &operand,
+-                              sizeof(operand), &e)) {
++      if (kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e)) {
+               kvm_inject_page_fault(vcpu, &e);
+               return 1;
+       }
+@@ -8304,8 +8313,7 @@ static int handle_invvpid(struct kvm_vcpu *vcpu)
+       if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
+                       vmx_instruction_info, false, &gva))
+               return 1;
+-      if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, &operand,
+-                              sizeof(operand), &e)) {
++      if (kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e)) {
+               kvm_inject_page_fault(vcpu, &e);
+               return 1;
+       }
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index 71e7cda6d014..fbc4d17e3ecc 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -856,7 +856,7 @@ int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
+       }
+ 
+       if (is_long_mode(vcpu) &&
+-          (cr3 & rsvd_bits(cpuid_maxphyaddr(vcpu), 62)))
++          (cr3 & rsvd_bits(cpuid_maxphyaddr(vcpu), 63)))
+               return 1;
+       else if (is_pae(vcpu) && is_paging(vcpu) &&
+                  !load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3))
+@@ -2894,7 +2894,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
+               r = KVM_CLOCK_TSC_STABLE;
+               break;
+       case KVM_CAP_X86_DISABLE_EXITS:
+-              r |=  KVM_X86_DISABLE_EXITS_HTL | KVM_X86_DISABLE_EXITS_PAUSE;
++              r |=  KVM_X86_DISABLE_EXITS_HLT | KVM_X86_DISABLE_EXITS_PAUSE;
+               if(kvm_can_mwait_in_guest())
+                       r |= KVM_X86_DISABLE_EXITS_MWAIT;
+               break;
+@@ -4248,7 +4248,7 @@ static int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
+               if ((cap->args[0] & KVM_X86_DISABLE_EXITS_MWAIT) &&
+                       kvm_can_mwait_in_guest())
+                       kvm->arch.mwait_in_guest = true;
+-              if (cap->args[0] & KVM_X86_DISABLE_EXITS_HTL)
++              if (cap->args[0] & KVM_X86_DISABLE_EXITS_HLT)
+                       kvm->arch.hlt_in_guest = true;
+               if (cap->args[0] & KVM_X86_DISABLE_EXITS_PAUSE)
+                       kvm->arch.pause_in_guest = true;
+@@ -4787,11 +4787,10 @@ static int kvm_fetch_guest_virt(struct x86_emulate_ctxt *ctxt,
+       return X86EMUL_CONTINUE;
+ }
+ 
+-int kvm_read_guest_virt(struct x86_emulate_ctxt *ctxt,
++int kvm_read_guest_virt(struct kvm_vcpu *vcpu,
+                              gva_t addr, void *val, unsigned int bytes,
+                              struct x86_exception *exception)
+ {
+-      struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
+       u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
+ 
+       return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access,
+@@ -4799,12 +4798,17 @@ int kvm_read_guest_virt(struct x86_emulate_ctxt *ctxt,
+ }
+ EXPORT_SYMBOL_GPL(kvm_read_guest_virt);
+ 
+-static int kvm_read_guest_virt_system(struct x86_emulate_ctxt *ctxt,
+-                                    gva_t addr, void *val, unsigned int bytes,
+-                                    struct x86_exception *exception)
++static int emulator_read_std(struct x86_emulate_ctxt *ctxt,
++                           gva_t addr, void *val, unsigned int bytes,
++                           struct x86_exception *exception, bool system)
+ {
+       struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
+-      return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, 0, exception);
++      u32 access = 0;
++
++      if (!system && kvm_x86_ops->get_cpl(vcpu) == 3)
++              access |= PFERR_USER_MASK;
++
++      return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access, exception);
+ }
+ 
+ static int kvm_read_guest_phys_system(struct x86_emulate_ctxt *ctxt,
+@@ -4816,18 +4820,16 @@ static int kvm_read_guest_phys_system(struct x86_emulate_ctxt *ctxt,
+       return r < 0 ? X86EMUL_IO_NEEDED : X86EMUL_CONTINUE;
+ }
+ 
+-int kvm_write_guest_virt_system(struct x86_emulate_ctxt *ctxt,
+-                                     gva_t addr, void *val,
+-                                     unsigned int bytes,
+-                                     struct x86_exception *exception)
++static int kvm_write_guest_virt_helper(gva_t addr, void *val, unsigned int bytes,
++                                    struct kvm_vcpu *vcpu, u32 access,
++                                    struct x86_exception *exception)
+ {
+-      struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
+       void *data = val;
+       int r = X86EMUL_CONTINUE;
+ 
+       while (bytes) {
+               gpa_t gpa =  vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr,
+-                                                           PFERR_WRITE_MASK,
++                                                           access,
+                                                            exception);
+               unsigned offset = addr & (PAGE_SIZE-1);
+               unsigned towrite = min(bytes, (unsigned)PAGE_SIZE - offset);
+@@ -4848,6 +4850,27 @@ int kvm_write_guest_virt_system(struct x86_emulate_ctxt *ctxt,
+ out:
+       return r;
+ }
++
++static int emulator_write_std(struct x86_emulate_ctxt *ctxt, gva_t addr, void *val,
++                            unsigned int bytes, struct x86_exception *exception,
++                            bool system)
++{
++      struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
++      u32 access = PFERR_WRITE_MASK;
++
++      if (!system && kvm_x86_ops->get_cpl(vcpu) == 3)
++              access |= PFERR_USER_MASK;
++
++      return kvm_write_guest_virt_helper(addr, val, bytes, vcpu,
++                                         access, exception);
++}
++
++int kvm_write_guest_virt_system(struct kvm_vcpu *vcpu, gva_t addr, void *val,
++                              unsigned int bytes, struct x86_exception *exception)
++{
++      return kvm_write_guest_virt_helper(addr, val, bytes, vcpu,
++                                         PFERR_WRITE_MASK, exception);
++}
+ EXPORT_SYMBOL_GPL(kvm_write_guest_virt_system);
+ 
+ int handle_ud(struct kvm_vcpu *vcpu)
+@@ -4858,8 +4881,8 @@ int handle_ud(struct kvm_vcpu *vcpu)
+       struct x86_exception e;
+ 
+       if (force_emulation_prefix &&
+-          kvm_read_guest_virt(&vcpu->arch.emulate_ctxt,
+-                              kvm_get_linear_rip(vcpu), sig, sizeof(sig), &e) == 0 &&
++          kvm_read_guest_virt(vcpu, kvm_get_linear_rip(vcpu),
++                              sig, sizeof(sig), &e) == 0 &&
+           memcmp(sig, "\xf\xbkvm", sizeof(sig)) == 0) {
+               kvm_rip_write(vcpu, kvm_rip_read(vcpu) + sizeof(sig));
+               emul_type = 0;
+@@ -5600,8 +5623,8 @@ static int emulator_pre_leave_smm(struct x86_emulate_ctxt *ctxt, u64 smbase)
+ static const struct x86_emulate_ops emulate_ops = {
+       .read_gpr            = emulator_read_gpr,
+       .write_gpr           = emulator_write_gpr,
+-      .read_std            = kvm_read_guest_virt_system,
+-      .write_std           = kvm_write_guest_virt_system,
++      .read_std            = emulator_read_std,
++      .write_std           = emulator_write_std,
+       .read_phys           = kvm_read_guest_phys_system,
+       .fetch               = kvm_fetch_guest_virt,
+       .read_emulated       = emulator_read_emulated,
+diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
+index c9492f764902..331993c49dae 100644
+--- a/arch/x86/kvm/x86.h
++++ b/arch/x86/kvm/x86.h
+@@ -247,11 +247,11 @@ int kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip);
+ void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr);
+ u64 get_kvmclock_ns(struct kvm *kvm);
+ 
+-int kvm_read_guest_virt(struct x86_emulate_ctxt *ctxt,
++int kvm_read_guest_virt(struct kvm_vcpu *vcpu,
+       gva_t addr, void *val, unsigned int bytes,
+       struct x86_exception *exception);
+ 
+-int kvm_write_guest_virt_system(struct x86_emulate_ctxt *ctxt,
++int kvm_write_guest_virt_system(struct kvm_vcpu *vcpu,
+       gva_t addr, void *val, unsigned int bytes,
+       struct x86_exception *exception);
+ 
+diff --git a/block/blk-zoned.c b/block/blk-zoned.c
+index 08e84ef2bc05..3d08dc84db16 100644
+--- a/block/blk-zoned.c
++++ b/block/blk-zoned.c
+@@ -328,7 +328,11 @@ int blkdev_report_zones_ioctl(struct block_device *bdev, fmode_t mode,
+       if (!rep.nr_zones)
+               return -EINVAL;
+ 
+-      zones = kcalloc(rep.nr_zones, sizeof(struct blk_zone), GFP_KERNEL);
++      if (rep.nr_zones > INT_MAX / sizeof(struct blk_zone))
++              return -ERANGE;
++
++      zones = kvmalloc(rep.nr_zones * sizeof(struct blk_zone),
++                      GFP_KERNEL | __GFP_ZERO);
+       if (!zones)
+               return -ENOMEM;
+ 
+@@ -350,7 +354,7 @@ int blkdev_report_zones_ioctl(struct block_device *bdev, fmode_t mode,
+       }
+ 
+  out:
+-      kfree(zones);
++      kvfree(zones);
+ 
+       return ret;
+ }
+diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
+index 7207a535942d..d67667970f7e 100644
+--- a/drivers/crypto/caam/caamalg.c
++++ b/drivers/crypto/caam/caamalg.c
+@@ -769,15 +769,18 @@ struct aead_edesc {
+  * @src_nents: number of segments in input s/w scatterlist
+  * @dst_nents: number of segments in output s/w scatterlist
+  * @iv_dma: dma address of iv for checking continuity and link table
++ * @iv_dir: DMA mapping direction for IV
+  * @sec4_sg_bytes: length of dma mapped sec4_sg space
+  * @sec4_sg_dma: bus physical mapped address of h/w link table
+  * @sec4_sg: pointer to h/w link table
+  * @hw_desc: the h/w job descriptor followed by any referenced link tables
++ *         and IV
+  */
+ struct ablkcipher_edesc {
+       int src_nents;
+       int dst_nents;
+       dma_addr_t iv_dma;
++      enum dma_data_direction iv_dir;
+       int sec4_sg_bytes;
+       dma_addr_t sec4_sg_dma;
+       struct sec4_sg_entry *sec4_sg;
+@@ -787,7 +790,8 @@ struct ablkcipher_edesc {
+ static void caam_unmap(struct device *dev, struct scatterlist *src,
+                      struct scatterlist *dst, int src_nents,
+                      int dst_nents,
+-                     dma_addr_t iv_dma, int ivsize, dma_addr_t sec4_sg_dma,
++                     dma_addr_t iv_dma, int ivsize,
++                     enum dma_data_direction iv_dir, dma_addr_t sec4_sg_dma,
+                      int sec4_sg_bytes)
+ {
+       if (dst != src) {
+@@ -799,7 +803,7 @@ static void caam_unmap(struct device *dev, struct scatterlist *src,
+       }
+ 
+       if (iv_dma)
+-              dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
++              dma_unmap_single(dev, iv_dma, ivsize, iv_dir);
+       if (sec4_sg_bytes)
+               dma_unmap_single(dev, sec4_sg_dma, sec4_sg_bytes,
+                                DMA_TO_DEVICE);
+@@ -810,7 +814,7 @@ static void aead_unmap(struct device *dev,
+                      struct aead_request *req)
+ {
+       caam_unmap(dev, req->src, req->dst,
+-                 edesc->src_nents, edesc->dst_nents, 0, 0,
++                 edesc->src_nents, edesc->dst_nents, 0, 0, DMA_NONE,
+                  edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
+ }
+ 
+@@ -823,7 +827,7 @@ static void ablkcipher_unmap(struct device *dev,
+ 
+       caam_unmap(dev, req->src, req->dst,
+                  edesc->src_nents, edesc->dst_nents,
+-                 edesc->iv_dma, ivsize,
++                 edesc->iv_dma, ivsize, edesc->iv_dir,
+                  edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
+ }
+ 
+@@ -912,6 +916,18 @@ static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
+       scatterwalk_map_and_copy(req->info, req->dst, req->nbytes - ivsize,
+                                ivsize, 0);
+ 
++      /* In case initial IV was generated, copy it in GIVCIPHER request */
++      if (edesc->iv_dir == DMA_FROM_DEVICE) {
++              u8 *iv;
++              struct skcipher_givcrypt_request *greq;
++
++              greq = container_of(req, struct skcipher_givcrypt_request,
++                                  creq);
++              iv = (u8 *)edesc->hw_desc + desc_bytes(edesc->hw_desc) +
++                   edesc->sec4_sg_bytes;
++              memcpy(greq->giv, iv, ivsize);
++      }
++
+       kfree(edesc);
+ 
+       ablkcipher_request_complete(req, err);
+@@ -922,10 +938,10 @@ static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
+ {
+       struct ablkcipher_request *req = context;
+       struct ablkcipher_edesc *edesc;
++#ifdef DEBUG
+       struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+       int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+ 
+-#ifdef DEBUG
+       dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
+ #endif
+ 
+@@ -943,14 +959,6 @@ static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
+                    edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
+ 
+       ablkcipher_unmap(jrdev, edesc, req);
+-
+-      /*
+-       * The crypto API expects us to set the IV (req->info) to the last
+-       * ciphertext block.
+-       */
+-      scatterwalk_map_and_copy(req->info, req->src, req->nbytes - ivsize,
+-                               ivsize, 0);
+-
+       kfree(edesc);
+ 
+       ablkcipher_request_complete(req, err);
+@@ -1099,15 +1107,14 @@ static void init_authenc_job(struct aead_request *req,
+  */
+ static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr,
+                               struct ablkcipher_edesc *edesc,
+-                              struct ablkcipher_request *req,
+-                              bool iv_contig)
++                              struct ablkcipher_request *req)
+ {
+       struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+       int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+       u32 *desc = edesc->hw_desc;
+-      u32 out_options = 0, in_options;
+-      dma_addr_t dst_dma, src_dma;
+-      int len, sec4_sg_index = 0;
++      u32 out_options = 0;
++      dma_addr_t dst_dma;
++      int len;
+ 
+ #ifdef DEBUG
+       print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
+@@ -1123,30 +1130,18 @@ static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr,
+       len = desc_len(sh_desc);
+       init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
+ 
+-      if (iv_contig) {
+-              src_dma = edesc->iv_dma;
+-              in_options = 0;
+-      } else {
+-              src_dma = edesc->sec4_sg_dma;
+-              sec4_sg_index += edesc->src_nents + 1;
+-              in_options = LDST_SGF;
+-      }
+-      append_seq_in_ptr(desc, src_dma, req->nbytes + ivsize, in_options);
++      append_seq_in_ptr(desc, edesc->sec4_sg_dma, req->nbytes + ivsize,
++                        LDST_SGF);
+ 
+       if (likely(req->src == req->dst)) {
+-              if (edesc->src_nents == 1 && iv_contig) {
+-                      dst_dma = sg_dma_address(req->src);
+-              } else {
+-                      dst_dma = edesc->sec4_sg_dma +
+-                              sizeof(struct sec4_sg_entry);
+-                      out_options = LDST_SGF;
+-              }
++              dst_dma = edesc->sec4_sg_dma + sizeof(struct sec4_sg_entry);
++              out_options = LDST_SGF;
+       } else {
+               if (edesc->dst_nents == 1) {
+                       dst_dma = sg_dma_address(req->dst);
+               } else {
+-                      dst_dma = edesc->sec4_sg_dma +
+-                              sec4_sg_index * sizeof(struct sec4_sg_entry);
++                      dst_dma = edesc->sec4_sg_dma + (edesc->src_nents + 1) *
++                                sizeof(struct sec4_sg_entry);
+                       out_options = LDST_SGF;
+               }
+       }
+@@ -1158,13 +1153,12 @@ static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr,
+  */
+ static void init_ablkcipher_giv_job(u32 *sh_desc, dma_addr_t ptr,
+                                   struct ablkcipher_edesc *edesc,
+-                                  struct ablkcipher_request *req,
+-                                  bool iv_contig)
++                                  struct ablkcipher_request *req)
+ {
+       struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+       int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+       u32 *desc = edesc->hw_desc;
+-      u32 out_options, in_options;
++      u32 in_options;
+       dma_addr_t dst_dma, src_dma;
+       int len, sec4_sg_index = 0;
+ 
+@@ -1190,15 +1184,9 @@ static void init_ablkcipher_giv_job(u32 *sh_desc, dma_addr_t ptr,
+       }
+       append_seq_in_ptr(desc, src_dma, req->nbytes, in_options);
+ 
+-      if (iv_contig) {
+-              dst_dma = edesc->iv_dma;
+-              out_options = 0;
+-      } else {
+-              dst_dma = edesc->sec4_sg_dma +
+-                        sec4_sg_index * sizeof(struct sec4_sg_entry);
+-              out_options = LDST_SGF;
+-      }
+-      append_seq_out_ptr(desc, dst_dma, req->nbytes + ivsize, out_options);
++      dst_dma = edesc->sec4_sg_dma + sec4_sg_index *
++                sizeof(struct sec4_sg_entry);
++      append_seq_out_ptr(desc, dst_dma, req->nbytes + ivsize, LDST_SGF);
+ }
+ 
+ /*
+@@ -1287,7 +1275,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
+                       GFP_DMA | flags);
+       if (!edesc) {
+               caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
+-                         0, 0, 0);
++                         0, DMA_NONE, 0, 0);
+               return ERR_PTR(-ENOMEM);
+       }
+ 
+@@ -1491,8 +1479,7 @@ static int aead_decrypt(struct aead_request *req)
+  * allocate and map the ablkcipher extended descriptor for ablkcipher
+  */
+ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
+-                                                     *req, int desc_bytes,
+-                                                     bool *iv_contig_out)
++                                                     *req, int desc_bytes)
+ {
+       struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+       struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
+@@ -1501,8 +1488,8 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
+                      GFP_KERNEL : GFP_ATOMIC;
+       int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
+       struct ablkcipher_edesc *edesc;
+-      dma_addr_t iv_dma = 0;
+-      bool in_contig;
++      dma_addr_t iv_dma;
++      u8 *iv;
+       int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+       int dst_sg_idx, sec4_sg_ents, sec4_sg_bytes;
+ 
+@@ -1546,33 +1533,20 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
+               }
+       }
+ 
+-      iv_dma = dma_map_single(jrdev, req->info, ivsize, DMA_TO_DEVICE);
+-      if (dma_mapping_error(jrdev, iv_dma)) {
+-              dev_err(jrdev, "unable to map IV\n");
+-              caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
+-                         0, 0, 0);
+-              return ERR_PTR(-ENOMEM);
+-      }
+-
+-      if (mapped_src_nents == 1 &&
+-          iv_dma + ivsize == sg_dma_address(req->src)) {
+-              in_contig = true;
+-              sec4_sg_ents = 0;
+-      } else {
+-              in_contig = false;
+-              sec4_sg_ents = 1 + mapped_src_nents;
+-      }
++      sec4_sg_ents = 1 + mapped_src_nents;
+       dst_sg_idx = sec4_sg_ents;
+       sec4_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
+       sec4_sg_bytes = sec4_sg_ents * sizeof(struct sec4_sg_entry);
+ 
+-      /* allocate space for base edesc and hw desc commands, link tables */
+-      edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes,
++      /*
++       * allocate space for base edesc and hw desc commands, link tables, IV
++       */
++      edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes + ivsize,
+                       GFP_DMA | flags);
+       if (!edesc) {
+               dev_err(jrdev, "could not allocate extended descriptor\n");
+-              caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents,
+-                         iv_dma, ivsize, 0, 0);
++              caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
++                         0, DMA_NONE, 0, 0);
+               return ERR_PTR(-ENOMEM);
+       }
+ 
+@@ -1581,13 +1555,24 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
+       edesc->sec4_sg_bytes = sec4_sg_bytes;
+       edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
+                        desc_bytes;
++      edesc->iv_dir = DMA_TO_DEVICE;
+ 
+-      if (!in_contig) {
+-              dma_to_sec4_sg_one(edesc->sec4_sg, iv_dma, ivsize, 0);
+-              sg_to_sec4_sg_last(req->src, mapped_src_nents,
+-                                 edesc->sec4_sg + 1, 0);
++      /* Make sure IV is located in a DMAable area */
++      iv = (u8 *)edesc->hw_desc + desc_bytes + sec4_sg_bytes;
++      memcpy(iv, req->info, ivsize);
++
++      iv_dma = dma_map_single(jrdev, iv, ivsize, DMA_TO_DEVICE);
++      if (dma_mapping_error(jrdev, iv_dma)) {
++              dev_err(jrdev, "unable to map IV\n");
++              caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
++                         0, DMA_NONE, 0, 0);
++              kfree(edesc);
++              return ERR_PTR(-ENOMEM);
+       }
+ 
++      dma_to_sec4_sg_one(edesc->sec4_sg, iv_dma, ivsize, 0);
++      sg_to_sec4_sg_last(req->src, mapped_src_nents, edesc->sec4_sg + 1, 0);
++
+       if (mapped_dst_nents > 1) {
+               sg_to_sec4_sg_last(req->dst, mapped_dst_nents,
+                                  edesc->sec4_sg + dst_sg_idx, 0);
+@@ -1598,7 +1583,7 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
+       if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
+               dev_err(jrdev, "unable to map S/G table\n");
+               caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents,
+-                         iv_dma, ivsize, 0, 0);
++                         iv_dma, ivsize, DMA_TO_DEVICE, 0, 0);
+               kfree(edesc);
+               return ERR_PTR(-ENOMEM);
+       }
+@@ -1611,7 +1596,6 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
+                      sec4_sg_bytes, 1);
+ #endif
+ 
+-      *iv_contig_out = in_contig;
+       return edesc;
+ }
+ 
+@@ -1621,19 +1605,16 @@ static int ablkcipher_encrypt(struct ablkcipher_request *req)
+       struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+       struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
+       struct device *jrdev = ctx->jrdev;
+-      bool iv_contig;
+       u32 *desc;
+       int ret = 0;
+ 
+       /* allocate extended descriptor */
+-      edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN *
+-                                     CAAM_CMD_SZ, &iv_contig);
++      edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN * CAAM_CMD_SZ);
+       if (IS_ERR(edesc))
+               return PTR_ERR(edesc);
+ 
+       /* Create and submit job descriptor*/
+-      init_ablkcipher_job(ctx->sh_desc_enc,
+-              ctx->sh_desc_enc_dma, edesc, req, iv_contig);
++      init_ablkcipher_job(ctx->sh_desc_enc, ctx->sh_desc_enc_dma, edesc, req);
+ #ifdef DEBUG
+       print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"__stringify(__LINE__)": ",
+                      DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
+@@ -1657,20 +1638,25 @@ static int ablkcipher_decrypt(struct ablkcipher_request *req)
+       struct ablkcipher_edesc *edesc;
+       struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+       struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
++      int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+       struct device *jrdev = ctx->jrdev;
+-      bool iv_contig;
+       u32 *desc;
+       int ret = 0;
+ 
+       /* allocate extended descriptor */
+-      edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN *
+-                                     CAAM_CMD_SZ, &iv_contig);
++      edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN * CAAM_CMD_SZ);
+       if (IS_ERR(edesc))
+               return PTR_ERR(edesc);
+ 
++      /*
++       * The crypto API expects us to set the IV (req->info) to the last
++       * ciphertext block.
++       */
++      scatterwalk_map_and_copy(req->info, req->src, req->nbytes - ivsize,
++                               ivsize, 0);
++
+       /* Create and submit job descriptor*/
+-      init_ablkcipher_job(ctx->sh_desc_dec,
+-              ctx->sh_desc_dec_dma, edesc, req, iv_contig);
++      init_ablkcipher_job(ctx->sh_desc_dec, ctx->sh_desc_dec_dma, edesc, req);
+       desc = edesc->hw_desc;
+ #ifdef DEBUG
+       print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"__stringify(__LINE__)": ",
+@@ -1695,8 +1681,7 @@ static int ablkcipher_decrypt(struct ablkcipher_request *req)
+  */
+ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
+                               struct skcipher_givcrypt_request *greq,
+-                              int desc_bytes,
+-                              bool *iv_contig_out)
++                              int desc_bytes)
+ {
+       struct ablkcipher_request *req = &greq->creq;
+       struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+@@ -1706,8 +1691,8 @@ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
+                      GFP_KERNEL : GFP_ATOMIC;
+       int src_nents, mapped_src_nents, dst_nents, mapped_dst_nents;
+       struct ablkcipher_edesc *edesc;
+-      dma_addr_t iv_dma = 0;
+-      bool out_contig;
++      dma_addr_t iv_dma;
++      u8 *iv;
+       int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+       int dst_sg_idx, sec4_sg_ents, sec4_sg_bytes;
+ 
+@@ -1752,36 +1737,20 @@ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
+               }
+       }
+ 
+-      /*
+-       * Check if iv can be contiguous with source and destination.
+-       * If so, include it. If not, create scatterlist.
+-       */
+-      iv_dma = dma_map_single(jrdev, greq->giv, ivsize, DMA_TO_DEVICE);
+-      if (dma_mapping_error(jrdev, iv_dma)) {
+-              dev_err(jrdev, "unable to map IV\n");
+-              caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
+-                         0, 0, 0);
+-              return ERR_PTR(-ENOMEM);
+-      }
+-
+       sec4_sg_ents = mapped_src_nents > 1 ? mapped_src_nents : 0;
+       dst_sg_idx = sec4_sg_ents;
+-      if (mapped_dst_nents == 1 &&
+-          iv_dma + ivsize == sg_dma_address(req->dst)) {
+-              out_contig = true;
+-      } else {
+-              out_contig = false;
+-              sec4_sg_ents += 1 + mapped_dst_nents;
+-      }
++      sec4_sg_ents += 1 + mapped_dst_nents;
+ 
+-      /* allocate space for base edesc and hw desc commands, link tables */
++      /*
++       * allocate space for base edesc and hw desc commands, link tables, IV
++       */
+       sec4_sg_bytes = sec4_sg_ents * sizeof(struct sec4_sg_entry);
+-      edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes,
++      edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes + ivsize,
+                       GFP_DMA | flags);
+       if (!edesc) {
+               dev_err(jrdev, "could not allocate extended descriptor\n");
+-              caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents,
+-                         iv_dma, ivsize, 0, 0);
++              caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
++                         0, DMA_NONE, 0, 0);
+               return ERR_PTR(-ENOMEM);
+       }
+ 
+@@ -1790,24 +1759,33 @@ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
+       edesc->sec4_sg_bytes = sec4_sg_bytes;
+       edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
+                        desc_bytes;
++      edesc->iv_dir = DMA_FROM_DEVICE;
++
++      /* Make sure IV is located in a DMAable area */
++      iv = (u8 *)edesc->hw_desc + desc_bytes + sec4_sg_bytes;
++      iv_dma = dma_map_single(jrdev, iv, ivsize, DMA_FROM_DEVICE);
++      if (dma_mapping_error(jrdev, iv_dma)) {
++              dev_err(jrdev, "unable to map IV\n");
++              caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
++                         0, DMA_NONE, 0, 0);
++              kfree(edesc);
++              return ERR_PTR(-ENOMEM);
++      }
+ 
+       if (mapped_src_nents > 1)
+               sg_to_sec4_sg_last(req->src, mapped_src_nents, edesc->sec4_sg,
+                                  0);
+ 
+-      if (!out_contig) {
+-              dma_to_sec4_sg_one(edesc->sec4_sg + dst_sg_idx,
+-                                 iv_dma, ivsize, 0);
+-              sg_to_sec4_sg_last(req->dst, mapped_dst_nents,
+-                                 edesc->sec4_sg + dst_sg_idx + 1, 0);
+-      }
++      dma_to_sec4_sg_one(edesc->sec4_sg + dst_sg_idx, iv_dma, ivsize, 0);
++      sg_to_sec4_sg_last(req->dst, mapped_dst_nents, edesc->sec4_sg +
++                         dst_sg_idx + 1, 0);
+ 
+       edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
+                                           sec4_sg_bytes, DMA_TO_DEVICE);
+       if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
+               dev_err(jrdev, "unable to map S/G table\n");
+               caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents,
+-                         iv_dma, ivsize, 0, 0);
++                         iv_dma, ivsize, DMA_FROM_DEVICE, 0, 0);
+               kfree(edesc);
+               return ERR_PTR(-ENOMEM);
+       }
+@@ -1820,7 +1798,6 @@ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
+                      sec4_sg_bytes, 1);
+ #endif
+ 
+-      *iv_contig_out = out_contig;
+       return edesc;
+ }
+ 
+@@ -1831,19 +1808,17 @@ static int ablkcipher_givencrypt(struct skcipher_givcrypt_request *creq)
+       struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+       struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
+       struct device *jrdev = ctx->jrdev;
+-      bool iv_contig = false;
+       u32 *desc;
+       int ret = 0;
+ 
+       /* allocate extended descriptor */
+-      edesc = ablkcipher_giv_edesc_alloc(creq, DESC_JOB_IO_LEN *
+-                                     CAAM_CMD_SZ, &iv_contig);
++      edesc = ablkcipher_giv_edesc_alloc(creq, DESC_JOB_IO_LEN * CAAM_CMD_SZ);
+       if (IS_ERR(edesc))
+               return PTR_ERR(edesc);
+ 
+       /* Create and submit job descriptor*/
+       init_ablkcipher_giv_job(ctx->sh_desc_givenc, ctx->sh_desc_givenc_dma,
+-                              edesc, req, iv_contig);
++                              edesc, req);
+ #ifdef DEBUG
+       print_hex_dump(KERN_ERR,
+                      "ablkcipher jobdesc@" __stringify(__LINE__) ": ",
+diff --git a/drivers/crypto/caam/caamalg_qi.c b/drivers/crypto/caam/caamalg_qi.c
+index cacda0831390..6e61cc93c2b0 100644
+--- a/drivers/crypto/caam/caamalg_qi.c
++++ b/drivers/crypto/caam/caamalg_qi.c
+@@ -728,7 +728,7 @@ static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
+  * @assoclen: associated data length, in CAAM endianness
+  * @assoclen_dma: bus physical mapped address of req->assoclen
+  * @drv_req: driver-specific request structure
+- * @sgt: the h/w link table
++ * @sgt: the h/w link table, followed by IV
+  */
+ struct aead_edesc {
+       int src_nents;
+@@ -739,9 +739,6 @@ struct aead_edesc {
+       unsigned int assoclen;
+       dma_addr_t assoclen_dma;
+       struct caam_drv_req drv_req;
+-#define CAAM_QI_MAX_AEAD_SG                                           \
+-      ((CAAM_QI_MEMCACHE_SIZE - offsetof(struct aead_edesc, sgt)) /   \
+-       sizeof(struct qm_sg_entry))
+       struct qm_sg_entry sgt[0];
+ };
+ 
+@@ -753,7 +750,7 @@ struct aead_edesc {
+  * @qm_sg_bytes: length of dma mapped h/w link table
+  * @qm_sg_dma: bus physical mapped address of h/w link table
+  * @drv_req: driver-specific request structure
+- * @sgt: the h/w link table
++ * @sgt: the h/w link table, followed by IV
+  */
+ struct ablkcipher_edesc {
+       int src_nents;
+@@ -762,9 +759,6 @@ struct ablkcipher_edesc {
+       int qm_sg_bytes;
+       dma_addr_t qm_sg_dma;
+       struct caam_drv_req drv_req;
+-#define CAAM_QI_MAX_ABLKCIPHER_SG                                         \
+-      ((CAAM_QI_MEMCACHE_SIZE - offsetof(struct ablkcipher_edesc, sgt)) / \
+-       sizeof(struct qm_sg_entry))
+       struct qm_sg_entry sgt[0];
+ };
+ 
+@@ -986,17 +980,8 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
+               }
+       }
+ 
+-      if ((alg->caam.rfc3686 && encrypt) || !alg->caam.geniv) {
++      if ((alg->caam.rfc3686 && encrypt) || !alg->caam.geniv)
+               ivsize = crypto_aead_ivsize(aead);
+-              iv_dma = dma_map_single(qidev, req->iv, ivsize, DMA_TO_DEVICE);
+-              if (dma_mapping_error(qidev, iv_dma)) {
+-                      dev_err(qidev, "unable to map IV\n");
+-                      caam_unmap(qidev, req->src, req->dst, src_nents,
+-                                 dst_nents, 0, 0, op_type, 0, 0);
+-                      qi_cache_free(edesc);
+-                      return ERR_PTR(-ENOMEM);
+-              }
+-      }
+ 
+       /*
+        * Create S/G table: req->assoclen, [IV,] req->src [, req->dst].
+@@ -1004,16 +989,33 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
+        */
+       qm_sg_ents = 1 + !!ivsize + mapped_src_nents +
+                    (mapped_dst_nents > 1 ? mapped_dst_nents : 0);
+-      if (unlikely(qm_sg_ents > CAAM_QI_MAX_AEAD_SG)) {
+-              dev_err(qidev, "Insufficient S/G entries: %d > %zu\n",
+-                      qm_sg_ents, CAAM_QI_MAX_AEAD_SG);
+-              caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
+-                         iv_dma, ivsize, op_type, 0, 0);
++      sg_table = &edesc->sgt[0];
++      qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
++      if (unlikely(offsetof(struct aead_edesc, sgt) + qm_sg_bytes + ivsize >
++                   CAAM_QI_MEMCACHE_SIZE)) {
++              dev_err(qidev, "No space for %d S/G entries and/or %dB IV\n",
++                      qm_sg_ents, ivsize);
++              caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
++                         0, 0, 0, 0);
+               qi_cache_free(edesc);
+               return ERR_PTR(-ENOMEM);
+       }
+-      sg_table = &edesc->sgt[0];
+-      qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
++
++      if (ivsize) {
++              u8 *iv = (u8 *)(sg_table + qm_sg_ents);
++
++              /* Make sure IV is located in a DMAable area */
++              memcpy(iv, req->iv, ivsize);
++
++              iv_dma = dma_map_single(qidev, iv, ivsize, DMA_TO_DEVICE);
++              if (dma_mapping_error(qidev, iv_dma)) {
++                      dev_err(qidev, "unable to map IV\n");
++                      caam_unmap(qidev, req->src, req->dst, src_nents,
++                                 dst_nents, 0, 0, 0, 0, 0);
++                      qi_cache_free(edesc);
++                      return ERR_PTR(-ENOMEM);
++              }
++      }
+ 
+       edesc->src_nents = src_nents;
+       edesc->dst_nents = dst_nents;
+@@ -1166,15 +1168,27 @@ static void ablkcipher_done(struct caam_drv_req *drv_req, u32 status)
+ #endif
+ 
+       ablkcipher_unmap(qidev, edesc, req);
+-      qi_cache_free(edesc);
++
++      /* In case initial IV was generated, copy it in GIVCIPHER request */
++      if (edesc->drv_req.drv_ctx->op_type == GIVENCRYPT) {
++              u8 *iv;
++              struct skcipher_givcrypt_request *greq;
++
++              greq = container_of(req, struct skcipher_givcrypt_request,
++                                  creq);
++              iv = (u8 *)edesc->sgt + edesc->qm_sg_bytes;
++              memcpy(greq->giv, iv, ivsize);
++      }
+ 
+       /*
+        * The crypto API expects us to set the IV (req->info) to the last
+        * ciphertext block. This is used e.g. by the CTS mode.
+        */
+-      scatterwalk_map_and_copy(req->info, req->dst, req->nbytes - ivsize,
+-                               ivsize, 0);
++      if (edesc->drv_req.drv_ctx->op_type != DECRYPT)
++              scatterwalk_map_and_copy(req->info, req->dst, req->nbytes -
++                                       ivsize, ivsize, 0);
+ 
++      qi_cache_free(edesc);
+       ablkcipher_request_complete(req, status);
+ }
+ 
+@@ -1189,9 +1203,9 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
+       int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
+       struct ablkcipher_edesc *edesc;
+       dma_addr_t iv_dma;
+-      bool in_contig;
++      u8 *iv;
+       int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+-      int dst_sg_idx, qm_sg_ents;
++      int dst_sg_idx, qm_sg_ents, qm_sg_bytes;
+       struct qm_sg_entry *sg_table, *fd_sgt;
+       struct caam_drv_ctx *drv_ctx;
+       enum optype op_type = encrypt ? ENCRYPT : DECRYPT;
+@@ -1238,55 +1252,53 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
+               }
+       }
+ 
+-      iv_dma = dma_map_single(qidev, req->info, ivsize, DMA_TO_DEVICE);
+-      if (dma_mapping_error(qidev, iv_dma)) {
+-              dev_err(qidev, "unable to map IV\n");
+-              caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
+-                         0, 0, 0, 0);
+-              return ERR_PTR(-ENOMEM);
+-      }
+-
+-      if (mapped_src_nents == 1 &&
+-          iv_dma + ivsize == sg_dma_address(req->src)) {
+-              in_contig = true;
+-              qm_sg_ents = 0;
+-      } else {
+-              in_contig = false;
+-              qm_sg_ents = 1 + mapped_src_nents;
+-      }
++      qm_sg_ents = 1 + mapped_src_nents;
+       dst_sg_idx = qm_sg_ents;
+ 
+       qm_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
+-      if (unlikely(qm_sg_ents > CAAM_QI_MAX_ABLKCIPHER_SG)) {
+-              dev_err(qidev, "Insufficient S/G entries: %d > %zu\n",
+-                      qm_sg_ents, CAAM_QI_MAX_ABLKCIPHER_SG);
+-              caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
+-                         iv_dma, ivsize, op_type, 0, 0);
++      qm_sg_bytes = qm_sg_ents * sizeof(struct qm_sg_entry);
++      if (unlikely(offsetof(struct ablkcipher_edesc, sgt) + qm_sg_bytes +
++                   ivsize > CAAM_QI_MEMCACHE_SIZE)) {
++              dev_err(qidev, "No space for %d S/G entries and/or %dB IV\n",
++                      qm_sg_ents, ivsize);
++              caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
++                         0, 0, 0, 0);
+               return ERR_PTR(-ENOMEM);
+       }
+ 
+-      /* allocate space for base edesc and link tables */
++      /* allocate space for base edesc, link tables and IV */
+       edesc = qi_cache_alloc(GFP_DMA | flags);
+       if (unlikely(!edesc)) {
+               dev_err(qidev, "could not allocate extended descriptor\n");
+-              caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
+-                         iv_dma, ivsize, op_type, 0, 0);
++              caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
++                         0, 0, 0, 0);
++              return ERR_PTR(-ENOMEM);
++      }
++
++      /* Make sure IV is located in a DMAable area */
++      sg_table = &edesc->sgt[0];
++      iv = (u8 *)(sg_table + qm_sg_ents);
++      memcpy(iv, req->info, ivsize);
++
++      iv_dma = dma_map_single(qidev, iv, ivsize, DMA_TO_DEVICE);
++      if (dma_mapping_error(qidev, iv_dma)) {
++              dev_err(qidev, "unable to map IV\n");
++              caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
++                         0, 0, 0, 0);
++              qi_cache_free(edesc);
+               return ERR_PTR(-ENOMEM);
+       }
+ 
+       edesc->src_nents = src_nents;
+       edesc->dst_nents = dst_nents;
+       edesc->iv_dma = iv_dma;
+-      sg_table = &edesc->sgt[0];
+-      edesc->qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
++      edesc->qm_sg_bytes = qm_sg_bytes;
+       edesc->drv_req.app_ctx = req;
+       edesc->drv_req.cbk = ablkcipher_done;
+       edesc->drv_req.drv_ctx = drv_ctx;
+ 
+-      if (!in_contig) {
+-              dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
+-              sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + 1, 0);
+-      }
++      dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
++      sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + 1, 0);
+ 
+       if (mapped_dst_nents > 1)
+               sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
+@@ -1304,20 +1316,12 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
+ 
+       fd_sgt = &edesc->drv_req.fd_sgt[0];
+ 
+-      if (!in_contig)
+-              dma_to_qm_sg_one_last_ext(&fd_sgt[1], edesc->qm_sg_dma,
+-                                        ivsize + req->nbytes, 0);
+-      else
+-              dma_to_qm_sg_one_last(&fd_sgt[1], iv_dma, ivsize + req->nbytes,
+-                                    0);
++      dma_to_qm_sg_one_last_ext(&fd_sgt[1], edesc->qm_sg_dma,
++                                ivsize + req->nbytes, 0);
+ 
+       if (req->src == req->dst) {
+-              if (!in_contig)
+-                      dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma +
+-                                           sizeof(*sg_table), req->nbytes, 0);
+-              else
+-                      dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->src),
+-                                       req->nbytes, 0);
++              dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma +
++                                   sizeof(*sg_table), req->nbytes, 0);
+       } else if (mapped_dst_nents > 1) {
+               dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma + dst_sg_idx *
+                                    sizeof(*sg_table), req->nbytes, 0);
+@@ -1341,10 +1345,10 @@ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
+       int src_nents, mapped_src_nents, dst_nents, mapped_dst_nents;
+       struct ablkcipher_edesc *edesc;
+       dma_addr_t iv_dma;
+-      bool out_contig;
++      u8 *iv;
+       int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+       struct qm_sg_entry *sg_table, *fd_sgt;
+-      int dst_sg_idx, qm_sg_ents;
++      int dst_sg_idx, qm_sg_ents, qm_sg_bytes;
+       struct caam_drv_ctx *drv_ctx;
+ 
+       drv_ctx = get_drv_ctx(ctx, GIVENCRYPT);
+@@ -1392,46 +1396,45 @@ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
+               mapped_dst_nents = src_nents;
+       }
+ 
+-      iv_dma = dma_map_single(qidev, creq->giv, ivsize, DMA_FROM_DEVICE);
+-      if (dma_mapping_error(qidev, iv_dma)) {
+-              dev_err(qidev, "unable to map IV\n");
+-              caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
+-                         0, 0, 0, 0);
+-              return ERR_PTR(-ENOMEM);
+-      }
+-
+       qm_sg_ents = mapped_src_nents > 1 ? mapped_src_nents : 0;
+       dst_sg_idx = qm_sg_ents;
+-      if (mapped_dst_nents == 1 &&
+-          iv_dma + ivsize == sg_dma_address(req->dst)) {
+-              out_contig = true;
+-      } else {
+-              out_contig = false;
+-              qm_sg_ents += 1 + mapped_dst_nents;
+-      }
+ 
+-      if (unlikely(qm_sg_ents > CAAM_QI_MAX_ABLKCIPHER_SG)) {
+-              dev_err(qidev, "Insufficient S/G entries: %d > %zu\n",
+-                      qm_sg_ents, CAAM_QI_MAX_ABLKCIPHER_SG);
+-              caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
+-                         iv_dma, ivsize, GIVENCRYPT, 0, 0);
++      qm_sg_ents += 1 + mapped_dst_nents;
++      qm_sg_bytes = qm_sg_ents * sizeof(struct qm_sg_entry);
++      if (unlikely(offsetof(struct ablkcipher_edesc, sgt) + qm_sg_bytes +
++                   ivsize > CAAM_QI_MEMCACHE_SIZE)) {
++              dev_err(qidev, "No space for %d S/G entries and/or %dB IV\n",
++                      qm_sg_ents, ivsize);
++              caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
++                         0, 0, 0, 0);
+               return ERR_PTR(-ENOMEM);
+       }
+ 
+-      /* allocate space for base edesc and link tables */
++      /* allocate space for base edesc, link tables and IV */
+       edesc = qi_cache_alloc(GFP_DMA | flags);
+       if (!edesc) {
+               dev_err(qidev, "could not allocate extended descriptor\n");
+-              caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
+-                         iv_dma, ivsize, GIVENCRYPT, 0, 0);
++              caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
++                         0, 0, 0, 0);
++              return ERR_PTR(-ENOMEM);
++      }
++
++      /* Make sure IV is located in a DMAable area */
++      sg_table = &edesc->sgt[0];
++      iv = (u8 *)(sg_table + qm_sg_ents);
++      iv_dma = dma_map_single(qidev, iv, ivsize, DMA_FROM_DEVICE);
++      if (dma_mapping_error(qidev, iv_dma)) {
++              dev_err(qidev, "unable to map IV\n");
++              caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
++                         0, 0, 0, 0);
++              qi_cache_free(edesc);
+               return ERR_PTR(-ENOMEM);
+       }
+ 
+       edesc->src_nents = src_nents;
+       edesc->dst_nents = dst_nents;
+       edesc->iv_dma = iv_dma;
+-      sg_table = &edesc->sgt[0];
+-      edesc->qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
++      edesc->qm_sg_bytes = qm_sg_bytes;
+       edesc->drv_req.app_ctx = req;
+       edesc->drv_req.cbk = ablkcipher_done;
+       edesc->drv_req.drv_ctx = drv_ctx;
+@@ -1439,11 +1442,9 @@ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
+       if (mapped_src_nents > 1)
+               sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table, 0);
+ 
+-      if (!out_contig) {
+-              dma_to_qm_sg_one(sg_table + dst_sg_idx, iv_dma, ivsize, 0);
+-              sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
+-                               dst_sg_idx + 1, 0);
+-      }
++      dma_to_qm_sg_one(sg_table + dst_sg_idx, iv_dma, ivsize, 0);
++      sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table + dst_sg_idx + 1,
++                       0);
+ 
+       edesc->qm_sg_dma = dma_map_single(qidev, sg_table, edesc->qm_sg_bytes,
+                                         DMA_TO_DEVICE);
+@@ -1464,13 +1465,8 @@ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
+               dma_to_qm_sg_one(&fd_sgt[1], sg_dma_address(req->src),
+                                req->nbytes, 0);
+ 
+-      if (!out_contig)
+-              dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma + dst_sg_idx *
+-                                   sizeof(*sg_table), ivsize + req->nbytes,
+-                                   0);
+-      else
+-              dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->dst),
+-                               ivsize + req->nbytes, 0);
++      dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma + dst_sg_idx *
++                           sizeof(*sg_table), ivsize + req->nbytes, 0);
+ 
+       return edesc;
+ }
+@@ -1480,6 +1476,7 @@ static inline int ablkcipher_crypt(struct ablkcipher_request *req, bool encrypt)
+       struct ablkcipher_edesc *edesc;
+       struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+       struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
++      int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+       int ret;
+ 
+       if (unlikely(caam_congested))
+@@ -1490,6 +1487,14 @@ static inline int ablkcipher_crypt(struct ablkcipher_request *req, bool encrypt)
+       if (IS_ERR(edesc))
+               return PTR_ERR(edesc);
+ 
++      /*
++       * The crypto API expects us to set the IV (req->info) to the last
++       * ciphertext block.
++       */
++      if (!encrypt)
++              scatterwalk_map_and_copy(req->info, req->src, req->nbytes -
++                                       ivsize, ivsize, 0);
++
+       ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
+       if (!ret) {
+               ret = -EINPROGRESS;
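
The caamalg and caamalg_qi hunks above share one idea: instead of DMA-mapping the caller's req->iv/req->info directly, the IV is copied into the same edesc/qi_cache allocation as the hardware link table, immediately after the last S/G entry, so the buffer handed to dma_map_single() is always driver-owned and DMAable. A minimal userspace sketch of that layout follows; the edesc and sg_entry names are illustrative stand-ins, not the kernel API.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct sg_entry {                       /* stand-in for struct qm_sg_entry */
	uint64_t addr;
	uint32_t len;
	uint32_t flags;
};

struct edesc {                          /* stand-in for ablkcipher_edesc */
	int qm_sg_bytes;
	struct sg_entry sgt[];          /* S/G table, followed by IV */
};

int main(void)
{
	int qm_sg_ents = 4, ivsize = 16;
	uint8_t req_iv[16] = { 0x01, 0x02, 0x03 };      /* caller-owned IV */

	/* one allocation for the descriptor, the link table and the IV */
	struct edesc *edesc = calloc(1, sizeof(*edesc) +
				     qm_sg_ents * sizeof(struct sg_entry) + ivsize);
	if (!edesc)
		return 1;

	/* the IV lives immediately after the last S/G entry */
	uint8_t *iv = (uint8_t *)(edesc->sgt + qm_sg_ents);
	memcpy(iv, req_iv, ivsize);     /* this copy is what gets DMA-mapped */

	printf("IV copied at offset %zu\n", (size_t)(iv - (uint8_t *)edesc));
	free(edesc);
	return 0;
}
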
+diff --git a/drivers/crypto/caam/caampkc.c b/drivers/crypto/caam/caampkc.c
+index 7a897209f181..7ff4a25440ac 100644
+--- a/drivers/crypto/caam/caampkc.c
++++ b/drivers/crypto/caam/caampkc.c
+@@ -66,7 +66,7 @@ static void rsa_priv_f2_unmap(struct device *dev, struct rsa_edesc *edesc,
+       struct caam_rsa_key *key = &ctx->key;
+       struct rsa_priv_f2_pdb *pdb = &edesc->pdb.priv_f2;
+       size_t p_sz = key->p_sz;
+-      size_t q_sz = key->p_sz;
++      size_t q_sz = key->q_sz;
+ 
+       dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);
+       dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
+@@ -83,7 +83,7 @@ static void rsa_priv_f3_unmap(struct device *dev, struct rsa_edesc *edesc,
+       struct caam_rsa_key *key = &ctx->key;
+       struct rsa_priv_f3_pdb *pdb = &edesc->pdb.priv_f3;
+       size_t p_sz = key->p_sz;
+-      size_t q_sz = key->p_sz;
++      size_t q_sz = key->q_sz;
+ 
+       dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
+       dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
+@@ -166,18 +166,71 @@ static void rsa_priv_f3_done(struct device *dev, u32 *desc, u32 err,
+       akcipher_request_complete(req, err);
+ }
+ 
++static int caam_rsa_count_leading_zeros(struct scatterlist *sgl,
++                                      unsigned int nbytes,
++                                      unsigned int flags)
++{
++      struct sg_mapping_iter miter;
++      int lzeros, ents;
++      unsigned int len;
++      unsigned int tbytes = nbytes;
++      const u8 *buff;
++
++      ents = sg_nents_for_len(sgl, nbytes);
++      if (ents < 0)
++              return ents;
++
++      sg_miter_start(&miter, sgl, ents, SG_MITER_FROM_SG | flags);
++
++      lzeros = 0;
++      len = 0;
++      while (nbytes > 0) {
++              while (len && !*buff) {
++                      lzeros++;
++                      len--;
++                      buff++;
++              }
++
++              if (len && *buff)
++                      break;
++
++              sg_miter_next(&miter);
++              buff = miter.addr;
++              len = miter.length;
++
++              nbytes -= lzeros;
++              lzeros = 0;
++      }
++
++      miter.consumed = lzeros;
++      sg_miter_stop(&miter);
++      nbytes -= lzeros;
++
++      return tbytes - nbytes;
++}
++
+ static struct rsa_edesc *rsa_edesc_alloc(struct akcipher_request *req,
+                                        size_t desclen)
+ {
+       struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+       struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+       struct device *dev = ctx->dev;
++      struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
+       struct rsa_edesc *edesc;
+       gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+                      GFP_KERNEL : GFP_ATOMIC;
++      int sg_flags = (flags == GFP_ATOMIC) ? SG_MITER_ATOMIC : 0;
+       int sgc;
+       int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
+       int src_nents, dst_nents;
++      int lzeros;
++
++      lzeros = caam_rsa_count_leading_zeros(req->src, req->src_len, sg_flags);
++      if (lzeros < 0)
++              return ERR_PTR(lzeros);
++
++      req->src_len -= lzeros;
++      req->src = scatterwalk_ffwd(req_ctx->src, req->src, lzeros);
+ 
+       src_nents = sg_nents_for_len(req->src, req->src_len);
+       dst_nents = sg_nents_for_len(req->dst, req->dst_len);
+@@ -344,7 +397,7 @@ static int set_rsa_priv_f2_pdb(struct akcipher_request *req,
+       struct rsa_priv_f2_pdb *pdb = &edesc->pdb.priv_f2;
+       int sec4_sg_index = 0;
+       size_t p_sz = key->p_sz;
+-      size_t q_sz = key->p_sz;
++      size_t q_sz = key->q_sz;
+ 
+       pdb->d_dma = dma_map_single(dev, key->d, key->d_sz, DMA_TO_DEVICE);
+       if (dma_mapping_error(dev, pdb->d_dma)) {
+@@ -419,7 +472,7 @@ static int set_rsa_priv_f3_pdb(struct akcipher_request *req,
+       struct rsa_priv_f3_pdb *pdb = &edesc->pdb.priv_f3;
+       int sec4_sg_index = 0;
+       size_t p_sz = key->p_sz;
+-      size_t q_sz = key->p_sz;
++      size_t q_sz = key->q_sz;
+ 
+       pdb->p_dma = dma_map_single(dev, key->p, p_sz, DMA_TO_DEVICE);
+       if (dma_mapping_error(dev, pdb->p_dma)) {
+@@ -953,6 +1006,7 @@ static struct akcipher_alg caam_rsa = {
+       .max_size = caam_rsa_max_size,
+       .init = caam_rsa_init_tfm,
+       .exit = caam_rsa_exit_tfm,
++      .reqsize = sizeof(struct caam_rsa_req_ctx),
+       .base = {
+               .cra_name = "rsa",
+               .cra_driver_name = "rsa-caam",
+diff --git a/drivers/crypto/caam/caampkc.h b/drivers/crypto/caam/caampkc.h
+index fd145c46eae1..82645bcf8b27 100644
+--- a/drivers/crypto/caam/caampkc.h
++++ b/drivers/crypto/caam/caampkc.h
+@@ -95,6 +95,14 @@ struct caam_rsa_ctx {
+       struct device *dev;
+ };
+ 
++/**
++ * caam_rsa_req_ctx - per request context.
++ * @src: input scatterlist (stripped of leading zeros)
++ */
++struct caam_rsa_req_ctx {
++      struct scatterlist src[2];
++};
++
+ /**
+  * rsa_edesc - s/w-extended rsa descriptor
+  * @src_nents     : number of segments in input scatterlist
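
The new caam_rsa_count_leading_zeros() above walks the input scatterlist and strips leading 0x00 bytes before the operand is handed to the accelerator: RSA inputs are big-endian integers, so leading zeros carry no information and only inflate the apparent operand length. A flat-buffer sketch of the same idea (the scatterlist iteration is elided):

#include <stddef.h>
#include <stdio.h>

/* flat-buffer version of the scatterlist walk in the patch */
static size_t count_leading_zeros(const unsigned char *buf, size_t len)
{
	size_t lzeros = 0;

	while (lzeros < len && buf[lzeros] == 0x00)
		lzeros++;
	return lzeros;
}

int main(void)
{
	unsigned char msg[] = { 0x00, 0x00, 0x12, 0x34 };
	size_t lzeros = count_leading_zeros(msg, sizeof(msg));

	/* the engine is then given msg + lzeros and sizeof(msg) - lzeros */
	printf("skip %zu byte(s), effective length %zu\n",
	       lzeros, sizeof(msg) - lzeros);
	return 0;
}
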
+diff --git a/drivers/crypto/cavium/zip/common.h b/drivers/crypto/cavium/zip/common.h
+index dc451e0a43c5..58fb3ed6e644 100644
+--- a/drivers/crypto/cavium/zip/common.h
++++ b/drivers/crypto/cavium/zip/common.h
+@@ -46,8 +46,10 @@
+ #ifndef __COMMON_H__
+ #define __COMMON_H__
+ 
++#include <linux/delay.h>
+ #include <linux/init.h>
+ #include <linux/interrupt.h>
++#include <linux/io.h>
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+ #include <linux/pci.h>
+@@ -149,6 +151,25 @@ struct zip_operation {
+       u32   sizeofzops;
+ };
+ 
++static inline int zip_poll_result(union zip_zres_s *result)
++{
++      int retries = 1000;
++
++      while (!result->s.compcode) {
++              if (!--retries) {
++                      pr_err("ZIP ERR: request timed out");
++                      return -ETIMEDOUT;
++              }
++              udelay(10);
++              /*
++               * Force re-reading of compcode which is updated
++               * by the ZIP coprocessor.
++               */
++              rmb();
++      }
++      return 0;
++}
++
+ /* error messages */
+ #define zip_err(fmt, args...) pr_err("ZIP ERR:%s():%d: " \
+                             fmt "\n", __func__, __LINE__, ## args)
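
zip_poll_result() above replaces an unbounded busy-wait on the coprocessor-written completion code with a bounded poll that delays between reads and eventually gives up with -ETIMEDOUT; the rmb() forces a fresh read of memory the device updates behind the CPU's back. A userspace model of the same shape, with a volatile read standing in for the barrier:

#include <stdio.h>
#include <unistd.h>

/* compcode is written by the device; volatile forces a re-read, much
 * as the rmb() does in zip_poll_result() */
static int poll_result(volatile int *compcode)
{
	int retries = 1000;

	while (!*compcode) {
		if (!--retries) {
			fprintf(stderr, "request timed out\n");
			return -1;      /* -ETIMEDOUT in the kernel */
		}
		usleep(10);             /* udelay(10) in the kernel */
	}
	return 0;
}

int main(void)
{
	int compcode = 1;               /* pretend the engine completed */

	return poll_result(&compcode) ? 1 : 0;
}
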
+diff --git a/drivers/crypto/cavium/zip/zip_crypto.c b/drivers/crypto/cavium/zip/zip_crypto.c
+index 8df4d26cf9d4..b92b6e7e100f 100644
+--- a/drivers/crypto/cavium/zip/zip_crypto.c
++++ b/drivers/crypto/cavium/zip/zip_crypto.c
+@@ -124,7 +124,7 @@ int zip_compress(const u8 *src, unsigned int slen,
+                struct zip_kernel_ctx *zip_ctx)
+ {
+       struct zip_operation  *zip_ops   = NULL;
+-      struct zip_state      zip_state;
++      struct zip_state      *zip_state;
+       struct zip_device     *zip = NULL;
+       int ret;
+ 
+@@ -135,20 +135,23 @@ int zip_compress(const u8 *src, unsigned int slen,
+       if (!zip)
+               return -ENODEV;
+ 
+-      memset(&zip_state, 0, sizeof(struct zip_state));
++      zip_state = kzalloc(sizeof(*zip_state), GFP_ATOMIC);
++      if (!zip_state)
++              return -ENOMEM;
++
+       zip_ops = &zip_ctx->zip_comp;
+ 
+       zip_ops->input_len  = slen;
+       zip_ops->output_len = *dlen;
+       memcpy(zip_ops->input, src, slen);
+ 
+-      ret = zip_deflate(zip_ops, &zip_state, zip);
++      ret = zip_deflate(zip_ops, zip_state, zip);
+ 
+       if (!ret) {
+               *dlen = zip_ops->output_len;
+               memcpy(dst, zip_ops->output, *dlen);
+       }
+-
++      kfree(zip_state);
+       return ret;
+ }
+ 
+@@ -157,7 +160,7 @@ int zip_decompress(const u8 *src, unsigned int slen,
+                  struct zip_kernel_ctx *zip_ctx)
+ {
+       struct zip_operation  *zip_ops   = NULL;
+-      struct zip_state      zip_state;
++      struct zip_state      *zip_state;
+       struct zip_device     *zip = NULL;
+       int ret;
+ 
+@@ -168,7 +171,10 @@ int zip_decompress(const u8 *src, unsigned int slen,
+       if (!zip)
+               return -ENODEV;
+ 
+-      memset(&zip_state, 0, sizeof(struct zip_state));
++      zip_state = kzalloc(sizeof(*zip_state), GFP_ATOMIC);
++      if (!zip_state)
++              return -ENOMEM;
++
+       zip_ops = &zip_ctx->zip_decomp;
+       memcpy(zip_ops->input, src, slen);
+ 
+@@ -179,13 +185,13 @@ int zip_decompress(const u8 *src, unsigned int slen,
+       zip_ops->input_len  = slen;
+       zip_ops->output_len = *dlen;
+ 
+-      ret = zip_inflate(zip_ops, &zip_state, zip);
++      ret = zip_inflate(zip_ops, zip_state, zip);
+ 
+       if (!ret) {
+               *dlen = zip_ops->output_len;
+               memcpy(dst, zip_ops->output, *dlen);
+       }
+-
++      kfree(zip_state);
+       return ret;
+ }
+ 
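
The zip_compress()/zip_decompress() hunks above move struct zip_state, which is large, off the kernel stack and onto the heap with kzalloc()/kfree(). A sketch of the resulting shape, with an invented size to make the point:

#include <stdlib.h>

struct zip_state {
	unsigned char scratch[64 * 1024];       /* invented: too big for a stack */
};

static int do_op(struct zip_state *s)
{
	(void)s;        /* the real code runs zip_deflate()/zip_inflate() */
	return 0;
}

int main(void)
{
	/* kzalloc(sizeof(*zip_state), GFP_ATOMIC) in the kernel version */
	struct zip_state *s = calloc(1, sizeof(*s));
	int ret;

	if (!s)
		return 1;       /* -ENOMEM in the kernel */
	ret = do_op(s);
	free(s);                /* kfree(zip_state) */
	return ret;
}
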
+diff --git a/drivers/crypto/cavium/zip/zip_deflate.c b/drivers/crypto/cavium/zip/zip_deflate.c
+index 9a944b8c1e29..d7133f857d67 100644
+--- a/drivers/crypto/cavium/zip/zip_deflate.c
++++ b/drivers/crypto/cavium/zip/zip_deflate.c
+@@ -129,8 +129,8 @@ int zip_deflate(struct zip_operation *zip_ops, struct zip_state *s,
+       /* Stats update for compression requests submitted */
+       atomic64_inc(&zip_dev->stats.comp_req_submit);
+ 
+-      while (!result_ptr->s.compcode)
+-              continue;
++      /* Wait for completion or error */
++      zip_poll_result(result_ptr);
+ 
+       /* Stats update for compression requests completed */
+       atomic64_inc(&zip_dev->stats.comp_req_complete);
+diff --git a/drivers/crypto/cavium/zip/zip_inflate.c b/drivers/crypto/cavium/zip/zip_inflate.c
+index 50cbdd83dbf2..7e0d73e2f89e 100644
+--- a/drivers/crypto/cavium/zip/zip_inflate.c
++++ b/drivers/crypto/cavium/zip/zip_inflate.c
+@@ -143,8 +143,8 @@ int zip_inflate(struct zip_operation *zip_ops, struct zip_state *s,
+       /* Decompression requests submitted stats update */
+       atomic64_inc(&zip_dev->stats.decomp_req_submit);
+ 
+-      while (!result_ptr->s.compcode)
+-              continue;
++      /* Wait for completion or error */
++      zip_poll_result(result_ptr);
+ 
+       /* Decompression requests completed stats update */
+       atomic64_inc(&zip_dev->stats.decomp_req_complete);
+diff --git a/drivers/crypto/ccree/cc_debugfs.c b/drivers/crypto/ccree/cc_debugfs.c
+index 08f8db489cf0..5ca184e42483 100644
+--- a/drivers/crypto/ccree/cc_debugfs.c
++++ b/drivers/crypto/ccree/cc_debugfs.c
+@@ -26,7 +26,8 @@ struct cc_debugfs_ctx {
+ static struct dentry *cc_debugfs_dir;
+ 
+ static struct debugfs_reg32 debug_regs[] = {
+-      CC_DEBUG_REG(HOST_SIGNATURE),
++      { .name = "SIGNATURE" }, /* Must be 0th */
++      { .name = "VERSION" }, /* Must be 1st */
+       CC_DEBUG_REG(HOST_IRR),
+       CC_DEBUG_REG(HOST_POWER_DOWN_EN),
+       CC_DEBUG_REG(AXIM_MON_ERR),
+@@ -34,7 +35,6 @@ static struct debugfs_reg32 debug_regs[] = {
+       CC_DEBUG_REG(HOST_IMR),
+       CC_DEBUG_REG(AXIM_CFG),
+       CC_DEBUG_REG(AXIM_CACHE_PARAMS),
+-      CC_DEBUG_REG(HOST_VERSION),
+       CC_DEBUG_REG(GPR_HOST),
+       CC_DEBUG_REG(AXIM_MON_COMP),
+ };
+@@ -58,6 +58,9 @@ int cc_debugfs_init(struct cc_drvdata *drvdata)
+       struct debugfs_regset32 *regset;
+       struct dentry *file;
+ 
++      debug_regs[0].offset = drvdata->sig_offset;
++      debug_regs[1].offset = drvdata->ver_offset;
++
+       ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+       if (!ctx)
+               return -ENOMEM;
+diff --git a/drivers/crypto/ccree/cc_driver.c b/drivers/crypto/ccree/cc_driver.c
+index 89ce013ae093..6f93ce7701ec 100644
+--- a/drivers/crypto/ccree/cc_driver.c
++++ b/drivers/crypto/ccree/cc_driver.c
+@@ -207,9 +207,13 @@ static int init_cc_resources(struct platform_device *plat_dev)
+       if (hw_rev->rev >= CC_HW_REV_712) {
+               new_drvdata->hash_len_sz = HASH_LEN_SIZE_712;
+               new_drvdata->axim_mon_offset = CC_REG(AXIM_MON_COMP);
++              new_drvdata->sig_offset = CC_REG(HOST_SIGNATURE_712);
++              new_drvdata->ver_offset = CC_REG(HOST_VERSION_712);
+       } else {
+               new_drvdata->hash_len_sz = HASH_LEN_SIZE_630;
+               new_drvdata->axim_mon_offset = CC_REG(AXIM_MON_COMP8);
++              new_drvdata->sig_offset = CC_REG(HOST_SIGNATURE_630);
++              new_drvdata->ver_offset = CC_REG(HOST_VERSION_630);
+       }
+ 
+       platform_set_drvdata(plat_dev, new_drvdata);
+@@ -276,7 +280,7 @@ static int init_cc_resources(struct platform_device *plat_dev)
+       }
+ 
+       /* Verify correct mapping */
+-      signature_val = cc_ioread(new_drvdata, CC_REG(HOST_SIGNATURE));
++      signature_val = cc_ioread(new_drvdata, new_drvdata->sig_offset);
+       if (signature_val != hw_rev->sig) {
+               dev_err(dev, "Invalid CC signature: SIGNATURE=0x%08X != expected=0x%08X\n",
+                       signature_val, hw_rev->sig);
+@@ -287,7 +291,7 @@ static int init_cc_resources(struct platform_device *plat_dev)
+ 
+       /* Display HW versions */
+       dev_info(dev, "ARM CryptoCell %s Driver: HW version 0x%08X, Driver version %s\n",
+-               hw_rev->name, cc_ioread(new_drvdata, CC_REG(HOST_VERSION)),
++               hw_rev->name, cc_ioread(new_drvdata, new_drvdata->ver_offset),
+                DRV_MODULE_VERSION);
+ 
+       rc = init_cc_regs(new_drvdata, true);
+diff --git a/drivers/crypto/ccree/cc_driver.h b/drivers/crypto/ccree/cc_driver.h
+index 2048fdeb9579..95f82b2d1e70 100644
+--- a/drivers/crypto/ccree/cc_driver.h
++++ b/drivers/crypto/ccree/cc_driver.h
+@@ -129,6 +129,8 @@ struct cc_drvdata {
+       enum cc_hw_rev hw_rev;
+       u32 hash_len_sz;
+       u32 axim_mon_offset;
++      u32 sig_offset;
++      u32 ver_offset;
+ };
+ 
+ struct cc_crypto_alg {
+diff --git a/drivers/crypto/ccree/cc_host_regs.h b/drivers/crypto/ccree/cc_host_regs.h
+index f51001898ca1..616b2e1c41ba 100644
+--- a/drivers/crypto/ccree/cc_host_regs.h
++++ b/drivers/crypto/ccree/cc_host_regs.h
+@@ -45,7 +45,8 @@
+ #define CC_HOST_ICR_DSCRPTR_WATERMARK_QUEUE0_CLEAR_BIT_SIZE   0x1UL
+ #define CC_HOST_ICR_AXIM_COMP_INT_CLEAR_BIT_SHIFT     0x17UL
+ #define CC_HOST_ICR_AXIM_COMP_INT_CLEAR_BIT_SIZE      0x1UL
+-#define CC_HOST_SIGNATURE_REG_OFFSET  0xA24UL
++#define CC_HOST_SIGNATURE_712_REG_OFFSET      0xA24UL
++#define CC_HOST_SIGNATURE_630_REG_OFFSET      0xAC8UL
+ #define CC_HOST_SIGNATURE_VALUE_BIT_SHIFT     0x0UL
+ #define CC_HOST_SIGNATURE_VALUE_BIT_SIZE      0x20UL
+ #define CC_HOST_BOOT_REG_OFFSET       0xA28UL
+@@ -105,7 +106,8 @@
+ #define CC_HOST_BOOT_ONLY_ENCRYPT_LOCAL_BIT_SIZE      0x1UL
+ #define CC_HOST_BOOT_AES_EXISTS_LOCAL_BIT_SHIFT       0x1EUL
+ #define CC_HOST_BOOT_AES_EXISTS_LOCAL_BIT_SIZE        0x1UL
+-#define CC_HOST_VERSION_REG_OFFSET    0xA40UL
++#define CC_HOST_VERSION_712_REG_OFFSET        0xA40UL
++#define CC_HOST_VERSION_630_REG_OFFSET        0xAD8UL
+ #define CC_HOST_VERSION_VALUE_BIT_SHIFT       0x0UL
+ #define CC_HOST_VERSION_VALUE_BIT_SIZE        0x20UL
+ #define CC_HOST_KFDE0_VALID_REG_OFFSET        0xA60UL
+diff --git a/drivers/crypto/chelsio/chcr_ipsec.c b/drivers/crypto/chelsio/chcr_ipsec.c
+index 8e0aa3f175c9..461b97e2f1fd 100644
+--- a/drivers/crypto/chelsio/chcr_ipsec.c
++++ b/drivers/crypto/chelsio/chcr_ipsec.c
+@@ -346,18 +346,23 @@ inline void *copy_cpltx_pktxt(struct sk_buff *skb,
+                               struct net_device *dev,
+                               void *pos)
+ {
++      struct cpl_tx_pkt_core *cpl;
++      struct sge_eth_txq *q;
+       struct adapter *adap;
+       struct port_info *pi;
+-      struct sge_eth_txq *q;
+-      struct cpl_tx_pkt_core *cpl;
+-      u64 cntrl = 0;
+       u32 ctrl0, qidx;
++      u64 cntrl = 0;
++      int left;
+ 
+       pi = netdev_priv(dev);
+       adap = pi->adapter;
+       qidx = skb->queue_mapping;
+       q = &adap->sge.ethtxq[qidx + pi->first_qset];
+ 
++      left = (void *)q->q.stat - pos;
++      if (!left)
++              pos = q->q.desc;
++
+       cpl = (struct cpl_tx_pkt_core *)pos;
+ 
+       cntrl = TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F;
+@@ -382,18 +387,17 @@ inline void *copy_key_cpltx_pktxt(struct sk_buff *skb,
+                               void *pos,
+                               struct ipsec_sa_entry *sa_entry)
+ {
+-      struct adapter *adap;
+-      struct port_info *pi;
+-      struct sge_eth_txq *q;
+-      unsigned int len, qidx;
+       struct _key_ctx *key_ctx;
+       int left, eoq, key_len;
++      struct sge_eth_txq *q;
++      struct adapter *adap;
++      struct port_info *pi;
++      unsigned int qidx;
+ 
+       pi = netdev_priv(dev);
+       adap = pi->adapter;
+       qidx = skb->queue_mapping;
+       q = &adap->sge.ethtxq[qidx + pi->first_qset];
+-      len = sa_entry->enckey_len + sizeof(struct cpl_tx_pkt_core);
+       key_len = sa_entry->kctx_len;
+ 
+       /* end of queue, reset pos to start of queue */
+@@ -411,19 +415,14 @@ inline void *copy_key_cpltx_pktxt(struct sk_buff *skb,
+       pos += sizeof(struct _key_ctx);
+       left -= sizeof(struct _key_ctx);
+ 
+-      if (likely(len <= left)) {
++      if (likely(key_len <= left)) {
+               memcpy(key_ctx->key, sa_entry->key, key_len);
+               pos += key_len;
+       } else {
+-              if (key_len <= left) {
+-                      memcpy(pos, sa_entry->key, key_len);
+-                      pos += key_len;
+-              } else {
+-                      memcpy(pos, sa_entry->key, left);
+-                      memcpy(q->q.desc, sa_entry->key + left,
+-                             key_len - left);
+-                      pos = (u8 *)q->q.desc + (key_len - left);
+-              }
++              memcpy(pos, sa_entry->key, left);
++              memcpy(q->q.desc, sa_entry->key + left,
++                     key_len - left);
++              pos = (u8 *)q->q.desc + (key_len - left);
+       }
+       /* Copy CPL TX PKT XT */
+       pos = copy_cpltx_pktxt(skb, dev, pos);
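
The copy_key_cpltx_pktxt() hunk above drops a branch that could never be taken and keeps only the wrap-around case: when the key does not fit in the space left before the end of the descriptor ring, it is written with two memcpy() calls, the tail of the ring first and the remainder at the start. A flat-array model of that wrap, with invented sizes:

#include <stdio.h>
#include <string.h>

#define RING_SIZE 16                    /* invented; real rings are larger */

static unsigned char ring[RING_SIZE];

static size_t ring_copy(size_t pos, const unsigned char *src, size_t len)
{
	size_t left = RING_SIZE - pos;  /* space before the end of the ring */

	if (len <= left) {
		memcpy(ring + pos, src, len);
		return pos + len;
	}
	memcpy(ring + pos, src, left);          /* fill to the end ... */
	memcpy(ring, src + left, len - left);   /* ... and wrap to the start */
	return len - left;
}

int main(void)
{
	unsigned char key[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };
	size_t pos = ring_copy(12, key, sizeof(key));

	printf("next write position: %zu\n", pos);      /* 4: the copy wrapped */
	return 0;
}
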
+diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c
+index ad02aa63b519..d1a1c74fb56a 100644
+--- a/drivers/crypto/omap-sham.c
++++ b/drivers/crypto/omap-sham.c
+@@ -1087,7 +1087,7 @@ static void omap_sham_finish_req(struct ahash_request *req, int err)
+ 
+       if (test_bit(FLAGS_SGS_COPIED, &dd->flags))
+               free_pages((unsigned long)sg_virt(ctx->sg),
+-                         get_order(ctx->sg->length));
++                         get_order(ctx->sg->length + ctx->bufcnt));
+ 
+       if (test_bit(FLAGS_SGS_ALLOCED, &dd->flags))
+               kfree(ctx->sg);
+diff --git a/drivers/crypto/vmx/aes.c b/drivers/crypto/vmx/aes.c
+index 96072b9b55c4..d7316f7a3a69 100644
+--- a/drivers/crypto/vmx/aes.c
++++ b/drivers/crypto/vmx/aes.c
+@@ -48,8 +48,6 @@ static int p8_aes_init(struct crypto_tfm *tfm)
+                      alg, PTR_ERR(fallback));
+               return PTR_ERR(fallback);
+       }
+-      printk(KERN_INFO "Using '%s' as fallback implementation.\n",
+-             crypto_tfm_alg_driver_name((struct crypto_tfm *) fallback));
+ 
+       crypto_cipher_set_flags(fallback,
+                               crypto_cipher_get_flags((struct
+diff --git a/drivers/crypto/vmx/aes_cbc.c b/drivers/crypto/vmx/aes_cbc.c
+index 7394d35d5936..5285ece4f33a 100644
+--- a/drivers/crypto/vmx/aes_cbc.c
++++ b/drivers/crypto/vmx/aes_cbc.c
+@@ -52,9 +52,6 @@ static int p8_aes_cbc_init(struct crypto_tfm *tfm)
+                      alg, PTR_ERR(fallback));
+               return PTR_ERR(fallback);
+       }
+-      printk(KERN_INFO "Using '%s' as fallback implementation.\n",
+-              crypto_skcipher_driver_name(fallback));
+-
+ 
+       crypto_skcipher_set_flags(
+               fallback,
+diff --git a/drivers/crypto/vmx/aes_ctr.c b/drivers/crypto/vmx/aes_ctr.c
+index fc60d00a2e84..cd777c75291d 100644
+--- a/drivers/crypto/vmx/aes_ctr.c
++++ b/drivers/crypto/vmx/aes_ctr.c
+@@ -50,8 +50,6 @@ static int p8_aes_ctr_init(struct crypto_tfm *tfm)
+                      alg, PTR_ERR(fallback));
+               return PTR_ERR(fallback);
+       }
+-      printk(KERN_INFO "Using '%s' as fallback implementation.\n",
+-              crypto_skcipher_driver_name(fallback));
+ 
+       crypto_skcipher_set_flags(
+               fallback,
+diff --git a/drivers/crypto/vmx/aes_xts.c b/drivers/crypto/vmx/aes_xts.c
+index 8cd6e62e4c90..8bd9aff0f55f 100644
+--- a/drivers/crypto/vmx/aes_xts.c
++++ b/drivers/crypto/vmx/aes_xts.c
+@@ -53,8 +53,6 @@ static int p8_aes_xts_init(struct crypto_tfm *tfm)
+                       alg, PTR_ERR(fallback));
+               return PTR_ERR(fallback);
+       }
+-      printk(KERN_INFO "Using '%s' as fallback implementation.\n",
+-              crypto_skcipher_driver_name(fallback));
+ 
+       crypto_skcipher_set_flags(
+               fallback,
+diff --git a/drivers/crypto/vmx/ghash.c b/drivers/crypto/vmx/ghash.c
+index 27a94a119009..1c4b5b889fba 100644
+--- a/drivers/crypto/vmx/ghash.c
++++ b/drivers/crypto/vmx/ghash.c
+@@ -64,8 +64,6 @@ static int p8_ghash_init_tfm(struct crypto_tfm *tfm)
+                      alg, PTR_ERR(fallback));
+               return PTR_ERR(fallback);
+       }
+-      printk(KERN_INFO "Using '%s' as fallback implementation.\n",
+-             crypto_tfm_alg_driver_name(crypto_shash_tfm(fallback)));
+ 
+       crypto_shash_set_flags(fallback,
+                              crypto_shash_get_flags((struct crypto_shash
+diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
+index 06e9650b3b30..a89b81b35932 100644
+--- a/drivers/input/joystick/xpad.c
++++ b/drivers/input/joystick/xpad.c
+@@ -123,6 +123,7 @@ static const struct xpad_device {
+       u8 mapping;
+       u8 xtype;
+ } xpad_device[] = {
++      { 0x0079, 0x18d4, "GPD Win 2 Controller", 0, XTYPE_XBOX360 },
+       { 0x044f, 0x0f00, "Thrustmaster Wheel", 0, XTYPE_XBOX },
+       { 0x044f, 0x0f03, "Thrustmaster Wheel", 0, XTYPE_XBOX },
+       { 0x044f, 0x0f07, "Thrustmaster, Inc. Controller", 0, XTYPE_XBOX },
+@@ -409,6 +410,7 @@ static const signed short xpad_abs_triggers[] = {
+ 
+ static const struct usb_device_id xpad_table[] = {
+       { USB_INTERFACE_INFO('X', 'B', 0) },    /* X-Box USB-IF not approved class */
++      XPAD_XBOX360_VENDOR(0x0079),            /* GPD Win 2 Controller */
+       XPAD_XBOX360_VENDOR(0x044f),            /* Thrustmaster X-Box 360 controllers */
+       XPAD_XBOX360_VENDOR(0x045e),            /* Microsoft X-Box 360 controllers */
+       XPAD_XBOXONE_VENDOR(0x045e),            /* Microsoft X-Box One controllers */
+diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c
+index 75e757520ef0..93967c8139e7 100644
+--- a/drivers/input/mouse/elan_i2c_core.c
++++ b/drivers/input/mouse/elan_i2c_core.c
+@@ -1262,6 +1262,7 @@ static const struct acpi_device_id elan_acpi_id[] = {
+       { "ELAN060B", 0 },
+       { "ELAN060C", 0 },
+       { "ELAN0611", 0 },
++      { "ELAN0612", 0 },
+       { "ELAN1000", 0 },
+       { }
+ };
+diff --git a/drivers/input/touchscreen/goodix.c b/drivers/input/touchscreen/goodix.c
+index 9736c83dd418..f2d9c2c41885 100644
+--- a/drivers/input/touchscreen/goodix.c
++++ b/drivers/input/touchscreen/goodix.c
+@@ -933,6 +933,7 @@ MODULE_DEVICE_TABLE(i2c, goodix_ts_id);
+ #ifdef CONFIG_ACPI
+ static const struct acpi_device_id goodix_acpi_match[] = {
+       { "GDIX1001", 0 },
++      { "GDIX1002", 0 },
+       { }
+ };
+ MODULE_DEVICE_TABLE(acpi, goodix_acpi_match);
+diff --git a/drivers/misc/vmw_balloon.c b/drivers/misc/vmw_balloon.c
+index 9047c0a529b2..efd733472a35 100644
+--- a/drivers/misc/vmw_balloon.c
++++ b/drivers/misc/vmw_balloon.c
+@@ -576,15 +576,9 @@ static void vmballoon_pop(struct vmballoon *b)
+               }
+       }
+ 
+-      if (b->batch_page) {
+-              vunmap(b->batch_page);
+-              b->batch_page = NULL;
+-      }
+-
+-      if (b->page) {
+-              __free_page(b->page);
+-              b->page = NULL;
+-      }
++      /* Clearing the batch_page unconditionally has no adverse effect */
++      free_page((unsigned long)b->batch_page);
++      b->batch_page = NULL;
+ }
+ 
+ /*
+@@ -991,16 +985,13 @@ static const struct vmballoon_ops vmballoon_batched_ops = {
+ 
+ static bool vmballoon_init_batching(struct vmballoon *b)
+ {
+-      b->page = alloc_page(VMW_PAGE_ALLOC_NOSLEEP);
+-      if (!b->page)
+-              return false;
++      struct page *page;
+ 
+-      b->batch_page = vmap(&b->page, 1, VM_MAP, PAGE_KERNEL);
+-      if (!b->batch_page) {
+-              __free_page(b->page);
++      page = alloc_page(GFP_KERNEL | __GFP_ZERO);
++      if (!page)
+               return false;
+-      }
+ 
++      b->batch_page = page_address(page);
+       return true;
+ }
+ 
+diff --git a/drivers/nfc/pn533/usb.c b/drivers/nfc/pn533/usb.c
+index e153e8b64bb8..d5553c47014f 100644
+--- a/drivers/nfc/pn533/usb.c
++++ b/drivers/nfc/pn533/usb.c
+@@ -62,6 +62,9 @@ struct pn533_usb_phy {
+       struct urb *out_urb;
+       struct urb *in_urb;
+ 
++      struct urb *ack_urb;
++      u8 *ack_buffer;
++
+       struct pn533 *priv;
+ };
+ 
+@@ -150,13 +153,16 @@ static int pn533_usb_send_ack(struct pn533 *dev, gfp_t flags)
+       struct pn533_usb_phy *phy = dev->phy;
+       static const u8 ack[6] = {0x00, 0x00, 0xff, 0x00, 0xff, 0x00};
+       /* spec 7.1.1.3:  Preamble, SoPC (2), ACK Code (2), Postamble */
+-      int rc;
+ 
+-      phy->out_urb->transfer_buffer = (u8 *)ack;
+-      phy->out_urb->transfer_buffer_length = sizeof(ack);
+-      rc = usb_submit_urb(phy->out_urb, flags);
++      if (!phy->ack_buffer) {
++              phy->ack_buffer = kmemdup(ack, sizeof(ack), flags);
++              if (!phy->ack_buffer)
++                      return -ENOMEM;
++      }
+ 
+-      return rc;
++      phy->ack_urb->transfer_buffer = phy->ack_buffer;
++      phy->ack_urb->transfer_buffer_length = sizeof(ack);
++      return usb_submit_urb(phy->ack_urb, flags);
+ }
+ 
+ static int pn533_usb_send_frame(struct pn533 *dev,
+@@ -375,26 +381,31 @@ static int pn533_acr122_poweron_rdr(struct pn533_usb_phy *phy)
+       /* Power on th reader (CCID cmd) */
+       u8 cmd[10] = {PN533_ACR122_PC_TO_RDR_ICCPOWERON,
+                     0, 0, 0, 0, 0, 0, 3, 0, 0};
++      char *buffer;
++      int transferred;
+       int rc;
+       void *cntx;
+       struct pn533_acr122_poweron_rdr_arg arg;
+ 
+       dev_dbg(&phy->udev->dev, "%s\n", __func__);
+ 
++      buffer = kmemdup(cmd, sizeof(cmd), GFP_KERNEL);
++      if (!buffer)
++              return -ENOMEM;
++
+       init_completion(&arg.done);
+       cntx = phy->in_urb->context;  /* backup context */
+ 
+       phy->in_urb->complete = pn533_acr122_poweron_rdr_resp;
+       phy->in_urb->context = &arg;
+ 
+-      phy->out_urb->transfer_buffer = cmd;
+-      phy->out_urb->transfer_buffer_length = sizeof(cmd);
+-
+       print_hex_dump_debug("ACR122 TX: ", DUMP_PREFIX_NONE, 16, 1,
+                      cmd, sizeof(cmd), false);
+ 
+-      rc = usb_submit_urb(phy->out_urb, GFP_KERNEL);
+-      if (rc) {
++      rc = usb_bulk_msg(phy->udev, phy->out_urb->pipe, buffer, sizeof(cmd),
++                        &transferred, 0);
++      kfree(buffer);
++      if (rc || (transferred != sizeof(cmd))) {
+               nfc_err(&phy->udev->dev,
+                       "Reader power on cmd error %d\n", rc);
+               return rc;
+@@ -490,8 +501,9 @@ static int pn533_usb_probe(struct usb_interface *interface,
+ 
+       phy->in_urb = usb_alloc_urb(0, GFP_KERNEL);
+       phy->out_urb = usb_alloc_urb(0, GFP_KERNEL);
++      phy->ack_urb = usb_alloc_urb(0, GFP_KERNEL);
+ 
+-      if (!phy->in_urb || !phy->out_urb)
++      if (!phy->in_urb || !phy->out_urb || !phy->ack_urb)
+               goto error;
+ 
+       usb_fill_bulk_urb(phy->in_urb, phy->udev,
+@@ -501,7 +513,9 @@ static int pn533_usb_probe(struct usb_interface *interface,
+       usb_fill_bulk_urb(phy->out_urb, phy->udev,
+                         usb_sndbulkpipe(phy->udev, out_endpoint),
+                         NULL, 0, pn533_send_complete, phy);
+-
++      usb_fill_bulk_urb(phy->ack_urb, phy->udev,
++                        usb_sndbulkpipe(phy->udev, out_endpoint),
++                        NULL, 0, pn533_send_complete, phy);
+ 
+       switch (id->driver_info) {
+       case PN533_DEVICE_STD:
+@@ -554,6 +568,7 @@ static int pn533_usb_probe(struct usb_interface *interface,
+ error:
+       usb_free_urb(phy->in_urb);
+       usb_free_urb(phy->out_urb);
++      usb_free_urb(phy->ack_urb);
+       usb_put_dev(phy->udev);
+       kfree(in_buf);
+ 
+@@ -573,10 +588,13 @@ static void pn533_usb_disconnect(struct usb_interface *interface)
+ 
+       usb_kill_urb(phy->in_urb);
+       usb_kill_urb(phy->out_urb);
++      usb_kill_urb(phy->ack_urb);
+ 
+       kfree(phy->in_urb->transfer_buffer);
+       usb_free_urb(phy->in_urb);
+       usb_free_urb(phy->out_urb);
++      usb_free_urb(phy->ack_urb);
++      kfree(phy->ack_buffer);
+ 
+       nfc_info(&interface->dev, "NXP PN533 NFC device disconnected\n");
+ }
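
The pn533 hunks above stop pointing USB transfers at const or stack data and instead keep a heap copy alive for the lifetime of the transfer: kmemdup() for the ACK frame, and a duplicated buffer passed to usb_bulk_msg() for the power-on command. A plain-C model of that ownership rule; submit_async() is a placeholder for usb_submit_urb(), not a real API:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* kmemdup() equivalent: a heap copy the async layer may safely keep using */
static unsigned char *dup_buffer(const unsigned char *src, size_t len)
{
	unsigned char *p = malloc(len);

	if (p)
		memcpy(p, src, len);
	return p;
}

/* placeholder for usb_submit_urb(): a real async API keeps using buf
 * after this call returns, which is why stack/const data is unsafe */
static int submit_async(const unsigned char *buf, size_t len)
{
	printf("submitted %zu byte(s), first byte 0x%02x\n", len, buf[0]);
	return 0;
}

int main(void)
{
	static const unsigned char ack[6] = { 0x00, 0x00, 0xff, 0x00, 0xff, 0x00 };
	unsigned char *buf = dup_buffer(ack, sizeof(ack));

	if (!buf)
		return 1;
	submit_async(buf, sizeof(ack));
	free(buf);      /* in the driver: only once the URB has completed */
	return 0;
}
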
+diff --git a/drivers/phy/qualcomm/phy-qcom-qusb2.c b/drivers/phy/qualcomm/phy-qcom-qusb2.c
+index 94afeac1a19e..40fdef8b5b75 100644
+--- a/drivers/phy/qualcomm/phy-qcom-qusb2.c
++++ b/drivers/phy/qualcomm/phy-qcom-qusb2.c
+@@ -315,6 +315,10 @@ static void qusb2_phy_set_tune2_param(struct qusb2_phy *qphy)
+       const struct qusb2_phy_cfg *cfg = qphy->cfg;
+       u8 *val;
+ 
++      /* efuse register is optional */
++      if (!qphy->cell)
++              return;
++
+       /*
+        * Read efuse register having TUNE2/1 parameter's high nibble.
+        * If efuse register shows value as 0x0, or if we fail to find
+diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c
+index e74db7902549..a68329411b29 100644
+--- a/drivers/staging/android/ion/ion.c
++++ b/drivers/staging/android/ion/ion.c
+@@ -114,8 +114,11 @@ static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
+ 
+ void ion_buffer_destroy(struct ion_buffer *buffer)
+ {
+-      if (WARN_ON(buffer->kmap_cnt > 0))
++      if (buffer->kmap_cnt > 0) {
++              pr_warn_once("%s: buffer still mapped in the kernel\n",
++                           __func__);
+               buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
++      }
+       buffer->heap->ops->free(buffer);
+       kfree(buffer);
+ }
+diff --git a/drivers/tty/serial/8250/8250_omap.c b/drivers/tty/serial/8250/8250_omap.c
+index 624b501fd253..93de20e87abe 100644
+--- a/drivers/tty/serial/8250/8250_omap.c
++++ b/drivers/tty/serial/8250/8250_omap.c
+@@ -1110,13 +1110,14 @@ static int omap8250_no_handle_irq(struct uart_port *port)
+       return 0;
+ }
+ 
++static const u8 omap4_habit = UART_ERRATA_CLOCK_DISABLE;
+ static const u8 am3352_habit = OMAP_DMA_TX_KICK | UART_ERRATA_CLOCK_DISABLE;
+ static const u8 dra742_habit = UART_ERRATA_CLOCK_DISABLE;
+ 
+ static const struct of_device_id omap8250_dt_ids[] = {
+       { .compatible = "ti,omap2-uart" },
+       { .compatible = "ti,omap3-uart" },
+-      { .compatible = "ti,omap4-uart" },
++      { .compatible = "ti,omap4-uart", .data = &omap4_habit, },
+       { .compatible = "ti,am3352-uart", .data = &am3352_habit, },
+       { .compatible = "ti,am4372-uart", .data = &am3352_habit, },
+       { .compatible = "ti,dra742-uart", .data = &dra742_habit, },
+@@ -1353,6 +1354,19 @@ static int omap8250_soft_reset(struct device *dev)
+       int sysc;
+       int syss;
+ 
++      /*
++       * At least on omap4, unused uarts may not idle after reset without
++       * a basic scr dma configuration even with no dma in use. The
++       * module clkctrl status bits will be 1 instead of 3 blocking idle
++       * for the whole clockdomain. The softreset below will clear scr,
++       * and we restore it on resume so this is safe to do on all SoCs
++       * needing omap8250_soft_reset() quirk. Do it in two writes as
++       * recommended in the comment for omap8250_update_scr().
++       */
++      serial_out(up, UART_OMAP_SCR, OMAP_UART_SCR_DMAMODE_1);
++      serial_out(up, UART_OMAP_SCR,
++                 OMAP_UART_SCR_DMAMODE_1 | OMAP_UART_SCR_DMAMODE_CTL);
++
+       sysc = serial_in(up, UART_OMAP_SYSC);
+ 
+       /* softreset the UART */
+diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
+index 4b40a5b449ee..ebd33c0232e6 100644
+--- a/drivers/tty/serial/amba-pl011.c
++++ b/drivers/tty/serial/amba-pl011.c
+@@ -1727,10 +1727,26 @@ static int pl011_allocate_irq(struct uart_amba_port *uap)
+  */
+ static void pl011_enable_interrupts(struct uart_amba_port *uap)
+ {
++      unsigned int i;
++
+       spin_lock_irq(&uap->port.lock);
+ 
+       /* Clear out any spuriously appearing RX interrupts */
+       pl011_write(UART011_RTIS | UART011_RXIS, uap, REG_ICR);
++
++      /*
++       * RXIS is asserted only when the RX FIFO transitions from below
++       * to above the trigger threshold.  If the RX FIFO is already
++       * full to the threshold this can't happen and RXIS will now be
++       * stuck off.  Drain the RX FIFO explicitly to fix this:
++       */
++      for (i = 0; i < uap->fifosize * 2; ++i) {
++              if (pl011_read(uap, REG_FR) & UART01x_FR_RXFE)
++                      break;
++
++              pl011_read(uap, REG_DR);
++      }
++
+       uap->im = UART011_RTIM;
+       if (!pl011_dma_rx_running(uap))
+               uap->im |= UART011_RXIM;
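
The pl011_enable_interrupts() hunk above drains the RX FIFO by hand before unmasking interrupts, because RXIS fires only when the FIFO crosses the trigger threshold: a FIFO already sitting above the threshold would never raise it again. The loop is bounded at twice the FIFO size so a stuck flag register cannot hang the CPU. A userspace model with stand-ins for the FR/DR register reads:

#include <stdio.h>

#define FIFOSIZE 16

static int fill = FIFOSIZE;             /* pretend the RX FIFO is full */

static int fifo_empty(void) { return fill == 0; }       /* FR & RXFE */
static void fifo_pop(void)  { fill--; }                 /* read of DR */

int main(void)
{
	unsigned int i;

	/* bounded drain: at most 2 * FIFOSIZE reads, as in the patch */
	for (i = 0; i < FIFOSIZE * 2; ++i) {
		if (fifo_empty())
			break;
		fifo_pop();
	}
	printf("drained after %u read(s)\n", i);
	return 0;
}
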
+diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
+index e287fe8f10fc..55b3eff148b1 100644
+--- a/drivers/tty/serial/atmel_serial.c
++++ b/drivers/tty/serial/atmel_serial.c
+@@ -1757,7 +1757,6 @@ static int atmel_startup(struct uart_port *port)
+ {
+       struct platform_device *pdev = to_platform_device(port->dev);
+       struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
+-      struct tty_struct *tty = port->state->port.tty;
+       int retval;
+ 
+       /*
+@@ -1772,8 +1771,8 @@ static int atmel_startup(struct uart_port *port)
+        * Allocate the IRQ
+        */
+       retval = request_irq(port->irq, atmel_interrupt,
+-                      IRQF_SHARED | IRQF_COND_SUSPEND,
+-                      tty ? tty->name : "atmel_serial", port);
++                           IRQF_SHARED | IRQF_COND_SUSPEND,
++                           dev_name(&pdev->dev), port);
+       if (retval) {
+               dev_err(port->dev, "atmel_startup - Can't get irq\n");
+               return retval;
+diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c
+index 3f2f8c118ce0..64e96926f1ad 100644
+--- a/drivers/tty/serial/samsung.c
++++ b/drivers/tty/serial/samsung.c
+@@ -862,15 +862,12 @@ static int s3c24xx_serial_request_dma(struct s3c24xx_uart_port *p)
+       dma->rx_conf.direction          = DMA_DEV_TO_MEM;
+       dma->rx_conf.src_addr_width     = DMA_SLAVE_BUSWIDTH_1_BYTE;
+       dma->rx_conf.src_addr           = p->port.mapbase + S3C2410_URXH;
+-      dma->rx_conf.src_maxburst       = 16;
++      dma->rx_conf.src_maxburst       = 1;
+ 
+       dma->tx_conf.direction          = DMA_MEM_TO_DEV;
+       dma->tx_conf.dst_addr_width     = DMA_SLAVE_BUSWIDTH_1_BYTE;
+       dma->tx_conf.dst_addr           = p->port.mapbase + S3C2410_UTXH;
+-      if (dma_get_cache_alignment() >= 16)
+-              dma->tx_conf.dst_maxburst = 16;
+-      else
+-              dma->tx_conf.dst_maxburst = 1;
++      dma->tx_conf.dst_maxburst       = 1;
+ 
+       dma->rx_chan = dma_request_chan(p->port.dev, "rx");
+ 
+diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
+index fdbbff547106..a4f82ec665fe 100644
+--- a/drivers/tty/serial/sh-sci.c
++++ b/drivers/tty/serial/sh-sci.c
+@@ -2704,8 +2704,8 @@ static int sci_init_clocks(struct sci_port *sci_port, struct device *dev)
+                       dev_dbg(dev, "failed to get %s (%ld)\n", clk_names[i],
+                               PTR_ERR(clk));
+               else
+-                      dev_dbg(dev, "clk %s is %pC rate %pCr\n", clk_names[i],
+-                              clk, clk);
++                      dev_dbg(dev, "clk %s is %pC rate %lu\n", clk_names[i],
++                              clk, clk_get_rate(clk));
+               sci_port->clks[i] = IS_ERR(clk) ? NULL : clk;
+       }
+       return 0;
+diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
+index 0c11d40a12bc..7b137003c2be 100644
+--- a/drivers/usb/core/message.c
++++ b/drivers/usb/core/message.c
+@@ -940,7 +940,7 @@ int usb_set_isoch_delay(struct usb_device *dev)
+       return usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
+                       USB_REQ_SET_ISOCH_DELAY,
+                       USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE,
+-                      cpu_to_le16(dev->hub_delay), 0, NULL, 0,
++                      dev->hub_delay, 0, NULL, 0,
+                       USB_CTRL_SET_TIMEOUT);
+ }
+ 
+diff --git a/drivers/usb/gadget/function/f_printer.c b/drivers/usb/gadget/function/f_printer.c
+index d359efe06c76..9c7ed2539ff7 100644
+--- a/drivers/usb/gadget/function/f_printer.c
++++ b/drivers/usb/gadget/function/f_printer.c
+@@ -631,19 +631,19 @@ printer_write(struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
+                       return -EAGAIN;
+               }
+ 
++              list_add(&req->list, &dev->tx_reqs_active);
++
+               /* here, we unlock, and only unlock, to avoid deadlock. */
+               spin_unlock(&dev->lock);
+               value = usb_ep_queue(dev->in_ep, req, GFP_ATOMIC);
+               spin_lock(&dev->lock);
+               if (value) {
++                      list_del(&req->list);
+                       list_add(&req->list, &dev->tx_reqs);
+                       spin_unlock_irqrestore(&dev->lock, flags);
+                       mutex_unlock(&dev->lock_printer_io);
+                       return -EAGAIN;
+               }
+-
+-              list_add(&req->list, &dev->tx_reqs_active);
+-
+       }
+ 
+       spin_unlock_irqrestore(&dev->lock, flags);
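
The printer_write() hunk above reorders the list maintenance around usb_ep_queue(): the request goes onto tx_reqs_active before the lock is dropped and the transfer is queued, because the completion handler may run immediately and expects to unlink it; on a queueing failure it is taken back off. A compact single-threaded model of that ordering, with a "completion" that runs synchronously inside the queue call:

#include <stdio.h>

struct req { struct req *next; };

static struct req *active;              /* model of dev->tx_reqs_active */

static void list_add(struct req *r)
{
	r->next = active;
	active = r;
}

static void list_del(struct req *r)
{
	struct req **p;

	for (p = &active; *p; p = &(*p)->next) {
		if (*p == r) {
			*p = r->next;
			return;
		}
	}
}

/* model of usb_ep_queue(): the completion handler can run before this
 * returns, and it unlinks the request from the active list */
static int ep_queue(struct req *r)
{
	list_del(r);            /* "completion" fires synchronously here */
	return 0;
}

int main(void)
{
	struct req r;

	list_add(&r);           /* on the list BEFORE queueing: the fix */
	if (ep_queue(&r))
		list_del(&r);   /* failed to queue: take it back off */
	printf("active list is %s\n", active ? "non-empty" : "empty");
	return 0;
}
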
+diff --git a/drivers/usb/gadget/udc/renesas_usb3.c b/drivers/usb/gadget/udc/renesas_usb3.c
+index 409cde4e6a51..5caf78bbbf7c 100644
+--- a/drivers/usb/gadget/udc/renesas_usb3.c
++++ b/drivers/usb/gadget/udc/renesas_usb3.c
+@@ -333,6 +333,7 @@ struct renesas_usb3 {
+       struct extcon_dev *extcon;
+       struct work_struct extcon_work;
+       struct phy *phy;
++      struct dentry *dentry;
+ 
+       struct renesas_usb3_ep *usb3_ep;
+       int num_usb3_eps;
+@@ -622,6 +623,13 @@ static void usb3_disconnect(struct renesas_usb3 *usb3)
+       usb3_usb2_pullup(usb3, 0);
+       usb3_clear_bit(usb3, USB30_CON_B3_CONNECT, USB3_USB30_CON);
+       usb3_reset_epc(usb3);
++      usb3_disable_irq_1(usb3, USB_INT_1_B2_RSUM | USB_INT_1_B3_PLLWKUP |
++                         USB_INT_1_B3_LUPSUCS | USB_INT_1_B3_DISABLE |
++                         USB_INT_1_SPEED | USB_INT_1_B3_WRMRST |
++                         USB_INT_1_B3_HOTRST | USB_INT_1_B2_SPND |
++                         USB_INT_1_B2_L1SPND | USB_INT_1_B2_USBRST);
++      usb3_clear_bit(usb3, USB_COM_CON_SPD_MODE, USB3_USB_COM_CON);
++      usb3_init_epc_registers(usb3);
+ 
+       if (usb3->driver)
+               usb3->driver->disconnect(&usb3->gadget);
+@@ -2393,8 +2401,12 @@ static void renesas_usb3_debugfs_init(struct renesas_usb3 *usb3,
+ 
+       file = debugfs_create_file("b_device", 0644, root, usb3,
+                                  &renesas_usb3_b_device_fops);
+-      if (!file)
++      if (!file) {
+               dev_info(dev, "%s: Can't create debugfs mode\n", __func__);
++              debugfs_remove_recursive(root);
++      } else {
++              usb3->dentry = root;
++      }
+ }
+ 
+ /*------- platform_driver ------------------------------------------------*/
+@@ -2402,14 +2414,13 @@ static int renesas_usb3_remove(struct platform_device *pdev)
+ {
+       struct renesas_usb3 *usb3 = platform_get_drvdata(pdev);
+ 
++      debugfs_remove_recursive(usb3->dentry);
+       device_remove_file(&pdev->dev, &dev_attr_role);
+ 
+       usb_del_gadget_udc(&usb3->gadget);
+       renesas_usb3_dma_free_prd(usb3, &pdev->dev);
+ 
+       __renesas_usb3_ep_free_request(usb3->ep0_req);
+-      if (usb3->phy)
+-              phy_put(usb3->phy);
+       pm_runtime_disable(&pdev->dev);
+ 
+       return 0;
+@@ -2628,6 +2639,17 @@ static int renesas_usb3_probe(struct platform_device *pdev)
+       if (ret < 0)
+               goto err_alloc_prd;
+ 
++      /*
++       * This is optional. So, if this driver cannot get a phy,
++       * this driver will not handle a phy anymore.
++       */
++      usb3->phy = devm_phy_optional_get(&pdev->dev, "usb");
++      if (IS_ERR(usb3->phy)) {
++              ret = PTR_ERR(usb3->phy);
++              goto err_add_udc;
++      }
++
++      pm_runtime_enable(&pdev->dev);
+       ret = usb_add_gadget_udc(&pdev->dev, &usb3->gadget);
+       if (ret < 0)
+               goto err_add_udc;
+@@ -2636,20 +2658,11 @@ static int renesas_usb3_probe(struct platform_device *pdev)
+       if (ret < 0)
+               goto err_dev_create;
+ 
+-      /*
+-       * This is an optional. So, if this driver cannot get a phy,
+-       * this driver will not handle a phy anymore.
+-       */
+-      usb3->phy = devm_phy_get(&pdev->dev, "usb");
+-      if (IS_ERR(usb3->phy))
+-              usb3->phy = NULL;
+-
+       usb3->workaround_for_vbus = priv->workaround_for_vbus;
+ 
+       renesas_usb3_debugfs_init(usb3, &pdev->dev);
+ 
+       dev_info(&pdev->dev, "probed%s\n", usb3->phy ? " with phy" : "");
+-      pm_runtime_enable(usb3_to_dev(usb3));
+ 
+       return 0;
+ 
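
The renesas_usb3 probe/remove hunks above replace a post-registration devm_phy_get() with devm_phy_optional_get() before usb_add_gadget_udc(), and drop the manual phy_put() from remove (the devm core releases the PHY). The distinction that makes the new error handling work: devm_phy_optional_get() returns NULL rather than an error pointer when no PHY is described for the device, so IS_ERR() is true only for real failures such as -EPROBE_DEFER. A short sketch of the pattern (the error label is a placeholder, not the driver's actual one):

	usb3->phy = devm_phy_optional_get(&pdev->dev, "usb");
	if (IS_ERR(usb3->phy)) {
		ret = PTR_ERR(usb3->phy);	/* e.g. -EPROBE_DEFER */
		goto err_cleanup;
	}
	/* usb3->phy == NULL here simply means "no PHY": the generic PHY
	 * calls (phy_init(), phy_power_on(), ...) accept NULL and become
	 * no-ops, so the call sites need no extra checks. */
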
+diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c
+index 6034c39b67d1..9e9de5452860 100644
+--- a/drivers/usb/storage/uas.c
++++ b/drivers/usb/storage/uas.c
+@@ -836,6 +836,12 @@ static int uas_slave_configure(struct scsi_device *sdev)
+       if (devinfo->flags & US_FL_BROKEN_FUA)
+               sdev->broken_fua = 1;
+ 
++      /* UAS also needs to support FL_ALWAYS_SYNC */
++      if (devinfo->flags & US_FL_ALWAYS_SYNC) {
++              sdev->skip_ms_page_3f = 1;
++              sdev->skip_ms_page_8 = 1;
++              sdev->wce_default_on = 1;
++      }
+       scsi_change_queue_depth(sdev, devinfo->qdepth - 2);
+       return 0;
+ }
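
The uas.c hunk mirrors US_FL_ALWAYS_SYNC handling that already exists in usb-storage, since the two drivers run separate slave-configure hooks. The three sdev flags tell the SCSI disk driver to skip the mode pages the device mishandles (0x3F and the caching page 0x08) and to assume a write cache is enabled, so the kernel keeps issuing SYNCHRONIZE CACHE to it. For an ad-hoc test without a kernel rebuild, the quirk can also be applied from the command line, assuming 'y' is the quirks letter assigned to ALWAYS_SYNC in the same patch series:

	usb-storage.quirks=4971:8024:y
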
+diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
+index 747d3a9596d9..22fcfccf453a 100644
+--- a/drivers/usb/storage/unusual_devs.h
++++ b/drivers/usb/storage/unusual_devs.h
+@@ -2321,6 +2321,15 @@ UNUSUAL_DEV(  0x4146, 0xba01, 0x0100, 0x0100,
+               "Micro Mini 1GB",
+               USB_SC_DEVICE, USB_PR_DEVICE, NULL, US_FL_NOT_LOCKABLE ),
+ 
++/* "G-DRIVE" external HDD hangs on write without these.
++ * Patch submitted by Alexander Kappner <[email protected]>
++ */
++UNUSUAL_DEV(0x4971, 0x8024, 0x0000, 0x9999,
++              "SimpleTech",
++              "External HDD",
++              USB_SC_DEVICE, USB_PR_DEVICE, NULL,
++              US_FL_ALWAYS_SYNC),
++
+ /*
+  * Nick Bowler <[email protected]>
+  * SCSI stack spams (otherwise harmless) error messages.
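
For reference, the UNUSUAL_DEV() fields in the entry added above (an annotated copy, reproduced only to name the positions):

	UNUSUAL_DEV(0x4971, 0x8024,	/* idVendor, idProduct */
		0x0000, 0x9999,		/* bcdDevice min/max: all revisions */
		"SimpleTech",		/* vendor string, reporting only */
		"External HDD",		/* product string */
		USB_SC_DEVICE,		/* keep the device's own subclass */
		USB_PR_DEVICE,		/* keep the device's own protocol */
		NULL,			/* no init function */
		US_FL_ALWAYS_SYNC),	/* quirk flags */
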
+diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h
+index 38434d88954a..d0bdebd87ce3 100644
+--- a/drivers/usb/storage/unusual_uas.h
++++ b/drivers/usb/storage/unusual_uas.h
+@@ -107,3 +107,12 @@ UNUSUAL_DEV(0x4971, 0x8017, 0x0000, 0x9999,
+               "External HDD",
+               USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+               US_FL_NO_REPORT_OPCODES),
++
++/* "G-DRIVE" external HDD hangs on write without these.
++ * Patch submitted by Alexander Kappner <[email protected]>
++ */
++UNUSUAL_DEV(0x4971, 0x8024, 0x0000, 0x9999,
++              "SimpleTech",
++              "External HDD",
++              USB_SC_DEVICE, USB_PR_DEVICE, NULL,
++              US_FL_ALWAYS_SYNC),
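
The entry is intentionally duplicated: unusual_devs.h feeds the usb-storage (Bulk-Only) driver and unusual_uas.h feeds the uas driver, so a UAS-capable enclosure needs the flag in both tables to keep the quirk regardless of which driver it binds to. The uas_slave_configure() hunk further up is what makes the flag meaningful on the UAS side.
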
+diff --git a/drivers/usb/typec/typec_wcove.c b/drivers/usb/typec/typec_wcove.c
+index 19cca7f1b2c5..58dc7ea7cf0d 100644
+--- a/drivers/usb/typec/typec_wcove.c
++++ b/drivers/usb/typec/typec_wcove.c
+@@ -202,6 +202,10 @@ static int wcove_init(struct tcpc_dev *tcpc)
+       struct wcove_typec *wcove = tcpc_to_wcove(tcpc);
+       int ret;
+ 
++      ret = regmap_write(wcove->regmap, USBC_CONTROL1, 0);
++      if (ret)
++              return ret;
++
+       /* Unmask everything */
+       ret = regmap_write(wcove->regmap, USBC_IRQMASK1, 0);
+       if (ret)
+@@ -285,8 +289,30 @@ static int wcove_get_cc(struct tcpc_dev *tcpc, enum typec_cc_status *cc1,
+ 
+ static int wcove_set_cc(struct tcpc_dev *tcpc, enum typec_cc_status cc)
+ {
+-      /* XXX: Relying on the HW FSM to configure things correctly for now */
+-      return 0;
++      struct wcove_typec *wcove = tcpc_to_wcove(tcpc);
++      unsigned int ctrl;
++
++      switch (cc) {
++      case TYPEC_CC_RD:
++              ctrl = USBC_CONTROL1_MODE_SNK;
++              break;
++      case TYPEC_CC_RP_DEF:
++              ctrl = USBC_CONTROL1_CURSRC_UA_80 | USBC_CONTROL1_MODE_SRC;
++              break;
++      case TYPEC_CC_RP_1_5:
++              ctrl = USBC_CONTROL1_CURSRC_UA_180 | USBC_CONTROL1_MODE_SRC;
++              break;
++      case TYPEC_CC_RP_3_0:
++              ctrl = USBC_CONTROL1_CURSRC_UA_330 | USBC_CONTROL1_MODE_SRC;
++              break;
++      case TYPEC_CC_OPEN:
++              ctrl = 0;
++              break;
++      default:
++              return -EINVAL;
++      }
++
++      return regmap_write(wcove->regmap, USBC_CONTROL1, ctrl);
+ }
+ 
+ static int wcove_set_polarity(struct tcpc_dev *tcpc, enum typec_cc_polarity pol)
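
wcove_set_cc() now programs the CC pull explicitly instead of deferring to the hardware FSM. The CURSRC values correspond to the Type-C Rp current-source advertisement levels: 80 uA advertises default USB power, 180 uA advertises 1.5 A, and 330 uA advertises 3.0 A. For orientation, this is roughly how the TCPM core drives the callback (a sketch against the generic struct tcpc_dev, not Whiskey Cove-specific code):

	tcpc->set_cc(tcpc, TYPEC_CC_RP_1_5);	/* source: Rp, 1.5 A advertisement */
	tcpc->set_cc(tcpc, TYPEC_CC_RD);	/* sink: present Rd on CC */
	tcpc->set_cc(tcpc, TYPEC_CC_OPEN);	/* remove both pulls */
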
+diff --git a/drivers/usb/usbip/vhci_sysfs.c b/drivers/usb/usbip/vhci_sysfs.c
+index 48808388ec33..be37aec250c2 100644
+--- a/drivers/usb/usbip/vhci_sysfs.c
++++ b/drivers/usb/usbip/vhci_sysfs.c
+@@ -10,6 +10,9 @@
+ #include <linux/platform_device.h>
+ #include <linux/slab.h>
+ 
++/* Hardening for Spectre-v1 */
++#include <linux/nospec.h>
++
+ #include "usbip_common.h"
+ #include "vhci.h"
+ 
+@@ -205,16 +208,20 @@ static int vhci_port_disconnect(struct vhci_hcd *vhci_hcd, __u32 rhport)
+       return 0;
+ }
+ 
+-static int valid_port(__u32 pdev_nr, __u32 rhport)
++static int valid_port(__u32 *pdev_nr, __u32 *rhport)
+ {
+-      if (pdev_nr >= vhci_num_controllers) {
+-              pr_err("pdev %u\n", pdev_nr);
++      if (*pdev_nr >= vhci_num_controllers) {
++              pr_err("pdev %u\n", *pdev_nr);
+               return 0;
+       }
+-      if (rhport >= VHCI_HC_PORTS) {
+-              pr_err("rhport %u\n", rhport);
++      *pdev_nr = array_index_nospec(*pdev_nr, vhci_num_controllers);
++
++      if (*rhport >= VHCI_HC_PORTS) {
++              pr_err("rhport %u\n", *rhport);
+               return 0;
+       }
++      *rhport = array_index_nospec(*rhport, VHCI_HC_PORTS);
++
+       return 1;
+ }
+ 
+@@ -232,7 +239,7 @@ static ssize_t detach_store(struct device *dev, struct device_attribute *attr,
+       pdev_nr = port_to_pdev_nr(port);
+       rhport = port_to_rhport(port);
+ 
+-      if (!valid_port(pdev_nr, rhport))
++      if (!valid_port(&pdev_nr, &rhport))
+               return -EINVAL;
+ 
+       hcd = platform_get_drvdata(vhcis[pdev_nr].pdev);
+@@ -258,7 +265,8 @@ static ssize_t detach_store(struct device *dev, struct device_attribute *attr,
+ }
+ static DEVICE_ATTR_WO(detach);
+ 
+-static int valid_args(__u32 pdev_nr, __u32 rhport, enum usb_device_speed speed)
++static int valid_args(__u32 *pdev_nr, __u32 *rhport,
++                    enum usb_device_speed speed)
+ {
+       if (!valid_port(pdev_nr, rhport)) {
+               return 0;
+@@ -322,7 +330,7 @@ static ssize_t attach_store(struct device *dev, struct device_attribute *attr,
+                            sockfd, devid, speed);
+ 
+       /* check received parameters */
+-      if (!valid_args(pdev_nr, rhport, speed))
++      if (!valid_args(&pdev_nr, &rhport, speed))
+               return -EINVAL;
+ 
+       hcd = platform_get_drvdata(vhcis[pdev_nr].pdev);
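
The vhci_sysfs changes follow the standard Spectre-v1 hardening shape: a bounds check alone does not stop a mispredicted branch from speculatively indexing with an out-of-range value, so the index is clamped with array_index_nospec() after the check, and valid_port()/valid_args() now take pointers so callers index the array with the sanitized value rather than the original. A generic sketch of the pattern (names are illustrative, not from vhci):

	#include <linux/nospec.h>

	static struct item *lookup(struct item *table, u32 idx, u32 nr)
	{
		if (idx >= nr)
			return NULL;
		/* Clamp idx so that, even under branch misprediction, the
		 * CPU cannot speculatively read table[] out of bounds. */
		idx = array_index_nospec(idx, nr);
		return &table[idx];
	}
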
+diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
+index b02c41e53d56..39e364c70caf 100644
+--- a/include/uapi/linux/kvm.h
++++ b/include/uapi/linux/kvm.h
+@@ -677,10 +677,10 @@ struct kvm_ioeventfd {
+ };
+ 
+ #define KVM_X86_DISABLE_EXITS_MWAIT          (1 << 0)
+-#define KVM_X86_DISABLE_EXITS_HTL            (1 << 1)
++#define KVM_X86_DISABLE_EXITS_HLT            (1 << 1)
+ #define KVM_X86_DISABLE_EXITS_PAUSE          (1 << 2)
+ #define KVM_X86_DISABLE_VALID_EXITS          (KVM_X86_DISABLE_EXITS_MWAIT | \
+-                                              KVM_X86_DISABLE_EXITS_HTL | \
++                                              KVM_X86_DISABLE_EXITS_HLT | \
+                                               KVM_X86_DISABLE_EXITS_PAUSE)
+ 
+ /* for KVM_ENABLE_CAP */
+diff --git a/tools/include/uapi/linux/kvm.h b/tools/include/uapi/linux/kvm.h
+index b02c41e53d56..39e364c70caf 100644
+--- a/tools/include/uapi/linux/kvm.h
++++ b/tools/include/uapi/linux/kvm.h
+@@ -677,10 +677,10 @@ struct kvm_ioeventfd {
+ };
+ 
+ #define KVM_X86_DISABLE_EXITS_MWAIT          (1 << 0)
+-#define KVM_X86_DISABLE_EXITS_HTL            (1 << 1)
++#define KVM_X86_DISABLE_EXITS_HLT            (1 << 1)
+ #define KVM_X86_DISABLE_EXITS_PAUSE          (1 << 2)
+ #define KVM_X86_DISABLE_VALID_EXITS          (KVM_X86_DISABLE_EXITS_MWAIT | \
+-                                              KVM_X86_DISABLE_EXITS_HTL | \
++                                              KVM_X86_DISABLE_EXITS_HLT | \
+                                               KVM_X86_DISABLE_EXITS_PAUSE)
+ 
+ /* for KVM_ENABLE_CAP */
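
The kvm.h hunks fix a misspelled macro name (HTL -> HLT, the x86 halt instruction); the bit value is unchanged, so only source compatibility for userspace built against the header is affected. For context, the flag is consumed through KVM_ENABLE_CAP on a VM fd; a userspace sketch (fd setup and error handling assumed, omitted here):

	struct kvm_enable_cap cap = {
		.cap = KVM_CAP_X86_DISABLE_EXITS,
		.args = { KVM_X86_DISABLE_EXITS_HLT },
	};
	/* With HLT exits disabled, a vCPU executing HLT halts in guest
	 * mode instead of trapping out to the host. */
	ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
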
