diff --git a/Makefile b/Makefile
index e45c66b27241..55554f392115 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 4
 PATCHLEVEL = 16
-SUBLEVEL = 15
+SUBLEVEL = 16
 EXTRAVERSION =
 NAME = Fearless Coyote
 
diff --git a/arch/x86/include/asm/kvm_emulate.h b/arch/x86/include/asm/kvm_emulate.h
index b24b1c8b3979..0f82cd91cd3c 100644
--- a/arch/x86/include/asm/kvm_emulate.h
+++ b/arch/x86/include/asm/kvm_emulate.h
@@ -107,11 +107,12 @@ struct x86_emulate_ops {
         *  @addr:  [IN ] Linear address from which to read.
         *  @val:   [OUT] Value read from memory, zero-extended to 'u_long'.
         *  @bytes: [IN ] Number of bytes to read from memory.
+        *  @system:[IN ] Whether the access is forced to be at CPL0.
         */
        int (*read_std)(struct x86_emulate_ctxt *ctxt,
                        unsigned long addr, void *val,
                        unsigned int bytes,
-                       struct x86_exception *fault);
+                       struct x86_exception *fault, bool system);
 
        /*
         * read_phys: Read bytes of standard (non-emulated/special) memory.
@@ -129,10 +130,11 @@ struct x86_emulate_ops {
         *  @addr:  [IN ] Linear address to which to write.
         *  @val:   [OUT] Value write to memory, zero-extended to 'u_long'.
         *  @bytes: [IN ] Number of bytes to write to memory.
+        *  @system:[IN ] Whether the access is forced to be at CPL0.
         */
        int (*write_std)(struct x86_emulate_ctxt *ctxt,
                         unsigned long addr, void *val, unsigned int bytes,
-                        struct x86_exception *fault);
+                        struct x86_exception *fault, bool system);
        /*
         * fetch: Read bytes of standard (non-emulated/special) memory.
         *        Used for instruction fetch.
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index d91eaeb01034..defbce750e7c 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -811,6 +811,19 @@ static inline int jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
        return assign_eip_near(ctxt, ctxt->_eip + rel);
 }
 
+static int linear_read_system(struct x86_emulate_ctxt *ctxt, ulong linear,
+                             void *data, unsigned size)
+{
+       return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception, true);
+}
+
+static int linear_write_system(struct x86_emulate_ctxt *ctxt,
+                              ulong linear, void *data,
+                              unsigned int size)
+{
+       return ctxt->ops->write_std(ctxt, linear, data, size, &ctxt->exception, true);
+}
+
 static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
                              struct segmented_address addr,
                              void *data,
@@ -822,7 +835,7 @@ static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
        rc = linearize(ctxt, addr, size, false, &linear);
        if (rc != X86EMUL_CONTINUE)
                return rc;
-       return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception);
+       return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception, false);
 }
 
 static int segmented_write_std(struct x86_emulate_ctxt *ctxt,
@@ -836,7 +849,7 @@ static int segmented_write_std(struct x86_emulate_ctxt *ctxt,
        rc = linearize(ctxt, addr, size, true, &linear);
        if (rc != X86EMUL_CONTINUE)
                return rc;
-       return ctxt->ops->write_std(ctxt, linear, data, size, &ctxt->exception);
+       return ctxt->ops->write_std(ctxt, linear, data, size, &ctxt->exception, false);
 }
 
 /*
@@ -1495,8 +1508,7 @@ static int read_interrupt_descriptor(struct x86_emulate_ctxt *ctxt,
                return emulate_gp(ctxt, index << 3 | 0x2);
 
        addr = dt.address + index * 8;
-       return ctxt->ops->read_std(ctxt, addr, desc, sizeof *desc,
-                                  &ctxt->exception);
+       return linear_read_system(ctxt, addr, desc, sizeof *desc);
 }
 
 static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt,
@@ -1559,8 +1571,7 @@ static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt,
        if (rc != X86EMUL_CONTINUE)
                return rc;
 
-       return ctxt->ops->read_std(ctxt, *desc_addr_p, desc, sizeof(*desc),
-                                  &ctxt->exception);
+       return linear_read_system(ctxt, *desc_addr_p, desc, sizeof(*desc));
 }
 
 /* allowed just for 8 bytes segments */
@@ -1574,8 +1585,7 @@ static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
        if (rc != X86EMUL_CONTINUE)
                return rc;
 
-       return ctxt->ops->write_std(ctxt, addr, desc, sizeof *desc,
-                                   &ctxt->exception);
+       return linear_write_system(ctxt, addr, desc, sizeof *desc);
 }
 
 static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
@@ -1736,8 +1746,7 @@ static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
                                return ret;
                }
        } else if (ctxt->mode == X86EMUL_MODE_PROT64) {
-               ret = ctxt->ops->read_std(ctxt, desc_addr+8, &base3,
-                               sizeof(base3), &ctxt->exception);
+               ret = linear_read_system(ctxt, desc_addr+8, &base3, sizeof(base3));
                if (ret != X86EMUL_CONTINUE)
                        return ret;
                if (emul_is_noncanonical_address(get_desc_base(&seg_desc) |
@@ -2050,11 +2059,11 @@ static int __emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
        eip_addr = dt.address + (irq << 2);
        cs_addr = dt.address + (irq << 2) + 2;
 
-       rc = ops->read_std(ctxt, cs_addr, &cs, 2, &ctxt->exception);
+       rc = linear_read_system(ctxt, cs_addr, &cs, 2);
        if (rc != X86EMUL_CONTINUE)
                return rc;
 
-       rc = ops->read_std(ctxt, eip_addr, &eip, 2, &ctxt->exception);
+       rc = linear_read_system(ctxt, eip_addr, &eip, 2);
        if (rc != X86EMUL_CONTINUE)
                return rc;
 
@@ -2907,12 +2916,12 @@ static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt,
 #ifdef CONFIG_X86_64
        base |= ((u64)base3) << 32;
 #endif
-       r = ops->read_std(ctxt, base + 102, &io_bitmap_ptr, 2, NULL);
+       r = ops->read_std(ctxt, base + 102, &io_bitmap_ptr, 2, NULL, true);
        if (r != X86EMUL_CONTINUE)
                return false;
        if (io_bitmap_ptr + port/8 > desc_limit_scaled(&tr_seg))
                return false;
-       r = ops->read_std(ctxt, base + io_bitmap_ptr + port/8, &perm, 2, NULL);
+       r = ops->read_std(ctxt, base + io_bitmap_ptr + port/8, &perm, 2, NULL, true);
        if (r != X86EMUL_CONTINUE)
                return false;
        if ((perm >> bit_idx) & mask)
@@ -3041,35 +3050,30 @@ static int task_switch_16(struct x86_emulate_ctxt *ctxt,
                          u16 tss_selector, u16 old_tss_sel,
                          ulong old_tss_base, struct desc_struct *new_desc)
 {
-       const struct x86_emulate_ops *ops = ctxt->ops;
        struct tss_segment_16 tss_seg;
        int ret;
        u32 new_tss_base = get_desc_base(new_desc);
 
-       ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
-                           &ctxt->exception);
+       ret = linear_read_system(ctxt, old_tss_base, &tss_seg, sizeof tss_seg);
        if (ret != X86EMUL_CONTINUE)
                return ret;
 
        save_state_to_tss16(ctxt, &tss_seg);
 
-       ret = ops->write_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
-                            &ctxt->exception);
+       ret = linear_write_system(ctxt, old_tss_base, &tss_seg, sizeof tss_seg);
        if (ret != X86EMUL_CONTINUE)
                return ret;
 
-       ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
-                           &ctxt->exception);
+       ret = linear_read_system(ctxt, new_tss_base, &tss_seg, sizeof tss_seg);
        if (ret != X86EMUL_CONTINUE)
                return ret;
 
        if (old_tss_sel != 0xffff) {
                tss_seg.prev_task_link = old_tss_sel;
 
-               ret = ops->write_std(ctxt, new_tss_base,
-                                    &tss_seg.prev_task_link,
-                                    sizeof tss_seg.prev_task_link,
-                                    &ctxt->exception);
+               ret = linear_write_system(ctxt, new_tss_base,
+                                         &tss_seg.prev_task_link,
+                                         sizeof tss_seg.prev_task_link);
                if (ret != X86EMUL_CONTINUE)
                        return ret;
        }
@@ -3185,38 +3189,34 @@ static int task_switch_32(struct x86_emulate_ctxt *ctxt,
                          u16 tss_selector, u16 old_tss_sel,
                          ulong old_tss_base, struct desc_struct *new_desc)
 {
-       const struct x86_emulate_ops *ops = ctxt->ops;
        struct tss_segment_32 tss_seg;
        int ret;
        u32 new_tss_base = get_desc_base(new_desc);
        u32 eip_offset = offsetof(struct tss_segment_32, eip);
        u32 ldt_sel_offset = offsetof(struct tss_segment_32, ldt_selector);
 
-       ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
-                           &ctxt->exception);
+       ret = linear_read_system(ctxt, old_tss_base, &tss_seg, sizeof tss_seg);
        if (ret != X86EMUL_CONTINUE)
                return ret;
 
        save_state_to_tss32(ctxt, &tss_seg);
 
        /* Only GP registers and segment selectors are saved */
-       ret = ops->write_std(ctxt, old_tss_base + eip_offset, &tss_seg.eip,
-                            ldt_sel_offset - eip_offset, &ctxt->exception);
+       ret = linear_write_system(ctxt, old_tss_base + eip_offset, &tss_seg.eip,
+                                 ldt_sel_offset - eip_offset);
        if (ret != X86EMUL_CONTINUE)
                return ret;
 
-       ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
-                           &ctxt->exception);
+       ret = linear_read_system(ctxt, new_tss_base, &tss_seg, sizeof tss_seg);
        if (ret != X86EMUL_CONTINUE)
                return ret;
 
        if (old_tss_sel != 0xffff) {
                tss_seg.prev_task_link = old_tss_sel;
 
-               ret = ops->write_std(ctxt, new_tss_base,
-                                    &tss_seg.prev_task_link,
-                                    sizeof tss_seg.prev_task_link,
-                                    &ctxt->exception);
+               ret = linear_write_system(ctxt, new_tss_base,
+                                         &tss_seg.prev_task_link,
+                                         sizeof tss_seg.prev_task_link);
                if (ret != X86EMUL_CONTINUE)
                        return ret;
        }
@@ -4177,7 +4177,9 @@ static int check_cr_write(struct x86_emulate_ctxt *ctxt)
                                maxphyaddr = eax & 0xff;
                        else
                                maxphyaddr = 36;
-                       rsvd = rsvd_bits(maxphyaddr, 62);
+                       rsvd = rsvd_bits(maxphyaddr, 63);
+                       if (ctxt->ops->get_cr(ctxt, 4) & X86_CR4_PCIDE)
+                               rsvd &= ~CR3_PCID_INVD;
                }
 
                if (new_val & rsvd)
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 11e2147c3824..e3b589e28264 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -7344,8 +7344,7 @@ static int nested_vmx_get_vmptr(struct kvm_vcpu *vcpu, gpa_t *vmpointer)
                        vmcs_read32(VMX_INSTRUCTION_INFO), false, &gva))
                return 1;
 
-       if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, vmpointer,
-                               sizeof(*vmpointer), &e)) {
+       if (kvm_read_guest_virt(vcpu, gva, vmpointer, sizeof(*vmpointer), &e)) {
                kvm_inject_page_fault(vcpu, &e);
                return 1;
        }
@@ -7426,6 +7425,12 @@ static int handle_vmon(struct kvm_vcpu *vcpu)
                return 1;
        }
 
+       /* CPL=0 must be checked manually. */
+       if (vmx_get_cpl(vcpu)) {
+               kvm_queue_exception(vcpu, UD_VECTOR);
+               return 1;
+       }
+
        if (vmx->nested.vmxon) {
                nested_vmx_failValid(vcpu, VMXERR_VMXON_IN_VMX_ROOT_OPERATION);
                return kvm_skip_emulated_instruction(vcpu);
@@ -7485,6 +7490,11 @@ static int handle_vmon(struct kvm_vcpu *vcpu)
  */
 static int nested_vmx_check_permission(struct kvm_vcpu *vcpu)
 {
+       if (vmx_get_cpl(vcpu)) {
+               kvm_queue_exception(vcpu, UD_VECTOR);
+               return 0;
+       }
+
        if (!to_vmx(vcpu)->nested.vmxon) {
                kvm_queue_exception(vcpu, UD_VECTOR);
                return 0;
@@ -7785,9 +7795,9 @@ static int handle_vmread(struct kvm_vcpu *vcpu)
                if (get_vmx_mem_address(vcpu, exit_qualification,
                                vmx_instruction_info, true, &gva))
                        return 1;
-               /* _system ok, as hardware has verified cpl=0 */
-               kvm_write_guest_virt_system(&vcpu->arch.emulate_ctxt, gva,
-                            &field_value, (is_long_mode(vcpu) ? 8 : 4), NULL);
+               /* _system ok, nested_vmx_check_permission has verified cpl=0 */
+               kvm_write_guest_virt_system(vcpu, gva, &field_value,
+                                           (is_long_mode(vcpu) ? 8 : 4), NULL);
        }
 
        nested_vmx_succeed(vcpu);
@@ -7825,8 +7835,8 @@ static int handle_vmwrite(struct kvm_vcpu *vcpu)
                if (get_vmx_mem_address(vcpu, exit_qualification,
                                vmx_instruction_info, false, &gva))
                        return 1;
-               if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva,
-                          &field_value, (is_64_bit_mode(vcpu) ? 8 : 4), &e)) {
+               if (kvm_read_guest_virt(vcpu, gva, &field_value,
+                                       (is_64_bit_mode(vcpu) ? 8 : 4), &e)) {
                        kvm_inject_page_fault(vcpu, &e);
                        return 1;
                }
@@ -7945,10 +7955,10 @@ static int handle_vmptrst(struct kvm_vcpu *vcpu)
        if (get_vmx_mem_address(vcpu, exit_qualification,
                        vmx_instruction_info, true, &vmcs_gva))
                return 1;
-       /* ok to use *_system, as hardware has verified cpl=0 */
-       if (kvm_write_guest_virt_system(&vcpu->arch.emulate_ctxt, vmcs_gva,
-                                (void *)&to_vmx(vcpu)->nested.current_vmptr,
-                                sizeof(u64), &e)) {
+       /* *_system ok, nested_vmx_check_permission has verified cpl=0 */
+       if (kvm_write_guest_virt_system(vcpu, vmcs_gva,
+                                       (void *)&to_vmx(vcpu)->nested.current_vmptr,
+                                       sizeof(u64), &e)) {
                kvm_inject_page_fault(vcpu, &e);
                return 1;
        }
@@ -7995,8 +8005,7 @@ static int handle_invept(struct kvm_vcpu *vcpu)
        if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
                        vmx_instruction_info, false, &gva))
                return 1;
-       if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, &operand,
-                               sizeof(operand), &e)) {
+       if (kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e)) {
                kvm_inject_page_fault(vcpu, &e);
                return 1;
        }
@@ -8060,8 +8069,7 @@ static int handle_invvpid(struct kvm_vcpu *vcpu)
        if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
                        vmx_instruction_info, false, &gva))
                return 1;
-       if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, &operand,
-                               sizeof(operand), &e)) {
+       if (kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e)) {
                kvm_inject_page_fault(vcpu, &e);
                return 1;
        }
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 999560ff12b5..cf08ac8a910c 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -844,7 +844,7 @@ int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
        }
 
        if (is_long_mode(vcpu) &&
-           (cr3 & rsvd_bits(cpuid_maxphyaddr(vcpu), 62)))
+           (cr3 & rsvd_bits(cpuid_maxphyaddr(vcpu), 63)))
                return 1;
        else if (is_pae(vcpu) && is_paging(vcpu) &&
                   !load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3))
@@ -4703,11 +4703,10 @@ static int kvm_fetch_guest_virt(struct x86_emulate_ctxt *ctxt,
        return X86EMUL_CONTINUE;
 }
 
-int kvm_read_guest_virt(struct x86_emulate_ctxt *ctxt,
+int kvm_read_guest_virt(struct kvm_vcpu *vcpu,
                               gva_t addr, void *val, unsigned int bytes,
                               struct x86_exception *exception)
 {
-       struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
        u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
 
        return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access,
@@ -4715,12 +4714,17 @@ int kvm_read_guest_virt(struct x86_emulate_ctxt *ctxt,
 }
 EXPORT_SYMBOL_GPL(kvm_read_guest_virt);
 
-static int kvm_read_guest_virt_system(struct x86_emulate_ctxt *ctxt,
-                                     gva_t addr, void *val, unsigned int bytes,
-                                     struct x86_exception *exception)
+static int emulator_read_std(struct x86_emulate_ctxt *ctxt,
+                            gva_t addr, void *val, unsigned int bytes,
+                            struct x86_exception *exception, bool system)
 {
        struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
-       return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, 0, exception);
+       u32 access = 0;
+
+       if (!system && kvm_x86_ops->get_cpl(vcpu) == 3)
+               access |= PFERR_USER_MASK;
+
+       return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access, exception);
 }
 
 static int kvm_read_guest_phys_system(struct x86_emulate_ctxt *ctxt,
@@ -4732,18 +4736,16 @@ static int kvm_read_guest_phys_system(struct x86_emulate_ctxt *ctxt,
        return r < 0 ? X86EMUL_IO_NEEDED : X86EMUL_CONTINUE;
 }
 
-int kvm_write_guest_virt_system(struct x86_emulate_ctxt *ctxt,
-                                      gva_t addr, void *val,
-                                      unsigned int bytes,
-                                      struct x86_exception *exception)
+static int kvm_write_guest_virt_helper(gva_t addr, void *val, unsigned int bytes,
+                                     struct kvm_vcpu *vcpu, u32 access,
+                                     struct x86_exception *exception)
 {
-       struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
        void *data = val;
        int r = X86EMUL_CONTINUE;
 
        while (bytes) {
                gpa_t gpa =  vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr,
-                                                            PFERR_WRITE_MASK,
+                                                            access,
                                                             exception);
                unsigned offset = addr & (PAGE_SIZE-1);
                unsigned towrite = min(bytes, (unsigned)PAGE_SIZE - offset);
@@ -4764,6 +4766,27 @@ int kvm_write_guest_virt_system(struct x86_emulate_ctxt *ctxt,
 out:
        return r;
 }
+
+static int emulator_write_std(struct x86_emulate_ctxt *ctxt, gva_t addr, void *val,
+                             unsigned int bytes, struct x86_exception *exception,
+                             bool system)
+{
+       struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
+       u32 access = PFERR_WRITE_MASK;
+
+       if (!system && kvm_x86_ops->get_cpl(vcpu) == 3)
+               access |= PFERR_USER_MASK;
+
+       return kvm_write_guest_virt_helper(addr, val, bytes, vcpu,
+                                          access, exception);
+}
+
+int kvm_write_guest_virt_system(struct kvm_vcpu *vcpu, gva_t addr, void *val,
+                               unsigned int bytes, struct x86_exception *exception)
+{
+       return kvm_write_guest_virt_helper(addr, val, bytes, vcpu,
+                                          PFERR_WRITE_MASK, exception);
+}
 EXPORT_SYMBOL_GPL(kvm_write_guest_virt_system);
 
 static int vcpu_is_mmio_gpa(struct kvm_vcpu *vcpu, unsigned long gva,
@@ -5492,8 +5515,8 @@ static int emulator_pre_leave_smm(struct x86_emulate_ctxt *ctxt, u64 smbase)
 static const struct x86_emulate_ops emulate_ops = {
        .read_gpr            = emulator_read_gpr,
        .write_gpr           = emulator_write_gpr,
-       .read_std            = kvm_read_guest_virt_system,
-       .write_std           = kvm_write_guest_virt_system,
+       .read_std            = emulator_read_std,
+       .write_std           = emulator_write_std,
        .read_phys           = kvm_read_guest_phys_system,
        .fetch               = kvm_fetch_guest_virt,
        .read_emulated       = emulator_read_emulated,
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
index b91215d1fd80..949bd317ad5c 100644
--- a/arch/x86/kvm/x86.h
+++ b/arch/x86/kvm/x86.h
@@ -213,11 +213,11 @@ int kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip);
 void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr);
 u64 get_kvmclock_ns(struct kvm *kvm);
 
-int kvm_read_guest_virt(struct x86_emulate_ctxt *ctxt,
+int kvm_read_guest_virt(struct kvm_vcpu *vcpu,
        gva_t addr, void *val, unsigned int bytes,
        struct x86_exception *exception);
 
-int kvm_write_guest_virt_system(struct x86_emulate_ctxt *ctxt,
+int kvm_write_guest_virt_system(struct kvm_vcpu *vcpu,
        gva_t addr, void *val, unsigned int bytes,
        struct x86_exception *exception);
 
diff --git a/block/blk-zoned.c b/block/blk-zoned.c
index acb7252c7e81..416079fdc8a6 100644
--- a/block/blk-zoned.c
+++ b/block/blk-zoned.c
@@ -328,7 +328,11 @@ int blkdev_report_zones_ioctl(struct block_device *bdev, fmode_t mode,
        if (!rep.nr_zones)
                return -EINVAL;
 
-       zones = kcalloc(rep.nr_zones, sizeof(struct blk_zone), GFP_KERNEL);
+       if (rep.nr_zones > INT_MAX / sizeof(struct blk_zone))
+               return -ERANGE;
+
+       zones = kvmalloc(rep.nr_zones * sizeof(struct blk_zone),
+                       GFP_KERNEL | __GFP_ZERO);
        if (!zones)
                return -ENOMEM;
 
@@ -350,7 +354,7 @@ int blkdev_report_zones_ioctl(struct block_device *bdev, fmode_t mode,
        }
 
  out:
-       kfree(zones);
+       kvfree(zones);
 
        return ret;
 }
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index 2188235be02d..526c1b0e7dcb 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -760,15 +760,18 @@ struct aead_edesc {
  * @src_nents: number of segments in input s/w scatterlist
  * @dst_nents: number of segments in output s/w scatterlist
  * @iv_dma: dma address of iv for checking continuity and link table
+ * @iv_dir: DMA mapping direction for IV
  * @sec4_sg_bytes: length of dma mapped sec4_sg space
  * @sec4_sg_dma: bus physical mapped address of h/w link table
  * @sec4_sg: pointer to h/w link table
  * @hw_desc: the h/w job descriptor followed by any referenced link tables
+ *          and IV
  */
 struct ablkcipher_edesc {
        int src_nents;
        int dst_nents;
        dma_addr_t iv_dma;
+       enum dma_data_direction iv_dir;
        int sec4_sg_bytes;
        dma_addr_t sec4_sg_dma;
        struct sec4_sg_entry *sec4_sg;
@@ -778,7 +781,8 @@ struct ablkcipher_edesc {
 static void caam_unmap(struct device *dev, struct scatterlist *src,
                       struct scatterlist *dst, int src_nents,
                       int dst_nents,
-                      dma_addr_t iv_dma, int ivsize, dma_addr_t sec4_sg_dma,
+                      dma_addr_t iv_dma, int ivsize,
+                      enum dma_data_direction iv_dir, dma_addr_t sec4_sg_dma,
                       int sec4_sg_bytes)
 {
        if (dst != src) {
@@ -790,7 +794,7 @@ static void caam_unmap(struct device *dev, struct scatterlist *src,
        }
 
        if (iv_dma)
-               dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
+               dma_unmap_single(dev, iv_dma, ivsize, iv_dir);
        if (sec4_sg_bytes)
                dma_unmap_single(dev, sec4_sg_dma, sec4_sg_bytes,
                                 DMA_TO_DEVICE);
@@ -801,7 +805,7 @@ static void aead_unmap(struct device *dev,
                       struct aead_request *req)
 {
        caam_unmap(dev, req->src, req->dst,
-                  edesc->src_nents, edesc->dst_nents, 0, 0,
+                  edesc->src_nents, edesc->dst_nents, 0, 0, DMA_NONE,
                   edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
 }
 
@@ -814,7 +818,7 @@ static void ablkcipher_unmap(struct device *dev,
 
        caam_unmap(dev, req->src, req->dst,
                   edesc->src_nents, edesc->dst_nents,
-                  edesc->iv_dma, ivsize,
+                  edesc->iv_dma, ivsize, edesc->iv_dir,
                   edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
 }
 
@@ -903,6 +907,18 @@ static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
        scatterwalk_map_and_copy(req->info, req->dst, req->nbytes - ivsize,
                                 ivsize, 0);
 
+       /* In case initial IV was generated, copy it in GIVCIPHER request */
+       if (edesc->iv_dir == DMA_FROM_DEVICE) {
+               u8 *iv;
+               struct skcipher_givcrypt_request *greq;
+
+               greq = container_of(req, struct skcipher_givcrypt_request,
+                                   creq);
+               iv = (u8 *)edesc->hw_desc + desc_bytes(edesc->hw_desc) +
+                    edesc->sec4_sg_bytes;
+               memcpy(greq->giv, iv, ivsize);
+       }
+
        kfree(edesc);
 
        ablkcipher_request_complete(req, err);
@@ -913,10 +929,10 @@ static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
 {
        struct ablkcipher_request *req = context;
        struct ablkcipher_edesc *edesc;
+#ifdef DEBUG
        struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
        int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
 
-#ifdef DEBUG
        dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
 #endif
 
@@ -934,14 +950,6 @@ static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
                     edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
 
        ablkcipher_unmap(jrdev, edesc, req);
-
-       /*
-        * The crypto API expects us to set the IV (req->info) to the last
-        * ciphertext block.
-        */
-       scatterwalk_map_and_copy(req->info, req->src, req->nbytes - ivsize,
-                                ivsize, 0);
-
        kfree(edesc);
 
        ablkcipher_request_complete(req, err);
@@ -1090,15 +1098,14 @@ static void init_authenc_job(struct aead_request *req,
  */
 static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr,
                                struct ablkcipher_edesc *edesc,
-                               struct ablkcipher_request *req,
-                               bool iv_contig)
+                               struct ablkcipher_request *req)
 {
        struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
        int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
        u32 *desc = edesc->hw_desc;
-       u32 out_options = 0, in_options;
-       dma_addr_t dst_dma, src_dma;
-       int len, sec4_sg_index = 0;
+       u32 out_options = 0;
+       dma_addr_t dst_dma;
+       int len;
 
 #ifdef DEBUG
        print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
@@ -1114,30 +1121,18 @@ static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr,
        len = desc_len(sh_desc);
        init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
 
-       if (iv_contig) {
-               src_dma = edesc->iv_dma;
-               in_options = 0;
-       } else {
-               src_dma = edesc->sec4_sg_dma;
-               sec4_sg_index += edesc->src_nents + 1;
-               in_options = LDST_SGF;
-       }
-       append_seq_in_ptr(desc, src_dma, req->nbytes + ivsize, in_options);
+       append_seq_in_ptr(desc, edesc->sec4_sg_dma, req->nbytes + ivsize,
+                         LDST_SGF);
 
        if (likely(req->src == req->dst)) {
-               if (edesc->src_nents == 1 && iv_contig) {
-                       dst_dma = sg_dma_address(req->src);
-               } else {
-                       dst_dma = edesc->sec4_sg_dma +
-                               sizeof(struct sec4_sg_entry);
-                       out_options = LDST_SGF;
-               }
+               dst_dma = edesc->sec4_sg_dma + sizeof(struct sec4_sg_entry);
+               out_options = LDST_SGF;
        } else {
                if (edesc->dst_nents == 1) {
                        dst_dma = sg_dma_address(req->dst);
                } else {
-                       dst_dma = edesc->sec4_sg_dma +
-                               sec4_sg_index * sizeof(struct sec4_sg_entry);
+                       dst_dma = edesc->sec4_sg_dma + (edesc->src_nents + 1) *
+                                 sizeof(struct sec4_sg_entry);
                        out_options = LDST_SGF;
                }
        }
@@ -1149,13 +1144,12 @@ static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr,
  */
 static void init_ablkcipher_giv_job(u32 *sh_desc, dma_addr_t ptr,
                                    struct ablkcipher_edesc *edesc,
-                                   struct ablkcipher_request *req,
-                                   bool iv_contig)
+                                   struct ablkcipher_request *req)
 {
        struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
        int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
        u32 *desc = edesc->hw_desc;
-       u32 out_options, in_options;
+       u32 in_options;
        dma_addr_t dst_dma, src_dma;
        int len, sec4_sg_index = 0;
 
@@ -1181,15 +1175,9 @@ static void init_ablkcipher_giv_job(u32 *sh_desc, dma_addr_t ptr,
        }
        append_seq_in_ptr(desc, src_dma, req->nbytes, in_options);
 
-       if (iv_contig) {
-               dst_dma = edesc->iv_dma;
-               out_options = 0;
-       } else {
-               dst_dma = edesc->sec4_sg_dma +
-                         sec4_sg_index * sizeof(struct sec4_sg_entry);
-               out_options = LDST_SGF;
-       }
-       append_seq_out_ptr(desc, dst_dma, req->nbytes + ivsize, out_options);
+       dst_dma = edesc->sec4_sg_dma + sec4_sg_index *
+                 sizeof(struct sec4_sg_entry);
+       append_seq_out_ptr(desc, dst_dma, req->nbytes + ivsize, LDST_SGF);
 }
 
 /*
@@ -1278,7 +1266,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
                        GFP_DMA | flags);
        if (!edesc) {
                caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
-                          0, 0, 0);
+                          0, DMA_NONE, 0, 0);
                return ERR_PTR(-ENOMEM);
        }
 
@@ -1482,8 +1470,7 @@ static int aead_decrypt(struct aead_request *req)
  * allocate and map the ablkcipher extended descriptor for ablkcipher
  */
 static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
-                                                      *req, int desc_bytes,
-                                                      bool *iv_contig_out)
+                                                      *req, int desc_bytes)
 {
        struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
        struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
@@ -1492,8 +1479,8 @@ static struct ablkcipher_edesc 
*ablkcipher_edesc_alloc(struct ablkcipher_request
                       GFP_KERNEL : GFP_ATOMIC;
        int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
        struct ablkcipher_edesc *edesc;
-       dma_addr_t iv_dma = 0;
-       bool in_contig;
+       dma_addr_t iv_dma;
+       u8 *iv;
        int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
        int dst_sg_idx, sec4_sg_ents, sec4_sg_bytes;
 
@@ -1537,33 +1524,20 @@ static struct ablkcipher_edesc 
*ablkcipher_edesc_alloc(struct ablkcipher_request
                }
        }
 
-       iv_dma = dma_map_single(jrdev, req->info, ivsize, DMA_TO_DEVICE);
-       if (dma_mapping_error(jrdev, iv_dma)) {
-               dev_err(jrdev, "unable to map IV\n");
-               caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
-                          0, 0, 0);
-               return ERR_PTR(-ENOMEM);
-       }
-
-       if (mapped_src_nents == 1 &&
-           iv_dma + ivsize == sg_dma_address(req->src)) {
-               in_contig = true;
-               sec4_sg_ents = 0;
-       } else {
-               in_contig = false;
-               sec4_sg_ents = 1 + mapped_src_nents;
-       }
+       sec4_sg_ents = 1 + mapped_src_nents;
        dst_sg_idx = sec4_sg_ents;
        sec4_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
        sec4_sg_bytes = sec4_sg_ents * sizeof(struct sec4_sg_entry);
 
-       /* allocate space for base edesc and hw desc commands, link tables */
-       edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes,
+       /*
+        * allocate space for base edesc and hw desc commands, link tables, IV
+        */
+       edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes + ivsize,
                        GFP_DMA | flags);
        if (!edesc) {
                dev_err(jrdev, "could not allocate extended descriptor\n");
-               caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents,
-                          iv_dma, ivsize, 0, 0);
+               caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
+                          0, DMA_NONE, 0, 0);
                return ERR_PTR(-ENOMEM);
        }
 
@@ -1572,13 +1546,24 @@ static struct ablkcipher_edesc 
*ablkcipher_edesc_alloc(struct ablkcipher_request
        edesc->sec4_sg_bytes = sec4_sg_bytes;
        edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
                         desc_bytes;
+       edesc->iv_dir = DMA_TO_DEVICE;
 
-       if (!in_contig) {
-               dma_to_sec4_sg_one(edesc->sec4_sg, iv_dma, ivsize, 0);
-               sg_to_sec4_sg_last(req->src, mapped_src_nents,
-                                  edesc->sec4_sg + 1, 0);
+       /* Make sure IV is located in a DMAable area */
+       iv = (u8 *)edesc->hw_desc + desc_bytes + sec4_sg_bytes;
+       memcpy(iv, req->info, ivsize);
+
+       iv_dma = dma_map_single(jrdev, iv, ivsize, DMA_TO_DEVICE);
+       if (dma_mapping_error(jrdev, iv_dma)) {
+               dev_err(jrdev, "unable to map IV\n");
+               caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
+                          0, DMA_NONE, 0, 0);
+               kfree(edesc);
+               return ERR_PTR(-ENOMEM);
        }
 
+       dma_to_sec4_sg_one(edesc->sec4_sg, iv_dma, ivsize, 0);
+       sg_to_sec4_sg_last(req->src, mapped_src_nents, edesc->sec4_sg + 1, 0);
+
        if (mapped_dst_nents > 1) {
                sg_to_sec4_sg_last(req->dst, mapped_dst_nents,
                                   edesc->sec4_sg + dst_sg_idx, 0);
@@ -1589,7 +1574,7 @@ static struct ablkcipher_edesc 
*ablkcipher_edesc_alloc(struct ablkcipher_request
        if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
                dev_err(jrdev, "unable to map S/G table\n");
                caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents,
-                          iv_dma, ivsize, 0, 0);
+                          iv_dma, ivsize, DMA_TO_DEVICE, 0, 0);
                kfree(edesc);
                return ERR_PTR(-ENOMEM);
        }
@@ -1602,7 +1587,6 @@ static struct ablkcipher_edesc 
*ablkcipher_edesc_alloc(struct ablkcipher_request
                       sec4_sg_bytes, 1);
 #endif
 
-       *iv_contig_out = in_contig;
        return edesc;
 }
 
@@ -1612,19 +1596,16 @@ static int ablkcipher_encrypt(struct ablkcipher_request 
*req)
        struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
        struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
        struct device *jrdev = ctx->jrdev;
-       bool iv_contig;
        u32 *desc;
        int ret = 0;
 
        /* allocate extended descriptor */
-       edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN *
-                                      CAAM_CMD_SZ, &iv_contig);
+       edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN * CAAM_CMD_SZ);
        if (IS_ERR(edesc))
                return PTR_ERR(edesc);
 
        /* Create and submit job descriptor*/
-       init_ablkcipher_job(ctx->sh_desc_enc,
-               ctx->sh_desc_enc_dma, edesc, req, iv_contig);
+       init_ablkcipher_job(ctx->sh_desc_enc, ctx->sh_desc_enc_dma, edesc, req);
 #ifdef DEBUG
        print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"__stringify(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
@@ -1648,20 +1629,25 @@ static int ablkcipher_decrypt(struct ablkcipher_request 
*req)
        struct ablkcipher_edesc *edesc;
        struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
        struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
+       int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
        struct device *jrdev = ctx->jrdev;
-       bool iv_contig;
        u32 *desc;
        int ret = 0;
 
        /* allocate extended descriptor */
-       edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN *
-                                      CAAM_CMD_SZ, &iv_contig);
+       edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN * CAAM_CMD_SZ);
        if (IS_ERR(edesc))
                return PTR_ERR(edesc);
 
+       /*
+        * The crypto API expects us to set the IV (req->info) to the last
+        * ciphertext block.
+        */
+       scatterwalk_map_and_copy(req->info, req->src, req->nbytes - ivsize,
+                                ivsize, 0);
+
        /* Create and submit job descriptor*/
-       init_ablkcipher_job(ctx->sh_desc_dec,
-               ctx->sh_desc_dec_dma, edesc, req, iv_contig);
+       init_ablkcipher_job(ctx->sh_desc_dec, ctx->sh_desc_dec_dma, edesc, req);
        desc = edesc->hw_desc;
 #ifdef DEBUG
        print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"__stringify(__LINE__)": ",
@@ -1686,8 +1672,7 @@ static int ablkcipher_decrypt(struct ablkcipher_request 
*req)
  */
 static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
                                struct skcipher_givcrypt_request *greq,
-                               int desc_bytes,
-                               bool *iv_contig_out)
+                               int desc_bytes)
 {
        struct ablkcipher_request *req = &greq->creq;
        struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
@@ -1697,8 +1682,8 @@ static struct ablkcipher_edesc 
*ablkcipher_giv_edesc_alloc(
                       GFP_KERNEL : GFP_ATOMIC;
        int src_nents, mapped_src_nents, dst_nents, mapped_dst_nents;
        struct ablkcipher_edesc *edesc;
-       dma_addr_t iv_dma = 0;
-       bool out_contig;
+       dma_addr_t iv_dma;
+       u8 *iv;
        int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
        int dst_sg_idx, sec4_sg_ents, sec4_sg_bytes;
 
@@ -1743,36 +1728,20 @@ static struct ablkcipher_edesc 
*ablkcipher_giv_edesc_alloc(
                }
        }
 
-       /*
-        * Check if iv can be contiguous with source and destination.
-        * If so, include it. If not, create scatterlist.
-        */
-       iv_dma = dma_map_single(jrdev, greq->giv, ivsize, DMA_TO_DEVICE);
-       if (dma_mapping_error(jrdev, iv_dma)) {
-               dev_err(jrdev, "unable to map IV\n");
-               caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
-                          0, 0, 0);
-               return ERR_PTR(-ENOMEM);
-       }
-
        sec4_sg_ents = mapped_src_nents > 1 ? mapped_src_nents : 0;
        dst_sg_idx = sec4_sg_ents;
-       if (mapped_dst_nents == 1 &&
-           iv_dma + ivsize == sg_dma_address(req->dst)) {
-               out_contig = true;
-       } else {
-               out_contig = false;
-               sec4_sg_ents += 1 + mapped_dst_nents;
-       }
+       sec4_sg_ents += 1 + mapped_dst_nents;
 
-       /* allocate space for base edesc and hw desc commands, link tables */
+       /*
+        * allocate space for base edesc and hw desc commands, link tables, IV
+        */
        sec4_sg_bytes = sec4_sg_ents * sizeof(struct sec4_sg_entry);
-       edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes,
+       edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes + ivsize,
                        GFP_DMA | flags);
        if (!edesc) {
                dev_err(jrdev, "could not allocate extended descriptor\n");
-               caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents,
-                          iv_dma, ivsize, 0, 0);
+               caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
+                          0, DMA_NONE, 0, 0);
                return ERR_PTR(-ENOMEM);
        }
 
@@ -1781,24 +1750,33 @@ static struct ablkcipher_edesc 
*ablkcipher_giv_edesc_alloc(
        edesc->sec4_sg_bytes = sec4_sg_bytes;
        edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
                         desc_bytes;
+       edesc->iv_dir = DMA_FROM_DEVICE;
+
+       /* Make sure IV is located in a DMAable area */
+       iv = (u8 *)edesc->hw_desc + desc_bytes + sec4_sg_bytes;
+       iv_dma = dma_map_single(jrdev, iv, ivsize, DMA_FROM_DEVICE);
+       if (dma_mapping_error(jrdev, iv_dma)) {
+               dev_err(jrdev, "unable to map IV\n");
+               caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
+                          0, DMA_NONE, 0, 0);
+               kfree(edesc);
+               return ERR_PTR(-ENOMEM);
+       }
 
        if (mapped_src_nents > 1)
                sg_to_sec4_sg_last(req->src, mapped_src_nents, edesc->sec4_sg,
                                   0);
 
-       if (!out_contig) {
-               dma_to_sec4_sg_one(edesc->sec4_sg + dst_sg_idx,
-                                  iv_dma, ivsize, 0);
-               sg_to_sec4_sg_last(req->dst, mapped_dst_nents,
-                                  edesc->sec4_sg + dst_sg_idx + 1, 0);
-       }
+       dma_to_sec4_sg_one(edesc->sec4_sg + dst_sg_idx, iv_dma, ivsize, 0);
+       sg_to_sec4_sg_last(req->dst, mapped_dst_nents, edesc->sec4_sg +
+                          dst_sg_idx + 1, 0);
 
        edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
                                            sec4_sg_bytes, DMA_TO_DEVICE);
        if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
                dev_err(jrdev, "unable to map S/G table\n");
                caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents,
-                          iv_dma, ivsize, 0, 0);
+                          iv_dma, ivsize, DMA_FROM_DEVICE, 0, 0);
                kfree(edesc);
                return ERR_PTR(-ENOMEM);
        }
@@ -1811,7 +1789,6 @@ static struct ablkcipher_edesc 
*ablkcipher_giv_edesc_alloc(
                       sec4_sg_bytes, 1);
 #endif
 
-       *iv_contig_out = out_contig;
        return edesc;
 }
 
@@ -1822,19 +1799,17 @@ static int ablkcipher_givencrypt(struct 
skcipher_givcrypt_request *creq)
        struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
        struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
        struct device *jrdev = ctx->jrdev;
-       bool iv_contig = false;
        u32 *desc;
        int ret = 0;
 
        /* allocate extended descriptor */
-       edesc = ablkcipher_giv_edesc_alloc(creq, DESC_JOB_IO_LEN *
-                                      CAAM_CMD_SZ, &iv_contig);
+       edesc = ablkcipher_giv_edesc_alloc(creq, DESC_JOB_IO_LEN * CAAM_CMD_SZ);
        if (IS_ERR(edesc))
                return PTR_ERR(edesc);
 
        /* Create and submit job descriptor*/
        init_ablkcipher_giv_job(ctx->sh_desc_givenc, ctx->sh_desc_givenc_dma,
-                               edesc, req, iv_contig);
+                               edesc, req);
 #ifdef DEBUG
        print_hex_dump(KERN_ERR,
                       "ablkcipher jobdesc@" __stringify(__LINE__) ": ",
diff --git a/drivers/crypto/caam/caamalg_qi.c b/drivers/crypto/caam/caamalg_qi.c
index 4aecc9435f69..03e017233051 100644
--- a/drivers/crypto/caam/caamalg_qi.c
+++ b/drivers/crypto/caam/caamalg_qi.c
@@ -423,7 +423,7 @@ static int xts_ablkcipher_setkey(struct crypto_ablkcipher 
*ablkcipher,
  * @assoclen: associated data length, in CAAM endianness
  * @assoclen_dma: bus physical mapped address of req->assoclen
  * @drv_req: driver-specific request structure
- * @sgt: the h/w link table
+ * @sgt: the h/w link table, followed by IV
  */
 struct aead_edesc {
        int src_nents;
@@ -434,9 +434,6 @@ struct aead_edesc {
        unsigned int assoclen;
        dma_addr_t assoclen_dma;
        struct caam_drv_req drv_req;
-#define CAAM_QI_MAX_AEAD_SG                                            \
-       ((CAAM_QI_MEMCACHE_SIZE - offsetof(struct aead_edesc, sgt)) /   \
-        sizeof(struct qm_sg_entry))
        struct qm_sg_entry sgt[0];
 };
 
@@ -448,7 +445,7 @@ struct aead_edesc {
  * @qm_sg_bytes: length of dma mapped h/w link table
  * @qm_sg_dma: bus physical mapped address of h/w link table
  * @drv_req: driver-specific request structure
- * @sgt: the h/w link table
+ * @sgt: the h/w link table, followed by IV
  */
 struct ablkcipher_edesc {
        int src_nents;
@@ -457,9 +454,6 @@ struct ablkcipher_edesc {
        int qm_sg_bytes;
        dma_addr_t qm_sg_dma;
        struct caam_drv_req drv_req;
-#define CAAM_QI_MAX_ABLKCIPHER_SG                                          \
-       ((CAAM_QI_MEMCACHE_SIZE - offsetof(struct ablkcipher_edesc, sgt)) / \
-        sizeof(struct qm_sg_entry))
        struct qm_sg_entry sgt[0];
 };
 
@@ -671,17 +665,8 @@ static struct aead_edesc *aead_edesc_alloc(struct 
aead_request *req,
                }
        }
 
-       if ((alg->caam.rfc3686 && encrypt) || !alg->caam.geniv) {
+       if ((alg->caam.rfc3686 && encrypt) || !alg->caam.geniv)
                ivsize = crypto_aead_ivsize(aead);
-               iv_dma = dma_map_single(qidev, req->iv, ivsize, DMA_TO_DEVICE);
-               if (dma_mapping_error(qidev, iv_dma)) {
-                       dev_err(qidev, "unable to map IV\n");
-                       caam_unmap(qidev, req->src, req->dst, src_nents,
-                                  dst_nents, 0, 0, op_type, 0, 0);
-                       qi_cache_free(edesc);
-                       return ERR_PTR(-ENOMEM);
-               }
-       }
 
        /*
         * Create S/G table: req->assoclen, [IV,] req->src [, req->dst].
@@ -689,16 +674,33 @@ static struct aead_edesc *aead_edesc_alloc(struct 
aead_request *req,
         */
        qm_sg_ents = 1 + !!ivsize + mapped_src_nents +
                     (mapped_dst_nents > 1 ? mapped_dst_nents : 0);
-       if (unlikely(qm_sg_ents > CAAM_QI_MAX_AEAD_SG)) {
-               dev_err(qidev, "Insufficient S/G entries: %d > %zu\n",
-                       qm_sg_ents, CAAM_QI_MAX_AEAD_SG);
-               caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
-                          iv_dma, ivsize, op_type, 0, 0);
+       sg_table = &edesc->sgt[0];
+       qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
+       if (unlikely(offsetof(struct aead_edesc, sgt) + qm_sg_bytes + ivsize >
+                    CAAM_QI_MEMCACHE_SIZE)) {
+               dev_err(qidev, "No space for %d S/G entries and/or %dB IV\n",
+                       qm_sg_ents, ivsize);
+               caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
+                          0, 0, 0, 0);
                qi_cache_free(edesc);
                return ERR_PTR(-ENOMEM);
        }
-       sg_table = &edesc->sgt[0];
-       qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
+
+       if (ivsize) {
+               u8 *iv = (u8 *)(sg_table + qm_sg_ents);
+
+               /* Make sure IV is located in a DMAable area */
+               memcpy(iv, req->iv, ivsize);
+
+               iv_dma = dma_map_single(qidev, iv, ivsize, DMA_TO_DEVICE);
+               if (dma_mapping_error(qidev, iv_dma)) {
+                       dev_err(qidev, "unable to map IV\n");
+                       caam_unmap(qidev, req->src, req->dst, src_nents,
+                                  dst_nents, 0, 0, 0, 0, 0);
+                       qi_cache_free(edesc);
+                       return ERR_PTR(-ENOMEM);
+               }
+       }
 
        edesc->src_nents = src_nents;
        edesc->dst_nents = dst_nents;
@@ -835,15 +837,27 @@ static void ablkcipher_done(struct caam_drv_req *drv_req, 
u32 status)
 #endif
 
        ablkcipher_unmap(qidev, edesc, req);
-       qi_cache_free(edesc);
+
+       /* In case initial IV was generated, copy it in GIVCIPHER request */
+       if (edesc->drv_req.drv_ctx->op_type == GIVENCRYPT) {
+               u8 *iv;
+               struct skcipher_givcrypt_request *greq;
+
+               greq = container_of(req, struct skcipher_givcrypt_request,
+                                   creq);
+               iv = (u8 *)edesc->sgt + edesc->qm_sg_bytes;
+               memcpy(greq->giv, iv, ivsize);
+       }
 
        /*
         * The crypto API expects us to set the IV (req->info) to the last
         * ciphertext block. This is used e.g. by the CTS mode.
         */
-       scatterwalk_map_and_copy(req->info, req->dst, req->nbytes - ivsize,
-                                ivsize, 0);
+       if (edesc->drv_req.drv_ctx->op_type != DECRYPT)
+               scatterwalk_map_and_copy(req->info, req->dst, req->nbytes -
+                                        ivsize, ivsize, 0);
 
+       qi_cache_free(edesc);
        ablkcipher_request_complete(req, status);
 }
 
@@ -858,9 +872,9 @@ static struct ablkcipher_edesc 
*ablkcipher_edesc_alloc(struct ablkcipher_request
        int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
        struct ablkcipher_edesc *edesc;
        dma_addr_t iv_dma;
-       bool in_contig;
+       u8 *iv;
        int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
-       int dst_sg_idx, qm_sg_ents;
+       int dst_sg_idx, qm_sg_ents, qm_sg_bytes;
        struct qm_sg_entry *sg_table, *fd_sgt;
        struct caam_drv_ctx *drv_ctx;
        enum optype op_type = encrypt ? ENCRYPT : DECRYPT;
@@ -907,55 +921,53 @@ static struct ablkcipher_edesc 
*ablkcipher_edesc_alloc(struct ablkcipher_request
                }
        }
 
-       iv_dma = dma_map_single(qidev, req->info, ivsize, DMA_TO_DEVICE);
-       if (dma_mapping_error(qidev, iv_dma)) {
-               dev_err(qidev, "unable to map IV\n");
-               caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
-                          0, 0, 0, 0);
-               return ERR_PTR(-ENOMEM);
-       }
-
-       if (mapped_src_nents == 1 &&
-           iv_dma + ivsize == sg_dma_address(req->src)) {
-               in_contig = true;
-               qm_sg_ents = 0;
-       } else {
-               in_contig = false;
-               qm_sg_ents = 1 + mapped_src_nents;
-       }
+       qm_sg_ents = 1 + mapped_src_nents;
        dst_sg_idx = qm_sg_ents;
 
        qm_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
-       if (unlikely(qm_sg_ents > CAAM_QI_MAX_ABLKCIPHER_SG)) {
-               dev_err(qidev, "Insufficient S/G entries: %d > %zu\n",
-                       qm_sg_ents, CAAM_QI_MAX_ABLKCIPHER_SG);
-               caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
-                          iv_dma, ivsize, op_type, 0, 0);
+       qm_sg_bytes = qm_sg_ents * sizeof(struct qm_sg_entry);
+       if (unlikely(offsetof(struct ablkcipher_edesc, sgt) + qm_sg_bytes +
+                    ivsize > CAAM_QI_MEMCACHE_SIZE)) {
+               dev_err(qidev, "No space for %d S/G entries and/or %dB IV\n",
+                       qm_sg_ents, ivsize);
+               caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
+                          0, 0, 0, 0);
                return ERR_PTR(-ENOMEM);
        }
 
-       /* allocate space for base edesc and link tables */
+       /* allocate space for base edesc, link tables and IV */
        edesc = qi_cache_alloc(GFP_DMA | flags);
        if (unlikely(!edesc)) {
                dev_err(qidev, "could not allocate extended descriptor\n");
-               caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
-                          iv_dma, ivsize, op_type, 0, 0);
+               caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
+                          0, 0, 0, 0);
+               return ERR_PTR(-ENOMEM);
+       }
+
+       /* Make sure IV is located in a DMAable area */
+       sg_table = &edesc->sgt[0];
+       iv = (u8 *)(sg_table + qm_sg_ents);
+       memcpy(iv, req->info, ivsize);
+
+       iv_dma = dma_map_single(qidev, iv, ivsize, DMA_TO_DEVICE);
+       if (dma_mapping_error(qidev, iv_dma)) {
+               dev_err(qidev, "unable to map IV\n");
+               caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
+                          0, 0, 0, 0);
+               qi_cache_free(edesc);
                return ERR_PTR(-ENOMEM);
        }
 
        edesc->src_nents = src_nents;
        edesc->dst_nents = dst_nents;
        edesc->iv_dma = iv_dma;
-       sg_table = &edesc->sgt[0];
-       edesc->qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
+       edesc->qm_sg_bytes = qm_sg_bytes;
        edesc->drv_req.app_ctx = req;
        edesc->drv_req.cbk = ablkcipher_done;
        edesc->drv_req.drv_ctx = drv_ctx;
 
-       if (!in_contig) {
-               dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
-               sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + 1, 0);
-       }
+       dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
+       sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + 1, 0);
 
        if (mapped_dst_nents > 1)
                sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
@@ -973,20 +985,12 @@ static struct ablkcipher_edesc 
*ablkcipher_edesc_alloc(struct ablkcipher_request
 
        fd_sgt = &edesc->drv_req.fd_sgt[0];
 
-       if (!in_contig)
-               dma_to_qm_sg_one_last_ext(&fd_sgt[1], edesc->qm_sg_dma,
-                                         ivsize + req->nbytes, 0);
-       else
-               dma_to_qm_sg_one_last(&fd_sgt[1], iv_dma, ivsize + req->nbytes,
-                                     0);
+       dma_to_qm_sg_one_last_ext(&fd_sgt[1], edesc->qm_sg_dma,
+                                 ivsize + req->nbytes, 0);
 
        if (req->src == req->dst) {
-               if (!in_contig)
-                       dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma +
-                                            sizeof(*sg_table), req->nbytes, 0);
-               else
-                       dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->src),
-                                        req->nbytes, 0);
+               dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma +
+                                    sizeof(*sg_table), req->nbytes, 0);
        } else if (mapped_dst_nents > 1) {
                dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma + dst_sg_idx *
                                     sizeof(*sg_table), req->nbytes, 0);
@@ -1010,10 +1014,10 @@ static struct ablkcipher_edesc 
*ablkcipher_giv_edesc_alloc(
        int src_nents, mapped_src_nents, dst_nents, mapped_dst_nents;
        struct ablkcipher_edesc *edesc;
        dma_addr_t iv_dma;
-       bool out_contig;
+       u8 *iv;
        int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
        struct qm_sg_entry *sg_table, *fd_sgt;
-       int dst_sg_idx, qm_sg_ents;
+       int dst_sg_idx, qm_sg_ents, qm_sg_bytes;
        struct caam_drv_ctx *drv_ctx;
 
        drv_ctx = get_drv_ctx(ctx, GIVENCRYPT);
@@ -1061,46 +1065,45 @@ static struct ablkcipher_edesc 
*ablkcipher_giv_edesc_alloc(
                mapped_dst_nents = src_nents;
        }
 
-       iv_dma = dma_map_single(qidev, creq->giv, ivsize, DMA_FROM_DEVICE);
-       if (dma_mapping_error(qidev, iv_dma)) {
-               dev_err(qidev, "unable to map IV\n");
-               caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
-                          0, 0, 0, 0);
-               return ERR_PTR(-ENOMEM);
-       }
-
        qm_sg_ents = mapped_src_nents > 1 ? mapped_src_nents : 0;
        dst_sg_idx = qm_sg_ents;
-       if (mapped_dst_nents == 1 &&
-           iv_dma + ivsize == sg_dma_address(req->dst)) {
-               out_contig = true;
-       } else {
-               out_contig = false;
-               qm_sg_ents += 1 + mapped_dst_nents;
-       }
 
-       if (unlikely(qm_sg_ents > CAAM_QI_MAX_ABLKCIPHER_SG)) {
-               dev_err(qidev, "Insufficient S/G entries: %d > %zu\n",
-                       qm_sg_ents, CAAM_QI_MAX_ABLKCIPHER_SG);
-               caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
-                          iv_dma, ivsize, GIVENCRYPT, 0, 0);
+       qm_sg_ents += 1 + mapped_dst_nents;
+       qm_sg_bytes = qm_sg_ents * sizeof(struct qm_sg_entry);
+       if (unlikely(offsetof(struct ablkcipher_edesc, sgt) + qm_sg_bytes +
+                    ivsize > CAAM_QI_MEMCACHE_SIZE)) {
+               dev_err(qidev, "No space for %d S/G entries and/or %dB IV\n",
+                       qm_sg_ents, ivsize);
+               caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
+                          0, 0, 0, 0);
                return ERR_PTR(-ENOMEM);
        }
 
-       /* allocate space for base edesc and link tables */
+       /* allocate space for base edesc, link tables and IV */
        edesc = qi_cache_alloc(GFP_DMA | flags);
        if (!edesc) {
                dev_err(qidev, "could not allocate extended descriptor\n");
-               caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
-                          iv_dma, ivsize, GIVENCRYPT, 0, 0);
+               caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
+                          0, 0, 0, 0);
+               return ERR_PTR(-ENOMEM);
+       }
+
+       /* Make sure IV is located in a DMAable area */
+       sg_table = &edesc->sgt[0];
+       iv = (u8 *)(sg_table + qm_sg_ents);
+       iv_dma = dma_map_single(qidev, iv, ivsize, DMA_FROM_DEVICE);
+       if (dma_mapping_error(qidev, iv_dma)) {
+               dev_err(qidev, "unable to map IV\n");
+               caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
+                          0, 0, 0, 0);
+               qi_cache_free(edesc);
                return ERR_PTR(-ENOMEM);
        }
 
        edesc->src_nents = src_nents;
        edesc->dst_nents = dst_nents;
        edesc->iv_dma = iv_dma;
-       sg_table = &edesc->sgt[0];
-       edesc->qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
+       edesc->qm_sg_bytes = qm_sg_bytes;
        edesc->drv_req.app_ctx = req;
        edesc->drv_req.cbk = ablkcipher_done;
        edesc->drv_req.drv_ctx = drv_ctx;
@@ -1108,11 +1111,9 @@ static struct ablkcipher_edesc 
*ablkcipher_giv_edesc_alloc(
        if (mapped_src_nents > 1)
                sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table, 0);
 
-       if (!out_contig) {
-               dma_to_qm_sg_one(sg_table + dst_sg_idx, iv_dma, ivsize, 0);
-               sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
-                                dst_sg_idx + 1, 0);
-       }
+       dma_to_qm_sg_one(sg_table + dst_sg_idx, iv_dma, ivsize, 0);
+       sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table + dst_sg_idx + 1,
+                        0);
 
        edesc->qm_sg_dma = dma_map_single(qidev, sg_table, edesc->qm_sg_bytes,
                                          DMA_TO_DEVICE);
@@ -1133,13 +1134,8 @@ static struct ablkcipher_edesc 
*ablkcipher_giv_edesc_alloc(
                dma_to_qm_sg_one(&fd_sgt[1], sg_dma_address(req->src),
                                 req->nbytes, 0);
 
-       if (!out_contig)
-               dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma + dst_sg_idx *
-                                    sizeof(*sg_table), ivsize + req->nbytes,
-                                    0);
-       else
-               dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->dst),
-                                ivsize + req->nbytes, 0);
+       dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma + dst_sg_idx *
+                            sizeof(*sg_table), ivsize + req->nbytes, 0);
 
        return edesc;
 }
@@ -1149,6 +1145,7 @@ static inline int ablkcipher_crypt(struct 
ablkcipher_request *req, bool encrypt)
        struct ablkcipher_edesc *edesc;
        struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
        struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
+       int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
        int ret;
 
        if (unlikely(caam_congested))
@@ -1159,6 +1156,14 @@ static inline int ablkcipher_crypt(struct 
ablkcipher_request *req, bool encrypt)
        if (IS_ERR(edesc))
                return PTR_ERR(edesc);
 
+       /*
+        * The crypto API expects us to set the IV (req->info) to the last
+        * ciphertext block.
+        */
+       if (!encrypt)
+               scatterwalk_map_and_copy(req->info, req->src, req->nbytes -
+                                        ivsize, ivsize, 0);
+
        ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
        if (!ret) {
                ret = -EINPROGRESS;
diff --git a/drivers/crypto/caam/caampkc.c b/drivers/crypto/caam/caampkc.c
index 7a897209f181..7ff4a25440ac 100644
--- a/drivers/crypto/caam/caampkc.c
+++ b/drivers/crypto/caam/caampkc.c
@@ -66,7 +66,7 @@ static void rsa_priv_f2_unmap(struct device *dev, struct 
rsa_edesc *edesc,
        struct caam_rsa_key *key = &ctx->key;
        struct rsa_priv_f2_pdb *pdb = &edesc->pdb.priv_f2;
        size_t p_sz = key->p_sz;
-       size_t q_sz = key->p_sz;
+       size_t q_sz = key->q_sz;
 
        dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);
        dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
@@ -83,7 +83,7 @@ static void rsa_priv_f3_unmap(struct device *dev, struct 
rsa_edesc *edesc,
        struct caam_rsa_key *key = &ctx->key;
        struct rsa_priv_f3_pdb *pdb = &edesc->pdb.priv_f3;
        size_t p_sz = key->p_sz;
-       size_t q_sz = key->p_sz;
+       size_t q_sz = key->q_sz;
 
        dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
        dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
@@ -166,18 +166,71 @@ static void rsa_priv_f3_done(struct device *dev, u32 *desc, u32 err,
        akcipher_request_complete(req, err);
 }
 
+static int caam_rsa_count_leading_zeros(struct scatterlist *sgl,
+                                       unsigned int nbytes,
+                                       unsigned int flags)
+{
+       struct sg_mapping_iter miter;
+       int lzeros, ents;
+       unsigned int len;
+       unsigned int tbytes = nbytes;
+       const u8 *buff;
+
+       ents = sg_nents_for_len(sgl, nbytes);
+       if (ents < 0)
+               return ents;
+
+       sg_miter_start(&miter, sgl, ents, SG_MITER_FROM_SG | flags);
+
+       lzeros = 0;
+       len = 0;
+       while (nbytes > 0) {
+               while (len && !*buff) {
+                       lzeros++;
+                       len--;
+                       buff++;
+               }
+
+               if (len && *buff)
+                       break;
+
+               sg_miter_next(&miter);
+               buff = miter.addr;
+               len = miter.length;
+
+               nbytes -= lzeros;
+               lzeros = 0;
+       }
+
+       miter.consumed = lzeros;
+       sg_miter_stop(&miter);
+       nbytes -= lzeros;
+
+       return tbytes - nbytes;
+}
+
 static struct rsa_edesc *rsa_edesc_alloc(struct akcipher_request *req,
                                         size_t desclen)
 {
        struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
        struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
        struct device *dev = ctx->dev;
+       struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
        struct rsa_edesc *edesc;
        gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
                       GFP_KERNEL : GFP_ATOMIC;
+       int sg_flags = (flags == GFP_ATOMIC) ? SG_MITER_ATOMIC : 0;
        int sgc;
        int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
        int src_nents, dst_nents;
+       int lzeros;
+
+       lzeros = caam_rsa_count_leading_zeros(req->src, req->src_len, sg_flags);
+       if (lzeros < 0)
+               return ERR_PTR(lzeros);
+
+       req->src_len -= lzeros;
+       req->src = scatterwalk_ffwd(req_ctx->src, req->src, lzeros);
 
        src_nents = sg_nents_for_len(req->src, req->src_len);
        dst_nents = sg_nents_for_len(req->dst, req->dst_len);
@@ -344,7 +397,7 @@ static int set_rsa_priv_f2_pdb(struct akcipher_request *req,
        struct rsa_priv_f2_pdb *pdb = &edesc->pdb.priv_f2;
        int sec4_sg_index = 0;
        size_t p_sz = key->p_sz;
-       size_t q_sz = key->p_sz;
+       size_t q_sz = key->q_sz;
 
        pdb->d_dma = dma_map_single(dev, key->d, key->d_sz, DMA_TO_DEVICE);
        if (dma_mapping_error(dev, pdb->d_dma)) {
@@ -419,7 +472,7 @@ static int set_rsa_priv_f3_pdb(struct akcipher_request *req,
        struct rsa_priv_f3_pdb *pdb = &edesc->pdb.priv_f3;
        int sec4_sg_index = 0;
        size_t p_sz = key->p_sz;
-       size_t q_sz = key->p_sz;
+       size_t q_sz = key->q_sz;
 
        pdb->p_dma = dma_map_single(dev, key->p, p_sz, DMA_TO_DEVICE);
        if (dma_mapping_error(dev, pdb->p_dma)) {
@@ -953,6 +1006,7 @@ static struct akcipher_alg caam_rsa = {
        .max_size = caam_rsa_max_size,
        .init = caam_rsa_init_tfm,
        .exit = caam_rsa_exit_tfm,
+       .reqsize = sizeof(struct caam_rsa_req_ctx),
        .base = {
                .cra_name = "rsa",
                .cra_driver_name = "rsa-caam",
diff --git a/drivers/crypto/caam/caampkc.h b/drivers/crypto/caam/caampkc.h
index fd145c46eae1..82645bcf8b27 100644
--- a/drivers/crypto/caam/caampkc.h
+++ b/drivers/crypto/caam/caampkc.h
@@ -95,6 +95,14 @@ struct caam_rsa_ctx {
        struct device *dev;
 };
 
+/**
+ * caam_rsa_req_ctx - per request context.
+ * @src: input scatterlist (stripped of leading zeros)
+ */
+struct caam_rsa_req_ctx {
+       struct scatterlist src[2];
+};
+
 /**
  * rsa_edesc - s/w-extended rsa descriptor
  * @src_nents     : number of segments in input scatterlist
diff --git a/drivers/crypto/cavium/zip/common.h b/drivers/crypto/cavium/zip/common.h
index dc451e0a43c5..58fb3ed6e644 100644
--- a/drivers/crypto/cavium/zip/common.h
+++ b/drivers/crypto/cavium/zip/common.h
@@ -46,8 +46,10 @@
 #ifndef __COMMON_H__
 #define __COMMON_H__
 
+#include <linux/delay.h>
 #include <linux/init.h>
 #include <linux/interrupt.h>
+#include <linux/io.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/pci.h>
@@ -149,6 +151,25 @@ struct zip_operation {
        u32   sizeofzops;
 };
 
+static inline int zip_poll_result(union zip_zres_s *result)
+{
+       int retries = 1000;
+
+       while (!result->s.compcode) {
+               if (!--retries) {
+                       pr_err("ZIP ERR: request timed out");
+                       return -ETIMEDOUT;
+               }
+               udelay(10);
+               /*
+                * Force re-reading of compcode which is updated
+                * by the ZIP coprocessor.
+                */
+               rmb();
+       }
+       return 0;
+}
+
 /* error messages */
 #define zip_err(fmt, args...) pr_err("ZIP ERR:%s():%d: " \
                              fmt "\n", __func__, __LINE__, ## args)
diff --git a/drivers/crypto/cavium/zip/zip_crypto.c b/drivers/crypto/cavium/zip/zip_crypto.c
index 8df4d26cf9d4..b92b6e7e100f 100644
--- a/drivers/crypto/cavium/zip/zip_crypto.c
+++ b/drivers/crypto/cavium/zip/zip_crypto.c
@@ -124,7 +124,7 @@ int zip_compress(const u8 *src, unsigned int slen,
                 struct zip_kernel_ctx *zip_ctx)
 {
        struct zip_operation  *zip_ops   = NULL;
-       struct zip_state      zip_state;
+       struct zip_state      *zip_state;
        struct zip_device     *zip = NULL;
        int ret;
 
@@ -135,20 +135,23 @@ int zip_compress(const u8 *src, unsigned int slen,
        if (!zip)
                return -ENODEV;
 
-       memset(&zip_state, 0, sizeof(struct zip_state));
+       zip_state = kzalloc(sizeof(*zip_state), GFP_ATOMIC);
+       if (!zip_state)
+               return -ENOMEM;
+
        zip_ops = &zip_ctx->zip_comp;
 
        zip_ops->input_len  = slen;
        zip_ops->output_len = *dlen;
        memcpy(zip_ops->input, src, slen);
 
-       ret = zip_deflate(zip_ops, &zip_state, zip);
+       ret = zip_deflate(zip_ops, zip_state, zip);
 
        if (!ret) {
                *dlen = zip_ops->output_len;
                memcpy(dst, zip_ops->output, *dlen);
        }
-
+       kfree(zip_state);
        return ret;
 }
 
@@ -157,7 +160,7 @@ int zip_decompress(const u8 *src, unsigned int slen,
                   struct zip_kernel_ctx *zip_ctx)
 {
        struct zip_operation  *zip_ops   = NULL;
-       struct zip_state      zip_state;
+       struct zip_state      *zip_state;
        struct zip_device     *zip = NULL;
        int ret;
 
@@ -168,7 +171,10 @@ int zip_decompress(const u8 *src, unsigned int slen,
        if (!zip)
                return -ENODEV;
 
-       memset(&zip_state, 0, sizeof(struct zip_state));
+       zip_state = kzalloc(sizeof(*zip_state), GFP_ATOMIC);
+       if (!zip_state)
+               return -ENOMEM;
+
        zip_ops = &zip_ctx->zip_decomp;
        memcpy(zip_ops->input, src, slen);
 
@@ -179,13 +185,13 @@ int zip_decompress(const u8 *src, unsigned int slen,
        zip_ops->input_len  = slen;
        zip_ops->output_len = *dlen;
 
-       ret = zip_inflate(zip_ops, &zip_state, zip);
+       ret = zip_inflate(zip_ops, zip_state, zip);
 
        if (!ret) {
                *dlen = zip_ops->output_len;
                memcpy(dst, zip_ops->output, *dlen);
        }
-
+       kfree(zip_state);
        return ret;
 }
 
diff --git a/drivers/crypto/cavium/zip/zip_deflate.c b/drivers/crypto/cavium/zip/zip_deflate.c
index 9a944b8c1e29..d7133f857d67 100644
--- a/drivers/crypto/cavium/zip/zip_deflate.c
+++ b/drivers/crypto/cavium/zip/zip_deflate.c
@@ -129,8 +129,8 @@ int zip_deflate(struct zip_operation *zip_ops, struct zip_state *s,
        /* Stats update for compression requests submitted */
        atomic64_inc(&zip_dev->stats.comp_req_submit);
 
-       while (!result_ptr->s.compcode)
-               continue;
+       /* Wait for completion or error */
+       zip_poll_result(result_ptr);
 
        /* Stats update for compression requests completed */
        atomic64_inc(&zip_dev->stats.comp_req_complete);
diff --git a/drivers/crypto/cavium/zip/zip_inflate.c b/drivers/crypto/cavium/zip/zip_inflate.c
index 50cbdd83dbf2..7e0d73e2f89e 100644
--- a/drivers/crypto/cavium/zip/zip_inflate.c
+++ b/drivers/crypto/cavium/zip/zip_inflate.c
@@ -143,8 +143,8 @@ int zip_inflate(struct zip_operation *zip_ops, struct zip_state *s,
        /* Decompression requests submitted stats update */
        atomic64_inc(&zip_dev->stats.decomp_req_submit);
 
-       while (!result_ptr->s.compcode)
-               continue;
+       /* Wait for completion or error */
+       zip_poll_result(result_ptr);
 
        /* Decompression requests completed stats update */
        atomic64_inc(&zip_dev->stats.decomp_req_complete);
diff --git a/drivers/crypto/chelsio/chcr_ipsec.c b/drivers/crypto/chelsio/chcr_ipsec.c
index db1e241104ed..1a2a51f2262a 100644
--- a/drivers/crypto/chelsio/chcr_ipsec.c
+++ b/drivers/crypto/chelsio/chcr_ipsec.c
@@ -346,18 +346,23 @@ inline void *copy_cpltx_pktxt(struct sk_buff *skb,
                                struct net_device *dev,
                                void *pos)
 {
+       struct cpl_tx_pkt_core *cpl;
+       struct sge_eth_txq *q;
        struct adapter *adap;
        struct port_info *pi;
-       struct sge_eth_txq *q;
-       struct cpl_tx_pkt_core *cpl;
-       u64 cntrl = 0;
        u32 ctrl0, qidx;
+       u64 cntrl = 0;
+       int left;
 
        pi = netdev_priv(dev);
        adap = pi->adapter;
        qidx = skb->queue_mapping;
        q = &adap->sge.ethtxq[qidx + pi->first_qset];
 
+       left = (void *)q->q.stat - pos;
+       if (!left)
+               pos = q->q.desc;
+
        cpl = (struct cpl_tx_pkt_core *)pos;
 
        if (skb->ip_summed == CHECKSUM_PARTIAL)
@@ -383,18 +388,17 @@ inline void *copy_key_cpltx_pktxt(struct sk_buff *skb,
                                void *pos,
                                struct ipsec_sa_entry *sa_entry)
 {
-       struct adapter *adap;
-       struct port_info *pi;
-       struct sge_eth_txq *q;
-       unsigned int len, qidx;
        struct _key_ctx *key_ctx;
        int left, eoq, key_len;
+       struct sge_eth_txq *q;
+       struct adapter *adap;
+       struct port_info *pi;
+       unsigned int qidx;
 
        pi = netdev_priv(dev);
        adap = pi->adapter;
        qidx = skb->queue_mapping;
        q = &adap->sge.ethtxq[qidx + pi->first_qset];
-       len = sa_entry->enckey_len + sizeof(struct cpl_tx_pkt_core);
        key_len = sa_entry->kctx_len;
 
        /* end of queue, reset pos to start of queue */
@@ -412,19 +416,14 @@ inline void *copy_key_cpltx_pktxt(struct sk_buff *skb,
        pos += sizeof(struct _key_ctx);
        left -= sizeof(struct _key_ctx);
 
-       if (likely(len <= left)) {
+       if (likely(key_len <= left)) {
                memcpy(key_ctx->key, sa_entry->key, key_len);
                pos += key_len;
        } else {
-               if (key_len <= left) {
-                       memcpy(pos, sa_entry->key, key_len);
-                       pos += key_len;
-               } else {
-                       memcpy(pos, sa_entry->key, left);
-                       memcpy(q->q.desc, sa_entry->key + left,
-                              key_len - left);
-                       pos = (u8 *)q->q.desc + (key_len - left);
-               }
+               memcpy(pos, sa_entry->key, left);
+               memcpy(q->q.desc, sa_entry->key + left,
+                      key_len - left);
+               pos = (u8 *)q->q.desc + (key_len - left);
        }
        /* Copy CPL TX PKT XT */
        pos = copy_cpltx_pktxt(skb, dev, pos);
diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c
index 86b89ace836f..07235d0be29c 100644
--- a/drivers/crypto/omap-sham.c
+++ b/drivers/crypto/omap-sham.c
@@ -1082,7 +1082,7 @@ static void omap_sham_finish_req(struct ahash_request *req, int err)
 
        if (test_bit(FLAGS_SGS_COPIED, &dd->flags))
                free_pages((unsigned long)sg_virt(ctx->sg),
-                          get_order(ctx->sg->length));
+                          get_order(ctx->sg->length + ctx->bufcnt));
 
        if (test_bit(FLAGS_SGS_ALLOCED, &dd->flags))
                kfree(ctx->sg);
diff --git a/drivers/crypto/vmx/aes.c b/drivers/crypto/vmx/aes.c
index 96072b9b55c4..d7316f7a3a69 100644
--- a/drivers/crypto/vmx/aes.c
+++ b/drivers/crypto/vmx/aes.c
@@ -48,8 +48,6 @@ static int p8_aes_init(struct crypto_tfm *tfm)
                       alg, PTR_ERR(fallback));
                return PTR_ERR(fallback);
        }
-       printk(KERN_INFO "Using '%s' as fallback implementation.\n",
-              crypto_tfm_alg_driver_name((struct crypto_tfm *) fallback));
 
        crypto_cipher_set_flags(fallback,
                                crypto_cipher_get_flags((struct
diff --git a/drivers/crypto/vmx/aes_cbc.c b/drivers/crypto/vmx/aes_cbc.c
index 7394d35d5936..5285ece4f33a 100644
--- a/drivers/crypto/vmx/aes_cbc.c
+++ b/drivers/crypto/vmx/aes_cbc.c
@@ -52,9 +52,6 @@ static int p8_aes_cbc_init(struct crypto_tfm *tfm)
                       alg, PTR_ERR(fallback));
                return PTR_ERR(fallback);
        }
-       printk(KERN_INFO "Using '%s' as fallback implementation.\n",
-               crypto_skcipher_driver_name(fallback));
-
 
        crypto_skcipher_set_flags(
                fallback,
diff --git a/drivers/crypto/vmx/aes_ctr.c b/drivers/crypto/vmx/aes_ctr.c
index fc60d00a2e84..cd777c75291d 100644
--- a/drivers/crypto/vmx/aes_ctr.c
+++ b/drivers/crypto/vmx/aes_ctr.c
@@ -50,8 +50,6 @@ static int p8_aes_ctr_init(struct crypto_tfm *tfm)
                       alg, PTR_ERR(fallback));
                return PTR_ERR(fallback);
        }
-       printk(KERN_INFO "Using '%s' as fallback implementation.\n",
-               crypto_skcipher_driver_name(fallback));
 
        crypto_skcipher_set_flags(
                fallback,
diff --git a/drivers/crypto/vmx/aes_xts.c b/drivers/crypto/vmx/aes_xts.c
index 8cd6e62e4c90..8bd9aff0f55f 100644
--- a/drivers/crypto/vmx/aes_xts.c
+++ b/drivers/crypto/vmx/aes_xts.c
@@ -53,8 +53,6 @@ static int p8_aes_xts_init(struct crypto_tfm *tfm)
                        alg, PTR_ERR(fallback));
                return PTR_ERR(fallback);
        }
-       printk(KERN_INFO "Using '%s' as fallback implementation.\n",
-               crypto_skcipher_driver_name(fallback));
 
        crypto_skcipher_set_flags(
                fallback,
diff --git a/drivers/crypto/vmx/ghash.c b/drivers/crypto/vmx/ghash.c
index 27a94a119009..1c4b5b889fba 100644
--- a/drivers/crypto/vmx/ghash.c
+++ b/drivers/crypto/vmx/ghash.c
@@ -64,8 +64,6 @@ static int p8_ghash_init_tfm(struct crypto_tfm *tfm)
                       alg, PTR_ERR(fallback));
                return PTR_ERR(fallback);
        }
-       printk(KERN_INFO "Using '%s' as fallback implementation.\n",
-              crypto_tfm_alg_driver_name(crypto_shash_tfm(fallback)));
 
        crypto_shash_set_flags(fallback,
                               crypto_shash_get_flags((struct crypto_shash
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
index 9d2688f3f961..cba80b0df188 100644
--- a/drivers/input/joystick/xpad.c
+++ b/drivers/input/joystick/xpad.c
@@ -123,6 +123,7 @@ static const struct xpad_device {
        u8 mapping;
        u8 xtype;
 } xpad_device[] = {
+       { 0x0079, 0x18d4, "GPD Win 2 Controller", 0, XTYPE_XBOX360 },
        { 0x044f, 0x0f00, "Thrustmaster Wheel", 0, XTYPE_XBOX },
        { 0x044f, 0x0f03, "Thrustmaster Wheel", 0, XTYPE_XBOX },
        { 0x044f, 0x0f07, "Thrustmaster, Inc. Controller", 0, XTYPE_XBOX },
@@ -408,6 +409,7 @@ static const signed short xpad_abs_triggers[] = {
 
 static const struct usb_device_id xpad_table[] = {
        { USB_INTERFACE_INFO('X', 'B', 0) },    /* X-Box USB-IF not approved class */
+       XPAD_XBOX360_VENDOR(0x0079),            /* GPD Win 2 Controller */
        XPAD_XBOX360_VENDOR(0x044f),            /* Thrustmaster X-Box 360 controllers */
        XPAD_XBOX360_VENDOR(0x045e),            /* Microsoft X-Box 360 controllers */
        XPAD_XBOXONE_VENDOR(0x045e),            /* Microsoft X-Box One controllers */
diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c
index 75e757520ef0..93967c8139e7 100644
--- a/drivers/input/mouse/elan_i2c_core.c
+++ b/drivers/input/mouse/elan_i2c_core.c
@@ -1262,6 +1262,7 @@ static const struct acpi_device_id elan_acpi_id[] = {
        { "ELAN060B", 0 },
        { "ELAN060C", 0 },
        { "ELAN0611", 0 },
+       { "ELAN0612", 0 },
        { "ELAN1000", 0 },
        { }
 };
diff --git a/drivers/input/touchscreen/goodix.c b/drivers/input/touchscreen/goodix.c
index 9736c83dd418..f2d9c2c41885 100644
--- a/drivers/input/touchscreen/goodix.c
+++ b/drivers/input/touchscreen/goodix.c
@@ -933,6 +933,7 @@ MODULE_DEVICE_TABLE(i2c, goodix_ts_id);
 #ifdef CONFIG_ACPI
 static const struct acpi_device_id goodix_acpi_match[] = {
        { "GDIX1001", 0 },
+       { "GDIX1002", 0 },
        { }
 };
 MODULE_DEVICE_TABLE(acpi, goodix_acpi_match);
diff --git a/drivers/misc/vmw_balloon.c b/drivers/misc/vmw_balloon.c
index 9047c0a529b2..efd733472a35 100644
--- a/drivers/misc/vmw_balloon.c
+++ b/drivers/misc/vmw_balloon.c
@@ -576,15 +576,9 @@ static void vmballoon_pop(struct vmballoon *b)
                }
        }
 
-       if (b->batch_page) {
-               vunmap(b->batch_page);
-               b->batch_page = NULL;
-       }
-
-       if (b->page) {
-               __free_page(b->page);
-               b->page = NULL;
-       }
+       /* Clearing the batch_page unconditionally has no adverse effect */
+       free_page((unsigned long)b->batch_page);
+       b->batch_page = NULL;
 }
 
 /*
@@ -991,16 +985,13 @@ static const struct vmballoon_ops vmballoon_batched_ops = {
 
 static bool vmballoon_init_batching(struct vmballoon *b)
 {
-       b->page = alloc_page(VMW_PAGE_ALLOC_NOSLEEP);
-       if (!b->page)
-               return false;
+       struct page *page;
 
-       b->batch_page = vmap(&b->page, 1, VM_MAP, PAGE_KERNEL);
-       if (!b->batch_page) {
-               __free_page(b->page);
+       page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+       if (!page)
                return false;
-       }
 
+       b->batch_page = page_address(page);
        return true;
 }
 
diff --git a/drivers/nfc/pn533/usb.c b/drivers/nfc/pn533/usb.c
index e153e8b64bb8..d5553c47014f 100644
--- a/drivers/nfc/pn533/usb.c
+++ b/drivers/nfc/pn533/usb.c
@@ -62,6 +62,9 @@ struct pn533_usb_phy {
        struct urb *out_urb;
        struct urb *in_urb;
 
+       struct urb *ack_urb;
+       u8 *ack_buffer;
+
        struct pn533 *priv;
 };
 
@@ -150,13 +153,16 @@ static int pn533_usb_send_ack(struct pn533 *dev, gfp_t flags)
        struct pn533_usb_phy *phy = dev->phy;
        static const u8 ack[6] = {0x00, 0x00, 0xff, 0x00, 0xff, 0x00};
        /* spec 7.1.1.3:  Preamble, SoPC (2), ACK Code (2), Postamble */
-       int rc;
 
-       phy->out_urb->transfer_buffer = (u8 *)ack;
-       phy->out_urb->transfer_buffer_length = sizeof(ack);
-       rc = usb_submit_urb(phy->out_urb, flags);
+       if (!phy->ack_buffer) {
+               phy->ack_buffer = kmemdup(ack, sizeof(ack), flags);
+               if (!phy->ack_buffer)
+                       return -ENOMEM;
+       }
 
-       return rc;
+       phy->ack_urb->transfer_buffer = phy->ack_buffer;
+       phy->ack_urb->transfer_buffer_length = sizeof(ack);
+       return usb_submit_urb(phy->ack_urb, flags);
 }
 
 static int pn533_usb_send_frame(struct pn533 *dev,
@@ -375,26 +381,31 @@ static int pn533_acr122_poweron_rdr(struct pn533_usb_phy *phy)
        /* Power on th reader (CCID cmd) */
        u8 cmd[10] = {PN533_ACR122_PC_TO_RDR_ICCPOWERON,
                      0, 0, 0, 0, 0, 0, 3, 0, 0};
+       char *buffer;
+       int transferred;
        int rc;
        void *cntx;
        struct pn533_acr122_poweron_rdr_arg arg;
 
        dev_dbg(&phy->udev->dev, "%s\n", __func__);
 
+       buffer = kmemdup(cmd, sizeof(cmd), GFP_KERNEL);
+       if (!buffer)
+               return -ENOMEM;
+
        init_completion(&arg.done);
        cntx = phy->in_urb->context;  /* backup context */
 
        phy->in_urb->complete = pn533_acr122_poweron_rdr_resp;
        phy->in_urb->context = &arg;
 
-       phy->out_urb->transfer_buffer = cmd;
-       phy->out_urb->transfer_buffer_length = sizeof(cmd);
-
        print_hex_dump_debug("ACR122 TX: ", DUMP_PREFIX_NONE, 16, 1,
                       cmd, sizeof(cmd), false);
 
-       rc = usb_submit_urb(phy->out_urb, GFP_KERNEL);
-       if (rc) {
+       rc = usb_bulk_msg(phy->udev, phy->out_urb->pipe, buffer, sizeof(cmd),
+                         &transferred, 0);
+       kfree(buffer);
+       if (rc || (transferred != sizeof(cmd))) {
                nfc_err(&phy->udev->dev,
                        "Reader power on cmd error %d\n", rc);
                return rc;
@@ -490,8 +501,9 @@ static int pn533_usb_probe(struct usb_interface *interface,
 
        phy->in_urb = usb_alloc_urb(0, GFP_KERNEL);
        phy->out_urb = usb_alloc_urb(0, GFP_KERNEL);
+       phy->ack_urb = usb_alloc_urb(0, GFP_KERNEL);
 
-       if (!phy->in_urb || !phy->out_urb)
+       if (!phy->in_urb || !phy->out_urb || !phy->ack_urb)
                goto error;
 
        usb_fill_bulk_urb(phy->in_urb, phy->udev,
@@ -501,7 +513,9 @@ static int pn533_usb_probe(struct usb_interface *interface,
        usb_fill_bulk_urb(phy->out_urb, phy->udev,
                          usb_sndbulkpipe(phy->udev, out_endpoint),
                          NULL, 0, pn533_send_complete, phy);
-
+       usb_fill_bulk_urb(phy->ack_urb, phy->udev,
+                         usb_sndbulkpipe(phy->udev, out_endpoint),
+                         NULL, 0, pn533_send_complete, phy);
 
        switch (id->driver_info) {
        case PN533_DEVICE_STD:
@@ -554,6 +568,7 @@ static int pn533_usb_probe(struct usb_interface *interface,
 error:
        usb_free_urb(phy->in_urb);
        usb_free_urb(phy->out_urb);
+       usb_free_urb(phy->ack_urb);
        usb_put_dev(phy->udev);
        kfree(in_buf);
 
@@ -573,10 +588,13 @@ static void pn533_usb_disconnect(struct usb_interface *interface)
 
        usb_kill_urb(phy->in_urb);
        usb_kill_urb(phy->out_urb);
+       usb_kill_urb(phy->ack_urb);
 
        kfree(phy->in_urb->transfer_buffer);
        usb_free_urb(phy->in_urb);
        usb_free_urb(phy->out_urb);
+       usb_free_urb(phy->ack_urb);
+       kfree(phy->ack_buffer);
 
        nfc_info(&interface->dev, "NXP PN533 NFC device disconnected\n");
 }
diff --git a/drivers/phy/qualcomm/phy-qcom-qusb2.c b/drivers/phy/qualcomm/phy-qcom-qusb2.c
index 6c575244c0fb..af9b7005a2ba 100644
--- a/drivers/phy/qualcomm/phy-qcom-qusb2.c
+++ b/drivers/phy/qualcomm/phy-qcom-qusb2.c
@@ -178,6 +178,10 @@ static void qusb2_phy_set_tune2_param(struct qusb2_phy *qphy)
        struct device *dev = &qphy->phy->dev;
        u8 *val;
 
+       /* efuse register is optional */
+       if (!qphy->cell)
+               return;
+
        /*
         * Read efuse register having TUNE2 parameter's high nibble.
         * If efuse register shows value as 0x0, or if we fail to find
diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c
index 57e0d8035b2e..b68700e58432 100644
--- a/drivers/staging/android/ion/ion.c
+++ b/drivers/staging/android/ion/ion.c
@@ -119,8 +119,11 @@ static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
 
 void ion_buffer_destroy(struct ion_buffer *buffer)
 {
-       if (WARN_ON(buffer->kmap_cnt > 0))
+       if (buffer->kmap_cnt > 0) {
+               pr_warn_once("%s: buffer still mapped in the kernel\n",
+                            __func__);
                buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
+       }
        buffer->heap->ops->free(buffer);
        kfree(buffer);
 }
diff --git a/drivers/tty/serial/8250/8250_omap.c b/drivers/tty/serial/8250/8250_omap.c
index 57f6eba47f44..425d393658ed 100644
--- a/drivers/tty/serial/8250/8250_omap.c
+++ b/drivers/tty/serial/8250/8250_omap.c
@@ -1101,13 +1101,14 @@ static int omap8250_no_handle_irq(struct uart_port *port)
        return 0;
 }
 
+static const u8 omap4_habit = UART_ERRATA_CLOCK_DISABLE;
 static const u8 am3352_habit = OMAP_DMA_TX_KICK | UART_ERRATA_CLOCK_DISABLE;
 static const u8 dra742_habit = UART_ERRATA_CLOCK_DISABLE;
 
 static const struct of_device_id omap8250_dt_ids[] = {
        { .compatible = "ti,omap2-uart" },
        { .compatible = "ti,omap3-uart" },
-       { .compatible = "ti,omap4-uart" },
+       { .compatible = "ti,omap4-uart", .data = &omap4_habit, },
        { .compatible = "ti,am3352-uart", .data = &am3352_habit, },
        { .compatible = "ti,am4372-uart", .data = &am3352_habit, },
        { .compatible = "ti,dra742-uart", .data = &dra742_habit, },
@@ -1344,6 +1345,19 @@ static int omap8250_soft_reset(struct device *dev)
        int sysc;
        int syss;
 
+       /*
+        * At least on omap4, unused uarts may not idle after reset without
+        * a basic scr dma configuration even with no dma in use. The
+        * module clkctrl status bits will be 1 instead of 3 blocking idle
+        * for the whole clockdomain. The softreset below will clear scr,
+        * and we restore it on resume so this is safe to do on all SoCs
+        * needing omap8250_soft_reset() quirk. Do it in two writes as
+        * recommended in the comment for omap8250_update_scr().
+        */
+       serial_out(up, UART_OMAP_SCR, OMAP_UART_SCR_DMAMODE_1);
+       serial_out(up, UART_OMAP_SCR,
+                  OMAP_UART_SCR_DMAMODE_1 | OMAP_UART_SCR_DMAMODE_CTL);
+
        sysc = serial_in(up, UART_OMAP_SYSC);
 
        /* softreset the UART */
diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
index 4b40a5b449ee..ebd33c0232e6 100644
--- a/drivers/tty/serial/amba-pl011.c
+++ b/drivers/tty/serial/amba-pl011.c
@@ -1727,10 +1727,26 @@ static int pl011_allocate_irq(struct uart_amba_port *uap)
  */
 static void pl011_enable_interrupts(struct uart_amba_port *uap)
 {
+       unsigned int i;
+
        spin_lock_irq(&uap->port.lock);
 
        /* Clear out any spuriously appearing RX interrupts */
        pl011_write(UART011_RTIS | UART011_RXIS, uap, REG_ICR);
+
+       /*
+        * RXIS is asserted only when the RX FIFO transitions from below
+        * to above the trigger threshold.  If the RX FIFO is already
+        * full to the threshold this can't happen and RXIS will now be
+        * stuck off.  Drain the RX FIFO explicitly to fix this:
+        */
+       for (i = 0; i < uap->fifosize * 2; ++i) {
+               if (pl011_read(uap, REG_FR) & UART01x_FR_RXFE)
+                       break;
+
+               pl011_read(uap, REG_DR);
+       }
+
        uap->im = UART011_RTIM;
        if (!pl011_dma_rx_running(uap))
                uap->im |= UART011_RXIM;
diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
index e287fe8f10fc..55b3eff148b1 100644
--- a/drivers/tty/serial/atmel_serial.c
+++ b/drivers/tty/serial/atmel_serial.c
@@ -1757,7 +1757,6 @@ static int atmel_startup(struct uart_port *port)
 {
        struct platform_device *pdev = to_platform_device(port->dev);
        struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
-       struct tty_struct *tty = port->state->port.tty;
        int retval;
 
        /*
@@ -1772,8 +1771,8 @@ static int atmel_startup(struct uart_port *port)
         * Allocate the IRQ
         */
        retval = request_irq(port->irq, atmel_interrupt,
-                       IRQF_SHARED | IRQF_COND_SUSPEND,
-                       tty ? tty->name : "atmel_serial", port);
+                            IRQF_SHARED | IRQF_COND_SUSPEND,
+                            dev_name(&pdev->dev), port);
        if (retval) {
                dev_err(port->dev, "atmel_startup - Can't get irq\n");
                return retval;
diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c
index 3f2f8c118ce0..64e96926f1ad 100644
--- a/drivers/tty/serial/samsung.c
+++ b/drivers/tty/serial/samsung.c
@@ -862,15 +862,12 @@ static int s3c24xx_serial_request_dma(struct s3c24xx_uart_port *p)
        dma->rx_conf.direction          = DMA_DEV_TO_MEM;
        dma->rx_conf.src_addr_width     = DMA_SLAVE_BUSWIDTH_1_BYTE;
        dma->rx_conf.src_addr           = p->port.mapbase + S3C2410_URXH;
-       dma->rx_conf.src_maxburst       = 16;
+       dma->rx_conf.src_maxburst       = 1;
 
        dma->tx_conf.direction          = DMA_MEM_TO_DEV;
        dma->tx_conf.dst_addr_width     = DMA_SLAVE_BUSWIDTH_1_BYTE;
        dma->tx_conf.dst_addr           = p->port.mapbase + S3C2410_UTXH;
-       if (dma_get_cache_alignment() >= 16)
-               dma->tx_conf.dst_maxburst = 16;
-       else
-               dma->tx_conf.dst_maxburst = 1;
+       dma->tx_conf.dst_maxburst       = 1;
 
        dma->rx_chan = dma_request_chan(p->port.dev, "rx");
 
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
index ab757546c6db..b8382135a78e 100644
--- a/drivers/tty/serial/sh-sci.c
+++ b/drivers/tty/serial/sh-sci.c
@@ -2691,8 +2691,8 @@ static int sci_init_clocks(struct sci_port *sci_port, struct device *dev)
                        dev_dbg(dev, "failed to get %s (%ld)\n", clk_names[i],
                                PTR_ERR(clk));
                else
-                       dev_dbg(dev, "clk %s is %pC rate %pCr\n", clk_names[i],
-                               clk, clk);
+                       dev_dbg(dev, "clk %s is %pC rate %lu\n", clk_names[i],
+                               clk, clk_get_rate(clk));
                sci_port->clks[i] = IS_ERR(clk) ? NULL : clk;
        }
        return 0;
diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
index 0c11d40a12bc..7b137003c2be 100644
--- a/drivers/usb/core/message.c
+++ b/drivers/usb/core/message.c
@@ -940,7 +940,7 @@ int usb_set_isoch_delay(struct usb_device *dev)
        return usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
                        USB_REQ_SET_ISOCH_DELAY,
                        USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE,
-                       cpu_to_le16(dev->hub_delay), 0, NULL, 0,
+                       dev->hub_delay, 0, NULL, 0,
                        USB_CTRL_SET_TIMEOUT);
 }
 
diff --git a/drivers/usb/gadget/function/f_printer.c b/drivers/usb/gadget/function/f_printer.c
index d359efe06c76..9c7ed2539ff7 100644
--- a/drivers/usb/gadget/function/f_printer.c
+++ b/drivers/usb/gadget/function/f_printer.c
@@ -631,19 +631,19 @@ printer_write(struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
                        return -EAGAIN;
                }
 
+               list_add(&req->list, &dev->tx_reqs_active);
+
                /* here, we unlock, and only unlock, to avoid deadlock. */
                spin_unlock(&dev->lock);
                value = usb_ep_queue(dev->in_ep, req, GFP_ATOMIC);
                spin_lock(&dev->lock);
                if (value) {
+                       list_del(&req->list);
                        list_add(&req->list, &dev->tx_reqs);
                        spin_unlock_irqrestore(&dev->lock, flags);
                        mutex_unlock(&dev->lock_printer_io);
                        return -EAGAIN;
                }
-
-               list_add(&req->list, &dev->tx_reqs_active);
-
        }
 
        spin_unlock_irqrestore(&dev->lock, flags);
diff --git a/drivers/usb/gadget/udc/renesas_usb3.c b/drivers/usb/gadget/udc/renesas_usb3.c
index 409cde4e6a51..5caf78bbbf7c 100644
--- a/drivers/usb/gadget/udc/renesas_usb3.c
+++ b/drivers/usb/gadget/udc/renesas_usb3.c
@@ -333,6 +333,7 @@ struct renesas_usb3 {
        struct extcon_dev *extcon;
        struct work_struct extcon_work;
        struct phy *phy;
+       struct dentry *dentry;
 
        struct renesas_usb3_ep *usb3_ep;
        int num_usb3_eps;
@@ -622,6 +623,13 @@ static void usb3_disconnect(struct renesas_usb3 *usb3)
        usb3_usb2_pullup(usb3, 0);
        usb3_clear_bit(usb3, USB30_CON_B3_CONNECT, USB3_USB30_CON);
        usb3_reset_epc(usb3);
+       usb3_disable_irq_1(usb3, USB_INT_1_B2_RSUM | USB_INT_1_B3_PLLWKUP |
+                          USB_INT_1_B3_LUPSUCS | USB_INT_1_B3_DISABLE |
+                          USB_INT_1_SPEED | USB_INT_1_B3_WRMRST |
+                          USB_INT_1_B3_HOTRST | USB_INT_1_B2_SPND |
+                          USB_INT_1_B2_L1SPND | USB_INT_1_B2_USBRST);
+       usb3_clear_bit(usb3, USB_COM_CON_SPD_MODE, USB3_USB_COM_CON);
+       usb3_init_epc_registers(usb3);
 
        if (usb3->driver)
                usb3->driver->disconnect(&usb3->gadget);
@@ -2393,8 +2401,12 @@ static void renesas_usb3_debugfs_init(struct 
renesas_usb3 *usb3,
 
        file = debugfs_create_file("b_device", 0644, root, usb3,
                                   &renesas_usb3_b_device_fops);
-       if (!file)
+       if (!file) {
                dev_info(dev, "%s: Can't create debugfs mode\n", __func__);
+               debugfs_remove_recursive(root);
+       } else {
+               usb3->dentry = root;
+       }
 }
 
 /*------- platform_driver ------------------------------------------------*/
@@ -2402,14 +2414,13 @@ static int renesas_usb3_remove(struct platform_device 
*pdev)
 {
        struct renesas_usb3 *usb3 = platform_get_drvdata(pdev);
 
+       debugfs_remove_recursive(usb3->dentry);
        device_remove_file(&pdev->dev, &dev_attr_role);
 
        usb_del_gadget_udc(&usb3->gadget);
        renesas_usb3_dma_free_prd(usb3, &pdev->dev);
 
        __renesas_usb3_ep_free_request(usb3->ep0_req);
-       if (usb3->phy)
-               phy_put(usb3->phy);
        pm_runtime_disable(&pdev->dev);
 
        return 0;
@@ -2628,6 +2639,17 @@ static int renesas_usb3_probe(struct platform_device 
*pdev)
        if (ret < 0)
                goto err_alloc_prd;
 
+       /*
+        * This is optional. So, if this driver cannot get a phy,
+        * this driver will not handle a phy anymore.
+        */
+       usb3->phy = devm_phy_optional_get(&pdev->dev, "usb");
+       if (IS_ERR(usb3->phy)) {
+               ret = PTR_ERR(usb3->phy);
+               goto err_add_udc;
+       }
+
+       pm_runtime_enable(&pdev->dev);
        ret = usb_add_gadget_udc(&pdev->dev, &usb3->gadget);
        if (ret < 0)
                goto err_add_udc;
@@ -2636,20 +2658,11 @@ static int renesas_usb3_probe(struct platform_device 
*pdev)
        if (ret < 0)
                goto err_dev_create;
 
-       /*
-        * This is an optional. So, if this driver cannot get a phy,
-        * this driver will not handle a phy anymore.
-        */
-       usb3->phy = devm_phy_get(&pdev->dev, "usb");
-       if (IS_ERR(usb3->phy))
-               usb3->phy = NULL;
-
        usb3->workaround_for_vbus = priv->workaround_for_vbus;
 
        renesas_usb3_debugfs_init(usb3, &pdev->dev);
 
        dev_info(&pdev->dev, "probed%s\n", usb3->phy ? " with phy" : "");
-       pm_runtime_enable(usb3_to_dev(usb3));
 
        return 0;
 
diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c
index 6034c39b67d1..9e9de5452860 100644
--- a/drivers/usb/storage/uas.c
+++ b/drivers/usb/storage/uas.c
@@ -836,6 +836,12 @@ static int uas_slave_configure(struct scsi_device *sdev)
        if (devinfo->flags & US_FL_BROKEN_FUA)
                sdev->broken_fua = 1;
 
+       /* UAS also needs to support FL_ALWAYS_SYNC */
+       if (devinfo->flags & US_FL_ALWAYS_SYNC) {
+               sdev->skip_ms_page_3f = 1;
+               sdev->skip_ms_page_8 = 1;
+               sdev->wce_default_on = 1;
+       }
        scsi_change_queue_depth(sdev, devinfo->qdepth - 2);
        return 0;
 }
diff --git a/drivers/usb/storage/unusual_devs.h 
b/drivers/usb/storage/unusual_devs.h
index 747d3a9596d9..22fcfccf453a 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -2321,6 +2321,15 @@ UNUSUAL_DEV(  0x4146, 0xba01, 0x0100, 0x0100,
                "Micro Mini 1GB",
                USB_SC_DEVICE, USB_PR_DEVICE, NULL, US_FL_NOT_LOCKABLE ),
 
+/* "G-DRIVE" external HDD hangs on write without these.
+ * Patch submitted by Alexander Kappner <a...@godking.net>
+ */
+UNUSUAL_DEV(0x4971, 0x8024, 0x0000, 0x9999,
+               "SimpleTech",
+               "External HDD",
+               USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+               US_FL_ALWAYS_SYNC),
+
 /*
  * Nick Bowler <nbow...@elliptictech.com>
  * SCSI stack spams (otherwise harmless) error messages.
diff --git a/drivers/usb/storage/unusual_uas.h 
b/drivers/usb/storage/unusual_uas.h
index 38434d88954a..d0bdebd87ce3 100644
--- a/drivers/usb/storage/unusual_uas.h
+++ b/drivers/usb/storage/unusual_uas.h
@@ -107,3 +107,12 @@ UNUSUAL_DEV(0x4971, 0x8017, 0x0000, 0x9999,
                "External HDD",
                USB_SC_DEVICE, USB_PR_DEVICE, NULL,
                US_FL_NO_REPORT_OPCODES),
+
+/* "G-DRIVE" external HDD hangs on write without these.
+ * Patch submitted by Alexander Kappner <a...@godking.net>
+ */
+UNUSUAL_DEV(0x4971, 0x8024, 0x0000, 0x9999,
+               "SimpleTech",
+               "External HDD",
+               USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+               US_FL_ALWAYS_SYNC),
diff --git a/drivers/usb/typec/typec_wcove.c b/drivers/usb/typec/typec_wcove.c
index 2e990e0d917d..1e13d4e15831 100644
--- a/drivers/usb/typec/typec_wcove.c
+++ b/drivers/usb/typec/typec_wcove.c
@@ -202,6 +202,10 @@ static int wcove_init(struct tcpc_dev *tcpc)
        struct wcove_typec *wcove = tcpc_to_wcove(tcpc);
        int ret;
 
+       ret = regmap_write(wcove->regmap, USBC_CONTROL1, 0);
+       if (ret)
+               return ret;
+
        /* Unmask everything */
        ret = regmap_write(wcove->regmap, USBC_IRQMASK1, 0);
        if (ret)
@@ -285,8 +289,30 @@ static int wcove_get_cc(struct tcpc_dev *tcpc, enum 
typec_cc_status *cc1,
 
 static int wcove_set_cc(struct tcpc_dev *tcpc, enum typec_cc_status cc)
 {
-       /* XXX: Relying on the HW FSM to configure things correctly for now */
-       return 0;
+       struct wcove_typec *wcove = tcpc_to_wcove(tcpc);
+       unsigned int ctrl;
+
+       switch (cc) {
+       case TYPEC_CC_RD:
+               ctrl = USBC_CONTROL1_MODE_SNK;
+               break;
+       case TYPEC_CC_RP_DEF:
+               ctrl = USBC_CONTROL1_CURSRC_UA_80 | USBC_CONTROL1_MODE_SRC;
+               break;
+       case TYPEC_CC_RP_1_5:
+               ctrl = USBC_CONTROL1_CURSRC_UA_180 | USBC_CONTROL1_MODE_SRC;
+               break;
+       case TYPEC_CC_RP_3_0:
+               ctrl = USBC_CONTROL1_CURSRC_UA_330 | USBC_CONTROL1_MODE_SRC;
+               break;
+       case TYPEC_CC_OPEN:
+               ctrl = 0;
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       return regmap_write(wcove->regmap, USBC_CONTROL1, ctrl);
 }
 
 static int wcove_set_polarity(struct tcpc_dev *tcpc, enum typec_cc_polarity 
pol)
diff --git a/drivers/usb/usbip/vhci_sysfs.c b/drivers/usb/usbip/vhci_sysfs.c
index 48808388ec33..be37aec250c2 100644
--- a/drivers/usb/usbip/vhci_sysfs.c
+++ b/drivers/usb/usbip/vhci_sysfs.c
@@ -10,6 +10,9 @@
 #include <linux/platform_device.h>
 #include <linux/slab.h>
 
+/* Hardening for Spectre-v1 */
+#include <linux/nospec.h>
+
 #include "usbip_common.h"
 #include "vhci.h"
 
@@ -205,16 +208,20 @@ static int vhci_port_disconnect(struct vhci_hcd 
*vhci_hcd, __u32 rhport)
        return 0;
 }
 
-static int valid_port(__u32 pdev_nr, __u32 rhport)
+static int valid_port(__u32 *pdev_nr, __u32 *rhport)
 {
-       if (pdev_nr >= vhci_num_controllers) {
-               pr_err("pdev %u\n", pdev_nr);
+       if (*pdev_nr >= vhci_num_controllers) {
+               pr_err("pdev %u\n", *pdev_nr);
                return 0;
        }
-       if (rhport >= VHCI_HC_PORTS) {
-               pr_err("rhport %u\n", rhport);
+       *pdev_nr = array_index_nospec(*pdev_nr, vhci_num_controllers);
+
+       if (*rhport >= VHCI_HC_PORTS) {
+               pr_err("rhport %u\n", *rhport);
                return 0;
        }
+       *rhport = array_index_nospec(*rhport, VHCI_HC_PORTS);
+
        return 1;
 }
 
@@ -232,7 +239,7 @@ static ssize_t detach_store(struct device *dev, struct 
device_attribute *attr,
        pdev_nr = port_to_pdev_nr(port);
        rhport = port_to_rhport(port);
 
-       if (!valid_port(pdev_nr, rhport))
+       if (!valid_port(&pdev_nr, &rhport))
                return -EINVAL;
 
        hcd = platform_get_drvdata(vhcis[pdev_nr].pdev);
@@ -258,7 +265,8 @@ static ssize_t detach_store(struct device *dev, struct 
device_attribute *attr,
 }
 static DEVICE_ATTR_WO(detach);
 
-static int valid_args(__u32 pdev_nr, __u32 rhport, enum usb_device_speed speed)
+static int valid_args(__u32 *pdev_nr, __u32 *rhport,
+                     enum usb_device_speed speed)
 {
        if (!valid_port(pdev_nr, rhport)) {
                return 0;
@@ -322,7 +330,7 @@ static ssize_t attach_store(struct device *dev, struct 
device_attribute *attr,
                             sockfd, devid, speed);
 
        /* check received parameters */
-       if (!valid_args(pdev_nr, rhport, speed))
+       if (!valid_args(&pdev_nr, &rhport, speed))
                return -EINVAL;
 
        hcd = platform_get_drvdata(vhcis[pdev_nr].pdev);
diff --git a/net/key/af_key.c b/net/key/af_key.c
index 7e2e7188e7f4..e62e52e8f141 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -437,6 +437,24 @@ static int verify_address_len(const void *p)
        return 0;
 }
 
+static inline int sadb_key_len(const struct sadb_key *key)
+{
+       int key_bytes = DIV_ROUND_UP(key->sadb_key_bits, 8);
+
+       return DIV_ROUND_UP(sizeof(struct sadb_key) + key_bytes,
+                           sizeof(uint64_t));
+}
+
+static int verify_key_len(const void *p)
+{
+       const struct sadb_key *key = p;
+
+       if (sadb_key_len(key) > key->sadb_key_len)
+               return -EINVAL;
+
+       return 0;
+}
+
 static inline int pfkey_sec_ctx_len(const struct sadb_x_sec_ctx *sec_ctx)
 {
        return DIV_ROUND_UP(sizeof(struct sadb_x_sec_ctx) +
@@ -533,16 +551,25 @@ static int parse_exthdrs(struct sk_buff *skb, const 
struct sadb_msg *hdr, void *
                                return -EINVAL;
                        if (ext_hdrs[ext_type-1] != NULL)
                                return -EINVAL;
-                       if (ext_type == SADB_EXT_ADDRESS_SRC ||
-                           ext_type == SADB_EXT_ADDRESS_DST ||
-                           ext_type == SADB_EXT_ADDRESS_PROXY ||
-                           ext_type == SADB_X_EXT_NAT_T_OA) {
+                       switch (ext_type) {
+                       case SADB_EXT_ADDRESS_SRC:
+                       case SADB_EXT_ADDRESS_DST:
+                       case SADB_EXT_ADDRESS_PROXY:
+                       case SADB_X_EXT_NAT_T_OA:
                                if (verify_address_len(p))
                                        return -EINVAL;
-                       }
-                       if (ext_type == SADB_X_EXT_SEC_CTX) {
+                               break;
+                       case SADB_X_EXT_SEC_CTX:
                                if (verify_sec_ctx_len(p))
                                        return -EINVAL;
+                               break;
+                       case SADB_EXT_KEY_AUTH:
+                       case SADB_EXT_KEY_ENCRYPT:
+                               if (verify_key_len(p))
+                                       return -EINVAL;
+                               break;
+                       default:
+                               break;
                        }
                        ext_hdrs[ext_type-1] = (void *) p;
                }
@@ -1104,14 +1131,12 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct 
net *net,
        key = ext_hdrs[SADB_EXT_KEY_AUTH - 1];
        if (key != NULL &&
            sa->sadb_sa_auth != SADB_X_AALG_NULL &&
-           ((key->sadb_key_bits+7) / 8 == 0 ||
-            (key->sadb_key_bits+7) / 8 > key->sadb_key_len * sizeof(uint64_t)))
+           key->sadb_key_bits == 0)
                return ERR_PTR(-EINVAL);
        key = ext_hdrs[SADB_EXT_KEY_ENCRYPT-1];
        if (key != NULL &&
            sa->sadb_sa_encrypt != SADB_EALG_NULL &&
-           ((key->sadb_key_bits+7) / 8 == 0 ||
-            (key->sadb_key_bits+7) / 8 > key->sadb_key_len * sizeof(uint64_t)))
+           key->sadb_key_bits == 0)
                return ERR_PTR(-EINVAL);
 
        x = xfrm_state_alloc(net);
diff --git a/net/netfilter/nft_ct.c b/net/netfilter/nft_ct.c
index 6ab274b14484..26f93c26db29 100644
--- a/net/netfilter/nft_ct.c
+++ b/net/netfilter/nft_ct.c
@@ -842,22 +842,26 @@ static int nft_ct_helper_obj_dump(struct sk_buff *skb,
                                  struct nft_object *obj, bool reset)
 {
        const struct nft_ct_helper_obj *priv = nft_obj_data(obj);
-       const struct nf_conntrack_helper *helper = priv->helper4;
+       const struct nf_conntrack_helper *helper;
        u16 family;
 
+       if (priv->helper4 && priv->helper6) {
+               family = NFPROTO_INET;
+               helper = priv->helper4;
+       } else if (priv->helper6) {
+               family = NFPROTO_IPV6;
+               helper = priv->helper6;
+       } else {
+               family = NFPROTO_IPV4;
+               helper = priv->helper4;
+       }
+
        if (nla_put_string(skb, NFTA_CT_HELPER_NAME, helper->name))
                return -1;
 
        if (nla_put_u8(skb, NFTA_CT_HELPER_L4PROTO, priv->l4proto))
                return -1;
 
-       if (priv->helper4 && priv->helper6)
-               family = NFPROTO_INET;
-       else if (priv->helper6)
-               family = NFPROTO_IPV6;
-       else
-               family = NFPROTO_IPV4;
-
        if (nla_put_be16(skb, NFTA_CT_HELPER_L3PROTO, htons(family)))
                return -1;
 

Reply via email to