diff --git a/Documentation/gpu/todo.rst b/Documentation/gpu/todo.rst
index b0ea17da8ff6..654649556306 100644
--- a/Documentation/gpu/todo.rst
+++ b/Documentation/gpu/todo.rst
@@ -273,6 +273,24 @@ Contact: Daniel Vetter, Noralf Tronnes
 
 Level: Advanced
 
+Garbage collect fbdev scrolling acceleration
+--------------------------------------------
+
+Scroll acceleration is disabled in fbcon by hard-wiring p->scrollmode =
+SCROLL_REDRAW. There's a ton of code this will allow us to remove:
+- lots of code in fbcon.c
+- a bunch of the hooks in fbcon_ops, maybe the remaining hooks could be called
+  directly instead of the function table (with a switch on p->rotate)
+- fb_copyarea is unused after this, and can be deleted from all drivers
+
+Note that not all acceleration code can be deleted: clearing and cursor
+support are still accelerated. Those might be good candidates for further
+deletion projects.
+
+Contact: Daniel Vetter
+
+Level: Intermediate
+
 idr_init_base()
 ---------------
 
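The TODO above suggests calling the remaining fbcon hooks directly via a
switch on p->rotate instead of going through the fbcon_ops function table. A
minimal sketch of that dispatch shape, with illustrative names rather than the
actual fbcon symbols:

    /* Sketch only: the enum mirrors fbcon's four rotations and the
     * putcs_* helpers stand in for the per-rotation implementations
     * currently reached through the fbcon_ops function table. */
    enum sketch_rotate { SK_ROTATE_UR, SK_ROTATE_CW, SK_ROTATE_UD, SK_ROTATE_CCW };

    static void putcs_ur(void)  { /* draw glyphs unrotated */ }
    static void putcs_cw(void)  { /* draw glyphs rotated 90 degrees */ }
    static void putcs_ud(void)  { /* draw glyphs rotated 180 degrees */ }
    static void putcs_ccw(void) { /* draw glyphs rotated 270 degrees */ }

    static void putcs_dispatch(enum sketch_rotate rotate)
    {
            switch (rotate) {
            case SK_ROTATE_UR:  putcs_ur();  break;
            case SK_ROTATE_CW:  putcs_cw();  break;
            case SK_ROTATE_UD:  putcs_ud();  break;
            case SK_ROTATE_CCW: putcs_ccw(); break;
            }
    }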
diff --git a/Makefile b/Makefile
index 1e50d6af932a..bb431fd473d2 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 5
 PATCHLEVEL = 10
-SUBLEVEL = 4
+SUBLEVEL = 5
 EXTRAVERSION =
 NAME = Kleptomaniac Octopus
 
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index ef12e097f318..27ca549ff47e 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -536,7 +536,7 @@ virtual_memmap_init(u64 start, u64 end, void *arg)
 
        if (map_start < map_end)
                memmap_init_zone((unsigned long)(map_end - map_start),
-                                args->nid, args->zone, page_to_pfn(map_start),
+                                args->nid, args->zone, page_to_pfn(map_start), page_to_pfn(map_end),
                                 MEMINIT_EARLY, NULL, MIGRATE_MOVABLE);
        return 0;
 }
@@ -546,7 +546,7 @@ memmap_init (unsigned long size, int nid, unsigned long zone,
             unsigned long start_pfn)
 {
        if (!vmem_map) {
-               memmap_init_zone(size, nid, zone, start_pfn,
+               memmap_init_zone(size, nid, zone, start_pfn, start_pfn + size,
                                 MEMINIT_EARLY, NULL, MIGRATE_MOVABLE);
        } else {
                struct page *start;
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 7d0f7682d01d..6b1eca53e36c 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -102,14 +102,6 @@ static inline notrace unsigned long get_irq_happened(void)
        return happened;
 }
 
-static inline notrace int decrementer_check_overflow(void)
-{
-       u64 now = get_tb();
-       u64 *next_tb = this_cpu_ptr(&decrementers_next_tb);
- 
-       return now >= *next_tb;
-}
-
 #ifdef CONFIG_PPC_BOOK3E
 
 /* This is called whenever we are re-enabling interrupts
@@ -142,35 +134,6 @@ notrace unsigned int __check_irq_replay(void)
        trace_hardirqs_on();
        trace_hardirqs_off();
 
-       /*
-        * We are always hard disabled here, but PACA_IRQ_HARD_DIS may
-        * not be set, which means interrupts have only just been hard
-        * disabled as part of the local_irq_restore or interrupt return
-        * code. In that case, skip the decrementr check becaus it's
-        * expensive to read the TB.
-        *
-        * HARD_DIS then gets cleared here, but it's reconciled later.
-        * Either local_irq_disable will replay the interrupt and that
-        * will reconcile state like other hard interrupts. Or interrupt
-        * retur will replay the interrupt and in that case it sets
-        * PACA_IRQ_HARD_DIS by hand (see comments in entry_64.S).
-        */
-       if (happened & PACA_IRQ_HARD_DIS) {
-               local_paca->irq_happened &= ~PACA_IRQ_HARD_DIS;
-
-               /*
-                * We may have missed a decrementer interrupt if hard disabled.
-                * Check the decrementer register in case we had a rollover
-                * while hard disabled.
-                */
-               if (!(happened & PACA_IRQ_DEC)) {
-                       if (decrementer_check_overflow()) {
-                               local_paca->irq_happened |= PACA_IRQ_DEC;
-                               happened |= PACA_IRQ_DEC;
-                       }
-               }
-       }
-
        if (happened & PACA_IRQ_DEC) {
                local_paca->irq_happened &= ~PACA_IRQ_DEC;
                return 0x900;
@@ -186,6 +149,9 @@ notrace unsigned int __check_irq_replay(void)
                return 0x280;
        }
 
+       if (happened & PACA_IRQ_HARD_DIS)
+               local_paca->irq_happened &= ~PACA_IRQ_HARD_DIS;
+
        /* There should be nothing left ! */
        BUG_ON(local_paca->irq_happened != 0);
 
@@ -229,18 +195,6 @@ void replay_soft_interrupts(void)
        if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
                WARN_ON_ONCE(mfmsr() & MSR_EE);
 
-       if (happened & PACA_IRQ_HARD_DIS) {
-               /*
-                * We may have missed a decrementer interrupt if hard disabled.
-                * Check the decrementer register in case we had a rollover
-                * while hard disabled.
-                */
-               if (!(happened & PACA_IRQ_DEC)) {
-                       if (decrementer_check_overflow())
-                               happened |= PACA_IRQ_DEC;
-               }
-       }
-
        /*
         * Force the delivery of pending soft-disabled interrupts on PS3.
         * Any HV call will have this side effect.
@@ -345,6 +299,7 @@ notrace void arch_local_irq_restore(unsigned long mask)
                if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
                        WARN_ON_ONCE(!(mfmsr() & MSR_EE));
                __hard_irq_disable();
+               local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
        } else {
                /*
                 * We should already be hard disabled here. We had bugs
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 74efe46f5532..7d372ff3504b 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -552,14 +552,11 @@ void timer_interrupt(struct pt_regs *regs)
        struct pt_regs *old_regs;
        u64 now;
 
-       /* Some implementations of hotplug will get timer interrupts while
-        * offline, just ignore these and we also need to set
-        * decrementers_next_tb as MAX to make sure __check_irq_replay
-        * don't replay timer interrupt when return, otherwise we'll trap
-        * here infinitely :(
+       /*
+        * Some implementations of hotplug will get timer interrupts while
+        * offline, just ignore these.
         */
        if (unlikely(!cpu_online(smp_processor_id()))) {
-               *next_tb = ~(u64)0;
                set_dec(decrementer_max);
                return;
        }
diff --git a/arch/powerpc/platforms/powernv/opal.c b/arch/powerpc/platforms/powernv/opal.c
index d95954ad4c0a..c61c3b62c8c6 100644
--- a/arch/powerpc/platforms/powernv/opal.c
+++ b/arch/powerpc/platforms/powernv/opal.c
@@ -731,7 +731,7 @@ int opal_hmi_exception_early2(struct pt_regs *regs)
        return 1;
 }
 
-/* HMI exception handler called in virtual mode during check_irq_replay. */
+/* HMI exception handler called in virtual mode when irqs are next enabled. */
 int opal_handle_hmi_exception(struct pt_regs *regs)
 {
        /*
diff --git a/arch/powerpc/sysdev/mpic_msgr.c b/arch/powerpc/sysdev/mpic_msgr.c
index f6b253e2be40..36ec0bdd8b63 100644
--- a/arch/powerpc/sysdev/mpic_msgr.c
+++ b/arch/powerpc/sysdev/mpic_msgr.c
@@ -191,7 +191,7 @@ static int mpic_msgr_probe(struct platform_device *dev)
 
        /* IO map the message register block. */
        of_address_to_resource(np, 0, &rsrc);
-       msgr_block_addr = ioremap(rsrc.start, resource_size(&rsrc));
+       msgr_block_addr = devm_ioremap(&dev->dev, rsrc.start, resource_size(&rsrc));
        if (!msgr_block_addr) {
                dev_err(&dev->dev, "Failed to iomap MPIC message registers");
                return -EFAULT;
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index 6343dca0dbeb..71203324ff42 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -406,6 +406,7 @@ ENTRY(system_call)
        mvc     __PT_PSW(16,%r11),__LC_SVC_OLD_PSW
        mvc     __PT_INT_CODE(4,%r11),__LC_SVC_ILC
        stg     %r14,__PT_FLAGS(%r11)
+       xc      __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
        ENABLE_INTS
 .Lsysc_do_svc:
        # clear user controlled register to prevent speculative use
@@ -422,7 +423,6 @@ ENTRY(system_call)
        jnl     .Lsysc_nr_ok
        slag    %r8,%r1,3
 .Lsysc_nr_ok:
-       xc      __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
        stg     %r2,__PT_ORIG_GPR2(%r11)
        stg     %r7,STACK_FRAME_OVERHEAD(%r15)
        lg      %r9,0(%r8,%r10)                 # get system call add.
@@ -712,8 +712,8 @@ ENTRY(pgm_check_handler)
        mvc     __THREAD_per_address(8,%r14),__LC_PER_ADDRESS
        mvc     __THREAD_per_cause(2,%r14),__LC_PER_CODE
        mvc     __THREAD_per_paid(1,%r14),__LC_PER_ACCESS_ID
-6:     RESTORE_SM_CLEAR_PER
-       xc      __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
+6:     xc      __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
+       RESTORE_SM_CLEAR_PER
        larl    %r1,pgm_check_table
        llgh    %r10,__PT_INT_CODE+2(%r11)
        nill    %r10,0x007f
@@ -734,8 +734,8 @@ ENTRY(pgm_check_handler)
 # PER event in supervisor state, must be kprobes
 #
 .Lpgm_kprobe:
-       RESTORE_SM_CLEAR_PER
        xc      __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
+       RESTORE_SM_CLEAR_PER
        lgr     %r2,%r11                # pass pointer to pt_regs
        brasl   %r14,do_per_trap
        j       .Lpgm_return
@@ -777,10 +777,10 @@ ENTRY(io_int_handler)
        stmg    %r8,%r9,__PT_PSW(%r11)
        mvc     __PT_INT_CODE(12,%r11),__LC_SUBCHANNEL_ID
        xc      __PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
+       xc      __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
        TSTMSK  __LC_CPU_FLAGS,_CIF_IGNORE_IRQ
        jo      .Lio_restore
        TRACE_IRQS_OFF
-       xc      __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
 .Lio_loop:
        lgr     %r2,%r11                # pass pointer to pt_regs
        lghi    %r3,IO_INTERRUPT
@@ -980,10 +980,10 @@ ENTRY(ext_int_handler)
        mvc     __PT_INT_PARM(4,%r11),__LC_EXT_PARAMS
        mvc     __PT_INT_PARM_LONG(8,%r11),0(%r1)
        xc      __PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
+       xc      __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
        TSTMSK  __LC_CPU_FLAGS,_CIF_IGNORE_IRQ
        jo      .Lio_restore
        TRACE_IRQS_OFF
-       xc      __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
        lgr     %r2,%r11                # pass pointer to pt_regs
        lghi    %r3,EXT_INTERRUPT
        brasl   %r14,do_IRQ
diff --git a/arch/um/drivers/random.c b/arch/um/drivers/random.c
index ce115fce52f0..e4b9b2ce9abf 100644
--- a/arch/um/drivers/random.c
+++ b/arch/um/drivers/random.c
@@ -11,6 +11,7 @@
 #include <linux/fs.h>
 #include <linux/interrupt.h>
 #include <linux/miscdevice.h>
+#include <linux/hw_random.h>
 #include <linux/delay.h>
 #include <linux/uaccess.h>
 #include <init.h>
@@ -18,9 +19,8 @@
 #include <os.h>
 
 /*
- * core module and version information
+ * core module information
  */
-#define RNG_VERSION "1.0.0"
 #define RNG_MODULE_NAME "hw_random"
 
 /* Changed at init time, in the non-modular case, and at module load
@@ -28,88 +28,36 @@
  * protects against a module being loaded twice at the same time.
  */
 static int random_fd = -1;
-static DECLARE_WAIT_QUEUE_HEAD(host_read_wait);
+static struct hwrng hwrng = { 0, };
+static DECLARE_COMPLETION(have_data);
 
-static int rng_dev_open (struct inode *inode, struct file *filp)
+static int rng_dev_read(struct hwrng *rng, void *buf, size_t max, bool block)
 {
-       /* enforce read-only access to this chrdev */
-       if ((filp->f_mode & FMODE_READ) == 0)
-               return -EINVAL;
-       if ((filp->f_mode & FMODE_WRITE) != 0)
-               return -EINVAL;
+       int ret;
 
-       return 0;
-}
-
-static atomic_t host_sleep_count = ATOMIC_INIT(0);
-
-static ssize_t rng_dev_read (struct file *filp, char __user *buf, size_t size,
-                            loff_t *offp)
-{
-       u32 data;
-       int n, ret = 0, have_data;
-
-       while (size) {
-               n = os_read_file(random_fd, &data, sizeof(data));
-               if (n > 0) {
-                       have_data = n;
-                       while (have_data && size) {
-                               if (put_user((u8) data, buf++)) {
-                                       ret = ret ? : -EFAULT;
-                                       break;
-                               }
-                               size--;
-                               ret++;
-                               have_data--;
-                               data >>= 8;
-                       }
-               }
-               else if (n == -EAGAIN) {
-                       DECLARE_WAITQUEUE(wait, current);
-
-                       if (filp->f_flags & O_NONBLOCK)
-                               return ret ? : -EAGAIN;
-
-                       atomic_inc(&host_sleep_count);
+       for (;;) {
+               ret = os_read_file(random_fd, buf, max);
+               if (block && ret == -EAGAIN) {
                        add_sigio_fd(random_fd);
 
-                       add_wait_queue(&host_read_wait, &wait);
-                       set_current_state(TASK_INTERRUPTIBLE);
+                       ret = wait_for_completion_killable(&have_data);
 
-                       schedule();
-                       remove_wait_queue(&host_read_wait, &wait);
+                       ignore_sigio_fd(random_fd);
+                       deactivate_fd(random_fd, RANDOM_IRQ);
 
-                       if (atomic_dec_and_test(&host_sleep_count)) {
-                               ignore_sigio_fd(random_fd);
-                               deactivate_fd(random_fd, RANDOM_IRQ);
-                       }
+                       if (ret < 0)
+                               break;
+               } else {
+                       break;
                }
-               else
-                       return n;
-
-               if (signal_pending (current))
-                       return ret ? : -ERESTARTSYS;
        }
-       return ret;
-}
 
-static const struct file_operations rng_chrdev_ops = {
-       .owner          = THIS_MODULE,
-       .open           = rng_dev_open,
-       .read           = rng_dev_read,
-       .llseek         = noop_llseek,
-};
-
-/* rng_init shouldn't be called more than once at boot time */
-static struct miscdevice rng_miscdev = {
-       HWRNG_MINOR,
-       RNG_MODULE_NAME,
-       &rng_chrdev_ops,
-};
+       return ret != -EAGAIN ? ret : 0;
+}
 
 static irqreturn_t random_interrupt(int irq, void *data)
 {
-       wake_up(&host_read_wait);
+       complete(&have_data);
 
        return IRQ_HANDLED;
 }
@@ -126,18 +74,19 @@ static int __init rng_init (void)
                goto out;
 
        random_fd = err;
-
        err = um_request_irq(RANDOM_IRQ, random_fd, IRQ_READ, random_interrupt,
                             0, "random", NULL);
        if (err)
                goto err_out_cleanup_hw;
 
        sigio_broken(random_fd, 1);
+       hwrng.name = RNG_MODULE_NAME;
+       hwrng.read = rng_dev_read;
+       hwrng.quality = 1024;
 
-       err = misc_register (&rng_miscdev);
+       err = hwrng_register(&hwrng);
        if (err) {
-               printk (KERN_ERR RNG_MODULE_NAME ": misc device register "
-                       "failed\n");
+               pr_err(RNG_MODULE_NAME " registering failed (%d)\n", err);
                goto err_out_cleanup_hw;
        }
 out:
@@ -161,8 +110,8 @@ static void cleanup(void)
 
 static void __exit rng_cleanup(void)
 {
+       hwrng_unregister(&hwrng);
        os_close_file(random_fd);
-       misc_deregister (&rng_miscdev);
 }
 
 module_init (rng_init);
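The conversion above moves the driver from a hand-rolled misc chrdev to the
rng-core framework: a driver fills in a struct hwrng with a name, a read
callback and a quality estimate, then calls hwrng_register(). A minimal sketch
of that registration pattern, assuming the 5.10-era <linux/hw_random.h> API;
the backend here is a hypothetical stand-in, not the UML host read path:

    #include <linux/hw_random.h>
    #include <linux/module.h>
    #include <linux/string.h>

    /* Hypothetical backend: fill buf with up to max random bytes and
     * return the number of bytes produced, or a negative errno. */
    static int sketch_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
    {
            memset(buf, 0, max);    /* stand-in for reading real hardware */
            return max;
    }

    static struct hwrng sketch_rng = {
            .name    = "sketch",
            .read    = sketch_rng_read,
            .quality = 1024,        /* bits of entropy per 1024 bits of input */
    };

    static int __init sketch_rng_init(void)
    {
            return hwrng_register(&sketch_rng);
    }

    static void __exit sketch_rng_exit(void)
    {
            hwrng_unregister(&sketch_rng);
    }

    module_init(sketch_rng_init);
    module_exit(sketch_rng_exit);
    MODULE_LICENSE("GPL");

rng-core then exposes the device through /dev/hwrng and feeds the kernel
entropy pool, which is why the bespoke miscdevice and wait-queue plumbing
above could be deleted.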
diff --git a/arch/um/drivers/ubd_kern.c b/arch/um/drivers/ubd_kern.c
index eae8c83364f7..b12c1b0d3e1d 100644
--- a/arch/um/drivers/ubd_kern.c
+++ b/arch/um/drivers/ubd_kern.c
@@ -47,18 +47,25 @@
 /* Max request size is determined by sector mask - 32K */
 #define UBD_MAX_REQUEST (8 * sizeof(long))
 
+struct io_desc {
+       char *buffer;
+       unsigned long length;
+       unsigned long sector_mask;
+       unsigned long long cow_offset;
+       unsigned long bitmap_words[2];
+};
+
 struct io_thread_req {
        struct request *req;
        int fds[2];
        unsigned long offsets[2];
        unsigned long long offset;
-       unsigned long length;
-       char *buffer;
        int sectorsize;
-       unsigned long sector_mask;
-       unsigned long long cow_offset;
-       unsigned long bitmap_words[2];
        int error;
+
+       int desc_cnt;
+       /* io_desc has to be the last element of the struct */
+       struct io_desc io_desc[];
 };
 
 
@@ -525,12 +532,7 @@ static void ubd_handler(void)
                                blk_queue_max_write_zeroes_sectors(io_req->req->q, 0);
                                blk_queue_flag_clear(QUEUE_FLAG_DISCARD, io_req->req->q);
                        }
-                       if ((io_req->error) || (io_req->buffer == NULL))
-                               blk_mq_end_request(io_req->req, io_req->error);
-                       else {
-                               if (!blk_update_request(io_req->req, io_req->error, io_req->length))
-                                       __blk_mq_end_request(io_req->req, io_req->error);
-                       }
+                       blk_mq_end_request(io_req->req, io_req->error);
                        kfree(io_req);
                }
        }
@@ -946,6 +948,7 @@ static int ubd_add(int n, char **error_out)
        blk_queue_write_cache(ubd_dev->queue, true, false);
 
        blk_queue_max_segments(ubd_dev->queue, MAX_SG);
+       blk_queue_segment_boundary(ubd_dev->queue, PAGE_SIZE - 1);
        err = ubd_disk_register(UBD_MAJOR, ubd_dev->size, n, &ubd_gendisk[n]);
        if(err){
                *error_out = "Failed to register device";
@@ -1289,37 +1292,74 @@ static void cowify_bitmap(__u64 io_offset, int length, unsigned long *cow_mask,
        *cow_offset += bitmap_offset;
 }
 
-static void cowify_req(struct io_thread_req *req, unsigned long *bitmap,
+static void cowify_req(struct io_thread_req *req, struct io_desc *segment,
+                      unsigned long offset, unsigned long *bitmap,
                       __u64 bitmap_offset, __u64 bitmap_len)
 {
-       __u64 sector = req->offset >> SECTOR_SHIFT;
+       __u64 sector = offset >> SECTOR_SHIFT;
        int i;
 
-       if (req->length > (sizeof(req->sector_mask) * 8) << SECTOR_SHIFT)
+       if (segment->length > (sizeof(segment->sector_mask) * 8) << SECTOR_SHIFT)
                panic("Operation too long");
 
        if (req_op(req->req) == REQ_OP_READ) {
-               for (i = 0; i < req->length >> SECTOR_SHIFT; i++) {
+               for (i = 0; i < segment->length >> SECTOR_SHIFT; i++) {
                        if(ubd_test_bit(sector + i, (unsigned char *) bitmap))
                                ubd_set_bit(i, (unsigned char *)
-                                           &req->sector_mask);
+                                           &segment->sector_mask);
+               }
+       } else {
+               cowify_bitmap(offset, segment->length, &segment->sector_mask,
+                             &segment->cow_offset, bitmap, bitmap_offset,
+                             segment->bitmap_words, bitmap_len);
+       }
+}
+
+static void ubd_map_req(struct ubd *dev, struct io_thread_req *io_req,
+                       struct request *req)
+{
+       struct bio_vec bvec;
+       struct req_iterator iter;
+       int i = 0;
+       unsigned long byte_offset = io_req->offset;
+       int op = req_op(req);
+
+       if (op == REQ_OP_WRITE_ZEROES || op == REQ_OP_DISCARD) {
+               io_req->io_desc[0].buffer = NULL;
+               io_req->io_desc[0].length = blk_rq_bytes(req);
+       } else {
+               rq_for_each_segment(bvec, req, iter) {
+                       BUG_ON(i >= io_req->desc_cnt);
+
+                       io_req->io_desc[i].buffer =
+                               page_address(bvec.bv_page) + bvec.bv_offset;
+                       io_req->io_desc[i].length = bvec.bv_len;
+                       i++;
+               }
+       }
+
+       if (dev->cow.file) {
+               for (i = 0; i < io_req->desc_cnt; i++) {
+                       cowify_req(io_req, &io_req->io_desc[i], byte_offset,
+                                  dev->cow.bitmap, dev->cow.bitmap_offset,
+                                  dev->cow.bitmap_len);
+                       byte_offset += io_req->io_desc[i].length;
                }
+
        }
-       else cowify_bitmap(req->offset, req->length, &req->sector_mask,
-                          &req->cow_offset, bitmap, bitmap_offset,
-                          req->bitmap_words, bitmap_len);
 }
 
-static int ubd_queue_one_vec(struct blk_mq_hw_ctx *hctx, struct request *req,
-               u64 off, struct bio_vec *bvec)
+static struct io_thread_req *ubd_alloc_req(struct ubd *dev, struct request *req,
+                                          int desc_cnt)
 {
-       struct ubd *dev = hctx->queue->queuedata;
        struct io_thread_req *io_req;
-       int ret;
+       int i;
 
-       io_req = kmalloc(sizeof(struct io_thread_req), GFP_ATOMIC);
+       io_req = kmalloc(sizeof(*io_req) +
+                        (desc_cnt * sizeof(struct io_desc)),
+                        GFP_ATOMIC);
        if (!io_req)
-               return -ENOMEM;
+               return NULL;
 
        io_req->req = req;
        if (dev->cow.file)
@@ -1327,26 +1367,41 @@ static int ubd_queue_one_vec(struct blk_mq_hw_ctx *hctx, struct request *req,
        else
                io_req->fds[0] = dev->fd;
        io_req->error = 0;
-
-       if (bvec != NULL) {
-               io_req->buffer = page_address(bvec->bv_page) + bvec->bv_offset;
-               io_req->length = bvec->bv_len;
-       } else {
-               io_req->buffer = NULL;
-               io_req->length = blk_rq_bytes(req);
-       }
-
        io_req->sectorsize = SECTOR_SIZE;
        io_req->fds[1] = dev->fd;
-       io_req->cow_offset = -1;
-       io_req->offset = off;
-       io_req->sector_mask = 0;
+       io_req->offset = (u64) blk_rq_pos(req) << SECTOR_SHIFT;
        io_req->offsets[0] = 0;
        io_req->offsets[1] = dev->cow.data_offset;
 
-       if (dev->cow.file)
-               cowify_req(io_req, dev->cow.bitmap,
-                          dev->cow.bitmap_offset, dev->cow.bitmap_len);
+       for (i = 0 ; i < desc_cnt; i++) {
+               io_req->io_desc[i].sector_mask = 0;
+               io_req->io_desc[i].cow_offset = -1;
+       }
+
+       return io_req;
+}
+
+static int ubd_submit_request(struct ubd *dev, struct request *req)
+{
+       int segs = 0;
+       struct io_thread_req *io_req;
+       int ret;
+       int op = req_op(req);
+
+       if (op == REQ_OP_FLUSH)
+               segs = 0;
+       else if (op == REQ_OP_WRITE_ZEROES || op == REQ_OP_DISCARD)
+               segs = 1;
+       else
+               segs = blk_rq_nr_phys_segments(req);
+
+       io_req = ubd_alloc_req(dev, req, segs);
+       if (!io_req)
+               return -ENOMEM;
+
+       io_req->desc_cnt = segs;
+       if (segs)
+               ubd_map_req(dev, io_req, req);
 
        ret = os_write_file(thread_fd, &io_req, sizeof(io_req));
        if (ret != sizeof(io_req)) {
@@ -1357,22 +1412,6 @@ static int ubd_queue_one_vec(struct blk_mq_hw_ctx *hctx, struct request *req,
        return ret;
 }
 
-static int queue_rw_req(struct blk_mq_hw_ctx *hctx, struct request *req)
-{
-       struct req_iterator iter;
-       struct bio_vec bvec;
-       int ret;
-       u64 off = (u64)blk_rq_pos(req) << SECTOR_SHIFT;
-
-       rq_for_each_segment(bvec, req, iter) {
-               ret = ubd_queue_one_vec(hctx, req, off, &bvec);
-               if (ret < 0)
-                       return ret;
-               off += bvec.bv_len;
-       }
-       return 0;
-}
-
 static blk_status_t ubd_queue_rq(struct blk_mq_hw_ctx *hctx,
                                 const struct blk_mq_queue_data *bd)
 {
@@ -1385,17 +1424,12 @@ static blk_status_t ubd_queue_rq(struct blk_mq_hw_ctx *hctx,
        spin_lock_irq(&ubd_dev->lock);
 
        switch (req_op(req)) {
-       /* operations with no lentgth/offset arguments */
        case REQ_OP_FLUSH:
-               ret = ubd_queue_one_vec(hctx, req, 0, NULL);
-               break;
        case REQ_OP_READ:
        case REQ_OP_WRITE:
-               ret = queue_rw_req(hctx, req);
-               break;
        case REQ_OP_DISCARD:
        case REQ_OP_WRITE_ZEROES:
-               ret = ubd_queue_one_vec(hctx, req, (u64)blk_rq_pos(req) << 9, NULL);
+               ret = ubd_submit_request(ubd_dev, req);
                break;
        default:
                WARN_ON_ONCE(1);
@@ -1483,22 +1517,22 @@ static int map_error(int error_code)
  * will result in unpredictable behaviour and/or crashes.
  */
 
-static int update_bitmap(struct io_thread_req *req)
+static int update_bitmap(struct io_thread_req *req, struct io_desc *segment)
 {
        int n;
 
-       if(req->cow_offset == -1)
+       if (segment->cow_offset == -1)
                return map_error(0);
 
-       n = os_pwrite_file(req->fds[1], &req->bitmap_words,
-                         sizeof(req->bitmap_words), req->cow_offset);
-       if (n != sizeof(req->bitmap_words))
+       n = os_pwrite_file(req->fds[1], &segment->bitmap_words,
+                         sizeof(segment->bitmap_words), segment->cow_offset);
+       if (n != sizeof(segment->bitmap_words))
                return map_error(-n);
 
        return map_error(0);
 }
 
-static void do_io(struct io_thread_req *req)
+static void do_io(struct io_thread_req *req, struct io_desc *desc)
 {
        char *buf = NULL;
        unsigned long len;
@@ -1513,21 +1547,20 @@ static void do_io(struct io_thread_req *req)
                return;
        }
 
-       nsectors = req->length / req->sectorsize;
+       nsectors = desc->length / req->sectorsize;
        start = 0;
        do {
-               bit = ubd_test_bit(start, (unsigned char *) &req->sector_mask);
+               bit = ubd_test_bit(start, (unsigned char *) &desc->sector_mask);
                end = start;
                while((end < nsectors) &&
-                     (ubd_test_bit(end, (unsigned char *)
-                                   &req->sector_mask) == bit))
+                     (ubd_test_bit(end, (unsigned char *) &desc->sector_mask) == bit))
                        end++;
 
                off = req->offset + req->offsets[bit] +
                        start * req->sectorsize;
                len = (end - start) * req->sectorsize;
-               if (req->buffer != NULL)
-                       buf = &req->buffer[start * req->sectorsize];
+               if (desc->buffer != NULL)
+                       buf = &desc->buffer[start * req->sectorsize];
 
                switch (req_op(req->req)) {
                case REQ_OP_READ:
@@ -1567,7 +1600,8 @@ static void do_io(struct io_thread_req *req)
                start = end;
        } while(start < nsectors);
 
-       req->error = update_bitmap(req);
+       req->offset += len;
+       req->error = update_bitmap(req, desc);
 }
 
 /* Changed in start_io_thread, which is serialized by being called only
@@ -1600,8 +1634,13 @@ int io_thread(void *arg)
                }
 
                for (count = 0; count < n/sizeof(struct io_thread_req *); count++) {
+                       struct io_thread_req *req = (*io_req_buffer)[count];
+                       int i;
+
                        io_count++;
-                       do_io((*io_req_buffer)[count]);
+                       for (i = 0; !req->error && i < req->desc_cnt; i++)
+                               do_io(req, &(req->io_desc[i]));
+
                }
 
                written = 0;
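The reworked io_thread_req above carries its per-segment io_desc entries as a
C99 flexible array member, so a single allocation covers the header and all
descriptors. A self-contained sketch of the idiom, assuming the struct_size()
overflow-safe helper from <linux/overflow.h> (the patch itself spells the size
arithmetic out by hand):

    #include <linux/overflow.h>
    #include <linux/slab.h>

    struct sketch_desc {
            void *buffer;
            unsigned long length;
    };

    struct sketch_req {
            int desc_cnt;
            /* flexible array member: must stay the last field */
            struct sketch_desc desc[];
    };

    static struct sketch_req *sketch_alloc(int nsegs)
    {
            /* struct_size() computes sizeof(*req) + nsegs * sizeof(desc[0])
             * with overflow checking. */
            struct sketch_req *req = kmalloc(struct_size(req, desc, nsegs),
                                             GFP_ATOMIC);

            if (req)
                    req->desc_cnt = nsegs;
            return req;
    }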
diff --git a/block/blk-pm.c b/block/blk-pm.c
index b85234d758f7..17bd020268d4 100644
--- a/block/blk-pm.c
+++ b/block/blk-pm.c
@@ -67,6 +67,10 @@ int blk_pre_runtime_suspend(struct request_queue *q)
 
        WARN_ON_ONCE(q->rpm_status != RPM_ACTIVE);
 
+       spin_lock_irq(&q->queue_lock);
+       q->rpm_status = RPM_SUSPENDING;
+       spin_unlock_irq(&q->queue_lock);
+
        /*
         * Increase the pm_only counter before checking whether any
         * non-PM blk_queue_enter() calls are in progress to avoid that any
@@ -89,15 +93,14 @@ int blk_pre_runtime_suspend(struct request_queue *q)
        /* Switch q_usage_counter back to per-cpu mode. */
        blk_mq_unfreeze_queue(q);
 
-       spin_lock_irq(&q->queue_lock);
-       if (ret < 0)
+       if (ret < 0) {
+               spin_lock_irq(&q->queue_lock);
+               q->rpm_status = RPM_ACTIVE;
                pm_runtime_mark_last_busy(q->dev);
-       else
-               q->rpm_status = RPM_SUSPENDING;
-       spin_unlock_irq(&q->queue_lock);
+               spin_unlock_irq(&q->queue_lock);
 
-       if (ret)
                blk_clear_pm_only(q);
+       }
 
        return ret;
 }
diff --git a/drivers/bluetooth/hci_h5.c b/drivers/bluetooth/hci_h5.c
index 78d635f1d156..376164cdf2ea 100644
--- a/drivers/bluetooth/hci_h5.c
+++ b/drivers/bluetooth/hci_h5.c
@@ -251,8 +251,12 @@ static int h5_close(struct hci_uart *hu)
        if (h5->vnd && h5->vnd->close)
                h5->vnd->close(h5);
 
-       if (!hu->serdev)
-               kfree(h5);
+       if (hu->serdev)
+               serdev_device_close(hu->serdev);
+
+       kfree_skb(h5->rx_skb);
+       kfree(h5);
+       h5 = NULL;
 
        return 0;
 }
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig
index e92c4d9469d8..5952210526aa 100644
--- a/drivers/char/hw_random/Kconfig
+++ b/drivers/char/hw_random/Kconfig
@@ -540,15 +540,15 @@ endif # HW_RANDOM
 
 config UML_RANDOM
        depends on UML
-       tristate "Hardware random number generator"
+       select HW_RANDOM
+       tristate "UML Random Number Generator support"
        help
          This option enables UML's "hardware" random number generator.  It
          attaches itself to the host's /dev/random, supplying as much entropy
          as the host has, rather than the small amount the UML gets from its
-         own drivers.  It registers itself as a standard hardware random number
-         generator, major 10, minor 183, and the canonical device name is
-         /dev/hwrng.
-         The way to make use of this is to install the rng-tools package
-         (check your distro, or download from
-         http://sourceforge.net/projects/gkernel/).  rngd periodically reads
-         /dev/hwrng and injects the entropy into /dev/random.
+         own drivers. It registers itself as an rng-core driver, providing
+         a device which is usually called /dev/hwrng. This hardware random
+         number generator feeds into the kernel's random number generator
+         entropy pool.
+
+         If unsure, say Y.
diff --git a/drivers/dax/bus.c b/drivers/dax/bus.c
index 27513d311242..de7b74505e75 100644
--- a/drivers/dax/bus.c
+++ b/drivers/dax/bus.c
@@ -367,19 +367,28 @@ void kill_dev_dax(struct dev_dax *dev_dax)
 }
 EXPORT_SYMBOL_GPL(kill_dev_dax);
 
-static void free_dev_dax_ranges(struct dev_dax *dev_dax)
+static void trim_dev_dax_range(struct dev_dax *dev_dax)
 {
+       int i = dev_dax->nr_range - 1;
+       struct range *range = &dev_dax->ranges[i].range;
        struct dax_region *dax_region = dev_dax->region;
-       int i;
 
        device_lock_assert(dax_region->dev);
-       for (i = 0; i < dev_dax->nr_range; i++) {
-               struct range *range = &dev_dax->ranges[i].range;
-
-               __release_region(&dax_region->res, range->start,
-                               range_len(range));
+       dev_dbg(&dev_dax->dev, "delete range[%d]: %#llx:%#llx\n", i,
+               (unsigned long long)range->start,
+               (unsigned long long)range->end);
+
+       __release_region(&dax_region->res, range->start, range_len(range));
+       if (--dev_dax->nr_range == 0) {
+               kfree(dev_dax->ranges);
+               dev_dax->ranges = NULL;
        }
-       dev_dax->nr_range = 0;
+}
+
+static void free_dev_dax_ranges(struct dev_dax *dev_dax)
+{
+       while (dev_dax->nr_range)
+               trim_dev_dax_range(dev_dax);
 }
 
 static void unregister_dev_dax(void *dev)
@@ -804,15 +813,10 @@ static int alloc_dev_dax_range(struct dev_dax *dev_dax, u64 start,
                return 0;
 
        rc = devm_register_dax_mapping(dev_dax, dev_dax->nr_range - 1);
-       if (rc) {
-               dev_dbg(dev, "delete range[%d]: %pa:%pa\n", dev_dax->nr_range - 1,
-                               &alloc->start, &alloc->end);
-               dev_dax->nr_range--;
-               __release_region(res, alloc->start, resource_size(alloc));
-               return rc;
-       }
+       if (rc)
+               trim_dev_dax_range(dev_dax);
 
-       return 0;
+       return rc;
 }
 
 static int adjust_dev_dax_range(struct dev_dax *dev_dax, struct resource *res, resource_size_t size)
@@ -885,12 +889,7 @@ static int dev_dax_shrink(struct dev_dax *dev_dax, resource_size_t size)
                if (shrink >= range_len(range)) {
                        devm_release_action(dax_region->dev,
                                        unregister_dax_mapping, &mapping->dev);
-                       __release_region(&dax_region->res, range->start,
-                                       range_len(range));
-                       dev_dax->nr_range--;
-                       dev_dbg(dev, "delete range[%d]: %#llx:%#llx\n", i,
-                                       (unsigned long long) range->start,
-                                       (unsigned long long) range->end);
+                       trim_dev_dax_range(dev_dax);
                        to_shrink -= shrink;
                        if (!to_shrink)
                                break;
@@ -1274,7 +1273,6 @@ static void dev_dax_release(struct device *dev)
        put_dax(dax_dev);
        free_dev_dax_id(dev_dax);
        dax_region_put(dax_region);
-       kfree(dev_dax->ranges);
        kfree(dev_dax->pgmap);
        kfree(dev_dax);
 }
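The dax/bus.c change funnels every range teardown through
trim_dev_dax_range(), which pops ranges from the tail and frees the ranges
array itself once the last one is gone; that is why the kfree(dev_dax->ranges)
in dev_dax_release() can be dropped. A self-contained userspace sketch of that
ownership pattern, with illustrative names:

    #include <stdlib.h>

    struct range_set {
            int nr;
            struct { long start, end; } *r;
    };

    /* Release the last range; the array goes with the final one. */
    static void trim_last(struct range_set *rs)
    {
            /* release resources tied to rs->r[rs->nr - 1] here */
            if (--rs->nr == 0) {
                    free(rs->r);
                    rs->r = NULL;
            }
    }

    static void free_all(struct range_set *rs)
    {
            while (rs->nr)
                    trim_last(rs);
    }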
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
index 6b431db146cd..1c6e401dd4cc 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
@@ -704,24 +704,24 @@ static struct wm_table ddr4_wm_table_rn = {
                        .wm_inst = WM_B,
                        .wm_type = WM_TYPE_PSTATE_CHG,
                        .pstate_latency_us = 11.72,
-                       .sr_exit_time_us = 10.12,
-                       .sr_enter_plus_exit_time_us = 11.48,
+                       .sr_exit_time_us = 11.12,
+                       .sr_enter_plus_exit_time_us = 12.48,
                        .valid = true,
                },
                {
                        .wm_inst = WM_C,
                        .wm_type = WM_TYPE_PSTATE_CHG,
                        .pstate_latency_us = 11.72,
-                       .sr_exit_time_us = 10.12,
-                       .sr_enter_plus_exit_time_us = 11.48,
+                       .sr_exit_time_us = 11.12,
+                       .sr_enter_plus_exit_time_us = 12.48,
                        .valid = true,
                },
                {
                        .wm_inst = WM_D,
                        .wm_type = WM_TYPE_PSTATE_CHG,
                        .pstate_latency_us = 11.72,
-                       .sr_exit_time_us = 10.12,
-                       .sr_enter_plus_exit_time_us = 11.48,
+                       .sr_exit_time_us = 11.12,
+                       .sr_enter_plus_exit_time_us = 12.48,
                        .valid = true,
                },
        }
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
index b409f6b2bfd8..210466b2d863 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
@@ -119,7 +119,8 @@ static const struct link_encoder_funcs dce110_lnk_enc_funcs = {
        .disable_hpd = dce110_link_encoder_disable_hpd,
        .is_dig_enabled = dce110_is_dig_enabled,
        .destroy = dce110_link_encoder_destroy,
-       .get_max_link_cap = dce110_link_encoder_get_max_link_cap
+       .get_max_link_cap = dce110_link_encoder_get_max_link_cap,
+       .get_dig_frontend = dce110_get_dig_frontend,
 };
 
 static enum bp_result link_transmitter_control(
@@ -235,6 +236,44 @@ static void set_link_training_complete(
 
 }
 
+unsigned int dce110_get_dig_frontend(struct link_encoder *enc)
+{
+       struct dce110_link_encoder *enc110 = TO_DCE110_LINK_ENC(enc);
+       u32 value;
+       enum engine_id result;
+
+       REG_GET(DIG_BE_CNTL, DIG_FE_SOURCE_SELECT, &value);
+
+       switch (value) {
+       case DCE110_DIG_FE_SOURCE_SELECT_DIGA:
+               result = ENGINE_ID_DIGA;
+               break;
+       case DCE110_DIG_FE_SOURCE_SELECT_DIGB:
+               result = ENGINE_ID_DIGB;
+               break;
+       case DCE110_DIG_FE_SOURCE_SELECT_DIGC:
+               result = ENGINE_ID_DIGC;
+               break;
+       case DCE110_DIG_FE_SOURCE_SELECT_DIGD:
+               result = ENGINE_ID_DIGD;
+               break;
+       case DCE110_DIG_FE_SOURCE_SELECT_DIGE:
+               result = ENGINE_ID_DIGE;
+               break;
+       case DCE110_DIG_FE_SOURCE_SELECT_DIGF:
+               result = ENGINE_ID_DIGF;
+               break;
+       case DCE110_DIG_FE_SOURCE_SELECT_DIGG:
+               result = ENGINE_ID_DIGG;
+               break;
+       default:
+               // invalid source select DIG
+               result = ENGINE_ID_UNKNOWN;
+       }
+
+       return result;
+}
+
 void dce110_link_encoder_set_dp_phy_pattern_training_pattern(
        struct link_encoder *enc,
        uint32_t index)
@@ -1665,7 +1704,8 @@ static const struct link_encoder_funcs dce60_lnk_enc_funcs = {
        .disable_hpd = dce110_link_encoder_disable_hpd,
        .is_dig_enabled = dce110_is_dig_enabled,
        .destroy = dce110_link_encoder_destroy,
-       .get_max_link_cap = dce110_link_encoder_get_max_link_cap
+       .get_max_link_cap = dce110_link_encoder_get_max_link_cap,
+       .get_dig_frontend = dce110_get_dig_frontend
 };
 
 void dce60_link_encoder_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h
index cb714a48b171..fc6ade824c23 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h
@@ -295,6 +295,8 @@ void dce110_link_encoder_connect_dig_be_to_fe(
        enum engine_id engine,
        bool connect);
 
+unsigned int dce110_get_dig_frontend(struct link_encoder *enc);
+
 void dce110_link_encoder_set_dp_phy_pattern_training_pattern(
        struct link_encoder *enc,
        uint32_t index);
diff --git a/drivers/i3c/master.c b/drivers/i3c/master.c
index 1c6b78ad5ade..b61bf53ec07a 100644
--- a/drivers/i3c/master.c
+++ b/drivers/i3c/master.c
@@ -2537,7 +2537,7 @@ int i3c_master_register(struct i3c_master_controller *master,
 
        ret = i3c_master_bus_init(master);
        if (ret)
-               goto err_put_dev;
+               goto err_destroy_wq;
 
        ret = device_add(&master->dev);
        if (ret)
@@ -2568,6 +2568,9 @@ int i3c_master_register(struct i3c_master_controller *master,
 err_cleanup_bus:
        i3c_master_bus_cleanup(master);
 
+err_destroy_wq:
+       destroy_workqueue(master->wq);
+
 err_put_dev:
        put_device(&master->dev);
 
diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c
index f74982dcbea0..6b8e5bdd8526 100644
--- a/drivers/md/dm-verity-target.c
+++ b/drivers/md/dm-verity-target.c
@@ -537,6 +537,15 @@ static int verity_verify_io(struct dm_verity_io *io)
        return 0;
 }
 
+/*
+ * Skip verity work in response to I/O error when system is shutting down.
+ */
+static inline bool verity_is_system_shutting_down(void)
+{
+       return system_state == SYSTEM_HALT || system_state == SYSTEM_POWER_OFF
+               || system_state == SYSTEM_RESTART;
+}
+
 /*
  * End one "io" structure with a given error.
  */
@@ -564,7 +573,8 @@ static void verity_end_io(struct bio *bio)
 {
        struct dm_verity_io *io = bio->bi_private;
 
-       if (bio->bi_status && !verity_fec_is_enabled(io->v)) {
+       if (bio->bi_status &&
+           (!verity_fec_is_enabled(io->v) || 
verity_is_system_shutting_down())) {
                verity_finish_io(io, bio->bi_status);
                return;
        }
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 3b598a3cb462..9f9d8b67b5dd 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1128,7 +1128,7 @@ static void raid10_read_request(struct mddev *mddev, struct bio *bio,
        struct md_rdev *err_rdev = NULL;
        gfp_t gfp = GFP_NOIO;
 
-       if (r10_bio->devs[slot].rdev) {
+       if (slot >= 0 && r10_bio->devs[slot].rdev) {
                /*
                 * This is an error retry, but we cannot
                 * safely dereference the rdev in the r10_bio,
@@ -1493,6 +1493,7 @@ static void __make_request(struct mddev *mddev, struct bio *bio, int sectors)
        r10_bio->mddev = mddev;
        r10_bio->sector = bio->bi_iter.bi_sector;
        r10_bio->state = 0;
+       r10_bio->read_slot = -1;
        memset(r10_bio->devs, 0, sizeof(r10_bio->devs[0]) * conf->copies);
 
        if (bio_data_dir(bio) == READ)
diff --git a/drivers/media/usb/dvb-usb/gp8psk.c b/drivers/media/usb/dvb-usb/gp8psk.c
index c07f46f5176e..b4f661bb5648 100644
--- a/drivers/media/usb/dvb-usb/gp8psk.c
+++ b/drivers/media/usb/dvb-usb/gp8psk.c
@@ -182,7 +182,7 @@ static int gp8psk_load_bcm4500fw(struct dvb_usb_device *d)
 
 static int gp8psk_power_ctrl(struct dvb_usb_device *d, int onoff)
 {
-       u8 status, buf;
+       u8 status = 0, buf;
        int gp_product_id = le16_to_cpu(d->udev->descriptor.idProduct);
 
        if (onoff) {
diff --git a/drivers/misc/vmw_vmci/vmci_context.c b/drivers/misc/vmw_vmci/vmci_context.c
index 16695366ec92..26ff49fdf0f7 100644
--- a/drivers/misc/vmw_vmci/vmci_context.c
+++ b/drivers/misc/vmw_vmci/vmci_context.c
@@ -743,7 +743,7 @@ static int vmci_ctx_get_chkpt_doorbells(struct vmci_ctx *context,
                        return VMCI_ERROR_MORE_DATA;
                }
 
-               dbells = kmalloc(data_size, GFP_ATOMIC);
+               dbells = kzalloc(data_size, GFP_ATOMIC);
                if (!dbells)
                        return VMCI_ERROR_NO_MEM;
 
diff --git a/drivers/opp/core.c b/drivers/opp/core.c
index 0e0a5269dc82..903b465c8568 100644
--- a/drivers/opp/core.c
+++ b/drivers/opp/core.c
@@ -1102,7 +1102,7 @@ static struct opp_table *_allocate_opp_table(struct device *dev, int index)
        if (IS_ERR(opp_table->clk)) {
                ret = PTR_ERR(opp_table->clk);
                if (ret == -EPROBE_DEFER)
-                       goto err;
+                       goto remove_opp_dev;
 
                dev_dbg(dev, "%s: Couldn't find clock: %d\n", __func__, ret);
        }
@@ -1111,7 +1111,7 @@ static struct opp_table *_allocate_opp_table(struct device *dev, int index)
        ret = dev_pm_opp_of_find_icc_paths(dev, opp_table);
        if (ret) {
                if (ret == -EPROBE_DEFER)
-                       goto err;
+                       goto put_clk;
 
                dev_warn(dev, "%s: Error finding interconnect paths: %d\n",
                         __func__, ret);
@@ -1125,6 +1125,11 @@ static struct opp_table *_allocate_opp_table(struct device *dev, int index)
        list_add(&opp_table->node, &opp_tables);
        return opp_table;
 
+put_clk:
+       if (!IS_ERR(opp_table->clk))
+               clk_put(opp_table->clk);
+remove_opp_dev:
+       _remove_opp_dev(opp_dev, opp_table);
 err:
        kfree(opp_table);
        return ERR_PTR(ret);
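The i3c and OPP fixes in this series repair the same shape of bug: a
goto-based error ladder where each label must undo exactly the steps that
succeeded before the failure, in reverse order. A self-contained userspace
illustration of the idiom:

    #include <stdio.h>
    #include <stdlib.h>

    /* Acquire two resources; on failure, unwind in reverse order so each
     * label releases only what was actually set up before the jump. */
    static int setup(const char *path)
    {
            char *buf = malloc(4096);
            FILE *f;

            if (!buf)
                    return -1;

            f = fopen(path, "r");
            if (!f)
                    goto err_free_buf;

            /* ... use f and buf ... */

            fclose(f);
            free(buf);
            return 0;

    err_free_buf:
            free(buf);
            return -1;
    }

    int main(int argc, char **argv)
    {
            if (argc < 2)
                    return 0;
            return setup(argv[1]) ? 1 : 0;  /* 1 on failure */
    }

Jumping to the wrong label, as _allocate_opp_table() did before this fix,
either leaks the earlier steps or releases something that was never acquired.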
diff --git a/drivers/rtc/rtc-pl031.c b/drivers/rtc/rtc-pl031.c
index c6b89273feba..d4b2ab786126 100644
--- a/drivers/rtc/rtc-pl031.c
+++ b/drivers/rtc/rtc-pl031.c
@@ -361,8 +361,10 @@ static int pl031_probe(struct amba_device *adev, const struct amba_id *id)
 
        device_init_wakeup(&adev->dev, true);
        ldata->rtc = devm_rtc_allocate_device(&adev->dev);
-       if (IS_ERR(ldata->rtc))
-               return PTR_ERR(ldata->rtc);
+       if (IS_ERR(ldata->rtc)) {
+               ret = PTR_ERR(ldata->rtc);
+               goto out;
+       }
 
        ldata->rtc->ops = ops;
        ldata->rtc->range_min = vendor->range_min;
diff --git a/drivers/rtc/rtc-sun6i.c b/drivers/rtc/rtc-sun6i.c
index e2b8b150bcb4..f2818cdd11d8 100644
--- a/drivers/rtc/rtc-sun6i.c
+++ b/drivers/rtc/rtc-sun6i.c
@@ -272,7 +272,7 @@ static void __init sun6i_rtc_clk_init(struct device_node *node,
                                                                300000000);
        if (IS_ERR(rtc->int_osc)) {
                pr_crit("Couldn't register the internal oscillator\n");
-               return;
+               goto err;
        }
 
        parents[0] = clk_hw_get_name(rtc->int_osc);
@@ -290,7 +290,7 @@ static void __init sun6i_rtc_clk_init(struct device_node *node,
        rtc->losc = clk_register(NULL, &rtc->hw);
        if (IS_ERR(rtc->losc)) {
                pr_crit("Couldn't register the LOSC clock\n");
-               return;
+               goto err_register;
        }
 
        of_property_read_string_index(node, "clock-output-names", 1,
@@ -301,7 +301,7 @@ static void __init sun6i_rtc_clk_init(struct device_node *node,
                                          &rtc->lock);
        if (IS_ERR(rtc->ext_losc)) {
                pr_crit("Couldn't register the LOSC external gate\n");
-               return;
+               goto err_register;
        }
 
        clk_data->num = 2;
@@ -314,6 +314,8 @@ static void __init sun6i_rtc_clk_init(struct device_node *node,
        of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
        return;
 
+err_register:
+       clk_hw_unregister_fixed_rate(rtc->int_osc);
 err:
        kfree(clk_data);
 }
diff --git a/drivers/scsi/cxgbi/cxgb4i/Kconfig b/drivers/scsi/cxgbi/cxgb4i/Kconfig
index b206e266b4e7..8b0deece9758 100644
--- a/drivers/scsi/cxgbi/cxgb4i/Kconfig
+++ b/drivers/scsi/cxgbi/cxgb4i/Kconfig
@@ -4,6 +4,7 @@ config SCSI_CXGB4_ISCSI
        depends on PCI && INET && (IPV6 || IPV6=n)
        depends on THERMAL || !THERMAL
        depends on ETHERNET
+       depends on TLS || TLS=n
        select NET_VENDOR_CHELSIO
        select CHELSIO_T4
        select CHELSIO_LIB
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index 3fd16b7f6150..aadaea052f51 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -256,6 +256,7 @@ config SPI_DW_BT1
        tristate "Baikal-T1 SPI driver for DW SPI core"
        depends on MIPS_BAIKAL_T1 || COMPILE_TEST
        select MULTIPLEXER
+       select MUX_MMIO
        help
          Baikal-T1 SoC is equipped with three DW APB SSI-based MMIO SPI
          controllers. Two of them are pretty much normal: with IRQ, DMA,
@@ -269,8 +270,6 @@ config SPI_DW_BT1
 config SPI_DW_BT1_DIRMAP
        bool "Directly mapped Baikal-T1 Boot SPI flash support"
        depends on SPI_DW_BT1
-       select MULTIPLEXER
-       select MUX_MMIO
        help
          Directly mapped SPI flash memory is an interface specific to the
          Baikal-T1 System Boot Controller. It is a 16MB MMIO region, which
diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c
index cef437817b0d..8d1ae973041a 100644
--- a/drivers/video/fbdev/core/fbcon.c
+++ b/drivers/video/fbdev/core/fbcon.c
@@ -1033,7 +1033,7 @@ static void fbcon_init(struct vc_data *vc, int init)
        struct vc_data *svc = *default_mode;
        struct fbcon_display *t, *p = &fb_display[vc->vc_num];
        int logo = 1, new_rows, new_cols, rows, cols, charcnt = 256;
-       int cap, ret;
+       int ret;
 
        if (WARN_ON(info_idx == -1))
            return;
@@ -1042,7 +1042,6 @@ static void fbcon_init(struct vc_data *vc, int init)
                con2fb_map[vc->vc_num] = info_idx;
 
        info = registered_fb[con2fb_map[vc->vc_num]];
-       cap = info->flags;
 
        if (logo_shown < 0 && console_loglevel <= CONSOLE_LOGLEVEL_QUIET)
                logo_shown = FBCON_LOGO_DONTSHOW;
@@ -1147,11 +1146,13 @@ static void fbcon_init(struct vc_data *vc, int init)
 
        ops->graphics = 0;
 
-       if ((cap & FBINFO_HWACCEL_COPYAREA) &&
-           !(cap & FBINFO_HWACCEL_DISABLED))
-               p->scrollmode = SCROLL_MOVE;
-       else /* default to something safe */
-               p->scrollmode = SCROLL_REDRAW;
+       /*
+        * No more hw acceleration for fbcon.
+        *
+        * FIXME: Garbage collect all the now dead code after sufficient time
+        * has passed.
+        */
+       p->scrollmode = SCROLL_REDRAW;
 
        /*
         *  ++guenther: console.c:vc_allocate() relies on initializing
@@ -1961,45 +1962,15 @@ static void updatescrollmode(struct fbcon_display *p,
 {
        struct fbcon_ops *ops = info->fbcon_par;
        int fh = vc->vc_font.height;
-       int cap = info->flags;
-       u16 t = 0;
-       int ypan = FBCON_SWAP(ops->rotate, info->fix.ypanstep,
-                                 info->fix.xpanstep);
-       int ywrap = FBCON_SWAP(ops->rotate, info->fix.ywrapstep, t);
        int yres = FBCON_SWAP(ops->rotate, info->var.yres, info->var.xres);
        int vyres = FBCON_SWAP(ops->rotate, info->var.yres_virtual,
                                   info->var.xres_virtual);
-       int good_pan = (cap & FBINFO_HWACCEL_YPAN) &&
-               divides(ypan, vc->vc_font.height) && vyres > yres;
-       int good_wrap = (cap & FBINFO_HWACCEL_YWRAP) &&
-               divides(ywrap, vc->vc_font.height) &&
-               divides(vc->vc_font.height, vyres) &&
-               divides(vc->vc_font.height, yres);
-       int reading_fast = cap & FBINFO_READS_FAST;
-       int fast_copyarea = (cap & FBINFO_HWACCEL_COPYAREA) &&
-               !(cap & FBINFO_HWACCEL_DISABLED);
-       int fast_imageblit = (cap & FBINFO_HWACCEL_IMAGEBLIT) &&
-               !(cap & FBINFO_HWACCEL_DISABLED);
 
        p->vrows = vyres/fh;
        if (yres > (fh * (vc->vc_rows + 1)))
                p->vrows -= (yres - (fh * vc->vc_rows)) / fh;
        if ((yres % fh) && (vyres % fh < yres % fh))
                p->vrows--;
-
-       if (good_wrap || good_pan) {
-               if (reading_fast || fast_copyarea)
-                       p->scrollmode = good_wrap ?
-                               SCROLL_WRAP_MOVE : SCROLL_PAN_MOVE;
-               else
-                       p->scrollmode = good_wrap ? SCROLL_REDRAW :
-                               SCROLL_PAN_REDRAW;
-       } else {
-               if (reading_fast || (fast_copyarea && !fast_imageblit))
-                       p->scrollmode = SCROLL_MOVE;
-               else
-                       p->scrollmode = SCROLL_REDRAW;
-       }
 }
 
 #define PITCH(w) (((w) + 7) >> 3)
diff --git a/drivers/watchdog/rti_wdt.c b/drivers/watchdog/rti_wdt.c
index 836319cbaca9..359302f71f7e 100644
--- a/drivers/watchdog/rti_wdt.c
+++ b/drivers/watchdog/rti_wdt.c
@@ -227,8 +227,10 @@ static int rti_wdt_probe(struct platform_device *pdev)
 
        pm_runtime_enable(dev);
        ret = pm_runtime_get_sync(dev);
-       if (ret)
+       if (ret) {
+               pm_runtime_put_noidle(dev);
                return dev_err_probe(dev, ret, "runtime pm failed\n");
+       }
 
        platform_set_drvdata(pdev, wdt);
 
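The watchdog fix applies the usual pm_runtime_get_sync() error idiom: the call
raises the device's usage count even when it fails, so the error path must
drop that count with pm_runtime_put_noidle() before bailing out. A minimal
sketch; note the canonical form checks for negative returns, since get_sync()
returns 1 when the device was already active:

    #include <linux/pm_runtime.h>

    static int sketch_resume(struct device *dev)
    {
            int ret = pm_runtime_get_sync(dev);

            if (ret < 0) {
                    /* usage count was bumped even on failure */
                    pm_runtime_put_noidle(dev);
                    return ret;
            }
            return 0;
    }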
diff --git a/fs/bfs/inode.c b/fs/bfs/inode.c
index 3ac7611ef7ce..fd691e4815c5 100644
--- a/fs/bfs/inode.c
+++ b/fs/bfs/inode.c
@@ -350,7 +350,7 @@ static int bfs_fill_super(struct super_block *s, void *data, int silent)
 
        info->si_lasti = (le32_to_cpu(bfs_sb->s_start) - BFS_BSIZE) / sizeof(struct bfs_inode) + BFS_ROOT_INO - 1;
        if (info->si_lasti == BFS_MAX_LASTI)
-               printf("WARNING: filesystem %s was created with 512 inodes, the real maximum is 511, mounting anyway\n", s->s_id);
+               printf("NOTE: filesystem %s was created with 512 inodes, the real maximum is 511, mounting anyway\n", s->s_id);
        else if (info->si_lasti > BFS_MAX_LASTI) {
                printf("Impossible last inode number %lu > %d on %s\n", info->si_lasti, BFS_MAX_LASTI, s->s_id);
                goto out1;
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
index 526faf4778ce..2462a9a84b95 100644
--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c
@@ -1335,6 +1335,8 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req)
                                in, ceph_vinop(in));
                        if (in->i_state & I_NEW)
                                discard_new_inode(in);
+                       else
+                               iput(in);
                        goto done;
                }
                req->r_target_inode = in;
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index 37a619bf1ac7..e67d5de6f28c 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -2395,9 +2395,9 @@ ext4_mb_regular_allocator(struct ext4_allocation_context *ac)
 
                                nr = sbi->s_mb_prefetch;
                                if (ext4_has_feature_flex_bg(sb)) {
-                                       nr = (group / sbi->s_mb_prefetch) *
-                                               sbi->s_mb_prefetch;
-                                       nr = nr + sbi->s_mb_prefetch - group;
+                                       nr = 1 << sbi->s_log_groups_per_flex;
+                                       nr -= group & (nr - 1);
+                                       nr = min(nr, sbi->s_mb_prefetch);
                                }
                                prefetch_grp = ext4_mb_prefetch(sb, group,
                                                        nr, &prefetch_ios);
@@ -2733,7 +2733,8 @@ static int ext4_mb_init_backend(struct super_block *sb)
 
        if (ext4_has_feature_flex_bg(sb)) {
                /* a single flex group is supposed to be read by a single IO */
-               sbi->s_mb_prefetch = 1 << sbi->s_es->s_log_groups_per_flex;
+               sbi->s_mb_prefetch = min(1 << sbi->s_es->s_log_groups_per_flex,
+                       BLK_MAX_SEGMENT_SIZE >> (sb->s_blocksize_bits - 9));
                sbi->s_mb_prefetch *= 8; /* 8 prefetch IOs in flight at most */
        } else {
                sbi->s_mb_prefetch = 32;
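The new prefetch window arithmetic clamps each batch to the end of the current
flex group: with 2^s groups per flex group, group & (2^s - 1) is the position
inside the flex group, and subtracting it from 2^s gives the groups left
before the boundary (the patch then caps that with min(nr, sbi->s_mb_prefetch)).
A self-contained worked example, assuming s_log_groups_per_flex = 4:

    #include <stdio.h>

    int main(void)
    {
            unsigned int log_groups_per_flex = 4;                /* assumed */
            unsigned int flex_size = 1U << log_groups_per_flex;  /* 16 */
            unsigned int group = 21;

            /* position inside the flex group: 21 & 15 = 5 */
            unsigned int offset = group & (flex_size - 1);

            /* groups left to the flex group boundary: 16 - 5 = 11 */
            unsigned int nr = flex_size - offset;

            printf("prefetch from group %u: %u groups\n", group, nr);
            return 0;
    }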
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 2b08b162075c..ea5aefa23a20 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -4186,18 +4186,25 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
         */
        sbi->s_li_wait_mult = EXT4_DEF_LI_WAIT_MULT;
 
-       blocksize = BLOCK_SIZE << le32_to_cpu(es->s_log_block_size);
-
-       if (blocksize == PAGE_SIZE)
-               set_opt(sb, DIOREAD_NOLOCK);
-
-       if (blocksize < EXT4_MIN_BLOCK_SIZE ||
-           blocksize > EXT4_MAX_BLOCK_SIZE) {
+       if (le32_to_cpu(es->s_log_block_size) >
+           (EXT4_MAX_BLOCK_LOG_SIZE - EXT4_MIN_BLOCK_LOG_SIZE)) {
                ext4_msg(sb, KERN_ERR,
-                      "Unsupported filesystem blocksize %d (%d log_block_size)",
-                        blocksize, le32_to_cpu(es->s_log_block_size));
+                        "Invalid log block size: %u",
+                        le32_to_cpu(es->s_log_block_size));
                goto failed_mount;
        }
+       if (le32_to_cpu(es->s_log_cluster_size) >
+           (EXT4_MAX_CLUSTER_LOG_SIZE - EXT4_MIN_BLOCK_LOG_SIZE)) {
+               ext4_msg(sb, KERN_ERR,
+                        "Invalid log cluster size: %u",
+                        le32_to_cpu(es->s_log_cluster_size));
+               goto failed_mount;
+       }
+
+       blocksize = EXT4_MIN_BLOCK_SIZE << le32_to_cpu(es->s_log_block_size);
+
+       if (blocksize == PAGE_SIZE)
+               set_opt(sb, DIOREAD_NOLOCK);
 
        if (le32_to_cpu(es->s_rev_level) == EXT4_GOOD_OLD_REV) {
                sbi->s_inode_size = EXT4_GOOD_OLD_INODE_SIZE;
@@ -4416,21 +4423,6 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
        if (!ext4_feature_set_ok(sb, (sb_rdonly(sb))))
                goto failed_mount;
 
-       if (le32_to_cpu(es->s_log_block_size) >
-           (EXT4_MAX_BLOCK_LOG_SIZE - EXT4_MIN_BLOCK_LOG_SIZE)) {
-               ext4_msg(sb, KERN_ERR,
-                        "Invalid log block size: %u",
-                        le32_to_cpu(es->s_log_block_size));
-               goto failed_mount;
-       }
-       if (le32_to_cpu(es->s_log_cluster_size) >
-           (EXT4_MAX_CLUSTER_LOG_SIZE - EXT4_MIN_BLOCK_LOG_SIZE)) {
-               ext4_msg(sb, KERN_ERR,
-                        "Invalid log cluster size: %u",
-                        le32_to_cpu(es->s_log_cluster_size));
-               goto failed_mount;
-       }
-
        if (le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks) > (blocksize / 4)) {
                ext4_msg(sb, KERN_ERR,
                         "Number of reserved GDT blocks insanely large: %d",
diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c
index 023462e80e58..b39bf416d511 100644
--- a/fs/f2fs/checkpoint.c
+++ b/fs/f2fs/checkpoint.c
@@ -1600,7 +1600,7 @@ int f2fs_write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
                        goto out;
                }
 
-               if (NM_I(sbi)->dirty_nat_cnt == 0 &&
+               if (NM_I(sbi)->nat_cnt[DIRTY_NAT] == 0 &&
                                SIT_I(sbi)->dirty_sentries == 0 &&
                                prefree_segments(sbi) == 0) {
                        f2fs_flush_sit_entries(sbi, cpc);
diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c
index 14262e0f1cd6..c5fee4d7ea72 100644
--- a/fs/f2fs/compress.c
+++ b/fs/f2fs/compress.c
@@ -798,8 +798,6 @@ void f2fs_decompress_pages(struct bio *bio, struct page *page, bool verity)
        if (cops->destroy_decompress_ctx)
                cops->destroy_decompress_ctx(dic);
 out_free_dic:
-       if (verity)
-               atomic_set(&dic->pending_pages, dic->nr_cpages);
        if (!verity)
                f2fs_decompress_end_io(dic->rpages, dic->cluster_size,
                                                                ret, false);
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index be4da52604ed..b29243ee1c3e 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -202,7 +202,7 @@ static void f2fs_verify_bio(struct bio *bio)
                dic = (struct decompress_io_ctx *)page_private(page);
 
                if (dic) {
-                       if (atomic_dec_return(&dic->pending_pages))
+                       if (atomic_dec_return(&dic->verity_pages))
                                continue;
                        f2fs_verify_pages(dic->rpages,
                                                dic->cluster_size);
@@ -1027,7 +1027,8 @@ static inline bool f2fs_need_verity(const struct inode *inode, pgoff_t idx)
 
 static struct bio *f2fs_grab_read_bio(struct inode *inode, block_t blkaddr,
                                      unsigned nr_pages, unsigned op_flag,
-                                     pgoff_t first_idx, bool for_write)
+                                     pgoff_t first_idx, bool for_write,
+                                     bool for_verity)
 {
        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
        struct bio *bio;
@@ -1049,7 +1050,7 @@ static struct bio *f2fs_grab_read_bio(struct inode *inode, block_t blkaddr,
                post_read_steps |= 1 << STEP_DECRYPT;
        if (f2fs_compressed_file(inode))
                post_read_steps |= 1 << STEP_DECOMPRESS_NOWQ;
-       if (f2fs_need_verity(inode, first_idx))
+       if (for_verity && f2fs_need_verity(inode, first_idx))
                post_read_steps |= 1 << STEP_VERITY;
 
        if (post_read_steps) {
@@ -1079,7 +1080,7 @@ static int f2fs_submit_page_read(struct inode *inode, struct page *page,
        struct bio *bio;
 
        bio = f2fs_grab_read_bio(inode, blkaddr, 1, op_flags,
-                                       page->index, for_write);
+                                       page->index, for_write, true);
        if (IS_ERR(bio))
                return PTR_ERR(bio);
 
@@ -2133,7 +2134,7 @@ static int f2fs_read_single_page(struct inode *inode, struct page *page,
        if (bio == NULL) {
                bio = f2fs_grab_read_bio(inode, block_nr, nr_pages,
                                is_readahead ? REQ_RAHEAD : 0, page->index,
-                               false);
+                               false, true);
                if (IS_ERR(bio)) {
                        ret = PTR_ERR(bio);
                        bio = NULL;
@@ -2180,6 +2181,8 @@ int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
        const unsigned blkbits = inode->i_blkbits;
        const unsigned blocksize = 1 << blkbits;
        struct decompress_io_ctx *dic = NULL;
+       struct bio_post_read_ctx *ctx;
+       bool for_verity = false;
        int i;
        int ret = 0;
 
@@ -2245,10 +2248,29 @@ int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
                goto out_put_dnode;
        }
 
+       /*
+        * fsverity can be enabled on the fly while a cluster is being
+        * handled, which would require complicated error handling. Instead of
+        * adding more complexity, adopt the rule that end_io post-processes
+        * fsverity per cluster. To do that, submit the current bio if the
+        * previous bio set a different post-process policy.
+        */
+       if (fsverity_active(cc->inode)) {
+               atomic_set(&dic->verity_pages, cc->nr_cpages);
+               for_verity = true;
+
+               if (bio) {
+                       ctx = bio->bi_private;
+                       if (!(ctx->enabled_steps & (1 << STEP_VERITY))) {
+                               __submit_bio(sbi, bio, DATA);
+                               bio = NULL;
+                       }
+               }
+       }
+
        for (i = 0; i < dic->nr_cpages; i++) {
                struct page *page = dic->cpages[i];
                block_t blkaddr;
-               struct bio_post_read_ctx *ctx;
 
                blkaddr = data_blkaddr(dn.inode, dn.node_page,
                                                dn.ofs_in_node + i + 1);
@@ -2264,17 +2286,31 @@ int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
                if (!bio) {
                        bio = f2fs_grab_read_bio(inode, blkaddr, nr_pages,
                                        is_readahead ? REQ_RAHEAD : 0,
-                                       page->index, for_write);
+                                       page->index, for_write, for_verity);
                        if (IS_ERR(bio)) {
+                               unsigned int remained = dic->nr_cpages - i;
+                               bool release = false;
+
                                ret = PTR_ERR(bio);
                                dic->failed = true;
-                               if (!atomic_sub_return(dic->nr_cpages - i,
-                                                       &dic->pending_pages)) {
+
+                               if (for_verity) {
+                                       if (!atomic_sub_return(remained,
+                                               &dic->verity_pages))
+                                               release = true;
+                               } else {
+                                       if (!atomic_sub_return(remained,
+                                               &dic->pending_pages))
+                                               release = true;
+                               }
+
+                               if (release) {
                                        f2fs_decompress_end_io(dic->rpages,
-                                                       cc->cluster_size, true,
-                                                       false);
+                                               cc->cluster_size, true,
+                                               false);
                                        f2fs_free_dic(dic);
                                }
+
                                f2fs_put_dnode(&dn);
                                *bio_ret = NULL;
                                return ret;
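
Stripped of f2fs specifics, the rule introduced by the comment above is a batching policy: keep adding pages to the in-flight bio while they want the same post-processing, and submit it as soon as the next cluster needs a different policy. An illustrative model with hypothetical types, not f2fs code:

    #include <stdio.h>

    struct batch {
        unsigned int flags;     /* post-process policy of the queued items */
        int nitems;
    };

    static void submit(struct batch *b)
    {
        if (b->nitems)
            printf("submit: flags=%#x items=%d\n", b->flags, b->nitems);
        b->nitems = 0;
    }

    static void add_item(struct batch *b, unsigned int flags)
    {
        if (b->nitems && b->flags != flags)
            submit(b);          /* policy changed: flush what we have */
        b->flags = flags;
        b->nitems++;
    }

    int main(void)
    {
        struct batch b = { 0, 0 };

        add_item(&b, 0x1);      /* decompress only */
        add_item(&b, 0x1);
        add_item(&b, 0x3);      /* decompress + verity: previous batch flushed */
        submit(&b);
        return 0;
    }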
diff --git a/fs/f2fs/debug.c b/fs/f2fs/debug.c
index a8357fd4f5fa..197c914119da 100644
--- a/fs/f2fs/debug.c
+++ b/fs/f2fs/debug.c
@@ -145,8 +145,8 @@ static void update_general_status(struct f2fs_sb_info *sbi)
                si->node_pages = NODE_MAPPING(sbi)->nrpages;
        if (sbi->meta_inode)
                si->meta_pages = META_MAPPING(sbi)->nrpages;
-       si->nats = NM_I(sbi)->nat_cnt;
-       si->dirty_nats = NM_I(sbi)->dirty_nat_cnt;
+       si->nats = NM_I(sbi)->nat_cnt[TOTAL_NAT];
+       si->dirty_nats = NM_I(sbi)->nat_cnt[DIRTY_NAT];
        si->sits = MAIN_SEGS(sbi);
        si->dirty_sits = SIT_I(sbi)->dirty_sentries;
        si->free_nids = NM_I(sbi)->nid_cnt[FREE_NID];
@@ -278,9 +278,10 @@ static void update_mem_info(struct f2fs_sb_info *sbi)
        si->cache_mem += (NM_I(sbi)->nid_cnt[FREE_NID] +
                                NM_I(sbi)->nid_cnt[PREALLOC_NID]) *
                                sizeof(struct free_nid);
-       si->cache_mem += NM_I(sbi)->nat_cnt * sizeof(struct nat_entry);
-       si->cache_mem += NM_I(sbi)->dirty_nat_cnt *
-                                       sizeof(struct nat_entry_set);
+       si->cache_mem += NM_I(sbi)->nat_cnt[TOTAL_NAT] *
+                               sizeof(struct nat_entry);
+       si->cache_mem += NM_I(sbi)->nat_cnt[DIRTY_NAT] *
+                               sizeof(struct nat_entry_set);
        si->cache_mem += si->inmem_pages * sizeof(struct inmem_pages);
        for (i = 0; i < MAX_INO_ENTRY; i++)
                si->cache_mem += sbi->im[i].ino_num * sizeof(struct ino_entry);
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index 9a321c52face..06e5a6053f3f 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -894,6 +894,13 @@ enum nid_state {
        MAX_NID_STATE,
 };
 
+enum nat_state {
+       TOTAL_NAT,
+       DIRTY_NAT,
+       RECLAIMABLE_NAT,
+       MAX_NAT_STATE,
+};
+
 struct f2fs_nm_info {
        block_t nat_blkaddr;            /* base disk address of NAT */
        nid_t max_nid;                  /* maximum possible node ids */
@@ -909,8 +916,7 @@ struct f2fs_nm_info {
        struct rw_semaphore nat_tree_lock;      /* protect nat_tree_lock */
        struct list_head nat_entries;   /* cached nat entry list (clean) */
        spinlock_t nat_list_lock;       /* protect clean nat entry list */
-       unsigned int nat_cnt;           /* the # of cached nat entries */
-       unsigned int dirty_nat_cnt;     /* total num of nat entries in set */
+       unsigned int nat_cnt[MAX_NAT_STATE]; /* the # of cached nat entries */
        unsigned int nat_blocks;        /* # of nat blocks */
 
        /* free node ids management */
@@ -1404,6 +1410,7 @@ struct decompress_io_ctx {
        size_t rlen;                    /* valid data length in rbuf */
        size_t clen;                    /* valid data length in cbuf */
        atomic_t pending_pages;         /* in-flight compressed page count */
+       atomic_t verity_pages;          /* in-flight page count for verity */
        bool failed;                    /* indicate IO error during decompression */
        void *private;                  /* payload buffer for specified decompression algorithm */
        void *private2;                 /* extra payload buffer */
diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
index 42394de6c7eb..e65d73293a3f 100644
--- a/fs/f2fs/node.c
+++ b/fs/f2fs/node.c
@@ -62,8 +62,8 @@ bool f2fs_available_free_memory(struct f2fs_sb_info *sbi, int type)
                                sizeof(struct free_nid)) >> PAGE_SHIFT;
                res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2);
        } else if (type == NAT_ENTRIES) {
-               mem_size = (nm_i->nat_cnt * sizeof(struct nat_entry)) >>
-                                                       PAGE_SHIFT;
+               mem_size = (nm_i->nat_cnt[TOTAL_NAT] *
+                               sizeof(struct nat_entry)) >> PAGE_SHIFT;
                res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2);
                if (excess_cached_nats(sbi))
                        res = false;
@@ -177,7 +177,8 @@ static struct nat_entry *__init_nat_entry(struct f2fs_nm_info *nm_i,
        list_add_tail(&ne->list, &nm_i->nat_entries);
        spin_unlock(&nm_i->nat_list_lock);
 
-       nm_i->nat_cnt++;
+       nm_i->nat_cnt[TOTAL_NAT]++;
+       nm_i->nat_cnt[RECLAIMABLE_NAT]++;
        return ne;
 }
 
@@ -207,7 +208,8 @@ static unsigned int __gang_lookup_nat_cache(struct f2fs_nm_info *nm_i,
 static void __del_from_nat_cache(struct f2fs_nm_info *nm_i, struct nat_entry *e)
 {
        radix_tree_delete(&nm_i->nat_root, nat_get_nid(e));
-       nm_i->nat_cnt--;
+       nm_i->nat_cnt[TOTAL_NAT]--;
+       nm_i->nat_cnt[RECLAIMABLE_NAT]--;
        __free_nat_entry(e);
 }
 
@@ -253,7 +255,8 @@ static void __set_nat_cache_dirty(struct f2fs_nm_info *nm_i,
        if (get_nat_flag(ne, IS_DIRTY))
                goto refresh_list;
 
-       nm_i->dirty_nat_cnt++;
+       nm_i->nat_cnt[DIRTY_NAT]++;
+       nm_i->nat_cnt[RECLAIMABLE_NAT]--;
        set_nat_flag(ne, IS_DIRTY, true);
 refresh_list:
        spin_lock(&nm_i->nat_list_lock);
@@ -273,7 +276,8 @@ static void __clear_nat_cache_dirty(struct f2fs_nm_info *nm_i,
 
        set_nat_flag(ne, IS_DIRTY, false);
        set->entry_cnt--;
-       nm_i->dirty_nat_cnt--;
+       nm_i->nat_cnt[DIRTY_NAT]--;
+       nm_i->nat_cnt[RECLAIMABLE_NAT]++;
 }
 
 static unsigned int __gang_lookup_nat_set(struct f2fs_nm_info *nm_i,
@@ -2944,14 +2948,17 @@ int f2fs_flush_nat_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc)
        LIST_HEAD(sets);
        int err = 0;
 
-       /* during unmount, let's flush nat_bits before checking dirty_nat_cnt */
+       /*
+        * during unmount, let's flush nat_bits before checking
+        * nat_cnt[DIRTY_NAT].
+        */
        if (enabled_nat_bits(sbi, cpc)) {
                down_write(&nm_i->nat_tree_lock);
                remove_nats_in_journal(sbi);
                up_write(&nm_i->nat_tree_lock);
        }
 
-       if (!nm_i->dirty_nat_cnt)
+       if (!nm_i->nat_cnt[DIRTY_NAT])
                return 0;
 
        down_write(&nm_i->nat_tree_lock);
@@ -2962,7 +2969,8 @@ int f2fs_flush_nat_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc)
         * into nat entry set.
         */
        if (enabled_nat_bits(sbi, cpc) ||
-               !__has_cursum_space(journal, nm_i->dirty_nat_cnt, NAT_JOURNAL))
+               !__has_cursum_space(journal,
+                       nm_i->nat_cnt[DIRTY_NAT], NAT_JOURNAL))
                remove_nats_in_journal(sbi);
 
        while ((found = __gang_lookup_nat_set(nm_i,
@@ -3086,7 +3094,6 @@ static int init_node_manager(struct f2fs_sb_info *sbi)
                                                F2FS_RESERVED_NODE_NUM;
        nm_i->nid_cnt[FREE_NID] = 0;
        nm_i->nid_cnt[PREALLOC_NID] = 0;
-       nm_i->nat_cnt = 0;
        nm_i->ram_thresh = DEF_RAM_THRESHOLD;
        nm_i->ra_nid_pages = DEF_RA_NID_PAGES;
        nm_i->dirty_nats_ratio = DEF_DIRTY_NAT_RATIO_THRESHOLD;
@@ -3220,7 +3227,7 @@ void f2fs_destroy_node_manager(struct f2fs_sb_info *sbi)
                        __del_from_nat_cache(nm_i, natvec[idx]);
                }
        }
-       f2fs_bug_on(sbi, nm_i->nat_cnt);
+       f2fs_bug_on(sbi, nm_i->nat_cnt[TOTAL_NAT]);
 
        /* destroy nat set cache */
        nid = 0;
diff --git a/fs/f2fs/node.h b/fs/f2fs/node.h
index 69e5859e993c..f84541b57acb 100644
--- a/fs/f2fs/node.h
+++ b/fs/f2fs/node.h
@@ -126,13 +126,13 @@ static inline void raw_nat_from_node_info(struct f2fs_nat_entry *raw_ne,
 
 static inline bool excess_dirty_nats(struct f2fs_sb_info *sbi)
 {
-       return NM_I(sbi)->dirty_nat_cnt >= NM_I(sbi)->max_nid *
+       return NM_I(sbi)->nat_cnt[DIRTY_NAT] >= NM_I(sbi)->max_nid *
                                        NM_I(sbi)->dirty_nats_ratio / 100;
 }
 
 static inline bool excess_cached_nats(struct f2fs_sb_info *sbi)
 {
-       return NM_I(sbi)->nat_cnt >= DEF_NAT_CACHE_THRESHOLD;
+       return NM_I(sbi)->nat_cnt[TOTAL_NAT] >= DEF_NAT_CACHE_THRESHOLD;
 }
 
 static inline bool excess_dirty_nodes(struct f2fs_sb_info *sbi)
diff --git a/fs/f2fs/shrinker.c b/fs/f2fs/shrinker.c
index d66de5999a26..dd3c3c7a90ec 100644
--- a/fs/f2fs/shrinker.c
+++ b/fs/f2fs/shrinker.c
@@ -18,9 +18,7 @@ static unsigned int shrinker_run_no;
 
 static unsigned long __count_nat_entries(struct f2fs_sb_info *sbi)
 {
-       long count = NM_I(sbi)->nat_cnt - NM_I(sbi)->dirty_nat_cnt;
-
-       return count > 0 ? count : 0;
+       return NM_I(sbi)->nat_cnt[RECLAIMABLE_NAT];
 }
 
 static unsigned long __count_free_nids(struct f2fs_sb_info *sbi)
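
The shrinker hunk shows why the counters were restructured: the reclaimable estimate used to be computed as nat_cnt - dirty_nat_cnt from two fields updated at different times, so it could transiently underflow and had to be clamped. With one enum-indexed array, RECLAIMABLE_NAT is adjusted at every state transition and can be read directly. A schematic of the bookkeeping (illustrative only, without the kernel's locking):

    #include <stdio.h>

    enum nat_state { TOTAL_NAT, DIRTY_NAT, RECLAIMABLE_NAT, MAX_NAT_STATE };

    static unsigned int nat_cnt[MAX_NAT_STATE];

    static void cache_entry(void)   /* a clean entry enters the cache */
    {
        nat_cnt[TOTAL_NAT]++;
        nat_cnt[RECLAIMABLE_NAT]++;
    }

    static void dirty_entry(void)   /* clean -> dirty: no longer reclaimable */
    {
        nat_cnt[DIRTY_NAT]++;
        nat_cnt[RECLAIMABLE_NAT]--;
    }

    int main(void)
    {
        cache_entry();
        cache_entry();
        dirty_entry();
        printf("total=%u dirty=%u reclaimable=%u\n",
               nat_cnt[TOTAL_NAT], nat_cnt[DIRTY_NAT],
               nat_cnt[RECLAIMABLE_NAT]);
        return 0;
    }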
diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
index fef22e476c52..aa284ce7ec00 100644
--- a/fs/f2fs/super.c
+++ b/fs/f2fs/super.c
@@ -2744,7 +2744,6 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
        block_t total_sections, blocks_per_seg;
        struct f2fs_super_block *raw_super = (struct f2fs_super_block *)
                                        (bh->b_data + F2FS_SUPER_OFFSET);
-       unsigned int blocksize;
        size_t crc_offset = 0;
        __u32 crc = 0;
 
@@ -2778,10 +2777,10 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
        }
 
        /* Currently, support only 4KB block size */
-       blocksize = 1 << le32_to_cpu(raw_super->log_blocksize);
-       if (blocksize != F2FS_BLKSIZE) {
-               f2fs_info(sbi, "Invalid blocksize (%u), supports only 4KB",
-                         blocksize);
+       if (le32_to_cpu(raw_super->log_blocksize) != F2FS_BLKSIZE_BITS) {
+               f2fs_info(sbi, "Invalid log_blocksize (%u), supports only %u",
+                         le32_to_cpu(raw_super->log_blocksize),
+                         F2FS_BLKSIZE_BITS);
                return -EFSCORRUPTED;
        }
 
diff --git a/fs/fcntl.c b/fs/fcntl.c
index 19ac5baad50f..05b36b28f2e8 100644
--- a/fs/fcntl.c
+++ b/fs/fcntl.c
@@ -781,9 +781,10 @@ void send_sigio(struct fown_struct *fown, int fd, int band)
 {
        struct task_struct *p;
        enum pid_type type;
+       unsigned long flags;
        struct pid *pid;
        
-       read_lock(&fown->lock);
+       read_lock_irqsave(&fown->lock, flags);
 
        type = fown->pid_type;
        pid = fown->pid;
@@ -804,7 +805,7 @@ void send_sigio(struct fown_struct *fown, int fd, int band)
                read_unlock(&tasklist_lock);
        }
  out_unlock_fown:
-       read_unlock(&fown->lock);
+       read_unlock_irqrestore(&fown->lock, flags);
 }
 
 static void send_sigurg_to_task(struct task_struct *p,
@@ -819,9 +820,10 @@ int send_sigurg(struct fown_struct *fown)
        struct task_struct *p;
        enum pid_type type;
        struct pid *pid;
+       unsigned long flags;
        int ret = 0;
        
-       read_lock(&fown->lock);
+       read_lock_irqsave(&fown->lock, flags);
 
        type = fown->pid_type;
        pid = fown->pid;
@@ -844,7 +846,7 @@ int send_sigurg(struct fown_struct *fown)
                read_unlock(&tasklist_lock);
        }
  out_unlock_fown:
-       read_unlock(&fown->lock);
+       read_unlock_irqrestore(&fown->lock, flags);
        return ret;
 }
 
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 0fcd065baa76..1f798c5c4213 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -941,6 +941,10 @@ enum io_mem_account {
        ACCT_PINNED,
 };
 
+static void destroy_fixed_file_ref_node(struct fixed_file_ref_node *ref_node);
+static struct fixed_file_ref_node *alloc_fixed_file_ref_node(
+                       struct io_ring_ctx *ctx);
+
 static void __io_complete_rw(struct io_kiocb *req, long res, long res2,
                             struct io_comp_state *cs);
 static void io_cqring_fill_event(struct io_kiocb *req, long res);
@@ -1369,6 +1373,13 @@ static bool io_grab_identity(struct io_kiocb *req)
                spin_unlock_irq(&ctx->inflight_lock);
                req->work.flags |= IO_WQ_WORK_FILES;
        }
+       if (!(req->work.flags & IO_WQ_WORK_MM) &&
+           (def->work_flags & IO_WQ_WORK_MM)) {
+               if (id->mm != current->mm)
+                       return false;
+               mmgrab(id->mm);
+               req->work.flags |= IO_WQ_WORK_MM;
+       }
 
        return true;
 }
@@ -1393,13 +1404,6 @@ static void io_prep_async_work(struct io_kiocb *req)
                        req->work.flags |= IO_WQ_WORK_UNBOUND;
        }
 
-       /* ->mm can never change on us */
-       if (!(req->work.flags & IO_WQ_WORK_MM) &&
-           (def->work_flags & IO_WQ_WORK_MM)) {
-               mmgrab(id->mm);
-               req->work.flags |= IO_WQ_WORK_MM;
-       }
-
        /* if we fail grabbing identity, we must COW, regrab, and retry */
        if (io_grab_identity(req))
                return;
@@ -1632,8 +1636,6 @@ static bool io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force,
        LIST_HEAD(list);
 
        if (!force) {
-               if (list_empty_careful(&ctx->cq_overflow_list))
-                       return true;
                if ((ctx->cached_cq_tail - READ_ONCE(rings->cq.head) ==
                    rings->cq_ring_entries))
                        return false;
@@ -5861,15 +5863,15 @@ static void io_req_drop_files(struct io_kiocb *req)
        struct io_ring_ctx *ctx = req->ctx;
        unsigned long flags;
 
+       put_files_struct(req->work.identity->files);
+       put_nsproxy(req->work.identity->nsproxy);
        spin_lock_irqsave(&ctx->inflight_lock, flags);
        list_del(&req->inflight_entry);
-       if (waitqueue_active(&ctx->inflight_wait))
-               wake_up(&ctx->inflight_wait);
        spin_unlock_irqrestore(&ctx->inflight_lock, flags);
        req->flags &= ~REQ_F_INFLIGHT;
-       put_files_struct(req->work.identity->files);
-       put_nsproxy(req->work.identity->nsproxy);
        req->work.flags &= ~IO_WQ_WORK_FILES;
+       if (waitqueue_active(&ctx->inflight_wait))
+               wake_up(&ctx->inflight_wait);
 }
 
 static void __io_clean_op(struct io_kiocb *req)
@@ -6575,8 +6577,7 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr)
 
        /* if we have a backlog and couldn't flush it all, return BUSY */
        if (test_bit(0, &ctx->sq_check_overflow)) {
-               if (!list_empty(&ctx->cq_overflow_list) &&
-                   !io_cqring_overflow_flush(ctx, false, NULL, NULL))
+               if (!io_cqring_overflow_flush(ctx, false, NULL, NULL))
                        return -EBUSY;
        }
 
@@ -6798,8 +6799,16 @@ static int io_sq_thread(void *data)
                 * kthread parking. This synchronizes the thread vs users,
                 * the users are synchronized on the sqd->ctx_lock.
                 */
-               if (kthread_should_park())
+               if (kthread_should_park()) {
                        kthread_parkme();
+                       /*
+                        * When the sq thread is unparked, the previous park
+                        * may have come from io_put_sq_data(), which means the
+                        * thread is about to be stopped, so check for that.
+                        */
+                       if (kthread_should_stop())
+                               break;
+               }
 
                if (unlikely(!list_empty(&sqd->ctx_new_list)))
                        io_sqd_init_new(sqd);
@@ -6991,18 +7000,32 @@ static void io_file_ref_kill(struct percpu_ref *ref)
        complete(&data->done);
 }
 
+static void io_sqe_files_set_node(struct fixed_file_data *file_data,
+                                 struct fixed_file_ref_node *ref_node)
+{
+       spin_lock_bh(&file_data->lock);
+       file_data->node = ref_node;
+       list_add_tail(&ref_node->node, &file_data->ref_list);
+       spin_unlock_bh(&file_data->lock);
+       percpu_ref_get(&file_data->refs);
+}
+
 static int io_sqe_files_unregister(struct io_ring_ctx *ctx)
 {
        struct fixed_file_data *data = ctx->file_data;
-       struct fixed_file_ref_node *ref_node = NULL;
+       struct fixed_file_ref_node *backup_node, *ref_node = NULL;
        unsigned nr_tables, i;
+       int ret;
 
        if (!data)
                return -ENXIO;
+       backup_node = alloc_fixed_file_ref_node(ctx);
+       if (!backup_node)
+               return -ENOMEM;
 
-       spin_lock(&data->lock);
+       spin_lock_bh(&data->lock);
        ref_node = data->node;
-       spin_unlock(&data->lock);
+       spin_unlock_bh(&data->lock);
        if (ref_node)
                percpu_ref_kill(&ref_node->refs);
 
@@ -7010,7 +7033,18 @@ static int io_sqe_files_unregister(struct io_ring_ctx *ctx)
 
        /* wait for all refs nodes to complete */
        flush_delayed_work(&ctx->file_put_work);
-       wait_for_completion(&data->done);
+       do {
+               ret = wait_for_completion_interruptible(&data->done);
+               if (!ret)
+                       break;
+               ret = io_run_task_work_sig();
+               if (ret < 0) {
+                       percpu_ref_resurrect(&data->refs);
+                       reinit_completion(&data->done);
+                       io_sqe_files_set_node(data, backup_node);
+                       return ret;
+               }
+       } while (1);
 
        __io_sqe_files_unregister(ctx);
        nr_tables = DIV_ROUND_UP(ctx->nr_user_files, IORING_MAX_FILES_TABLE);
@@ -7021,6 +7055,7 @@ static int io_sqe_files_unregister(struct io_ring_ctx *ctx)
        kfree(data);
        ctx->file_data = NULL;
        ctx->nr_user_files = 0;
+       destroy_fixed_file_ref_node(backup_node);
        return 0;
 }
 
@@ -7385,7 +7420,7 @@ static void io_file_data_ref_zero(struct percpu_ref *ref)
        data = ref_node->file_data;
        ctx = data->ctx;
 
-       spin_lock(&data->lock);
+       spin_lock_bh(&data->lock);
        ref_node->done = true;
 
        while (!list_empty(&data->ref_list)) {
@@ -7397,7 +7432,7 @@ static void io_file_data_ref_zero(struct percpu_ref *ref)
                list_del(&ref_node->node);
                first_add |= llist_add(&ref_node->llist, &ctx->file_put_llist);
        }
-       spin_unlock(&data->lock);
+       spin_unlock_bh(&data->lock);
 
        if (percpu_ref_is_dying(&data->refs))
                delay = 0;
@@ -7519,11 +7554,7 @@ static int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
                return PTR_ERR(ref_node);
        }
 
-       file_data->node = ref_node;
-       spin_lock(&file_data->lock);
-       list_add_tail(&ref_node->node, &file_data->ref_list);
-       spin_unlock(&file_data->lock);
-       percpu_ref_get(&file_data->refs);
+       io_sqe_files_set_node(file_data, ref_node);
        return ret;
 out_fput:
        for (i = 0; i < ctx->nr_user_files; i++) {
@@ -7679,11 +7710,7 @@ static int __io_sqe_files_update(struct io_ring_ctx *ctx,
 
        if (needs_switch) {
                percpu_ref_kill(&data->node->refs);
-               spin_lock(&data->lock);
-               list_add_tail(&ref_node->node, &data->ref_list);
-               data->node = ref_node;
-               spin_unlock(&data->lock);
-               percpu_ref_get(&ctx->file_data->refs);
+               io_sqe_files_set_node(data, ref_node);
        } else
                destroy_fixed_file_ref_node(ref_node);
 
diff --git a/fs/jffs2/jffs2_fs_sb.h b/fs/jffs2/jffs2_fs_sb.h
index 778275f48a87..5a7091746f68 100644
--- a/fs/jffs2/jffs2_fs_sb.h
+++ b/fs/jffs2/jffs2_fs_sb.h
@@ -38,6 +38,7 @@ struct jffs2_mount_opts {
         * users. This is implemented simply by means of not allowing the
         * latter users to write to the file system if the amount if the
         * available space is less then 'rp_size'. */
+       bool set_rp_size;
        unsigned int rp_size;
 };
 
diff --git a/fs/jffs2/super.c b/fs/jffs2/super.c
index 4fd297bdf0f3..81ca58c10b72 100644
--- a/fs/jffs2/super.c
+++ b/fs/jffs2/super.c
@@ -88,7 +88,7 @@ static int jffs2_show_options(struct seq_file *s, struct dentry *root)
 
        if (opts->override_compr)
                seq_printf(s, ",compr=%s", jffs2_compr_name(opts->compr));
-       if (opts->rp_size)
+       if (opts->set_rp_size)
                seq_printf(s, ",rp_size=%u", opts->rp_size / 1024);
 
        return 0;
@@ -202,11 +202,8 @@ static int jffs2_parse_param(struct fs_context *fc, struct fs_parameter *param)
        case Opt_rp_size:
                if (result.uint_32 > UINT_MAX / 1024)
                        return invalf(fc, "jffs2: rp_size unrepresentable");
-               opt = result.uint_32 * 1024;
-               if (opt > c->mtd->size)
-                       return invalf(fc, "jffs2: Too large reserve pool specified, max is %llu KB",
-                                     c->mtd->size / 1024);
-               c->mount_opts.rp_size = opt;
+               c->mount_opts.rp_size = result.uint_32 * 1024;
+               c->mount_opts.set_rp_size = true;
                break;
        default:
                return -EINVAL;
@@ -225,8 +222,10 @@ static inline void jffs2_update_mount_opts(struct fs_context *fc)
                c->mount_opts.override_compr = new_c->mount_opts.override_compr;
                c->mount_opts.compr = new_c->mount_opts.compr;
        }
-       if (new_c->mount_opts.rp_size)
+       if (new_c->mount_opts.set_rp_size) {
+               c->mount_opts.set_rp_size = new_c->mount_opts.set_rp_size;
                c->mount_opts.rp_size = new_c->mount_opts.rp_size;
+       }
        mutex_unlock(&c->alloc_sem);
 }
 
@@ -266,6 +265,10 @@ static int jffs2_fill_super(struct super_block *sb, struct fs_context *fc)
        c->mtd = sb->s_mtd;
        c->os_priv = sb;
 
+       if (c->mount_opts.rp_size > c->mtd->size)
+               return invalf(fc, "jffs2: Too large reserve pool specified, max is %llu KB",
+                             c->mtd->size / 1024);
+
        /* Initialize JFFS2 superblock locks, the further initialization will
         * be done later */
        mutex_init(&c->alloc_sem);
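
Two coordinated fixes land here: set_rp_size records whether the option was given at all, so a remount can tell "rp_size not specified" apart from an explicit rp_size=0, and the size-versus-device check moves to jffs2_fill_super, the first point where c->mtd is actually available. A sketch of the value-plus-flag pattern with illustrative names:

    #include <stdbool.h>
    #include <stdio.h>

    struct opts {
        bool set_rp_size;       /* distinguishes rp_size=0 from "not given" */
        unsigned int rp_size;
    };

    static void apply_remount(struct opts *cur, const struct opts *new_opts)
    {
        /* Only override when the remount actually specified the option. */
        if (new_opts->set_rp_size) {
            cur->set_rp_size = true;
            cur->rp_size = new_opts->rp_size;
        }
    }

    int main(void)
    {
        struct opts cur = { true, 4096 };
        struct opts remount_plain = { false, 0 };   /* no rp_size= at all */
        struct opts remount_zero = { true, 0 };     /* explicit rp_size=0 */

        apply_remount(&cur, &remount_plain);
        printf("after plain remount: rp_size=%u\n", cur.rp_size);   /* 4096 */
        apply_remount(&cur, &remount_zero);
        printf("after rp_size=0:     rp_size=%u\n", cur.rp_size);   /* 0 */
        return 0;
    }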
diff --git a/fs/namespace.c b/fs/namespace.c
index cebaa3e81794..93006abe7946 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -156,10 +156,10 @@ static inline void mnt_add_count(struct mount *mnt, int n)
 /*
  * vfsmount lock must be held for write
  */
-unsigned int mnt_get_count(struct mount *mnt)
+int mnt_get_count(struct mount *mnt)
 {
 #ifdef CONFIG_SMP
-       unsigned int count = 0;
+       int count = 0;
        int cpu;
 
        for_each_possible_cpu(cpu) {
@@ -1139,6 +1139,7 @@ static DECLARE_DELAYED_WORK(delayed_mntput_work, delayed_mntput);
 static void mntput_no_expire(struct mount *mnt)
 {
        LIST_HEAD(list);
+       int count;
 
        rcu_read_lock();
        if (likely(READ_ONCE(mnt->mnt_ns))) {
@@ -1162,7 +1163,9 @@ static void mntput_no_expire(struct mount *mnt)
         */
        smp_mb();
        mnt_add_count(mnt, -1);
-       if (mnt_get_count(mnt)) {
+       count = mnt_get_count(mnt);
+       if (count != 0) {
+               WARN_ON(count < 0);
                rcu_read_unlock();
                unlock_mount_hash();
                return;
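
Returning a plain int from mnt_get_count() is what gives the new WARN_ON(count < 0) teeth: the count is a sum of per-cpu counters that may individually be negative, and with an unsigned total an over-put wraps to a huge positive value and sails past the check. A small demonstration:

    #include <stdio.h>

    int main(void)
    {
        /* Per-cpu counters may go negative; only the sum is meaningful. */
        int percpu[2] = { 3, -4 };      /* one put too many slipped in */
        unsigned int usum = 0;
        int ssum = 0;
        int i;

        for (i = 0; i < 2; i++) {
            usum += percpu[i];
            ssum += percpu[i];
        }

        printf("unsigned sum: %u (underflow hidden)\n", usum);
        printf("signed sum:   %d (caught by a < 0 check)\n", ssum);
        return 0;
    }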
diff --git a/fs/nfs/nfs42xdr.c b/fs/nfs/nfs42xdr.c
index 8432bd6b95f0..c078f8855269 100644
--- a/fs/nfs/nfs42xdr.c
+++ b/fs/nfs/nfs42xdr.c
@@ -1019,29 +1019,24 @@ static int decode_deallocate(struct xdr_stream *xdr, struct nfs42_falloc_res *re
        return decode_op_hdr(xdr, OP_DEALLOCATE);
 }
 
-static int decode_read_plus_data(struct xdr_stream *xdr, struct nfs_pgio_res *res,
-                                uint32_t *eof)
+static int decode_read_plus_data(struct xdr_stream *xdr,
+                                struct nfs_pgio_res *res)
 {
        uint32_t count, recvd;
        uint64_t offset;
        __be32 *p;
 
        p = xdr_inline_decode(xdr, 8 + 4);
-       if (unlikely(!p))
-               return -EIO;
+       if (!p)
+               return 1;
 
        p = xdr_decode_hyper(p, &offset);
        count = be32_to_cpup(p);
        recvd = xdr_align_data(xdr, res->count, count);
        res->count += recvd;
 
-       if (count > recvd) {
-               dprintk("NFS: server cheating in read reply: "
-                               "count %u > recvd %u\n", count, recvd);
-               *eof = 0;
+       if (count > recvd)
                return 1;
-       }
-
        return 0;
 }
 
@@ -1052,18 +1047,16 @@ static int decode_read_plus_hole(struct xdr_stream *xdr, struct nfs_pgio_res *re
        __be32 *p;
 
        p = xdr_inline_decode(xdr, 8 + 8);
-       if (unlikely(!p))
-               return -EIO;
+       if (!p)
+               return 1;
 
        p = xdr_decode_hyper(p, &offset);
        p = xdr_decode_hyper(p, &length);
        recvd = xdr_expand_hole(xdr, res->count, length);
        res->count += recvd;
 
-       if (recvd < length) {
-               *eof = 0;
+       if (recvd < length)
                return 1;
-       }
        return 0;
 }
 
@@ -1088,12 +1081,12 @@ static int decode_read_plus(struct xdr_stream *xdr, struct nfs_pgio_res *res)
 
        for (i = 0; i < segments; i++) {
                p = xdr_inline_decode(xdr, 4);
-               if (unlikely(!p))
-                       return -EIO;
+               if (!p)
+                       goto early_out;
 
                type = be32_to_cpup(p++);
                if (type == NFS4_CONTENT_DATA)
-                       status = decode_read_plus_data(xdr, res, &eof);
+                       status = decode_read_plus_data(xdr, res);
                else if (type == NFS4_CONTENT_HOLE)
                        status = decode_read_plus_hole(xdr, res, &eof);
                else
@@ -1102,12 +1095,17 @@ static int decode_read_plus(struct xdr_stream *xdr, struct nfs_pgio_res *res)
                if (status < 0)
                        return status;
                if (status > 0)
-                       break;
+                       goto early_out;
        }
 
 out:
        res->eof = eof;
        return 0;
+early_out:
+       if (unlikely(!i))
+               return -EIO;
+       res->eof = 0;
+       return 0;
 }
 
 static int decode_seek(struct xdr_stream *xdr, struct nfs42_seek_res *res)
diff --git a/fs/nfs/nfs4super.c b/fs/nfs/nfs4super.c
index 93f5c1678ec2..984cc42ee54d 100644
--- a/fs/nfs/nfs4super.c
+++ b/fs/nfs/nfs4super.c
@@ -67,7 +67,7 @@ static void nfs4_evict_inode(struct inode *inode)
        nfs_inode_evict_delegation(inode);
        /* Note that above delegreturn would trigger pnfs return-on-close */
        pnfs_return_layout(inode);
-       pnfs_destroy_layout(NFS_I(inode));
+       pnfs_destroy_layout_final(NFS_I(inode));
        /* First call standard NFS clear_inode() code */
        nfs_clear_inode(inode);
        nfs4_xattr_cache_zap(inode);
diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
index 0e50b9d45c32..07f59dc8cb2e 100644
--- a/fs/nfs/pnfs.c
+++ b/fs/nfs/pnfs.c
@@ -294,6 +294,7 @@ void
 pnfs_put_layout_hdr(struct pnfs_layout_hdr *lo)
 {
        struct inode *inode;
+       unsigned long i_state;
 
        if (!lo)
                return;
@@ -304,8 +305,12 @@ pnfs_put_layout_hdr(struct pnfs_layout_hdr *lo)
                if (!list_empty(&lo->plh_segs))
                        WARN_ONCE(1, "NFS: BUG unfreed layout segments.\n");
                pnfs_detach_layout_hdr(lo);
+               i_state = inode->i_state;
                spin_unlock(&inode->i_lock);
                pnfs_free_layout_hdr(lo);
+               /* Notify pnfs_destroy_layout_final() that we're done */
+               if (i_state & (I_FREEING | I_CLEAR))
+                       wake_up_var(lo);
        }
 }
 
@@ -734,8 +739,7 @@ pnfs_free_lseg_list(struct list_head *free_me)
        }
 }
 
-void
-pnfs_destroy_layout(struct nfs_inode *nfsi)
+static struct pnfs_layout_hdr *__pnfs_destroy_layout(struct nfs_inode *nfsi)
 {
        struct pnfs_layout_hdr *lo;
        LIST_HEAD(tmp_list);
@@ -753,9 +757,34 @@ pnfs_destroy_layout(struct nfs_inode *nfsi)
                pnfs_put_layout_hdr(lo);
        } else
                spin_unlock(&nfsi->vfs_inode.i_lock);
+       return lo;
+}
+
+void pnfs_destroy_layout(struct nfs_inode *nfsi)
+{
+       __pnfs_destroy_layout(nfsi);
 }
 EXPORT_SYMBOL_GPL(pnfs_destroy_layout);
 
+static bool pnfs_layout_removed(struct nfs_inode *nfsi,
+                               struct pnfs_layout_hdr *lo)
+{
+       bool ret;
+
+       spin_lock(&nfsi->vfs_inode.i_lock);
+       ret = nfsi->layout != lo;
+       spin_unlock(&nfsi->vfs_inode.i_lock);
+       return ret;
+}
+
+void pnfs_destroy_layout_final(struct nfs_inode *nfsi)
+{
+       struct pnfs_layout_hdr *lo = __pnfs_destroy_layout(nfsi);
+
+       if (lo)
+               wait_var_event(lo, pnfs_layout_removed(nfsi, lo));
+}
+
 static bool
 pnfs_layout_add_bulk_destroy_list(struct inode *inode,
                struct list_head *layout_list)
diff --git a/fs/nfs/pnfs.h b/fs/nfs/pnfs.h
index 2661c44c62db..78c389391848 100644
--- a/fs/nfs/pnfs.h
+++ b/fs/nfs/pnfs.h
@@ -266,6 +266,7 @@ struct pnfs_layout_segment *pnfs_layout_process(struct nfs4_layoutget *lgp);
 void pnfs_layoutget_free(struct nfs4_layoutget *lgp);
 void pnfs_free_lseg_list(struct list_head *tmp_list);
 void pnfs_destroy_layout(struct nfs_inode *);
+void pnfs_destroy_layout_final(struct nfs_inode *);
 void pnfs_destroy_all_layouts(struct nfs_client *);
 int pnfs_destroy_layouts_byfsid(struct nfs_client *clp,
                struct nfs_fsid *fsid,
@@ -710,6 +711,10 @@ static inline void pnfs_destroy_layout(struct nfs_inode *nfsi)
 {
 }
 
+static inline void pnfs_destroy_layout_final(struct nfs_inode *nfsi)
+{
+}
+
 static inline struct pnfs_layout_segment *
 pnfs_get_lseg(struct pnfs_layout_segment *lseg)
 {
diff --git a/fs/pnode.h b/fs/pnode.h
index 49a058c73e4c..26f74e092bd9 100644
--- a/fs/pnode.h
+++ b/fs/pnode.h
@@ -44,7 +44,7 @@ int propagate_mount_busy(struct mount *, int);
 void propagate_mount_unlock(struct mount *);
 void mnt_release_group_id(struct mount *);
 int get_dominating_id(struct mount *mnt, const struct path *root);
-unsigned int mnt_get_count(struct mount *mnt);
+int mnt_get_count(struct mount *mnt);
 void mnt_set_mountpoint(struct mount *, struct mountpoint *,
                        struct mount *);
 void mnt_change_mountpoint(struct mount *parent, struct mountpoint *mp,
diff --git a/fs/quota/quota_tree.c b/fs/quota/quota_tree.c
index a6f856f341dc..c5562c871c8b 100644
--- a/fs/quota/quota_tree.c
+++ b/fs/quota/quota_tree.c
@@ -62,7 +62,7 @@ static ssize_t read_blk(struct qtree_mem_dqinfo *info, uint blk, char *buf)
 
        memset(buf, 0, info->dqi_usable_bs);
        return sb->s_op->quota_read(sb, info->dqi_type, buf,
-              info->dqi_usable_bs, blk << info->dqi_blocksize_bits);
+              info->dqi_usable_bs, (loff_t)blk << info->dqi_blocksize_bits);
 }
 
 static ssize_t write_blk(struct qtree_mem_dqinfo *info, uint blk, char *buf)
@@ -71,7 +71,7 @@ static ssize_t write_blk(struct qtree_mem_dqinfo *info, uint blk, char *buf)
        ssize_t ret;
 
        ret = sb->s_op->quota_write(sb, info->dqi_type, buf,
-              info->dqi_usable_bs, blk << info->dqi_blocksize_bits);
+              info->dqi_usable_bs, (loff_t)blk << info->dqi_blocksize_bits);
        if (ret != info->dqi_usable_bs) {
                quota_error(sb, "dquota write failed");
                if (ret >= 0)
@@ -284,7 +284,7 @@ static uint find_free_dqentry(struct qtree_mem_dqinfo *info,
                            blk);
                goto out_buf;
        }
-       dquot->dq_off = (blk << info->dqi_blocksize_bits) +
+       dquot->dq_off = ((loff_t)blk << info->dqi_blocksize_bits) +
                        sizeof(struct qt_disk_dqdbheader) +
                        i * info->dqi_entry_size;
        kfree(buf);
@@ -559,7 +559,7 @@ static loff_t find_block_dqentry(struct qtree_mem_dqinfo *info,
                ret = -EIO;
                goto out_buf;
        } else {
-               ret = (blk << info->dqi_blocksize_bits) + sizeof(struct
+               ret = ((loff_t)blk << info->dqi_blocksize_bits) + sizeof(struct
                  qt_disk_dqdbheader) + i * info->dqi_entry_size;
        }
 out_buf:
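
All four quota_tree.c hunks cure the same promotion bug: blk is a 32-bit uint, so blk << info->dqi_blocksize_bits is evaluated in 32 bits and wraps once the quota file offset passes 4 GiB, only then being widened to the 64-bit loff_t parameter. Casting first keeps the whole shift in 64 bits, as this standalone example shows:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned int blk = 5000000;     /* block number past the 4 GiB mark */
        unsigned int blocksize_bits = 10;

        int64_t wrong = blk << blocksize_bits;           /* 32-bit shift wraps */
        int64_t right = (int64_t)blk << blocksize_bits;  /* 64-bit shift */

        printf("32-bit shift: %lld\n", (long long)wrong);   /* 825032704 */
        printf("64-bit shift: %lld\n", (long long)right);   /* 5120000000 */
        return 0;
    }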
diff --git a/fs/reiserfs/stree.c b/fs/reiserfs/stree.c
index 8bf88d690729..476a7ff49482 100644
--- a/fs/reiserfs/stree.c
+++ b/fs/reiserfs/stree.c
@@ -454,6 +454,12 @@ static int is_leaf(char *buf, int blocksize, struct buffer_head *bh)
                                         "(second one): %h", ih);
                        return 0;
                }
+               if (is_direntry_le_ih(ih) && (ih_item_len(ih) < (ih_entry_count(ih) * IH_SIZE))) {
+                       reiserfs_warning(NULL, "reiserfs-5093",
+                                        "item entry count seems wrong %h",
+                                        ih);
+                       return 0;
+               }
                prev_location = ih_location(ih);
        }
 
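
The added check encodes a cheap structural invariant: a directory item claiming ih_entry_count entries must be at least ih_entry_count * IH_SIZE bytes long, so a corrupted count is rejected before any entry is walked. The same idea in isolation (the constant and field widths are hypothetical, not reiserfs's on-disk layout):

    #include <stdio.h>

    #define HEADER_SIZE 24u     /* hypothetical per-entry header size */

    struct item_head_demo {
        unsigned short item_len;     /* bytes occupied by the item */
        unsigned short entry_count;  /* directory entries claimed inside */
    };

    static int entry_count_plausible(const struct item_head_demo *ih)
    {
        /* Each claimed entry needs at least one header's worth of bytes. */
        return ih->item_len >= ih->entry_count * HEADER_SIZE;
    }

    int main(void)
    {
        struct item_head_demo ok = { 120, 4 };
        struct item_head_demo bad = { 48, 4000 };   /* corrupted count */

        printf("ok:  %s\n", entry_count_plausible(&ok) ? "accept" : "reject");
        printf("bad: %s\n", entry_count_plausible(&bad) ? "accept" : "reject");
        return 0;
    }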
diff --git a/include/linux/mm.h b/include/linux/mm.h
index db6ae4d3fb4e..cd5c313729ea 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -2439,8 +2439,9 @@ extern int __meminit __early_pfn_to_nid(unsigned long pfn,
 #endif
 
 extern void set_dma_reserve(unsigned long new_dma_reserve);
-extern void memmap_init_zone(unsigned long, int, unsigned long, unsigned long,
-               enum meminit_context, struct vmem_altmap *, int migratetype);
+extern void memmap_init_zone(unsigned long, int, unsigned long,
+               unsigned long, unsigned long, enum meminit_context,
+               struct vmem_altmap *, int migratetype);
 extern void setup_per_zone_wmarks(void);
 extern int __meminit init_per_zone_wmark_min(void);
 extern void mem_init(void);
diff --git a/include/uapi/linux/const.h b/include/uapi/linux/const.h
index 5ed721ad5b19..af2a44c08683 100644
--- a/include/uapi/linux/const.h
+++ b/include/uapi/linux/const.h
@@ -28,4 +28,9 @@
 #define _BITUL(x)      (_UL(1) << (x))
 #define _BITULL(x)     (_ULL(1) << (x))
 
+#define __ALIGN_KERNEL(x, a)           __ALIGN_KERNEL_MASK(x, (typeof(x))(a) - 1)
+#define __ALIGN_KERNEL_MASK(x, mask)   (((x) + (mask)) & ~(mask))
+
+#define __KERNEL_DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
+
 #endif /* _UAPI_LINUX_CONST_H */
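
For reference, the macros moved into this header round x up to the next multiple of the power-of-two a by adding a - 1 and masking the low bits off; typeof keeps the mask in the same type as x. A quick check of the arithmetic (GNU C, since typeof is an extension):

    #include <stdio.h>

    #define __ALIGN_KERNEL_MASK(x, mask)  (((x) + (mask)) & ~(mask))
    #define __ALIGN_KERNEL(x, a)  __ALIGN_KERNEL_MASK(x, (typeof(x))(a) - 1)

    int main(void)
    {
        unsigned long vals[] = { 0, 1, 13, 16, 17 };
        unsigned int i;

        for (i = 0; i < sizeof(vals) / sizeof(vals[0]); i++)
            printf("align(%lu, 16) = %lu\n",
                   vals[i], __ALIGN_KERNEL(vals[i], 16UL));
        return 0;
    }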
diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h
index 9ca87bc73c44..cde753bb2093 100644
--- a/include/uapi/linux/ethtool.h
+++ b/include/uapi/linux/ethtool.h
@@ -14,7 +14,7 @@
 #ifndef _UAPI_LINUX_ETHTOOL_H
 #define _UAPI_LINUX_ETHTOOL_H
 
-#include <linux/kernel.h>
+#include <linux/const.h>
 #include <linux/types.h>
 #include <linux/if_ether.h>
 
diff --git a/include/uapi/linux/kernel.h b/include/uapi/linux/kernel.h
index 0ff8f7477847..fadf2db71fe8 100644
--- a/include/uapi/linux/kernel.h
+++ b/include/uapi/linux/kernel.h
@@ -3,13 +3,6 @@
 #define _UAPI_LINUX_KERNEL_H
 
 #include <linux/sysinfo.h>
-
-/*
- * 'kernel.h' contains some often-used function prototypes etc
- */
-#define __ALIGN_KERNEL(x, a)           __ALIGN_KERNEL_MASK(x, (typeof(x))(a) - 1)
-#define __ALIGN_KERNEL_MASK(x, mask)   (((x) + (mask)) & ~(mask))
-
-#define __KERNEL_DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
+#include <linux/const.h>
 
 #endif /* _UAPI_LINUX_KERNEL_H */
diff --git a/include/uapi/linux/lightnvm.h b/include/uapi/linux/lightnvm.h
index f9a1be7fc696..ead2e72e5c88 100644
--- a/include/uapi/linux/lightnvm.h
+++ b/include/uapi/linux/lightnvm.h
@@ -21,7 +21,7 @@
 #define _UAPI_LINUX_LIGHTNVM_H
 
 #ifdef __KERNEL__
-#include <linux/kernel.h>
+#include <linux/const.h>
 #include <linux/ioctl.h>
 #else /* __KERNEL__ */
 #include <stdio.h>
diff --git a/include/uapi/linux/mroute6.h b/include/uapi/linux/mroute6.h
index c36177a86516..a1fd6173e2db 100644
--- a/include/uapi/linux/mroute6.h
+++ b/include/uapi/linux/mroute6.h
@@ -2,7 +2,7 @@
 #ifndef _UAPI__LINUX_MROUTE6_H
 #define _UAPI__LINUX_MROUTE6_H
 
-#include <linux/kernel.h>
+#include <linux/const.h>
 #include <linux/types.h>
 #include <linux/sockios.h>
 #include <linux/in6.h>         /* For struct sockaddr_in6. */
diff --git a/include/uapi/linux/netfilter/x_tables.h b/include/uapi/linux/netfilter/x_tables.h
index a8283f7dbc51..b8c6bb233ac1 100644
--- a/include/uapi/linux/netfilter/x_tables.h
+++ b/include/uapi/linux/netfilter/x_tables.h
@@ -1,7 +1,7 @@
 /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
 #ifndef _UAPI_X_TABLES_H
 #define _UAPI_X_TABLES_H
-#include <linux/kernel.h>
+#include <linux/const.h>
 #include <linux/types.h>
 
 #define XT_FUNCTION_MAXNAMELEN 30
diff --git a/include/uapi/linux/netlink.h b/include/uapi/linux/netlink.h
index c3816ff7bfc3..3d94269bbfa8 100644
--- a/include/uapi/linux/netlink.h
+++ b/include/uapi/linux/netlink.h
@@ -2,7 +2,7 @@
 #ifndef _UAPI__LINUX_NETLINK_H
 #define _UAPI__LINUX_NETLINK_H
 
-#include <linux/kernel.h>
+#include <linux/const.h>
 #include <linux/socket.h> /* for __kernel_sa_family_t */
 #include <linux/types.h>
 
diff --git a/include/uapi/linux/sysctl.h b/include/uapi/linux/sysctl.h
index 27c1ed2822e6..458179df9b27 100644
--- a/include/uapi/linux/sysctl.h
+++ b/include/uapi/linux/sysctl.h
@@ -23,7 +23,7 @@
 #ifndef _UAPI_LINUX_SYSCTL_H
 #define _UAPI_LINUX_SYSCTL_H
 
-#include <linux/kernel.h>
+#include <linux/const.h>
 #include <linux/types.h>
 #include <linux/compiler.h>
 
diff --git a/kernel/cgroup/cgroup-v1.c b/kernel/cgroup/cgroup-v1.c
index 191c329e482a..32596fdbcd5b 100644
--- a/kernel/cgroup/cgroup-v1.c
+++ b/kernel/cgroup/cgroup-v1.c
@@ -908,6 +908,8 @@ int cgroup1_parse_param(struct fs_context *fc, struct fs_parameter *param)
        opt = fs_parse(fc, cgroup1_fs_parameters, param, &result);
        if (opt == -ENOPARAM) {
                if (strcmp(param->key, "source") == 0) {
+                       if (fc->source)
+                               return invalf(fc, "Multiple sources not supported");
                        fc->source = param->string;
                        param->string = NULL;
                        return 0;
diff --git a/kernel/module.c b/kernel/module.c
index a4fa44a652a7..e20499309b2a 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -1895,7 +1895,6 @@ static int mod_sysfs_init(struct module *mod)
        if (err)
                mod_kobject_put(mod);
 
-       /* delay uevent until full sysfs population */
 out:
        return err;
 }
@@ -1932,7 +1931,6 @@ static int mod_sysfs_setup(struct module *mod,
        add_sect_attrs(mod, info);
        add_notes_attrs(mod, info);
 
-       kobject_uevent(&mod->mkobj.kobj, KOBJ_ADD);
        return 0;
 
 out_unreg_modinfo_attrs:
@@ -3639,6 +3637,9 @@ static noinline int do_init_module(struct module *mod)
        blocking_notifier_call_chain(&module_notify_list,
                                     MODULE_STATE_LIVE, mod);
 
+       /* Delay uevent until module has finished its init routine */
+       kobject_uevent(&mod->mkobj.kobj, KOBJ_ADD);
+
        /*
         * We need to finish all async code before the module init sequence
         * is done.  This has potential to deadlock.  For example, a newly
@@ -3991,6 +3992,7 @@ static int load_module(struct load_info *info, const char __user *uargs,
                                     MODULE_STATE_GOING, mod);
        klp_module_going(mod);
  bug_cleanup:
+       mod->state = MODULE_STATE_GOING;
        /* module_bug_cleanup needs module_mutex protection */
        mutex_lock(&module_mutex);
        module_bug_cleanup(mod);
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 81632cd5e3b7..e8d351b7f9b0 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -941,13 +941,6 @@ static bool can_stop_idle_tick(int cpu, struct tick_sched *ts)
                 */
                if (tick_do_timer_cpu == cpu)
                        return false;
-               /*
-                * Boot safety: make sure the timekeeping duty has been
-                * assigned before entering dyntick-idle mode,
-                * tick_do_timer_cpu is TICK_DO_TIMER_BOOT
-                */
-               if (unlikely(tick_do_timer_cpu == TICK_DO_TIMER_BOOT))
-                       return false;
 
                /* Should not happen for nohz-full */
                if (WARN_ON_ONCE(tick_do_timer_cpu == TICK_DO_TIMER_NONE))
diff --git a/lib/zlib_dfltcc/Makefile b/lib/zlib_dfltcc/Makefile
index 8e4d5afbbb10..66e1c96387c4 100644
--- a/lib/zlib_dfltcc/Makefile
+++ b/lib/zlib_dfltcc/Makefile
@@ -8,4 +8,4 @@
 
 obj-$(CONFIG_ZLIB_DFLTCC) += zlib_dfltcc.o
 
-zlib_dfltcc-objs := dfltcc.o dfltcc_deflate.o dfltcc_inflate.o dfltcc_syms.o
+zlib_dfltcc-objs := dfltcc.o dfltcc_deflate.o dfltcc_inflate.o
diff --git a/lib/zlib_dfltcc/dfltcc.c b/lib/zlib_dfltcc/dfltcc.c
index c30de430b30c..782f76e9d4da 100644
--- a/lib/zlib_dfltcc/dfltcc.c
+++ b/lib/zlib_dfltcc/dfltcc.c
@@ -1,7 +1,8 @@
 // SPDX-License-Identifier: Zlib
 /* dfltcc.c - SystemZ DEFLATE CONVERSION CALL support. */
 
-#include <linux/zutil.h>
+#include <linux/export.h>
+#include <linux/module.h>
 #include "dfltcc_util.h"
 #include "dfltcc.h"
 
@@ -53,3 +54,6 @@ void dfltcc_reset(
     dfltcc_state->dht_threshold = DFLTCC_DHT_MIN_SAMPLE_SIZE;
     dfltcc_state->param.ribm = DFLTCC_RIBM;
 }
+EXPORT_SYMBOL(dfltcc_reset);
+
+MODULE_LICENSE("GPL");
diff --git a/lib/zlib_dfltcc/dfltcc_deflate.c b/lib/zlib_dfltcc/dfltcc_deflate.c
index 00c185101c6d..6c946e8532ee 100644
--- a/lib/zlib_dfltcc/dfltcc_deflate.c
+++ b/lib/zlib_dfltcc/dfltcc_deflate.c
@@ -4,6 +4,7 @@
 #include "dfltcc_util.h"
 #include "dfltcc.h"
 #include <asm/setup.h>
+#include <linux/export.h>
 #include <linux/zutil.h>
 
 /*
@@ -34,6 +35,7 @@ int dfltcc_can_deflate(
 
     return 1;
 }
+EXPORT_SYMBOL(dfltcc_can_deflate);
 
 static void dfltcc_gdht(
     z_streamp strm
@@ -277,3 +279,4 @@ int dfltcc_deflate(
         goto again; /* deflate() must use all input or all output */
     return 1;
 }
+EXPORT_SYMBOL(dfltcc_deflate);
diff --git a/lib/zlib_dfltcc/dfltcc_inflate.c b/lib/zlib_dfltcc/dfltcc_inflate.c
index db107016d29b..fb60b5a6a1cb 100644
--- a/lib/zlib_dfltcc/dfltcc_inflate.c
+++ b/lib/zlib_dfltcc/dfltcc_inflate.c
@@ -125,7 +125,7 @@ dfltcc_inflate_action dfltcc_inflate(
     param->ho = (state->write - state->whave) & ((1 << HB_BITS) - 1);
     if (param->hl)
         param->nt = 0; /* Honor history for the first block */
-    param->cv = state->flags ? REVERSE(state->check) : state->check;
+    param->cv = state->check;
 
     /* Inflate */
     do {
@@ -138,7 +138,7 @@ dfltcc_inflate_action dfltcc_inflate(
     state->bits = param->sbb;
     state->whave = param->hl;
     state->write = (param->ho + param->hl) & ((1 << HB_BITS) - 1);
-    state->check = state->flags ? REVERSE(param->cv) : param->cv;
+    state->check = param->cv;
     if (cc == DFLTCC_CC_OP2_CORRUPT && param->oesc != 0) {
         /* Report an error if stream is corrupted */
         state->mode = BAD;
diff --git a/lib/zlib_dfltcc/dfltcc_syms.c b/lib/zlib_dfltcc/dfltcc_syms.c
deleted file mode 100644
index 6f23481804c1..000000000000
--- a/lib/zlib_dfltcc/dfltcc_syms.c
+++ /dev/null
@@ -1,17 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * linux/lib/zlib_dfltcc/dfltcc_syms.c
- *
- * Exported symbols for the s390 zlib dfltcc support.
- *
- */
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/zlib.h>
-#include "dfltcc.h"
-
-EXPORT_SYMBOL(dfltcc_can_deflate);
-EXPORT_SYMBOL(dfltcc_deflate);
-EXPORT_SYMBOL(dfltcc_reset);
-MODULE_LICENSE("GPL");
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 3b38ea958e95..1fd11f96a707 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -4106,10 +4106,30 @@ static vm_fault_t hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
                 * may get SIGKILLed if it later faults.
                 */
                if (outside_reserve) {
+                       struct address_space *mapping = vma->vm_file->f_mapping;
+                       pgoff_t idx;
+                       u32 hash;
+
                        put_page(old_page);
                        BUG_ON(huge_pte_none(pte));
+                       /*
+                        * Drop hugetlb_fault_mutex and i_mmap_rwsem before
+                        * unmapping.  unmapping needs to hold i_mmap_rwsem
+                        * in write mode.  Dropping i_mmap_rwsem in read mode
+                        * here is OK as COW mappings do not interact with
+                        * PMD sharing.
+                        *
+                        * Reacquire both after unmap operation.
+                        */
+                       idx = vma_hugecache_offset(h, vma, haddr);
+                       hash = hugetlb_fault_mutex_hash(mapping, idx);
+                       mutex_unlock(&hugetlb_fault_mutex_table[hash]);
+                       i_mmap_unlock_read(mapping);
+
                        unmap_ref_private(mm, vma, old_page, haddr);
-                       BUG_ON(huge_pte_none(pte));
+
+                       i_mmap_lock_read(mapping);
+                       mutex_lock(&hugetlb_fault_mutex_table[hash]);
                        spin_lock(ptl);
                        ptep = huge_pte_offset(mm, haddr, huge_page_size(h));
                        if (likely(ptep &&
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 0f855deea4b2..aa453a433143 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -714,7 +714,7 @@ void __ref move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn,
         * expects the zone spans the pfn range. All the pages in the range
         * are reserved so nobody should be touching them so we should be safe
         */
-       memmap_init_zone(nr_pages, nid, zone_idx(zone), start_pfn,
+       memmap_init_zone(nr_pages, nid, zone_idx(zone), start_pfn, 0,
                         MEMINIT_HOTPLUG, altmap, migratetype);
 
        set_zone_contiguous(zone);
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 32f783ddb5c3..14b9e83ff9da 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -448,6 +448,8 @@ defer_init(int nid, unsigned long pfn, unsigned long end_pfn)
        if (end_pfn < pgdat_end_pfn(NODE_DATA(nid)))
                return false;
 
+       if (NODE_DATA(nid)->first_deferred_pfn != ULONG_MAX)
+               return true;
        /*
         * We start only with one section of pages, more pages are added as
         * needed until the rest of deferred pages are initialized.
@@ -6050,7 +6052,7 @@ overlap_memmap_init(unsigned long zone, unsigned long 
*pfn)
  * zone stats (e.g., nr_isolate_pageblock) are touched.
  */
 void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
-               unsigned long start_pfn,
+               unsigned long start_pfn, unsigned long zone_end_pfn,
                enum meminit_context context,
                struct vmem_altmap *altmap, int migratetype)
 {
@@ -6086,7 +6088,7 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
                if (context == MEMINIT_EARLY) {
                        if (overlap_memmap_init(zone, &pfn))
                                continue;
-                       if (defer_init(nid, pfn, end_pfn))
+                       if (defer_init(nid, pfn, zone_end_pfn))
                                break;
                }
 
@@ -6200,7 +6202,7 @@ void __meminit __weak memmap_init(unsigned long size, int nid,
 
                if (end_pfn > start_pfn) {
                        size = end_pfn - start_pfn;
-                       memmap_init_zone(size, nid, zone, start_pfn,
+                       memmap_init_zone(size, nid, zone, start_pfn, range_end_pfn,
                                         MEMINIT_EARLY, NULL, MIGRATE_MOVABLE);
                }
        }
diff --git a/net/ethtool/channels.c b/net/ethtool/channels.c
index 5635604cb9ba..25a9e566ef5c 100644
--- a/net/ethtool/channels.c
+++ b/net/ethtool/channels.c
@@ -194,8 +194,9 @@ int ethnl_set_channels(struct sk_buff *skb, struct genl_info *info)
        if (netif_is_rxfh_configured(dev) &&
            !ethtool_get_max_rxfh_channel(dev, &max_rx_in_use) &&
            (channels.combined_count + channels.rx_count) <= max_rx_in_use) {
+               ret = -EINVAL;
                GENL_SET_ERR_MSG(info, "requested channel counts are too low for existing indirection table settings");
-               return -EINVAL;
+               goto out_ops;
        }
 
        /* Disabling channels, query zero-copy AF_XDP sockets */
@@ -203,8 +204,9 @@ int ethnl_set_channels(struct sk_buff *skb, struct genl_info *info)
                       min(channels.rx_count, channels.tx_count);
        for (i = from_channel; i < old_total; i++)
                if (xsk_get_pool_from_qid(dev, i)) {
+                       ret = -EINVAL;
                        GENL_SET_ERR_MSG(info, "requested channel counts are too low for existing zerocopy AF_XDP sockets");
-                       return -EINVAL;
+                       goto out_ops;
                }
 
        ret = dev->ethtool_ops->set_channels(dev, &channels);
diff --git a/net/ethtool/strset.c b/net/ethtool/strset.c
index 0baad0ce1832..c3a5489964cd 100644
--- a/net/ethtool/strset.c
+++ b/net/ethtool/strset.c
@@ -182,7 +182,7 @@ static int strset_parse_request(struct ethnl_req_info *req_base,
                ret = strset_get_id(attr, &id, extack);
                if (ret < 0)
                        return ret;
-               if (ret >= ETH_SS_COUNT) {
+               if (id >= ETH_SS_COUNT) {
                        NL_SET_ERR_MSG_ATTR(extack, attr,
                                            "unknown string set id");
                        return -EOPNOTSUPP;
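
The strset bug is a wrong-variable range check: strset_get_id() returns a status code (0 on success) and stores the parsed set id through &id, so testing ret against ETH_SS_COUNT could never fire and out-of-range ids slipped through. A minimal illustration with hypothetical names:

    #define MAX_ID 8                        /* stands in for ETH_SS_COUNT */

    static int parse_id(unsigned int raw, unsigned int *id)
    {
            *id = raw;
            return 0;                       /* status code, 0 on success */
    }

    static int handle(unsigned int raw)
    {
            unsigned int id;
            int ret = parse_id(raw, &id);

            if (ret < 0)
                    return ret;
            if (id >= MAX_ID)               /* "ret >= MAX_ID" was dead code */
                    return -1;
            return 0;
    }
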
diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
index 88f2a7a0ccb8..967ce9ccfc0d 100644
--- a/net/mptcp/protocol.c
+++ b/net/mptcp/protocol.c
@@ -2081,6 +2081,8 @@ struct sock *mptcp_sk_clone(const struct sock *sk,
        sock_reset_flag(nsk, SOCK_RCU_FREE);
        /* will be fully established after successful MPC subflow creation */
        inet_sk_state_store(nsk, TCP_SYN_RECV);
+
+       security_inet_csk_clone(nsk, req);
        bh_unlock_sock(nsk);
 
        /* keep a single reference */
diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c
index b0ad7687ee2c..c6653ee7f701 100644
--- a/net/sched/sch_taprio.c
+++ b/net/sched/sch_taprio.c
@@ -1596,6 +1596,21 @@ static int taprio_change(struct Qdisc *sch, struct nlattr *opt,
        return err;
 }
 
+static void taprio_reset(struct Qdisc *sch)
+{
+       struct taprio_sched *q = qdisc_priv(sch);
+       struct net_device *dev = qdisc_dev(sch);
+       int i;
+
+       hrtimer_cancel(&q->advance_timer);
+       if (q->qdiscs) {
+               for (i = 0; i < dev->num_tx_queues && q->qdiscs[i]; i++)
+                       qdisc_reset(q->qdiscs[i]);
+       }
+       sch->qstats.backlog = 0;
+       sch->q.qlen = 0;
+}
+
 static void taprio_destroy(struct Qdisc *sch)
 {
        struct taprio_sched *q = qdisc_priv(sch);
@@ -1606,7 +1621,6 @@ static void taprio_destroy(struct Qdisc *sch)
        list_del(&q->taprio_list);
        spin_unlock(&taprio_list_lock);
 
-       hrtimer_cancel(&q->advance_timer);
 
        taprio_disable_offload(dev, q, NULL);
 
@@ -1953,6 +1967,7 @@ static struct Qdisc_ops taprio_qdisc_ops __read_mostly = {
        .init           = taprio_init,
        .change         = taprio_change,
        .destroy        = taprio_destroy,
+       .reset          = taprio_reset,
        .peek           = taprio_peek,
        .dequeue        = taprio_dequeue,
        .enqueue        = taprio_enqueue,
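
The ordering inside the new taprio_reset() is the point of the fix: the advance hrtimer is cancelled before the per-queue child qdiscs are reset, so the timer callback can no longer run into queues that are being (or have been) torn down; the loop also stops at the first NULL slot, since q->qdiscs is populated contiguously. A compilable user-space sketch of the same discipline, with hypothetical names:

    #include <stddef.h>

    struct child { int qlen; };

    struct sched {
            struct child **children;        /* NULL-terminated when allocated */
            int timer_armed;
    };

    static void timer_cancel(struct sched *s) { s->timer_armed = 0; }
    static void child_reset(struct child *c)  { c->qlen = 0; }

    static void sched_reset(struct sched *s)
    {
            timer_cancel(s);        /* first: the callback walks children */
            if (s->children)
                    for (size_t i = 0; s->children[i]; i++)
                            child_reset(s->children[i]);
    }
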
diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
index 47b155a49226..9f3f8e953ff0 100644
--- a/sound/core/pcm_native.c
+++ b/sound/core/pcm_native.c
@@ -755,8 +755,13 @@ static int snd_pcm_hw_params(struct snd_pcm_substream *substream,
                runtime->boundary *= 2;
 
        /* clear the buffer for avoiding possible kernel info leaks */
-       if (runtime->dma_area && !substream->ops->copy_user)
-               memset(runtime->dma_area, 0, runtime->dma_bytes);
+       if (runtime->dma_area && !substream->ops->copy_user) {
+               size_t size = runtime->dma_bytes;
+
+               if (runtime->info & SNDRV_PCM_INFO_MMAP)
+                       size = PAGE_ALIGN(size);
+               memset(runtime->dma_area, 0, size);
+       }
 
        snd_pcm_timer_resolution_change(substream);
        snd_pcm_set_state(substream, SNDRV_PCM_STATE_SETUP);
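
The PAGE_ALIGN here closes an information leak: an mmap-capable stream exposes whole pages to user space, so the slack between runtime->dma_bytes and the end of the last page is readable even though the driver never writes it. Worked example, assuming 4 KiB pages: dma_bytes = 6144 rounds up to 8192, so the trailing 2048 bytes get zeroed as well. A small sketch of that arithmetic:

    #include <string.h>

    #define PAGE_SZ       4096UL            /* assumption: 4 KiB pages */
    #define PAGE_ALIGN(x) (((x) + PAGE_SZ - 1) & ~(PAGE_SZ - 1))

    static unsigned char dma_area[2 * 4096];

    static void clear_for_mmap(size_t dma_bytes, int mmap_capable)
    {
            size_t size = dma_bytes;

            if (mmap_capable)
                    size = PAGE_ALIGN(size);  /* 6144 -> 8192 */
            memset(dma_area, 0, size);
    }
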
diff --git a/sound/core/rawmidi.c b/sound/core/rawmidi.c
index c78720a3299c..257ad5206240 100644
--- a/sound/core/rawmidi.c
+++ b/sound/core/rawmidi.c
@@ -95,11 +95,21 @@ static inline unsigned short snd_rawmidi_file_flags(struct file *file)
        }
 }
 
-static inline int snd_rawmidi_ready(struct snd_rawmidi_substream *substream)
+static inline bool __snd_rawmidi_ready(struct snd_rawmidi_runtime *runtime)
+{
+       return runtime->avail >= runtime->avail_min;
+}
+
+static bool snd_rawmidi_ready(struct snd_rawmidi_substream *substream)
 {
        struct snd_rawmidi_runtime *runtime = substream->runtime;
+       unsigned long flags;
+       bool ready;
 
-       return runtime->avail >= runtime->avail_min;
+       spin_lock_irqsave(&runtime->lock, flags);
+       ready = __snd_rawmidi_ready(runtime);
+       spin_unlock_irqrestore(&runtime->lock, flags);
+       return ready;
 }
 
 static inline int snd_rawmidi_ready_append(struct snd_rawmidi_substream *substream,
@@ -1019,7 +1029,7 @@ int snd_rawmidi_receive(struct snd_rawmidi_substream *substream,
        if (result > 0) {
                if (runtime->event)
                        schedule_work(&runtime->event_work);
-               else if (snd_rawmidi_ready(substream))
+               else if (__snd_rawmidi_ready(runtime))
                        wake_up(&runtime->sleep);
        }
        spin_unlock_irqrestore(&runtime->lock, flags);
@@ -1098,7 +1108,7 @@ static ssize_t snd_rawmidi_read(struct file *file, char __user *buf, size_t coun
        result = 0;
        while (count > 0) {
                spin_lock_irq(&runtime->lock);
-               while (!snd_rawmidi_ready(substream)) {
+               while (!__snd_rawmidi_ready(runtime)) {
                        wait_queue_entry_t wait;
 
                        if ((file->f_flags & O_NONBLOCK) != 0 || result > 0) {
@@ -1115,9 +1125,11 @@ static ssize_t snd_rawmidi_read(struct file *file, char __user *buf, size_t coun
                                return -ENODEV;
                        if (signal_pending(current))
                                return result > 0 ? result : -ERESTARTSYS;
-                       if (!runtime->avail)
-                               return result > 0 ? result : -EIO;
                        spin_lock_irq(&runtime->lock);
+                       if (!runtime->avail) {
+                               spin_unlock_irq(&runtime->lock);
+                               return result > 0 ? result : -EIO;
+                       }
                }
                spin_unlock_irq(&runtime->lock);
                count1 = snd_rawmidi_kernel_read1(substream,
@@ -1255,7 +1267,7 @@ int __snd_rawmidi_transmit_ack(struct snd_rawmidi_substream *substream, int coun
        runtime->avail += count;
        substream->bytes += count;
        if (count > 0) {
-               if (runtime->drain || snd_rawmidi_ready(substream))
+               if (runtime->drain || __snd_rawmidi_ready(runtime))
                        wake_up(&runtime->sleep);
        }
        return count;
@@ -1444,9 +1456,11 @@ static ssize_t snd_rawmidi_write(struct file *file, const char __user *buf,
                                return -ENODEV;
                        if (signal_pending(current))
                                return result > 0 ? result : -ERESTARTSYS;
-                       if (!runtime->avail && !timeout)
-                               return result > 0 ? result : -EIO;
                        spin_lock_irq(&runtime->lock);
+                       if (!runtime->avail && !timeout) {
+                               spin_unlock_irq(&runtime->lock);
+                               return result > 0 ? result : -EIO;
+                       }
                }
                spin_unlock_irq(&runtime->lock);
                count1 = snd_rawmidi_kernel_write1(substream, buf, NULL, count);
@@ -1526,6 +1540,7 @@ static void snd_rawmidi_proc_info_read(struct snd_info_entry *entry,
        struct snd_rawmidi *rmidi;
        struct snd_rawmidi_substream *substream;
        struct snd_rawmidi_runtime *runtime;
+       unsigned long buffer_size, avail, xruns;
 
        rmidi = entry->private_data;
        snd_iprintf(buffer, "%s\n\n", rmidi->name);
@@ -1544,13 +1559,16 @@ static void snd_rawmidi_proc_info_read(struct snd_info_entry *entry,
                                    "  Owner PID    : %d\n",
                                    pid_vnr(substream->pid));
                                runtime = substream->runtime;
+                               spin_lock_irq(&runtime->lock);
+                               buffer_size = runtime->buffer_size;
+                               avail = runtime->avail;
+                               spin_unlock_irq(&runtime->lock);
                                snd_iprintf(buffer,
                                    "  Mode         : %s\n"
                                    "  Buffer size  : %lu\n"
                                    "  Avail        : %lu\n",
                                    runtime->oss ? "OSS compatible" : "native",
-                                   (unsigned long) runtime->buffer_size,
-                                   (unsigned long) runtime->avail);
+                                   buffer_size, avail);
                        }
                }
        }
@@ -1568,13 +1586,16 @@ static void snd_rawmidi_proc_info_read(struct snd_info_entry *entry,
                                            "  Owner PID    : %d\n",
                                            pid_vnr(substream->pid));
                                runtime = substream->runtime;
+                               spin_lock_irq(&runtime->lock);
+                               buffer_size = runtime->buffer_size;
+                               avail = runtime->avail;
+                               xruns = runtime->xruns;
+                               spin_unlock_irq(&runtime->lock);
                                snd_iprintf(buffer,
                                            "  Buffer size  : %lu\n"
                                            "  Avail        : %lu\n"
                                            "  Overruns     : %lu\n",
-                                           (unsigned long) runtime->buffer_size,
-                                           (unsigned long) runtime->avail,
-                                           (unsigned long) runtime->xruns);
+                                           buffer_size, avail, xruns);
                        }
                }
        }
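
The thread running through the rawmidi changes is the kernel's usual locked/unlocked helper pair: __snd_rawmidi_ready() is the raw predicate for callers already holding runtime->lock (the receive and transmit-ack paths), snd_rawmidi_ready() is the wrapper that takes the lock itself, and the read/write loops and the proc reader now test or snapshot runtime fields under the lock instead of after dropping it. A compilable user-space sketch of the pair, with a pthread mutex standing in for the spinlock:

    #include <pthread.h>
    #include <stdbool.h>

    struct runtime {
            pthread_mutex_t lock;
            unsigned long avail, avail_min;
    };

    /* "__" variant: caller must already hold rt->lock. */
    static bool __ready(const struct runtime *rt)
    {
            return rt->avail >= rt->avail_min;
    }

    /* Locking wrapper for everyone else. */
    static bool ready(struct runtime *rt)
    {
            bool r;

            pthread_mutex_lock(&rt->lock);
            r = __ready(rt);
            pthread_mutex_unlock(&rt->lock);
            return r;
    }
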
diff --git a/sound/core/seq/seq_queue.h b/sound/core/seq/seq_queue.h
index 9254c8dbe5e3..25d2d6b61007 100644
--- a/sound/core/seq/seq_queue.h
+++ b/sound/core/seq/seq_queue.h
@@ -26,10 +26,10 @@ struct snd_seq_queue {
        
        struct snd_seq_timer *timer;    /* time keeper for this queue */
        int     owner;          /* client that 'owns' the timer */
-       unsigned int    locked:1,       /* timer is only accesibble by owner if set */
-               klocked:1,      /* kernel lock (after START) */ 
-               check_again:1,
-               check_blocked:1;
+       bool    locked;         /* timer is only accesibble by owner if set */
+       bool    klocked;        /* kernel lock (after START) */
+       bool    check_again;    /* concurrent access happened during check */
+       bool    check_blocked;  /* queue being checked */
 
        unsigned int flags;             /* status flags */
        unsigned int info_flags;        /* info for sync */
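
The bitfield-to-bool conversion is about concurrency, not size: the four 1-bit fields shared one word, so two contexts flipping different flags at the same time each perform a read-modify-write of that whole word and can silently undo each other; one bool per flag gives every writer its own memory location. A small demonstration of the layout difference:

    #include <stdio.h>

    struct packed_flags { unsigned int a:1, b:1; }; /* one shared word   */
    struct split_flags  { _Bool a, b; };            /* independent bytes */

    int main(void)
    {
            /* A store to packed_flags.a is a read-modify-write of the word
             * that also holds .b, so unlocked concurrent writers can race;
             * stores to split_flags.a and .b never overlap. */
            printf("packed: %zu bytes, split: %zu bytes\n",
                   sizeof(struct packed_flags), sizeof(struct split_flags));
            return 0;
    }
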
diff --git a/tools/include/uapi/linux/const.h b/tools/include/uapi/linux/const.h
index 5ed721ad5b19..af2a44c08683 100644
--- a/tools/include/uapi/linux/const.h
+++ b/tools/include/uapi/linux/const.h
@@ -28,4 +28,9 @@
 #define _BITUL(x)      (_UL(1) << (x))
 #define _BITULL(x)     (_ULL(1) << (x))
 
+#define __ALIGN_KERNEL(x, a)           __ALIGN_KERNEL_MASK(x, (typeof(x))(a) - 1)
+#define __ALIGN_KERNEL_MASK(x, mask)   (((x) + (mask)) & ~(mask))
+
+#define __KERNEL_DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
+
 #endif /* _UAPI_LINUX_CONST_H */
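
Both helpers are plain arithmetic: __ALIGN_KERNEL rounds x up to the next multiple of a power-of-two a by adding a - 1 and masking off the low bits, and __KERNEL_DIV_ROUND_UP is ordinary ceiling division. For example, __ALIGN_KERNEL(10, 8) = (10 + 7) & ~7 = 16, and __KERNEL_DIV_ROUND_UP(10, 8) = (10 + 7) / 8 = 2. A self-checking sketch (typeof is a GCC/Clang extension, as in the header itself):

    #include <assert.h>

    #define __ALIGN_KERNEL_MASK(x, mask)  (((x) + (mask)) & ~(mask))
    #define __ALIGN_KERNEL(x, a)          __ALIGN_KERNEL_MASK(x, (typeof(x))(a) - 1)
    #define __KERNEL_DIV_ROUND_UP(n, d)   (((n) + (d) - 1) / (d))

    int main(void)
    {
            assert(__ALIGN_KERNEL(10UL, 8UL) == 16);    /* next multiple of 8 */
            assert(__ALIGN_KERNEL(16UL, 8UL) == 16);    /* already aligned    */
            assert(__KERNEL_DIV_ROUND_UP(10, 8) == 2);  /* ceiling division   */
            return 0;
    }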