commit:     c429ed15c8c907dbe61005f1542b6f1c16e99543
Author:     Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
AuthorDate: Tue Jun 26 16:24:12 2018 +0000
Commit:     Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
CommitDate: Tue Jun 26 16:24:12 2018 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=c429ed15

Linux kernel 4.17.3

 0000_README             |    4 +
 1002_linux-4.17.3.patch | 2352 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 2356 insertions(+)

diff --git a/0000_README b/0000_README
index a4cf389..daa330b 100644
--- a/0000_README
+++ b/0000_README
@@ -51,6 +51,10 @@ Patch:  1001_linux-4.17.2.patch
 From:   http://www.kernel.org
 Desc:   Linux 4.17.2
 
+Patch:  1002_linux-4.17.3.patch
+From:   http://www.kernel.org
+Desc:   Linux 4.17.3
+
 Patch:  1800_iommu-amd-dma-direct-revert.patch
 From:   https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/patch/?id=e16c4790de39dc861b749674c2a9319507f6f64f
 Desc:   Revert iommu/amd_iommu: Use CONFIG_DMA_DIRECT_OPS=y and dma_direct_{alloc,free}(). See bug #658538.

diff --git a/1002_linux-4.17.3.patch b/1002_linux-4.17.3.patch
new file mode 100644
index 0000000..9fdc2fc
--- /dev/null
+++ b/1002_linux-4.17.3.patch
@@ -0,0 +1,2352 @@
+diff --git a/Makefile b/Makefile
+index f43cd522b175..31dc3a08295a 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 4
+ PATCHLEVEL = 17
+-SUBLEVEL = 2
++SUBLEVEL = 3
+ EXTRAVERSION =
+ NAME = Merciless Moray
+ 
+diff --git a/arch/um/drivers/vector_transports.c b/arch/um/drivers/vector_transports.c
+index 9065047f844b..77e4ebc206ae 100644
+--- a/arch/um/drivers/vector_transports.c
++++ b/arch/um/drivers/vector_transports.c
+@@ -120,7 +120,8 @@ static int raw_form_header(uint8_t *header,
+               skb,
+               vheader,
+               virtio_legacy_is_little_endian(),
+-              false
++              false,
++              0
+       );
+ 
+       return 0;
+diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
+index 08acd954f00e..74a9e06b6cfd 100644
+--- a/arch/x86/include/asm/apic.h
++++ b/arch/x86/include/asm/apic.h
+@@ -436,6 +436,8 @@ static inline void apic_set_eoi_write(void (*eoi_write)(u32 reg, u32 v)) {}
+ 
+ #endif /* CONFIG_X86_LOCAL_APIC */
+ 
++extern void apic_ack_irq(struct irq_data *data);
++
+ static inline void ack_APIC_irq(void)
+ {
+       /*
+diff --git a/arch/x86/include/asm/trace/irq_vectors.h b/arch/x86/include/asm/trace/irq_vectors.h
+index 22647a642e98..0af81b590a0c 100644
+--- a/arch/x86/include/asm/trace/irq_vectors.h
++++ b/arch/x86/include/asm/trace/irq_vectors.h
+@@ -236,7 +236,7 @@ TRACE_EVENT(vector_alloc,
+       TP_PROTO(unsigned int irq, unsigned int vector, bool reserved,
+                int ret),
+ 
+-      TP_ARGS(irq, vector, ret, reserved),
++      TP_ARGS(irq, vector, reserved, ret),
+ 
+       TP_STRUCT__entry(
+               __field(        unsigned int,   irq             )
+diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
+index 7553819c74c3..3982f79d2377 100644
+--- a/arch/x86/kernel/apic/io_apic.c
++++ b/arch/x86/kernel/apic/io_apic.c
+@@ -1851,7 +1851,7 @@ static void ioapic_ir_ack_level(struct irq_data *irq_data)
+        * intr-remapping table entry. Hence for the io-apic
+        * EOI we use the pin number.
+        */
+-      ack_APIC_irq();
++      apic_ack_irq(irq_data);
+       eoi_ioapic_pin(data->entry.vector, data);
+ }
+ 
+diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c
+index bb6f7a2148d7..b708f597eee3 100644
+--- a/arch/x86/kernel/apic/vector.c
++++ b/arch/x86/kernel/apic/vector.c
+@@ -235,6 +235,15 @@ static int allocate_vector(struct irq_data *irqd, const struct cpumask *dest)
+       if (vector && cpu_online(cpu) && cpumask_test_cpu(cpu, dest))
+               return 0;
+ 
++      /*
++       * Careful here. @apicd might either have move_in_progress set or
++       * be enqueued for cleanup. Assigning a new vector would either
++       * leave a stale vector on some CPU around or in case of a pending
++       * cleanup corrupt the hlist.
++       */
++      if (apicd->move_in_progress || !hlist_unhashed(&apicd->clist))
++              return -EBUSY;
++
+       vector = irq_matrix_alloc(vector_matrix, dest, resvd, &cpu);
+       if (vector > 0)
+               apic_update_vector(irqd, vector, cpu);
+@@ -800,13 +809,18 @@ static int apic_retrigger_irq(struct irq_data *irqd)
+       return 1;
+ }
+ 
+-void apic_ack_edge(struct irq_data *irqd)
++void apic_ack_irq(struct irq_data *irqd)
+ {
+-      irq_complete_move(irqd_cfg(irqd));
+       irq_move_irq(irqd);
+       ack_APIC_irq();
+ }
+ 
++void apic_ack_edge(struct irq_data *irqd)
++{
++      irq_complete_move(irqd_cfg(irqd));
++      apic_ack_irq(irqd);
++}
++
+ static struct irq_chip lapic_controller = {
+       .name                   = "APIC",
+       .irq_ack                = apic_ack_edge,
+diff --git a/arch/x86/kernel/cpu/intel_rdt.c b/arch/x86/kernel/cpu/intel_rdt.c
+index 589b948e6e01..316a8875bd90 100644
+--- a/arch/x86/kernel/cpu/intel_rdt.c
++++ b/arch/x86/kernel/cpu/intel_rdt.c
+@@ -821,6 +821,8 @@ static __init void rdt_quirks(void)
+       case INTEL_FAM6_SKYLAKE_X:
+               if (boot_cpu_data.x86_stepping <= 4)
+                       set_rdt_options("!cmt,!mbmtotal,!mbmlocal,!l3cat");
++              else
++                      set_rdt_options("!l3cat");
+       }
+ }
+ 
+diff --git a/arch/x86/kernel/cpu/mcheck/mce-inject.c b/arch/x86/kernel/cpu/mcheck/mce-inject.c
+index 475cb4f5f14f..c805a06e14c3 100644
+--- a/arch/x86/kernel/cpu/mcheck/mce-inject.c
++++ b/arch/x86/kernel/cpu/mcheck/mce-inject.c
+@@ -48,7 +48,7 @@ static struct dentry *dfs_inj;
+ 
+ static u8 n_banks;
+ 
+-#define MAX_FLAG_OPT_SIZE     3
++#define MAX_FLAG_OPT_SIZE     4
+ #define NBCFG                 0x44
+ 
+ enum injection_type {
+diff --git a/arch/x86/platform/uv/uv_irq.c b/arch/x86/platform/uv/uv_irq.c
+index e4cb9f4cde8a..fc13cbbb2dce 100644
+--- a/arch/x86/platform/uv/uv_irq.c
++++ b/arch/x86/platform/uv/uv_irq.c
+@@ -47,11 +47,6 @@ static void uv_program_mmr(struct irq_cfg *cfg, struct uv_irq_2_mmr_pnode *info)
+ 
+ static void uv_noop(struct irq_data *data) { }
+ 
+-static void uv_ack_apic(struct irq_data *data)
+-{
+-      ack_APIC_irq();
+-}
+-
+ static int
+ uv_set_irq_affinity(struct irq_data *data, const struct cpumask *mask,
+                   bool force)
+@@ -73,7 +68,7 @@ static struct irq_chip uv_irq_chip = {
+       .name                   = "UV-CORE",
+       .irq_mask               = uv_noop,
+       .irq_unmask             = uv_noop,
+-      .irq_eoi                = uv_ack_apic,
++      .irq_eoi                = apic_ack_irq,
+       .irq_set_affinity       = uv_set_irq_affinity,
+ };
+ 
+diff --git a/block/blk-mq.c b/block/blk-mq.c
+index 9ce9cac16c3f..90ffd8151c57 100644
+--- a/block/blk-mq.c
++++ b/block/blk-mq.c
+@@ -2473,7 +2473,6 @@ static void blk_mq_del_queue_tag_set(struct request_queue *q)
+ 
+       mutex_lock(&set->tag_list_lock);
+       list_del_rcu(&q->tag_set_list);
+-      INIT_LIST_HEAD(&q->tag_set_list);
+       if (list_is_singular(&set->tag_list)) {
+               /* just transitioned to unshared */
+               set->flags &= ~BLK_MQ_F_TAG_SHARED;
+@@ -2481,8 +2480,8 @@ static void blk_mq_del_queue_tag_set(struct request_queue *q)
+               blk_mq_update_tag_set_depth(set, false);
+       }
+       mutex_unlock(&set->tag_list_lock);
+-
+       synchronize_rcu();
++      INIT_LIST_HEAD(&q->tag_set_list);
+ }
+ 
+ static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set,
+diff --git a/drivers/acpi/acpica/psloop.c b/drivers/acpi/acpica/psloop.c
+index 68422afc365f..bc5f05906bd1 100644
+--- a/drivers/acpi/acpica/psloop.c
++++ b/drivers/acpi/acpica/psloop.c
+@@ -515,6 +515,22 @@ acpi_status acpi_ps_parse_loop(struct acpi_walk_state *walk_state)
+                               if (ACPI_FAILURE(status)) {
+                                       return_ACPI_STATUS(status);
+                               }
++                              if (walk_state->opcode == AML_SCOPE_OP) {
++                                      /*
++                                       * If the scope op fails to parse, skip the body of the
++                                       * scope op because the parse failure indicates that the
++                                       * device may not exist.
++                                       */
++                                      walk_state->parser_state.aml =
++                                          walk_state->aml + 1;
++                                      walk_state->parser_state.aml =
++                                          acpi_ps_get_next_package_end
++                                          (&walk_state->parser_state);
++                                      walk_state->aml =
++                                          walk_state->parser_state.aml;
++                                      ACPI_ERROR((AE_INFO,
++                                                  "Skipping Scope block"));
++                              }
+ 
+                               continue;
+                       }
+@@ -557,7 +573,40 @@ acpi_status acpi_ps_parse_loop(struct acpi_walk_state *walk_state)
+                               if (ACPI_FAILURE(status)) {
+                                       return_ACPI_STATUS(status);
+                               }
+-
++                              if ((walk_state->control_state) &&
++                                  ((walk_state->control_state->control.
++                                    opcode == AML_IF_OP)
++                                   || (walk_state->control_state->control.
++                                       opcode == AML_WHILE_OP))) {
++                                      /*
++                                       * If the if/while op fails to parse, we will skip parsing
++                                       * the body of the op.
++                                       */
++                                      parser_state->aml =
++                                          walk_state->control_state->control.
++                                          aml_predicate_start + 1;
++                                      parser_state->aml =
++                                          acpi_ps_get_next_package_end
++                                          (parser_state);
++                                      walk_state->aml = parser_state->aml;
++
++                                      ACPI_ERROR((AE_INFO,
++                                                  "Skipping While/If block"));
++                                      if (*walk_state->aml == AML_ELSE_OP) {
++                                              ACPI_ERROR((AE_INFO,
++                                                          "Skipping Else block"));
++                                              walk_state->parser_state.aml =
++                                                  walk_state->aml + 1;
++                                              walk_state->parser_state.aml =
++                                                  acpi_ps_get_next_package_end
++                                                  (parser_state);
++                                              walk_state->aml =
++                                                  parser_state->aml;
++                                      }
++                                      ACPI_FREE(acpi_ut_pop_generic_state
++                                                (&walk_state->control_state));
++                              }
++                              op = NULL;
+                               continue;
+                       }
+               }
+diff --git a/drivers/acpi/acpica/psobject.c b/drivers/acpi/acpica/psobject.c
+index 7d9d0151ee54..3138e7a00da8 100644
+--- a/drivers/acpi/acpica/psobject.c
++++ b/drivers/acpi/acpica/psobject.c
+@@ -12,6 +12,7 @@
+ #include "acparser.h"
+ #include "amlcode.h"
+ #include "acconvert.h"
++#include "acnamesp.h"
+ 
+ #define _COMPONENT          ACPI_PARSER
+ ACPI_MODULE_NAME("psobject")
+@@ -549,6 +550,21 @@ acpi_ps_complete_op(struct acpi_walk_state *walk_state,
+ 
+               do {
+                       if (*op) {
++                              /*
++                               * These Opcodes need to be removed from the namespace because they
++                               * get created even if these opcodes cannot be created due to
++                               * errors.
++                               */
++                              if (((*op)->common.aml_opcode == AML_REGION_OP)
++                                  || ((*op)->common.aml_opcode ==
++                                      AML_DATA_REGION_OP)) {
++                                      acpi_ns_delete_children((*op)->common.
++                                                              node);
++                                      acpi_ns_remove_node((*op)->common.node);
++                                      (*op)->common.node = NULL;
++                                      acpi_ps_delete_parse_tree(*op);
++                              }
++
+                               status2 =
+                                   acpi_ps_complete_this_op(walk_state, *op);
+                               if (ACPI_FAILURE(status2)) {
+@@ -574,6 +590,20 @@ acpi_ps_complete_op(struct acpi_walk_state *walk_state,
+ #endif
+               walk_state->prev_op = NULL;
+               walk_state->prev_arg_types = walk_state->arg_types;
++
++              if (walk_state->parse_flags & ACPI_PARSE_MODULE_LEVEL) {
++                      /*
++                       * There was something that went wrong while executing code at the
++                       * module-level. We need to skip parsing whatever caused the
++                       * error and keep going. One runtime error during the table load
++                       * should not cause the entire table to not be loaded. This is
++                       * because there could be correct AML beyond the parts that caused
++                       * the runtime error.
++                       */
++                      ACPI_ERROR((AE_INFO,
++                                  "Ignore error and continue table load"));
++                      return_ACPI_STATUS(AE_OK);
++              }
+               return_ACPI_STATUS(status);
+       }
+ 
+diff --git a/drivers/acpi/acpica/uterror.c b/drivers/acpi/acpica/uterror.c
+index 12d4a0f6b8d2..5a64ddaed8a3 100644
+--- a/drivers/acpi/acpica/uterror.c
++++ b/drivers/acpi/acpica/uterror.c
+@@ -182,20 +182,20 @@ acpi_ut_prefixed_namespace_error(const char *module_name,
+       switch (lookup_status) {
+       case AE_ALREADY_EXISTS:
+ 
+-              acpi_os_printf(ACPI_MSG_BIOS_ERROR);
++              acpi_os_printf("\n" ACPI_MSG_BIOS_ERROR);
+               message = "Failure creating";
+               break;
+ 
+       case AE_NOT_FOUND:
+ 
+-              acpi_os_printf(ACPI_MSG_BIOS_ERROR);
+-              message = "Failure looking up";
++              acpi_os_printf("\n" ACPI_MSG_BIOS_ERROR);
++              message = "Could not resolve";
+               break;
+ 
+       default:
+ 
+-              acpi_os_printf(ACPI_MSG_ERROR);
+-              message = "Failure looking up";
++              acpi_os_printf("\n" ACPI_MSG_ERROR);
++              message = "Failure resolving";
+               break;
+       }
+ 
+diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
+index 346b163f6e89..9bfd2f7e4542 100644
+--- a/drivers/ata/libata-core.c
++++ b/drivers/ata/libata-core.c
+@@ -4557,9 +4557,6 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
+       { "SAMSUNG MZMPC128HBFU-000MV", "CXM14M1Q", ATA_HORKAGE_NOLPM, },
+       { "SAMSUNG SSD PM830 mSATA *",  "CXM13D1Q", ATA_HORKAGE_NOLPM, },
+ 
+-      /* Sandisk devices which are known to not handle LPM well */
+-      { "SanDisk SD7UB3Q*G1001",      NULL,   ATA_HORKAGE_NOLPM, },
+-
+       /* devices that don't properly handle queued TRIM commands */
+       { "Micron_M500IT_*",            "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
+                                               ATA_HORKAGE_ZERO_AFTER_TRIM, },
+diff --git a/drivers/ata/libata-zpodd.c b/drivers/ata/libata-zpodd.c
+index de4ddd0e8550..b3ed8f9953a8 100644
+--- a/drivers/ata/libata-zpodd.c
++++ b/drivers/ata/libata-zpodd.c
+@@ -35,7 +35,7 @@ struct zpodd {
+ static int eject_tray(struct ata_device *dev)
+ {
+       struct ata_taskfile tf;
+-      static const char cdb[] = {  GPCMD_START_STOP_UNIT,
++      static const char cdb[ATAPI_CDB_LEN] = {  GPCMD_START_STOP_UNIT,
+               0, 0, 0,
+               0x02,     /* LoEj */
+               0, 0, 0, 0, 0, 0, 0,
+diff --git a/drivers/base/core.c b/drivers/base/core.c
+index b610816eb887..d680fd030316 100644
+--- a/drivers/base/core.c
++++ b/drivers/base/core.c
+@@ -1467,7 +1467,7 @@ class_dir_create_and_add(struct class *class, struct kobject *parent_kobj)
+ 
+       dir = kzalloc(sizeof(*dir), GFP_KERNEL);
+       if (!dir)
+-              return NULL;
++              return ERR_PTR(-ENOMEM);
+ 
+       dir->class = class;
+       kobject_init(&dir->kobj, &class_dir_ktype);
+@@ -1477,7 +1477,7 @@ class_dir_create_and_add(struct class *class, struct kobject *parent_kobj)
+       retval = kobject_add(&dir->kobj, parent_kobj, "%s", class->name);
+       if (retval < 0) {
+               kobject_put(&dir->kobj);
+-              return NULL;
++              return ERR_PTR(retval);
+       }
+       return &dir->kobj;
+ }
+@@ -1784,6 +1784,10 @@ int device_add(struct device *dev)
+ 
+       parent = get_device(dev->parent);
+       kobj = get_device_parent(dev, parent);
++      if (IS_ERR(kobj)) {
++              error = PTR_ERR(kobj);
++              goto parent_error;
++      }
+       if (kobj)
+               dev->kobj.parent = kobj;
+ 
+@@ -1882,6 +1886,7 @@ int device_add(struct device *dev)
+       kobject_del(&dev->kobj);
+  Error:
+       cleanup_glue_dir(dev, glue_dir);
++parent_error:
+       put_device(parent);
+ name_error:
+       kfree(dev->p);
+@@ -2701,6 +2706,11 @@ int device_move(struct device *dev, struct device *new_parent,
+       device_pm_lock();
+       new_parent = get_device(new_parent);
+       new_parent_kobj = get_device_parent(dev, new_parent);
++      if (IS_ERR(new_parent_kobj)) {
++              error = PTR_ERR(new_parent_kobj);
++              put_device(new_parent);
++              goto out;
++      }
+ 
+       pr_debug("device: '%s': %s: moving to '%s'\n", dev_name(dev),
+                __func__, new_parent ? dev_name(new_parent) : "<NULL>");
+diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
+index afbc202ca6fd..64278f472efe 100644
+--- a/drivers/block/nbd.c
++++ b/drivers/block/nbd.c
+@@ -173,9 +173,12 @@ static const struct device_attribute pid_attr = {
+ static void nbd_dev_remove(struct nbd_device *nbd)
+ {
+       struct gendisk *disk = nbd->disk;
++      struct request_queue *q;
++
+       if (disk) {
++              q = disk->queue;
+               del_gendisk(disk);
+-              blk_cleanup_queue(disk->queue);
++              blk_cleanup_queue(q);
+               blk_mq_free_tag_set(&nbd->tag_set);
+               disk->private_data = NULL;
+               put_disk(disk);
+@@ -231,9 +234,18 @@ static void nbd_size_clear(struct nbd_device *nbd)
+ static void nbd_size_update(struct nbd_device *nbd)
+ {
+       struct nbd_config *config = nbd->config;
++      struct block_device *bdev = bdget_disk(nbd->disk, 0);
++
+       blk_queue_logical_block_size(nbd->disk->queue, config->blksize);
+       blk_queue_physical_block_size(nbd->disk->queue, config->blksize);
+       set_capacity(nbd->disk, config->bytesize >> 9);
++      if (bdev) {
++              if (bdev->bd_disk)
++                      bd_set_size(bdev, config->bytesize);
++              else
++                      bdev->bd_invalidated = 1;
++              bdput(bdev);
++      }
+       kobject_uevent(&nbd_to_dev(nbd)->kobj, KOBJ_CHANGE);
+ }
+ 
+@@ -243,6 +255,8 @@ static void nbd_size_set(struct nbd_device *nbd, loff_t blocksize,
+       struct nbd_config *config = nbd->config;
+       config->blksize = blocksize;
+       config->bytesize = blocksize * nr_blocks;
++      if (nbd->task_recv != NULL)
++              nbd_size_update(nbd);
+ }
+ 
+ static void nbd_complete_rq(struct request *req)
+@@ -1109,7 +1123,6 @@ static int nbd_start_device_ioctl(struct nbd_device *nbd, struct block_device *b
+       if (ret)
+               return ret;
+ 
+-      bd_set_size(bdev, config->bytesize);
+       if (max_part)
+               bdev->bd_invalidated = 1;
+       mutex_unlock(&nbd->config_lock);
+diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
+index 075d18f6ba7a..54d4c0f999ec 100644
+--- a/drivers/cpufreq/cpufreq.c
++++ b/drivers/cpufreq/cpufreq.c
+@@ -696,6 +696,8 @@ static ssize_t store_##file_name                                   \
+       struct cpufreq_policy new_policy;                               \
+                                                                       \
+       memcpy(&new_policy, policy, sizeof(*policy));                   \
++      new_policy.min = policy->user_policy.min;                       \
++      new_policy.max = policy->user_policy.max;                       \
+                                                                       \
+       ret = sscanf(buf, "%u", &new_policy.object);                    \
+       if (ret != 1)                                                   \
+diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
+index ca38229b045a..43e14bb512c8 100644
+--- a/drivers/cpufreq/cpufreq_governor.c
++++ b/drivers/cpufreq/cpufreq_governor.c
+@@ -165,7 +165,7 @@ unsigned int dbs_update(struct cpufreq_policy *policy)
+                        * calls, so the previous load value can be used then.
+                        */
+                       load = j_cdbs->prev_load;
+-              } else if (unlikely(time_elapsed > 2 * sampling_rate &&
++              } else if (unlikely((int)idle_time > 2 * sampling_rate &&
+                                   j_cdbs->prev_load)) {
+                       /*
+                        * If the CPU had gone completely idle and a task has
+@@ -185,10 +185,8 @@ unsigned int dbs_update(struct cpufreq_policy *policy)
+                        * clear prev_load to guarantee that the load will be
+                        * computed again next time.
+                        *
+-                       * Detecting this situation is easy: the governor's
+-                       * utilization update handler would not have run during
+-                       * CPU-idle periods.  Hence, an unusually large
+-                       * 'time_elapsed' (as compared to the sampling rate)
++                       * Detecting this situation is easy: an unusually large
++                       * 'idle_time' (as compared to the sampling rate)
+                        * indicates this scenario.
+                        */
+                       load = j_cdbs->prev_load;
+@@ -217,8 +215,8 @@ unsigned int dbs_update(struct cpufreq_policy *policy)
+                       j_cdbs->prev_load = load;
+               }
+ 
+-              if (time_elapsed > 2 * sampling_rate) {
+-                      unsigned int periods = time_elapsed / sampling_rate;
++              if (unlikely((int)idle_time > 2 * sampling_rate)) {
++                      unsigned int periods = idle_time / sampling_rate;
+ 
+                       if (periods < idle_periods)
+                               idle_periods = periods;
+diff --git a/drivers/cpufreq/ti-cpufreq.c b/drivers/cpufreq/ti-cpufreq.c
+index 6ba709b6f095..896caba5dfe5 100644
+--- a/drivers/cpufreq/ti-cpufreq.c
++++ b/drivers/cpufreq/ti-cpufreq.c
+@@ -226,7 +226,7 @@ static int ti_cpufreq_probe(struct platform_device *pdev)
+       opp_data->cpu_dev = get_cpu_device(0);
+       if (!opp_data->cpu_dev) {
+               pr_err("%s: Failed to get device for CPU0\n", __func__);
+-              ret = ENODEV;
++              ret = -ENODEV;
+               goto free_opp_data;
+       }
+ 
+diff --git a/drivers/hid/intel-ish-hid/ipc/pci-ish.c b/drivers/hid/intel-ish-hid/ipc/pci-ish.c
+index 582e449be9fe..a2c53ea3b5ed 100644
+--- a/drivers/hid/intel-ish-hid/ipc/pci-ish.c
++++ b/drivers/hid/intel-ish-hid/ipc/pci-ish.c
+@@ -205,8 +205,7 @@ static void ish_remove(struct pci_dev *pdev)
+       kfree(ishtp_dev);
+ }
+ 
+-#ifdef CONFIG_PM
+-static struct device *ish_resume_device;
++static struct device __maybe_unused *ish_resume_device;
+ 
+ /* 50ms to get resume response */
+ #define WAIT_FOR_RESUME_ACK_MS                50
+@@ -220,7 +219,7 @@ static struct device *ish_resume_device;
+  * in that case a simple resume message is enough, others we need
+  * a reset sequence.
+  */
+-static void ish_resume_handler(struct work_struct *work)
++static void __maybe_unused ish_resume_handler(struct work_struct *work)
+ {
+       struct pci_dev *pdev = to_pci_dev(ish_resume_device);
+       struct ishtp_device *dev = pci_get_drvdata(pdev);
+@@ -262,7 +261,7 @@ static void ish_resume_handler(struct work_struct *work)
+  *
+  * Return: 0 to the pm core
+  */
+-static int ish_suspend(struct device *device)
++static int __maybe_unused ish_suspend(struct device *device)
+ {
+       struct pci_dev *pdev = to_pci_dev(device);
+       struct ishtp_device *dev = pci_get_drvdata(pdev);
+@@ -288,7 +287,7 @@ static int ish_suspend(struct device *device)
+       return 0;
+ }
+ 
+-static DECLARE_WORK(resume_work, ish_resume_handler);
++static __maybe_unused DECLARE_WORK(resume_work, ish_resume_handler);
+ /**
+  * ish_resume() - ISH resume callback
+  * @device:   device pointer
+@@ -297,7 +296,7 @@ static DECLARE_WORK(resume_work, ish_resume_handler);
+  *
+  * Return: 0 to the pm core
+  */
+-static int ish_resume(struct device *device)
++static int __maybe_unused ish_resume(struct device *device)
+ {
+       struct pci_dev *pdev = to_pci_dev(device);
+       struct ishtp_device *dev = pci_get_drvdata(pdev);
+@@ -311,21 +310,14 @@ static int ish_resume(struct device *device)
+       return 0;
+ }
+ 
+-static const struct dev_pm_ops ish_pm_ops = {
+-      .suspend = ish_suspend,
+-      .resume = ish_resume,
+-};
+-#define ISHTP_ISH_PM_OPS      (&ish_pm_ops)
+-#else
+-#define ISHTP_ISH_PM_OPS      NULL
+-#endif /* CONFIG_PM */
++static SIMPLE_DEV_PM_OPS(ish_pm_ops, ish_suspend, ish_resume);
+ 
+ static struct pci_driver ish_driver = {
+       .name = KBUILD_MODNAME,
+       .id_table = ish_pci_tbl,
+       .probe = ish_probe,
+       .remove = ish_remove,
+-      .driver.pm = ISHTP_ISH_PM_OPS,
++      .driver.pm = &ish_pm_ops,
+ };
+ 
+ module_pci_driver(ish_driver);
+diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c
+index ee7a37eb159a..545986cfb978 100644
+--- a/drivers/hid/wacom_sys.c
++++ b/drivers/hid/wacom_sys.c
+@@ -395,6 +395,14 @@ static void wacom_usage_mapping(struct hid_device *hdev,
+               }
+       }
+ 
++      /* 2nd-generation Intuos Pro Large has incorrect Y maximum */
++      if (hdev->vendor == USB_VENDOR_ID_WACOM &&
++          hdev->product == 0x0358 &&
++          WACOM_PEN_FIELD(field) &&
++          wacom_equivalent_usage(usage->hid) == HID_GD_Y) {
++              field->logical_maximum = 43200;
++      }
++
+       switch (usage->hid) {
+       case HID_GD_X:
+               features->x_max = field->logical_maximum;
+diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
+index 8fb8c737fffe..b0b30a568db7 100644
+--- a/drivers/iommu/amd_iommu.c
++++ b/drivers/iommu/amd_iommu.c
+@@ -4379,7 +4379,7 @@ static void ir_compose_msi_msg(struct irq_data *irq_data, struct msi_msg *msg)
+ 
+ static struct irq_chip amd_ir_chip = {
+       .name                   = "AMD-IR",
+-      .irq_ack                = ir_ack_apic_edge,
++      .irq_ack                = apic_ack_irq,
+       .irq_set_affinity       = amd_ir_set_affinity,
+       .irq_set_vcpu_affinity  = amd_ir_set_vcpu_affinity,
+       .irq_compose_msi_msg    = ir_compose_msi_msg,
+diff --git a/drivers/iommu/intel_irq_remapping.c b/drivers/iommu/intel_irq_remapping.c
+index 3062a154a9fb..967450bd421a 100644
+--- a/drivers/iommu/intel_irq_remapping.c
++++ b/drivers/iommu/intel_irq_remapping.c
+@@ -1223,7 +1223,7 @@ static int intel_ir_set_vcpu_affinity(struct irq_data *data, void *info)
+ 
+ static struct irq_chip intel_ir_chip = {
+       .name                   = "INTEL-IR",
+-      .irq_ack                = ir_ack_apic_edge,
++      .irq_ack                = apic_ack_irq,
+       .irq_set_affinity       = intel_ir_set_affinity,
+       .irq_compose_msi_msg    = intel_ir_compose_msi_msg,
+       .irq_set_vcpu_affinity  = intel_ir_set_vcpu_affinity,
+diff --git a/drivers/iommu/irq_remapping.c b/drivers/iommu/irq_remapping.c
+index 496deee3ae3a..7d0f3074d41d 100644
+--- a/drivers/iommu/irq_remapping.c
++++ b/drivers/iommu/irq_remapping.c
+@@ -156,11 +156,6 @@ void panic_if_irq_remap(const char *msg)
+               panic(msg);
+ }
+ 
+-void ir_ack_apic_edge(struct irq_data *data)
+-{
+-      ack_APIC_irq();
+-}
+-
+ /**
+  * irq_remapping_get_ir_irq_domain - Get the irqdomain associated with the IOMMU
+  *                                 device serving request @info
+diff --git a/drivers/iommu/irq_remapping.h b/drivers/iommu/irq_remapping.h
+index 039c7af7b190..0afef6e43be4 100644
+--- a/drivers/iommu/irq_remapping.h
++++ b/drivers/iommu/irq_remapping.h
+@@ -65,8 +65,6 @@ struct irq_remap_ops {
+ extern struct irq_remap_ops intel_irq_remap_ops;
+ extern struct irq_remap_ops amd_iommu_irq_ops;
+ 
+-extern void ir_ack_apic_edge(struct irq_data *data);
+-
+ #else  /* CONFIG_IRQ_REMAP */
+ 
+ #define irq_remapping_enabled 0
+diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c
+index b67be33bd62f..cea7b2d2e60a 100644
+--- a/drivers/media/rc/rc-main.c
++++ b/drivers/media/rc/rc-main.c
+@@ -1860,6 +1860,8 @@ int rc_register_device(struct rc_dev *dev)
+                dev->device_name ?: "Unspecified device", path ?: "N/A");
+       kfree(path);
+ 
++      dev->registered = true;
++
+       if (dev->driver_type != RC_DRIVER_IR_RAW_TX) {
+               rc = rc_setup_rx_device(dev);
+               if (rc)
+@@ -1879,8 +1881,6 @@ int rc_register_device(struct rc_dev *dev)
+                       goto out_lirc;
+       }
+ 
+-      dev->registered = true;
+-
+       dev_dbg(&dev->dev, "Registered rc%u (driver: %s)\n", dev->minor,
+               dev->driver_name ? dev->driver_name : "unknown");
+ 
+diff --git a/drivers/media/usb/uvc/uvc_ctrl.c b/drivers/media/usb/uvc/uvc_ctrl.c
+index 102594ec3e97..a36b4fb949fa 100644
+--- a/drivers/media/usb/uvc/uvc_ctrl.c
++++ b/drivers/media/usb/uvc/uvc_ctrl.c
+@@ -1607,14 +1607,12 @@ static int uvc_ctrl_get_flags(struct uvc_device *dev,
+       ret = uvc_query_ctrl(dev, UVC_GET_INFO, ctrl->entity->id, dev->intfnum,
+                            info->selector, data, 1);
+       if (!ret)
+-              info->flags = UVC_CTRL_FLAG_GET_MIN | UVC_CTRL_FLAG_GET_MAX
+-                          | UVC_CTRL_FLAG_GET_RES | UVC_CTRL_FLAG_GET_DEF
+-                          | (data[0] & UVC_CONTROL_CAP_GET ?
+-                             UVC_CTRL_FLAG_GET_CUR : 0)
+-                          | (data[0] & UVC_CONTROL_CAP_SET ?
+-                             UVC_CTRL_FLAG_SET_CUR : 0)
+-                          | (data[0] & UVC_CONTROL_CAP_AUTOUPDATE ?
+-                             UVC_CTRL_FLAG_AUTO_UPDATE : 0);
++              info->flags |= (data[0] & UVC_CONTROL_CAP_GET ?
++                              UVC_CTRL_FLAG_GET_CUR : 0)
++                          |  (data[0] & UVC_CONTROL_CAP_SET ?
++                              UVC_CTRL_FLAG_SET_CUR : 0)
++                          |  (data[0] & UVC_CONTROL_CAP_AUTOUPDATE ?
++                              UVC_CTRL_FLAG_AUTO_UPDATE : 0);
+ 
+       kfree(data);
+       return ret;
+@@ -1689,6 +1687,9 @@ static int uvc_ctrl_fill_xu_info(struct uvc_device *dev,
+ 
+       info->size = le16_to_cpup((__le16 *)data);
+ 
++      info->flags = UVC_CTRL_FLAG_GET_MIN | UVC_CTRL_FLAG_GET_MAX
++                  | UVC_CTRL_FLAG_GET_RES | UVC_CTRL_FLAG_GET_DEF;
++
+       ret = uvc_ctrl_get_flags(dev, ctrl, info);
+       if (ret < 0) {
+               uvc_trace(UVC_TRACE_CONTROL,
+diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c
+index 58c705f24f96..b594bae1adbd 100644
+--- a/drivers/net/bonding/bond_options.c
++++ b/drivers/net/bonding/bond_options.c
+@@ -1142,6 +1142,7 @@ static int bond_option_primary_set(struct bonding *bond,
+                                  slave->dev->name);
+                       rcu_assign_pointer(bond->primary_slave, slave);
+                       strcpy(bond->params.primary, slave->dev->name);
++                      bond->force_primary = true;
+                       bond_select_active_slave(bond);
+                       goto out;
+               }
+diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
+index a50e08bb4748..750007513f9d 100644
+--- a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
++++ b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
+@@ -267,14 +267,13 @@ static int aq_pci_probe(struct pci_dev *pdev,
+       numvecs = min(numvecs, num_online_cpus());
+       /*enable interrupts */
+ #if !AQ_CFG_FORCE_LEGACY_INT
+-      numvecs = pci_alloc_irq_vectors(self->pdev, 1, numvecs,
+-                                      PCI_IRQ_MSIX | PCI_IRQ_MSI |
+-                                      PCI_IRQ_LEGACY);
++      err = pci_alloc_irq_vectors(self->pdev, 1, numvecs,
++                                  PCI_IRQ_MSIX | PCI_IRQ_MSI |
++                                  PCI_IRQ_LEGACY);
+ 
+-      if (numvecs < 0) {
+-              err = numvecs;
++      if (err < 0)
+               goto err_hwinit;
+-      }
++      numvecs = err;
+ #endif
+       self->irqvecs = numvecs;
+ 
+diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
+index da07ccdf84bf..eb8dccd24abf 100644
+--- a/drivers/net/hyperv/netvsc_drv.c
++++ b/drivers/net/hyperv/netvsc_drv.c
+@@ -126,8 +126,10 @@ static int netvsc_open(struct net_device *net)
+       }
+ 
+       rdev = nvdev->extension;
+-      if (!rdev->link_state)
++      if (!rdev->link_state) {
+               netif_carrier_on(net);
++              netif_tx_wake_all_queues(net);
++      }
+ 
+       if (vf_netdev) {
+               /* Setting synthetic device up transparently sets
+diff --git a/drivers/net/phy/dp83848.c b/drivers/net/phy/dp83848.c
+index cd09c3af2117..6e8e42361fd5 100644
+--- a/drivers/net/phy/dp83848.c
++++ b/drivers/net/phy/dp83848.c
+@@ -74,6 +74,25 @@ static int dp83848_config_intr(struct phy_device *phydev)
+       return phy_write(phydev, DP83848_MICR, control);
+ }
+ 
++static int dp83848_config_init(struct phy_device *phydev)
++{
++      int err;
++      int val;
++
++      err = genphy_config_init(phydev);
++      if (err < 0)
++              return err;
++
++      /* DP83620 always reports Auto Negotiation Ability on BMSR. Instead,
++       * we check initial value of BMCR Auto negotiation enable bit
++       */
++      val = phy_read(phydev, MII_BMCR);
++      if (!(val & BMCR_ANENABLE))
++              phydev->autoneg = AUTONEG_DISABLE;
++
++      return 0;
++}
++
+ static struct mdio_device_id __maybe_unused dp83848_tbl[] = {
+       { TI_DP83848C_PHY_ID, 0xfffffff0 },
+       { NS_DP83848C_PHY_ID, 0xfffffff0 },
+@@ -83,7 +102,7 @@ static struct mdio_device_id __maybe_unused dp83848_tbl[] = {
+ };
+ MODULE_DEVICE_TABLE(mdio, dp83848_tbl);
+ 
+-#define DP83848_PHY_DRIVER(_id, _name)                                \
++#define DP83848_PHY_DRIVER(_id, _name, _config_init)          \
+       {                                                       \
+               .phy_id         = _id,                          \
+               .phy_id_mask    = 0xfffffff0,                   \
+@@ -92,7 +111,7 @@ MODULE_DEVICE_TABLE(mdio, dp83848_tbl);
+               .flags          = PHY_HAS_INTERRUPT,            \
+                                                               \
+               .soft_reset     = genphy_soft_reset,            \
+-              .config_init    = genphy_config_init,           \
++              .config_init    = _config_init,                 \
+               .suspend        = genphy_suspend,               \
+               .resume         = genphy_resume,                \
+                                                               \
+@@ -102,10 +121,14 @@ MODULE_DEVICE_TABLE(mdio, dp83848_tbl);
+       }
+ 
+ static struct phy_driver dp83848_driver[] = {
+-      DP83848_PHY_DRIVER(TI_DP83848C_PHY_ID, "TI DP83848C 10/100 Mbps PHY"),
+-      DP83848_PHY_DRIVER(NS_DP83848C_PHY_ID, "NS DP83848C 10/100 Mbps PHY"),
+-      DP83848_PHY_DRIVER(TI_DP83620_PHY_ID, "TI DP83620 10/100 Mbps PHY"),
+-      DP83848_PHY_DRIVER(TLK10X_PHY_ID, "TI TLK10X 10/100 Mbps PHY"),
++      DP83848_PHY_DRIVER(TI_DP83848C_PHY_ID, "TI DP83848C 10/100 Mbps PHY",
++                         genphy_config_init),
++      DP83848_PHY_DRIVER(NS_DP83848C_PHY_ID, "NS DP83848C 10/100 Mbps PHY",
++                         genphy_config_init),
++      DP83848_PHY_DRIVER(TI_DP83620_PHY_ID, "TI DP83620 10/100 Mbps PHY",
++                         dp83848_config_init),
++      DP83848_PHY_DRIVER(TLK10X_PHY_ID, "TI TLK10X 10/100 Mbps PHY",
++                         genphy_config_init),
+ };
+ module_phy_driver(dp83848_driver);
+ 
+diff --git a/drivers/net/tap.c b/drivers/net/tap.c
+index 9b6cb780affe..f0f7cd977667 100644
+--- a/drivers/net/tap.c
++++ b/drivers/net/tap.c
+@@ -774,13 +774,16 @@ static ssize_t tap_put_user(struct tap_queue *q,
+       int total;
+ 
+       if (q->flags & IFF_VNET_HDR) {
++              int vlan_hlen = skb_vlan_tag_present(skb) ? VLAN_HLEN : 0;
+               struct virtio_net_hdr vnet_hdr;
++
+               vnet_hdr_len = READ_ONCE(q->vnet_hdr_sz);
+               if (iov_iter_count(iter) < vnet_hdr_len)
+                       return -EINVAL;
+ 
+               if (virtio_net_hdr_from_skb(skb, &vnet_hdr,
+-                                          tap_is_little_endian(q), true))
++                                          tap_is_little_endian(q), true,
++                                          vlan_hlen))
+                       BUG();
+ 
+               if (copy_to_iter(&vnet_hdr, sizeof(vnet_hdr), iter) !=
+diff --git a/drivers/net/tun.c b/drivers/net/tun.c
+index 23e9eb66197f..409eb8b74740 100644
+--- a/drivers/net/tun.c
++++ b/drivers/net/tun.c
+@@ -2078,7 +2078,8 @@ static ssize_t tun_put_user(struct tun_struct *tun,
+                       return -EINVAL;
+ 
+               if (virtio_net_hdr_from_skb(skb, &gso,
+-                                          tun_is_little_endian(tun), true)) {
++                                          tun_is_little_endian(tun), true,
++                                          vlan_hlen)) {
+                       struct skb_shared_info *sinfo = skb_shinfo(skb);
+                       pr_err("unexpected GSO type: "
+                              "0x%x, gso_size %d, hdr_len %d\n",
+diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
+index 90d07ed224d5..b0e8b9613054 100644
+--- a/drivers/net/usb/cdc_ncm.c
++++ b/drivers/net/usb/cdc_ncm.c
+@@ -1124,7 +1124,7 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
+        * accordingly. Otherwise, we should check here.
+        */
+       if (ctx->drvflags & CDC_NCM_FLAG_NDP_TO_END)
+-              delayed_ndp_size = ctx->max_ndp_size;
++              delayed_ndp_size = ALIGN(ctx->max_ndp_size, ctx->tx_ndp_modulus);
+       else
+               delayed_ndp_size = 0;
+ 
+@@ -1285,7 +1285,7 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
+       /* If requested, put NDP at end of frame. */
+       if (ctx->drvflags & CDC_NCM_FLAG_NDP_TO_END) {
+               nth16 = (struct usb_cdc_ncm_nth16 *)skb_out->data;
+-              cdc_ncm_align_tail(skb_out, ctx->tx_ndp_modulus, 0, ctx->tx_curr_size);
++              cdc_ncm_align_tail(skb_out, ctx->tx_ndp_modulus, 0, ctx->tx_curr_size - ctx->max_ndp_size);
+               nth16->wNdpIndex = cpu_to_le16(skb_out->len);
+               skb_put_data(skb_out, ctx->delayed_ndp16, ctx->max_ndp_size);
+ 
+diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
+index 032e1ac10a30..8c7207535179 100644
+--- a/drivers/net/virtio_net.c
++++ b/drivers/net/virtio_net.c
+@@ -1358,7 +1358,8 @@ static int xmit_skb(struct send_queue *sq, struct sk_buff *skb)
+               hdr = skb_vnet_hdr(skb);
+ 
+       if (virtio_net_hdr_from_skb(skb, &hdr->hdr,
+-                                  virtio_is_little_endian(vi->vdev), false))
++                                  virtio_is_little_endian(vi->vdev), false,
++                                  0))
+               BUG();
+ 
+       if (vi->mergeable_rx_bufs)
+diff --git a/drivers/net/wireless/intel/iwlwifi/fw/paging.c b/drivers/net/wireless/intel/iwlwifi/fw/paging.c
+index 1fec8e3a6b35..6afcfd1f0eec 100644
+--- a/drivers/net/wireless/intel/iwlwifi/fw/paging.c
++++ b/drivers/net/wireless/intel/iwlwifi/fw/paging.c
+@@ -8,6 +8,7 @@
+  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+  * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
++ * Copyright(c) 2018        Intel Corporation
+  *
+  * This program is free software; you can redistribute it and/or modify
+  * it under the terms of version 2 of the GNU General Public License as
+@@ -30,6 +31,7 @@
+  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+  * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
++ * Copyright(c) 2018        Intel Corporation
+  * All rights reserved.
+  *
+  * Redistribution and use in source and binary forms, with or without
+@@ -163,7 +165,7 @@ static int iwl_alloc_fw_paging_mem(struct iwl_fw_runtime *fwrt,
+ static int iwl_fill_paging_mem(struct iwl_fw_runtime *fwrt,
+                              const struct fw_img *image)
+ {
+-      int sec_idx, idx;
++      int sec_idx, idx, ret;
+       u32 offset = 0;
+ 
+       /*
+@@ -190,17 +192,23 @@ static int iwl_fill_paging_mem(struct iwl_fw_runtime *fwrt,
+        */
+       if (sec_idx >= image->num_sec - 1) {
+               IWL_ERR(fwrt, "Paging: Missing CSS and/or paging sections\n");
+-              iwl_free_fw_paging(fwrt);
+-              return -EINVAL;
++              ret = -EINVAL;
++              goto err;
+       }
+ 
+       /* copy the CSS block to the dram */
+       IWL_DEBUG_FW(fwrt, "Paging: load paging CSS to FW, sec = %d\n",
+                    sec_idx);
+ 
++      if (image->sec[sec_idx].len > fwrt->fw_paging_db[0].fw_paging_size) {
++              IWL_ERR(fwrt, "CSS block is larger than paging size\n");
++              ret = -EINVAL;
++              goto err;
++      }
++
+       memcpy(page_address(fwrt->fw_paging_db[0].fw_paging_block),
+              image->sec[sec_idx].data,
+-             fwrt->fw_paging_db[0].fw_paging_size);
++             image->sec[sec_idx].len);
+       dma_sync_single_for_device(fwrt->trans->dev,
+                                  fwrt->fw_paging_db[0].fw_paging_phys,
+                                  fwrt->fw_paging_db[0].fw_paging_size,
+@@ -221,6 +229,14 @@ static int iwl_fill_paging_mem(struct iwl_fw_runtime *fwrt,
+       for (idx = 1; idx < fwrt->num_of_paging_blk; idx++) {
+               struct iwl_fw_paging *block = &fwrt->fw_paging_db[idx];
+ 
++              if (block->fw_paging_size > image->sec[sec_idx].len - offset) {
++                      IWL_ERR(fwrt,
++                              "Paging: paging size is larger than remaining data in block %d\n",
++                              idx);
++                      ret = -EINVAL;
++                      goto err;
++              }
++
+               memcpy(page_address(block->fw_paging_block),
+                      image->sec[sec_idx].data + offset,
+                      block->fw_paging_size);
+@@ -231,19 +247,32 @@ static int iwl_fill_paging_mem(struct iwl_fw_runtime *fwrt,
+ 
+               IWL_DEBUG_FW(fwrt,
+                            "Paging: copied %d paging bytes to block %d\n",
+-                           fwrt->fw_paging_db[idx].fw_paging_size,
+-                           idx);
++                           block->fw_paging_size, idx);
+ 
+-              offset += fwrt->fw_paging_db[idx].fw_paging_size;
++              offset += block->fw_paging_size;
++
++              if (offset > image->sec[sec_idx].len) {
++                      IWL_ERR(fwrt,
++                              "Paging: offset goes over section size\n");
++                      ret = -EINVAL;
++                      goto err;
++              }
+       }
+ 
+       /* copy the last paging block */
+       if (fwrt->num_of_pages_in_last_blk > 0) {
+               struct iwl_fw_paging *block = &fwrt->fw_paging_db[idx];
+ 
++              if (image->sec[sec_idx].len - offset > block->fw_paging_size) {
++                      IWL_ERR(fwrt,
++                              "Paging: last block is larger than paging size\n");
++                      ret = -EINVAL;
++                      goto err;
++              }
++
+               memcpy(page_address(block->fw_paging_block),
+                      image->sec[sec_idx].data + offset,
+-                     FW_PAGING_SIZE * fwrt->num_of_pages_in_last_blk);
++                     image->sec[sec_idx].len - offset);
+               dma_sync_single_for_device(fwrt->trans->dev,
+                                          block->fw_paging_phys,
+                                          block->fw_paging_size,
+@@ -255,6 +284,10 @@ static int iwl_fill_paging_mem(struct iwl_fw_runtime *fwrt,
+       }
+ 
+       return 0;
++
++err:
++      iwl_free_fw_paging(fwrt);
++      return ret;
+ }
+ 
+ static int iwl_save_fw_paging(struct iwl_fw_runtime *fwrt,
+diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
+index 17a0190bd88f..5dbb0f0c02ef 100644
+--- a/drivers/nvme/host/pci.c
++++ b/drivers/nvme/host/pci.c
+@@ -2679,8 +2679,15 @@ static pci_ers_result_t nvme_slot_reset(struct pci_dev *pdev)
+ 
+       dev_info(dev->ctrl.device, "restart after slot reset\n");
+       pci_restore_state(pdev);
+-      nvme_reset_ctrl(&dev->ctrl);
+-      return PCI_ERS_RESULT_RECOVERED;
++      nvme_reset_ctrl_sync(&dev->ctrl);
++
++      switch (dev->ctrl.state) {
++      case NVME_CTRL_LIVE:
++      case NVME_CTRL_ADMIN_ONLY:
++              return PCI_ERS_RESULT_RECOVERED;
++      default:
++              return PCI_ERS_RESULT_DISCONNECT;
++      }
+ }
+ 
+ static void nvme_error_resume(struct pci_dev *pdev)
+diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
+index f0be5f35ab28..9beefa6ed1ce 100644
+--- a/drivers/vhost/vhost.c
++++ b/drivers/vhost/vhost.c
+@@ -2345,6 +2345,9 @@ struct vhost_msg_node *vhost_new_msg(struct vhost_virtqueue *vq, int type)
+       struct vhost_msg_node *node = kmalloc(sizeof *node, GFP_KERNEL);
+       if (!node)
+               return NULL;
++
++      /* Make sure all padding within the structure is initialized. */
++      memset(&node->msg, 0, sizeof node->msg);
+       node->vq = vq;
+       node->msg.type = type;
+       return node;
+diff --git a/drivers/w1/masters/mxc_w1.c b/drivers/w1/masters/mxc_w1.c
+index 74f2e6e6202a..8851d441e5fd 100644
+--- a/drivers/w1/masters/mxc_w1.c
++++ b/drivers/w1/masters/mxc_w1.c
+@@ -112,6 +112,10 @@ static int mxc_w1_probe(struct platform_device *pdev)
+       if (IS_ERR(mdev->clk))
+               return PTR_ERR(mdev->clk);
+ 
++      err = clk_prepare_enable(mdev->clk);
++      if (err)
++              return err;
++
+       clkrate = clk_get_rate(mdev->clk);
+       if (clkrate < 10000000)
+               dev_warn(&pdev->dev,
+@@ -125,12 +129,10 @@ static int mxc_w1_probe(struct platform_device *pdev)
+ 
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       mdev->regs = devm_ioremap_resource(&pdev->dev, res);
+-      if (IS_ERR(mdev->regs))
+-              return PTR_ERR(mdev->regs);
+-
+-      err = clk_prepare_enable(mdev->clk);
+-      if (err)
+-              return err;
++      if (IS_ERR(mdev->regs)) {
++              err = PTR_ERR(mdev->regs);
++              goto out_disable_clk;
++      }
+ 
+       /* Software reset 1-Wire module */
+       writeb(MXC_W1_RESET_RST, mdev->regs + MXC_W1_RESET);
+@@ -146,8 +148,12 @@ static int mxc_w1_probe(struct platform_device *pdev)
+ 
+       err = w1_add_master_device(&mdev->bus_master);
+       if (err)
+-              clk_disable_unprepare(mdev->clk);
++              goto out_disable_clk;
+ 
++      return 0;
++
++out_disable_clk:
++      clk_disable_unprepare(mdev->clk);
+       return err;
+ }
+ 
+diff --git a/fs/binfmt_misc.c b/fs/binfmt_misc.c
+index a41b48f82a70..4de191563261 100644
+--- a/fs/binfmt_misc.c
++++ b/fs/binfmt_misc.c
+@@ -387,8 +387,13 @@ static Node *create_entry(const char __user *buffer, size_t count)
+               s = strchr(p, del);
+               if (!s)
+                       goto einval;
+-              *s++ = '\0';
+-              e->offset = simple_strtoul(p, &p, 10);
++              *s = '\0';
++              if (p != s) {
++                      int r = kstrtoint(p, 10, &e->offset);
++                      if (r != 0 || e->offset < 0)
++                              goto einval;
++              }
++              p = s;
+               if (*p++)
+                       goto einval;
+               pr_debug("register: offset: %#x\n", e->offset);
+@@ -428,7 +433,8 @@ static Node *create_entry(const char __user *buffer, size_t count)
+               if (e->mask &&
+                   string_unescape_inplace(e->mask, UNESCAPE_HEX) != e->size)
+                       goto einval;
+-              if (e->size + e->offset > BINPRM_BUF_SIZE)
++              if (e->size > BINPRM_BUF_SIZE ||
++                  BINPRM_BUF_SIZE - e->size < e->offset)
+                       goto einval;
+               pr_debug("register: magic/mask length: %i\n", e->size);
+               if (USE_DEBUG) {
+diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
+index 0b86cf10cf2a..775a0f2d0b45 100644
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -1018,8 +1018,10 @@ static noinline int cow_file_range(struct inode *inode,
+                                 ram_size, /* ram_bytes */
+                                 BTRFS_COMPRESS_NONE, /* compress_type */
+                                 BTRFS_ORDERED_REGULAR /* type */);
+-              if (IS_ERR(em))
++              if (IS_ERR(em)) {
++                      ret = PTR_ERR(em);
+                       goto out_reserve;
++              }
+               free_extent_map(em);
+ 
+               ret = btrfs_add_ordered_extent(inode, start, ins.objectid,
+diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
+index 632e26d6f7ce..28fed3e8960b 100644
+--- a/fs/btrfs/ioctl.c
++++ b/fs/btrfs/ioctl.c
+@@ -2654,8 +2654,10 @@ static long btrfs_ioctl_rm_dev_v2(struct file *file, void __user *arg)
+       }
+ 
+       /* Check for compatibility reject unknown flags */
+-      if (vol_args->flags & ~BTRFS_VOL_ARG_V2_FLAGS_SUPPORTED)
+-              return -EOPNOTSUPP;
++      if (vol_args->flags & ~BTRFS_VOL_ARG_V2_FLAGS_SUPPORTED) {
++              ret = -EOPNOTSUPP;
++              goto out;
++      }
+ 
+       if (test_and_set_bit(BTRFS_FS_EXCL_OP, &fs_info->flags)) {
+               ret = BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS;
+@@ -3826,11 +3828,6 @@ static noinline int btrfs_clone_files(struct file *file, struct file *file_src,
+           src->i_sb != inode->i_sb)
+               return -EXDEV;
+ 
+-      /* don't make the dst file partly checksummed */
+-      if ((BTRFS_I(src)->flags & BTRFS_INODE_NODATASUM) !=
+-          (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM))
+-              return -EINVAL;
+-
+       if (S_ISDIR(src->i_mode) || S_ISDIR(inode->i_mode))
+               return -EISDIR;
+ 
+@@ -3840,6 +3837,13 @@ static noinline int btrfs_clone_files(struct file *file, struct file *file_src,
+               inode_lock(src);
+       }
+ 
++      /* don't make the dst file partly checksummed */
++      if ((BTRFS_I(src)->flags & BTRFS_INODE_NODATASUM) !=
++          (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
++              ret = -EINVAL;
++              goto out_unlock;
++      }
++
+       /* determine range to clone */
+       ret = -EINVAL;
+       if (off + len > src->i_size || off + len < off)
+diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
+index 52b39a0924e9..ad8a69ba7f13 100644
+--- a/fs/btrfs/scrub.c
++++ b/fs/btrfs/scrub.c
+@@ -2799,7 +2799,7 @@ static int scrub_extent(struct scrub_ctx *sctx, struct map_lookup *map,
+                       have_csum = scrub_find_csum(sctx, logical, csum);
+                       if (have_csum == 0)
+                               ++sctx->stat.no_csum;
+-                      if (sctx->is_dev_replace && !have_csum) {
++                      if (0 && sctx->is_dev_replace && !have_csum) {
+                               ret = copy_nocow_pages(sctx, logical, l,
+                                                      mirror_num,
+                                                     physical_for_dev_replace);
+diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
+index 0628092b0b1b..f82152a0cb38 100644
+--- a/fs/btrfs/super.c
++++ b/fs/btrfs/super.c
+@@ -323,6 +323,7 @@ enum {
+       Opt_ssd, Opt_nossd,
+       Opt_ssd_spread, Opt_nossd_spread,
+       Opt_subvol,
++      Opt_subvol_empty,
+       Opt_subvolid,
+       Opt_thread_pool,
+       Opt_treelog, Opt_notreelog,
+@@ -388,6 +389,7 @@ static const match_table_t tokens = {
+       {Opt_ssd_spread, "ssd_spread"},
+       {Opt_nossd_spread, "nossd_spread"},
+       {Opt_subvol, "subvol=%s"},
++      {Opt_subvol_empty, "subvol="},
+       {Opt_subvolid, "subvolid=%s"},
+       {Opt_thread_pool, "thread_pool=%u"},
+       {Opt_treelog, "treelog"},
+@@ -461,6 +463,7 @@ int btrfs_parse_options(struct btrfs_fs_info *info, char *options,
+                       btrfs_set_opt(info->mount_opt, DEGRADED);
+                       break;
+               case Opt_subvol:
++              case Opt_subvol_empty:
+               case Opt_subvolid:
+               case Opt_subvolrootid:
+               case Opt_device:
+diff --git a/fs/cifs/cifsacl.h b/fs/cifs/cifsacl.h
+index 4f3884835267..dd95a6fa24bf 100644
+--- a/fs/cifs/cifsacl.h
++++ b/fs/cifs/cifsacl.h
+@@ -98,4 +98,18 @@ struct cifs_ace {
+       struct cifs_sid sid; /* ie UUID of user or group who gets these perms */
+ } __attribute__((packed));
+ 
++/*
++ * Minimum security identifier can be one for system defined Users
++ * and Groups such as NULL SID and World or Built-in accounts such
++ * as Administrator and Guest and consists of
++ * Revision + Num (Sub)Auths + Authority + Domain (one Subauthority)
++ */
++#define MIN_SID_LEN  (1 + 1 + 6 + 4) /* in bytes */
++
++/*
++ * Minimum security descriptor can be one without any SACL and DACL and can
++ * consist of revision, type, and two sids of minimum size for owner and group
++ */
++#define MIN_SEC_DESC_LEN  (sizeof(struct cifs_ntsd) + (2 * MIN_SID_LEN))
++
+ #endif /* _CIFSACL_H */
+diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
+index 9c6d95ffca97..4ee32488ff74 100644
+--- a/fs/cifs/smb2ops.c
++++ b/fs/cifs/smb2ops.c
+@@ -1277,10 +1277,11 @@ smb2_is_session_expired(char *buf)
+ {
+       struct smb2_sync_hdr *shdr = get_sync_hdr(buf);
+ 
+-      if (shdr->Status != STATUS_NETWORK_SESSION_EXPIRED)
++      if (shdr->Status != STATUS_NETWORK_SESSION_EXPIRED &&
++          shdr->Status != STATUS_USER_SESSION_DELETED)
+               return false;
+ 
+-      cifs_dbg(FYI, "Session expired\n");
++      cifs_dbg(FYI, "Session expired or deleted\n");
+       return true;
+ }
+ 
+@@ -1593,8 +1594,11 @@ get_smb2_acl_by_path(struct cifs_sb_info *cifs_sb,
+               oparms.create_options = 0;
+ 
+       utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
+-      if (!utf16_path)
+-              return ERR_PTR(-ENOMEM);
++      if (!utf16_path) {
++              rc = -ENOMEM;
++              free_xid(xid);
++              return ERR_PTR(rc);
++      }
+ 
+       oparms.tcon = tcon;
+       oparms.desired_access = READ_CONTROL;
+@@ -1652,8 +1656,11 @@ set_smb2_acl(struct cifs_ntsd *pnntsd, __u32 acllen,
+               access_flags = WRITE_DAC;
+ 
+       utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
+-      if (!utf16_path)
+-              return -ENOMEM;
++      if (!utf16_path) {
++              rc = -ENOMEM;
++              free_xid(xid);
++              return rc;
++      }
+ 
+       oparms.tcon = tcon;
+       oparms.desired_access = access_flags;
+@@ -1713,15 +1720,21 @@ static long smb3_zero_range(struct file *file, struct cifs_tcon *tcon,
+ 
+       /* if file not oplocked can't be sure whether asking to extend size */
+       if (!CIFS_CACHE_READ(cifsi))
+-              if (keep_size == false)
+-                      return -EOPNOTSUPP;
++              if (keep_size == false) {
++                      rc = -EOPNOTSUPP;
++                      free_xid(xid);
++                      return rc;
++              }
+ 
+       /*
+        * Must check if file sparse since fallocate -z (zero range) assumes
+        * non-sparse allocation
+        */
+-      if (!(cifsi->cifsAttrs & FILE_ATTRIBUTE_SPARSE_FILE))
+-              return -EOPNOTSUPP;
++      if (!(cifsi->cifsAttrs & FILE_ATTRIBUTE_SPARSE_FILE)) {
++              rc = -EOPNOTSUPP;
++              free_xid(xid);
++              return rc;
++      }
+ 
+       /*
+        * need to make sure we are not asked to extend the file since the SMB3
+@@ -1730,8 +1743,11 @@ static long smb3_zero_range(struct file *file, struct cifs_tcon *tcon,
+        * which for a non sparse file would zero the newly extended range
+        */
+       if (keep_size == false)
+-              if (i_size_read(inode) < offset + len)
+-                      return -EOPNOTSUPP;
++              if (i_size_read(inode) < offset + len) {
++                      rc = -EOPNOTSUPP;
++                      free_xid(xid);
++                      return rc;
++              }
+ 
+       cifs_dbg(FYI, "offset %lld len %lld", offset, len);
+ 
+@@ -1764,8 +1780,11 @@ static long smb3_punch_hole(struct file *file, struct cifs_tcon *tcon,
+ 
+       /* Need to make file sparse, if not already, before freeing range. */
+       /* Consider adding equivalent for compressed since it could also work */
+-      if (!smb2_set_sparse(xid, tcon, cfile, inode, set_sparse))
+-              return -EOPNOTSUPP;
++      if (!smb2_set_sparse(xid, tcon, cfile, inode, set_sparse)) {
++              rc = -EOPNOTSUPP;
++              free_xid(xid);
++              return rc;
++      }
+ 
+       cifs_dbg(FYI, "offset %lld len %lld", offset, len);
+ 
+@@ -1796,8 +1815,10 @@ static long smb3_simple_falloc(struct file *file, struct cifs_tcon *tcon,
+ 
+       /* if file not oplocked can't be sure whether asking to extend size */
+       if (!CIFS_CACHE_READ(cifsi))
+-              if (keep_size == false)
+-                      return -EOPNOTSUPP;
++              if (keep_size == false) {
++                      free_xid(xid);
++                      return rc;
++              }
+ 
+       /*
+        * Files are non-sparse by default so falloc may be a no-op
+@@ -1806,14 +1827,16 @@ static long smb3_simple_falloc(struct file *file, struct cifs_tcon *tcon,
+        */
+       if ((cifsi->cifsAttrs & FILE_ATTRIBUTE_SPARSE_FILE) == 0) {
+               if (keep_size == true)
+-                      return 0;
++                      rc = 0;
+               /* check if extending file */
+               else if (i_size_read(inode) >= off + len)
+                       /* not extending file and already not sparse */
+-                      return 0;
++                      rc = 0;
+               /* BB: in future add else clause to extend file */
+               else
+-                      return -EOPNOTSUPP;
++                      rc = -EOPNOTSUPP;
++              free_xid(xid);
++              return rc;
+       }
+ 
+       if ((keep_size == true) || (i_size_read(inode) >= off + len)) {
+@@ -1825,8 +1848,11 @@ static long smb3_simple_falloc(struct file *file, struct cifs_tcon *tcon,
+                * ie potentially making a few extra pages at the beginning
+                * or end of the file non-sparse via set_sparse is harmless.
+                */
+-              if ((off > 8192) || (off + len + 8192 < i_size_read(inode)))
+-                      return -EOPNOTSUPP;
++              if ((off > 8192) || (off + len + 8192 < i_size_read(inode))) {
++                      rc = -EOPNOTSUPP;
++                      free_xid(xid);
++                      return rc;
++              }
+ 
+               rc = smb2_set_sparse(xid, tcon, cfile, inode, false);
+       }
+diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
+index 0f48741a0130..32d7fd830aae 100644
+--- a/fs/cifs/smb2pdu.c
++++ b/fs/cifs/smb2pdu.c
+@@ -1276,6 +1276,7 @@ SMB2_sess_setup(const unsigned int xid, struct cifs_ses *ses,
+       sess_data->ses = ses;
+       sess_data->buf0_type = CIFS_NO_BUFFER;
+       sess_data->nls_cp = (struct nls_table *) nls_cp;
++      sess_data->previous_session = ses->Suid;
+ 
+ #ifdef CONFIG_CIFS_SMB311
+       /*
+@@ -2377,8 +2378,7 @@ SMB2_query_acl(const unsigned int xid, struct cifs_tcon *tcon,
+ 
+       return query_info(xid, tcon, persistent_fid, volatile_fid,
+                         0, SMB2_O_INFO_SECURITY, additional_info,
+-                        SMB2_MAX_BUFFER_SIZE,
+-                        sizeof(struct smb2_file_all_info), data, plen);
++                        SMB2_MAX_BUFFER_SIZE, MIN_SEC_DESC_LEN, data, plen);
+ }
+ 
+ int
+diff --git a/fs/ext4/indirect.c b/fs/ext4/indirect.c
+index c32802c956d5..bf7fa1507e81 100644
+--- a/fs/ext4/indirect.c
++++ b/fs/ext4/indirect.c
+@@ -561,10 +561,16 @@ int ext4_ind_map_blocks(handle_t *handle, struct inode *inode,
+               unsigned epb = inode->i_sb->s_blocksize / sizeof(u32);
+               int i;
+ 
+-              /* Count number blocks in a subtree under 'partial' */
+-              count = 1;
+-              for (i = 0; partial + i != chain + depth - 1; i++)
+-                      count *= epb;
++              /*
++               * Count number blocks in a subtree under 'partial'. At each
++               * level we count number of complete empty subtrees beyond
++               * current offset and then descend into the subtree only
++               * partially beyond current offset.
++               */
++              count = 0;
++              for (i = partial - chain + 1; i < depth; i++)
++                      count = count * epb + (epb - offsets[i] - 1);
++              count++;
+               /* Fill in size of a hole we found */
+               map->m_pblk = 0;
+               map->m_len = min_t(unsigned int, map->m_len, count);
+diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c
+index 70cf4c7b268a..44b4fcdc3755 100644
+--- a/fs/ext4/inline.c
++++ b/fs/ext4/inline.c
+@@ -144,6 +144,12 @@ int ext4_find_inline_data_nolock(struct inode *inode)
+               goto out;
+ 
+       if (!is.s.not_found) {
++              if (is.s.here->e_value_inum) {
++                      EXT4_ERROR_INODE(inode, "inline data xattr refers "
++                                       "to an external xattr inode");
++                      error = -EFSCORRUPTED;
++                      goto out;
++              }
+               EXT4_I(inode)->i_inline_off = (u16)((void *)is.s.here -
+                                       (void *)ext4_raw_inode(&is.iloc));
+               EXT4_I(inode)->i_inline_size = EXT4_MIN_INLINE_DATA_SIZE +
+diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
+index 1e50c5efae67..c73cb9346aee 100644
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -4298,28 +4298,28 @@ int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length)
+               EXT4_BLOCK_SIZE_BITS(sb);
+       stop_block = (offset + length) >> EXT4_BLOCK_SIZE_BITS(sb);
+ 
+-      /* If there are no blocks to remove, return now */
+-      if (first_block >= stop_block)
+-              goto out_stop;
++      /* If there are blocks to remove, do it */
++      if (stop_block > first_block) {
+ 
+-      down_write(&EXT4_I(inode)->i_data_sem);
+-      ext4_discard_preallocations(inode);
++              down_write(&EXT4_I(inode)->i_data_sem);
++              ext4_discard_preallocations(inode);
+ 
+-      ret = ext4_es_remove_extent(inode, first_block,
+-                                  stop_block - first_block);
+-      if (ret) {
+-              up_write(&EXT4_I(inode)->i_data_sem);
+-              goto out_stop;
+-      }
++              ret = ext4_es_remove_extent(inode, first_block,
++                                          stop_block - first_block);
++              if (ret) {
++                      up_write(&EXT4_I(inode)->i_data_sem);
++                      goto out_stop;
++              }
+ 
+-      if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
+-              ret = ext4_ext_remove_space(inode, first_block,
+-                                          stop_block - 1);
+-      else
+-              ret = ext4_ind_remove_space(handle, inode, first_block,
+-                                          stop_block);
++              if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
++                      ret = ext4_ext_remove_space(inode, first_block,
++                                                  stop_block - 1);
++              else
++                      ret = ext4_ind_remove_space(handle, inode, first_block,
++                                                  stop_block);
+ 
+-      up_write(&EXT4_I(inode)->i_data_sem);
++              up_write(&EXT4_I(inode)->i_data_sem);
++      }
+       if (IS_SYNC(inode))
+               ext4_handle_sync(handle);
+ 
+@@ -4701,19 +4701,21 @@ static blkcnt_t ext4_inode_blocks(struct ext4_inode *raw_inode,
+       }
+ }
+ 
+-static inline void ext4_iget_extra_inode(struct inode *inode,
++static inline int ext4_iget_extra_inode(struct inode *inode,
+                                        struct ext4_inode *raw_inode,
+                                        struct ext4_inode_info *ei)
+ {
+       __le32 *magic = (void *)raw_inode +
+                       EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize;
++
+       if (EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize + sizeof(__le32) <=
+           EXT4_INODE_SIZE(inode->i_sb) &&
+           *magic == cpu_to_le32(EXT4_XATTR_MAGIC)) {
+               ext4_set_inode_state(inode, EXT4_STATE_XATTR);
+-              ext4_find_inline_data_nolock(inode);
++              return ext4_find_inline_data_nolock(inode);
+       } else
+               EXT4_I(inode)->i_inline_off = 0;
++      return 0;
+ }
+ 
+ int ext4_get_projid(struct inode *inode, kprojid_t *projid)
+@@ -4893,7 +4895,9 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
+                       ei->i_extra_isize = sizeof(struct ext4_inode) -
+                                           EXT4_GOOD_OLD_INODE_SIZE;
+               } else {
+-                      ext4_iget_extra_inode(inode, raw_inode, ei);
++                      ret = ext4_iget_extra_inode(inode, raw_inode, ei);
++                      if (ret)
++                              goto bad_inode;
+               }
+       }
+ 
+diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
+index b6bec270a8e4..d792b7689d92 100644
+--- a/fs/ext4/resize.c
++++ b/fs/ext4/resize.c
+@@ -1933,7 +1933,7 @@ int ext4_resize_fs(struct super_block *sb, ext4_fsblk_t n_blocks_count)
+               return 0;
+ 
+       n_group = ext4_get_group_number(sb, n_blocks_count - 1);
+-      if (n_group > (0xFFFFFFFFUL / EXT4_INODES_PER_GROUP(sb))) {
++      if (n_group >= (0xFFFFFFFFUL / EXT4_INODES_PER_GROUP(sb))) {
+               ext4_warning(sb, "resize would cause inodes_count overflow");
+               return -EINVAL;
+       }
+diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
+index 499cb4b1fbd2..fc4ced59c565 100644
+--- a/fs/ext4/xattr.c
++++ b/fs/ext4/xattr.c
+@@ -1688,7 +1688,7 @@ static int ext4_xattr_set_entry(struct ext4_xattr_info *i,
+ 
+       /* No failures allowed past this point. */
+ 
+-      if (!s->not_found && here->e_value_offs) {
++      if (!s->not_found && here->e_value_size && here->e_value_offs) {
+               /* Remove the old value. */
+               void *first_val = s->base + min_offs;
+               size_t offs = le16_to_cpu(here->e_value_offs);
+diff --git a/fs/orangefs/inode.c b/fs/orangefs/inode.c
+index 79c61da8b1bc..c65a51d87cac 100644
+--- a/fs/orangefs/inode.c
++++ b/fs/orangefs/inode.c
+@@ -269,6 +269,13 @@ int orangefs_getattr(const struct path *path, struct kstat *stat,
+               else
+                       stat->result_mask = STATX_BASIC_STATS &
+                           ~STATX_SIZE;
++
++              stat->attributes_mask = STATX_ATTR_IMMUTABLE |
++                  STATX_ATTR_APPEND;
++              if (inode->i_flags & S_IMMUTABLE)
++                      stat->attributes |= STATX_ATTR_IMMUTABLE;
++              if (inode->i_flags & S_APPEND)
++                      stat->attributes |= STATX_ATTR_APPEND;
+       }
+       return ret;
+ }
+diff --git a/fs/orangefs/namei.c b/fs/orangefs/namei.c
+index 1b5707c44c3f..e026bee02a66 100644
+--- a/fs/orangefs/namei.c
++++ b/fs/orangefs/namei.c
+@@ -326,6 +326,13 @@ static int orangefs_symlink(struct inode *dir,
+               ret = PTR_ERR(inode);
+               goto out;
+       }
++      /*
++       * This is necessary because orangefs_inode_getattr will not
++       * re-read symlink size as it is impossible for it to change.
++       * Invalidating the cache does not help.  orangefs_new_inode
++       * does not set the correct size (it does not know symname).
++       */
++      inode->i_size = strlen(symname);
+ 
+       gossip_debug(GOSSIP_NAME_DEBUG,
+                    "Assigned symlink inode new number of %pU\n",
+diff --git a/include/linux/irq.h b/include/linux/irq.h
+index 65916a305f3d..4e66378f290b 100644
+--- a/include/linux/irq.h
++++ b/include/linux/irq.h
+@@ -551,7 +551,12 @@ extern int irq_affinity_online_cpu(unsigned int cpu);
+ #endif
+ 
+ #if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_PENDING_IRQ)
+-void irq_move_irq(struct irq_data *data);
++void __irq_move_irq(struct irq_data *data);
++static inline void irq_move_irq(struct irq_data *data)
++{
++      if (unlikely(irqd_is_setaffinity_pending(data)))
++              __irq_move_irq(data);
++}
+ void irq_move_masked_irq(struct irq_data *data);
+ void irq_force_complete_move(struct irq_desc *desc);
+ #else
+diff --git a/include/linux/virtio_net.h b/include/linux/virtio_net.h
+index f144216febc6..9397628a1967 100644
+--- a/include/linux/virtio_net.h
++++ b/include/linux/virtio_net.h
+@@ -58,7 +58,8 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb,
+ static inline int virtio_net_hdr_from_skb(const struct sk_buff *skb,
+                                         struct virtio_net_hdr *hdr,
+                                         bool little_endian,
+-                                        bool has_data_valid)
++                                        bool has_data_valid,
++                                        int vlan_hlen)
+ {
+       memset(hdr, 0, sizeof(*hdr));   /* no info leak */
+ 
+@@ -83,12 +84,8 @@ static inline int virtio_net_hdr_from_skb(const struct sk_buff *skb,
+ 
+       if (skb->ip_summed == CHECKSUM_PARTIAL) {
+               hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
+-              if (skb_vlan_tag_present(skb))
+-                      hdr->csum_start = __cpu_to_virtio16(little_endian,
+-                              skb_checksum_start_offset(skb) + VLAN_HLEN);
+-              else
+-                      hdr->csum_start = __cpu_to_virtio16(little_endian,
+-                              skb_checksum_start_offset(skb));
++              hdr->csum_start = __cpu_to_virtio16(little_endian,
++                      skb_checksum_start_offset(skb) + vlan_hlen);
+               hdr->csum_offset = __cpu_to_virtio16(little_endian,
+                               skb->csum_offset);
+       } else if (has_data_valid &&
+diff --git a/include/net/transp_v6.h b/include/net/transp_v6.h
+index c4f5caaf3778..f6a3543e5247 100644
+--- a/include/net/transp_v6.h
++++ b/include/net/transp_v6.h
+@@ -45,8 +45,15 @@ int ip6_datagram_send_ctl(struct net *net, struct sock *sk, struct msghdr *msg,
+                         struct flowi6 *fl6, struct ipcm6_cookie *ipc6,
+                         struct sockcm_cookie *sockc);
+ 
+-void ip6_dgram_sock_seq_show(struct seq_file *seq, struct sock *sp,
+-                           __u16 srcp, __u16 destp, int bucket);
++void __ip6_dgram_sock_seq_show(struct seq_file *seq, struct sock *sp,
++                             __u16 srcp, __u16 destp, int rqueue, int bucket);
++static inline void
++ip6_dgram_sock_seq_show(struct seq_file *seq, struct sock *sp, __u16 srcp,
++                      __u16 destp, int bucket)
++{
++      __ip6_dgram_sock_seq_show(seq, sp, srcp, destp, sk_rmem_alloc_get(sp),
++                                bucket);
++}
+ 
+ #define LOOPBACK4_IPV6 cpu_to_be32(0x7f000006)
+ 
+diff --git a/include/net/udp.h b/include/net/udp.h
+index 0676b272f6ac..1db85dcb06f6 100644
+--- a/include/net/udp.h
++++ b/include/net/udp.h
+@@ -244,6 +244,11 @@ static inline __be16 udp_flow_src_port(struct net *net, struct sk_buff *skb,
+       return htons((((u64) hash * (max - min)) >> 32) + min);
+ }
+ 
++static inline int udp_rqueue_get(struct sock *sk)
++{
++      return sk_rmem_alloc_get(sk) - READ_ONCE(udp_sk(sk)->forward_deficit);
++}
++
+ /* net/ipv4/udp.c */
+ void udp_destruct_sock(struct sock *sk);
+ void skb_consume_udp(struct sock *sk, struct sk_buff *skb, int len);
+diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
+index e3336d904f64..facfecfc543c 100644
+--- a/kernel/irq/manage.c
++++ b/kernel/irq/manage.c
+@@ -204,6 +204,39 @@ int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
+       return ret;
+ }
+ 
++#ifdef CONFIG_GENERIC_PENDING_IRQ
++static inline int irq_set_affinity_pending(struct irq_data *data,
++                                         const struct cpumask *dest)
++{
++      struct irq_desc *desc = irq_data_to_desc(data);
++
++      irqd_set_move_pending(data);
++      irq_copy_pending(desc, dest);
++      return 0;
++}
++#else
++static inline int irq_set_affinity_pending(struct irq_data *data,
++                                         const struct cpumask *dest)
++{
++      return -EBUSY;
++}
++#endif
++
++static int irq_try_set_affinity(struct irq_data *data,
++                              const struct cpumask *dest, bool force)
++{
++      int ret = irq_do_set_affinity(data, dest, force);
++
++      /*
++       * In case that the underlying vector management is busy and the
++       * architecture supports the generic pending mechanism then utilize
++       * this to avoid returning an error to user space.
++       */
++      if (ret == -EBUSY && !force)
++              ret = irq_set_affinity_pending(data, dest);
++      return ret;
++}
++
+ int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask,
+                           bool force)
+ {
+@@ -214,8 +247,8 @@ int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask,
+       if (!chip || !chip->irq_set_affinity)
+               return -EINVAL;
+ 
+-      if (irq_can_move_pcntxt(data)) {
+-              ret = irq_do_set_affinity(data, mask, force);
++      if (irq_can_move_pcntxt(data) && !irqd_is_setaffinity_pending(data)) {
++              ret = irq_try_set_affinity(data, mask, force);
+       } else {
+               irqd_set_move_pending(data);
+               irq_copy_pending(desc, mask);
+diff --git a/kernel/irq/migration.c b/kernel/irq/migration.c
+index 86ae0eb80b53..def48589ea48 100644
+--- a/kernel/irq/migration.c
++++ b/kernel/irq/migration.c
+@@ -38,17 +38,18 @@ bool irq_fixup_move_pending(struct irq_desc *desc, bool force_clear)
+ void irq_move_masked_irq(struct irq_data *idata)
+ {
+       struct irq_desc *desc = irq_data_to_desc(idata);
+-      struct irq_chip *chip = desc->irq_data.chip;
++      struct irq_data *data = &desc->irq_data;
++      struct irq_chip *chip = data->chip;
+ 
+-      if (likely(!irqd_is_setaffinity_pending(&desc->irq_data)))
++      if (likely(!irqd_is_setaffinity_pending(data)))
+               return;
+ 
+-      irqd_clr_move_pending(&desc->irq_data);
++      irqd_clr_move_pending(data);
+ 
+       /*
+        * Paranoia: cpu-local interrupts shouldn't be calling in here anyway.
+        */
+-      if (irqd_is_per_cpu(&desc->irq_data)) {
++      if (irqd_is_per_cpu(data)) {
+               WARN_ON(1);
+               return;
+       }
+@@ -73,13 +74,24 @@ void irq_move_masked_irq(struct irq_data *idata)
+        * For correct operation this depends on the caller
+        * masking the irqs.
+        */
+-      if (cpumask_any_and(desc->pending_mask, cpu_online_mask) < nr_cpu_ids)
+-              irq_do_set_affinity(&desc->irq_data, desc->pending_mask, false);
+-
++      if (cpumask_any_and(desc->pending_mask, cpu_online_mask) < nr_cpu_ids) {
++              int ret;
++
++              ret = irq_do_set_affinity(data, desc->pending_mask, false);
++              /*
++               * If the there is a cleanup pending in the underlying
++               * vector management, reschedule the move for the next
++               * interrupt. Leave desc->pending_mask intact.
++               */
++              if (ret == -EBUSY) {
++                      irqd_set_move_pending(data);
++                      return;
++              }
++      }
+       cpumask_clear(desc->pending_mask);
+ }
+ 
+-void irq_move_irq(struct irq_data *idata)
++void __irq_move_irq(struct irq_data *idata)
+ {
+       bool masked;
+ 
+@@ -90,9 +102,6 @@ void irq_move_irq(struct irq_data *idata)
+        */
+       idata = irq_desc_get_irq_data(irq_data_to_desc(idata));
+ 
+-      if (likely(!irqd_is_setaffinity_pending(idata)))
+-              return;
+-
+       if (unlikely(irqd_irq_disabled(idata)))
+               return;
+ 
+diff --git a/mm/backing-dev.c b/mm/backing-dev.c
+index 7441bd93b732..8fe3ebd6ac00 100644
+--- a/mm/backing-dev.c
++++ b/mm/backing-dev.c
+@@ -412,6 +412,7 @@ static void wb_exit(struct bdi_writeback *wb)
+  * protected.
+  */
+ static DEFINE_SPINLOCK(cgwb_lock);
++static struct workqueue_struct *cgwb_release_wq;
+ 
+ /**
+  * wb_congested_get_create - get or create a wb_congested
+@@ -522,7 +523,7 @@ static void cgwb_release(struct percpu_ref *refcnt)
+ {
+       struct bdi_writeback *wb = container_of(refcnt, struct bdi_writeback,
+                                               refcnt);
+-      schedule_work(&wb->release_work);
++      queue_work(cgwb_release_wq, &wb->release_work);
+ }
+ 
+ static void cgwb_kill(struct bdi_writeback *wb)
+@@ -784,6 +785,21 @@ static void cgwb_bdi_register(struct backing_dev_info *bdi)
+       spin_unlock_irq(&cgwb_lock);
+ }
+ 
++static int __init cgwb_init(void)
++{
++      /*
++       * There can be many concurrent release work items overwhelming
++       * system_wq.  Put them in a separate wq and limit concurrency.
++       * There's no point in executing many of these in parallel.
++       */
++      cgwb_release_wq = alloc_workqueue("cgwb_release", 0, 1);
++      if (!cgwb_release_wq)
++              return -ENOMEM;
++
++      return 0;
++}
++subsys_initcall(cgwb_init);
++
+ #else /* CONFIG_CGROUP_WRITEBACK */
+ 
+ static int cgwb_bdi_init(struct backing_dev_info *bdi)
+diff --git a/mm/page_alloc.c b/mm/page_alloc.c
+index 22320ea27489..d2d0eb9536a3 100644
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -4162,7 +4162,6 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
+        * orientated.
+        */
+       if (!(alloc_flags & ALLOC_CPUSET) || reserve_flags) {
+-              ac->zonelist = node_zonelist(numa_node_id(), gfp_mask);
+               ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
+                                       ac->high_zoneidx, ac->nodemask);
+       }
+diff --git a/net/dsa/tag_trailer.c b/net/dsa/tag_trailer.c
+index 7d20e1f3de28..56197f0d9608 100644
+--- a/net/dsa/tag_trailer.c
++++ b/net/dsa/tag_trailer.c
+@@ -75,7 +75,8 @@ static struct sk_buff *trailer_rcv(struct sk_buff *skb, struct net_device *dev,
+       if (!skb->dev)
+               return NULL;
+ 
+-      pskb_trim_rcsum(skb, skb->len - 4);
++      if (pskb_trim_rcsum(skb, skb->len - 4))
++              return NULL;
+ 
+       return skb;
+ }
+diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
+index f70586b50838..ef8cd0f7db89 100644
+--- a/net/ipv4/tcp_ipv4.c
++++ b/net/ipv4/tcp_ipv4.c
+@@ -1689,6 +1689,10 @@ int tcp_v4_rcv(struct sk_buff *skb)
+                       reqsk_put(req);
+                       goto discard_it;
+               }
++              if (tcp_checksum_complete(skb)) {
++                      reqsk_put(req);
++                      goto csum_error;
++              }
+               if (unlikely(sk->sk_state != TCP_LISTEN)) {
+                       inet_csk_reqsk_queue_drop_and_put(sk, req);
+                       goto lookup;
+diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
+index b61a770884fa..5f7bc5c6366a 100644
+--- a/net/ipv4/udp.c
++++ b/net/ipv4/udp.c
+@@ -2718,7 +2718,7 @@ static void udp4_format_sock(struct sock *sp, struct seq_file *f,
+               " %02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %d",
+               bucket, src, srcp, dest, destp, sp->sk_state,
+               sk_wmem_alloc_get(sp),
+-              sk_rmem_alloc_get(sp),
++              udp_rqueue_get(sp),
+               0, 0L, 0,
+               from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)),
+               0, sock_i_ino(sp),
+diff --git a/net/ipv4/udp_diag.c b/net/ipv4/udp_diag.c
+index d0390d844ac8..d9ad986c7b2c 100644
+--- a/net/ipv4/udp_diag.c
++++ b/net/ipv4/udp_diag.c
+@@ -163,7 +163,7 @@ static int udp_diag_dump_one(struct sk_buff *in_skb, const struct nlmsghdr *nlh,
+ static void udp_diag_get_info(struct sock *sk, struct inet_diag_msg *r,
+               void *info)
+ {
+-      r->idiag_rqueue = sk_rmem_alloc_get(sk);
++      r->idiag_rqueue = udp_rqueue_get(sk);
+       r->idiag_wqueue = sk_wmem_alloc_get(sk);
+ }
+ 
+diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
+index a02ad100f0d7..2ee08b6a86a4 100644
+--- a/net/ipv6/datagram.c
++++ b/net/ipv6/datagram.c
+@@ -1019,8 +1019,8 @@ int ip6_datagram_send_ctl(struct net *net, struct sock *sk,
+ }
+ EXPORT_SYMBOL_GPL(ip6_datagram_send_ctl);
+ 
+-void ip6_dgram_sock_seq_show(struct seq_file *seq, struct sock *sp,
+-                           __u16 srcp, __u16 destp, int bucket)
++void __ip6_dgram_sock_seq_show(struct seq_file *seq, struct sock *sp,
++                             __u16 srcp, __u16 destp, int rqueue, int bucket)
+ {
+       const struct in6_addr *dest, *src;
+ 
+@@ -1036,7 +1036,7 @@ void ip6_dgram_sock_seq_show(struct seq_file *seq, struct sock *sp,
+                  dest->s6_addr32[2], dest->s6_addr32[3], destp,
+                  sp->sk_state,
+                  sk_wmem_alloc_get(sp),
+-                 sk_rmem_alloc_get(sp),
++                 rqueue,
+                  0, 0L, 0,
+                  from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
+                  0,
+diff --git a/net/ipv6/route.c b/net/ipv6/route.c
+index 4530a82aaa2e..b94345e657f7 100644
+--- a/net/ipv6/route.c
++++ b/net/ipv6/route.c
+@@ -2149,9 +2149,6 @@ static void __ip6_rt_update_pmtu(struct dst_entry *dst, const struct sock *sk,
+       const struct in6_addr *daddr, *saddr;
+       struct rt6_info *rt6 = (struct rt6_info *)dst;
+ 
+-      if (rt6->rt6i_flags & RTF_LOCAL)
+-              return;
+-
+       if (dst_metric_locked(dst, RTAX_MTU))
+               return;
+ 
+diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
+index 6d664d83cd16..5d4eb9d2c3a7 100644
+--- a/net/ipv6/tcp_ipv6.c
++++ b/net/ipv6/tcp_ipv6.c
+@@ -1475,6 +1475,10 @@ static int tcp_v6_rcv(struct sk_buff *skb)
+                       reqsk_put(req);
+                       goto discard_it;
+               }
++              if (tcp_checksum_complete(skb)) {
++                      reqsk_put(req);
++                      goto csum_error;
++              }
+               if (unlikely(sk->sk_state != TCP_LISTEN)) {
+                       inet_csk_reqsk_queue_drop_and_put(sk, req);
+                       goto lookup;
+diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
+index ea0730028e5d..977bd5a07cab 100644
+--- a/net/ipv6/udp.c
++++ b/net/ipv6/udp.c
+@@ -1475,7 +1475,8 @@ int udp6_seq_show(struct seq_file *seq, void *v)
+               struct inet_sock *inet = inet_sk(v);
+               __u16 srcp = ntohs(inet->inet_sport);
+               __u16 destp = ntohs(inet->inet_dport);
+-              ip6_dgram_sock_seq_show(seq, v, srcp, destp, bucket);
++              __ip6_dgram_sock_seq_show(seq, v, srcp, destp,
++                                        udp_rqueue_get(v), bucket);
+       }
+       return 0;
+ }
+diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
+index 60c2a252bdf5..38d132d007ba 100644
+--- a/net/packet/af_packet.c
++++ b/net/packet/af_packet.c
+@@ -2037,7 +2037,7 @@ static int packet_rcv_vnet(struct msghdr *msg, const struct sk_buff *skb,
+               return -EINVAL;
+       *len -= sizeof(vnet_hdr);
+ 
+-      if (virtio_net_hdr_from_skb(skb, &vnet_hdr, vio_le(), true))
++      if (virtio_net_hdr_from_skb(skb, &vnet_hdr, vio_le(), true, 0))
+               return -EINVAL;
+ 
+       return memcpy_to_msg(msg, (void *)&vnet_hdr, sizeof(vnet_hdr));
+@@ -2304,7 +2304,7 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
+       if (do_vnet) {
+               if (virtio_net_hdr_from_skb(skb, h.raw + macoff -
+                                           sizeof(struct virtio_net_hdr),
+-                                          vio_le(), true)) {
++                                          vio_le(), true, 0)) {
+                       spin_lock(&sk->sk_receive_queue.lock);
+                       goto drop_n_account;
+               }
+diff --git a/net/sched/act_simple.c b/net/sched/act_simple.c
+index 9618b4a83cee..98c4afe7c15b 100644
+--- a/net/sched/act_simple.c
++++ b/net/sched/act_simple.c
+@@ -53,22 +53,22 @@ static void tcf_simp_release(struct tc_action *a)
+       kfree(d->tcfd_defdata);
+ }
+ 
+-static int alloc_defdata(struct tcf_defact *d, char *defdata)
++static int alloc_defdata(struct tcf_defact *d, const struct nlattr *defdata)
+ {
+       d->tcfd_defdata = kzalloc(SIMP_MAX_DATA, GFP_KERNEL);
+       if (unlikely(!d->tcfd_defdata))
+               return -ENOMEM;
+-      strlcpy(d->tcfd_defdata, defdata, SIMP_MAX_DATA);
++      nla_strlcpy(d->tcfd_defdata, defdata, SIMP_MAX_DATA);
+       return 0;
+ }
+ 
+-static void reset_policy(struct tcf_defact *d, char *defdata,
++static void reset_policy(struct tcf_defact *d, const struct nlattr *defdata,
+                        struct tc_defact *p)
+ {
+       spin_lock_bh(&d->tcf_lock);
+       d->tcf_action = p->action;
+       memset(d->tcfd_defdata, 0, SIMP_MAX_DATA);
+-      strlcpy(d->tcfd_defdata, defdata, SIMP_MAX_DATA);
++      nla_strlcpy(d->tcfd_defdata, defdata, SIMP_MAX_DATA);
+       spin_unlock_bh(&d->tcf_lock);
+ }
+ 
+@@ -87,7 +87,6 @@ static int tcf_simp_init(struct net *net, struct nlattr *nla,
+       struct tcf_defact *d;
+       bool exists = false;
+       int ret = 0, err;
+-      char *defdata;
+ 
+       if (nla == NULL)
+               return -EINVAL;
+@@ -110,8 +109,6 @@ static int tcf_simp_init(struct net *net, struct nlattr *nla,
+               return -EINVAL;
+       }
+ 
+-      defdata = nla_data(tb[TCA_DEF_DATA]);
+-
+       if (!exists) {
+               ret = tcf_idr_create(tn, parm->index, est, a,
+                                    &act_simp_ops, bind, false);
+@@ -119,7 +116,7 @@ static int tcf_simp_init(struct net *net, struct nlattr *nla,
+                       return ret;
+ 
+               d = to_defact(*a);
+-              ret = alloc_defdata(d, defdata);
++              ret = alloc_defdata(d, tb[TCA_DEF_DATA]);
+               if (ret < 0) {
+                       tcf_idr_release(*a, bind);
+                       return ret;
+@@ -133,7 +130,7 @@ static int tcf_simp_init(struct net *net, struct nlattr *nla,
+               if (!ovr)
+                       return -EEXIST;
+ 
+-              reset_policy(d, defdata, parm);
++              reset_policy(d, tb[TCA_DEF_DATA], parm);
+       }
+ 
+       if (ret == ACT_P_CREATED)
+diff --git a/net/socket.c b/net/socket.c
+index f10f1d947c78..d1b02f161429 100644
+--- a/net/socket.c
++++ b/net/socket.c
+@@ -537,7 +537,10 @@ static int sockfs_setattr(struct dentry *dentry, struct iattr *iattr)
+       if (!err && (iattr->ia_valid & ATTR_UID)) {
+               struct socket *sock = SOCKET_I(d_inode(dentry));
+ 
+-              sock->sk->sk_uid = iattr->ia_uid;
++              if (sock->sk)
++                      sock->sk->sk_uid = iattr->ia_uid;
++              else
++                      err = -ENOENT;
+       }
+ 
+       return err;
+@@ -586,12 +589,16 @@ EXPORT_SYMBOL(sock_alloc);
+  *    an inode not a file.
+  */
+ 
+-void sock_release(struct socket *sock)
++static void __sock_release(struct socket *sock, struct inode *inode)
+ {
+       if (sock->ops) {
+               struct module *owner = sock->ops->owner;
+ 
++              if (inode)
++                      inode_lock(inode);
+               sock->ops->release(sock);
++              if (inode)
++                      inode_unlock(inode);
+               sock->ops = NULL;
+               module_put(owner);
+       }
+@@ -605,6 +612,11 @@ void sock_release(struct socket *sock)
+       }
+       sock->file = NULL;
+ }
++
++void sock_release(struct socket *sock)
++{
++      __sock_release(sock, NULL);
++}
+ EXPORT_SYMBOL(sock_release);
+ 
+ void __sock_tx_timestamp(__u16 tsflags, __u8 *tx_flags)
+@@ -1146,7 +1158,7 @@ static int sock_mmap(struct file *file, struct vm_area_struct *vma)
+ 
+ static int sock_close(struct inode *inode, struct file *filp)
+ {
+-      sock_release(SOCKET_I(inode));
++      __sock_release(SOCKET_I(inode), inode);
+       return 0;
+ }
+ 
+diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
+index e1c93ce74e0f..5fe29121b9a8 100644
+--- a/net/tls/tls_sw.c
++++ b/net/tls/tls_sw.c
+@@ -191,18 +191,12 @@ static void tls_free_both_sg(struct sock *sk)
+ }
+ 
+ static int tls_do_encryption(struct tls_context *tls_ctx,
+-                           struct tls_sw_context *ctx, size_t data_len,
+-                           gfp_t flags)
++                           struct tls_sw_context *ctx,
++                           struct aead_request *aead_req,
++                           size_t data_len)
+ {
+-      unsigned int req_size = sizeof(struct aead_request) +
+-              crypto_aead_reqsize(ctx->aead_send);
+-      struct aead_request *aead_req;
+       int rc;
+ 
+-      aead_req = kzalloc(req_size, flags);
+-      if (!aead_req)
+-              return -ENOMEM;
+-
+       ctx->sg_encrypted_data[0].offset += tls_ctx->tx.prepend_size;
+       ctx->sg_encrypted_data[0].length -= tls_ctx->tx.prepend_size;
+ 
+@@ -219,7 +213,6 @@ static int tls_do_encryption(struct tls_context *tls_ctx,
+       ctx->sg_encrypted_data[0].offset -= tls_ctx->tx.prepend_size;
+       ctx->sg_encrypted_data[0].length += tls_ctx->tx.prepend_size;
+ 
+-      kfree(aead_req);
+       return rc;
+ }
+ 
+@@ -228,8 +221,14 @@ static int tls_push_record(struct sock *sk, int flags,
+ {
+       struct tls_context *tls_ctx = tls_get_ctx(sk);
+       struct tls_sw_context *ctx = tls_sw_ctx(tls_ctx);
++      struct aead_request *req;
+       int rc;
+ 
++      req = kzalloc(sizeof(struct aead_request) +
++                    crypto_aead_reqsize(ctx->aead_send), sk->sk_allocation);
++      if (!req)
++              return -ENOMEM;
++
+       sg_mark_end(ctx->sg_plaintext_data + ctx->sg_plaintext_num_elem - 1);
+       sg_mark_end(ctx->sg_encrypted_data + ctx->sg_encrypted_num_elem - 1);
+ 
+@@ -245,15 +244,14 @@ static int tls_push_record(struct sock *sk, int flags,
+       tls_ctx->pending_open_record_frags = 0;
+       set_bit(TLS_PENDING_CLOSED_RECORD, &tls_ctx->flags);
+ 
+-      rc = tls_do_encryption(tls_ctx, ctx, ctx->sg_plaintext_size,
+-                             sk->sk_allocation);
++      rc = tls_do_encryption(tls_ctx, ctx, req, ctx->sg_plaintext_size);
+       if (rc < 0) {
+               /* If we are called from write_space and
+                * we fail, we need to set this SOCK_NOSPACE
+                * to trigger another write_space in the future.
+                */
+               set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
+-              return rc;
++              goto out_req;
+       }
+ 
+       free_sg(sk, ctx->sg_plaintext_data, &ctx->sg_plaintext_num_elem,
+@@ -268,6 +266,8 @@ static int tls_push_record(struct sock *sk, int flags,
+               tls_err_abort(sk, EBADMSG);
+ 
+       tls_advance_record_sn(sk, &tls_ctx->tx);
++out_req:
++      kfree(req);
+       return rc;
+ }
+ 
+@@ -755,7 +755,7 @@ int tls_sw_recvmsg(struct sock *sk,
+       struct sk_buff *skb;
+       ssize_t copied = 0;
+       bool cmsg = false;
+-      int err = 0;
++      int target, err = 0;
+       long timeo;
+ 
+       flags |= nonblock;
+@@ -765,6 +765,7 @@ int tls_sw_recvmsg(struct sock *sk,
+ 
+       lock_sock(sk);
+ 
++      target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
+       timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
+       do {
+               bool zc = false;
+@@ -857,6 +858,9 @@ int tls_sw_recvmsg(struct sock *sk,
+                                       goto recv_end;
+                       }
+               }
++              /* If we have a new message from strparser, continue now. */
++              if (copied >= target && !ctx->recv_pkt)
++                      break;
+       } while (len);
+ 
+ recv_end:
+diff --git a/sound/pci/hda/hda_controller.c b/sound/pci/hda/hda_controller.c
+index d1eb14842340..a12e594d4e3b 100644
+--- a/sound/pci/hda/hda_controller.c
++++ b/sound/pci/hda/hda_controller.c
+@@ -748,8 +748,10 @@ int snd_hda_attach_pcm_stream(struct hda_bus *_bus, struct hda_codec *codec,
+               return err;
+       strlcpy(pcm->name, cpcm->name, sizeof(pcm->name));
+       apcm = kzalloc(sizeof(*apcm), GFP_KERNEL);
+-      if (apcm == NULL)
++      if (apcm == NULL) {
++              snd_device_free(chip->card, pcm);
+               return -ENOMEM;
++      }
+       apcm->chip = chip;
+       apcm->pcm = pcm;
+       apcm->codec = codec;
+diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
+index 5b4dbcec6de8..ba9a7e552183 100644
+--- a/sound/pci/hda/patch_conexant.c
++++ b/sound/pci/hda/patch_conexant.c
+@@ -959,12 +959,15 @@ static const struct snd_pci_quirk cxt5066_fixups[] = {
+       SND_PCI_QUIRK(0x103c, 0x8079, "HP EliteBook 840 G3", CXT_FIXUP_HP_DOCK),
+       SND_PCI_QUIRK(0x103c, 0x807C, "HP EliteBook 820 G3", CXT_FIXUP_HP_DOCK),
+       SND_PCI_QUIRK(0x103c, 0x80FD, "HP ProBook 640 G2", CXT_FIXUP_HP_DOCK),
++      SND_PCI_QUIRK(0x103c, 0x83b3, "HP EliteBook 830 G5", CXT_FIXUP_HP_DOCK),
++      SND_PCI_QUIRK(0x103c, 0x83d3, "HP ProBook 640 G4", CXT_FIXUP_HP_DOCK),
+       SND_PCI_QUIRK(0x103c, 0x8174, "HP Spectre x360", CXT_FIXUP_HP_SPECTRE),
+       SND_PCI_QUIRK(0x103c, 0x8115, "HP Z1 Gen3", CXT_FIXUP_HP_GATE_MIC),
+       SND_PCI_QUIRK(0x103c, 0x814f, "HP ZBook 15u G3", CXT_FIXUP_MUTE_LED_GPIO),
+       SND_PCI_QUIRK(0x103c, 0x822e, "HP ProBook 440 G4", CXT_FIXUP_MUTE_LED_GPIO),
+       SND_PCI_QUIRK(0x103c, 0x8299, "HP 800 G3 SFF", CXT_FIXUP_HP_MIC_NO_PRESENCE),
+       SND_PCI_QUIRK(0x103c, 0x829a, "HP 800 G3 DM", CXT_FIXUP_HP_MIC_NO_PRESENCE),
++      SND_PCI_QUIRK(0x103c, 0x8455, "HP Z2 G4", CXT_FIXUP_HP_MIC_NO_PRESENCE),
+       SND_PCI_QUIRK(0x1043, 0x138d, "Asus", CXT_FIXUP_HEADPHONE_MIC_PIN),
+       SND_PCI_QUIRK(0x152d, 0x0833, "OLPC XO-1.5", CXT_FIXUP_OLPC_XO),
+       SND_PCI_QUIRK(0x17aa, 0x20f2, "Lenovo T400", CXT_PINCFG_LENOVO_TP410),
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 01a6643fc7d4..06c2c80a045b 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -6580,7 +6580,6 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+       SND_PCI_QUIRK(0x17aa, 0x312f, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
+       SND_PCI_QUIRK(0x17aa, 0x3138, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
+       SND_PCI_QUIRK(0x17aa, 0x313c, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
+-      SND_PCI_QUIRK(0x17aa, 0x3112, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
+       SND_PCI_QUIRK(0x17aa, 0x3902, "Lenovo E50-80", ALC269_FIXUP_DMIC_THINKPAD_ACPI),
+       SND_PCI_QUIRK(0x17aa, 0x3977, "IdeaPad S210", ALC283_FIXUP_INT_MIC),
+       SND_PCI_QUIRK(0x17aa, 0x3978, "IdeaPad Y410P", ALC269_FIXUP_NO_SHUTUP),
+@@ -6752,6 +6751,11 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
+               {0x1b, 0x01111010},
+               {0x1e, 0x01451130},
+               {0x21, 0x02211020}),
++      SND_HDA_PIN_QUIRK(0x10ec0235, 0x17aa, "Lenovo", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY,
++              {0x12, 0x90a60140},
++              {0x14, 0x90170110},
++              {0x19, 0x02a11030},
++              {0x21, 0x02211020}),
+       SND_HDA_PIN_QUIRK(0x10ec0236, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
+               {0x12, 0x90a60140},
+               {0x14, 0x90170110},
+diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h
+index 754e632a27bd..02b7ad1946db 100644
+--- a/sound/usb/quirks-table.h
++++ b/sound/usb/quirks-table.h
+@@ -3277,6 +3277,10 @@ AU0828_DEVICE(0x2040, 0x7270, "Hauppauge", "HVR-950Q"),
+       }
+ },
+ 
++/* disabled due to regression for other devices;
++ * see https://bugzilla.kernel.org/show_bug.cgi?id=199905
++ */
++#if 0
+ {
+       /*
+        * Nura's first gen headphones use Cambridge Silicon Radio's vendor
+@@ -3324,6 +3328,7 @@ AU0828_DEVICE(0x2040, 0x7270, "Hauppauge", "HVR-950Q"),
+               }
+       }
+ },
++#endif /* disabled */
+ 
+ {
+       /*
