commit:     e17f9236d0c06738492d9c80accbb911c2558360
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sat Dec 29 18:53:52 2018 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sat Dec 29 18:53:52 2018 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=e17f9236

proj/linux-patches: Linux patch 4.14.91

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README              |    4 +
 1090_linux-4.14.91.patch | 1437 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 1441 insertions(+)

diff --git a/0000_README b/0000_README
index dc7f560..ca6677a 100644
--- a/0000_README
+++ b/0000_README
@@ -403,6 +403,10 @@ Patch:  1089_linux-4.14.90.patch
 From:   http://www.kernel.org
 Desc:   Linux 4.14.90
 
+Patch:  1090_linux-4.14.91.patch
+From:   http://www.kernel.org
+Desc:   Linux 4.14.91
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1090_linux-4.14.91.patch b/1090_linux-4.14.91.patch
new file mode 100644
index 0000000..90250d3
--- /dev/null
+++ b/1090_linux-4.14.91.patch
@@ -0,0 +1,1437 @@
+diff --git a/Makefile b/Makefile
+index 280c7193e246..a6fb3b158a19 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 4
+ PATCHLEVEL = 14
+-SUBLEVEL = 90
++SUBLEVEL = 91
+ EXTRAVERSION =
+ NAME = Petit Gorille
+ 
+diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
+index 62c62d3eb0ff..fed3636dce9a 100644
+--- a/arch/x86/include/asm/msr-index.h
++++ b/arch/x86/include/asm/msr-index.h
+@@ -372,6 +372,7 @@
+ #define MSR_F15H_NB_PERF_CTR          0xc0010241
+ #define MSR_F15H_PTSC                 0xc0010280
+ #define MSR_F15H_IC_CFG                       0xc0011021
++#define MSR_F15H_EX_CFG                       0xc001102c
+ 
+ /* Fam 10h MSRs */
+ #define MSR_FAM10H_MMIO_CONF_BASE     0xc0010058
+diff --git a/arch/x86/kernel/cpu/mtrr/if.c b/arch/x86/kernel/cpu/mtrr/if.c
+index 558444b23923..c2987daa6a6b 100644
+--- a/arch/x86/kernel/cpu/mtrr/if.c
++++ b/arch/x86/kernel/cpu/mtrr/if.c
+@@ -173,6 +173,8 @@ mtrr_ioctl(struct file *file, unsigned int cmd, unsigned long __arg)
+       struct mtrr_gentry gentry;
+       void __user *arg = (void __user *) __arg;
+ 
++      memset(&gentry, 0, sizeof(gentry));
++
+       switch (cmd) {
+       case MTRRIOC_ADD_ENTRY:
+       case MTRRIOC_SET_ENTRY:
+diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
+index 4353580b659a..8eec37d37c3d 100644
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -10447,6 +10447,8 @@ static void nested_get_vmcs12_pages(struct kvm_vcpu *vcpu,
+                       kunmap(vmx->nested.pi_desc_page);
+                       kvm_release_page_dirty(vmx->nested.pi_desc_page);
+                       vmx->nested.pi_desc_page = NULL;
++                      vmx->nested.pi_desc = NULL;
++                      vmcs_write64(POSTED_INTR_DESC_ADDR, -1ull);
+               }
+               page = kvm_vcpu_gpa_to_page(vcpu, vmcs12->posted_intr_desc_addr);
+               if (is_error_page(page))
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index f24329659bea..ac431fa778aa 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -2227,6 +2227,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
+       case MSR_AMD64_PATCH_LOADER:
+       case MSR_AMD64_BU_CFG2:
+       case MSR_AMD64_DC_CFG:
++      case MSR_F15H_EX_CFG:
+               break;
+ 
+       case MSR_IA32_UCODE_REV:
+@@ -2508,6 +2509,7 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
+       case MSR_AMD64_BU_CFG2:
+       case MSR_IA32_PERF_CTL:
+       case MSR_AMD64_DC_CFG:
++      case MSR_F15H_EX_CFG:
+               msr_info->data = 0;
+               break;
+       case MSR_K7_EVNTSEL0 ... MSR_K7_EVNTSEL3:
+diff --git a/block/blk-lib.c b/block/blk-lib.c
+index 2bc544ce3d2e..0bdc77888dc5 100644
+--- a/block/blk-lib.c
++++ b/block/blk-lib.c
+@@ -59,10 +59,18 @@ int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
+               unsigned int req_sects;
+               sector_t end_sect, tmp;
+ 
+-              /* Make sure bi_size doesn't overflow */
+-              req_sects = min_t(sector_t, nr_sects, UINT_MAX >> 9);
++              /*
++               * Issue in chunks of the user defined max discard setting,
++               * ensuring that bi_size doesn't overflow
++               */
++              req_sects = min_t(sector_t, nr_sects,
++                                      q->limits.max_discard_sectors);
++              if (!req_sects)
++                      goto fail;
++              if (req_sects > UINT_MAX >> 9)
++                      req_sects = UINT_MAX >> 9;
+ 
+-              /**
++              /*
+                * If splitting a request, and the next starting sector would be
+                * misaligned, stop the discard at the previous aligned sector.
+                */
+@@ -96,6 +104,14 @@ int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
+ 
+       *biop = bio;
+       return 0;
++
++fail:
++      if (bio) {
++              submit_bio_wait(bio);
++              bio_put(bio);
++      }
++      *biop = NULL;
++      return -EOPNOTSUPP;
+ }
+ EXPORT_SYMBOL(__blkdev_issue_discard);
+ 
+diff --git a/drivers/gpio/gpio-max7301.c b/drivers/gpio/gpio-max7301.c
+index 05813fbf3daf..647dfbbc4e1c 100644
+--- a/drivers/gpio/gpio-max7301.c
++++ b/drivers/gpio/gpio-max7301.c
+@@ -25,7 +25,7 @@ static int max7301_spi_write(struct device *dev, unsigned int reg,
+       struct spi_device *spi = to_spi_device(dev);
+       u16 word = ((reg & 0x7F) << 8) | (val & 0xFF);
+ 
+-      return spi_write(spi, (const u8 *)&word, sizeof(word));
++      return spi_write_then_read(spi, &word, sizeof(word), NULL, 0);
+ }
+ 
+ /* A read from the MAX7301 means two transfers; here, one message each */
+@@ -37,14 +37,8 @@ static int max7301_spi_read(struct device *dev, unsigned int reg)
+       struct spi_device *spi = to_spi_device(dev);
+ 
+       word = 0x8000 | (reg << 8);
+-      ret = spi_write(spi, (const u8 *)&word, sizeof(word));
+-      if (ret)
+-              return ret;
+-      /*
+-       * This relies on the fact, that a transfer with NULL tx_buf shifts out
+-       * zero bytes (=NOOP for MAX7301)
+-       */
+-      ret = spi_read(spi, (u8 *)&word, sizeof(word));
++      ret = spi_write_then_read(spi, &word, sizeof(word), &word,
++                                sizeof(word));
+       if (ret)
+               return ret;
+       return word & 0xff;
+diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
+index 33d4bd505b5b..57d157e94bd6 100644
+--- a/drivers/gpio/gpiolib-acpi.c
++++ b/drivers/gpio/gpiolib-acpi.c
+@@ -23,11 +23,28 @@
+ 
+ #include "gpiolib.h"
+ 
++/**
++ * struct acpi_gpio_event - ACPI GPIO event handler data
++ *
++ * @node:       list-entry of the events list of the struct acpi_gpio_chip
++ * @handle:     handle of ACPI method to execute when the IRQ triggers
++ * @handler:    irq_handler to pass to request_irq when requesting the IRQ
++ * @pin:        GPIO pin number on the gpio_chip
++ * @irq:        Linux IRQ number for the event, for request_ / free_irq
++ * @irqflags:     flags to pass to request_irq when requesting the IRQ
++ * @irq_is_wake:  If the ACPI flags indicate the IRQ is a wakeup source
++ * @is_requested: True if request_irq has been done
++ * @desc:       gpio_desc for the GPIO pin for this event
++ */
+ struct acpi_gpio_event {
+       struct list_head node;
+       acpi_handle handle;
++      irq_handler_t handler;
+       unsigned int pin;
+       unsigned int irq;
++      unsigned long irqflags;
++      bool irq_is_wake;
++      bool irq_requested;
+       struct gpio_desc *desc;
+ };
+ 
+@@ -53,10 +70,10 @@ struct acpi_gpio_chip {
+ 
+ /*
+  * For gpiochips which call acpi_gpiochip_request_interrupts() before late_init
+- * (so builtin drivers) we register the ACPI GpioInt event handlers from a
++ * (so builtin drivers) we register the ACPI GpioInt IRQ handlers from a
+  * late_initcall_sync handler, so that other builtin drivers can register their
+  * OpRegions before the event handlers can run.  This list contains gpiochips
+- * for which the acpi_gpiochip_request_interrupts() has been deferred.
++ * for which the acpi_gpiochip_request_irqs() call has been deferred.
+  */
+ static DEFINE_MUTEX(acpi_gpio_deferred_req_irqs_lock);
+ static LIST_HEAD(acpi_gpio_deferred_req_irqs_list);
+@@ -194,8 +211,42 @@ bool acpi_gpio_get_irq_resource(struct acpi_resource *ares,
+ }
+ EXPORT_SYMBOL_GPL(acpi_gpio_get_irq_resource);
+ 
+-static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares,
+-                                                 void *context)
++static void acpi_gpiochip_request_irq(struct acpi_gpio_chip *acpi_gpio,
++                                    struct acpi_gpio_event *event)
++{
++      int ret, value;
++
++      ret = request_threaded_irq(event->irq, NULL, event->handler,
++                                 event->irqflags, "ACPI:Event", event);
++      if (ret) {
++              dev_err(acpi_gpio->chip->parent,
++                      "Failed to setup interrupt handler for %d\n",
++                      event->irq);
++              return;
++      }
++
++      if (event->irq_is_wake)
++              enable_irq_wake(event->irq);
++
++      event->irq_requested = true;
++
++      /* Make sure we trigger the initial state of edge-triggered IRQs */
++      value = gpiod_get_raw_value_cansleep(event->desc);
++      if (((event->irqflags & IRQF_TRIGGER_RISING) && value == 1) ||
++          ((event->irqflags & IRQF_TRIGGER_FALLING) && value == 0))
++              event->handler(event->irq, event);
++}
++
++static void acpi_gpiochip_request_irqs(struct acpi_gpio_chip *acpi_gpio)
++{
++      struct acpi_gpio_event *event;
++
++      list_for_each_entry(event, &acpi_gpio->events, node)
++              acpi_gpiochip_request_irq(acpi_gpio, event);
++}
++
++static acpi_status acpi_gpiochip_alloc_event(struct acpi_resource *ares,
++                                           void *context)
+ {
+       struct acpi_gpio_chip *acpi_gpio = context;
+       struct gpio_chip *chip = acpi_gpio->chip;
+@@ -204,8 +255,7 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares,
+       struct acpi_gpio_event *event;
+       irq_handler_t handler = NULL;
+       struct gpio_desc *desc;
+-      unsigned long irqflags;
+-      int ret, pin, irq, value;
++      int ret, pin, irq;
+ 
+       if (!acpi_gpio_get_irq_resource(ares, &agpio))
+               return AE_OK;
+@@ -240,8 +290,6 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares,
+ 
+       gpiod_direction_input(desc);
+ 
+-      value = gpiod_get_value_cansleep(desc);
+-
+       ret = gpiochip_lock_as_irq(chip, pin);
+       if (ret) {
+               dev_err(chip->parent, "Failed to lock GPIO as interrupt\n");
+@@ -254,64 +302,42 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares,
+               goto fail_unlock_irq;
+       }
+ 
+-      irqflags = IRQF_ONESHOT;
++      event = kzalloc(sizeof(*event), GFP_KERNEL);
++      if (!event)
++              goto fail_unlock_irq;
++
++      event->irqflags = IRQF_ONESHOT;
+       if (agpio->triggering == ACPI_LEVEL_SENSITIVE) {
+               if (agpio->polarity == ACPI_ACTIVE_HIGH)
+-                      irqflags |= IRQF_TRIGGER_HIGH;
++                      event->irqflags |= IRQF_TRIGGER_HIGH;
+               else
+-                      irqflags |= IRQF_TRIGGER_LOW;
++                      event->irqflags |= IRQF_TRIGGER_LOW;
+       } else {
+               switch (agpio->polarity) {
+               case ACPI_ACTIVE_HIGH:
+-                      irqflags |= IRQF_TRIGGER_RISING;
++                      event->irqflags |= IRQF_TRIGGER_RISING;
+                       break;
+               case ACPI_ACTIVE_LOW:
+-                      irqflags |= IRQF_TRIGGER_FALLING;
++                      event->irqflags |= IRQF_TRIGGER_FALLING;
+                       break;
+               default:
+-                      irqflags |= IRQF_TRIGGER_RISING |
+-                                  IRQF_TRIGGER_FALLING;
++                      event->irqflags |= IRQF_TRIGGER_RISING |
++                                         IRQF_TRIGGER_FALLING;
+                       break;
+               }
+       }
+ 
+-      event = kzalloc(sizeof(*event), GFP_KERNEL);
+-      if (!event)
+-              goto fail_unlock_irq;
+-
+       event->handle = evt_handle;
++      event->handler = handler;
+       event->irq = irq;
++      event->irq_is_wake = agpio->wake_capable == ACPI_WAKE_CAPABLE;
+       event->pin = pin;
+       event->desc = desc;
+ 
+-      ret = request_threaded_irq(event->irq, NULL, handler, irqflags,
+-                                 "ACPI:Event", event);
+-      if (ret) {
+-              dev_err(chip->parent,
+-                      "Failed to setup interrupt handler for %d\n",
+-                      event->irq);
+-              goto fail_free_event;
+-      }
+-
+-      if (agpio->wake_capable == ACPI_WAKE_CAPABLE)
+-              enable_irq_wake(irq);
+-
+       list_add_tail(&event->node, &acpi_gpio->events);
+ 
+-      /*
+-       * Make sure we trigger the initial state of the IRQ when using RISING
+-       * or FALLING.  Note we run the handlers on late_init, the AML code
+-       * may refer to OperationRegions from other (builtin) drivers which
+-       * may be probed after us.
+-       */
+-      if (((irqflags & IRQF_TRIGGER_RISING) && value == 1) ||
+-          ((irqflags & IRQF_TRIGGER_FALLING) && value == 0))
+-              handler(event->irq, event);
+-
+       return AE_OK;
+ 
+-fail_free_event:
+-      kfree(event);
+ fail_unlock_irq:
+       gpiochip_unlock_as_irq(chip, pin);
+ fail_free_desc:
+@@ -348,6 +374,9 @@ void acpi_gpiochip_request_interrupts(struct gpio_chip *chip)
+       if (ACPI_FAILURE(status))
+               return;
+ 
++      acpi_walk_resources(handle, "_AEI",
++                          acpi_gpiochip_alloc_event, acpi_gpio);
++
+       mutex_lock(&acpi_gpio_deferred_req_irqs_lock);
+       defer = !acpi_gpio_deferred_req_irqs_done;
+       if (defer)
+@@ -358,8 +387,7 @@ void acpi_gpiochip_request_interrupts(struct gpio_chip *chip)
+       if (defer)
+               return;
+ 
+-      acpi_walk_resources(handle, "_AEI",
+-                          acpi_gpiochip_request_interrupt, acpi_gpio);
++      acpi_gpiochip_request_irqs(acpi_gpio);
+ }
+ EXPORT_SYMBOL_GPL(acpi_gpiochip_request_interrupts);
+ 
+@@ -396,10 +424,13 @@ void acpi_gpiochip_free_interrupts(struct gpio_chip *chip)
+       list_for_each_entry_safe_reverse(event, ep, &acpi_gpio->events, node) {
+               struct gpio_desc *desc;
+ 
+-              if (irqd_is_wakeup_set(irq_get_irq_data(event->irq)))
+-                      disable_irq_wake(event->irq);
++              if (event->irq_requested) {
++                      if (event->irq_is_wake)
++                              disable_irq_wake(event->irq);
++
++                      free_irq(event->irq, event);
++              }
+ 
+-              free_irq(event->irq, event);
+               desc = event->desc;
+               if (WARN_ON(IS_ERR(desc)))
+                       continue;
+@@ -1253,23 +1284,16 @@ bool acpi_can_fallback_to_crs(struct acpi_device *adev, const char *con_id)
+       return con_id == NULL;
+ }
+ 
+-/* Run deferred acpi_gpiochip_request_interrupts() */
+-static int acpi_gpio_handle_deferred_request_interrupts(void)
++/* Run deferred acpi_gpiochip_request_irqs() */
++static int acpi_gpio_handle_deferred_request_irqs(void)
+ {
+       struct acpi_gpio_chip *acpi_gpio, *tmp;
+ 
+       mutex_lock(&acpi_gpio_deferred_req_irqs_lock);
+       list_for_each_entry_safe(acpi_gpio, tmp,
+                                &acpi_gpio_deferred_req_irqs_list,
+-                               deferred_req_irqs_list_entry) {
+-              acpi_handle handle;
+-
+-              handle = ACPI_HANDLE(acpi_gpio->chip->parent);
+-              acpi_walk_resources(handle, "_AEI",
+-                                  acpi_gpiochip_request_interrupt, acpi_gpio);
+-
+-              list_del_init(&acpi_gpio->deferred_req_irqs_list_entry);
+-      }
++                               deferred_req_irqs_list_entry)
++              acpi_gpiochip_request_irqs(acpi_gpio);
+ 
+       acpi_gpio_deferred_req_irqs_done = true;
+       mutex_unlock(&acpi_gpio_deferred_req_irqs_lock);
+@@ -1277,4 +1301,4 @@ static int acpi_gpio_handle_deferred_request_interrupts(void)
+       return 0;
+ }
+ /* We must use _sync so that this runs after the first deferred_probe run */
+-late_initcall_sync(acpi_gpio_handle_deferred_request_interrupts);
++late_initcall_sync(acpi_gpio_handle_deferred_request_irqs);
+diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
+index a9ae6dd2d593..53f319369de5 100644
+--- a/drivers/gpu/drm/drm_ioctl.c
++++ b/drivers/gpu/drm/drm_ioctl.c
+@@ -37,6 +37,7 @@
+ 
+ #include <linux/pci.h>
+ #include <linux/export.h>
++#include <linux/nospec.h>
+ 
+ /**
+  * DOC: getunique and setversion story
+@@ -778,13 +779,17 @@ long drm_ioctl(struct file *filp,
+ 
+       if (is_driver_ioctl) {
+               /* driver ioctl */
+-              if (nr - DRM_COMMAND_BASE >= dev->driver->num_ioctls)
++              unsigned int index = nr - DRM_COMMAND_BASE;
++
++              if (index >= dev->driver->num_ioctls)
+                       goto err_i1;
+-              ioctl = &dev->driver->ioctls[nr - DRM_COMMAND_BASE];
++              index = array_index_nospec(index, dev->driver->num_ioctls);
++              ioctl = &dev->driver->ioctls[index];
+       } else {
+               /* core ioctl */
+               if (nr >= DRM_CORE_IOCTL_COUNT)
+                       goto err_i1;
++              nr = array_index_nospec(nr, DRM_CORE_IOCTL_COUNT);
+               ioctl = &drm_ioctls[nr];
+       }
+ 
+@@ -866,6 +871,7 @@ bool drm_ioctl_flags(unsigned int nr, unsigned int *flags)
+ 
+       if (nr >= DRM_CORE_IOCTL_COUNT)
+               return false;
++      nr = array_index_nospec(nr, DRM_CORE_IOCTL_COUNT);
+ 
+       *flags = drm_ioctls[nr].flags;
+       return true;
+diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
+index 2cd134dd94d2..4218a616f1d3 100644
+--- a/drivers/hv/vmbus_drv.c
++++ b/drivers/hv/vmbus_drv.c
+@@ -300,6 +300,8 @@ static ssize_t out_intr_mask_show(struct device *dev,
+ 
+       if (!hv_dev->channel)
+               return -ENODEV;
++      if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
++              return -EINVAL;
+       hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
+       return sprintf(buf, "%d\n", outbound.current_interrupt_mask);
+ }
+@@ -313,6 +315,8 @@ static ssize_t out_read_index_show(struct device *dev,
+ 
+       if (!hv_dev->channel)
+               return -ENODEV;
++      if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
++              return -EINVAL;
+       hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
+       return sprintf(buf, "%d\n", outbound.current_read_index);
+ }
+@@ -327,6 +331,8 @@ static ssize_t out_write_index_show(struct device *dev,
+ 
+       if (!hv_dev->channel)
+               return -ENODEV;
++      if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
++              return -EINVAL;
+       hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
+       return sprintf(buf, "%d\n", outbound.current_write_index);
+ }
+@@ -341,6 +347,8 @@ static ssize_t out_read_bytes_avail_show(struct device *dev,
+ 
+       if (!hv_dev->channel)
+               return -ENODEV;
++      if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
++              return -EINVAL;
+       hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
+       return sprintf(buf, "%d\n", outbound.bytes_avail_toread);
+ }
+@@ -355,6 +363,8 @@ static ssize_t out_write_bytes_avail_show(struct device *dev,
+ 
+       if (!hv_dev->channel)
+               return -ENODEV;
++      if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
++              return -EINVAL;
+       hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
+       return sprintf(buf, "%d\n", outbound.bytes_avail_towrite);
+ }
+@@ -368,6 +378,8 @@ static ssize_t in_intr_mask_show(struct device *dev,
+ 
+       if (!hv_dev->channel)
+               return -ENODEV;
++      if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
++              return -EINVAL;
+       hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
+       return sprintf(buf, "%d\n", inbound.current_interrupt_mask);
+ }
+@@ -381,6 +393,8 @@ static ssize_t in_read_index_show(struct device *dev,
+ 
+       if (!hv_dev->channel)
+               return -ENODEV;
++      if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
++              return -EINVAL;
+       hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
+       return sprintf(buf, "%d\n", inbound.current_read_index);
+ }
+@@ -394,6 +408,8 @@ static ssize_t in_write_index_show(struct device *dev,
+ 
+       if (!hv_dev->channel)
+               return -ENODEV;
++      if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
++              return -EINVAL;
+       hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
+       return sprintf(buf, "%d\n", inbound.current_write_index);
+ }
+@@ -408,6 +424,8 @@ static ssize_t in_read_bytes_avail_show(struct device *dev,
+ 
+       if (!hv_dev->channel)
+               return -ENODEV;
++      if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
++              return -EINVAL;
+       hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
+       return sprintf(buf, "%d\n", inbound.bytes_avail_toread);
+ }
+@@ -422,6 +440,8 @@ static ssize_t in_write_bytes_avail_show(struct device *dev,
+ 
+       if (!hv_dev->channel)
+               return -ENODEV;
++      if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
++              return -EINVAL;
+       hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
+       return sprintf(buf, "%d\n", inbound.bytes_avail_towrite);
+ }
+diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
+index 60105ba77889..47f3f562d86f 100644
+--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
++++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
+@@ -1775,8 +1775,8 @@ static void __srpt_close_all_ch(struct srpt_device *sdev)
+ 
+       list_for_each_entry(ch, &sdev->rch_list, list) {
+               if (srpt_disconnect_ch(ch) >= 0)
+-                      pr_info("Closing channel %s-%d because target %s has been disabled\n",
+-                              ch->sess_name, ch->qp->qp_num,
++                      pr_info("Closing channel %s because target %s has been disabled\n",
++                              ch->sess_name,
+                               sdev->device->name);
+               srpt_close_ch(ch);
+       }
+diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
+index bad5c1bf4ed9..814a04e8fdd7 100644
+--- a/drivers/mmc/core/mmc.c
++++ b/drivers/mmc/core/mmc.c
+@@ -30,6 +30,7 @@
+ #include "pwrseq.h"
+ 
+ #define DEFAULT_CMD6_TIMEOUT_MS       500
++#define MIN_CACHE_EN_TIMEOUT_MS 1600
+ 
+ static const unsigned int tran_exp[] = {
+       10000,          100000,         1000000,        10000000,
+@@ -526,8 +527,7 @@ static int mmc_decode_ext_csd(struct mmc_card *card, u8 *ext_csd)
+                       card->cid.year += 16;
+ 
+               /* check whether the eMMC card supports BKOPS */
+-              if (!mmc_card_broken_hpi(card) &&
+-                  ext_csd[EXT_CSD_BKOPS_SUPPORT] & 0x1) {
++              if (ext_csd[EXT_CSD_BKOPS_SUPPORT] & 0x1) {
+                       card->ext_csd.bkops = 1;
+                       card->ext_csd.man_bkops_en =
+                                       (ext_csd[EXT_CSD_BKOPS_EN] &
+@@ -1755,20 +1755,26 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
+               if (err) {
+                       pr_warn("%s: Enabling HPI failed\n",
+                               mmc_hostname(card->host));
++                      card->ext_csd.hpi_en = 0;
+                       err = 0;
+-              } else
++              } else {
+                       card->ext_csd.hpi_en = 1;
++              }
+       }
+ 
+       /*
+-       * If cache size is higher than 0, this indicates
+-       * the existence of cache and it can be turned on.
++       * If cache size is higher than 0, this indicates the existence of cache
++       * and it can be turned on. Note that some eMMCs from Micron has been
++       * reported to need ~800 ms timeout, while enabling the cache after
++       * sudden power failure tests. Let's extend the timeout to a minimum of
++       * DEFAULT_CACHE_EN_TIMEOUT_MS and do it for all cards.
+        */
+-      if (!mmc_card_broken_hpi(card) &&
+-          card->ext_csd.cache_size > 0) {
++      if (card->ext_csd.cache_size > 0) {
++              unsigned int timeout_ms = MIN_CACHE_EN_TIMEOUT_MS;
++
++              timeout_ms = max(card->ext_csd.generic_cmd6_time, timeout_ms);
+               err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+-                              EXT_CSD_CACHE_CTRL, 1,
+-                              card->ext_csd.generic_cmd6_time);
++                              EXT_CSD_CACHE_CTRL, 1, timeout_ms);
+               if (err && err != -EBADMSG)
+                       goto free_card;
+ 
+diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
+index 9e03fada16dc..3f3ff7530b76 100644
+--- a/drivers/mmc/host/omap_hsmmc.c
++++ b/drivers/mmc/host/omap_hsmmc.c
+@@ -2083,7 +2083,6 @@ static int omap_hsmmc_probe(struct platform_device *pdev)
+       mmc->max_blk_size = 512;       /* Block Length at max can be 1024 */
+       mmc->max_blk_count = 0xFFFF;    /* No. of Blocks is 16 bits */
+       mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
+-      mmc->max_seg_size = mmc->max_req_size;
+ 
+       mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED |
+                    MMC_CAP_WAIT_WHILE_BUSY | MMC_CAP_ERASE | MMC_CAP_CMD23;
+@@ -2113,6 +2112,17 @@ static int omap_hsmmc_probe(struct platform_device *pdev)
+               goto err_irq;
+       }
+ 
++      /*
++       * Limit the maximum segment size to the lower of the request size
++       * and the DMA engine device segment size limits.  In reality, with
++       * 32-bit transfers, the DMA engine can do longer segments than this
++       * but there is no way to represent that in the DMA model - if we
++       * increase this figure here, we get warnings from the DMA API debug.
++       */
++      mmc->max_seg_size = min3(mmc->max_req_size,
++                      dma_get_max_seg_size(host->rx_chan->device->dev),
++                      dma_get_max_seg_size(host->tx_chan->device->dev));
++
+       /* Request IRQ for MMC operations */
+       ret = devm_request_irq(&pdev->dev, host->irq, omap_hsmmc_irq, 0,
+                       mmc_hostname(mmc), host);
+diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
+index d7a3379ea668..18a0952f68a8 100644
+--- a/drivers/net/usb/hso.c
++++ b/drivers/net/usb/hso.c
+@@ -2806,6 +2806,12 @@ static int hso_get_config_data(struct usb_interface *interface)
+               return -EIO;
+       }
+ 
++      /* check if we have a valid interface */
++      if (if_num > 16) {
++              kfree(config_data);
++              return -EINVAL;
++      }
++
+       switch (config_data[if_num]) {
+       case 0x0:
+               result = 0;
+@@ -2876,10 +2882,18 @@ static int hso_probe(struct usb_interface *interface,
+ 
+       /* Get the interface/port specification from either driver_info or from
+        * the device itself */
+-      if (id->driver_info)
++      if (id->driver_info) {
++              /* if_num is controlled by the device, driver_info is a 0 terminated
++               * array. Make sure, the access is in bounds! */
++              for (i = 0; i <= if_num; ++i)
++                      if (((u32 *)(id->driver_info))[i] == 0)
++                              goto exit;
+               port_spec = ((u32 *)(id->driver_info))[if_num];
+-      else
++      } else {
+               port_spec = hso_get_config_data(interface);
++              if (port_spec < 0)
++                      goto exit;
++      }
+ 
+       /* Check if we need to switch to alt interfaces prior to port
+        * configuration */
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+index cebf0ce76d27..e9e466cae322 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+@@ -952,6 +952,15 @@ static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm)
+       int ret, i, j;
+       u16 cmd_wide_id =  WIDE_ID(PHY_OPS_GROUP, GEO_TX_POWER_LIMIT);
+ 
++      /*
++       * This command is not supported on earlier firmware versions.
++       * Unfortunately, we don't have a TLV API flag to rely on, so
++       * rely on the major version which is in the first byte of
++       * ucode_ver.
++       */
++      if (IWL_UCODE_SERIAL(mvm->fw->ucode_ver) < 41)
++              return 0;
++
+       ret = iwl_mvm_sar_get_wgds_table(mvm);
+       if (ret < 0) {
+               IWL_DEBUG_RADIO(mvm,
+diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+index 4cbc6cb8bf89..0ff247326d6c 100644
+--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
++++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+@@ -517,6 +517,56 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
+       {IWL_PCI_DEVICE(0x24FD, 0x9074, iwl8265_2ac_cfg)},
+ 
+ /* 9000 Series */
++      {IWL_PCI_DEVICE(0x02F0, 0x0030, iwl9560_2ac_cfg_soc)},
++      {IWL_PCI_DEVICE(0x02F0, 0x0034, iwl9560_2ac_cfg_soc)},
++      {IWL_PCI_DEVICE(0x02F0, 0x0038, iwl9560_2ac_cfg_soc)},
++      {IWL_PCI_DEVICE(0x02F0, 0x003C, iwl9560_2ac_cfg_soc)},
++      {IWL_PCI_DEVICE(0x02F0, 0x0060, iwl9461_2ac_cfg_soc)},
++      {IWL_PCI_DEVICE(0x02F0, 0x0064, iwl9461_2ac_cfg_soc)},
++      {IWL_PCI_DEVICE(0x02F0, 0x00A0, iwl9462_2ac_cfg_soc)},
++      {IWL_PCI_DEVICE(0x02F0, 0x00A4, iwl9462_2ac_cfg_soc)},
++      {IWL_PCI_DEVICE(0x02F0, 0x0230, iwl9560_2ac_cfg_soc)},
++      {IWL_PCI_DEVICE(0x02F0, 0x0234, iwl9560_2ac_cfg_soc)},
++      {IWL_PCI_DEVICE(0x02F0, 0x0238, iwl9560_2ac_cfg_soc)},
++      {IWL_PCI_DEVICE(0x02F0, 0x023C, iwl9560_2ac_cfg_soc)},
++      {IWL_PCI_DEVICE(0x02F0, 0x0260, iwl9461_2ac_cfg_soc)},
++      {IWL_PCI_DEVICE(0x02F0, 0x0264, iwl9461_2ac_cfg_soc)},
++      {IWL_PCI_DEVICE(0x02F0, 0x02A0, iwl9462_2ac_cfg_soc)},
++      {IWL_PCI_DEVICE(0x02F0, 0x02A4, iwl9462_2ac_cfg_soc)},
++      {IWL_PCI_DEVICE(0x02F0, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
++      {IWL_PCI_DEVICE(0x02F0, 0x1552, iwl9560_killer_2ac_cfg_soc)},
++      {IWL_PCI_DEVICE(0x02F0, 0x2030, iwl9560_2ac_cfg_soc)},
++      {IWL_PCI_DEVICE(0x02F0, 0x2034, iwl9560_2ac_cfg_soc)},
++      {IWL_PCI_DEVICE(0x02F0, 0x4030, iwl9560_2ac_cfg_soc)},
++      {IWL_PCI_DEVICE(0x02F0, 0x4034, iwl9560_2ac_cfg_soc)},
++      {IWL_PCI_DEVICE(0x02F0, 0x40A4, iwl9462_2ac_cfg_soc)},
++      {IWL_PCI_DEVICE(0x02F0, 0x4234, iwl9560_2ac_cfg_soc)},
++      {IWL_PCI_DEVICE(0x02F0, 0x42A4, iwl9462_2ac_cfg_soc)},
++      {IWL_PCI_DEVICE(0x06F0, 0x0030, iwl9560_2ac_cfg_soc)},
++      {IWL_PCI_DEVICE(0x06F0, 0x0034, iwl9560_2ac_cfg_soc)},
++      {IWL_PCI_DEVICE(0x06F0, 0x0038, iwl9560_2ac_cfg_soc)},
++      {IWL_PCI_DEVICE(0x06F0, 0x003C, iwl9560_2ac_cfg_soc)},
++      {IWL_PCI_DEVICE(0x06F0, 0x0060, iwl9461_2ac_cfg_soc)},
++      {IWL_PCI_DEVICE(0x06F0, 0x0064, iwl9461_2ac_cfg_soc)},
++      {IWL_PCI_DEVICE(0x06F0, 0x00A0, iwl9462_2ac_cfg_soc)},
++      {IWL_PCI_DEVICE(0x06F0, 0x00A4, iwl9462_2ac_cfg_soc)},
++      {IWL_PCI_DEVICE(0x06F0, 0x0230, iwl9560_2ac_cfg_soc)},
++      {IWL_PCI_DEVICE(0x06F0, 0x0234, iwl9560_2ac_cfg_soc)},
++      {IWL_PCI_DEVICE(0x06F0, 0x0238, iwl9560_2ac_cfg_soc)},
++      {IWL_PCI_DEVICE(0x06F0, 0x023C, iwl9560_2ac_cfg_soc)},
++      {IWL_PCI_DEVICE(0x06F0, 0x0260, iwl9461_2ac_cfg_soc)},
++      {IWL_PCI_DEVICE(0x06F0, 0x0264, iwl9461_2ac_cfg_soc)},
++      {IWL_PCI_DEVICE(0x06F0, 0x02A0, iwl9462_2ac_cfg_soc)},
++      {IWL_PCI_DEVICE(0x06F0, 0x02A4, iwl9462_2ac_cfg_soc)},
++      {IWL_PCI_DEVICE(0x06F0, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
++      {IWL_PCI_DEVICE(0x06F0, 0x1552, iwl9560_killer_2ac_cfg_soc)},
++      {IWL_PCI_DEVICE(0x06F0, 0x2030, iwl9560_2ac_cfg_soc)},
++      {IWL_PCI_DEVICE(0x06F0, 0x2034, iwl9560_2ac_cfg_soc)},
++      {IWL_PCI_DEVICE(0x06F0, 0x4030, iwl9560_2ac_cfg_soc)},
++      {IWL_PCI_DEVICE(0x06F0, 0x4034, iwl9560_2ac_cfg_soc)},
++      {IWL_PCI_DEVICE(0x06F0, 0x40A4, iwl9462_2ac_cfg_soc)},
++      {IWL_PCI_DEVICE(0x06F0, 0x4234, iwl9560_2ac_cfg_soc)},
++      {IWL_PCI_DEVICE(0x06F0, 0x42A4, iwl9462_2ac_cfg_soc)},
+       {IWL_PCI_DEVICE(0x2526, 0x0010, iwl9260_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x2526, 0x0014, iwl9260_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x2526, 0x0018, iwl9260_2ac_cfg)},
+diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
+index c7b284587365..39754cc90043 100644
+--- a/drivers/scsi/sd.c
++++ b/drivers/scsi/sd.c
+@@ -133,6 +133,7 @@ static DEFINE_MUTEX(sd_ref_mutex);
+ 
+ static struct kmem_cache *sd_cdb_cache;
+ static mempool_t *sd_cdb_pool;
++static mempool_t *sd_page_pool;
+ 
+ static const char *sd_cache_types[] = {
+       "write through", "none", "write back",
+@@ -759,9 +760,10 @@ static int sd_setup_unmap_cmnd(struct scsi_cmnd *cmd)
+       unsigned int data_len = 24;
+       char *buf;
+ 
+-      rq->special_vec.bv_page = alloc_page(GFP_ATOMIC | __GFP_ZERO);
++      rq->special_vec.bv_page = mempool_alloc(sd_page_pool, GFP_ATOMIC);
+       if (!rq->special_vec.bv_page)
+               return BLKPREP_DEFER;
++      clear_highpage(rq->special_vec.bv_page);
+       rq->special_vec.bv_offset = 0;
+       rq->special_vec.bv_len = data_len;
+       rq->rq_flags |= RQF_SPECIAL_PAYLOAD;
+@@ -792,9 +794,10 @@ static int sd_setup_write_same16_cmnd(struct scsi_cmnd *cmd, bool unmap)
+       u32 nr_sectors = blk_rq_sectors(rq) >> (ilog2(sdp->sector_size) - 9);
+       u32 data_len = sdp->sector_size;
+ 
+-      rq->special_vec.bv_page = alloc_page(GFP_ATOMIC | __GFP_ZERO);
++      rq->special_vec.bv_page = mempool_alloc(sd_page_pool, GFP_ATOMIC);
+       if (!rq->special_vec.bv_page)
+               return BLKPREP_DEFER;
++      clear_highpage(rq->special_vec.bv_page);
+       rq->special_vec.bv_offset = 0;
+       rq->special_vec.bv_len = data_len;
+       rq->rq_flags |= RQF_SPECIAL_PAYLOAD;
+@@ -822,9 +825,10 @@ static int sd_setup_write_same10_cmnd(struct scsi_cmnd *cmd, bool unmap)
+       u32 nr_sectors = blk_rq_sectors(rq) >> (ilog2(sdp->sector_size) - 9);
+       u32 data_len = sdp->sector_size;
+ 
+-      rq->special_vec.bv_page = alloc_page(GFP_ATOMIC | __GFP_ZERO);
++      rq->special_vec.bv_page = mempool_alloc(sd_page_pool, GFP_ATOMIC);
+       if (!rq->special_vec.bv_page)
+               return BLKPREP_DEFER;
++      clear_highpage(rq->special_vec.bv_page);
+       rq->special_vec.bv_offset = 0;
+       rq->special_vec.bv_len = data_len;
+       rq->rq_flags |= RQF_SPECIAL_PAYLOAD;
+@@ -1299,7 +1303,7 @@ static void sd_uninit_command(struct scsi_cmnd *SCpnt)
+               sd_zbc_write_unlock_zone(SCpnt);
+ 
+       if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
+-              __free_page(rq->special_vec.bv_page);
++              mempool_free(rq->special_vec.bv_page, sd_page_pool);
+ 
+       if (SCpnt->cmnd != scsi_req(rq)->cmd) {
+               cmnd = SCpnt->cmnd;
+@@ -3655,6 +3659,13 @@ static int __init init_sd(void)
+               goto err_out_cache;
+       }
+ 
++      sd_page_pool = mempool_create_page_pool(SD_MEMPOOL_SIZE, 0);
++      if (!sd_page_pool) {
++              printk(KERN_ERR "sd: can't init discard page pool\n");
++              err = -ENOMEM;
++              goto err_out_ppool;
++      }
++
+       err = scsi_register_driver(&sd_template.gendrv);
+       if (err)
+               goto err_out_driver;
+@@ -3662,6 +3673,9 @@ static int __init init_sd(void)
+       return 0;
+ 
+ err_out_driver:
++      mempool_destroy(sd_page_pool);
++
++err_out_ppool:
+       mempool_destroy(sd_cdb_pool);
+ 
+ err_out_cache:
+@@ -3688,6 +3702,7 @@ static void __exit exit_sd(void)
+ 
+       scsi_unregister_driver(&sd_template.gendrv);
+       mempool_destroy(sd_cdb_pool);
++      mempool_destroy(sd_page_pool);
+       kmem_cache_destroy(sd_cdb_cache);
+ 
+       class_unregister(&sd_disk_class);
+diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
+index d51ca243a028..df18d07d544d 100644
+--- a/drivers/spi/spi-imx.c
++++ b/drivers/spi/spi-imx.c
+@@ -72,6 +72,7 @@ struct spi_imx_data;
+ 
+ struct spi_imx_devtype_data {
+       void (*intctrl)(struct spi_imx_data *, int);
++      int (*prepare_message)(struct spi_imx_data *, struct spi_message *);
+       int (*config)(struct spi_device *);
+       void (*trigger)(struct spi_imx_data *);
+       int (*rx_available)(struct spi_imx_data *);
+@@ -439,11 +440,12 @@ static void mx51_ecspi_trigger(struct spi_imx_data *spi_imx)
+       writel(reg, spi_imx->base + MX51_ECSPI_CTRL);
+ }
+ 
+-static int mx51_ecspi_config(struct spi_device *spi)
++static int mx51_ecspi_prepare_message(struct spi_imx_data *spi_imx,
++                                    struct spi_message *msg)
+ {
+-      struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master);
++      struct spi_device *spi = msg->spi;
+       u32 ctrl = MX51_ECSPI_CTRL_ENABLE;
+-      u32 clk = spi_imx->speed_hz, delay, reg;
++      u32 testreg;
+       u32 cfg = readl(spi_imx->base + MX51_ECSPI_CONFIG);
+ 
+       /*
+@@ -461,14 +463,21 @@ static int mx51_ecspi_config(struct spi_device *spi)
+       if (spi->mode & SPI_READY)
+               ctrl |= MX51_ECSPI_CTRL_DRCTL(spi_imx->spi_drctl);
+ 
+-      /* set clock speed */
+-      ctrl |= mx51_ecspi_clkdiv(spi_imx, spi_imx->speed_hz, &clk);
+-      spi_imx->spi_bus_clk = clk;
+-
+       /* set chip select to use */
+       ctrl |= MX51_ECSPI_CTRL_CS(spi->chip_select);
+ 
+-      ctrl |= (spi_imx->bits_per_word - 1) << MX51_ECSPI_CTRL_BL_OFFSET;
++      /*
++       * The ctrl register must be written first, with the EN bit set other
++       * registers must not be written to.
++       */
++      writel(ctrl, spi_imx->base + MX51_ECSPI_CTRL);
++
++      testreg = readl(spi_imx->base + MX51_ECSPI_TESTREG);
++      if (spi->mode & SPI_LOOP)
++              testreg |= MX51_ECSPI_TESTREG_LBC;
++      else
++              testreg &= ~MX51_ECSPI_TESTREG_LBC;
++      writel(testreg, spi_imx->base + MX51_ECSPI_TESTREG);
+ 
+       cfg |= MX51_ECSPI_CONFIG_SBBCTRL(spi->chip_select);
+ 
+@@ -484,26 +493,38 @@ static int mx51_ecspi_config(struct spi_device *spi)
+               cfg &= ~MX51_ECSPI_CONFIG_SCLKPOL(spi->chip_select);
+               cfg &= ~MX51_ECSPI_CONFIG_SCLKCTL(spi->chip_select);
+       }
++
+       if (spi->mode & SPI_CS_HIGH)
+               cfg |= MX51_ECSPI_CONFIG_SSBPOL(spi->chip_select);
+       else
+               cfg &= ~MX51_ECSPI_CONFIG_SSBPOL(spi->chip_select);
+ 
++      writel(cfg, spi_imx->base + MX51_ECSPI_CONFIG);
++
++      return 0;
++}
++
++static int mx51_ecspi_config(struct spi_device *spi)
++{
++      struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master);
++      u32 ctrl = readl(spi_imx->base + MX51_ECSPI_CTRL);
++      u32 clk = spi_imx->speed_hz, delay;
++
++      /* Clear BL field and set the right value */
++      ctrl &= ~MX51_ECSPI_CTRL_BL_MASK;
++      ctrl |= (spi_imx->bits_per_word - 1) << MX51_ECSPI_CTRL_BL_OFFSET;
++
++      /* set clock speed */
++      ctrl &= ~(0xf << MX51_ECSPI_CTRL_POSTDIV_OFFSET |
++                0xf << MX51_ECSPI_CTRL_PREDIV_OFFSET);
++      ctrl |= mx51_ecspi_clkdiv(spi_imx, spi_imx->speed_hz, &clk);
++      spi_imx->spi_bus_clk = clk;
++
+       if (spi_imx->usedma)
+               ctrl |= MX51_ECSPI_CTRL_SMC;
+ 
+-      /* CTRL register always go first to bring out controller from reset */
+       writel(ctrl, spi_imx->base + MX51_ECSPI_CTRL);
+ 
+-      reg = readl(spi_imx->base + MX51_ECSPI_TESTREG);
+-      if (spi->mode & SPI_LOOP)
+-              reg |= MX51_ECSPI_TESTREG_LBC;
+-      else
+-              reg &= ~MX51_ECSPI_TESTREG_LBC;
+-      writel(reg, spi_imx->base + MX51_ECSPI_TESTREG);
+-
+-      writel(cfg, spi_imx->base + MX51_ECSPI_CONFIG);
+-
+       /*
+        * Wait until the changes in the configuration register CONFIGREG
+        * propagate into the hardware. It takes exactly one tick of the
+@@ -525,7 +546,6 @@ static int mx51_ecspi_config(struct spi_device *spi)
+        * Configure the DMA register: setup the watermark
+        * and enable DMA request.
+        */
+-
+       writel(MX51_ECSPI_DMA_RX_WML(spi_imx->wml) |
+               MX51_ECSPI_DMA_TX_WML(spi_imx->wml) |
+               MX51_ECSPI_DMA_RXT_WML(spi_imx->wml) |
+@@ -599,6 +619,12 @@ static void mx31_trigger(struct spi_imx_data *spi_imx)
+       writel(reg, spi_imx->base + MXC_CSPICTRL);
+ }
+ 
++static int mx31_prepare_message(struct spi_imx_data *spi_imx,
++                              struct spi_message *msg)
++{
++      return 0;
++}
++
+ static int mx31_config(struct spi_device *spi)
+ {
+       struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master);
+@@ -695,6 +721,12 @@ static void mx21_trigger(struct spi_imx_data *spi_imx)
+       writel(reg, spi_imx->base + MXC_CSPICTRL);
+ }
+ 
++static int mx21_prepare_message(struct spi_imx_data *spi_imx,
++                              struct spi_message *msg)
++{
++      return 0;
++}
++
+ static int mx21_config(struct spi_device *spi)
+ {
+       struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master);
+@@ -764,6 +796,12 @@ static void mx1_trigger(struct spi_imx_data *spi_imx)
+       writel(reg, spi_imx->base + MXC_CSPICTRL);
+ }
+ 
++static int mx1_prepare_message(struct spi_imx_data *spi_imx,
++                             struct spi_message *msg)
++{
++      return 0;
++}
++
+ static int mx1_config(struct spi_device *spi)
+ {
+       struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master);
+@@ -798,6 +836,7 @@ static void mx1_reset(struct spi_imx_data *spi_imx)
+ 
+ static struct spi_imx_devtype_data imx1_cspi_devtype_data = {
+       .intctrl = mx1_intctrl,
++      .prepare_message = mx1_prepare_message,
+       .config = mx1_config,
+       .trigger = mx1_trigger,
+       .rx_available = mx1_rx_available,
+@@ -810,6 +849,7 @@ static struct spi_imx_devtype_data imx1_cspi_devtype_data = {
+ 
+ static struct spi_imx_devtype_data imx21_cspi_devtype_data = {
+       .intctrl = mx21_intctrl,
++      .prepare_message = mx21_prepare_message,
+       .config = mx21_config,
+       .trigger = mx21_trigger,
+       .rx_available = mx21_rx_available,
+@@ -823,6 +863,7 @@ static struct spi_imx_devtype_data imx21_cspi_devtype_data = {
+ static struct spi_imx_devtype_data imx27_cspi_devtype_data = {
+       /* i.mx27 cspi shares the functions with i.mx21 one */
+       .intctrl = mx21_intctrl,
++      .prepare_message = mx21_prepare_message,
+       .config = mx21_config,
+       .trigger = mx21_trigger,
+       .rx_available = mx21_rx_available,
+@@ -835,6 +876,7 @@ static struct spi_imx_devtype_data imx27_cspi_devtype_data = {
+ 
+ static struct spi_imx_devtype_data imx31_cspi_devtype_data = {
+       .intctrl = mx31_intctrl,
++      .prepare_message = mx31_prepare_message,
+       .config = mx31_config,
+       .trigger = mx31_trigger,
+       .rx_available = mx31_rx_available,
+@@ -848,6 +890,7 @@ static struct spi_imx_devtype_data imx31_cspi_devtype_data = {
+ static struct spi_imx_devtype_data imx35_cspi_devtype_data = {
+       /* i.mx35 and later cspi shares the functions with i.mx31 one */
+       .intctrl = mx31_intctrl,
++      .prepare_message = mx31_prepare_message,
+       .config = mx31_config,
+       .trigger = mx31_trigger,
+       .rx_available = mx31_rx_available,
+@@ -860,6 +903,7 @@ static struct spi_imx_devtype_data imx35_cspi_devtype_data = {
+ 
+ static struct spi_imx_devtype_data imx51_ecspi_devtype_data = {
+       .intctrl = mx51_ecspi_intctrl,
++      .prepare_message = mx51_ecspi_prepare_message,
+       .config = mx51_ecspi_config,
+       .trigger = mx51_ecspi_trigger,
+       .rx_available = mx51_ecspi_rx_available,
+@@ -872,6 +916,7 @@ static struct spi_imx_devtype_data imx51_ecspi_devtype_data = {
+ 
+ static struct spi_imx_devtype_data imx53_ecspi_devtype_data = {
+       .intctrl = mx51_ecspi_intctrl,
++      .prepare_message = mx51_ecspi_prepare_message,
+       .config = mx51_ecspi_config,
+       .trigger = mx51_ecspi_trigger,
+       .rx_available = mx51_ecspi_rx_available,
+@@ -1310,7 +1355,13 @@ spi_imx_prepare_message(struct spi_master *master, struct spi_message *msg)
+               return ret;
+       }
+ 
+-      return 0;
++      ret = spi_imx->devtype_data->prepare_message(spi_imx, msg);
++      if (ret) {
++              clk_disable(spi_imx->clk_ipg);
++              clk_disable(spi_imx->clk_per);
++      }
++
++      return ret;
+ }
+ 
+ static int
+diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
+index 6b2f6c41e2a9..997ff183c9cb 100644
+--- a/drivers/usb/host/xhci-hub.c
++++ b/drivers/usb/host/xhci-hub.c
+@@ -1512,7 +1512,8 @@ int xhci_bus_suspend(struct usb_hcd *hcd)
+               portsc_buf[port_index] = 0;
+ 
+               /* Bail out if a USB3 port has a new device in link training */
+-              if ((t1 & PORT_PLS_MASK) == XDEV_POLLING) {
++              if ((hcd->speed >= HCD_USB3) &&
++                  (t1 & PORT_PLS_MASK) == XDEV_POLLING) {
+                       bus_state->bus_suspended = 0;
+                       spin_unlock_irqrestore(&xhci->lock, flags);
+                       xhci_dbg(xhci, "Bus suspend bailout, port in polling\n");
+diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
+index 1ccff2d9dee9..cbc91536e512 100644
+--- a/drivers/usb/host/xhci.h
++++ b/drivers/usb/host/xhci.h
+@@ -1859,6 +1859,8 @@ struct xhci_hcd {
+       unsigned                sw_lpm_support:1;
+       /* support xHCI 1.0 spec USB2 hardware LPM */
+       unsigned                hw_lpm_support:1;
++      /* Broken Suspend flag for SNPS Suspend resume issue */
++      unsigned                broken_suspend:1;
+       /* cached usb2 extened protocol capabilites */
+       u32                     *ext_caps;
+       unsigned int            num_ext_caps;
+@@ -1871,8 +1873,6 @@ struct xhci_hcd {
+ 
+       /* platform-specific data -- must come last */
+       unsigned long           priv[0] __aligned(sizeof(s64));
+-      /* Broken Suspend flag for SNPS Suspend resume issue */
+-      u8                      broken_suspend;
+ };
+ 
+ /* Platform specific overrides to generic XHCI hc_driver ops */
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index 392fddc80c44..988be9ca2b4f 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -1167,6 +1167,10 @@ static const struct usb_device_id option_ids[] = {
+       { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1213, 0xff) },
+       { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1214),
+         .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) | RSVD(3) },
++      { USB_DEVICE(TELIT_VENDOR_ID, 0x1900),                          /* Telit LN940 (QMI) */
++        .driver_info = NCTRL(0) | RSVD(1) },
++      { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1901, 0xff),    /* Telit LN940 (MBIM) */
++        .driver_info = NCTRL(0) },
+       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622, 0xff, 0xff, 0xff) }, /* ZTE WCDMA products */
+       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0002, 0xff, 0xff, 0xff),
+         .driver_info = RSVD(1) },
+@@ -1331,6 +1335,7 @@ static const struct usb_device_id option_ids[] = {
+         .driver_info = RSVD(4) },
+       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0414, 0xff, 0xff, 0xff) },
+       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0417, 0xff, 0xff, 0xff) },
++      { USB_DEVICE_INTERFACE_CLASS(ZTE_VENDOR_ID, 0x0602, 0xff) },    /* GosunCn ZTE WeLink ME3630 (MBIM mode) */
+       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1008, 0xff, 0xff, 0xff),
+         .driver_info = RSVD(4) },
+       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1010, 0xff, 0xff, 0xff),
+@@ -1534,6 +1539,7 @@ static const struct usb_device_id option_ids[] = {
+         .driver_info = RSVD(2) },
+       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1428, 0xff, 0xff, 0xff),  /* Telewell TW-LTE 4G v2 */
+         .driver_info = RSVD(2) },
++      { USB_DEVICE_INTERFACE_CLASS(ZTE_VENDOR_ID, 0x1476, 0xff) },    /* GosunCn ZTE WeLink ME3630 (ECM/NCM mode) */
+       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1533, 0xff, 0xff, 0xff) },
+       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1534, 0xff, 0xff, 0xff) },
+       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1535, 0xff, 0xff, 0xff) },
+@@ -1761,6 +1767,7 @@ static const struct usb_device_id option_ids[] = {
+       { USB_DEVICE_AND_INTERFACE_INFO(ALINK_VENDOR_ID, ALINK_PRODUCT_3GU, 0xff, 0xff, 0xff) },
+       { USB_DEVICE(ALINK_VENDOR_ID, SIMCOM_PRODUCT_SIM7100E),
+         .driver_info = RSVD(5) | RSVD(6) },
++      { USB_DEVICE_INTERFACE_CLASS(0x1e0e, 0x9003, 0xff) },   /* Simcom SIM7500/SIM7600 MBIM mode */
+       { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X060S_X200),
+         .driver_info = NCTRL(0) | NCTRL(1) | RSVD(4) },
+       { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X220_X500D),
+@@ -1942,7 +1949,14 @@ static const struct usb_device_id option_ids[] = {
+       { USB_DEVICE_AND_INTERFACE_INFO(WETELECOM_VENDOR_ID, WETELECOM_PRODUCT_WMD200, 0xff, 0xff, 0xff) },
+       { USB_DEVICE_AND_INTERFACE_INFO(WETELECOM_VENDOR_ID, WETELECOM_PRODUCT_6802, 0xff, 0xff, 0xff) },
+       { USB_DEVICE_AND_INTERFACE_INFO(WETELECOM_VENDOR_ID, WETELECOM_PRODUCT_WMD300, 0xff, 0xff, 0xff) },
+-      { USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0x421d, 0xff, 0xff, 0xff) }, /* HP lt2523 (Novatel E371) */
++      { USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0x421d, 0xff, 0xff, 0xff) },    /* HP lt2523 (Novatel E371) */
++      { USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0xa31d, 0xff, 0x06, 0x10) },    /* HP lt4132 (Huawei ME906s-158) */
++      { USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0xa31d, 0xff, 0x06, 0x12) },
++      { USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0xa31d, 0xff, 0x06, 0x13) },
++      { USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0xa31d, 0xff, 0x06, 0x14) },
++      { USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0xa31d, 0xff, 0x06, 0x1b) },
++      { USB_DEVICE(0x1508, 0x1001),                                           /* Fibocom NL668 */
++        .driver_info = RSVD(4) | RSVD(5) | RSVD(6) },
+       { } /* Terminating entry */
+ };
+ MODULE_DEVICE_TABLE(usb, option_ids);
+diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
+index 69309538ffb8..1581e8668b09 100644
+--- a/fs/cifs/smb2pdu.c
++++ b/fs/cifs/smb2pdu.c
+@@ -2020,14 +2020,14 @@ SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
+       /* We check for obvious errors in the output buffer length and offset */
+       if (*plen == 0)
+               goto ioctl_exit; /* server returned no data */
+-      else if (*plen > 0xFF00) {
++      else if (*plen > rsp_iov.iov_len || *plen > 0xFF00) {
+               cifs_dbg(VFS, "srv returned invalid ioctl length: %d\n", *plen);
+               *plen = 0;
+               rc = -EIO;
+               goto ioctl_exit;
+       }
+ 
+-      if (get_rfc1002_length(rsp) < le32_to_cpu(rsp->OutputOffset) + *plen) {
++      if (get_rfc1002_length(rsp) - *plen < le32_to_cpu(rsp->OutputOffset)) {
+               cifs_dbg(VFS, "Malformed ioctl resp: len %d offset %d\n", *plen,
+                       le32_to_cpu(rsp->OutputOffset));
+               *plen = 0;
+diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
+index 82ac5f682b73..f69c545f5868 100644
+--- a/fs/proc/proc_sysctl.c
++++ b/fs/proc/proc_sysctl.c
+@@ -464,7 +464,7 @@ static struct inode *proc_sys_make_inode(struct super_block *sb,
+ 
+       inode = new_inode(sb);
+       if (!inode)
+-              goto out;
++              return ERR_PTR(-ENOMEM);
+ 
+       inode->i_ino = get_next_ino();
+ 
+@@ -474,8 +474,7 @@ static struct inode *proc_sys_make_inode(struct super_block *sb,
+       if (unlikely(head->unregistering)) {
+               spin_unlock(&sysctl_lock);
+               iput(inode);
+-              inode = NULL;
+-              goto out;
++              return ERR_PTR(-ENOENT);
+       }
+       ei->sysctl = head;
+       ei->sysctl_entry = table;
+@@ -500,7 +499,6 @@ static struct inode *proc_sys_make_inode(struct super_block *sb,
+       if (root->set_ownership)
+               root->set_ownership(head, table, &inode->i_uid, &inode->i_gid);
+ 
+-out:
+       return inode;
+ }
+ 
+@@ -549,10 +547,11 @@ static struct dentry *proc_sys_lookup(struct inode *dir, struct dentry *dentry,
+                       goto out;
+       }
+ 
+-      err = ERR_PTR(-ENOMEM);
+       inode = proc_sys_make_inode(dir->i_sb, h ? h : head, p);
+-      if (!inode)
++      if (IS_ERR(inode)) {
++              err = ERR_CAST(inode);
+               goto out;
++      }
+ 
+       err = NULL;
+       d_set_d_op(dentry, &proc_sys_dentry_operations);
+@@ -685,7 +684,7 @@ static bool proc_sys_fill_cache(struct file *file,
+                       return false;
+               if (d_in_lookup(child)) {
+                       inode = proc_sys_make_inode(dir->d_sb, head, table);
+-                      if (!inode) {
++                      if (IS_ERR(inode)) {
+                               d_lookup_done(child);
+                               dput(child);
+                               return false;
+diff --git a/fs/ubifs/dir.c b/fs/ubifs/dir.c
+index ef820f803176..4e6e32c0c08a 100644
+--- a/fs/ubifs/dir.c
++++ b/fs/ubifs/dir.c
+@@ -1147,8 +1147,7 @@ static int ubifs_symlink(struct inode *dir, struct dentry *dentry,
+       struct ubifs_inode *ui;
+       struct ubifs_inode *dir_ui = ubifs_inode(dir);
+       struct ubifs_info *c = dir->i_sb->s_fs_info;
+-      int err, len = strlen(symname);
+-      int sz_change = CALC_DENT_SIZE(len);
++      int err, sz_change, len = strlen(symname);
+       struct fscrypt_str disk_link = FSTR_INIT((char *)symname, len + 1);
+       struct fscrypt_symlink_data *sd = NULL;
+       struct ubifs_budget_req req = { .new_ino = 1, .new_dent = 1,
+@@ -1189,6 +1188,8 @@ static int ubifs_symlink(struct inode *dir, struct dentry *dentry,
+       if (err)
+               goto out_budg;
+ 
++      sz_change = CALC_DENT_SIZE(fname_len(&nm));
++
+       inode = ubifs_new_inode(c, dir, S_IFLNK | S_IRWXUGO);
+       if (IS_ERR(inode)) {
+               err = PTR_ERR(inode);
+diff --git a/fs/ubifs/replay.c b/fs/ubifs/replay.c
+index ae5c02f22f3e..d998fbf7de30 100644
+--- a/fs/ubifs/replay.c
++++ b/fs/ubifs/replay.c
+@@ -209,6 +209,38 @@ static int trun_remove_range(struct ubifs_info *c, struct replay_entry *r)
+       return ubifs_tnc_remove_range(c, &min_key, &max_key);
+ }
+ 
++/**
++ * inode_still_linked - check whether inode in question will be re-linked.
++ * @c: UBIFS file-system description object
++ * @rino: replay entry to test
++ *
++ * O_TMPFILE files can be re-linked, this means link count goes from 0 to 1.
++ * This case needs special care, otherwise all references to the inode will
++ * be removed upon the first replay entry of an inode with link count 0
++ * is found.
++ */
++static bool inode_still_linked(struct ubifs_info *c, struct replay_entry *rino)
++{
++      struct replay_entry *r;
++
++      ubifs_assert(rino->deletion);
++      ubifs_assert(key_type(c, &rino->key) == UBIFS_INO_KEY);
++
++      /*
++       * Find the most recent entry for the inode behind @rino and check
++       * whether it is a deletion.
++       */
++      list_for_each_entry_reverse(r, &c->replay_list, list) {
++              ubifs_assert(r->sqnum >= rino->sqnum);
++              if (key_inum(c, &r->key) == key_inum(c, &rino->key))
++                      return r->deletion == 0;
++
++      }
++
++      ubifs_assert(0);
++      return false;
++}
++
+ /**
+  * apply_replay_entry - apply a replay entry to the TNC.
+  * @c: UBIFS file-system description object
+@@ -239,6 +271,11 @@ static int apply_replay_entry(struct ubifs_info *c, struct replay_entry *r)
+                       {
+                               ino_t inum = key_inum(c, &r->key);
+ 
++                              if (inode_still_linked(c, r)) {
++                                      err = 0;
++                                      break;
++                              }
++
+                               err = ubifs_tnc_remove_ino(c, inum);
+                               break;
+                       }
+diff --git a/include/linux/math64.h b/include/linux/math64.h
+index 082de345b73c..3a7a14062668 100644
+--- a/include/linux/math64.h
++++ b/include/linux/math64.h
+@@ -254,4 +254,7 @@ static inline u64 mul_u64_u32_div(u64 a, u32 mul, u32 divisor)
+ }
+ #endif /* mul_u64_u32_div */
+ 
++#define DIV64_U64_ROUND_UP(ll, d)     \
++      ({ u64 _tmp = (d); div64_u64((ll) + _tmp - 1, _tmp); })
++
+ #endif /* _LINUX_MATH64_H */
+diff --git a/kernel/panic.c b/kernel/panic.c
+index bdd18afa19a4..32ff6fd30201 100644
+--- a/kernel/panic.c
++++ b/kernel/panic.c
+@@ -14,6 +14,7 @@
+ #include <linux/kmsg_dump.h>
+ #include <linux/kallsyms.h>
+ #include <linux/notifier.h>
++#include <linux/vt_kern.h>
+ #include <linux/module.h>
+ #include <linux/random.h>
+ #include <linux/ftrace.h>
+@@ -230,7 +231,10 @@ void panic(const char *fmt, ...)
+       if (_crash_kexec_post_notifiers)
+               __crash_kexec(NULL);
+ 
+-      bust_spinlocks(0);
++#ifdef CONFIG_VT
++      unblank_screen();
++#endif
++      console_unblank();
+ 
+       /*
+        * We may have ended up stopping the CPU holding the lock (in
+diff --git a/kernel/time/posix-timers.c b/kernel/time/posix-timers.c
+index 55d45fe2cc17..d7e478a430e9 100644
+--- a/kernel/time/posix-timers.c
++++ b/kernel/time/posix-timers.c
+@@ -298,9 +298,6 @@ static void common_hrtimer_rearm(struct k_itimer *timr)
+ {
+       struct hrtimer *timer = &timr->it.real.timer;
+ 
+-      if (!timr->it_interval)
+-              return;
+-
+       timr->it_overrun += hrtimer_forward(timer, timer->base->get_time(),
+                                           timr->it_interval);
+       hrtimer_restart(timer);
+@@ -326,7 +323,7 @@ void posixtimer_rearm(struct siginfo *info)
+       if (!timr)
+               return;
+ 
+-      if (timr->it_requeue_pending == info->si_sys_private) {
++      if (timr->it_interval && timr->it_requeue_pending == info->si_sys_private) {
+               timr->kclock->timer_rearm(timr);
+ 
+               timr->it_active = 1;
+diff --git a/mm/vmscan.c b/mm/vmscan.c
+index be56e2e1931e..9734e62654fa 100644
+--- a/mm/vmscan.c
++++ b/mm/vmscan.c
+@@ -2367,9 +2367,11 @@ out:
+                       /*
+                        * Scan types proportional to swappiness and
+                        * their relative recent reclaim efficiency.
++                       * Make sure we don't miss the last page
++                       * because of a round-off error.
+                        */
+-                      scan = div64_u64(scan * fraction[file],
+-                                       denominator);
++                      scan = DIV64_U64_ROUND_UP(scan * fraction[file],
++                                                denominator);
+                       break;
+               case SCAN_FILE:
+               case SCAN_ANON:
+diff --git a/sound/soc/codecs/sta32x.c b/sound/soc/codecs/sta32x.c
+index 5b888476d9ff..b728140c79a9 100644
+--- a/sound/soc/codecs/sta32x.c
++++ b/sound/soc/codecs/sta32x.c
+@@ -879,6 +879,9 @@ static int sta32x_probe(struct snd_soc_codec *codec)
+       struct sta32x_priv *sta32x = snd_soc_codec_get_drvdata(codec);
+       struct sta32x_platform_data *pdata = sta32x->pdata;
+       int i, ret = 0, thermal = 0;
++
++      sta32x->codec = codec;
++
+       ret = regulator_bulk_enable(ARRAY_SIZE(sta32x->supplies),
+                                   sta32x->supplies);
+       if (ret != 0) {
+diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
+index b205c1340456..5e53cafe6cf9 100644
+--- a/tools/perf/builtin-record.c
++++ b/tools/perf/builtin-record.c
+@@ -800,13 +800,10 @@ static int record__synthesize(struct record *rec, bool tail)
+               return 0;
+ 
+       if (file->is_pipe) {
+-              err = perf_event__synthesize_features(
+-                      tool, session, rec->evlist, process_synthesized_event);
+-              if (err < 0) {
+-                      pr_err("Couldn't synthesize features.\n");
+-                      return err;
+-              }
+-
++              /*
++               * We need to synthesize events first, because some
++               * features works on top of them (on report side).
++               */
+               err = perf_event__synthesize_attrs(tool, session,
+                                                  process_synthesized_event);
+               if (err < 0) {
+@@ -814,6 +811,13 @@ static int record__synthesize(struct record *rec, bool tail)
+                       goto out;
+               }
+ 
++              err = perf_event__synthesize_features(tool, session, rec->evlist,
++                                                    process_synthesized_event);
++              if (err < 0) {
++                      pr_err("Couldn't synthesize features.\n");
++                      return err;
++              }
++
+               if (have_tracepoints(&rec->evlist->entries)) {
+                       /*
+                        * FIXME err <= 0 here actually means that
