commit:     547a1444731be81625b3f2ec0cdc1aa68d9b110a
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Mar 15 17:17:27 2017 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Mar 15 17:17:27 2017 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=547a1444

Linux patch 4.10.3

 0000_README             |    4 +
 1002_linux-4.10.3.patch | 3907 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 3911 insertions(+)

diff --git a/0000_README b/0000_README
index 8ad9f95..471175a 100644
--- a/0000_README
+++ b/0000_README
@@ -51,6 +51,10 @@ Patch:  1001_linux-4.10.2.patch
 From:   http://www.kernel.org
 Desc:   Linux 4.10.2
 
+Patch:  1002_linux-4.10.3.patch
+From:   http://www.kernel.org
+Desc:   Linux 4.10.3
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1002_linux-4.10.3.patch b/1002_linux-4.10.3.patch
new file mode 100644
index 0000000..3352128
--- /dev/null
+++ b/1002_linux-4.10.3.patch
@@ -0,0 +1,3907 @@
+diff --git a/Makefile b/Makefile
+index 6e09b3a44e9a..190a684303c1 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 10
+-SUBLEVEL = 2
++SUBLEVEL = 3
+ EXTRAVERSION =
+ NAME = Fearless Coyote
+ 
+diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
+index 6bca916a5ba0..71cac7c43c4b 100644
+--- a/arch/s390/include/asm/processor.h
++++ b/arch/s390/include/asm/processor.h
+@@ -89,7 +89,8 @@ extern void execve_tail(void);
+  * User space process size: 2GB for 31 bit, 4TB or 8PT for 64 bit.
+  */
+ 
+-#define TASK_SIZE_OF(tsk)     ((tsk)->mm->context.asce_limit)
++#define TASK_SIZE_OF(tsk)     ((tsk)->mm ? \
++                               (tsk)->mm->context.asce_limit : TASK_MAX_SIZE)
+ #define TASK_UNMAPPED_BASE    (test_thread_flag(TIF_31BIT) ? \
+                                       (1UL << 30) : (1UL << 41))
+ #define TASK_SIZE             TASK_SIZE_OF(current)
+diff --git a/arch/s390/kernel/crash_dump.c b/arch/s390/kernel/crash_dump.c
+index f9293bfefb7f..408b4f4fda0f 100644
+--- a/arch/s390/kernel/crash_dump.c
++++ b/arch/s390/kernel/crash_dump.c
+@@ -329,7 +329,11 @@ static void *nt_init_name(void *buf, Elf64_Word type, void *desc, int d_len,
+ 
+ static inline void *nt_init(void *buf, Elf64_Word type, void *desc, int d_len)
+ {
+-      return nt_init_name(buf, type, desc, d_len, KEXEC_CORE_NOTE_NAME);
++      const char *note_name = "LINUX";
++
++      if (type == NT_PRPSINFO || type == NT_PRSTATUS || type == NT_PRFPREG)
++              note_name = KEXEC_CORE_NOTE_NAME;
++      return nt_init_name(buf, type, desc, d_len, note_name);
+ }
+ 
+ /*
+diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
+index 865a48871ca4..5401e79d6c32 100644
+--- a/arch/s390/kernel/setup.c
++++ b/arch/s390/kernel/setup.c
+@@ -820,10 +820,10 @@ static void __init setup_randomness(void)
+ {
+       struct sysinfo_3_2_2 *vmms;
+ 
+-      vmms = (struct sysinfo_3_2_2 *) alloc_page(GFP_KERNEL);
+-      if (vmms && stsi(vmms, 3, 2, 2) == 0 && vmms->count)
+-              add_device_randomness(&vmms, vmms->count);
+-      free_page((unsigned long) vmms);
++      vmms = (struct sysinfo_3_2_2 *) memblock_alloc(PAGE_SIZE, PAGE_SIZE);
++      if (stsi(vmms, 3, 2, 2) == 0 && vmms->count)
++		add_device_randomness(&vmms->vm, sizeof(vmms->vm[0]) * vmms->count);
++      memblock_free((unsigned long) vmms, PAGE_SIZE);
+ }
+ 
+ /*
+diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c
+index 93dcbae1e98d..ab167620955c 100644
+--- a/arch/s390/kernel/topology.c
++++ b/arch/s390/kernel/topology.c
+@@ -466,7 +466,7 @@ void __init topology_init_early(void)
+       set_sched_topology(s390_topology);
+       if (!MACHINE_HAS_TOPOLOGY)
+               goto out;
+-      tl_info = memblock_virt_alloc(sizeof(*tl_info), PAGE_SIZE);
++      tl_info = memblock_virt_alloc(PAGE_SIZE, PAGE_SIZE);
+       info = tl_info;
+       store_topology(info);
+       pr_info("The CPU configuration topology of the machine is:");
+diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
+index 6484a250021e..ac9eb595f0aa 100644
+--- a/arch/s390/kvm/kvm-s390.c
++++ b/arch/s390/kvm/kvm-s390.c
+@@ -442,6 +442,9 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
+       struct kvm_memory_slot *memslot;
+       int is_dirty = 0;
+ 
++      if (kvm_is_ucontrol(kvm))
++              return -EINVAL;
++
+       mutex_lock(&kvm->slots_lock);
+ 
+       r = -EINVAL;
+diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
+index 6fa85944af83..fc5abff9b7fd 100644
+--- a/arch/x86/include/asm/tlbflush.h
++++ b/arch/x86/include/asm/tlbflush.h
+@@ -188,7 +188,7 @@ static inline void __native_flush_tlb_single(unsigned long addr)
+ 
+ static inline void __flush_tlb_all(void)
+ {
+-      if (static_cpu_has(X86_FEATURE_PGE))
++      if (boot_cpu_has(X86_FEATURE_PGE))
+               __flush_tlb_global();
+       else
+               __flush_tlb();
+diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
+index a236decb81e4..2c22aef35dbc 100644
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -3962,7 +3962,7 @@ static void fix_rmode_seg(int seg, struct kvm_segment *save)
+       }
+ 
+       vmcs_write16(sf->selector, var.selector);
+-      vmcs_write32(sf->base, var.base);
++      vmcs_writel(sf->base, var.base);
+       vmcs_write32(sf->limit, var.limit);
+       vmcs_write32(sf->ar_bytes, vmx_segment_access_rights(&var));
+ }
+@@ -8350,7 +8350,7 @@ static void kvm_flush_pml_buffers(struct kvm *kvm)
+ static void vmx_dump_sel(char *name, uint32_t sel)
+ {
+       pr_err("%s sel=0x%04x, attr=0x%05x, limit=0x%08x, base=0x%016lx\n",
+-             name, vmcs_read32(sel),
++             name, vmcs_read16(sel),
+              vmcs_read32(sel + GUEST_ES_AR_BYTES - GUEST_ES_SELECTOR),
+              vmcs_read32(sel + GUEST_ES_LIMIT - GUEST_ES_SELECTOR),
+              vmcs_readl(sel + GUEST_ES_BASE - GUEST_ES_SELECTOR));
+diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
+index 0d4fb3ebbbac..1680768d392c 100644
+--- a/arch/x86/mm/gup.c
++++ b/arch/x86/mm/gup.c
+@@ -120,6 +120,11 @@ static noinline int gup_pte_range(pmd_t pmd, unsigned long addr,
+                       return 0;
+               }
+ 
++              if (!pte_allows_gup(pte_val(pte), write)) {
++                      pte_unmap(ptep);
++                      return 0;
++              }
++
+               if (pte_devmap(pte)) {
+                       pgmap = get_dev_pagemap(pte_pfn(pte), pgmap);
+                       if (unlikely(!pgmap)) {
+@@ -127,8 +132,7 @@ static noinline int gup_pte_range(pmd_t pmd, unsigned long addr,
+                               pte_unmap(ptep);
+                               return 0;
+                       }
+-              } else if (!pte_allows_gup(pte_val(pte), write) ||
+-                         pte_special(pte)) {
++              } else if (pte_special(pte)) {
+                       pte_unmap(ptep);
+                       return 0;
+               }
+diff --git a/arch/xtensa/kernel/setup.c b/arch/xtensa/kernel/setup.c
+index 8fd4be610607..75e47e5436e3 100644
+--- a/arch/xtensa/kernel/setup.c
++++ b/arch/xtensa/kernel/setup.c
+@@ -126,6 +126,8 @@ static int __init parse_tag_initrd(const bp_tag_t* tag)
+ 
+ __tagtable(BP_TAG_INITRD, parse_tag_initrd);
+ 
++#endif /* CONFIG_BLK_DEV_INITRD */
++
+ #ifdef CONFIG_OF
+ 
+ static int __init parse_tag_fdt(const bp_tag_t *tag)
+@@ -138,8 +140,6 @@ __tagtable(BP_TAG_FDT, parse_tag_fdt);
+ 
+ #endif /* CONFIG_OF */
+ 
+-#endif /* CONFIG_BLK_DEV_INITRD */
+-
+ static int __init parse_tag_cmdline(const bp_tag_t* tag)
+ {
+       strlcpy(command_line, (char *)(tag->data), COMMAND_LINE_SIZE);
+diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
+index 7361d00818e2..662036bdc65e 100644
+--- a/drivers/acpi/nfit/core.c
++++ b/drivers/acpi/nfit/core.c
+@@ -1603,7 +1603,7 @@ static size_t sizeof_nfit_set_info(int num_mappings)
+               + num_mappings * sizeof(struct nfit_set_info_map);
+ }
+ 
+-static int cmp_map(const void *m0, const void *m1)
++static int cmp_map_compat(const void *m0, const void *m1)
+ {
+       const struct nfit_set_info_map *map0 = m0;
+       const struct nfit_set_info_map *map1 = m1;
+@@ -1612,6 +1612,14 @@ static int cmp_map(const void *m0, const void *m1)
+                       sizeof(u64));
+ }
+ 
++static int cmp_map(const void *m0, const void *m1)
++{
++      const struct nfit_set_info_map *map0 = m0;
++      const struct nfit_set_info_map *map1 = m1;
++
++      return map0->region_offset - map1->region_offset;
++}
++
+ /* Retrieve the nth entry referencing this spa */
+ static struct acpi_nfit_memory_map *memdev_from_spa(
+               struct acpi_nfit_desc *acpi_desc, u16 range_index, int n)
+@@ -1667,6 +1675,12 @@ static int acpi_nfit_init_interleave_set(struct acpi_nfit_desc *acpi_desc,
+       sort(&info->mapping[0], nr, sizeof(struct nfit_set_info_map),
+                       cmp_map, NULL);
+       nd_set->cookie = nd_fletcher64(info, sizeof_nfit_set_info(nr), 0);
++
++      /* support namespaces created with the wrong sort order */
++      sort(&info->mapping[0], nr, sizeof(struct nfit_set_info_map),
++                      cmp_map_compat, NULL);
++      nd_set->altcookie = nd_fletcher64(info, sizeof_nfit_set_info(nr), 0);
++
+       ndr_desc->nd_set = nd_set;
+       devm_kfree(dev, info);
+ 
+diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
+index fadba88745dc..b793853ff05f 100644
+--- a/drivers/bluetooth/ath3k.c
++++ b/drivers/bluetooth/ath3k.c
+@@ -94,6 +94,7 @@ static const struct usb_device_id ath3k_table[] = {
+       { USB_DEVICE(0x04CA, 0x300f) },
+       { USB_DEVICE(0x04CA, 0x3010) },
+       { USB_DEVICE(0x04CA, 0x3014) },
++      { USB_DEVICE(0x04CA, 0x3018) },
+       { USB_DEVICE(0x0930, 0x0219) },
+       { USB_DEVICE(0x0930, 0x021c) },
+       { USB_DEVICE(0x0930, 0x0220) },
+@@ -162,6 +163,7 @@ static const struct usb_device_id ath3k_blist_tbl[] = {
+       { USB_DEVICE(0x04ca, 0x300f), .driver_info = BTUSB_ATH3012 },
+       { USB_DEVICE(0x04ca, 0x3010), .driver_info = BTUSB_ATH3012 },
+       { USB_DEVICE(0x04ca, 0x3014), .driver_info = BTUSB_ATH3012 },
++      { USB_DEVICE(0x04ca, 0x3018), .driver_info = BTUSB_ATH3012 },
+       { USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
+       { USB_DEVICE(0x0930, 0x021c), .driver_info = BTUSB_ATH3012 },
+       { USB_DEVICE(0x0930, 0x0220), .driver_info = BTUSB_ATH3012 },
+diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
+index 2f633df9f4e6..dd220fad366c 100644
+--- a/drivers/bluetooth/btusb.c
++++ b/drivers/bluetooth/btusb.c
+@@ -209,6 +209,7 @@ static const struct usb_device_id blacklist_table[] = {
+       { USB_DEVICE(0x04ca, 0x300f), .driver_info = BTUSB_ATH3012 },
+       { USB_DEVICE(0x04ca, 0x3010), .driver_info = BTUSB_ATH3012 },
+       { USB_DEVICE(0x04ca, 0x3014), .driver_info = BTUSB_ATH3012 },
++      { USB_DEVICE(0x04ca, 0x3018), .driver_info = BTUSB_ATH3012 },
+       { USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
+       { USB_DEVICE(0x0930, 0x021c), .driver_info = BTUSB_ATH3012 },
+       { USB_DEVICE(0x0930, 0x0220), .driver_info = BTUSB_ATH3012 },
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+index 723ae682bf25..5a50b3df80ee 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+@@ -1252,7 +1252,8 @@ void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
+       if (!adev->pm.dpm_enabled)
+               return;
+ 
+-      amdgpu_display_bandwidth_update(adev);
++      if (adev->mode_info.num_crtc)
++              amdgpu_display_bandwidth_update(adev);
+ 
+       for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
+               struct amdgpu_ring *ring = adev->rings[i];
+diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+index a7af5b33a5e3..648f0d7475db 100644
+--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+@@ -3737,9 +3737,15 @@ static void dce_v11_0_encoder_add(struct amdgpu_device *adev,
+       default:
+               encoder->possible_crtcs = 0x3;
+               break;
++      case 3:
++              encoder->possible_crtcs = 0x7;
++              break;
+       case 4:
+               encoder->possible_crtcs = 0xf;
+               break;
++      case 5:
++              encoder->possible_crtcs = 0x1f;
++              break;
+       case 6:
+               encoder->possible_crtcs = 0x3f;
+               break;
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
+index b323f5ef64d2..51bbd6e44dbb 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
+@@ -708,290 +708,238 @@ static void gfx_v6_0_tiling_mode_table_init(struct amdgpu_device *adev)
+		for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
+                       switch (reg_offset) {
+                       case 0:
+-                              gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+-                                               ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+-                                               PIPE_CONFIG(ADDR_SURF_P2) |
++                              gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
++                                               MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
++                                               PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
++                                               NUM_BANKS(ADDR_SURF_16_BANK) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+-                                               MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+-                                               NUM_BANKS(ADDR_SURF_16_BANK));
++                                               MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
+                               break;
+                       case 1:
+-                              gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+-                                               ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+-                                               PIPE_CONFIG(ADDR_SURF_P2) |
++                              gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
++                                               MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
++                                               PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
++                                               NUM_BANKS(ADDR_SURF_16_BANK) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+-                                               MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+-                                               NUM_BANKS(ADDR_SURF_16_BANK));
++                                               MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
+                               break;
+                       case 2:
+-                              gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+-                                               ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+-                                               PIPE_CONFIG(ADDR_SURF_P2) |
++                              gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
++                                               MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
++                                               PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
++                                               NUM_BANKS(ADDR_SURF_16_BANK) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+-                                               MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+-                                               NUM_BANKS(ADDR_SURF_16_BANK));
++                                               MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
+                               break;
+                       case 3:
+-                              gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+-                                               ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+-                                               PIPE_CONFIG(ADDR_SURF_P2) |
++                              gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
++                                               MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
++                                               PIPE_CONFIG(ADDR_SURF_P4_8x16) |
++                                               TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
++                                               NUM_BANKS(ADDR_SURF_16_BANK) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+-                                               BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+-                                               MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+-                                               NUM_BANKS(ADDR_SURF_8_BANK) |
+-                                               TILE_SPLIT(split_equal_to_row_size));
++                                               BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
++                                               MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
+                               break;
+                       case 4:
+-                              gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+-                                               ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+-                                               PIPE_CONFIG(ADDR_SURF_P2));
++                              gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
++                                               MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
++                                               PIPE_CONFIG(ADDR_SURF_P4_8x16) |
++                                               TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
++                                               NUM_BANKS(ADDR_SURF_16_BANK) |
++                                               BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
++                                               BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
++                                               MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+                               break;
+                       case 5:
+-                              gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+-                                               ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+-                                               PIPE_CONFIG(ADDR_SURF_P2) |
+-                                               TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
++                              gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
++                                               MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
++                                               PIPE_CONFIG(ADDR_SURF_P4_8x16) |
++                                               TILE_SPLIT(split_equal_to_row_size) |
++                                               NUM_BANKS(ADDR_SURF_16_BANK) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+-                                               BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+-                                               MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+-                                               NUM_BANKS(ADDR_SURF_8_BANK));
++                                               BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
++                                               MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+                               break;
+                       case 6:
+-                              gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+-                                               ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+-                                               PIPE_CONFIG(ADDR_SURF_P2) |
+-                                               TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
++                              gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
++                                               MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
++                                               PIPE_CONFIG(ADDR_SURF_P4_8x16) |
++                                               TILE_SPLIT(split_equal_to_row_size) |
++                                               NUM_BANKS(ADDR_SURF_16_BANK) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+-                                               BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+-                                               MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+-                                               NUM_BANKS(ADDR_SURF_8_BANK));
++                                               BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
++                                               MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+                               break;
+                       case 7:
+-                              gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+-                                               ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+-                                               PIPE_CONFIG(ADDR_SURF_P2) |
+-                                               TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
++                              gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
++                                               MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
++                                               PIPE_CONFIG(ADDR_SURF_P4_8x16) |
++                                               TILE_SPLIT(split_equal_to_row_size) |
++                                               NUM_BANKS(ADDR_SURF_16_BANK) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+-                                               BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+-                                               MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+-                                               NUM_BANKS(ADDR_SURF_4_BANK));
++                                               BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
++                                               MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
+                               break;
+                       case 8:
+-                              gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED));
++                              gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
++                                               MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
++                                               PIPE_CONFIG(ADDR_SURF_P4_8x16) |
++                                               TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
++                                               NUM_BANKS(ADDR_SURF_16_BANK) |
++                                               BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
++                                               BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
++                                               MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+                               break;
+                       case 9:
+-                              gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+-                                               ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+-                                               PIPE_CONFIG(ADDR_SURF_P2));
++                              gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
++                                               MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
++                                               PIPE_CONFIG(ADDR_SURF_P4_8x16) |
++                                               TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
++                                               NUM_BANKS(ADDR_SURF_16_BANK) |
++                                               BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
++                                               BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
++                                               MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+                               break;
+                       case 10:
+-                              gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+-                                               ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+-                                               PIPE_CONFIG(ADDR_SURF_P2) |
++                              gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
++                                               MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
++                                               PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
++                                               NUM_BANKS(ADDR_SURF_16_BANK) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+-                                               MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+-                                               NUM_BANKS(ADDR_SURF_16_BANK));
++                                               MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
+                               break;
+                       case 11:
+-                              gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+-                                               ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+-                                               PIPE_CONFIG(ADDR_SURF_P2) |
++                              gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
++                                               MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
++                                               PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
++                                               NUM_BANKS(ADDR_SURF_16_BANK) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+-                                               MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+-                                               NUM_BANKS(ADDR_SURF_16_BANK));
++                                               MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+                               break;
+                       case 12:
+-                              gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+-                                               ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+-                                               PIPE_CONFIG(ADDR_SURF_P2) |
++                              gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
++                                               MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
++                                               PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
++                                               NUM_BANKS(ADDR_SURF_16_BANK) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+-                                               MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+-                                               NUM_BANKS(ADDR_SURF_16_BANK));
++                                               MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+                               break;
+                       case 13:
+-                              gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+-                                               ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+-                                               PIPE_CONFIG(ADDR_SURF_P2));
++                              gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
++                                               MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
++                                               PIPE_CONFIG(ADDR_SURF_P4_8x16) |
++                                               TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
++                                               NUM_BANKS(ADDR_SURF_16_BANK) |
++                                               BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
++                                               BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
++                                               MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+                               break;
+                       case 14:
+-                              gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+-                                               ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+-                                               PIPE_CONFIG(ADDR_SURF_P2) |
++                              gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
++                                               MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
++                                               PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
++                                               NUM_BANKS(ADDR_SURF_16_BANK) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+-                                               MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+-                                               NUM_BANKS(ADDR_SURF_16_BANK));
++                                               MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+                               break;
+                       case 15:
+-                              gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+-                                               ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+-                                               PIPE_CONFIG(ADDR_SURF_P2) |
++                              gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
++                                               MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
++                                               PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
++                                               NUM_BANKS(ADDR_SURF_16_BANK) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+-                                               MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+-                                               NUM_BANKS(ADDR_SURF_16_BANK));
++                                               MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+                               break;
+                       case 16:
+-                              gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+-                                               ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+-                                               PIPE_CONFIG(ADDR_SURF_P2) |
++                              gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
++                                               MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
++                                               PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
++                                               NUM_BANKS(ADDR_SURF_16_BANK) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+-                                               MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+-                                               NUM_BANKS(ADDR_SURF_16_BANK));
++                                               MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+                               break;
+                       case 17:
+-                              gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+-                                               ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+-                                               PIPE_CONFIG(ADDR_SURF_P2) |
+-                                               BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+-                                               BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+-                                               MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
++                              gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
++                                               MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
++                                               PIPE_CONFIG(ADDR_SURF_P4_8x16) |
++                                               TILE_SPLIT(split_equal_to_row_size) |
+                                                NUM_BANKS(ADDR_SURF_16_BANK) |
+-                                               TILE_SPLIT(split_equal_to_row_size));
+-                              break;
+-                      case 18:
+-                              gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+-                                               ARRAY_MODE(ARRAY_1D_TILED_THICK) |
+-                                               PIPE_CONFIG(ADDR_SURF_P2));
+-                              break;
+-                      case 19:
+-                              gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+-                                               ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
+-                                               PIPE_CONFIG(ADDR_SURF_P2) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+-                                               MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+-                                               NUM_BANKS(ADDR_SURF_16_BANK) |
+-                                               TILE_SPLIT(split_equal_to_row_size));
+-                              break;
+-                      case 20:
+-                              gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+-                                               ARRAY_MODE(ARRAY_2D_TILED_THICK) |
+-                                               PIPE_CONFIG(ADDR_SURF_P2) |
+-                                               BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+-                                               BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+-                                               MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+-                                               NUM_BANKS(ADDR_SURF_16_BANK) |
+-                                               TILE_SPLIT(split_equal_to_row_size));
++                                               MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+                               break;
+                       case 21:
+-                              gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+-                                               ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+-                                               PIPE_CONFIG(ADDR_SURF_P2) |
++                              gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
++                                               MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
++                                               PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
++                                               NUM_BANKS(ADDR_SURF_16_BANK) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+-                                               MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+-                                               NUM_BANKS(ADDR_SURF_8_BANK));
++                                               MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+                               break;
+                       case 22:
+-                              gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+-                                               ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+-                                               PIPE_CONFIG(ADDR_SURF_P2) |
++                              gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
++                                               MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
++                                               PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+-                                               BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
+-                                               BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+-                                               MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+-                                               NUM_BANKS(ADDR_SURF_8_BANK));
++                                               NUM_BANKS(ADDR_SURF_16_BANK) |
++                                               BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
++                                               BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
++                                               MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
+                               break;
+                       case 23:
+-                              gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+-                                               ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+-                                               PIPE_CONFIG(ADDR_SURF_P2) |
++                              gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
++                                               MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
++                                               PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
++                                               NUM_BANKS(ADDR_SURF_16_BANK) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+-                                               MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+-                                               NUM_BANKS(ADDR_SURF_8_BANK));
++                                               MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+                               break;
+                       case 24:
+-                              gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+-                                               ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+-                                               PIPE_CONFIG(ADDR_SURF_P2) |
++                              gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
++                                               MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
++                                               PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
++                                               NUM_BANKS(ADDR_SURF_16_BANK) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+-                                               MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+-                                               NUM_BANKS(ADDR_SURF_8_BANK));
++                                               MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+                               break;
+                       case 25:
+-                              gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+-                                               ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+-                                               PIPE_CONFIG(ADDR_SURF_P2) |
+-                                               TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
+-                                               BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+-                                               BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+-                                               MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+-                                               NUM_BANKS(ADDR_SURF_4_BANK));
+-                              break;
+-                      case 26:
+-                              gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+-                                               ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+-                                               PIPE_CONFIG(ADDR_SURF_P2) |
+-                                               TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
+-                                               BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+-                                               BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+-                                               MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+-                                               NUM_BANKS(ADDR_SURF_4_BANK));
+-                              break;
+-                      case 27:
+-                              gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+-                                               ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+-                                               PIPE_CONFIG(ADDR_SURF_P2) |
+-                                               TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
+-                                               BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+-                                               BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+-                                               MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+-                                               NUM_BANKS(ADDR_SURF_4_BANK));
+-                              break;
+-                      case 28:
+-                              gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+-                                               ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+-                                               PIPE_CONFIG(ADDR_SURF_P2) |
+-                                               TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
+-                                               BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+-                                               BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+-                                               MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+-                                               NUM_BANKS(ADDR_SURF_4_BANK));
+-                              break;
+-                      case 29:
+-                              gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+-                                               ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+-                                               PIPE_CONFIG(ADDR_SURF_P2) |
++                              gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
++                                               MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
++                                               PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
+-                                               BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+-                                               BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+-                                               MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+-                                               NUM_BANKS(ADDR_SURF_4_BANK));
+-                              break;
+-                      case 30:
+-                              gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+-                                               ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+-                                               PIPE_CONFIG(ADDR_SURF_P2) |
+-                                               TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
++                                               NUM_BANKS(ADDR_SURF_8_BANK) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+-                                               MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+-                                               NUM_BANKS(ADDR_SURF_4_BANK));
++                                               MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
+                               break;
+                       default:
+-                              continue;
++                              gb_tile_moden = 0;
++                              break;
+                       }
+                       adev->gfx.config.tile_mode_array[reg_offset] = gb_tile_moden;
+                       WREG32(mmGB_TILE_MODE0 + reg_offset, gb_tile_moden);
+diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h
+index 7abda94fc2cf..3bedcf7ddd2a 100644
+--- a/drivers/gpu/drm/ast/ast_drv.h
++++ b/drivers/gpu/drm/ast/ast_drv.h
+@@ -113,7 +113,11 @@ struct ast_private {
+       struct ttm_bo_kmap_obj cache_kmap;
+       int next_cursor;
+       bool support_wide_screen;
+-      bool DisableP2A;
++      enum {
++              ast_use_p2a,
++              ast_use_dt,
++              ast_use_defaults
++      } config_mode;
+ 
+       enum ast_tx_chip tx_chip_type;
+       u8 dp501_maxclk;
+diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c
+index 533e762d036d..fb9976254224 100644
+--- a/drivers/gpu/drm/ast/ast_main.c
++++ b/drivers/gpu/drm/ast/ast_main.c
+@@ -62,13 +62,84 @@ uint8_t ast_get_index_reg_mask(struct ast_private *ast,
+       return ret;
+ }
+ 
++static void ast_detect_config_mode(struct drm_device *dev, u32 *scu_rev)
++{
++      struct device_node *np = dev->pdev->dev.of_node;
++      struct ast_private *ast = dev->dev_private;
++      uint32_t data, jregd0, jregd1;
++
++      /* Defaults */
++      ast->config_mode = ast_use_defaults;
++      *scu_rev = 0xffffffff;
++
++      /* Check if we have device-tree properties */
++      if (np && !of_property_read_u32(np, "aspeed,scu-revision-id",
++                                      scu_rev)) {
++              /* We do, disable P2A access */
++              ast->config_mode = ast_use_dt;
++              DRM_INFO("Using device-tree for configuration\n");
++              return;
++      }
++
++      /* Not all families have a P2A bridge */
++      if (dev->pdev->device != PCI_CHIP_AST2000)
++              return;
++
++      /*
++       * The BMC will set SCU 0x40 D[12] to 1 if the P2 bridge
++       * is disabled. We force using P2A if VGA only mode bit
++       * is set D[7]
++       */
++      jregd0 = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd0, 0xff);
++      jregd1 = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd1, 0xff);
++      if (!(jregd0 & 0x80) || !(jregd1 & 0x10)) {
++              /* Double check it's actually working */
++              data = ast_read32(ast, 0xf004);
++              if (data != 0xFFFFFFFF) {
++                      /* P2A works, grab silicon revision */
++                      ast->config_mode = ast_use_p2a;
++
++                      DRM_INFO("Using P2A bridge for configuration\n");
++
++                      /* Read SCU7c (silicon revision register) */
++                      ast_write32(ast, 0xf004, 0x1e6e0000);
++                      ast_write32(ast, 0xf000, 0x1);
++                      *scu_rev = ast_read32(ast, 0x1207c);
++                      return;
++              }
++      }
++
++      /* We have a P2A bridge but it's disabled */
++      DRM_INFO("P2A bridge disabled, using default configuration\n");
++}
+ 
+ static int ast_detect_chip(struct drm_device *dev, bool *need_post)
+ {
+       struct ast_private *ast = dev->dev_private;
+-      uint32_t data, jreg;
++      uint32_t jreg, scu_rev;
++
++      /*
++       * If VGA isn't enabled, we need to enable now or subsequent
++       * access to the scratch registers will fail. We also inform
++       * our caller that it needs to POST the chip
++       * (Assumption: VGA not enabled -> need to POST)
++       */
++      if (!ast_is_vga_enabled(dev)) {
++              ast_enable_vga(dev);
++              DRM_INFO("VGA not enabled on entry, requesting chip POST\n");
++              *need_post = true;
++      } else
++              *need_post = false;
++
++
++      /* Enable extended register access */
++      ast_enable_mmio(dev);
+       ast_open_key(ast);
+ 
++      /* Find out whether P2A works or whether to use device-tree */
++      ast_detect_config_mode(dev, &scu_rev);
++
++      /* Identify chipset */
+       if (dev->pdev->device == PCI_CHIP_AST1180) {
+               ast->chip = AST1100;
+               DRM_INFO("AST 1180 detected\n");
+@@ -80,12 +151,7 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post)
+                       ast->chip = AST2300;
+                       DRM_INFO("AST 2300 detected\n");
+               } else if (dev->pdev->revision >= 0x10) {
+-                      uint32_t data;
+-                      ast_write32(ast, 0xf004, 0x1e6e0000);
+-                      ast_write32(ast, 0xf000, 0x1);
+-
+-                      data = ast_read32(ast, 0x1207c);
+-                      switch (data & 0x0300) {
++                      switch (scu_rev & 0x0300) {
+                       case 0x0200:
+                               ast->chip = AST1100;
+                               DRM_INFO("AST 1100 detected\n");
+@@ -110,26 +176,6 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post)
+               }
+       }
+ 
+-      /*
+-       * If VGA isn't enabled, we need to enable now or subsequent
+-       * access to the scratch registers will fail. We also inform
+-       * our caller that it needs to POST the chip
+-       * (Assumption: VGA not enabled -> need to POST)
+-       */
+-      if (!ast_is_vga_enabled(dev)) {
+-              ast_enable_vga(dev);
+-              ast_enable_mmio(dev);
+-              DRM_INFO("VGA not enabled on entry, requesting chip POST\n");
+-              *need_post = true;
+-      } else
+-              *need_post = false;
+-
+-      /* Check P2A Access */
+-      ast->DisableP2A = true;
+-      data = ast_read32(ast, 0xf004);
+-      if (data != 0xFFFFFFFF)
+-              ast->DisableP2A = false;
+-
+       /* Check if we support wide screen */
+       switch (ast->chip) {
+       case AST1180:
+@@ -146,17 +192,12 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post)
+                       ast->support_wide_screen = true;
+               else {
+                       ast->support_wide_screen = false;
+-                      if (ast->DisableP2A == false) {
+-                              /* Read SCU7c (silicon revision register) */
+-                              ast_write32(ast, 0xf004, 0x1e6e0000);
+-                              ast_write32(ast, 0xf000, 0x1);
+-                              data = ast_read32(ast, 0x1207c);
+-                              data &= 0x300;
+-                              if (ast->chip == AST2300 && data == 0x0) /* ast1300 */
+-                                      ast->support_wide_screen = true;
+-                              if (ast->chip == AST2400 && data == 0x100) /* ast1400 */
+-                                      ast->support_wide_screen = true;
+-                      }
++                      if (ast->chip == AST2300 &&
++                          (scu_rev & 0x300) == 0x0) /* ast1300 */
++                              ast->support_wide_screen = true;
++                      if (ast->chip == AST2400 &&
++                          (scu_rev & 0x300) == 0x100) /* ast1400 */
++                              ast->support_wide_screen = true;
+               }
+               break;
+       }
+@@ -220,85 +261,102 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post)
+ 
+ static int ast_get_dram_info(struct drm_device *dev)
+ {
++      struct device_node *np = dev->pdev->dev.of_node;
+       struct ast_private *ast = dev->dev_private;
+-      uint32_t data, data2;
+-      uint32_t denum, num, div, ref_pll;
++      uint32_t mcr_cfg, mcr_scu_mpll, mcr_scu_strap;
++      uint32_t denum, num, div, ref_pll, dsel;
+ 
+-      if (ast->DisableP2A)
+-      {
++      switch (ast->config_mode) {
++      case ast_use_dt:
++              /*
++               * If some properties are missing, use reasonable
++               * defaults for AST2400
++               */
++              if (of_property_read_u32(np, "aspeed,mcr-configuration",
++                                       &mcr_cfg))
++                      mcr_cfg = 0x00000577;
++              if (of_property_read_u32(np, "aspeed,mcr-scu-mpll",
++                                       &mcr_scu_mpll))
++                      mcr_scu_mpll = 0x000050C0;
++              if (of_property_read_u32(np, "aspeed,mcr-scu-strap",
++                                       &mcr_scu_strap))
++                      mcr_scu_strap = 0;
++              break;
++      case ast_use_p2a:
++              ast_write32(ast, 0xf004, 0x1e6e0000);
++              ast_write32(ast, 0xf000, 0x1);
++              mcr_cfg = ast_read32(ast, 0x10004);
++              mcr_scu_mpll = ast_read32(ast, 0x10120);
++              mcr_scu_strap = ast_read32(ast, 0x10170);
++              break;
++      case ast_use_defaults:
++      default:
+               ast->dram_bus_width = 16;
+               ast->dram_type = AST_DRAM_1Gx16;
+               ast->mclk = 396;
++              return 0;
+       }
+-      else
+-      {
+-              ast_write32(ast, 0xf004, 0x1e6e0000);
+-              ast_write32(ast, 0xf000, 0x1);
+-              data = ast_read32(ast, 0x10004);
+-
+-              if (data & 0x40)
+-                      ast->dram_bus_width = 16;
+-              else
+-                      ast->dram_bus_width = 32;
+ 
+-              if (ast->chip == AST2300 || ast->chip == AST2400) {
+-                      switch (data & 0x03) {
+-                      case 0:
+-                              ast->dram_type = AST_DRAM_512Mx16;
+-                              break;
+-                      default:
+-                      case 1:
+-                              ast->dram_type = AST_DRAM_1Gx16;
+-                              break;
+-                      case 2:
+-                              ast->dram_type = AST_DRAM_2Gx16;
+-                              break;
+-                      case 3:
+-                              ast->dram_type = AST_DRAM_4Gx16;
+-                              break;
+-                      }
+-              } else {
+-                      switch (data & 0x0c) {
+-                      case 0:
+-                      case 4:
+-                              ast->dram_type = AST_DRAM_512Mx16;
+-                              break;
+-                      case 8:
+-                              if (data & 0x40)
+-                                      ast->dram_type = AST_DRAM_1Gx16;
+-                              else
+-                                      ast->dram_type = AST_DRAM_512Mx32;
+-                              break;
+-                      case 0xc:
+-                              ast->dram_type = AST_DRAM_1Gx32;
+-                              break;
+-                      }
+-              }
++      if (mcr_cfg & 0x40)
++              ast->dram_bus_width = 16;
++      else
++              ast->dram_bus_width = 32;
+ 
+-              data = ast_read32(ast, 0x10120);
+-              data2 = ast_read32(ast, 0x10170);
+-              if (data2 & 0x2000)
+-                      ref_pll = 14318;
+-              else
+-                      ref_pll = 12000;
+-
+-              denum = data & 0x1f;
+-              num = (data & 0x3fe0) >> 5;
+-              data = (data & 0xc000) >> 14;
+-              switch (data) {
+-              case 3:
+-                      div = 0x4;
++      if (ast->chip == AST2300 || ast->chip == AST2400) {
++              switch (mcr_cfg & 0x03) {
++              case 0:
++                      ast->dram_type = AST_DRAM_512Mx16;
+                       break;
+-              case 2:
++              default:
+               case 1:
+-                      div = 0x2;
++                      ast->dram_type = AST_DRAM_1Gx16;
+                       break;
+-              default:
+-                      div = 0x1;
++              case 2:
++                      ast->dram_type = AST_DRAM_2Gx16;
++                      break;
++              case 3:
++                      ast->dram_type = AST_DRAM_4Gx16;
++                      break;
++              }
++      } else {
++              switch (mcr_cfg & 0x0c) {
++              case 0:
++              case 4:
++                      ast->dram_type = AST_DRAM_512Mx16;
++                      break;
++              case 8:
++                      if (mcr_cfg & 0x40)
++                              ast->dram_type = AST_DRAM_1Gx16;
++                      else
++                              ast->dram_type = AST_DRAM_512Mx32;
++                      break;
++              case 0xc:
++                      ast->dram_type = AST_DRAM_1Gx32;
+                       break;
+               }
+-              ast->mclk = ref_pll * (num + 2) / (denum + 2) * (div * 1000);
+       }
++
++      if (mcr_scu_strap & 0x2000)
++              ref_pll = 14318;
++      else
++              ref_pll = 12000;
++
++      denum = mcr_scu_mpll & 0x1f;
++      num = (mcr_scu_mpll & 0x3fe0) >> 5;
++      dsel = (mcr_scu_mpll & 0xc000) >> 14;
++      switch (dsel) {
++      case 3:
++              div = 0x4;
++              break;
++      case 2:
++      case 1:
++              div = 0x2;
++              break;
++      default:
++              div = 0x1;
++              break;
++      }
++      ast->mclk = ref_pll * (num + 2) / (denum + 2) * (div * 1000);
+       return 0;
+ }
+ 
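
The hunk above decodes the SCU MPLL word (read via P2A, or defaulted for
the device-tree path) into the fields that feed the MCLK computation:
denum in bits 0-4, num in bits 5-13, and the output-divider select dsel
in bits 14-15. The standalone program below is a minimal sketch of that
decode; the function name and strap flag are illustrative, not part of
the driver, and the arithmetic is kept exactly as the hunk writes it.

/*
 * Sketch of the SCU MPLL decode performed by ast_get_dram_info() above.
 * Field layout follows the hunk; the test value 0x000050C0 is the
 * AST2400 default the DT path falls back to.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t ast_mclk_from_mpll(uint32_t mpll, int strap_14318)
{
	uint32_t ref_pll = strap_14318 ? 14318 : 12000;
	uint32_t denum = mpll & 0x1f;
	uint32_t num = (mpll & 0x3fe0) >> 5;
	uint32_t dsel = (mpll & 0xc000) >> 14;
	uint32_t div = (dsel == 3) ? 4 : (dsel >= 1) ? 2 : 1;

	/* Same expression and operator grouping as the driver. */
	return ref_pll * (num + 2) / (denum + 2) * (div * 1000);
}

int main(void)
{
	printf("mclk = %u\n", ast_mclk_from_mpll(0x000050C0, 0));
	return 0;
}
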
+diff --git a/drivers/gpu/drm/ast/ast_post.c b/drivers/gpu/drm/ast/ast_post.c
+index 5331ee1df086..c7c58becb25d 100644
+--- a/drivers/gpu/drm/ast/ast_post.c
++++ b/drivers/gpu/drm/ast/ast_post.c
+@@ -58,13 +58,9 @@ bool ast_is_vga_enabled(struct drm_device *dev)
+               /* TODO 1180 */
+       } else {
+               ch = ast_io_read8(ast, AST_IO_VGA_ENABLE_PORT);
+-              if (ch) {
+-                      ast_open_key(ast);
+-                      ch = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb6, 0xff);
+-                      return ch & 0x04;
+-              }
++              return !!(ch & 0x01);
+       }
+-      return 0;
++      return false;
+ }
+ 
+ static const u8 extreginfo[] = { 0x0f, 0x04, 0x1c, 0xff };
+@@ -375,21 +371,18 @@ void ast_post_gpu(struct drm_device *dev)
+       pci_write_config_dword(ast->dev->pdev, 0x04, reg);
+ 
+       ast_enable_vga(dev);
+-      ast_enable_mmio(dev);
+       ast_open_key(ast);
++      ast_enable_mmio(dev);
+       ast_set_def_ext_reg(dev);
+ 
+-      if (ast->DisableP2A == false)
+-      {
++      if (ast->config_mode == ast_use_p2a) {
+               if (ast->chip == AST2300 || ast->chip == AST2400)
+                       ast_init_dram_2300(dev);
+               else
+                       ast_init_dram_reg(dev);
+ 
+               ast_init_3rdtx(dev);
+-      }
+-      else
+-      {
++      } else {
+               if (ast->tx_chip_type != AST_TX_NONE)
+                       ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa3, 0xcf, 0x80);        /* Enable DVO */
+       }
+@@ -1638,12 +1631,44 @@ static void ast_init_dram_2300(struct drm_device *dev)
+               temp |= 0x73;
+               ast_write32(ast, 0x12008, temp);
+ 
++              param.dram_freq = 396;
+               param.dram_type = AST_DDR3;
++              temp = ast_mindwm(ast, 0x1e6e2070);
+               if (temp & 0x01000000)
+                       param.dram_type = AST_DDR2;
+-              param.dram_chipid = ast->dram_type;
+-              param.dram_freq = ast->mclk;
+-              param.vram_size = ast->vram_size;
++                switch (temp & 0x18000000) {
++              case 0:
++                      param.dram_chipid = AST_DRAM_512Mx16;
++                      break;
++              default:
++              case 0x08000000:
++                      param.dram_chipid = AST_DRAM_1Gx16;
++                      break;
++              case 0x10000000:
++                      param.dram_chipid = AST_DRAM_2Gx16;
++                      break;
++              case 0x18000000:
++                      param.dram_chipid = AST_DRAM_4Gx16;
++                      break;
++              }
++                switch (temp & 0x0c) {
++                default:
++              case 0x00:
++                      param.vram_size = AST_VIDMEM_SIZE_8M;
++                      break;
++
++              case 0x04:
++                      param.vram_size = AST_VIDMEM_SIZE_16M;
++                      break;
++
++              case 0x08:
++                      param.vram_size = AST_VIDMEM_SIZE_32M;
++                      break;
++
++              case 0x0c:
++                      param.vram_size = AST_VIDMEM_SIZE_64M;
++                      break;
++              }
+ 
+               if (param.dram_type == AST_DDR3) {
+                       get_ddr3_info(ast, &param);
+diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
+index 4594477dee00..55e7372ea0a0 100644
+--- a/drivers/gpu/drm/drm_atomic_helper.c
++++ b/drivers/gpu/drm/drm_atomic_helper.c
+@@ -362,7 +362,7 @@ mode_fixup(struct drm_atomic_state *state)
+       struct drm_connector *connector;
+       struct drm_connector_state *conn_state;
+       int i;
+-      bool ret;
++      int ret;
+ 
+       for_each_crtc_in_state(state, crtc, crtc_state, i) {
+               if (!crtc_state->mode_changed &&
+diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
+index 336be31ff3de..ec6474b01dbc 100644
+--- a/drivers/gpu/drm/drm_edid.c
++++ b/drivers/gpu/drm/drm_edid.c
+@@ -145,6 +145,9 @@ static struct edid_quirk {
+ 
+       /* Panel in Samsung NP700G7A-S01PL notebook reports 6bpc */
+       { "SEC", 0xd033, EDID_QUIRK_FORCE_8BPC },
++
++      /* Rotel RSX-1058 forwards sink's EDID but only does HDMI 1.1*/
++      { "ETR", 13896, EDID_QUIRK_FORCE_8BPC },
+ };
+ 
+ /*
+diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
+index e934b541feea..ad531126667c 100644
+--- a/drivers/gpu/drm/drm_fb_helper.c
++++ b/drivers/gpu/drm/drm_fb_helper.c
+@@ -856,6 +856,9 @@ void drm_fb_helper_fini(struct drm_fb_helper *fb_helper)
+       if (!drm_fbdev_emulation)
+               return;
+ 
++      cancel_work_sync(&fb_helper->resume_work);
++      cancel_work_sync(&fb_helper->dirty_work);
++
+       mutex_lock(&kernel_fb_helper_lock);
+       if (!list_empty(&fb_helper->kernel_fb_list)) {
+               list_del(&fb_helper->kernel_fb_list);
+diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
+index 24b5b046754b..7f4a54b94447 100644
+--- a/drivers/gpu/drm/i915/i915_gem.c
++++ b/drivers/gpu/drm/i915/i915_gem.c
+@@ -440,7 +440,7 @@ i915_gem_object_wait_reservation(struct reservation_object *resv,
+                       timeout = i915_gem_object_wait_fence(shared[i],
+                                                            flags, timeout,
+                                                            rps);
+-                      if (timeout <= 0)
++                      if (timeout < 0)
+                               break;
+ 
+                       dma_fence_put(shared[i]);
+@@ -453,7 +453,7 @@ i915_gem_object_wait_reservation(struct reservation_object *resv,
+               excl = reservation_object_get_excl_rcu(resv);
+       }
+ 
+-      if (excl && timeout > 0)
++      if (excl && timeout >= 0)
+               timeout = i915_gem_object_wait_fence(excl, flags, timeout, rps);
+ 
+       dma_fence_put(excl);
+diff --git a/drivers/gpu/drm/i915/i915_gem_internal.c b/drivers/gpu/drm/i915/i915_gem_internal.c
+index d09c74973cb3..f7c4376d1136 100644
+--- a/drivers/gpu/drm/i915/i915_gem_internal.c
++++ b/drivers/gpu/drm/i915/i915_gem_internal.c
+@@ -46,24 +46,12 @@ static struct sg_table *
+ i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj)
+ {
+       struct drm_i915_private *i915 = to_i915(obj->base.dev);
+-      unsigned int npages = obj->base.size / PAGE_SIZE;
+       struct sg_table *st;
+       struct scatterlist *sg;
++      unsigned int npages;
+       int max_order;
+       gfp_t gfp;
+ 
+-      st = kmalloc(sizeof(*st), GFP_KERNEL);
+-      if (!st)
+-              return ERR_PTR(-ENOMEM);
+-
+-      if (sg_alloc_table(st, npages, GFP_KERNEL)) {
+-              kfree(st);
+-              return ERR_PTR(-ENOMEM);
+-      }
+-
+-      sg = st->sgl;
+-      st->nents = 0;
+-
+       max_order = MAX_ORDER;
+ #ifdef CONFIG_SWIOTLB
+       if (swiotlb_nr_tbl()) {
+@@ -85,6 +73,20 @@ i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj)
+               gfp |= __GFP_DMA32;
+       }
+ 
++create_st:
++      st = kmalloc(sizeof(*st), GFP_KERNEL);
++      if (!st)
++              return ERR_PTR(-ENOMEM);
++
++      npages = obj->base.size / PAGE_SIZE;
++      if (sg_alloc_table(st, npages, GFP_KERNEL)) {
++              kfree(st);
++              return ERR_PTR(-ENOMEM);
++      }
++
++      sg = st->sgl;
++      st->nents = 0;
++
+       do {
+               int order = min(fls(npages) - 1, max_order);
+               struct page *page;
+@@ -112,8 +114,15 @@ i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj)
+               sg = __sg_next(sg);
+       } while (1);
+ 
+-      if (i915_gem_gtt_prepare_pages(obj, st))
++      if (i915_gem_gtt_prepare_pages(obj, st)) {
++              /* Failed to dma-map try again with single page sg segments */
++              if (get_order(st->sgl->length)) {
++                      internal_free_pages(st);
++                      max_order = 0;
++                      goto create_st;
++              }
+               goto err;
++      }
+ 
+       /* Mark the pages as dontneed whilst they are still pinned. As soon
+        * as they are unpinned they are allowed to be reaped by the shrinker,
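
The i915_gem_internal change above moves the sg-table allocation behind
a create_st: label so that a failed i915_gem_gtt_prepare_pages() can
free everything and rebuild the table with max_order forced to 0, i.e.
single-page segments. A minimal sketch of that retry shape, with
hypothetical names standing in for the allocation and the DMA-map step
(the real code keys the retry off get_order(st->sgl->length) rather
than a local flag):

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for a DMA map that rejects large segments. */
static bool dma_map_ok(int max_order) { return max_order == 0; }

static int build_table(int max_order)
{
create_st:
	/* ... allocate the table, fill it with chunks of up to
	 * 2^max_order pages, largest first ... */
	if (!dma_map_ok(max_order)) {
		if (max_order) {
			/* Free the partially built table, then retry
			 * with single-page segments, as the hunk does. */
			max_order = 0;
			goto create_st;
		}
		return -1;	/* already order-0: give up */
	}
	return 0;
}

int main(void)
{
	printf("build_table: %d\n", build_table(10));	/* 0, on 2nd pass */
	return 0;
}
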
+diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c
+index b8f403faadbb..d7958dc26d21 100644
+--- a/drivers/gpu/drm/i915/i915_gem_request.c
++++ b/drivers/gpu/drm/i915/i915_gem_request.c
+@@ -1011,8 +1011,13 @@ __i915_request_wait_for_execute(struct drm_i915_gem_request *request,
+                       break;
+               }
+ 
++              if (!timeout) {
++                      timeout = -ETIME;
++                      break;
++              }
++
+               timeout = io_schedule_timeout(timeout);
+-      } while (timeout);
++      } while (1);
+       finish_wait(&request->execute.wait, &wait);
+ 
+       if (flags & I915_WAIT_LOCKED)
+diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
+index abc78bbfc1dc..7325230fff02 100644
+--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
++++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
+@@ -414,6 +414,11 @@ int i915_gem_init_stolen(struct drm_i915_private *dev_priv)
+ 
+       mutex_init(&dev_priv->mm.stolen_lock);
+ 
++      if (intel_vgpu_active(dev_priv)) {
++              DRM_INFO("iGVT-g active, disabling use of stolen memory\n");
++              return 0;
++      }
++
+ #ifdef CONFIG_INTEL_IOMMU
+       if (intel_iommu_gfx_mapped && INTEL_GEN(dev_priv) < 8) {
+               DRM_INFO("DMAR active, disabling use of stolen memory\n");
+diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
+index 0b8e8eb85c19..4daf7dda9cca 100644
+--- a/drivers/gpu/drm/i915/intel_dp.c
++++ b/drivers/gpu/drm/i915/intel_dp.c
+@@ -2820,6 +2820,9 @@ static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
+       enum pipe pipe = intel_dp->pps_pipe;
+       i915_reg_t pp_on_reg = PP_ON_DELAYS(pipe);
+ 
++      if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
++              return;
++
+       edp_panel_vdd_off_sync(intel_dp);
+ 
+       /*
+@@ -2847,9 +2850,6 @@ static void vlv_steal_power_sequencer(struct drm_device *dev,
+ 
+       lockdep_assert_held(&dev_priv->pps_mutex);
+ 
+-      if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
+-              return;
+-
+       for_each_intel_encoder(dev, encoder) {
+               struct intel_dp *intel_dp;
+               enum port port;
+diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
+index f4429f67a4e3..4a862a358c70 100644
+--- a/drivers/gpu/drm/i915/intel_opregion.c
++++ b/drivers/gpu/drm/i915/intel_opregion.c
+@@ -982,7 +982,18 @@ int intel_opregion_setup(struct drm_i915_private *dev_priv)
+                       opregion->vbt_size = vbt_size;
+               } else {
+                       vbt = base + OPREGION_VBT_OFFSET;
+-                      vbt_size = OPREGION_ASLE_EXT_OFFSET - OPREGION_VBT_OFFSET;
++                      /*
++                       * The VBT specification says that if the ASLE ext
++                       * mailbox is not used its area is reserved, but
++                       * on some CHT boards the VBT extends into the
++                       * ASLE ext area. Allow this even though it is
++                       * against the spec, so we do not end up rejecting
++                       * the VBT on those boards (and end up not finding the
++                       * LCD panel because of this).
++                       */
++                      vbt_size = (mboxes & MBOX_ASLE_EXT) ?
++                              OPREGION_ASLE_EXT_OFFSET : OPREGION_SIZE;
++                      vbt_size -= OPREGION_VBT_OFFSET;
+                       if (intel_bios_is_valid_vbt(vbt, vbt_size)) {
+                               DRM_DEBUG_KMS("Found valid VBT in ACPI OpRegion (Mailbox #4)\n");
+                               opregion->vbt = vbt;
+diff --git a/drivers/gpu/drm/imx/imx-tve.c b/drivers/gpu/drm/imx/imx-tve.c
+index 3b602ee33c44..0c6bf12d45b1 100644
+--- a/drivers/gpu/drm/imx/imx-tve.c
++++ b/drivers/gpu/drm/imx/imx-tve.c
+@@ -98,6 +98,8 @@
+ /* TVE_TST_MODE_REG */
+ #define TVE_TVDAC_TEST_MODE_MASK      (0x7 << 0)
+ 
++#define IMX_TVE_DAC_VOLTAGE   2750000
++
+ enum {
+       TVE_MODE_TVOUT,
+       TVE_MODE_VGA,
+@@ -621,9 +623,8 @@ static int imx_tve_bind(struct device *dev, struct device *master, void *data)
+ 
+       tve->dac_reg = devm_regulator_get(dev, "dac");
+       if (!IS_ERR(tve->dac_reg)) {
+-              ret = regulator_set_voltage(tve->dac_reg, 2750000, 2750000);
+-              if (ret)
+-                      return ret;
++              if (regulator_get_voltage(tve->dac_reg) != IMX_TVE_DAC_VOLTAGE)
++                      dev_warn(dev, "dac voltage is not %d uV\n", IMX_TVE_DAC_VOLTAGE);
+               ret = regulator_enable(tve->dac_reg);
+               if (ret)
+                       return ret;
+diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c
+index c829cfb02fc4..00cfb5d2875f 100644
+--- a/drivers/gpu/drm/radeon/radeon_bios.c
++++ b/drivers/gpu/drm/radeon/radeon_bios.c
+@@ -596,52 +596,58 @@ static bool radeon_read_disabled_bios(struct radeon_device *rdev)
+ #ifdef CONFIG_ACPI
+ static bool radeon_acpi_vfct_bios(struct radeon_device *rdev)
+ {
+-      bool ret = false;
+       struct acpi_table_header *hdr;
+       acpi_size tbl_size;
+       UEFI_ACPI_VFCT *vfct;
+-      GOP_VBIOS_CONTENT *vbios;
+-      VFCT_IMAGE_HEADER *vhdr;
++      unsigned offset;
+ 
+       if (!ACPI_SUCCESS(acpi_get_table("VFCT", 1, &hdr)))
+               return false;
+       tbl_size = hdr->length;
+       if (tbl_size < sizeof(UEFI_ACPI_VFCT)) {
+               DRM_ERROR("ACPI VFCT table present but broken (too short #1)\n");
+-              goto out_unmap;
++              return false;
+       }
+ 
+       vfct = (UEFI_ACPI_VFCT *)hdr;
+-      if (vfct->VBIOSImageOffset + sizeof(VFCT_IMAGE_HEADER) > tbl_size) {
+-              DRM_ERROR("ACPI VFCT table present but broken (too short #2)\n");
+-              goto out_unmap;
+-      }
++      offset = vfct->VBIOSImageOffset;
+ 
+-      vbios = (GOP_VBIOS_CONTENT *)((char *)hdr + vfct->VBIOSImageOffset);
+-      vhdr = &vbios->VbiosHeader;
+-      DRM_INFO("ACPI VFCT contains a BIOS for %02x:%02x.%d %04x:%04x, size %d\n",
+-                      vhdr->PCIBus, vhdr->PCIDevice, vhdr->PCIFunction,
+-                      vhdr->VendorID, vhdr->DeviceID, vhdr->ImageLength);
+-
+-      if (vhdr->PCIBus != rdev->pdev->bus->number ||
+-          vhdr->PCIDevice != PCI_SLOT(rdev->pdev->devfn) ||
+-          vhdr->PCIFunction != PCI_FUNC(rdev->pdev->devfn) ||
+-          vhdr->VendorID != rdev->pdev->vendor ||
+-          vhdr->DeviceID != rdev->pdev->device) {
+-              DRM_INFO("ACPI VFCT table is not for this card\n");
+-              goto out_unmap;
+-      }
++      while (offset < tbl_size) {
++              GOP_VBIOS_CONTENT *vbios = (GOP_VBIOS_CONTENT *)((char *)hdr + offset);
++              VFCT_IMAGE_HEADER *vhdr = &vbios->VbiosHeader;
+ 
+-      if (vfct->VBIOSImageOffset + sizeof(VFCT_IMAGE_HEADER) + vhdr->ImageLength > tbl_size) {
+-              DRM_ERROR("ACPI VFCT image truncated\n");
+-              goto out_unmap;
+-      }
++              offset += sizeof(VFCT_IMAGE_HEADER);
++              if (offset > tbl_size) {
++                      DRM_ERROR("ACPI VFCT image header truncated\n");
++                      return false;
++              }
+ 
+-      rdev->bios = kmemdup(&vbios->VbiosContent, vhdr->ImageLength, GFP_KERNEL);
+-      ret = !!rdev->bios;
++              offset += vhdr->ImageLength;
++              if (offset > tbl_size) {
++                      DRM_ERROR("ACPI VFCT image truncated\n");
++                      return false;
++              }
++
++              if (vhdr->ImageLength &&
++                  vhdr->PCIBus == rdev->pdev->bus->number &&
++                  vhdr->PCIDevice == PCI_SLOT(rdev->pdev->devfn) &&
++                  vhdr->PCIFunction == PCI_FUNC(rdev->pdev->devfn) &&
++                  vhdr->VendorID == rdev->pdev->vendor &&
++                  vhdr->DeviceID == rdev->pdev->device) {
++                      rdev->bios = kmemdup(&vbios->VbiosContent,
++                                           vhdr->ImageLength,
++                                           GFP_KERNEL);
++
++                      if (!rdev->bios) {
++                              kfree(rdev->bios);
++                              return false;
++                      }
++                      return true;
++              }
++      }
+ 
+-out_unmap:
+-      return ret;
++      DRM_ERROR("ACPI VFCT table present but broken (too short #2)\n");
++      return false;
+ }
+ #else
+ static inline bool radeon_acpi_vfct_bios(struct radeon_device *rdev)
+diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
+index d5063618efa7..86e3b233b722 100644
+--- a/drivers/gpu/drm/ttm/ttm_bo.c
++++ b/drivers/gpu/drm/ttm/ttm_bo.c
+@@ -1670,7 +1670,6 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
+       struct ttm_buffer_object *bo;
+       int ret = -EBUSY;
+       int put_count;
+-      uint32_t swap_placement = (TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM);
+ 
+       spin_lock(&glob->lru_lock);
+       list_for_each_entry(bo, &glob->swap_lru, swap) {
+@@ -1701,7 +1700,8 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
+        * Move to system cached
+        */
+ 
+-      if ((bo->mem.placement & swap_placement) != swap_placement) {
++      if (bo->mem.mem_type != TTM_PL_SYSTEM ||
++          bo->ttm->caching_state != tt_cached) {
+               struct ttm_mem_reg evict_mem;
+ 
+               evict_mem = bo->mem;
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+index 18061a4bc2f2..36005bdf3749 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+@@ -199,9 +199,14 @@ static const struct drm_ioctl_desc vmw_ioctls[] = {
+       VMW_IOCTL_DEF(VMW_PRESENT_READBACK,
+                     vmw_present_readback_ioctl,
+                     DRM_MASTER | DRM_AUTH),
++      /*
++       * The permissions of the below ioctl are overridden in
++       * vmw_generic_ioctl(). We require either
++       * DRM_MASTER or capable(CAP_SYS_ADMIN).
++       */
+       VMW_IOCTL_DEF(VMW_UPDATE_LAYOUT,
+                     vmw_kms_update_layout_ioctl,
+-                    DRM_MASTER | DRM_CONTROL_ALLOW),
++                    DRM_RENDER_ALLOW),
+       VMW_IOCTL_DEF(VMW_CREATE_SHADER,
+                     vmw_shader_define_ioctl,
+                     DRM_AUTH | DRM_RENDER_ALLOW),
+@@ -1125,6 +1130,10 @@ static long vmw_generic_ioctl(struct file *filp, unsigned int cmd,
+ 
+                       return (long) vmw_execbuf_ioctl(dev, arg, file_priv,
+                                                       _IOC_SIZE(cmd));
++              } else if (nr == DRM_COMMAND_BASE + DRM_VMW_UPDATE_LAYOUT) {
++                      if (!drm_is_current_master(file_priv) &&
++                          !capable(CAP_SYS_ADMIN))
++                              return -EACCES;
+               }
+ 
+               if (unlikely(ioctl->cmd != cmd))
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+index 1e59a486bba8..59ff4197173a 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+@@ -41,9 +41,9 @@
+ #include <drm/ttm/ttm_module.h>
+ #include "vmwgfx_fence.h"
+ 
+-#define VMWGFX_DRIVER_DATE "20160210"
++#define VMWGFX_DRIVER_DATE "20170221"
+ #define VMWGFX_DRIVER_MAJOR 2
+-#define VMWGFX_DRIVER_MINOR 11
++#define VMWGFX_DRIVER_MINOR 12
+ #define VMWGFX_DRIVER_PATCHLEVEL 0
+ #define VMWGFX_FILE_PAGE_OFFSET 0x00100000
+ #define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
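
The two vmwgfx hunks work as a pair: the table entry for
VMW_UPDATE_LAYOUT is relaxed to DRM_RENDER_ALLOW so the call reaches the
driver at all, and vmw_generic_ioctl() then enforces the real policy,
current master or CAP_SYS_ADMIN. A toy model of that split; the helpers
below are stubs, not the DRM API:

#include <stdbool.h>
#include <stdio.h>

/* Stubs standing in for drm_is_current_master()/capable(CAP_SYS_ADMIN). */
static bool is_current_master(void) { return false; }
static bool has_sys_admin(void)     { return true; }

static int update_layout(void) { return 0; }	/* permissive table entry */

static int generic_ioctl(int nr, int update_layout_nr)
{
	/* The table lets render clients through; the dispatcher applies
	 * the stricter check for this one ioctl, as the hunk does. */
	if (nr == update_layout_nr &&
	    !is_current_master() && !has_sys_admin())
		return -13;	/* -EACCES */
	return update_layout();
}

int main(void)
{
	printf("ioctl: %d\n", generic_ioctl(42, 42));	/* 0: admin passes */
	return 0;
}
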
+diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
+index fbd8ce6d7ff3..27228fe57eca 100644
+--- a/drivers/hv/hv.c
++++ b/drivers/hv/hv.c
+@@ -220,7 +220,7 @@ int hv_init(void)
+       /* See if the hypercall page is already set */
+       rdmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
+ 
+-      virtaddr = __vmalloc(PAGE_SIZE, GFP_KERNEL, PAGE_KERNEL_EXEC);
++      virtaddr = __vmalloc(PAGE_SIZE, GFP_KERNEL, PAGE_KERNEL_RX);
+ 
+       if (!virtaddr)
+               goto cleanup;
+diff --git a/drivers/infiniband/hw/mlx5/srq.c b/drivers/infiniband/hw/mlx5/srq.c
+index 6f4397ee1ed6..7cb145f9a6db 100644
+--- a/drivers/infiniband/hw/mlx5/srq.c
++++ b/drivers/infiniband/hw/mlx5/srq.c
+@@ -165,8 +165,6 @@ static int create_srq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_srq *srq,
+       int err;
+       int i;
+       struct mlx5_wqe_srq_next_seg *next;
+-      int page_shift;
+-      int npages;
+ 
+       err = mlx5_db_alloc(dev->mdev, &srq->db);
+       if (err) {
+@@ -179,7 +177,6 @@ static int create_srq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_srq *srq,
+               err = -ENOMEM;
+               goto err_db;
+       }
+-      page_shift = srq->buf.page_shift;
+ 
+       srq->head    = 0;
+       srq->tail    = srq->msrq.max - 1;
+@@ -191,10 +188,8 @@ static int create_srq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_srq *srq,
+                       cpu_to_be16((i + 1) & (srq->msrq.max - 1));
+       }
+ 
+-      npages = DIV_ROUND_UP(srq->buf.npages, 1 << (page_shift - PAGE_SHIFT));
+-      mlx5_ib_dbg(dev, "buf_size %d, page_shift %d, npages %d, calc npages %d\n",
+-                  buf_size, page_shift, srq->buf.npages, npages);
+-      in->pas = mlx5_vzalloc(sizeof(*in->pas) * npages);
++      mlx5_ib_dbg(dev, "srq->buf.page_shift = %d\n", srq->buf.page_shift);
++      in->pas = mlx5_vzalloc(sizeof(*in->pas) * srq->buf.npages);
+       if (!in->pas) {
+               err = -ENOMEM;
+               goto err_buf;
+@@ -208,7 +203,7 @@ static int create_srq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_srq *srq,
+       }
+       srq->wq_sig = !!srq_signature;
+ 
+-      in->log_page_size = page_shift - MLX5_ADAPTER_PAGE_SHIFT;
++      in->log_page_size = srq->buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT;
+       if (MLX5_CAP_GEN(dev->mdev, cqe_version) == MLX5_CQE_VERSION_V1 &&
+           in->type == IB_SRQT_XRC)
+               in->user_index = MLX5_IB_DEFAULT_UIDX;
+diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+index 096c4f6fbd65..1c7a9a16efc7 100644
+--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
++++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+@@ -1507,12 +1507,14 @@ static ssize_t set_mode(struct device *d, struct device_attribute *attr,
+ 
+       ret = ipoib_set_mode(dev, buf);
+ 
+-      rtnl_unlock();
+-
+-      if (!ret)
+-              return count;
++      /* The assumption is that the function ipoib_set_mode returned
++       * with the rtnl held by it, if not the value -EBUSY returned,
++       * then no need to rtnl_unlock
++       */
++      if (ret != -EBUSY)
++              rtnl_unlock();
+ 
+-      return ret;
++      return (!ret || ret == -EBUSY) ? count : ret;
+ }
+ 
+ static DEVICE_ATTR(mode, S_IWUSR | S_IRUGO, show_mode, set_mode);
+diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
+index 3ce0765a05ab..4584c03bc355 100644
+--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
++++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
+@@ -481,8 +481,7 @@ int ipoib_set_mode(struct net_device *dev, const char *buf)
+               priv->tx_wr.wr.send_flags &= ~IB_SEND_IP_CSUM;
+ 
+               ipoib_flush_paths(dev);
+-              rtnl_lock();
+-              return 0;
++              return (!rtnl_trylock()) ? -EBUSY : 0;
+       }
+ 
+       if (!strcmp(buf, "datagram\n")) {
+@@ -491,8 +490,7 @@ int ipoib_set_mode(struct net_device *dev, const char *buf)
+               dev_set_mtu(dev, min(priv->mcast_mtu, dev->mtu));
+               rtnl_unlock();
+               ipoib_flush_paths(dev);
+-              rtnl_lock();
+-              return 0;
++              return (!rtnl_trylock()) ? -EBUSY : 0;
+       }
+ 
+       return -EINVAL;
+@@ -716,6 +714,14 @@ int ipoib_check_sm_sendonly_fullmember_support(struct ipoib_dev_priv *priv)
+       return ret;
+ }
+ 
++static void push_pseudo_header(struct sk_buff *skb, const char *daddr)
++{
++      struct ipoib_pseudo_header *phdr;
++
++      phdr = (struct ipoib_pseudo_header *)skb_push(skb, sizeof(*phdr));
++      memcpy(phdr->hwaddr, daddr, INFINIBAND_ALEN);
++}
++
+ void ipoib_flush_paths(struct net_device *dev)
+ {
+       struct ipoib_dev_priv *priv = netdev_priv(dev);
+@@ -940,8 +946,7 @@ static void neigh_add_path(struct sk_buff *skb, u8 *daddr,
+                       }
+                       if (skb_queue_len(&neigh->queue) <
+                           IPOIB_MAX_PATH_REC_QUEUE) {
+-                              /* put pseudoheader back on for next time */
+-                              skb_push(skb, IPOIB_PSEUDO_LEN);
++                              push_pseudo_header(skb, neigh->daddr);
+                               __skb_queue_tail(&neigh->queue, skb);
+                       } else {
+                               ipoib_warn(priv, "queue length limit %d. Packet drop.\n",
+@@ -959,10 +964,12 @@ static void neigh_add_path(struct sk_buff *skb, u8 *daddr,
+ 
+               if (!path->query && path_rec_start(dev, path))
+                       goto err_path;
+-              if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE)
++              if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
++                      push_pseudo_header(skb, neigh->daddr);
+                       __skb_queue_tail(&neigh->queue, skb);
+-              else
++              } else {
+                       goto err_drop;
++              }
+       }
+ 
+       spin_unlock_irqrestore(&priv->lock, flags);
+@@ -998,8 +1005,7 @@ static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
+               }
+               if (path) {
+               if (skb_queue_len(&path->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
+-                              /* put pseudoheader back on for next time */
+-                              skb_push(skb, IPOIB_PSEUDO_LEN);
++                              push_pseudo_header(skb, phdr->hwaddr);
+                               __skb_queue_tail(&path->queue, skb);
+                       } else {
+                               ++dev->stats.tx_dropped;
+@@ -1031,8 +1037,7 @@ static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
+               return;
+       } else if ((path->query || !path_rec_start(dev, path)) &&
+                  skb_queue_len(&path->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
+-              /* put pseudoheader back on for next time */
+-              skb_push(skb, IPOIB_PSEUDO_LEN);
++              push_pseudo_header(skb, phdr->hwaddr);
+               __skb_queue_tail(&path->queue, skb);
+       } else {
+               ++dev->stats.tx_dropped;
+@@ -1113,8 +1118,7 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
+       }
+ 
+       if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
+-              /* put pseudoheader back on for next time */
+-              skb_push(skb, sizeof(*phdr));
++              push_pseudo_header(skb, phdr->hwaddr);
+               spin_lock_irqsave(&priv->lock, flags);
+               __skb_queue_tail(&neigh->queue, skb);
+               spin_unlock_irqrestore(&priv->lock, flags);
+@@ -1146,7 +1150,6 @@ static int ipoib_hard_header(struct sk_buff *skb,
+                            unsigned short type,
+                            const void *daddr, const void *saddr, unsigned len)
+ {
+-      struct ipoib_pseudo_header *phdr;
+       struct ipoib_header *header;
+ 
+       header = (struct ipoib_header *) skb_push(skb, sizeof *header);
+@@ -1159,8 +1162,7 @@ static int ipoib_hard_header(struct sk_buff *skb,
+        * destination address into skb hard header so we can figure out where
+        * to send the packet later.
+        */
+-      phdr = (struct ipoib_pseudo_header *) skb_push(skb, sizeof(*phdr));
+-      memcpy(phdr->hwaddr, daddr, INFINIBAND_ALEN);
++      push_pseudo_header(skb, daddr);
+ 
+       return IPOIB_HARD_LEN;
+ }
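
One subtlety in the ipoib hunks above deserves spelling out:
ipoib_set_mode() now returns with the RTNL re-acquired on every path
except when rtnl_trylock() fails, in which case it reports -EBUSY and
set_mode() must skip the unlock. A compact userspace model of that
hand-off, using a pthread mutex in place of the RTNL; all names are
illustrative:

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t big_lock = PTHREAD_MUTEX_INITIALIZER;

static int do_mode_change(void)
{
	pthread_mutex_unlock(&big_lock);	/* drop lock for slow work */
	/* ... flush paths, etc. ... */
	/* Must come back holding the lock, or report -EBUSY. */
	return pthread_mutex_trylock(&big_lock) ? -EBUSY : 0;
}

int main(void)
{
	pthread_mutex_lock(&big_lock);
	int ret = do_mode_change();
	if (ret != -EBUSY)		/* only unlock if we still hold it */
		pthread_mutex_unlock(&big_lock);
	printf("ret=%d\n", ret);
	return 0;
}
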
+diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
+index 79bf48477ddb..d9b57f5958b5 100644
+--- a/drivers/infiniband/ulp/srp/ib_srp.c
++++ b/drivers/infiniband/ulp/srp/ib_srp.c
+@@ -371,7 +371,6 @@ static struct srp_fr_pool *srp_create_fr_pool(struct ib_device *device,
+       struct srp_fr_desc *d;
+       struct ib_mr *mr;
+       int i, ret = -EINVAL;
+-      enum ib_mr_type mr_type;
+ 
+       if (pool_size <= 0)
+               goto err;
+@@ -385,13 +384,9 @@ static struct srp_fr_pool *srp_create_fr_pool(struct ib_device *device,
+       spin_lock_init(&pool->lock);
+       INIT_LIST_HEAD(&pool->free_list);
+ 
+-      if (device->attrs.device_cap_flags & IB_DEVICE_SG_GAPS_REG)
+-              mr_type = IB_MR_TYPE_SG_GAPS;
+-      else
+-              mr_type = IB_MR_TYPE_MEM_REG;
+-
+       for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
+-              mr = ib_alloc_mr(pd, mr_type, max_page_list_len);
++              mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG,
++                               max_page_list_len);
+               if (IS_ERR(mr)) {
+                       ret = PTR_ERR(mr);
+                       if (ret == -ENOMEM)
+@@ -1889,17 +1884,24 @@ static void srp_process_rsp(struct srp_rdma_ch *ch, struct srp_rsp *rsp)
+       if (unlikely(rsp->tag & SRP_TAG_TSK_MGMT)) {
+               spin_lock_irqsave(&ch->lock, flags);
+               ch->req_lim += be32_to_cpu(rsp->req_lim_delta);
++              if (rsp->tag == ch->tsk_mgmt_tag) {
++                      ch->tsk_mgmt_status = -1;
++                      if (be32_to_cpu(rsp->resp_data_len) >= 4)
++                              ch->tsk_mgmt_status = rsp->data[3];
++                      complete(&ch->tsk_mgmt_done);
++              } else {
++                      shost_printk(KERN_ERR, target->scsi_host,
++                                   "Received tsk mgmt response too late for tag %#llx\n",
++                                   rsp->tag);
++              }
+               spin_unlock_irqrestore(&ch->lock, flags);
+-
+-              ch->tsk_mgmt_status = -1;
+-              if (be32_to_cpu(rsp->resp_data_len) >= 4)
+-                      ch->tsk_mgmt_status = rsp->data[3];
+-              complete(&ch->tsk_mgmt_done);
+       } else {
+               scmnd = scsi_host_find_tag(target->scsi_host, rsp->tag);
+-              if (scmnd) {
++              if (scmnd && scmnd->host_scribble) {
+                       req = (void *)scmnd->host_scribble;
+                       scmnd = srp_claim_req(ch, req, NULL, scmnd);
++              } else {
++                      scmnd = NULL;
+               }
+               if (!scmnd) {
+                       shost_printk(KERN_ERR, target->scsi_host,
+@@ -2531,19 +2533,18 @@ srp_change_queue_depth(struct scsi_device *sdev, int qdepth)
+ }
+ 
+ static int srp_send_tsk_mgmt(struct srp_rdma_ch *ch, u64 req_tag, u64 lun,
+-                           u8 func)
++                           u8 func, u8 *status)
+ {
+       struct srp_target_port *target = ch->target;
+       struct srp_rport *rport = target->rport;
+       struct ib_device *dev = target->srp_host->srp_dev->dev;
+       struct srp_iu *iu;
+       struct srp_tsk_mgmt *tsk_mgmt;
++      int res;
+ 
+       if (!ch->connected || target->qp_in_error)
+               return -1;
+ 
+-      init_completion(&ch->tsk_mgmt_done);
+-
+       /*
+        * Lock the rport mutex to avoid that srp_create_ch_ib() is
+        * invoked while a task management function is being sent.
+@@ -2566,10 +2567,16 @@ static int srp_send_tsk_mgmt(struct srp_rdma_ch *ch, u64 req_tag, u64 lun,
+ 
+       tsk_mgmt->opcode        = SRP_TSK_MGMT;
+       int_to_scsilun(lun, &tsk_mgmt->lun);
+-      tsk_mgmt->tag           = req_tag | SRP_TAG_TSK_MGMT;
+       tsk_mgmt->tsk_mgmt_func = func;
+       tsk_mgmt->task_tag      = req_tag;
+ 
++      spin_lock_irq(&ch->lock);
++      ch->tsk_mgmt_tag = (ch->tsk_mgmt_tag + 1) | SRP_TAG_TSK_MGMT;
++      tsk_mgmt->tag = ch->tsk_mgmt_tag;
++      spin_unlock_irq(&ch->lock);
++
++      init_completion(&ch->tsk_mgmt_done);
++
+       ib_dma_sync_single_for_device(dev, iu->dma, sizeof *tsk_mgmt,
+                                     DMA_TO_DEVICE);
+       if (srp_post_send(ch, iu, sizeof(*tsk_mgmt))) {
+@@ -2578,13 +2585,15 @@ static int srp_send_tsk_mgmt(struct srp_rdma_ch *ch, u64 req_tag, u64 lun,
+ 
+               return -1;
+       }
++      res = wait_for_completion_timeout(&ch->tsk_mgmt_done,
++                                      msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS));
++      if (res > 0 && status)
++              *status = ch->tsk_mgmt_status;
+       mutex_unlock(&rport->mutex);
+ 
+-      if (!wait_for_completion_timeout(&ch->tsk_mgmt_done,
+-                                       msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS)))
+-              return -1;
++      WARN_ON_ONCE(res < 0);
+ 
+-      return 0;
++      return res > 0 ? 0 : -1;
+ }
+ 
+ static int srp_abort(struct scsi_cmnd *scmnd)
+@@ -2610,7 +2619,7 @@ static int srp_abort(struct scsi_cmnd *scmnd)
+       shost_printk(KERN_ERR, target->scsi_host,
+                    "Sending SRP abort for tag %#x\n", tag);
+       if (srp_send_tsk_mgmt(ch, tag, scmnd->device->lun,
+-                            SRP_TSK_ABORT_TASK) == 0)
++                            SRP_TSK_ABORT_TASK, NULL) == 0)
+               ret = SUCCESS;
+       else if (target->rport->state == SRP_RPORT_LOST)
+               ret = FAST_IO_FAIL;
+@@ -2628,14 +2637,15 @@ static int srp_reset_device(struct scsi_cmnd *scmnd)
+       struct srp_target_port *target = host_to_target(scmnd->device->host);
+       struct srp_rdma_ch *ch;
+       int i;
++      u8 status;
+ 
+       shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n");
+ 
+       ch = &target->ch[0];
+       if (srp_send_tsk_mgmt(ch, SRP_TAG_NO_REQ, scmnd->device->lun,
+-                            SRP_TSK_LUN_RESET))
++                            SRP_TSK_LUN_RESET, &status))
+               return FAILED;
+-      if (ch->tsk_mgmt_status)
++      if (status)
+               return FAILED;
+ 
+       for (i = 0; i < target->ch_count; i++) {
+@@ -2664,9 +2674,8 @@ static int srp_slave_alloc(struct scsi_device *sdev)
+       struct Scsi_Host *shost = sdev->host;
+       struct srp_target_port *target = host_to_target(shost);
+       struct srp_device *srp_dev = target->srp_host->srp_dev;
+-      struct ib_device *ibdev = srp_dev->dev;
+ 
+-      if (!(ibdev->attrs.device_cap_flags & IB_DEVICE_SG_GAPS_REG))
++      if (true)
+               blk_queue_virt_boundary(sdev->request_queue,
+                                       ~srp_dev->mr_page_mask);
+ 
+diff --git a/drivers/infiniband/ulp/srp/ib_srp.h b/drivers/infiniband/ulp/srp/ib_srp.h
+index 21c69695f9d4..32ed40db3ca2 100644
+--- a/drivers/infiniband/ulp/srp/ib_srp.h
++++ b/drivers/infiniband/ulp/srp/ib_srp.h
+@@ -163,6 +163,7 @@ struct srp_rdma_ch {
+       int                     max_ti_iu_len;
+       int                     comp_vector;
+ 
++      u64                     tsk_mgmt_tag;
+       struct completion       tsk_mgmt_done;
+       u8                      tsk_mgmt_status;
+       bool                    connected;
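
The srp hunks above stop copying the SCSI request tag into
task-management requests: each TMF takes a fresh ch->tsk_mgmt_tag under
the channel lock, and srp_process_rsp() completes the waiter only while
the response tag still matches, so a response that arrives after a
newer TMF is logged and dropped. A sketch of just the tag bookkeeping;
the marker-bit value below is an assumption, not the driver's
definition:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Assumed stand-in for the driver's SRP_TAG_TSK_MGMT marker bit. */
#define TSK_MGMT_MARK (1ULL << 63)

struct ch { uint64_t tsk_mgmt_tag; };

static uint64_t next_tsk_mgmt_tag(struct ch *ch)
{
	/* Increment-and-mark, as the hunk does under ch->lock. */
	ch->tsk_mgmt_tag = (ch->tsk_mgmt_tag + 1) | TSK_MGMT_MARK;
	return ch->tsk_mgmt_tag;
}

static bool rsp_matches(const struct ch *ch, uint64_t rsp_tag)
{
	return rsp_tag == ch->tsk_mgmt_tag;	/* stale tags are dropped */
}

int main(void)
{
	struct ch ch = { 0 };
	uint64_t stale = next_tsk_mgmt_tag(&ch);

	(void)next_tsk_mgmt_tag(&ch);	/* a newer TMF supersedes it */
	printf("stale matches: %d\n", rsp_matches(&ch, stale));	/* 0 */
	return 0;
}
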
+diff --git a/drivers/memory/atmel-ebi.c b/drivers/memory/atmel-ebi.c
+index 047d6fcdcec2..1eaaa2be8ff2 100644
+--- a/drivers/memory/atmel-ebi.c
++++ b/drivers/memory/atmel-ebi.c
+@@ -93,7 +93,7 @@ static void at91sam9_ebi_get_config(struct at91_ebi_dev *ebid,
+                                   struct at91_ebi_dev_config *conf)
+ {
+       struct at91sam9_smc_generic_fields *fields = &ebid->ebi->sam9;
+-      unsigned int clk_rate = clk_get_rate(ebid->ebi->clk);
++      unsigned int clk_period = NSEC_PER_SEC / clk_get_rate(ebid->ebi->clk);
+       struct at91sam9_ebi_dev_config *config = &conf->sam9;
+       struct at91sam9_smc_timings *timings = &config->timings;
+       unsigned int val;
+@@ -102,43 +102,43 @@ static void at91sam9_ebi_get_config(struct at91_ebi_dev *ebid,
+       config->mode = val & ~AT91_SMC_TDF;
+ 
+       val = (val & AT91_SMC_TDF) >> 16;
+-      timings->tdf_ns = clk_rate * val;
++      timings->tdf_ns = clk_period * val;
+ 
+       regmap_fields_read(fields->setup, conf->cs, &val);
+       timings->ncs_rd_setup_ns = (val >> 24) & 0x1f;
+       timings->ncs_rd_setup_ns += ((val >> 29) & 0x1) * 128;
+-      timings->ncs_rd_setup_ns *= clk_rate;
++      timings->ncs_rd_setup_ns *= clk_period;
+       timings->nrd_setup_ns = (val >> 16) & 0x1f;
+       timings->nrd_setup_ns += ((val >> 21) & 0x1) * 128;
+-      timings->nrd_setup_ns *= clk_rate;
++      timings->nrd_setup_ns *= clk_period;
+       timings->ncs_wr_setup_ns = (val >> 8) & 0x1f;
+       timings->ncs_wr_setup_ns += ((val >> 13) & 0x1) * 128;
+-      timings->ncs_wr_setup_ns *= clk_rate;
++      timings->ncs_wr_setup_ns *= clk_period;
+       timings->nwe_setup_ns = val & 0x1f;
+       timings->nwe_setup_ns += ((val >> 5) & 0x1) * 128;
+-      timings->nwe_setup_ns *= clk_rate;
++      timings->nwe_setup_ns *= clk_period;
+ 
+       regmap_fields_read(fields->pulse, conf->cs, &val);
+       timings->ncs_rd_pulse_ns = (val >> 24) & 0x3f;
+       timings->ncs_rd_pulse_ns += ((val >> 30) & 0x1) * 256;
+-      timings->ncs_rd_pulse_ns *= clk_rate;
++      timings->ncs_rd_pulse_ns *= clk_period;
+       timings->nrd_pulse_ns = (val >> 16) & 0x3f;
+       timings->nrd_pulse_ns += ((val >> 22) & 0x1) * 256;
+-      timings->nrd_pulse_ns *= clk_rate;
++      timings->nrd_pulse_ns *= clk_period;
+       timings->ncs_wr_pulse_ns = (val >> 8) & 0x3f;
+       timings->ncs_wr_pulse_ns += ((val >> 14) & 0x1) * 256;
+-      timings->ncs_wr_pulse_ns *= clk_rate;
++      timings->ncs_wr_pulse_ns *= clk_period;
+       timings->nwe_pulse_ns = val & 0x3f;
+       timings->nwe_pulse_ns += ((val >> 6) & 0x1) * 256;
+-      timings->nwe_pulse_ns *= clk_rate;
++      timings->nwe_pulse_ns *= clk_period;
+ 
+       regmap_fields_read(fields->cycle, conf->cs, &val);
+       timings->nrd_cycle_ns = (val >> 16) & 0x7f;
+       timings->nrd_cycle_ns += ((val >> 23) & 0x3) * 256;
+-      timings->nrd_cycle_ns *= clk_rate;
++      timings->nrd_cycle_ns *= clk_period;
+       timings->nwe_cycle_ns = val & 0x7f;
+       timings->nwe_cycle_ns += ((val >> 7) & 0x3) * 256;
+-      timings->nwe_cycle_ns *= clk_rate;
++      timings->nwe_cycle_ns *= clk_period;
+ }
+ 
+ static int at91_xlate_timing(struct device_node *np, const char *prop,
+@@ -334,6 +334,7 @@ static int at91sam9_ebi_apply_config(struct at91_ebi_dev *ebid,
+                                    struct at91_ebi_dev_config *conf)
+ {
+       unsigned int clk_rate = clk_get_rate(ebid->ebi->clk);
++      unsigned int clk_period = NSEC_PER_SEC / clk_rate;
+       struct at91sam9_ebi_dev_config *config = &conf->sam9;
+       struct at91sam9_smc_timings *timings = &config->timings;
+       struct at91sam9_smc_generic_fields *fields = &ebid->ebi->sam9;
+@@ -376,7 +377,7 @@ static int at91sam9_ebi_apply_config(struct at91_ebi_dev *ebid,
+       val |= AT91SAM9_SMC_NWECYCLE(coded_val);
+       regmap_fields_write(fields->cycle, conf->cs, val);
+ 
+-      val = DIV_ROUND_UP(timings->tdf_ns, clk_rate);
++      val = DIV_ROUND_UP(timings->tdf_ns, clk_period);
+       if (val > AT91_SMC_TDF_MAX)
+               val = AT91_SMC_TDF_MAX;
+       regmap_fields_write(fields->mode, conf->cs,
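
The atmel-ebi hunks are a pure unit fix: the SMC registers hold cycle
counts, so converting to nanoseconds means multiplying by the clock
period (NSEC_PER_SEC / rate) rather than by the rate in Hz, and the
write path divides by the same period. In miniature, with a made-up
133 MHz EBI clock:

#include <stdio.h>

#define NSEC_PER_SEC 1000000000UL

int main(void)
{
	unsigned long rate = 133000000;			/* Hz, illustrative */
	unsigned long clk_period = NSEC_PER_SEC / rate;	/* ~7 ns per cycle */
	unsigned int cycles = 16;		/* a field read from SETUP */

	printf("%u cycles = %lu ns\n", cycles, cycles * clk_period);
	/* The apply path inverts this: DIV_ROUND_UP(ns, clk_period). */
	return 0;
}
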
+diff --git a/drivers/misc/cxl/cxl.h b/drivers/misc/cxl/cxl.h
+index b24d76723fb0..08e7d3a54425 100644
+--- a/drivers/misc/cxl/cxl.h
++++ b/drivers/misc/cxl/cxl.h
+@@ -419,6 +419,9 @@ struct cxl_afu {
+       struct mutex contexts_lock;
+       spinlock_t afu_cntl_lock;
+ 
++      /* -1: AFU deconfigured/locked, >= 0: number of readers */
++      atomic_t configured_state;
++
+       /* AFU error buffer fields and bin attribute for sysfs */
+       u64 eb_len, eb_offset;
+       struct bin_attribute attr_eb;
+diff --git a/drivers/misc/cxl/main.c b/drivers/misc/cxl/main.c
+index 62e0dfb5f15b..cc1706a92ace 100644
+--- a/drivers/misc/cxl/main.c
++++ b/drivers/misc/cxl/main.c
+@@ -268,7 +268,7 @@ struct cxl_afu *cxl_alloc_afu(struct cxl *adapter, int slice)
+       idr_init(&afu->contexts_idr);
+       mutex_init(&afu->contexts_lock);
+       spin_lock_init(&afu->afu_cntl_lock);
+-
++      atomic_set(&afu->configured_state, -1);
+       afu->prefault_mode = CXL_PREFAULT_NONE;
+       afu->irqs_max = afu->adapter->user_irqs;
+ 
+diff --git a/drivers/misc/cxl/pci.c b/drivers/misc/cxl/pci.c
+index 80a87ab25b83..34e83219447c 100644
+--- a/drivers/misc/cxl/pci.c
++++ b/drivers/misc/cxl/pci.c
+@@ -1129,6 +1129,7 @@ static int pci_configure_afu(struct cxl_afu *afu, struct cxl *adapter, struct pc
+       if ((rc = cxl_native_register_psl_irq(afu)))
+               goto err2;
+ 
++      atomic_set(&afu->configured_state, 0);
+       return 0;
+ 
+ err2:
+@@ -1141,6 +1142,14 @@ static int pci_configure_afu(struct cxl_afu *afu, struct cxl *adapter, struct pc
+ 
+ static void pci_deconfigure_afu(struct cxl_afu *afu)
+ {
++      /*
++       * It's okay to deconfigure when AFU is already locked, otherwise wait
++       * until there are no readers
++       */
++      if (atomic_read(&afu->configured_state) != -1) {
++              while (atomic_cmpxchg(&afu->configured_state, 0, -1) != -1)
++                      schedule();
++      }
+       cxl_native_release_psl_irq(afu);
+       if (afu->adapter->native->sl_ops->release_serr_irq)
+               afu->adapter->native->sl_ops->release_serr_irq(afu);
+diff --git a/drivers/misc/cxl/vphb.c b/drivers/misc/cxl/vphb.c
+index 3519acebfdab..512a4897dbf6 100644
+--- a/drivers/misc/cxl/vphb.c
++++ b/drivers/misc/cxl/vphb.c
+@@ -76,23 +76,32 @@ static int cxl_pcie_cfg_record(u8 bus, u8 devfn)
+       return (bus << 8) + devfn;
+ }
+ 
+-static int cxl_pcie_config_info(struct pci_bus *bus, unsigned int devfn,
+-                              struct cxl_afu **_afu, int *_record)
++static inline struct cxl_afu *pci_bus_to_afu(struct pci_bus *bus)
+ {
+-      struct pci_controller *phb;
+-      struct cxl_afu *afu;
+-      int record;
++      struct pci_controller *phb = bus ? pci_bus_to_host(bus) : NULL;
+ 
+-      phb = pci_bus_to_host(bus);
+-      if (phb == NULL)
+-              return PCIBIOS_DEVICE_NOT_FOUND;
++      return phb ? phb->private_data : NULL;
++}
++
++static void cxl_afu_configured_put(struct cxl_afu *afu)
++{
++      atomic_dec_if_positive(&afu->configured_state);
++}
++
++static bool cxl_afu_configured_get(struct cxl_afu *afu)
++{
++      return atomic_inc_unless_negative(&afu->configured_state);
++}
++
++static inline int cxl_pcie_config_info(struct pci_bus *bus, unsigned int devfn,
++                                     struct cxl_afu *afu, int *_record)
++{
++      int record;
+ 
+-      afu = (struct cxl_afu *)phb->private_data;
+       record = cxl_pcie_cfg_record(bus->number, devfn);
+       if (record > afu->crs_num)
+               return PCIBIOS_DEVICE_NOT_FOUND;
+ 
+-      *_afu = afu;
+       *_record = record;
+       return 0;
+ }
+@@ -106,9 +115,14 @@ static int cxl_pcie_read_config(struct pci_bus *bus, unsigned int devfn,
+       u16 val16;
+       u32 val32;
+ 
+-      rc = cxl_pcie_config_info(bus, devfn, &afu, &record);
++      afu = pci_bus_to_afu(bus);
++      /* Grab a reader lock on afu. */
++      if (afu == NULL || !cxl_afu_configured_get(afu))
++              return PCIBIOS_DEVICE_NOT_FOUND;
++
++      rc = cxl_pcie_config_info(bus, devfn, afu, &record);
+       if (rc)
+-              return rc;
++              goto out;
+ 
+       switch (len) {
+       case 1:
+@@ -127,10 +141,9 @@ static int cxl_pcie_read_config(struct pci_bus *bus, unsigned int devfn,
+               WARN_ON(1);
+       }
+ 
+-      if (rc)
+-              return PCIBIOS_DEVICE_NOT_FOUND;
+-
+-      return PCIBIOS_SUCCESSFUL;
++out:
++      cxl_afu_configured_put(afu);
++      return rc ? PCIBIOS_DEVICE_NOT_FOUND : PCIBIOS_SUCCESSFUL;
+ }
+ 
+ static int cxl_pcie_write_config(struct pci_bus *bus, unsigned int devfn,
+@@ -139,9 +152,14 @@ static int cxl_pcie_write_config(struct pci_bus *bus, unsigned int devfn,
+       int rc, record;
+       struct cxl_afu *afu;
+ 
+-      rc = cxl_pcie_config_info(bus, devfn, &afu, &record);
++      afu = pci_bus_to_afu(bus);
++      /* Grab a reader lock on afu. */
++      if (afu == NULL || !cxl_afu_configured_get(afu))
++              return PCIBIOS_DEVICE_NOT_FOUND;
++
++      rc = cxl_pcie_config_info(bus, devfn, afu, &record);
+       if (rc)
+-              return rc;
++              goto out;
+ 
+       switch (len) {
+       case 1:
+@@ -157,10 +175,9 @@ static int cxl_pcie_write_config(struct pci_bus *bus, unsigned int devfn,
+               WARN_ON(1);
+       }
+ 
+-      if (rc)
+-              return PCIBIOS_SET_FAILED;
+-
+-      return PCIBIOS_SUCCESSFUL;
++out:
++      cxl_afu_configured_put(afu);
++      return rc ? PCIBIOS_SET_FAILED : PCIBIOS_SUCCESSFUL;
+ }
+ 
+ static struct pci_ops cxl_pcie_pci_ops =
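
The cxl hunks above (cxl.h, main.c, pci.c, vphb.c) introduce a small
reader-count protocol around config-space access: configured_state is
-1 while the AFU is deconfigured and otherwise counts in-flight
readers; readers enter with atomic_inc_unless_negative() and leave with
atomic_dec_if_positive(), while teardown cmpxchg()es 0 to -1 and
schedules until the readers drain. A userspace model with C11 atomics
standing in for the kernel helpers:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int configured_state = -1;	/* starts deconfigured */

static bool reader_get(void)	/* models atomic_inc_unless_negative() */
{
	int v = atomic_load(&configured_state);
	while (v >= 0)
		if (atomic_compare_exchange_weak(&configured_state, &v, v + 1))
			return true;
	return false;		/* locked: caller bails out */
}

static void reader_put(void)	/* models atomic_dec_if_positive() */
{
	int v = atomic_load(&configured_state);
	while (v > 0 &&
	       !atomic_compare_exchange_weak(&configured_state, &v, v - 1))
		;
}

static void deconfigure(void)	/* models the cmpxchg loop in pci.c */
{
	int zero = 0;
	if (atomic_load(&configured_state) == -1)
		return;		/* already locked: nothing to wait for */
	while (!atomic_compare_exchange_strong(&configured_state, &zero, -1))
		zero = 0;	/* busy-wait; the kernel schedule()s here */
}

int main(void)
{
	atomic_store(&configured_state, 0);	/* configure */
	if (reader_get()) { /* ... config access ... */ reader_put(); }
	deconfigure();
	printf("state=%d\n", atomic_load(&configured_state));	/* -1 */
	return 0;
}
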
+diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c
+index 4fe430ceb194..5f1f23e4878d 100644
+--- a/drivers/net/ethernet/marvell/mvpp2.c
++++ b/drivers/net/ethernet/marvell/mvpp2.c
+@@ -991,7 +991,7 @@ static void mvpp2_txq_inc_put(struct mvpp2_txq_pcpu *txq_pcpu,
+               txq_pcpu->buffs + txq_pcpu->txq_put_index;
+       tx_buf->skb = skb;
+       tx_buf->size = tx_desc->data_size;
+-      tx_buf->phys = tx_desc->buf_phys_addr;
++      tx_buf->phys = tx_desc->buf_phys_addr + tx_desc->packet_offset;
+       txq_pcpu->txq_put_index++;
+       if (txq_pcpu->txq_put_index == txq_pcpu->size)
+               txq_pcpu->txq_put_index = 0;
+diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
+index dfb0658713d9..d2219885071f 100644
+--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
+@@ -1661,7 +1661,7 @@ static u8 brcmf_sdio_rxglom(struct brcmf_sdio *bus, u8 rxseq)
+                                          pfirst->len, pfirst->next,
+                                          pfirst->prev);
+                       skb_unlink(pfirst, &bus->glom);
+-                      if (brcmf_sdio_fromevntchan(pfirst->data))
++                      if (brcmf_sdio_fromevntchan(&dptr[SDPCM_HWHDR_LEN]))
+                               brcmf_rx_event(bus->sdiodev->dev, pfirst);
+                       else
+                               brcmf_rx_frame(bus->sdiodev->dev, pfirst,
+diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c
+index ce3e8dfa10ad..1b481a5fb966 100644
+--- a/drivers/nvdimm/namespace_devs.c
++++ b/drivers/nvdimm/namespace_devs.c
+@@ -1700,6 +1700,7 @@ static int select_pmem_id(struct nd_region *nd_region, u8 *pmem_id)
+ struct device *create_namespace_pmem(struct nd_region *nd_region,
+               struct nd_namespace_label *nd_label)
+ {
++      u64 altcookie = nd_region_interleave_set_altcookie(nd_region);
+       u64 cookie = nd_region_interleave_set_cookie(nd_region);
+       struct nd_label_ent *label_ent;
+       struct nd_namespace_pmem *nspm;
+@@ -1718,7 +1719,11 @@ struct device *create_namespace_pmem(struct nd_region *nd_region,
+       if (__le64_to_cpu(nd_label->isetcookie) != cookie) {
+               dev_dbg(&nd_region->dev, "invalid cookie in label: %pUb\n",
+                               nd_label->uuid);
+-              return ERR_PTR(-EAGAIN);
++              if (__le64_to_cpu(nd_label->isetcookie) != altcookie)
++                      return ERR_PTR(-EAGAIN);
++
++              dev_dbg(&nd_region->dev, "valid altcookie in label: %pUb\n",
++                              nd_label->uuid);
+       }
+ 
+       nspm = kzalloc(sizeof(*nspm), GFP_KERNEL);
+@@ -1733,9 +1738,14 @@ struct device *create_namespace_pmem(struct nd_region *nd_region,
+       res->name = dev_name(&nd_region->dev);
+       res->flags = IORESOURCE_MEM;
+ 
+-      for (i = 0; i < nd_region->ndr_mappings; i++)
+-              if (!has_uuid_at_pos(nd_region, nd_label->uuid, cookie, i))
+-                      break;
++      for (i = 0; i < nd_region->ndr_mappings; i++) {
++              if (has_uuid_at_pos(nd_region, nd_label->uuid, cookie, i))
++                      continue;
++              if (has_uuid_at_pos(nd_region, nd_label->uuid, altcookie, i))
++                      continue;
++              break;
++      }
++
+       if (i < nd_region->ndr_mappings) {
+               struct nvdimm_drvdata *ndd = to_ndd(&nd_region->mapping[i]);
+ 
+diff --git a/drivers/nvdimm/nd.h b/drivers/nvdimm/nd.h
+index 35dd75057e16..2a99c83aa19f 100644
+--- a/drivers/nvdimm/nd.h
++++ b/drivers/nvdimm/nd.h
+@@ -328,6 +328,7 @@ struct nd_region *to_nd_region(struct device *dev);
+ int nd_region_to_nstype(struct nd_region *nd_region);
+ int nd_region_register_namespaces(struct nd_region *nd_region, int *err);
+ u64 nd_region_interleave_set_cookie(struct nd_region *nd_region);
++u64 nd_region_interleave_set_altcookie(struct nd_region *nd_region);
+ void nvdimm_bus_lock(struct device *dev);
+ void nvdimm_bus_unlock(struct device *dev);
+ bool is_nvdimm_bus_locked(struct device *dev);
+diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c
+index 7cd705f3247c..b7cb5066d961 100644
+--- a/drivers/nvdimm/region_devs.c
++++ b/drivers/nvdimm/region_devs.c
+@@ -505,6 +505,15 @@ u64 nd_region_interleave_set_cookie(struct nd_region *nd_region)
+       return 0;
+ }
+ 
++u64 nd_region_interleave_set_altcookie(struct nd_region *nd_region)
++{
++      struct nd_interleave_set *nd_set = nd_region->nd_set;
++
++      if (nd_set)
++              return nd_set->altcookie;
++      return 0;
++}
++
+ void nd_mapping_free_labels(struct nd_mapping *nd_mapping)
+ {
+       struct nd_label_ent *label_ent, *e;
+diff --git a/drivers/pci/hotplug/pnv_php.c b/drivers/pci/hotplug/pnv_php.c
+index acb2be0c8c2c..e96973b95e7a 100644
+--- a/drivers/pci/hotplug/pnv_php.c
++++ b/drivers/pci/hotplug/pnv_php.c
+@@ -82,7 +82,7 @@ static void pnv_php_free_slot(struct kref *kref)
+ static inline void pnv_php_put_slot(struct pnv_php_slot *php_slot)
+ {
+ 
+-      if (WARN_ON(!php_slot))
++      if (!php_slot)
+               return;
+ 
+       kref_put(&php_slot->kref, pnv_php_free_slot);
+@@ -436,9 +436,21 @@ static int pnv_php_enable(struct pnv_php_slot *php_slot, bool rescan)
+       if (ret)
+               return ret;
+ 
+-      /* Proceed if there have nothing behind the slot */
+-      if (presence == OPAL_PCI_SLOT_EMPTY)
++      /*
++       * Proceed if there have nothing behind the slot. However,
++       * we should leave the slot in registered state at the
++       * beginning. Otherwise, the PCI devices inserted afterwards
++       * won't be probed and populated.
++       */
++      if (presence == OPAL_PCI_SLOT_EMPTY) {
++              if (!php_slot->power_state_check) {
++                      php_slot->power_state_check = true;
++
++                      return 0;
++              }
++
+               goto scan;
++      }
+ 
+       /*
+        * If the power supply to the slot is off, we can't detect
+@@ -713,8 +725,12 @@ static irqreturn_t pnv_php_interrupt(int irq, void *data)
+               added = !!(lsts & PCI_EXP_LNKSTA_DLLLA);
+       } else if (sts & PCI_EXP_SLTSTA_PDC) {
+               ret = pnv_pci_get_presence_state(php_slot->id, &presence);
+-              if (!ret)
++              if (ret) {
++                      dev_warn(&pdev->dev, "PCI slot [%s] error %d getting presence (0x%04x), to retry the operation.\n",
++                               php_slot->name, ret, sts);
+                       return IRQ_HANDLED;
++              }
++
+               added = !!(presence == OPAL_PCI_SLOT_PRESENT);
+       } else {
+               return IRQ_NONE;
+@@ -799,6 +815,14 @@ static void pnv_php_enable_irq(struct pnv_php_slot *php_slot)
+       struct pci_dev *pdev = php_slot->pdev;
+       int irq, ret;
+ 
++      /*
++       * The MSI/MSIx interrupt might have been occupied by other
++       * drivers. Don't populate the surprise hotplug capability
++       * in that case.
++       */
++      if (pci_dev_msi_enabled(pdev))
++              return;
++
+       ret = pci_enable_device(pdev);
+       if (ret) {
+               dev_warn(&pdev->dev, "Error %d enabling device\n", ret);
+diff --git a/drivers/phy/phy-qcom-ufs.c b/drivers/phy/phy-qcom-ufs.c
+index c69568b8543d..528540095c3f 100644
+--- a/drivers/phy/phy-qcom-ufs.c
++++ b/drivers/phy/phy-qcom-ufs.c
+@@ -189,12 +189,12 @@ int ufs_qcom_phy_init_clks(struct ufs_qcom_phy *phy_common)
+       if (err)
+               goto out;
+ 
++skip_txrx_clk:
+       err = ufs_qcom_phy_clk_get(phy_common->dev, "ref_clk_src",
+                                  &phy_common->ref_clk_src);
+       if (err)
+               goto out;
+ 
+-skip_txrx_clk:
+       /*
+        * "ref_clk_parent" is optional hence don't abort init if it's not
+        * found.
+@@ -217,12 +217,7 @@ static int __ufs_qcom_phy_init_vreg(struct device *dev,
+ 
+       char prop_name[MAX_PROP_NAME];
+ 
+-      vreg->name = devm_kstrdup(dev, name, GFP_KERNEL);
+-      if (!vreg->name) {
+-              err = -ENOMEM;
+-              goto out;
+-      }
+-
++      vreg->name = name;
+       vreg->reg = devm_regulator_get(dev, name);
+       if (IS_ERR(vreg->reg)) {
+               err = PTR_ERR(vreg->reg);
+@@ -265,8 +260,6 @@ static int __ufs_qcom_phy_init_vreg(struct device *dev,
+       }
+ 
+ out:
+-      if (err)
+-              kfree(vreg->name);
+       return err;
+ }
+ 
+diff --git a/drivers/pwm/pwm-pca9685.c b/drivers/pwm/pwm-pca9685.c
+index 117fccf7934a..01a6a83f625d 100644
+--- a/drivers/pwm/pwm-pca9685.c
++++ b/drivers/pwm/pwm-pca9685.c
+@@ -65,7 +65,6 @@
+ #define PCA9685_MAXCHAN               0x10
+ 
+ #define LED_FULL              (1 << 4)
+-#define MODE1_RESTART         (1 << 7)
+ #define MODE1_SLEEP           (1 << 4)
+ #define MODE2_INVRT           (1 << 4)
+ #define MODE2_OUTDRV          (1 << 2)
+@@ -117,16 +116,6 @@ static int pca9685_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
+                       udelay(500);
+ 
+                       pca->period_ns = period_ns;
+-
+-                      /*
+-                       * If the duty cycle did not change, restart PWM with
+-                       * the same duty cycle to period ratio and return.
+-                       */
+-                      if (duty_ns == pca->duty_ns) {
+-                              regmap_update_bits(pca->regmap, PCA9685_MODE1,
+-                                                 MODE1_RESTART, 0x1);
+-                              return 0;
+-                      }
+               } else {
+                       dev_err(chip->dev,
+                               "prescaler not set: period out of bounds!\n");
+diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
+index 9d66b4fb174b..415d10a67b7a 100644
+--- a/drivers/s390/block/dcssblk.c
++++ b/drivers/s390/block/dcssblk.c
+@@ -892,7 +892,7 @@ dcssblk_direct_access (struct block_device *bdev, sector_t secnum,
+       dev_info = bdev->bd_disk->private_data;
+       if (!dev_info)
+               return -ENODEV;
+-      dev_sz = dev_info->end - dev_info->start;
++      dev_sz = dev_info->end - dev_info->start + 1;
+       offset = secnum * 512;
+       *kaddr = (void *) dev_info->start + offset;
+       *pfn = __pfn_to_pfn_t(PFN_DOWN(dev_info->start + offset), PFN_DEV);
+diff --git a/drivers/s390/cio/ioasm.c b/drivers/s390/cio/ioasm.c
+index 8225da619014..4182f60124da 100644
+--- a/drivers/s390/cio/ioasm.c
++++ b/drivers/s390/cio/ioasm.c
+@@ -165,13 +165,15 @@ int tpi(struct tpi_info *addr)
+ int chsc(void *chsc_area)
+ {
+       typedef struct { char _[4096]; } addr_type;
+-      int cc;
++      int cc = -EIO;
+ 
+       asm volatile(
+               "       .insn   rre,0xb25f0000,%2,0\n"
+-              "       ipm     %0\n"
++              "0:     ipm     %0\n"
+               "       srl     %0,28\n"
+-              : "=d" (cc), "=m" (*(addr_type *) chsc_area)
++              "1:\n"
++              EX_TABLE(0b, 1b)
++              : "+d" (cc), "=m" (*(addr_type *) chsc_area)
+               : "d" (chsc_area), "m" (*(addr_type *) chsc_area)
+               : "cc");
+       trace_s390_cio_chsc(chsc_area, cc);
+diff --git a/drivers/s390/cio/qdio_thinint.c b/drivers/s390/cio/qdio_thinint.c
+index 5d06253c2a7a..30e9fbbff051 100644
+--- a/drivers/s390/cio/qdio_thinint.c
++++ b/drivers/s390/cio/qdio_thinint.c
+@@ -147,11 +147,11 @@ static inline void tiqdio_call_inq_handlers(struct qdio_irq *irq)
+       struct qdio_q *q;
+       int i;
+ 
+-      for_each_input_queue(irq, q, i) {
+-              if (!references_shared_dsci(irq) &&
+-                  has_multiple_inq_on_dsci(irq))
+-                      xchg(q->irq_ptr->dsci, 0);
++      if (!references_shared_dsci(irq) &&
++          has_multiple_inq_on_dsci(irq))
++              xchg(irq->dsci, 0);
+ 
++      for_each_input_queue(irq, q, i) {
+               if (q->u.in.queue_start_poll) {
+                       /* skip if polling is enabled or already in work */
+                       if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
+diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
+index 26929c44d703..03bdaac5c6c9 100644
+--- a/drivers/target/target_core_device.c
++++ b/drivers/target/target_core_device.c
+@@ -78,12 +78,16 @@ transport_lookup_cmd_lun(struct se_cmd *se_cmd, u64 unpacked_lun)
+                                       &deve->read_bytes);
+ 
+               se_lun = rcu_dereference(deve->se_lun);
++
++              if (!percpu_ref_tryget_live(&se_lun->lun_ref)) {
++                      se_lun = NULL;
++                      goto out_unlock;
++              }
++
+               se_cmd->se_lun = rcu_dereference(deve->se_lun);
+               se_cmd->pr_res_key = deve->pr_res_key;
+               se_cmd->orig_fe_lun = unpacked_lun;
+               se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
+-
+-              percpu_ref_get(&se_lun->lun_ref);
+               se_cmd->lun_ref_active = true;
+ 
+               if ((se_cmd->data_direction == DMA_TO_DEVICE) &&
+@@ -97,6 +101,7 @@ transport_lookup_cmd_lun(struct se_cmd *se_cmd, u64 unpacked_lun)
+                       goto ref_dev;
+               }
+       }
++out_unlock:
+       rcu_read_unlock();
+ 
+       if (!se_lun) {
+@@ -816,6 +821,7 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
+       xcopy_lun = &dev->xcopy_lun;
+       rcu_assign_pointer(xcopy_lun->lun_se_dev, dev);
+       init_completion(&xcopy_lun->lun_ref_comp);
++      init_completion(&xcopy_lun->lun_shutdown_comp);
+       INIT_LIST_HEAD(&xcopy_lun->lun_deve_list);
+       INIT_LIST_HEAD(&xcopy_lun->lun_dev_link);
+       mutex_init(&xcopy_lun->lun_tg_pt_md_mutex);
+diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c
+index d99752c6cd60..2744251178ad 100644
+--- a/drivers/target/target_core_tpg.c
++++ b/drivers/target/target_core_tpg.c
+@@ -445,7 +445,7 @@ static void core_tpg_lun_ref_release(struct percpu_ref *ref)
+ {
+       struct se_lun *lun = container_of(ref, struct se_lun, lun_ref);
+ 
+-      complete(&lun->lun_ref_comp);
++      complete(&lun->lun_shutdown_comp);
+ }
+ 
+ int core_tpg_register(
+@@ -571,6 +571,7 @@ struct se_lun *core_tpg_alloc_lun(
+       lun->lun_link_magic = SE_LUN_LINK_MAGIC;
+       atomic_set(&lun->lun_acl_count, 0);
+       init_completion(&lun->lun_ref_comp);
++      init_completion(&lun->lun_shutdown_comp);
+       INIT_LIST_HEAD(&lun->lun_deve_list);
+       INIT_LIST_HEAD(&lun->lun_dev_link);
+       atomic_set(&lun->lun_tg_pt_secondary_offline, 0);
+diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
+index 437591bc7c08..665be670b3f3 100644
+--- a/drivers/target/target_core_transport.c
++++ b/drivers/target/target_core_transport.c
+@@ -2706,10 +2706,39 @@ void target_wait_for_sess_cmds(struct se_session *se_sess)
+ }
+ EXPORT_SYMBOL(target_wait_for_sess_cmds);
+ 
++static void target_lun_confirm(struct percpu_ref *ref)
++{
++      struct se_lun *lun = container_of(ref, struct se_lun, lun_ref);
++
++      complete(&lun->lun_ref_comp);
++}
++
+ void transport_clear_lun_ref(struct se_lun *lun)
+ {
+-      percpu_ref_kill(&lun->lun_ref);
++      /*
++       * Mark the percpu-ref as DEAD, switch to atomic_t mode, drop
++       * the initial reference and schedule confirm kill to be
++       * executed after one full RCU grace period has completed.
++       */
++      percpu_ref_kill_and_confirm(&lun->lun_ref, target_lun_confirm);
++      /*
++       * The first completion waits for percpu_ref_switch_to_atomic_rcu()
++       * to call target_lun_confirm after lun->lun_ref has been marked
++       * as __PERCPU_REF_DEAD on all CPUs, and switches to atomic_t
++       * mode so that percpu_ref_tryget_live() lookup of lun->lun_ref
++       * fails for all new incoming I/O.
++       */
+       wait_for_completion(&lun->lun_ref_comp);
++      /*
++       * The second completion waits for percpu_ref_put_many() to
++       * invoke ->release() after lun->lun_ref has switched to
++       * atomic_t mode, and lun->lun_ref.count has reached zero.
++       *
++       * At this point all target-core lun->lun_ref references have
++       * been dropped via transport_lun_remove_cmd(), and it's safe
++       * to proceed with the remaining LUN shutdown.
++       */
++      wait_for_completion(&lun->lun_shutdown_comp);
+ }
+ 
+ static bool
+diff --git a/drivers/tty/n_hdlc.c b/drivers/tty/n_hdlc.c
+index eb278832f5ce..728c8243473b 100644
+--- a/drivers/tty/n_hdlc.c
++++ b/drivers/tty/n_hdlc.c
+@@ -114,7 +114,7 @@
+ #define DEFAULT_TX_BUF_COUNT 3
+ 
+ struct n_hdlc_buf {
+-      struct n_hdlc_buf *link;
++      struct list_head  list_item;
+       int               count;
+       char              buf[1];
+ };
+@@ -122,8 +122,7 @@ struct n_hdlc_buf {
+ #define       N_HDLC_BUF_SIZE (sizeof(struct n_hdlc_buf) + maxframe)
+ 
+ struct n_hdlc_buf_list {
+-      struct n_hdlc_buf *head;
+-      struct n_hdlc_buf *tail;
++      struct list_head  list;
+       int               count;
+       spinlock_t        spinlock;
+ };
+@@ -136,7 +135,6 @@ struct n_hdlc_buf_list {
+  * @backup_tty - TTY to use if tty gets closed
+  * @tbusy - reentrancy flag for tx wakeup code
+  * @woke_up - FIXME: describe this field
+- * @tbuf - currently transmitting tx buffer
+  * @tx_buf_list - list of pending transmit frame buffers
+  * @rx_buf_list - list of received frame buffers
+  * @tx_free_buf_list - list unused transmit frame buffers
+@@ -149,7 +147,6 @@ struct n_hdlc {
+       struct tty_struct       *backup_tty;
+       int                     tbusy;
+       int                     woke_up;
+-      struct n_hdlc_buf       *tbuf;
+       struct n_hdlc_buf_list  tx_buf_list;
+       struct n_hdlc_buf_list  rx_buf_list;
+       struct n_hdlc_buf_list  tx_free_buf_list;
+@@ -159,6 +156,8 @@ struct n_hdlc {
+ /*
+  * HDLC buffer list manipulation functions
+  */
++static void n_hdlc_buf_return(struct n_hdlc_buf_list *buf_list,
++                                              struct n_hdlc_buf *buf);
+ static void n_hdlc_buf_put(struct n_hdlc_buf_list *list,
+                          struct n_hdlc_buf *buf);
+ static struct n_hdlc_buf *n_hdlc_buf_get(struct n_hdlc_buf_list *list);
+@@ -208,16 +207,9 @@ static void flush_tx_queue(struct tty_struct *tty)
+ {
+       struct n_hdlc *n_hdlc = tty2n_hdlc(tty);
+       struct n_hdlc_buf *buf;
+-      unsigned long flags;
+ 
+       while ((buf = n_hdlc_buf_get(&n_hdlc->tx_buf_list)))
+               n_hdlc_buf_put(&n_hdlc->tx_free_buf_list, buf);
+-      spin_lock_irqsave(&n_hdlc->tx_buf_list.spinlock, flags);
+-      if (n_hdlc->tbuf) {
+-              n_hdlc_buf_put(&n_hdlc->tx_free_buf_list, n_hdlc->tbuf);
+-              n_hdlc->tbuf = NULL;
+-      }
+-      spin_unlock_irqrestore(&n_hdlc->tx_buf_list.spinlock, flags);
+ }
+ 
+ static struct tty_ldisc_ops n_hdlc_ldisc = {
+@@ -283,7 +275,6 @@ static void n_hdlc_release(struct n_hdlc *n_hdlc)
+               } else
+                       break;
+       }
+-      kfree(n_hdlc->tbuf);
+       kfree(n_hdlc);
+       
+ }     /* end of n_hdlc_release() */
+@@ -402,13 +393,7 @@ static void n_hdlc_send_frames(struct n_hdlc *n_hdlc, struct tty_struct *tty)
+       n_hdlc->woke_up = 0;
+       spin_unlock_irqrestore(&n_hdlc->tx_buf_list.spinlock, flags);
+ 
+-      /* get current transmit buffer or get new transmit */
+-      /* buffer from list of pending transmit buffers */
+-              
+-      tbuf = n_hdlc->tbuf;
+-      if (!tbuf)
+-              tbuf = n_hdlc_buf_get(&n_hdlc->tx_buf_list);
+-              
++      tbuf = n_hdlc_buf_get(&n_hdlc->tx_buf_list);
+       while (tbuf) {
+               if (debuglevel >= DEBUG_LEVEL_INFO)     
+                       printk("%s(%d)sending frame %p, count=%d\n",
+@@ -420,7 +405,7 @@ static void n_hdlc_send_frames(struct n_hdlc *n_hdlc, struct tty_struct *tty)
+ 
+               /* rollback was possible and has been done */
+               if (actual == -ERESTARTSYS) {
+-                      n_hdlc->tbuf = tbuf;
++                      n_hdlc_buf_return(&n_hdlc->tx_buf_list, tbuf);
+                       break;
+               }
+               /* if transmit error, throw frame away by */
+@@ -435,10 +420,7 @@ static void n_hdlc_send_frames(struct n_hdlc *n_hdlc, struct tty_struct *tty)
+                                       
+                       /* free current transmit buffer */
+                       n_hdlc_buf_put(&n_hdlc->tx_free_buf_list, tbuf);
+-                      
+-                      /* this tx buffer is done */
+-                      n_hdlc->tbuf = NULL;
+-                      
++
+                       /* wait up sleeping writers */
+                       wake_up_interruptible(&tty->write_wait);
+       
+@@ -448,10 +430,12 @@ static void n_hdlc_send_frames(struct n_hdlc *n_hdlc, struct tty_struct *tty)
+                       if (debuglevel >= DEBUG_LEVEL_INFO)     
+                               printk("%s(%d)frame %p pending\n",
+                                       __FILE__,__LINE__,tbuf);
+-                                      
+-                      /* buffer not accepted by driver */
+-                      /* set this buffer as pending buffer */
+-                      n_hdlc->tbuf = tbuf;
++
++                      /*
++                       * the buffer was not accepted by driver,
++                       * return it back into tx queue
++                       */
++                      n_hdlc_buf_return(&n_hdlc->tx_buf_list, tbuf);
+                       break;
+               }
+       }
+@@ -749,7 +733,8 @@ static int n_hdlc_tty_ioctl(struct tty_struct *tty, struct file *file,
+       int error = 0;
+       int count;
+       unsigned long flags;
+-      
++      struct n_hdlc_buf *buf = NULL;
++
+       if (debuglevel >= DEBUG_LEVEL_INFO)     
+               printk("%s(%d)n_hdlc_tty_ioctl() called %d\n",
+                       __FILE__,__LINE__,cmd);
+@@ -763,8 +748,10 @@ static int n_hdlc_tty_ioctl(struct tty_struct *tty, struct file *file,
+               /* report count of read data available */
+               /* in next available frame (if any) */
+               spin_lock_irqsave(&n_hdlc->rx_buf_list.spinlock,flags);
+-              if (n_hdlc->rx_buf_list.head)
+-                      count = n_hdlc->rx_buf_list.head->count;
++              buf = list_first_entry_or_null(&n_hdlc->rx_buf_list.list,
++                                              struct n_hdlc_buf, list_item);
++              if (buf)
++                      count = buf->count;
+               else
+                       count = 0;
+               spin_unlock_irqrestore(&n_hdlc->rx_buf_list.spinlock,flags);
+@@ -776,8 +763,10 @@ static int n_hdlc_tty_ioctl(struct tty_struct *tty, struct file *file,
+               count = tty_chars_in_buffer(tty);
+               /* add size of next output frame in queue */
+               spin_lock_irqsave(&n_hdlc->tx_buf_list.spinlock,flags);
+-              if (n_hdlc->tx_buf_list.head)
+-                      count += n_hdlc->tx_buf_list.head->count;
++              buf = list_first_entry_or_null(&n_hdlc->tx_buf_list.list,
++                                              struct n_hdlc_buf, list_item);
++              if (buf)
++                      count += buf->count;
+               spin_unlock_irqrestore(&n_hdlc->tx_buf_list.spinlock,flags);
+               error = put_user(count, (int __user *)arg);
+               break;
+@@ -825,14 +814,14 @@ static unsigned int n_hdlc_tty_poll(struct tty_struct *tty, struct file *filp,
+               poll_wait(filp, &tty->write_wait, wait);
+ 
+               /* set bits for operations that won't block */
+-              if (n_hdlc->rx_buf_list.head)
++              if (!list_empty(&n_hdlc->rx_buf_list.list))
+                       mask |= POLLIN | POLLRDNORM;    /* readable */
+               if (test_bit(TTY_OTHER_CLOSED, &tty->flags))
+                       mask |= POLLHUP;
+               if (tty_hung_up_p(filp))
+                       mask |= POLLHUP;
+               if (!tty_is_writelocked(tty) &&
+-                              n_hdlc->tx_free_buf_list.head)
++                              !list_empty(&n_hdlc->tx_free_buf_list.list))
+                       mask |= POLLOUT | POLLWRNORM;   /* writable */
+       }
+       return mask;
+@@ -856,7 +845,12 @@ static struct n_hdlc *n_hdlc_alloc(void)
+       spin_lock_init(&n_hdlc->tx_free_buf_list.spinlock);
+       spin_lock_init(&n_hdlc->rx_buf_list.spinlock);
+       spin_lock_init(&n_hdlc->tx_buf_list.spinlock);
+-      
++
++      INIT_LIST_HEAD(&n_hdlc->rx_free_buf_list.list);
++      INIT_LIST_HEAD(&n_hdlc->tx_free_buf_list.list);
++      INIT_LIST_HEAD(&n_hdlc->rx_buf_list.list);
++      INIT_LIST_HEAD(&n_hdlc->tx_buf_list.list);
++
+       /* allocate free rx buffer list */
+       for(i=0;i<DEFAULT_RX_BUF_COUNT;i++) {
+               buf = kmalloc(N_HDLC_BUF_SIZE, GFP_KERNEL);
+@@ -884,53 +878,65 @@ static struct n_hdlc *n_hdlc_alloc(void)
+ }     /* end of n_hdlc_alloc() */
+ 
+ /**
++ * n_hdlc_buf_return - put the HDLC buffer after the head of the specified list
++ * @buf_list - pointer to the buffer list
++ * @buf - pointer to the buffer
++ */
++static void n_hdlc_buf_return(struct n_hdlc_buf_list *buf_list,
++                                              struct n_hdlc_buf *buf)
++{
++      unsigned long flags;
++
++      spin_lock_irqsave(&buf_list->spinlock, flags);
++
++      list_add(&buf->list_item, &buf_list->list);
++      buf_list->count++;
++
++      spin_unlock_irqrestore(&buf_list->spinlock, flags);
++}
++
++/**
+  * n_hdlc_buf_put - add specified HDLC buffer to tail of specified list
+- * @list - pointer to buffer list
++ * @buf_list - pointer to buffer list
+  * @buf       - pointer to buffer
+  */
+-static void n_hdlc_buf_put(struct n_hdlc_buf_list *list,
++static void n_hdlc_buf_put(struct n_hdlc_buf_list *buf_list,
+                          struct n_hdlc_buf *buf)
+ {
+       unsigned long flags;
+-      spin_lock_irqsave(&list->spinlock,flags);
+-      
+-      buf->link=NULL;
+-      if (list->tail)
+-              list->tail->link = buf;
+-      else
+-              list->head = buf;
+-      list->tail = buf;
+-      (list->count)++;
+-      
+-      spin_unlock_irqrestore(&list->spinlock,flags);
+-      
++
++      spin_lock_irqsave(&buf_list->spinlock, flags);
++
++      list_add_tail(&buf->list_item, &buf_list->list);
++      buf_list->count++;
++
++      spin_unlock_irqrestore(&buf_list->spinlock, flags);
+ }     /* end of n_hdlc_buf_put() */
+ 
+ /**
+  * n_hdlc_buf_get - remove and return an HDLC buffer from list
+- * @list - pointer to HDLC buffer list
++ * @buf_list - pointer to HDLC buffer list
+  * 
+  * Remove and return an HDLC buffer from the head of the specified HDLC buffer
+  * list.
+  * Returns a pointer to HDLC buffer if available, otherwise %NULL.
+  */
+-static struct n_hdlc_buf* n_hdlc_buf_get(struct n_hdlc_buf_list *list)
++static struct n_hdlc_buf *n_hdlc_buf_get(struct n_hdlc_buf_list *buf_list)
+ {
+       unsigned long flags;
+       struct n_hdlc_buf *buf;
+-      spin_lock_irqsave(&list->spinlock,flags);
+-      
+-      buf = list->head;
++
++      spin_lock_irqsave(&buf_list->spinlock, flags);
++
++      buf = list_first_entry_or_null(&buf_list->list,
++                                              struct n_hdlc_buf, list_item);
+       if (buf) {
+-              list->head = buf->link;
+-              (list->count)--;
++              list_del(&buf->list_item);
++              buf_list->count--;
+       }
+-      if (!list->head)
+-              list->tail = NULL;
+-      
+-      spin_unlock_irqrestore(&list->spinlock,flags);
++
++      spin_unlock_irqrestore(&buf_list->spinlock, flags);
+       return buf;
+-      
+ }     /* end of n_hdlc_buf_get() */
+ 
+ static char hdlc_banner[] __initdata =
+diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
+index 116436b7fa52..b2fd78ba02bc 100644
+--- a/drivers/tty/serial/8250/8250_pci.c
++++ b/drivers/tty/serial/8250/8250_pci.c
+@@ -2723,6 +2723,8 @@ enum pci_board_num_t {
+       pbn_b0_4_1152000_200,
+       pbn_b0_8_1152000_200,
+ 
++      pbn_b0_4_1250000,
++
+       pbn_b0_2_1843200,
+       pbn_b0_4_1843200,
+ 
+@@ -2954,6 +2956,13 @@ static struct pciserial_board pci_boards[] = {
+               .uart_offset    = 0x200,
+       },
+ 
++      [pbn_b0_4_1250000] = {
++              .flags          = FL_BASE0,
++              .num_ports      = 4,
++              .base_baud      = 1250000,
++              .uart_offset    = 8,
++      },
++
+       [pbn_b0_2_1843200] = {
+               .flags          = FL_BASE0,
+               .num_ports      = 2,
+@@ -5589,6 +5598,10 @@ static struct pci_device_id serial_pci_tbl[] = {
+       { PCI_DEVICE(0x1c29, 0x1108), .driver_data = pbn_fintek_8 },
+       { PCI_DEVICE(0x1c29, 0x1112), .driver_data = pbn_fintek_12 },
+ 
++      /* MKS Tenta SCOM-080x serial cards */
++      { PCI_DEVICE(0x1601, 0x0800), .driver_data = pbn_b0_4_1250000 },
++      { PCI_DEVICE(0x1601, 0xa801), .driver_data = pbn_b0_4_1250000 },
++
+       /*
+        * These entries match devices with class COMMUNICATION_SERIAL,
+        * COMMUNICATION_MODEM or COMMUNICATION_MULTISERIAL
+diff --git a/fs/afs/mntpt.c b/fs/afs/mntpt.c
+index 81dd075356b9..d4fb0afc0097 100644
+--- a/fs/afs/mntpt.c
++++ b/fs/afs/mntpt.c
+@@ -202,7 +202,7 @@ static struct vfsmount *afs_mntpt_do_automount(struct dentry *mntpt)
+ 
+       /* try and do the mount */
+       _debug("--- attempting mount %s -o %s ---", devname, options);
+-      mnt = vfs_kern_mount(&afs_fs_type, 0, devname, options);
++      mnt = vfs_submount(mntpt, &afs_fs_type, devname, options);
+       _debug("--- mount result %p ---", mnt);
+ 
+       free_page((unsigned long) devname);
+diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c
+index 1278335ce366..79fbd85db4ba 100644
+--- a/fs/autofs4/waitq.c
++++ b/fs/autofs4/waitq.c
+@@ -436,8 +436,8 @@ int autofs4_wait(struct autofs_sb_info *sbi,
+               memcpy(&wq->name, &qstr, sizeof(struct qstr));
+               wq->dev = autofs4_get_dev(sbi);
+               wq->ino = autofs4_get_ino(sbi);
+-              wq->uid = current_real_cred()->uid;
+-              wq->gid = current_real_cred()->gid;
++              wq->uid = current_cred()->uid;
++              wq->gid = current_cred()->gid;
+               wq->pid = pid;
+               wq->tgid = tgid;
+               wq->status = -EINTR; /* Status return if interrupted */
+diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
+index 1e861a063721..ec54415fac7d 100644
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -4502,19 +4502,8 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
+               if (found_type > min_type) {
+                       del_item = 1;
+               } else {
+-                      if (item_end < new_size) {
+-                              /*
+-                               * With NO_HOLES mode, for the following mapping
+-                               *
+-                               * [0-4k][hole][8k-12k]
+-                               *
+-                               * if truncating isize down to 6k, it ends up
+-                               * isize being 8k.
+-                               */
+-                              if (btrfs_fs_incompat(root->fs_info, NO_HOLES))
+-                                      last_size = new_size;
++                      if (item_end < new_size)
+                               break;
+-                      }
+                       if (found_key.offset >= new_size)
+                               del_item = 1;
+                       else
+@@ -4697,8 +4686,12 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
+                       btrfs_abort_transaction(trans, ret);
+       }
+ error:
+-      if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID)
++      if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
++              ASSERT(last_size >= new_size);
++              if (!err && last_size > new_size)
++                      last_size = new_size;
+               btrfs_ordered_update_i_size(inode, last_size, NULL);
++      }
+ 
+       btrfs_free_path(path);
+ 
+diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
+index c9d2e553a6c4..0021026a2f74 100644
+--- a/fs/ceph/mds_client.c
++++ b/fs/ceph/mds_client.c
+@@ -628,6 +628,9 @@ static void __unregister_request(struct ceph_mds_client *mdsc,
+ {
+       dout("__unregister_request %p tid %lld\n", req, req->r_tid);
+ 
++      /* Never leave an unregistered request on an unsafe list! */
++      list_del_init(&req->r_unsafe_item);
++
+       if (req->r_tid == mdsc->oldest_tid) {
+               struct rb_node *p = rb_next(&req->r_node);
+               mdsc->oldest_tid = 0;
+@@ -1036,7 +1039,6 @@ static void cleanup_session_requests(struct ceph_mds_client *mdsc,
+       while (!list_empty(&session->s_unsafe)) {
+               req = list_first_entry(&session->s_unsafe,
+                                      struct ceph_mds_request, r_unsafe_item);
+-              list_del_init(&req->r_unsafe_item);
+               pr_warn_ratelimited(" dropping unsafe request %llu\n",
+                                   req->r_tid);
+               __unregister_request(mdsc, req);
+@@ -2437,7 +2439,6 @@ static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg)
+                        * useful we could do with a revised return value.
+                        */
+                       dout("got safe reply %llu, mds%d\n", tid, mds);
+-                      list_del_init(&req->r_unsafe_item);
+ 
+                       /* last unsafe request during umount? */
+                       if (mdsc->stopping && !__get_oldest_req(mdsc))
+diff --git a/fs/cifs/cifs_dfs_ref.c b/fs/cifs/cifs_dfs_ref.c
+index ec9dbbcca3b9..9156be545b0f 100644
+--- a/fs/cifs/cifs_dfs_ref.c
++++ b/fs/cifs/cifs_dfs_ref.c
+@@ -245,7 +245,8 @@ char *cifs_compose_mount_options(const char *sb_mountdata,
+  * @fullpath:         full path in UNC format
+  * @ref:              server's referral
+  */
+-static struct vfsmount *cifs_dfs_do_refmount(struct cifs_sb_info *cifs_sb,
++static struct vfsmount *cifs_dfs_do_refmount(struct dentry *mntpt,
++              struct cifs_sb_info *cifs_sb,
+               const char *fullpath, const struct dfs_info3_param *ref)
+ {
+       struct vfsmount *mnt;
+@@ -259,7 +260,7 @@ static struct vfsmount *cifs_dfs_do_refmount(struct cifs_sb_info *cifs_sb,
+       if (IS_ERR(mountdata))
+               return (struct vfsmount *)mountdata;
+ 
+-      mnt = vfs_kern_mount(&cifs_fs_type, 0, devname, mountdata);
++      mnt = vfs_submount(mntpt, &cifs_fs_type, devname, mountdata);
+       kfree(mountdata);
+       kfree(devname);
+       return mnt;
+@@ -334,7 +335,7 @@ static struct vfsmount *cifs_dfs_do_automount(struct dentry *mntpt)
+                       mnt = ERR_PTR(-EINVAL);
+                       break;
+               }
+-              mnt = cifs_dfs_do_refmount(cifs_sb,
++              mnt = cifs_dfs_do_refmount(mntpt, cifs_sb,
+                               full_path, referrals + i);
+               cifs_dbg(FYI, "%s: cifs_dfs_do_refmount:%s , mnt:%p\n",
+                        __func__, referrals[i].node_name, mnt);
+diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
+index f17fcf89e18e..1e30f74a9527 100644
+--- a/fs/debugfs/inode.c
++++ b/fs/debugfs/inode.c
+@@ -187,9 +187,9 @@ static const struct super_operations debugfs_super_operations = {
+ 
+ static struct vfsmount *debugfs_automount(struct path *path)
+ {
+-      struct vfsmount *(*f)(void *);
+-      f = (struct vfsmount *(*)(void *))path->dentry->d_fsdata;
+-      return f(d_inode(path->dentry)->i_private);
++      debugfs_automount_t f;
++      f = (debugfs_automount_t)path->dentry->d_fsdata;
++      return f(path->dentry, d_inode(path->dentry)->i_private);
+ }
+ 
+ static const struct dentry_operations debugfs_dops = {
+@@ -504,7 +504,7 @@ EXPORT_SYMBOL_GPL(debugfs_create_dir);
+  */
+ struct dentry *debugfs_create_automount(const char *name,
+                                       struct dentry *parent,
+-                                      struct vfsmount *(*f)(void *),
++                                      debugfs_automount_t f,
+                                       void *data)
+ {
+       struct dentry *dentry = start_creating(name, parent);
+diff --git a/fs/fat/inode.c b/fs/fat/inode.c
+index 338d2f73eb29..a2c05f2ada6d 100644
+--- a/fs/fat/inode.c
++++ b/fs/fat/inode.c
+@@ -1359,6 +1359,16 @@ static int parse_options(struct super_block *sb, char *options, int is_vfat,
+       return 0;
+ }
+ 
++static void fat_dummy_inode_init(struct inode *inode)
++{
++      /* Initialize this dummy inode to work as no-op. */
++      MSDOS_I(inode)->mmu_private = 0;
++      MSDOS_I(inode)->i_start = 0;
++      MSDOS_I(inode)->i_logstart = 0;
++      MSDOS_I(inode)->i_attrs = 0;
++      MSDOS_I(inode)->i_pos = 0;
++}
++
+ static int fat_read_root(struct inode *inode)
+ {
+       struct msdos_sb_info *sbi = MSDOS_SB(inode->i_sb);
+@@ -1803,12 +1813,13 @@ int fat_fill_super(struct super_block *sb, void *data, int silent, int isvfat,
+       fat_inode = new_inode(sb);
+       if (!fat_inode)
+               goto out_fail;
+-      MSDOS_I(fat_inode)->i_pos = 0;
++      fat_dummy_inode_init(fat_inode);
+       sbi->fat_inode = fat_inode;
+ 
+       fsinfo_inode = new_inode(sb);
+       if (!fsinfo_inode)
+               goto out_fail;
++      fat_dummy_inode_init(fsinfo_inode);
+       fsinfo_inode->i_ino = MSDOS_FSINFO_INO;
+       sbi->fsinfo_inode = fsinfo_inode;
+       insert_inode_hash(fsinfo_inode);
+diff --git a/fs/mount.h b/fs/mount.h
+index 2c856fc47ae3..2826543a131d 100644
+--- a/fs/mount.h
++++ b/fs/mount.h
+@@ -89,7 +89,6 @@ static inline int is_mounted(struct vfsmount *mnt)
+ }
+ 
+ extern struct mount *__lookup_mnt(struct vfsmount *, struct dentry *);
+-extern struct mount *__lookup_mnt_last(struct vfsmount *, struct dentry *);
+ 
+ extern int __legitimize_mnt(struct vfsmount *, unsigned);
+ extern bool legitimize_mnt(struct vfsmount *, unsigned);
+diff --git a/fs/namei.c b/fs/namei.c
+index ad74877e1442..dff5cd3b556f 100644
+--- a/fs/namei.c
++++ b/fs/namei.c
+@@ -1100,7 +1100,6 @@ static int follow_automount(struct path *path, struct nameidata *nd,
+                           bool *need_mntput)
+ {
+       struct vfsmount *mnt;
+-      const struct cred *old_cred;
+       int err;
+ 
+       if (!path->dentry->d_op || !path->dentry->d_op->d_automount)
+@@ -1129,9 +1128,7 @@ static int follow_automount(struct path *path, struct nameidata *nd,
+       if (nd->total_link_count >= 40)
+               return -ELOOP;
+ 
+-      old_cred = override_creds(&init_cred);
+       mnt = path->dentry->d_op->d_automount(path);
+-      revert_creds(old_cred);
+       if (IS_ERR(mnt)) {
+               /*
+                * The filesystem is allowed to return -EISDIR here to indicate
+diff --git a/fs/namespace.c b/fs/namespace.c
+index 487ba30bb5c6..8bfad42c1ccf 100644
+--- a/fs/namespace.c
++++ b/fs/namespace.c
+@@ -637,28 +637,6 @@ struct mount *__lookup_mnt(struct vfsmount *mnt, struct dentry *dentry)
+ }
+ 
+ /*
+- * find the last mount at @dentry on vfsmount @mnt.
+- * mount_lock must be held.
+- */
+-struct mount *__lookup_mnt_last(struct vfsmount *mnt, struct dentry *dentry)
+-{
+-      struct mount *p, *res = NULL;
+-      p = __lookup_mnt(mnt, dentry);
+-      if (!p)
+-              goto out;
+-      if (!(p->mnt.mnt_flags & MNT_UMOUNT))
+-              res = p;
+-      hlist_for_each_entry_continue(p, mnt_hash) {
+-              if (&p->mnt_parent->mnt != mnt || p->mnt_mountpoint != dentry)
+-                      break;
+-              if (!(p->mnt.mnt_flags & MNT_UMOUNT))
+-                      res = p;
+-      }
+-out:
+-      return res;
+-}
+-
+-/*
+  * lookup_mnt - Return the first child mount mounted at path
+  *
+  * "First" means first mounted chronologically.  If you create the
+@@ -878,6 +856,13 @@ void mnt_set_mountpoint(struct mount *mnt,
+       hlist_add_head(&child_mnt->mnt_mp_list, &mp->m_list);
+ }
+ 
++static void __attach_mnt(struct mount *mnt, struct mount *parent)
++{
++      hlist_add_head_rcu(&mnt->mnt_hash,
++                         m_hash(&parent->mnt, mnt->mnt_mountpoint));
++      list_add_tail(&mnt->mnt_child, &parent->mnt_mounts);
++}
++
+ /*
+  * vfsmount lock must be held for write
+  */
+@@ -886,28 +871,45 @@ static void attach_mnt(struct mount *mnt,
+                       struct mountpoint *mp)
+ {
+       mnt_set_mountpoint(parent, mp, mnt);
+-      hlist_add_head_rcu(&mnt->mnt_hash, m_hash(&parent->mnt, mp->m_dentry));
+-      list_add_tail(&mnt->mnt_child, &parent->mnt_mounts);
++      __attach_mnt(mnt, parent);
+ }
+ 
+-static void attach_shadowed(struct mount *mnt,
+-                      struct mount *parent,
+-                      struct mount *shadows)
++void mnt_change_mountpoint(struct mount *parent, struct mountpoint *mp, struct mount *mnt)
+ {
+-      if (shadows) {
+-              hlist_add_behind_rcu(&mnt->mnt_hash, &shadows->mnt_hash);
+-              list_add(&mnt->mnt_child, &shadows->mnt_child);
+-      } else {
+-              hlist_add_head_rcu(&mnt->mnt_hash,
+-                              m_hash(&parent->mnt, mnt->mnt_mountpoint));
+-              list_add_tail(&mnt->mnt_child, &parent->mnt_mounts);
+-      }
++      struct mountpoint *old_mp = mnt->mnt_mp;
++      struct dentry *old_mountpoint = mnt->mnt_mountpoint;
++      struct mount *old_parent = mnt->mnt_parent;
++
++      list_del_init(&mnt->mnt_child);
++      hlist_del_init(&mnt->mnt_mp_list);
++      hlist_del_init_rcu(&mnt->mnt_hash);
++
++      attach_mnt(mnt, parent, mp);
++
++      put_mountpoint(old_mp);
++
++      /*
++       * Safely avoid even the suggestion this code might sleep or
++       * lock the mount hash by taking advantage of the knowledge that
++       * mnt_change_mountpoint will not release the final reference
++       * to a mountpoint.
++       *
++       * During mounting, the mount passed in as the parent mount will
++       * continue to use the old mountpoint and during unmounting, the
++       * old mountpoint will continue to exist until namespace_unlock,
++       * which happens well after mnt_change_mountpoint.
++       */
++      spin_lock(&old_mountpoint->d_lock);
++      old_mountpoint->d_lockref.count--;
++      spin_unlock(&old_mountpoint->d_lock);
++
++      mnt_add_count(old_parent, -1);
+ }
+ 
+ /*
+  * vfsmount lock must be held for write
+  */
+-static void commit_tree(struct mount *mnt, struct mount *shadows)
++static void commit_tree(struct mount *mnt)
+ {
+       struct mount *parent = mnt->mnt_parent;
+       struct mount *m;
+@@ -925,7 +927,7 @@ static void commit_tree(struct mount *mnt, struct mount *shadows)
+       n->mounts += n->pending_mounts;
+       n->pending_mounts = 0;
+ 
+-      attach_shadowed(mnt, parent, shadows);
++      __attach_mnt(mnt, parent);
+       touch_mnt_namespace(n);
+ }
+ 
+@@ -989,6 +991,21 @@ vfs_kern_mount(struct file_system_type *type, int flags, const char *name, void
+ }
+ EXPORT_SYMBOL_GPL(vfs_kern_mount);
+ 
++struct vfsmount *
++vfs_submount(const struct dentry *mountpoint, struct file_system_type *type,
++           const char *name, void *data)
++{
++      /* Until it is worked out how to pass the user namespace
++       * through from the parent mount to the submount don't support
++       * unprivileged mounts with submounts.
++       */
++      if (mountpoint->d_sb->s_user_ns != &init_user_ns)
++              return ERR_PTR(-EPERM);
++
++      return vfs_kern_mount(type, MS_SUBMOUNT, name, data);
++}
++EXPORT_SYMBOL_GPL(vfs_submount);
++
+ static struct mount *clone_mnt(struct mount *old, struct dentry *root,
+                                       int flag)
+ {
+@@ -1764,7 +1781,6 @@ struct mount *copy_tree(struct mount *mnt, struct dentry *dentry,
+                       continue;
+ 
+               for (s = r; s; s = next_mnt(s, r)) {
+-                      struct mount *t = NULL;
+                       if (!(flag & CL_COPY_UNBINDABLE) &&
+                           IS_MNT_UNBINDABLE(s)) {
+                               s = skip_mnt_tree(s);
+@@ -1786,14 +1802,7 @@ struct mount *copy_tree(struct mount *mnt, struct dentry *dentry,
+                               goto out;
+                       lock_mount_hash();
+                       list_add_tail(&q->mnt_list, &res->mnt_list);
+-                      mnt_set_mountpoint(parent, p->mnt_mp, q);
+-                      if (!list_empty(&parent->mnt_mounts)) {
+-                              t = list_last_entry(&parent->mnt_mounts,
+-                                      struct mount, mnt_child);
+-                              if (t->mnt_mp != p->mnt_mp)
+-                                      t = NULL;
+-                      }
+-                      attach_shadowed(q, parent, t);
++                      attach_mnt(q, parent, p->mnt_mp);
+                       unlock_mount_hash();
+               }
+       }
+@@ -1992,10 +2001,18 @@ static int attach_recursive_mnt(struct mount *source_mnt,
+ {
+       HLIST_HEAD(tree_list);
+       struct mnt_namespace *ns = dest_mnt->mnt_ns;
++      struct mountpoint *smp;
+       struct mount *child, *p;
+       struct hlist_node *n;
+       int err;
+ 
++      /* Preallocate a mountpoint in case the new mounts need
++       * to be tucked under other mounts.
++       */
++      smp = get_mountpoint(source_mnt->mnt.mnt_root);
++      if (IS_ERR(smp))
++              return PTR_ERR(smp);
++
+       /* Is there space to add these mounts to the mount namespace? */
+       if (!parent_path) {
+               err = count_mounts(ns, source_mnt);
+@@ -2022,16 +2039,19 @@ static int attach_recursive_mnt(struct mount *source_mnt,
+               touch_mnt_namespace(source_mnt->mnt_ns);
+       } else {
+               mnt_set_mountpoint(dest_mnt, dest_mp, source_mnt);
+-              commit_tree(source_mnt, NULL);
++              commit_tree(source_mnt);
+       }
+ 
+       hlist_for_each_entry_safe(child, n, &tree_list, mnt_hash) {
+               struct mount *q;
+               hlist_del_init(&child->mnt_hash);
+-              q = __lookup_mnt_last(&child->mnt_parent->mnt,
+-                                    child->mnt_mountpoint);
+-              commit_tree(child, q);
++              q = __lookup_mnt(&child->mnt_parent->mnt,
++                               child->mnt_mountpoint);
++              if (q)
++                      mnt_change_mountpoint(child, smp, q);
++              commit_tree(child);
+       }
++      put_mountpoint(smp);
+       unlock_mount_hash();
+ 
+       return 0;
+@@ -2046,6 +2066,11 @@ static int attach_recursive_mnt(struct mount *source_mnt,
+       cleanup_group_ids(source_mnt, NULL);
+  out:
+       ns->pending_mounts = 0;
++
++      read_seqlock_excl(&mount_lock);
++      put_mountpoint(smp);
++      read_sequnlock_excl(&mount_lock);
++
+       return err;
+ }
+ 
+@@ -2794,7 +2819,7 @@ long do_mount(const char *dev_name, const char __user *dir_name,
+ 
+       flags &= ~(MS_NOSUID | MS_NOEXEC | MS_NODEV | MS_ACTIVE | MS_BORN |
+                  MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT |
+-                 MS_STRICTATIME | MS_NOREMOTELOCK);
++                 MS_STRICTATIME | MS_NOREMOTELOCK | MS_SUBMOUNT);
+ 
+       if (flags & MS_REMOUNT)
+               retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
+diff --git a/fs/nfs/namespace.c b/fs/nfs/namespace.c
+index 5551e8ef67fd..e49d831c4e85 100644
+--- a/fs/nfs/namespace.c
++++ b/fs/nfs/namespace.c
+@@ -226,7 +226,7 @@ static struct vfsmount *nfs_do_clone_mount(struct nfs_server *server,
+                                          const char *devname,
+                                          struct nfs_clone_mount *mountdata)
+ {
+-      return vfs_kern_mount(&nfs_xdev_fs_type, 0, devname, mountdata);
++      return vfs_submount(mountdata->dentry, &nfs_xdev_fs_type, devname, mountdata);
+ }
+ 
+ /**
+diff --git a/fs/nfs/nfs4namespace.c b/fs/nfs/nfs4namespace.c
+index d21104912676..d8b040bd9814 100644
+--- a/fs/nfs/nfs4namespace.c
++++ b/fs/nfs/nfs4namespace.c
+@@ -279,7 +279,7 @@ static struct vfsmount *try_location(struct nfs_clone_mount *mountdata,
+                               mountdata->hostname,
+                               mountdata->mnt_path);
+ 
+-              mnt = vfs_kern_mount(&nfs4_referral_fs_type, 0, page, mountdata);
++              mnt = vfs_submount(mountdata->dentry, &nfs4_referral_fs_type, page, mountdata);
+               if (!IS_ERR(mnt))
+                       break;
+       }
+diff --git a/fs/orangefs/super.c b/fs/orangefs/super.c
+index c48859f16e7b..67c24351a67f 100644
+--- a/fs/orangefs/super.c
++++ b/fs/orangefs/super.c
+@@ -115,6 +115,13 @@ static struct inode *orangefs_alloc_inode(struct super_block *sb)
+       return &orangefs_inode->vfs_inode;
+ }
+ 
++static void orangefs_i_callback(struct rcu_head *head)
++{
++      struct inode *inode = container_of(head, struct inode, i_rcu);
++      struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(inode);
++      kmem_cache_free(orangefs_inode_cache, orangefs_inode);
++}
++
+ static void orangefs_destroy_inode(struct inode *inode)
+ {
+       struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(inode);
+@@ -123,7 +130,7 @@ static void orangefs_destroy_inode(struct inode *inode)
+                       "%s: deallocated %p destroying inode %pU\n",
+                       __func__, orangefs_inode, get_khandle_from_ino(inode));
+ 
+-      kmem_cache_free(orangefs_inode_cache, orangefs_inode);
++      call_rcu(&inode->i_rcu, orangefs_i_callback);
+ }
+ 
+ /*
+diff --git a/fs/pnode.c b/fs/pnode.c
+index 06a793f4ae38..5bc7896d122a 100644
+--- a/fs/pnode.c
++++ b/fs/pnode.c
+@@ -322,6 +322,21 @@ int propagate_mnt(struct mount *dest_mnt, struct mountpoint *dest_mp,
+       return ret;
+ }
+ 
++static struct mount *find_topper(struct mount *mnt)
++{
++      /* If there is exactly one mount covering mnt completely return it. */
++      struct mount *child;
++
++      if (!list_is_singular(&mnt->mnt_mounts))
++              return NULL;
++
++      child = list_first_entry(&mnt->mnt_mounts, struct mount, mnt_child);
++      if (child->mnt_mountpoint != mnt->mnt.mnt_root)
++              return NULL;
++
++      return child;
++}
++
+ /*
+  * return true if the refcount is greater than count
+  */
+@@ -342,9 +357,8 @@ static inline int do_refcount_check(struct mount *mnt, int count)
+  */
+ int propagate_mount_busy(struct mount *mnt, int refcnt)
+ {
+-      struct mount *m, *child;
++      struct mount *m, *child, *topper;
+       struct mount *parent = mnt->mnt_parent;
+-      int ret = 0;
+ 
+       if (mnt == parent)
+               return do_refcount_check(mnt, refcnt);
+@@ -359,12 +373,24 @@ int propagate_mount_busy(struct mount *mnt, int refcnt)
+ 
+       for (m = propagation_next(parent, parent); m;
+                       m = propagation_next(m, parent)) {
+-              child = __lookup_mnt_last(&m->mnt, mnt->mnt_mountpoint);
+-              if (child && list_empty(&child->mnt_mounts) &&
+-                  (ret = do_refcount_check(child, 1)))
+-                      break;
++              int count = 1;
++              child = __lookup_mnt(&m->mnt, mnt->mnt_mountpoint);
++              if (!child)
++                      continue;
++
++              /* Is there exactly one mount on the child that covers
++               * it completely whose reference should be ignored?
++               */
++              topper = find_topper(child);
++              if (topper)
++                      count += 1;
++              else if (!list_empty(&child->mnt_mounts))
++                      continue;
++
++              if (do_refcount_check(child, count))
++                      return 1;
+       }
+-      return ret;
++      return 0;
+ }
+ 
+ /*
+@@ -381,7 +407,7 @@ void propagate_mount_unlock(struct mount *mnt)
+ 
+       for (m = propagation_next(parent, parent); m;
+                       m = propagation_next(m, parent)) {
+-              child = __lookup_mnt_last(&m->mnt, mnt->mnt_mountpoint);
++              child = __lookup_mnt(&m->mnt, mnt->mnt_mountpoint);
+               if (child)
+                       child->mnt.mnt_flags &= ~MNT_LOCKED;
+       }
+@@ -399,9 +425,11 @@ static void mark_umount_candidates(struct mount *mnt)
+ 
+       for (m = propagation_next(parent, parent); m;
+                       m = propagation_next(m, parent)) {
+-              struct mount *child = __lookup_mnt_last(&m->mnt,
++              struct mount *child = __lookup_mnt(&m->mnt,
+                                               mnt->mnt_mountpoint);
+-              if (child && (!IS_MNT_LOCKED(child) || IS_MNT_MARKED(m))) {
++              if (!child || (child->mnt.mnt_flags & MNT_UMOUNT))
++                      continue;
++              if (!IS_MNT_LOCKED(child) || IS_MNT_MARKED(m)) {
+                       SET_MNT_MARK(child);
+               }
+       }
+@@ -420,8 +448,8 @@ static void __propagate_umount(struct mount *mnt)
+ 
+       for (m = propagation_next(parent, parent); m;
+                       m = propagation_next(m, parent)) {
+-
+-              struct mount *child = __lookup_mnt_last(&m->mnt,
++              struct mount *topper;
++              struct mount *child = __lookup_mnt(&m->mnt,
+                                               mnt->mnt_mountpoint);
+               /*
+                * umount the child only if the child has no children
+@@ -430,6 +458,15 @@ static void __propagate_umount(struct mount *mnt)
+               if (!child || !IS_MNT_MARKED(child))
+                       continue;
+               CLEAR_MNT_MARK(child);
++
++              /* If there is exactly one mount covering all of child
++               * replace child with that mount.
++               */
++              topper = find_topper(child);
++              if (topper)
++                      mnt_change_mountpoint(child->mnt_parent, child->mnt_mp,
++                                            topper);
++
+               if (list_empty(&child->mnt_mounts)) {
+                       list_del_init(&child->mnt_child);
+                       child->mnt.mnt_flags |= MNT_UMOUNT;
+diff --git a/fs/pnode.h b/fs/pnode.h
+index 550f5a8b4fcf..dc87e65becd2 100644
+--- a/fs/pnode.h
++++ b/fs/pnode.h
+@@ -49,6 +49,8 @@ int get_dominating_id(struct mount *mnt, const struct path *root);
+ unsigned int mnt_get_count(struct mount *mnt);
+ void mnt_set_mountpoint(struct mount *, struct mountpoint *,
+                       struct mount *);
++void mnt_change_mountpoint(struct mount *parent, struct mountpoint *mp,
++                         struct mount *mnt);
+ struct mount *copy_tree(struct mount *, struct dentry *, int);
+ bool is_path_reachable(struct mount *, struct dentry *,
+                        const struct path *root);
+diff --git a/fs/super.c b/fs/super.c
+index 1709ed029a2c..4185844f7a12 100644
+--- a/fs/super.c
++++ b/fs/super.c
+@@ -469,7 +469,7 @@ struct super_block *sget_userns(struct file_system_type *type,
+       struct super_block *old;
+       int err;
+ 
+-      if (!(flags & MS_KERNMOUNT) &&
++      if (!(flags & (MS_KERNMOUNT|MS_SUBMOUNT)) &&
+           !(type->fs_flags & FS_USERNS_MOUNT) &&
+           !capable(CAP_SYS_ADMIN))
+               return ERR_PTR(-EPERM);
+@@ -499,7 +499,7 @@ struct super_block *sget_userns(struct file_system_type *type,
+       }
+       if (!s) {
+               spin_unlock(&sb_lock);
+-              s = alloc_super(type, flags, user_ns);
++              s = alloc_super(type, (flags & ~MS_SUBMOUNT), user_ns);
+               if (!s)
+                       return ERR_PTR(-ENOMEM);
+               goto retry;
+@@ -540,8 +540,15 @@ struct super_block *sget(struct file_system_type *type,
+ {
+       struct user_namespace *user_ns = current_user_ns();
+ 
++      /* We don't yet pass the user namespace of the parent
++       * mount through to here so always use &init_user_ns
++       * until that changes.
++       */
++      if (flags & MS_SUBMOUNT)
++              user_ns = &init_user_ns;
++
+       /* Ensure the requestor has permissions over the target filesystem */
+-      if (!(flags & MS_KERNMOUNT) && !ns_capable(user_ns, CAP_SYS_ADMIN))
++      if (!(flags & (MS_KERNMOUNT|MS_SUBMOUNT)) && !ns_capable(user_ns, CAP_SYS_ADMIN))
+               return ERR_PTR(-EPERM);
+ 
+       return sget_userns(type, test, set, flags, user_ns, data);
+diff --git a/include/linux/ceph/osdmap.h b/include/linux/ceph/osdmap.h
+index 9a9041784dcf..412906609954 100644
+--- a/include/linux/ceph/osdmap.h
++++ b/include/linux/ceph/osdmap.h
+@@ -57,7 +57,7 @@ static inline bool ceph_can_shift_osds(struct ceph_pg_pool_info *pool)
+       case CEPH_POOL_TYPE_EC:
+               return false;
+       default:
+-              BUG_ON(1);
++              BUG();
+       }
+ }
+ 
+diff --git a/include/linux/debugfs.h b/include/linux/debugfs.h
+index 014cc564d1c4..233006be30aa 100644
+--- a/include/linux/debugfs.h
++++ b/include/linux/debugfs.h
+@@ -97,9 +97,10 @@ struct dentry *debugfs_create_dir(const char *name, struct dentry *parent);
+ struct dentry *debugfs_create_symlink(const char *name, struct dentry *parent,
+                                     const char *dest);
+ 
++typedef struct vfsmount *(*debugfs_automount_t)(struct dentry *, void *);
+ struct dentry *debugfs_create_automount(const char *name,
+                                       struct dentry *parent,
+-                                      struct vfsmount *(*f)(void *),
++                                      debugfs_automount_t f,
+                                       void *data);
+ 
+ void debugfs_remove(struct dentry *dentry);
+diff --git a/include/linux/libnvdimm.h b/include/linux/libnvdimm.h
+index 8458c5351e56..77e7af32543f 100644
+--- a/include/linux/libnvdimm.h
++++ b/include/linux/libnvdimm.h
+@@ -70,6 +70,8 @@ struct nd_cmd_desc {
+ 
+ struct nd_interleave_set {
+       u64 cookie;
++      /* compatibility with initial buggy Linux implementation */
++      u64 altcookie;
+ };
+ 
+ struct nd_mapping_desc {
+diff --git a/include/linux/lockd/lockd.h b/include/linux/lockd/lockd.h
+index c15373894a42..b37dee3acaba 100644
+--- a/include/linux/lockd/lockd.h
++++ b/include/linux/lockd/lockd.h
+@@ -355,7 +355,8 @@ static inline int nlm_privileged_requester(const struct svc_rqst *rqstp)
+ static inline int nlm_compare_locks(const struct file_lock *fl1,
+                                   const struct file_lock *fl2)
+ {
+-      return  fl1->fl_pid   == fl2->fl_pid
++      return file_inode(fl1->fl_file) == file_inode(fl2->fl_file)
++           && fl1->fl_pid   == fl2->fl_pid
+            && fl1->fl_owner == fl2->fl_owner
+            && fl1->fl_start == fl2->fl_start
+            && fl1->fl_end   == fl2->fl_end
+diff --git a/include/linux/mount.h b/include/linux/mount.h
+index c6f55158d5e5..8e0352af06b7 100644
+--- a/include/linux/mount.h
++++ b/include/linux/mount.h
+@@ -90,6 +90,9 @@ struct file_system_type;
+ extern struct vfsmount *vfs_kern_mount(struct file_system_type *type,
+                                     int flags, const char *name,
+                                     void *data);
++extern struct vfsmount *vfs_submount(const struct dentry *mountpoint,
++                                   struct file_system_type *type,
++                                   const char *name, void *data);
+ 
+ extern void mnt_set_expiry(struct vfsmount *mnt, struct list_head *expiry_list);
+ extern void mark_mounts_for_expiry(struct list_head *mounts);
+diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
+index da854fb4530f..775c2319a72b 100644
+--- a/include/target/target_core_base.h
++++ b/include/target/target_core_base.h
+@@ -732,6 +732,7 @@ struct se_lun {
+       struct config_group     lun_group;
+       struct se_port_stat_grps port_stat_grps;
+       struct completion       lun_ref_comp;
++      struct completion       lun_shutdown_comp;
+       struct percpu_ref       lun_ref;
+       struct list_head        lun_dev_link;
+       struct hlist_node       link;
+diff --git a/include/uapi/linux/fs.h b/include/uapi/linux/fs.h
+index 36da93fbf188..048a85e9f017 100644
+--- a/include/uapi/linux/fs.h
++++ b/include/uapi/linux/fs.h
+@@ -132,6 +132,7 @@ struct inodes_stat_t {
+ #define MS_LAZYTIME   (1<<25) /* Update the on-disk [acm]times lazily */
+ 
+ /* These sb flags are internal to the kernel */
++#define MS_SUBMOUNT     (1<<26)
+ #define MS_NOREMOTELOCK       (1<<27)
+ #define MS_NOSEC      (1<<28)
+ #define MS_BORN               (1<<29)
+diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
+index d7449783987a..310f0ea0d1a2 100644
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -7503,7 +7503,7 @@ init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
+       ftrace_init_tracefs(tr, d_tracer);
+ }
+ 
+-static struct vfsmount *trace_automount(void *ingore)
++static struct vfsmount *trace_automount(struct dentry *mntpt, void *ingore)
+ {
+       struct vfsmount *mnt;
+       struct file_system_type *type;
+@@ -7516,7 +7516,7 @@ static struct vfsmount *trace_automount(void *ingore)
+       type = get_fs_type("tracefs");
+       if (!type)
+               return NULL;
+-      mnt = vfs_kern_mount(type, 0, "tracefs", NULL);
++      mnt = vfs_submount(mntpt, type, "tracefs", NULL);
+       put_filesystem(type);
+       if (IS_ERR(mnt))
+               return NULL;
+diff --git a/kernel/trace/trace_benchmark.c b/kernel/trace/trace_benchmark.c
+index e3b488825ae3..e49fbe901cfc 100644
+--- a/kernel/trace/trace_benchmark.c
++++ b/kernel/trace/trace_benchmark.c
+@@ -175,9 +175,9 @@ int trace_benchmark_reg(void)
+ 
+       bm_event_thread = kthread_run(benchmark_event_kthread,
+                                     NULL, "event_benchmark");
+-      if (!bm_event_thread) {
++      if (IS_ERR(bm_event_thread)) {
+               pr_warning("trace benchmark failed to create kernel thread\n");
+-              return -ENOMEM;
++              return PTR_ERR(bm_event_thread);
+       }
+ 
+       return 0;
+diff --git a/mm/kasan/quarantine.c b/mm/kasan/quarantine.c
+index dae929c02bbb..872e1981f63b 100644
+--- a/mm/kasan/quarantine.c
++++ b/mm/kasan/quarantine.c
+@@ -282,8 +282,15 @@ void quarantine_remove_cache(struct kmem_cache *cache)
+       on_each_cpu(per_cpu_remove_cache, cache, 1);
+ 
+       spin_lock_irqsave(&quarantine_lock, flags);
+-      for (i = 0; i < QUARANTINE_BATCHES; i++)
++      for (i = 0; i < QUARANTINE_BATCHES; i++) {
++              if (qlist_empty(&global_quarantine[i]))
++                      continue;
+               qlist_move_cache(&global_quarantine[i], &to_free, cache);
++              /* Scanning whole quarantine can take a while. */
++              spin_unlock_irqrestore(&quarantine_lock, flags);
++              cond_resched();
++              spin_lock_irqsave(&quarantine_lock, flags);
++      }
+       spin_unlock_irqrestore(&quarantine_lock, flags);
+ 
+       qlist_free_all(&to_free, cache);
+diff --git a/mm/memcontrol.c b/mm/memcontrol.c
+index b822e158b319..86c1100bc69e 100644
+--- a/mm/memcontrol.c
++++ b/mm/memcontrol.c
+@@ -4132,17 +4132,22 @@ static void free_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node)
+       kfree(memcg->nodeinfo[node]);
+ }
+ 
+-static void mem_cgroup_free(struct mem_cgroup *memcg)
++static void __mem_cgroup_free(struct mem_cgroup *memcg)
+ {
+       int node;
+ 
+-      memcg_wb_domain_exit(memcg);
+       for_each_node(node)
+               free_mem_cgroup_per_node_info(memcg, node);
+       free_percpu(memcg->stat);
+       kfree(memcg);
+ }
+ 
++static void mem_cgroup_free(struct mem_cgroup *memcg)
++{
++      memcg_wb_domain_exit(memcg);
++      __mem_cgroup_free(memcg);
++}
++
+ static struct mem_cgroup *mem_cgroup_alloc(void)
+ {
+       struct mem_cgroup *memcg;
+@@ -4193,7 +4198,7 @@ static struct mem_cgroup *mem_cgroup_alloc(void)
+ fail:
+       if (memcg->id.id > 0)
+               idr_remove(&mem_cgroup_idr, memcg->id.id);
+-      mem_cgroup_free(memcg);
++      __mem_cgroup_free(memcg);
+       return NULL;
+ }
+ 
+diff --git a/mm/mlock.c b/mm/mlock.c
+index cdbed8aaa426..665ab75b5533 100644
+--- a/mm/mlock.c
++++ b/mm/mlock.c
+@@ -441,7 +441,7 @@ void munlock_vma_pages_range(struct vm_area_struct *vma,
+ 
+       while (start < end) {
+               struct page *page;
+-              unsigned int page_mask;
++              unsigned int page_mask = 0;
+               unsigned long page_increm;
+               struct pagevec pvec;
+               struct zone *zone;
+@@ -455,8 +455,7 @@ void munlock_vma_pages_range(struct vm_area_struct *vma,
+                * suits munlock very well (and if somehow an abnormal page
+                * has sneaked into the range, we won't oops here: great).
+                */
+-              page = follow_page_mask(vma, start, FOLL_GET | FOLL_DUMP,
+-                              &page_mask);
++              page = follow_page(vma, start, FOLL_GET | FOLL_DUMP);
+ 
+               if (page && !IS_ERR(page)) {
+                       if (PageTransTail(page)) {
+@@ -467,8 +466,8 @@ void munlock_vma_pages_range(struct vm_area_struct *vma,
+                               /*
+                                * Any THP page found by follow_page_mask() may
+                                * have gotten split before reaching
+-                               * munlock_vma_page(), so we need to recompute
+-                               * the page_mask here.
++                               * munlock_vma_page(), so we need to compute
++                               * the page_mask here instead.
+                                */
+                               page_mask = munlock_vma_page(page);
+                               unlock_page(page);
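
The mlock.c hunks close an uninitialised read: follow_page_mask() does not write *page_mask when the lookup fails, so the stride computation further down the loop could consume stack garbage. The fix switches to follow_page(), zero-initialises page_mask, and recomputes it only when munlock_vma_page() reports a huge page. A condensed sketch of the loop after the fix (not the verbatim kernel code, which also batches small pages through a pagevec):

    while (start < end) {
            unsigned int page_mask = 0;     /* safe default: one page */
            struct page *page;

            page = follow_page(vma, start, FOLL_GET | FOLL_DUMP);
            if (page && !IS_ERR(page) && !PageTransTail(page)) {
                    lock_page(page);
                    /* returns a mask spanning the THP, or 0 */
                    page_mask = munlock_vma_page(page);
                    unlock_page(page);
                    put_page(page);
            }
            start += (1UL + page_mask) << PAGE_SHIFT;   /* well-defined now */
    }
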
+diff --git a/net/mac80211/agg-rx.c b/net/mac80211/agg-rx.c
+index 3b5fd4188f2a..58ad23a44109 100644
+--- a/net/mac80211/agg-rx.c
++++ b/net/mac80211/agg-rx.c
+@@ -398,6 +398,7 @@ void __ieee80211_start_rx_ba_session(struct sta_info *sta,
+       tid_agg_rx->timeout = timeout;
+       tid_agg_rx->stored_mpdu_num = 0;
+       tid_agg_rx->auto_seq = auto_seq;
++      tid_agg_rx->started = false;
+       tid_agg_rx->reorder_buf_filtered = 0;
+       status = WLAN_STATUS_SUCCESS;
+ 
+diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
+index 28a3a0957c9e..76a8bcd8ef11 100644
+--- a/net/mac80211/pm.c
++++ b/net/mac80211/pm.c
+@@ -168,6 +168,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
+                       break;
+               }
+ 
++              flush_delayed_work(&sdata->dec_tailroom_needed_wk);
+               drv_remove_interface(local, sdata);
+       }
+ 
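
The suspend fix orders interface teardown against deferred work: sdata->dec_tailroom_needed_wk may still be pending when the interface is passed to drv_remove_interface(), and letting it run afterwards would touch state the driver has already released. flush_delayed_work() forces any queued instance to completion first. The generic shape of the rule, with illustrative names:

    #include <linux/workqueue.h>

    /* my_obj, deferred_wk and release_to_driver() are stand-ins. */
    static void teardown(struct my_obj *obj)
    {
            flush_delayed_work(&obj->deferred_wk);  /* finish it now */
            release_to_driver(obj);                 /* no work in flight */
    }
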
+diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
+index 3090dd4342f6..1109e60e9121 100644
+--- a/net/mac80211/rx.c
++++ b/net/mac80211/rx.c
+@@ -4,7 +4,7 @@
+  * Copyright 2006-2007        Jiri Benc <[email protected]>
+  * Copyright 2007-2010        Johannes Berg <[email protected]>
+  * Copyright 2013-2014  Intel Mobile Communications GmbH
+- * Copyright(c) 2015 - 2016 Intel Deutschland GmbH
++ * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
+  *
+  * This program is free software; you can redistribute it and/or modify
+  * it under the terms of the GNU General Public License version 2 as
+@@ -1034,6 +1034,18 @@ static bool ieee80211_sta_manage_reorder_buf(struct ieee80211_sub_if_data *sdata
+       buf_size = tid_agg_rx->buf_size;
+       head_seq_num = tid_agg_rx->head_seq_num;
+ 
++      /*
++       * If the current MPDU's SN is smaller than the SSN, it shouldn't
++       * be reordered.
++       */
++      if (unlikely(!tid_agg_rx->started)) {
++              if (ieee80211_sn_less(mpdu_seq_num, head_seq_num)) {
++                      ret = false;
++                      goto out;
++              }
++              tid_agg_rx->started = true;
++      }
++
+       /* frame with out of date sequence number */
+       if (ieee80211_sn_less(mpdu_seq_num, head_seq_num)) {
+               dev_kfree_skb(skb);
+@@ -4077,15 +4089,17 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
+                    ieee80211_is_beacon(hdr->frame_control)))
+               ieee80211_scan_rx(local, skb);
+ 
+-      if (pubsta) {
+-              rx.sta = container_of(pubsta, struct sta_info, sta);
+-              rx.sdata = rx.sta->sdata;
+-              if (ieee80211_prepare_and_rx_handle(&rx, skb, true))
+-                      return;
+-              goto out;
+-      } else if (ieee80211_is_data(fc)) {
++      if (ieee80211_is_data(fc)) {
+               struct sta_info *sta, *prev_sta;
+ 
++              if (pubsta) {
++                      rx.sta = container_of(pubsta, struct sta_info, sta);
++                      rx.sdata = rx.sta->sdata;
++                      if (ieee80211_prepare_and_rx_handle(&rx, skb, true))
++                              return;
++                      goto out;
++              }
++
+               prev_sta = NULL;
+ 
+               for_each_sta_info(local, hdr->addr2, sta, tmp) {
+diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h
+index dd06ef0b8861..15599c70a38f 100644
+--- a/net/mac80211/sta_info.h
++++ b/net/mac80211/sta_info.h
+@@ -189,6 +189,7 @@ struct tid_ampdu_tx {
+ * @auto_seq: used for offloaded BA sessions to automatically pick head_seq_and
+  *    and ssn.
+  * @removed: this session is removed (but might have been found due to RCU)
++ * @started: this session has started (head ssn or higher was received)
+  *
+  * This structure's lifetime is managed by RCU, assignments to
+  * the array holding it must hold the aggregation mutex.
+@@ -212,8 +213,9 @@ struct tid_ampdu_rx {
+       u16 ssn;
+       u16 buf_size;
+       u16 timeout;
+-      bool auto_seq;
+-      bool removed;
++      u8 auto_seq:1,
++         removed:1,
++         started:1;
+ };
+ 
+ /**
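
Taken together, the agg-rx.c, rx.c and sta_info.h hunks fix how a new block-ack session treats frames numbered below its starting sequence number (SSN): until the first in-window MPDU arrives, such frames are now delivered directly rather than freed as "out of date" duplicates by the reorder buffer, and the new started flag joins the existing booleans in a single byte of bitfields. A simplified sketch of the gate (field and helper names follow the patch; control flow condensed):

    #include "ieee80211_i.h"        /* mac80211-internal headers */
    #include "sta_info.h"

    static bool reorder_this_mpdu(struct tid_ampdu_rx *tid_agg_rx, u16 sn)
    {
            if (unlikely(!tid_agg_rx->started)) {
                    /* pre-SSN frame: bypass the reorder buffer */
                    if (ieee80211_sn_less(sn, tid_agg_rx->head_seq_num))
                            return false;
                    tid_agg_rx->started = true;     /* first in-window MPDU */
            }
            return true;                            /* reorder as usual */
    }
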
+diff --git a/net/mac80211/status.c b/net/mac80211/status.c
+index ddf71c648cab..ad37b4e58c2f 100644
+--- a/net/mac80211/status.c
++++ b/net/mac80211/status.c
+@@ -51,7 +51,8 @@ static void ieee80211_handle_filtered_frame(struct ieee80211_local *local,
+       struct ieee80211_hdr *hdr = (void *)skb->data;
+       int ac;
+ 
+-      if (info->flags & IEEE80211_TX_CTL_NO_PS_BUFFER) {
++      if (info->flags & (IEEE80211_TX_CTL_NO_PS_BUFFER |
++                         IEEE80211_TX_CTL_AMPDU)) {
+               ieee80211_free_txskb(&local->hw, skb);
+               return;
+       }
+diff --git a/tools/testing/ktest/ktest.pl b/tools/testing/ktest/ktest.pl
+index be93ab02b490..33f3337019ee 100755
+--- a/tools/testing/ktest/ktest.pl
++++ b/tools/testing/ktest/ktest.pl
+@@ -2629,7 +2629,7 @@ sub do_run_test {
+     }
+ 
+     waitpid $child_pid, 0;
+-    $child_exit = $?;
++    $child_exit = $? >> 8;
+ 
+     my $end_time = time;
+     $test_time = $end_time - $start_time;
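
The ktest.pl change is about Perl's wait-status packing: after waitpid, $? holds the raw status word, in which the child's exit code occupies bits 8-15 while the low byte carries the terminating signal and core-dump flag. Comparing $? directly against an expected exit code therefore only worked for zero; shifting right by 8 extracts the code, matching what C spells WEXITSTATUS(). A small standalone C illustration of the same layout:

    #include <stdio.h>
    #include <sys/wait.h>
    #include <unistd.h>

    int main(void)
    {
            int status;
            pid_t pid = fork();

            if (pid == 0)
                    _exit(42);                      /* child exits with 42 */
            waitpid(pid, &status, 0);
            /* Both lines print 42 for a normal exit on Linux. */
            printf("WEXITSTATUS: %d\n", WEXITSTATUS(status));
            printf("status >> 8: %d\n", (status >> 8) & 0xff);
            return 0;
    }
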
