commit:     0f23b605fb69e470f285ce960cb4fd7e0492050e
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sun Aug  6 19:34:27 2017 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sun Aug  6 19:34:27 2017 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=0f23b605

Linux patch 4.12.5

 0000_README             |   4 +
 1004_linux-4.12.5.patch | 997 ++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 1001 insertions(+)

diff --git a/0000_README b/0000_README
index 09d6e6c..29e1ca2 100644
--- a/0000_README
+++ b/0000_README
@@ -59,6 +59,10 @@ Patch:  1003_linux-4.12.4.patch
 From:   http://www.kernel.org
 Desc:   Linux 4.12.4
 
+Patch:  1004_linux-4.12.5.patch
+From:   http://www.kernel.org
+Desc:   Linux 4.12.5
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1004_linux-4.12.5.patch b/1004_linux-4.12.5.patch
new file mode 100644
index 0000000..0b6a672
--- /dev/null
+++ b/1004_linux-4.12.5.patch
@@ -0,0 +1,997 @@
+diff --git a/Makefile b/Makefile
+index bfdc92c2e47a..382e967b0792 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 12
+-SUBLEVEL = 4
++SUBLEVEL = 5
+ EXTRAVERSION =
+ NAME = Fearless Coyote
+ 
+diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c
+index c32a09095216..85a92db70afc 100644
+--- a/arch/parisc/kernel/cache.c
++++ b/arch/parisc/kernel/cache.c
+@@ -453,8 +453,8 @@ void copy_user_page(void *vto, void *vfrom, unsigned long 
vaddr,
+         before it can be accessed through the kernel mapping. */
+       preempt_disable();
+       flush_dcache_page_asm(__pa(vfrom), vaddr);
+-      preempt_enable();
+       copy_page_asm(vto, vfrom);
++      preempt_enable();
+ }
+ EXPORT_SYMBOL(copy_user_page);
+ 
+@@ -539,6 +539,10 @@ void flush_cache_mm(struct mm_struct *mm)
+       struct vm_area_struct *vma;
+       pgd_t *pgd;
+ 
++      /* Flush the TLB to avoid speculation if coherency is required. */
++      if (parisc_requires_coherency())
++              flush_tlb_all();
++
+       /* Flushing the whole cache on each cpu takes forever on
+          rp3440, etc.  So, avoid it if the mm isn't too big.  */
+       if (mm_total_size(mm) >= parisc_cache_flush_threshold) {
+@@ -577,33 +581,22 @@ void flush_cache_mm(struct mm_struct *mm)
+ void flush_cache_range(struct vm_area_struct *vma,
+               unsigned long start, unsigned long end)
+ {
+-      unsigned long addr;
+-      pgd_t *pgd;
+-
+       BUG_ON(!vma->vm_mm->context);
+ 
++      /* Flush the TLB to avoid speculation if coherency is required. */
++      if (parisc_requires_coherency())
++              flush_tlb_range(vma, start, end);
++
+       if ((end - start) >= parisc_cache_flush_threshold) {
+               flush_cache_all();
+               return;
+       }
+ 
+-      if (vma->vm_mm->context == mfsp(3)) {
+-              flush_user_dcache_range_asm(start, end);
+-              if (vma->vm_flags & VM_EXEC)
+-                      flush_user_icache_range_asm(start, end);
+-              return;
+-      }
++      BUG_ON(vma->vm_mm->context != mfsp(3));
+ 
+-      pgd = vma->vm_mm->pgd;
+-      for (addr = start & PAGE_MASK; addr < end; addr += PAGE_SIZE) {
+-              unsigned long pfn;
+-              pte_t *ptep = get_ptep(pgd, addr);
+-              if (!ptep)
+-                      continue;
+-              pfn = pte_pfn(*ptep);
+-              if (pfn_valid(pfn))
+-                      __flush_cache_page(vma, addr, PFN_PHYS(pfn));
+-      }
++      flush_user_dcache_range_asm(start, end);
++      if (vma->vm_flags & VM_EXEC)
++              flush_user_icache_range_asm(start, end);
+ }
+ 
+ void
+@@ -612,7 +605,8 @@ flush_cache_page(struct vm_area_struct *vma, unsigned long 
vmaddr, unsigned long
+       BUG_ON(!vma->vm_mm->context);
+ 
+       if (pfn_valid(pfn)) {
+-              flush_tlb_page(vma, vmaddr);
++              if (parisc_requires_coherency())
++                      flush_tlb_page(vma, vmaddr);
+               __flush_cache_page(vma, vmaddr, PFN_PHYS(pfn));
+       }
+ }
+diff --git a/arch/parisc/kernel/process.c b/arch/parisc/kernel/process.c
+index b64d7d21646e..a45a67d526f8 100644
+--- a/arch/parisc/kernel/process.c
++++ b/arch/parisc/kernel/process.c
+@@ -53,6 +53,7 @@
+ #include <linux/uaccess.h>
+ #include <linux/rcupdate.h>
+ #include <linux/random.h>
++#include <linux/nmi.h>
+ 
+ #include <asm/io.h>
+ #include <asm/asm-offsets.h>
+@@ -145,6 +146,7 @@ void machine_power_off(void)
+ 
+       /* prevent soft lockup/stalled CPU messages for endless loop. */
+       rcu_sysrq_start();
++      lockup_detector_suspend();
+       for (;;);
+ }
+ 
+diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c 
b/arch/powerpc/kvm/book3s_64_mmu_hv.c
+index 710e491206ed..1c10e26cebbb 100644
+--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
++++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
+@@ -164,8 +164,10 @@ long kvmppc_alloc_reset_hpt(struct kvm *kvm, int order)
+               goto out;
+       }
+ 
+-      if (kvm->arch.hpt.virt)
++      if (kvm->arch.hpt.virt) {
+               kvmppc_free_hpt(&kvm->arch.hpt);
++              kvmppc_rmap_reset(kvm);
++      }
+ 
+       err = kvmppc_allocate_hpt(&info, order);
+       if (err < 0)
+diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
+index 8d1a365b8edc..1d3602f7ec22 100644
+--- a/arch/powerpc/kvm/book3s_hv.c
++++ b/arch/powerpc/kvm/book3s_hv.c
+@@ -2938,6 +2938,8 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, 
struct kvm_vcpu *vcpu)
+                       run->fail_entry.hardware_entry_failure_reason = 0;
+                       return -EINVAL;
+               }
++              /* Enable TM so we can read the TM SPRs */
++              mtmsr(mfmsr() | MSR_TM);
+               current->thread.tm_tfhar = mfspr(SPRN_TFHAR);
+               current->thread.tm_tfiar = mfspr(SPRN_TFIAR);
+               current->thread.tm_texasr = mfspr(SPRN_TEXASR);
+diff --git a/arch/powerpc/platforms/pseries/reconfig.c 
b/arch/powerpc/platforms/pseries/reconfig.c
+index e5bf1e84047f..011ef2180fe6 100644
+--- a/arch/powerpc/platforms/pseries/reconfig.c
++++ b/arch/powerpc/platforms/pseries/reconfig.c
+@@ -82,7 +82,6 @@ static int pSeries_reconfig_remove_node(struct device_node 
*np)
+ 
+       of_detach_node(np);
+       of_node_put(parent);
+-      of_node_put(np); /* Must decrement the refcount */
+       return 0;
+ }
+ 
+diff --git a/crypto/authencesn.c b/crypto/authencesn.c
+index 6f8f6b86bfe2..0cf5fefdb859 100644
+--- a/crypto/authencesn.c
++++ b/crypto/authencesn.c
+@@ -248,6 +248,9 @@ static int crypto_authenc_esn_decrypt_tail(struct 
aead_request *req,
+       u8 *ihash = ohash + crypto_ahash_digestsize(auth);
+       u32 tmp[2];
+ 
++      if (!authsize)
++              goto decrypt;
++
+       /* Move high-order bits of sequence number back. */
+       scatterwalk_map_and_copy(tmp, dst, 4, 4, 0);
+       scatterwalk_map_and_copy(tmp + 1, dst, assoclen + cryptlen, 4, 0);
+@@ -256,6 +259,8 @@ static int crypto_authenc_esn_decrypt_tail(struct 
aead_request *req,
+       if (crypto_memneq(ihash, ohash, authsize))
+               return -EBADMSG;
+ 
++decrypt:
++
+       sg_init_table(areq_ctx->dst, 2);
+       dst = scatterwalk_ffwd(areq_ctx->dst, dst, assoclen);
+ 
+diff --git a/drivers/char/ipmi/ipmi_watchdog.c 
b/drivers/char/ipmi/ipmi_watchdog.c
+index d165af8abe36..4161d9961a24 100644
+--- a/drivers/char/ipmi/ipmi_watchdog.c
++++ b/drivers/char/ipmi/ipmi_watchdog.c
+@@ -1163,10 +1163,11 @@ static int wdog_reboot_handler(struct notifier_block 
*this,
+                       ipmi_watchdog_state = WDOG_TIMEOUT_NONE;
+                       ipmi_set_timeout(IPMI_SET_TIMEOUT_NO_HB);
+               } else if (ipmi_watchdog_state != WDOG_TIMEOUT_NONE) {
+-                      /* Set a long timer to let the reboot happens, but
+-                         reboot if it hangs, but only if the watchdog
++                      /* Set a long timer to let the reboot happen or
++                         reset if it hangs, but only if the watchdog
+                          timer was already running. */
+-                      timeout = 120;
++                      if (timeout < 120)
++                              timeout = 120;
+                       pretimeout = 0;
+                       ipmi_watchdog_state = WDOG_TIMEOUT_RESET;
+                       ipmi_set_timeout(IPMI_SET_TIMEOUT_NO_HB);
+diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
+index fb1e60f5002e..778fc1bcccee 100644
+--- a/drivers/crypto/Kconfig
++++ b/drivers/crypto/Kconfig
+@@ -629,7 +629,7 @@ source "drivers/crypto/virtio/Kconfig"
+ config CRYPTO_DEV_BCM_SPU
+       tristate "Broadcom symmetric crypto/hash acceleration support"
+       depends on ARCH_BCM_IPROC
+-      depends on BCM_PDC_MBOX
++      depends on MAILBOX
+       default m
+       select CRYPTO_DES
+       select CRYPTO_MD5
+diff --git a/drivers/crypto/bcm/spu2.c b/drivers/crypto/bcm/spu2.c
+index ef04c9748317..bf7ac621c591 100644
+--- a/drivers/crypto/bcm/spu2.c
++++ b/drivers/crypto/bcm/spu2.c
+@@ -302,6 +302,7 @@ spu2_hash_xlate(enum hash_alg hash_alg, enum hash_mode 
hash_mode,
+               break;
+       case HASH_ALG_SHA3_512:
+               *spu2_type = SPU2_HASH_TYPE_SHA3_512;
++              break;
+       case HASH_ALG_LAST:
+       default:
+               err = -EINVAL;
+diff --git a/drivers/gpu/drm/i915/intel_display.c 
b/drivers/gpu/drm/i915/intel_display.c
+index 9106ea32b048..881df8843e66 100644
+--- a/drivers/gpu/drm/i915/intel_display.c
++++ b/drivers/gpu/drm/i915/intel_display.c
+@@ -9085,6 +9085,13 @@ static bool haswell_get_pipe_config(struct intel_crtc 
*crtc,
+       u64 power_domain_mask;
+       bool active;
+ 
++      if (INTEL_GEN(dev_priv) >= 9) {
++              intel_crtc_init_scalers(crtc, pipe_config);
++
++              pipe_config->scaler_state.scaler_id = -1;
++              pipe_config->scaler_state.scaler_users &= ~(1 << 
SKL_CRTC_INDEX);
++      }
++
+       power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
+       if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
+               return false;
+@@ -9113,13 +9120,6 @@ static bool haswell_get_pipe_config(struct intel_crtc 
*crtc,
+       pipe_config->gamma_mode =
+               I915_READ(GAMMA_MODE(crtc->pipe)) & GAMMA_MODE_MODE_MASK;
+ 
+-      if (INTEL_GEN(dev_priv) >= 9) {
+-              intel_crtc_init_scalers(crtc, pipe_config);
+-
+-              pipe_config->scaler_state.scaler_id = -1;
+-              pipe_config->scaler_state.scaler_users &= ~(1 << 
SKL_CRTC_INDEX);
+-      }
+-
+       power_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
+       if (intel_display_power_get_if_enabled(dev_priv, power_domain)) {
+               power_domain_mask |= BIT_ULL(power_domain);
+diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h 
b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h
+index 1e1de6bfe85a..5893be9788d3 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h
++++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h
+@@ -27,7 +27,7 @@ struct nv50_disp {
+               u8 type[3];
+       } pior;
+ 
+-      struct nv50_disp_chan *chan[17];
++      struct nv50_disp_chan *chan[21];
+ };
+ 
+ int nv50_disp_root_scanoutpos(NV50_DISP_MTHD_V0);
+diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c 
b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c
+index c794b2c2d21e..6d8f21290aa2 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c
++++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c
+@@ -129,7 +129,7 @@ gf100_bar_init(struct nvkm_bar *base)
+ 
+       if (bar->bar[0].mem) {
+               addr = nvkm_memory_addr(bar->bar[0].mem) >> 12;
+-              nvkm_wr32(device, 0x001714, 0xc0000000 | addr);
++              nvkm_wr32(device, 0x001714, 0x80000000 | addr);
+       }
+ 
+       return 0;
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c 
b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+index c7b53d987f06..fefb9d995d2c 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+@@ -519,7 +519,7 @@ static int vmw_cmd_invalid(struct vmw_private *dev_priv,
+                          struct vmw_sw_context *sw_context,
+                          SVGA3dCmdHeader *header)
+ {
+-      return capable(CAP_SYS_ADMIN) ? : -EINVAL;
++      return -EINVAL;
+ }
+ 
+ static int vmw_cmd_ok(struct vmw_private *dev_priv,
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c 
b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
+index 50be1f034f9e..5284e8d2f7ba 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
+@@ -1640,8 +1640,8 @@ int vmw_kms_stdu_init_display(struct vmw_private 
*dev_priv)
+                * something arbitrarily large and we will reject any layout
+                * that doesn't fit prim_bb_mem later
+                */
+-              dev->mode_config.max_width = 16384;
+-              dev->mode_config.max_height = 16384;
++              dev->mode_config.max_width = 8192;
++              dev->mode_config.max_height = 8192;
+       }
+ 
+       vmw_kms_create_implicit_placement_property(dev_priv, false);
+diff --git a/drivers/isdn/i4l/isdn_common.c b/drivers/isdn/i4l/isdn_common.c
+index 9b856e1890d1..e4c43a17b333 100644
+--- a/drivers/isdn/i4l/isdn_common.c
++++ b/drivers/isdn/i4l/isdn_common.c
+@@ -1379,6 +1379,7 @@ isdn_ioctl(struct file *file, uint cmd, ulong arg)
+                       if (arg) {
+                               if (copy_from_user(bname, argp, sizeof(bname) - 
1))
+                                       return -EFAULT;
++                              bname[sizeof(bname)-1] = 0;
+                       } else
+                               return -EINVAL;
+                       ret = mutex_lock_interruptible(&dev->mtx);
+diff --git a/drivers/isdn/i4l/isdn_net.c b/drivers/isdn/i4l/isdn_net.c
+index c151c6daa67e..f63a110b7bcb 100644
+--- a/drivers/isdn/i4l/isdn_net.c
++++ b/drivers/isdn/i4l/isdn_net.c
+@@ -2611,10 +2611,9 @@ isdn_net_newslave(char *parm)
+       char newname[10];
+ 
+       if (p) {
+-              /* Slave-Name MUST not be empty */
+-              if (!strlen(p + 1))
++              /* Slave-Name MUST not be empty or overflow 'newname' */
++              if (strscpy(newname, p + 1, sizeof(newname)) <= 0)
+                       return NULL;
+-              strcpy(newname, p + 1);
+               *p = 0;
+               /* Master must already exist */
+               if (!(n = isdn_net_findif(parm)))
+diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
+index 93b181088168..b68e21c25a17 100644
+--- a/drivers/md/dm-integrity.c
++++ b/drivers/md/dm-integrity.c
+@@ -1587,16 +1587,18 @@ static void dm_integrity_map_continue(struct 
dm_integrity_io *dio, bool from_map
+       if (likely(ic->mode == 'J')) {
+               if (dio->write) {
+                       unsigned next_entry, i, pos;
+-                      unsigned ws, we;
++                      unsigned ws, we, range_sectors;
+ 
+-                      dio->range.n_sectors = min(dio->range.n_sectors, 
ic->free_sectors);
++                      dio->range.n_sectors = min(dio->range.n_sectors,
++                                                 ic->free_sectors << 
ic->sb->log2_sectors_per_block);
+                       if (unlikely(!dio->range.n_sectors))
+                               goto sleep;
+-                      ic->free_sectors -= dio->range.n_sectors;
++                      range_sectors = dio->range.n_sectors >> 
ic->sb->log2_sectors_per_block;
++                      ic->free_sectors -= range_sectors;
+                       journal_section = ic->free_section;
+                       journal_entry = ic->free_section_entry;
+ 
+-                      next_entry = ic->free_section_entry + 
dio->range.n_sectors;
++                      next_entry = ic->free_section_entry + range_sectors;
+                       ic->free_section_entry = next_entry % 
ic->journal_section_entries;
+                       ic->free_section += next_entry / 
ic->journal_section_entries;
+                       ic->n_uncommitted_sections += next_entry / 
ic->journal_section_entries;
+@@ -3019,6 +3021,11 @@ static int dm_integrity_ctr(struct dm_target *ti, 
unsigned argc, char **argv)
+               ti->error = "Block size doesn't match the information in 
superblock";
+               goto bad;
+       }
++      if (!le32_to_cpu(ic->sb->journal_sections)) {
++              r = -EINVAL;
++              ti->error = "Corrupted superblock, journal_sections is 0";
++              goto bad;
++      }
+       /* make sure that ti->max_io_len doesn't overflow */
+       if (ic->sb->log2_interleave_sectors < MIN_LOG2_INTERLEAVE_SECTORS ||
+           ic->sb->log2_interleave_sectors > MAX_LOG2_INTERLEAVE_SECTORS) {
+diff --git a/drivers/md/md.h b/drivers/md/md.h
+index 63d342d560b8..33611a91b1d9 100644
+--- a/drivers/md/md.h
++++ b/drivers/md/md.h
+@@ -733,7 +733,6 @@ static inline void mddev_check_write_zeroes(struct mddev 
*mddev, struct bio *bio
+ 
+ /* for managing resync I/O pages */
+ struct resync_pages {
+-      unsigned        idx;    /* for get/put page from the pool */
+       void            *raid_bio;
+       struct page     *pages[RESYNC_PAGES];
+ };
+diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
+index 7866563338fa..5de4b3d04eb5 100644
+--- a/drivers/md/raid1.c
++++ b/drivers/md/raid1.c
+@@ -170,7 +170,6 @@ static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data)
+                       resync_get_all_pages(rp);
+               }
+ 
+-              rp->idx = 0;
+               rp->raid_bio = r1_bio;
+               bio->bi_private = rp;
+       }
+@@ -492,10 +491,6 @@ static void raid1_end_write_request(struct bio *bio)
+       }
+ 
+       if (behind) {
+-              /* we release behind master bio when all write are done */
+-              if (r1_bio->behind_master_bio == bio)
+-                      to_put = NULL;
+-
+               if (test_bit(WriteMostly, &rdev->flags))
+                       atomic_dec(&r1_bio->behind_remaining);
+ 
+@@ -1088,7 +1083,7 @@ static void unfreeze_array(struct r1conf *conf)
+       wake_up(&conf->wait_barrier);
+ }
+ 
+-static struct bio *alloc_behind_master_bio(struct r1bio *r1_bio,
++static void alloc_behind_master_bio(struct r1bio *r1_bio,
+                                          struct bio *bio)
+ {
+       int size = bio->bi_iter.bi_size;
+@@ -1098,11 +1093,13 @@ static struct bio *alloc_behind_master_bio(struct 
r1bio *r1_bio,
+ 
+       behind_bio = bio_alloc_mddev(GFP_NOIO, vcnt, r1_bio->mddev);
+       if (!behind_bio)
+-              goto fail;
++              return;
+ 
+       /* discard op, we don't support writezero/writesame yet */
+-      if (!bio_has_data(bio))
++      if (!bio_has_data(bio)) {
++              behind_bio->bi_iter.bi_size = size;
+               goto skip_copy;
++      }
+ 
+       while (i < vcnt && size) {
+               struct page *page;
+@@ -1123,14 +1120,13 @@ static struct bio *alloc_behind_master_bio(struct 
r1bio *r1_bio,
+       r1_bio->behind_master_bio = behind_bio;;
+       set_bit(R1BIO_BehindIO, &r1_bio->state);
+ 
+-      return behind_bio;
++      return;
+ 
+ free_pages:
+       pr_debug("%dB behind alloc failed, doing sync I/O\n",
+                bio->bi_iter.bi_size);
+       bio_free_pages(behind_bio);
+-fail:
+-      return behind_bio;
++      bio_put(behind_bio);
+ }
+ 
+ struct raid1_plug_cb {
+@@ -1483,7 +1479,7 @@ static void raid1_write_request(struct mddev *mddev, 
struct bio *bio,
+                           (atomic_read(&bitmap->behind_writes)
+                            < mddev->bitmap_info.max_write_behind) &&
+                           !waitqueue_active(&bitmap->behind_wait)) {
+-                              mbio = alloc_behind_master_bio(r1_bio, bio);
++                              alloc_behind_master_bio(r1_bio, bio);
+                       }
+ 
+                       bitmap_startwrite(bitmap, r1_bio->sector,
+@@ -1493,14 +1489,11 @@ static void raid1_write_request(struct mddev *mddev, 
struct bio *bio,
+                       first_clone = 0;
+               }
+ 
+-              if (!mbio) {
+-                      if (r1_bio->behind_master_bio)
+-                              mbio = bio_clone_fast(r1_bio->behind_master_bio,
+-                                                    GFP_NOIO,
+-                                                    mddev->bio_set);
+-                      else
+-                              mbio = bio_clone_fast(bio, GFP_NOIO, 
mddev->bio_set);
+-              }
++              if (r1_bio->behind_master_bio)
++                      mbio = bio_clone_fast(r1_bio->behind_master_bio,
++                                            GFP_NOIO, mddev->bio_set);
++              else
++                      mbio = bio_clone_fast(bio, GFP_NOIO, mddev->bio_set);
+ 
+               if (r1_bio->behind_master_bio) {
+                       if (test_bit(WriteMostly, 
&conf->mirrors[i].rdev->flags))
+@@ -2368,8 +2361,6 @@ static int narrow_write_error(struct r1bio *r1_bio, int 
i)
+                       wbio = bio_clone_fast(r1_bio->behind_master_bio,
+                                             GFP_NOIO,
+                                             mddev->bio_set);
+-                      /* We really need a _all clone */
+-                      wbio->bi_iter = (struct bvec_iter){ 0 };
+               } else {
+                       wbio = bio_clone_fast(r1_bio->master_bio, GFP_NOIO,
+                                             mddev->bio_set);
+@@ -2621,6 +2612,7 @@ static sector_t raid1_sync_request(struct mddev *mddev, 
sector_t sector_nr,
+       int good_sectors = RESYNC_SECTORS;
+       int min_bad = 0; /* number of sectors that are bad in all devices */
+       int idx = sector_to_idx(sector_nr);
++      int page_idx = 0;
+ 
+       if (!conf->r1buf_pool)
+               if (init_resync(conf))
+@@ -2848,7 +2840,7 @@ static sector_t raid1_sync_request(struct mddev *mddev, 
sector_t sector_nr,
+                       bio = r1_bio->bios[i];
+                       rp = get_resync_pages(bio);
+                       if (bio->bi_end_io) {
+-                              page = resync_fetch_page(rp, rp->idx++);
++                              page = resync_fetch_page(rp, page_idx);
+ 
+                               /*
+                                * won't fail because the vec table is big
+@@ -2860,7 +2852,7 @@ static sector_t raid1_sync_request(struct mddev *mddev, 
sector_t sector_nr,
+               nr_sectors += len>>9;
+               sector_nr += len>>9;
+               sync_blocks -= (len>>9);
+-      } while (get_resync_pages(r1_bio->bios[disk]->bi_private)->idx < 
RESYNC_PAGES);
++      } while (++page_idx < RESYNC_PAGES);
+ 
+       r1_bio->sectors = nr_sectors;
+ 
+diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
+index 52acffa7a06a..bfc6db236348 100644
+--- a/drivers/md/raid10.c
++++ b/drivers/md/raid10.c
+@@ -221,7 +221,6 @@ static void * r10buf_pool_alloc(gfp_t gfp_flags, void 
*data)
+                       resync_get_all_pages(rp);
+               }
+ 
+-              rp->idx = 0;
+               rp->raid_bio = r10_bio;
+               bio->bi_private = rp;
+               if (rbio) {
+@@ -2853,6 +2852,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, 
sector_t sector_nr,
+       sector_t sectors_skipped = 0;
+       int chunks_skipped = 0;
+       sector_t chunk_mask = conf->geo.chunk_mask;
++      int page_idx = 0;
+ 
+       if (!conf->r10buf_pool)
+               if (init_resync(conf))
+@@ -3355,7 +3355,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, 
sector_t sector_nr,
+                       break;
+               for (bio= biolist ; bio ; bio=bio->bi_next) {
+                       struct resync_pages *rp = get_resync_pages(bio);
+-                      page = resync_fetch_page(rp, rp->idx++);
++                      page = resync_fetch_page(rp, page_idx);
+                       /*
+                        * won't fail because the vec table is big enough
+                        * to hold all these pages
+@@ -3364,7 +3364,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, 
sector_t sector_nr,
+               }
+               nr_sectors += len>>9;
+               sector_nr += len>>9;
+-      } while (get_resync_pages(biolist)->idx < RESYNC_PAGES);
++      } while (++page_idx < RESYNC_PAGES);
+       r10_bio->sectors = nr_sectors;
+ 
+       while (biolist) {
+diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
+index d524daddc630..e92dd2dc4b5a 100644
+--- a/drivers/md/raid5.c
++++ b/drivers/md/raid5.c
+@@ -6237,6 +6237,8 @@ static void raid5_do_work(struct work_struct *work)
+       pr_debug("%d stripes handled\n", handled);
+ 
+       spin_unlock_irq(&conf->device_lock);
++
++      async_tx_issue_pending_all();
+       blk_finish_plug(&plug);
+ 
+       pr_debug("--- raid5worker inactive\n");
+diff --git a/drivers/mmc/host/sunxi-mmc.c b/drivers/mmc/host/sunxi-mmc.c
+index d6fa2214aaae..0fb4e4c119e1 100644
+--- a/drivers/mmc/host/sunxi-mmc.c
++++ b/drivers/mmc/host/sunxi-mmc.c
+@@ -793,8 +793,12 @@ static int sunxi_mmc_clk_set_rate(struct sunxi_mmc_host 
*host,
+       }
+       mmc_writel(host, REG_CLKCR, rval);
+ 
+-      if (host->cfg->needs_new_timings)
+-              mmc_writel(host, REG_SD_NTSR, SDXC_2X_TIMING_MODE);
++      if (host->cfg->needs_new_timings) {
++              /* Don't touch the delay bits */
++              rval = mmc_readl(host, REG_SD_NTSR);
++              rval |= SDXC_2X_TIMING_MODE;
++              mmc_writel(host, REG_SD_NTSR, rval);
++      }
+ 
+       ret = sunxi_mmc_clk_set_phase(host, ios, rate);
+       if (ret)
+diff --git a/drivers/mmc/host/tmio_mmc_pio.c b/drivers/mmc/host/tmio_mmc_pio.c
+index a2d92f10501b..a3d20e39e5b5 100644
+--- a/drivers/mmc/host/tmio_mmc_pio.c
++++ b/drivers/mmc/host/tmio_mmc_pio.c
+@@ -404,30 +404,29 @@ static void tmio_mmc_transfer_data(struct tmio_mmc_host 
*host,
+        * Transfer the data
+        */
+       if (host->pdata->flags & TMIO_MMC_32BIT_DATA_PORT) {
+-              u8 data[4] = { };
++              u32 data = 0;
++              u32 *buf32 = (u32 *)buf;
+ 
+               if (is_read)
+-                      sd_ctrl_read32_rep(host, CTL_SD_DATA_PORT, (u32 *)buf,
++                      sd_ctrl_read32_rep(host, CTL_SD_DATA_PORT, buf32,
+                                          count >> 2);
+               else
+-                      sd_ctrl_write32_rep(host, CTL_SD_DATA_PORT, (u32 *)buf,
++                      sd_ctrl_write32_rep(host, CTL_SD_DATA_PORT, buf32,
+                                           count >> 2);
+ 
+               /* if count was multiple of 4 */
+               if (!(count & 0x3))
+                       return;
+ 
+-              buf8 = (u8 *)(buf + (count >> 2));
++              buf32 += count >> 2;
+               count %= 4;
+ 
+               if (is_read) {
+-                      sd_ctrl_read32_rep(host, CTL_SD_DATA_PORT,
+-                                         (u32 *)data, 1);
+-                      memcpy(buf8, data, count);
++                      sd_ctrl_read32_rep(host, CTL_SD_DATA_PORT, &data, 1);
++                      memcpy(buf32, &data, count);
+               } else {
+-                      memcpy(data, buf8, count);
+-                      sd_ctrl_write32_rep(host, CTL_SD_DATA_PORT,
+-                                          (u32 *)data, 1);
++                      memcpy(&data, buf32, count);
++                      sd_ctrl_write32_rep(host, CTL_SD_DATA_PORT, &data, 1);
+               }
+ 
+               return;
+diff --git a/fs/dcache.c b/fs/dcache.c
+index 1161390f4935..736754c5ab63 100644
+--- a/fs/dcache.c
++++ b/fs/dcache.c
+@@ -277,6 +277,33 @@ static inline int dname_external(const struct dentry 
*dentry)
+       return dentry->d_name.name != dentry->d_iname;
+ }
+ 
++void take_dentry_name_snapshot(struct name_snapshot *name, struct dentry 
*dentry)
++{
++      spin_lock(&dentry->d_lock);
++      if (unlikely(dname_external(dentry))) {
++              struct external_name *p = external_name(dentry);
++              atomic_inc(&p->u.count);
++              spin_unlock(&dentry->d_lock);
++              name->name = p->name;
++      } else {
++              memcpy(name->inline_name, dentry->d_iname, DNAME_INLINE_LEN);
++              spin_unlock(&dentry->d_lock);
++              name->name = name->inline_name;
++      }
++}
++EXPORT_SYMBOL(take_dentry_name_snapshot);
++
++void release_dentry_name_snapshot(struct name_snapshot *name)
++{
++      if (unlikely(name->name != name->inline_name)) {
++              struct external_name *p;
++              p = container_of(name->name, struct external_name, name[0]);
++              if (unlikely(atomic_dec_and_test(&p->u.count)))
++                      kfree_rcu(p, u.head);
++      }
++}
++EXPORT_SYMBOL(release_dentry_name_snapshot);
++
+ static inline void __d_set_inode_and_type(struct dentry *dentry,
+                                         struct inode *inode,
+                                         unsigned type_flags)
+diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
+index e892ae7d89f8..acd3be2cc691 100644
+--- a/fs/debugfs/inode.c
++++ b/fs/debugfs/inode.c
+@@ -766,7 +766,7 @@ struct dentry *debugfs_rename(struct dentry *old_dir, 
struct dentry *old_dentry,
+ {
+       int error;
+       struct dentry *dentry = NULL, *trap;
+-      const char *old_name;
++      struct name_snapshot old_name;
+ 
+       trap = lock_rename(new_dir, old_dir);
+       /* Source or destination directories don't exist? */
+@@ -781,19 +781,19 @@ struct dentry *debugfs_rename(struct dentry *old_dir, 
struct dentry *old_dentry,
+       if (IS_ERR(dentry) || dentry == trap || d_really_is_positive(dentry))
+               goto exit;
+ 
+-      old_name = fsnotify_oldname_init(old_dentry->d_name.name);
++      take_dentry_name_snapshot(&old_name, old_dentry);
+ 
+       error = simple_rename(d_inode(old_dir), old_dentry, d_inode(new_dir),
+                             dentry, 0);
+       if (error) {
+-              fsnotify_oldname_free(old_name);
++              release_dentry_name_snapshot(&old_name);
+               goto exit;
+       }
+       d_move(old_dentry, dentry);
+-      fsnotify_move(d_inode(old_dir), d_inode(new_dir), old_name,
++      fsnotify_move(d_inode(old_dir), d_inode(new_dir), old_name.name,
+               d_is_dir(old_dentry),
+               NULL, old_dentry);
+-      fsnotify_oldname_free(old_name);
++      release_dentry_name_snapshot(&old_name);
+       unlock_rename(new_dir, old_dir);
+       dput(dentry);
+       return old_dentry;
+diff --git a/fs/jfs/acl.c b/fs/jfs/acl.c
+index 7bc186f4ed4d..1be45c8d460d 100644
+--- a/fs/jfs/acl.c
++++ b/fs/jfs/acl.c
+@@ -77,13 +77,6 @@ static int __jfs_set_acl(tid_t tid, struct inode *inode, int type,
+       switch (type) {
+       case ACL_TYPE_ACCESS:
+               ea_name = XATTR_NAME_POSIX_ACL_ACCESS;
+-              if (acl) {
+-                      rc = posix_acl_update_mode(inode, &inode->i_mode, &acl);
+-                      if (rc)
+-                              return rc;
+-                      inode->i_ctime = current_time(inode);
+-                      mark_inode_dirty(inode);
+-              }
+               break;
+       case ACL_TYPE_DEFAULT:
+               ea_name = XATTR_NAME_POSIX_ACL_DEFAULT;
+@@ -118,9 +111,17 @@ int jfs_set_acl(struct inode *inode, struct posix_acl *acl, int type)
+ 
+       tid = txBegin(inode->i_sb, 0);
+       mutex_lock(&JFS_IP(inode)->commit_mutex);
++      if (type == ACL_TYPE_ACCESS && acl) {
++              rc = posix_acl_update_mode(inode, &inode->i_mode, &acl);
++              if (rc)
++                      goto end_tx;
++              inode->i_ctime = current_time(inode);
++              mark_inode_dirty(inode);
++      }
+       rc = __jfs_set_acl(tid, inode, type, acl);
+       if (!rc)
+               rc = txCommit(tid, 1, &inode, 0);
++end_tx:
+       txEnd(tid);
+       mutex_unlock(&JFS_IP(inode)->commit_mutex);
+       return rc;
+diff --git a/fs/namei.c b/fs/namei.c
+index 6571a5f5112e..281c1f7fa983 100644
+--- a/fs/namei.c
++++ b/fs/namei.c
+@@ -4362,11 +4362,11 @@ int vfs_rename(struct inode *old_dir, struct dentry *old_dentry,
+ {
+       int error;
+       bool is_dir = d_is_dir(old_dentry);
+-      const unsigned char *old_name;
+       struct inode *source = old_dentry->d_inode;
+       struct inode *target = new_dentry->d_inode;
+       bool new_is_dir = false;
+       unsigned max_links = new_dir->i_sb->s_max_links;
++      struct name_snapshot old_name;
+ 
+       if (source == target)
+               return 0;
+@@ -4413,7 +4413,7 @@ int vfs_rename(struct inode *old_dir, struct dentry *old_dentry,
+       if (error)
+               return error;
+ 
+-      old_name = fsnotify_oldname_init(old_dentry->d_name.name);
++      take_dentry_name_snapshot(&old_name, old_dentry);
+       dget(new_dentry);
+       if (!is_dir || (flags & RENAME_EXCHANGE))
+               lock_two_nondirectories(source, target);
+@@ -4468,14 +4468,14 @@ int vfs_rename(struct inode *old_dir, struct dentry *old_dentry,
+               inode_unlock(target);
+       dput(new_dentry);
+       if (!error) {
+-              fsnotify_move(old_dir, new_dir, old_name, is_dir,
++              fsnotify_move(old_dir, new_dir, old_name.name, is_dir,
+                             !(flags & RENAME_EXCHANGE) ? target : NULL, old_dentry);
+               if (flags & RENAME_EXCHANGE) {
+                       fsnotify_move(new_dir, old_dir, old_dentry->d_name.name,
+                                     new_is_dir, NULL, new_dentry);
+               }
+       }
+-      fsnotify_oldname_free(old_name);
++      release_dentry_name_snapshot(&old_name);
+ 
+       return error;
+ }
+diff --git a/fs/nfs/file.c b/fs/nfs/file.c
+index 5713eb32a45e..d264363559db 100644
+--- a/fs/nfs/file.c
++++ b/fs/nfs/file.c
+@@ -750,7 +750,7 @@ do_setlk(struct file *filp, int cmd, struct file_lock *fl, int is_local)
+        */
+       nfs_sync_mapping(filp->f_mapping);
+       if (!NFS_PROTO(inode)->have_delegation(inode, FMODE_READ))
+-              nfs_zap_mapping(inode, filp->f_mapping);
++              nfs_zap_caches(inode);
+ out:
+       return status;
+ }
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index dbfa18900e25..f5a7faac39a7 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -6441,7 +6441,7 @@ nfs4_retry_setlk(struct nfs4_state *state, int cmd, struct file_lock *request)
+               set_current_state(TASK_INTERRUPTIBLE);
+               spin_unlock_irqrestore(&q->lock, flags);
+ 
+-              freezable_schedule_timeout_interruptible(NFS4_LOCK_MAXTIMEOUT);
++              freezable_schedule_timeout(NFS4_LOCK_MAXTIMEOUT);
+       }
+ 
+       finish_wait(q, &wait);
+diff --git a/fs/notify/fsnotify.c b/fs/notify/fsnotify.c
+index 01a9f0f007d4..0c4583b61717 100644
+--- a/fs/notify/fsnotify.c
++++ b/fs/notify/fsnotify.c
+@@ -161,16 +161,20 @@ int __fsnotify_parent(const struct path *path, struct dentry *dentry, __u32 mask
+       if (unlikely(!fsnotify_inode_watches_children(p_inode)))
+               __fsnotify_update_child_dentry_flags(p_inode);
+       else if (p_inode->i_fsnotify_mask & mask) {
++              struct name_snapshot name;
++
+               /* we are notifying a parent so come up with the new mask which
+                * specifies these are events which came from a child. */
+               mask |= FS_EVENT_ON_CHILD;
+ 
++              take_dentry_name_snapshot(&name, dentry);
+               if (path)
+                       ret = fsnotify(p_inode, mask, path, FSNOTIFY_EVENT_PATH,
+-                                     dentry->d_name.name, 0);
++                                     name.name, 0);
+               else
+                      ret = fsnotify(p_inode, mask, dentry->d_inode, FSNOTIFY_EVENT_INODE,
+-                                     dentry->d_name.name, 0);
++                                     name.name, 0);
++              release_dentry_name_snapshot(&name);
+       }
+ 
+       dput(parent);
+diff --git a/include/linux/dcache.h b/include/linux/dcache.h
+index d2e38dc6172c..025727bf6797 100644
+--- a/include/linux/dcache.h
++++ b/include/linux/dcache.h
+@@ -591,5 +591,11 @@ static inline struct inode *d_real_inode(const struct dentry *dentry)
+       return d_backing_inode(d_real((struct dentry *) dentry, NULL, 0));
+ }
+ 
++struct name_snapshot {
++      const char *name;
++      char inline_name[DNAME_INLINE_LEN];
++};
++void take_dentry_name_snapshot(struct name_snapshot *, struct dentry *);
++void release_dentry_name_snapshot(struct name_snapshot *);
+ 
+ #endif        /* __LINUX_DCACHE_H */
+diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h
+index b43d3f5bd9ea..b78aa7ac77ce 100644
+--- a/include/linux/fsnotify.h
++++ b/include/linux/fsnotify.h
+@@ -293,35 +293,4 @@ static inline void fsnotify_change(struct dentry *dentry, unsigned int ia_valid)
+       }
+ }
+ 
+-#if defined(CONFIG_FSNOTIFY)  /* notify helpers */
+-
+-/*
+- * fsnotify_oldname_init - save off the old filename before we change it
+- */
+-static inline const unsigned char *fsnotify_oldname_init(const unsigned char *name)
+-{
+-      return kstrdup(name, GFP_KERNEL);
+-}
+-
+-/*
+- * fsnotify_oldname_free - free the name we got from fsnotify_oldname_init
+- */
+-static inline void fsnotify_oldname_free(const unsigned char *old_name)
+-{
+-      kfree(old_name);
+-}
+-
+-#else /* CONFIG_FSNOTIFY */
+-
+-static inline const char *fsnotify_oldname_init(const unsigned char *name)
+-{
+-      return NULL;
+-}
+-
+-static inline void fsnotify_oldname_free(const unsigned char *old_name)
+-{
+-}
+-
+-#endif        /*  CONFIG_FSNOTIFY */
+-
+ #endif        /* _LINUX_FS_NOTIFY_H */
+diff --git a/scripts/dtc/dtx_diff b/scripts/dtc/dtx_diff
+index ec47f95991a3..586cccea46ce 100755
+--- a/scripts/dtc/dtx_diff
++++ b/scripts/dtc/dtx_diff
+@@ -321,7 +321,7 @@ fi
+ cpp_flags="\
+       -nostdinc                                  \
+       -I${srctree}/arch/${ARCH}/boot/dts         \
+-      -I${srctree}/arch/${ARCH}/boot/dts/include \
++      -I${srctree}/scripts/dtc/include-prefixes  \
+       -I${srctree}/drivers/of/testcase-data      \
+       -undef -D__DTS__"
+ 
+diff --git a/sound/pci/fm801.c b/sound/pci/fm801.c
+index c47287d79306..a178e0d03088 100644
+--- a/sound/pci/fm801.c
++++ b/sound/pci/fm801.c
+@@ -1235,8 +1235,6 @@ static int snd_fm801_create(struct snd_card *card,
+               }
+       }
+ 
+-      snd_fm801_chip_init(chip);
+-
+       if ((chip->tea575x_tuner & TUNER_ONLY) == 0) {
+               if (devm_request_irq(&pci->dev, pci->irq, snd_fm801_interrupt,
+                               IRQF_SHARED, KBUILD_MODNAME, chip)) {
+@@ -1248,6 +1246,8 @@ static int snd_fm801_create(struct snd_card *card,
+               pci_set_master(pci);
+       }
+ 
++      snd_fm801_chip_init(chip);
++
+       if ((err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops)) < 0) {
+               snd_fm801_free(chip);
+               return err;
+diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
+index 63bc894ddf5e..8c1289963c80 100644
+--- a/sound/pci/hda/patch_conexant.c
++++ b/sound/pci/hda/patch_conexant.c
+@@ -933,6 +933,7 @@ static const struct snd_pci_quirk cxt5066_fixups[] = {
+       SND_PCI_QUIRK(0x103c, 0x8174, "HP Spectre x360", CXT_FIXUP_HP_SPECTRE),
+       SND_PCI_QUIRK(0x103c, 0x8115, "HP Z1 Gen3", CXT_FIXUP_HP_GATE_MIC),
+      SND_PCI_QUIRK(0x103c, 0x814f, "HP ZBook 15u G3", CXT_FIXUP_MUTE_LED_GPIO),
++      SND_PCI_QUIRK(0x103c, 0x822e, "HP ProBook 440 G4", CXT_FIXUP_MUTE_LED_GPIO),
+       SND_PCI_QUIRK(0x1043, 0x138d, "Asus", CXT_FIXUP_HEADPHONE_MIC_PIN),
+       SND_PCI_QUIRK(0x152d, 0x0833, "OLPC XO-1.5", CXT_FIXUP_OLPC_XO),
+       SND_PCI_QUIRK(0x17aa, 0x20f2, "Lenovo T400", CXT_PINCFG_LENOVO_TP410),
+diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
+index 90e4ff87445e..c87ff8e5d1d5 100644
+--- a/sound/pci/hda/patch_hdmi.c
++++ b/sound/pci/hda/patch_hdmi.c
+@@ -3757,11 +3757,15 @@ HDA_CODEC_ENTRY(0x1002aa01, "R6xx HDMI",       patch_atihdmi),
+ HDA_CODEC_ENTRY(0x10951390, "SiI1390 HDMI",   patch_generic_hdmi),
+ HDA_CODEC_ENTRY(0x10951392, "SiI1392 HDMI",   patch_generic_hdmi),
+ HDA_CODEC_ENTRY(0x17e80047, "Chrontel HDMI",  patch_generic_hdmi),
++HDA_CODEC_ENTRY(0x10de0001, "MCP73 HDMI",     patch_nvhdmi_2ch),
+ HDA_CODEC_ENTRY(0x10de0002, "MCP77/78 HDMI",  patch_nvhdmi_8ch_7x),
+ HDA_CODEC_ENTRY(0x10de0003, "MCP77/78 HDMI",  patch_nvhdmi_8ch_7x),
++HDA_CODEC_ENTRY(0x10de0004, "GPU 04 HDMI",    patch_nvhdmi_8ch_7x),
+ HDA_CODEC_ENTRY(0x10de0005, "MCP77/78 HDMI",  patch_nvhdmi_8ch_7x),
+ HDA_CODEC_ENTRY(0x10de0006, "MCP77/78 HDMI",  patch_nvhdmi_8ch_7x),
+ HDA_CODEC_ENTRY(0x10de0007, "MCP79/7A HDMI",  patch_nvhdmi_8ch_7x),
++HDA_CODEC_ENTRY(0x10de0008, "GPU 08 HDMI/DP", patch_nvhdmi),
++HDA_CODEC_ENTRY(0x10de0009, "GPU 09 HDMI/DP", patch_nvhdmi),
+ HDA_CODEC_ENTRY(0x10de000a, "GPU 0a HDMI/DP", patch_nvhdmi),
+ HDA_CODEC_ENTRY(0x10de000b, "GPU 0b HDMI/DP", patch_nvhdmi),
+ HDA_CODEC_ENTRY(0x10de000c, "MCP89 HDMI",     patch_nvhdmi),
+@@ -3788,17 +3792,40 @@ HDA_CODEC_ENTRY(0x10de0041, "GPU 41 HDMI/DP",  patch_nvhdmi),
+ HDA_CODEC_ENTRY(0x10de0042, "GPU 42 HDMI/DP", patch_nvhdmi),
+ HDA_CODEC_ENTRY(0x10de0043, "GPU 43 HDMI/DP", patch_nvhdmi),
+ HDA_CODEC_ENTRY(0x10de0044, "GPU 44 HDMI/DP", patch_nvhdmi),
++HDA_CODEC_ENTRY(0x10de0045, "GPU 45 HDMI/DP", patch_nvhdmi),
++HDA_CODEC_ENTRY(0x10de0050, "GPU 50 HDMI/DP", patch_nvhdmi),
+ HDA_CODEC_ENTRY(0x10de0051, "GPU 51 HDMI/DP", patch_nvhdmi),
++HDA_CODEC_ENTRY(0x10de0052, "GPU 52 HDMI/DP", patch_nvhdmi),
+ HDA_CODEC_ENTRY(0x10de0060, "GPU 60 HDMI/DP", patch_nvhdmi),
++HDA_CODEC_ENTRY(0x10de0061, "GPU 61 HDMI/DP", patch_nvhdmi),
++HDA_CODEC_ENTRY(0x10de0062, "GPU 62 HDMI/DP", patch_nvhdmi),
+ HDA_CODEC_ENTRY(0x10de0067, "MCP67 HDMI",     patch_nvhdmi_2ch),
+ HDA_CODEC_ENTRY(0x10de0070, "GPU 70 HDMI/DP", patch_nvhdmi),
+ HDA_CODEC_ENTRY(0x10de0071, "GPU 71 HDMI/DP", patch_nvhdmi),
+ HDA_CODEC_ENTRY(0x10de0072, "GPU 72 HDMI/DP", patch_nvhdmi),
++HDA_CODEC_ENTRY(0x10de0073, "GPU 73 HDMI/DP", patch_nvhdmi),
++HDA_CODEC_ENTRY(0x10de0074, "GPU 74 HDMI/DP", patch_nvhdmi),
++HDA_CODEC_ENTRY(0x10de0076, "GPU 76 HDMI/DP", patch_nvhdmi),
++HDA_CODEC_ENTRY(0x10de007b, "GPU 7b HDMI/DP", patch_nvhdmi),
++HDA_CODEC_ENTRY(0x10de007c, "GPU 7c HDMI/DP", patch_nvhdmi),
+ HDA_CODEC_ENTRY(0x10de007d, "GPU 7d HDMI/DP", patch_nvhdmi),
++HDA_CODEC_ENTRY(0x10de007e, "GPU 7e HDMI/DP", patch_nvhdmi),
+ HDA_CODEC_ENTRY(0x10de0080, "GPU 80 HDMI/DP", patch_nvhdmi),
++HDA_CODEC_ENTRY(0x10de0081, "GPU 81 HDMI/DP", patch_nvhdmi),
+ HDA_CODEC_ENTRY(0x10de0082, "GPU 82 HDMI/DP", patch_nvhdmi),
+ HDA_CODEC_ENTRY(0x10de0083, "GPU 83 HDMI/DP", patch_nvhdmi),
++HDA_CODEC_ENTRY(0x10de0084, "GPU 84 HDMI/DP", patch_nvhdmi),
++HDA_CODEC_ENTRY(0x10de0090, "GPU 90 HDMI/DP", patch_nvhdmi),
++HDA_CODEC_ENTRY(0x10de0091, "GPU 91 HDMI/DP", patch_nvhdmi),
++HDA_CODEC_ENTRY(0x10de0092, "GPU 92 HDMI/DP", patch_nvhdmi),
++HDA_CODEC_ENTRY(0x10de0093, "GPU 93 HDMI/DP", patch_nvhdmi),
++HDA_CODEC_ENTRY(0x10de0094, "GPU 94 HDMI/DP", patch_nvhdmi),
++HDA_CODEC_ENTRY(0x10de0095, "GPU 95 HDMI/DP", patch_nvhdmi),
++HDA_CODEC_ENTRY(0x10de0097, "GPU 97 HDMI/DP", patch_nvhdmi),
++HDA_CODEC_ENTRY(0x10de0098, "GPU 98 HDMI/DP", patch_nvhdmi),
++HDA_CODEC_ENTRY(0x10de0099, "GPU 99 HDMI/DP", patch_nvhdmi),
+ HDA_CODEC_ENTRY(0x10de8001, "MCP73 HDMI",     patch_nvhdmi_2ch),
++HDA_CODEC_ENTRY(0x10de8067, "MCP67/68 HDMI",  patch_nvhdmi_2ch),
+ HDA_CODEC_ENTRY(0x11069f80, "VX900 HDMI/DP",  patch_via_hdmi),
+ HDA_CODEC_ENTRY(0x11069f81, "VX900 HDMI/DP",  patch_via_hdmi),
+ HDA_CODEC_ENTRY(0x11069f84, "VX11 HDMI/DP",   patch_generic_hdmi),

Reply via email to