commit:     76a9bde411289f9d109fa8370cae7ed52a44fec6
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sat Jan 29 17:46:26 2022 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sat Jan 29 17:46:26 2022 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=76a9bde4

Linux patch 4.9.299

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README              |   4 +
 1298_linux-4.9.299.patch | 807 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 811 insertions(+)

diff --git a/0000_README b/0000_README
index 52cd88fb..671be5e2 100644
--- a/0000_README
+++ b/0000_README
@@ -1235,6 +1235,10 @@ Patch:  1297_linux-4.9.298.patch
 From:   http://www.kernel.org
 Desc:   Linux 4.9.298
 
+Patch:  1298_linux-4.9.299.patch
+From:   http://www.kernel.org
+Desc:   Linux 4.9.299
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1298_linux-4.9.299.patch b/1298_linux-4.9.299.patch
new file mode 100644
index 00000000..95feaed3
--- /dev/null
+++ b/1298_linux-4.9.299.patch
@@ -0,0 +1,807 @@
+diff --git a/Documentation/virtual/kvm/mmu.txt b/Documentation/virtual/kvm/mmu.txt
+index 481b6a9c25d5a..16ddfd6bd6a1a 100644
+--- a/Documentation/virtual/kvm/mmu.txt
++++ b/Documentation/virtual/kvm/mmu.txt
+@@ -152,8 +152,8 @@ Shadow pages contain the following information:
+     shadow pages) so role.quadrant takes values in the range 0..3.  Each
+     quadrant maps 1GB virtual address space.
+   role.access:
+-    Inherited guest access permissions in the form uwx.  Note execute
+-    permission is positive, not negative.
++    Inherited guest access permissions from the parent ptes in the form uwx.
++    Note execute permission is positive, not negative.
+   role.invalid:
+     The page is invalid and should not be used.  It is a root page that is
+     currently pinned (by a cpu hardware register pointing to it); once it is
+diff --git a/Makefile b/Makefile
+index b0f683f18df71..99d37c23495ef 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 9
+-SUBLEVEL = 298
++SUBLEVEL = 299
+ EXTRAVERSION =
+ NAME = Roaring Lionus
+ 
+diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug
+index bb8f39fe3a225..8df8cdd093e98 100644
+--- a/arch/arm/Kconfig.debug
++++ b/arch/arm/Kconfig.debug
+@@ -15,30 +15,42 @@ config ARM_PTDUMP
+         kernel.
+         If in doubt, say "N"
+ 
+-# RMK wants arm kernels compiled with frame pointers or stack unwinding.
+-# If you know what you are doing and are willing to live without stack
+-# traces, you can get a slightly smaller kernel by setting this option to
+-# n, but then RMK will have to kill you ;).
+-config FRAME_POINTER
+-      bool
+-      depends on !THUMB2_KERNEL
+-      default y if !ARM_UNWIND || FUNCTION_GRAPH_TRACER
++choice
++      prompt "Choose kernel unwinder"
++      default UNWINDER_ARM if AEABI && !FUNCTION_GRAPH_TRACER
++      default UNWINDER_FRAME_POINTER if !AEABI || FUNCTION_GRAPH_TRACER
++      help
++        This determines which method will be used for unwinding kernel stack
++        traces for panics, oopses, bugs, warnings, perf, /proc/<pid>/stack,
++        livepatch, lockdep, and more.
++
++config UNWINDER_FRAME_POINTER
++      bool "Frame pointer unwinder"
++      depends on !THUMB2_KERNEL && !CC_IS_CLANG
++      select ARCH_WANT_FRAME_POINTERS
++      select FRAME_POINTER
+       help
+-        If you say N here, the resulting kernel will be slightly smaller and
+-        faster. However, if neither FRAME_POINTER nor ARM_UNWIND are enabled,
+-        when a problem occurs with the kernel, the information that is
+-        reported is severely limited.
++        This option enables the frame pointer unwinder for unwinding
++        kernel stack traces.
+ 
+-config ARM_UNWIND
+-      bool "Enable stack unwinding support (EXPERIMENTAL)"
++config UNWINDER_ARM
++      bool "ARM EABI stack unwinder"
+       depends on AEABI
+-      default y
++      select ARM_UNWIND
+       help
+         This option enables stack unwinding support in the kernel
+         using the information automatically generated by the
+         compiler. The resulting kernel image is slightly bigger but
+         the performance is not affected. Currently, this feature
+-        only works with EABI compilers. If unsure say Y.
++        only works with EABI compilers.
++
++endchoice
++
++config ARM_UNWIND
++      bool
++
++config FRAME_POINTER
++      bool
+ 
+ config OLD_MCOUNT
+       bool
+diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
+index e03225e707b26..d92c7758efad1 100644
+--- a/arch/x86/kvm/paging_tmpl.h
++++ b/arch/x86/kvm/paging_tmpl.h
+@@ -100,8 +100,8 @@ struct guest_walker {
+       gpa_t pte_gpa[PT_MAX_FULL_LEVELS];
+       pt_element_t __user *ptep_user[PT_MAX_FULL_LEVELS];
+       bool pte_writable[PT_MAX_FULL_LEVELS];
+-      unsigned pt_access;
+-      unsigned pte_access;
++      unsigned int pt_access[PT_MAX_FULL_LEVELS];
++      unsigned int pte_access;
+       gfn_t gfn;
+       struct x86_exception fault;
+ };
+@@ -285,9 +285,11 @@ static int FNAME(walk_addr_generic)(struct guest_walker *walker,
+       pt_element_t pte;
+       pt_element_t __user *uninitialized_var(ptep_user);
+       gfn_t table_gfn;
+-      unsigned index, pt_access, pte_access, accessed_dirty, pte_pkey;
++      u64 pt_access, pte_access;
++      unsigned index, accessed_dirty, pte_pkey;
+       gpa_t pte_gpa;
+       int offset;
++      u64 walk_nx_mask = 0;
+       const int write_fault = access & PFERR_WRITE_MASK;
+       const int user_fault  = access & PFERR_USER_MASK;
+       const int fetch_fault = access & PFERR_FETCH_MASK;
+@@ -301,6 +303,7 @@ retry_walk:
+       pte           = mmu->get_cr3(vcpu);
+ 
+ #if PTTYPE == 64
++      walk_nx_mask = 1ULL << PT64_NX_SHIFT;
+       if (walker->level == PT32E_ROOT_LEVEL) {
+               pte = mmu->get_pdptr(vcpu, (addr >> 30) & 3);
+               trace_kvm_mmu_paging_element(pte, walker->level);
+@@ -312,15 +315,14 @@ retry_walk:
+       walker->max_level = walker->level;
+       ASSERT(!(is_long_mode(vcpu) && !is_pae(vcpu)));
+ 
+-      accessed_dirty = PT_GUEST_ACCESSED_MASK;
+-      pt_access = pte_access = ACC_ALL;
++      pte_access = ~0;
+       ++walker->level;
+ 
+       do {
+               gfn_t real_gfn;
+               unsigned long host_addr;
+ 
+-              pt_access &= pte_access;
++              pt_access = pte_access;
+               --walker->level;
+ 
+               index = PT_INDEX(addr, walker->level);
+@@ -363,6 +365,12 @@ retry_walk:
+ 
+               trace_kvm_mmu_paging_element(pte, walker->level);
+ 
++              /*
++               * Inverting the NX it lets us AND it like other
++               * permission bits.
++               */
++              pte_access = pt_access & (pte ^ walk_nx_mask);
++
+               if (unlikely(!FNAME(is_present_gpte)(pte)))
+                       goto error;
+ 
+@@ -371,14 +379,18 @@ retry_walk:
+                       goto error;
+               }
+ 
+-              accessed_dirty &= pte;
+-              pte_access = pt_access & FNAME(gpte_access)(vcpu, pte);
+-
+               walker->ptes[walker->level - 1] = pte;
++
++              /* Convert to ACC_*_MASK flags for struct guest_walker.  */
++              walker->pt_access[walker->level - 1] = FNAME(gpte_access)(vcpu, pt_access ^ walk_nx_mask);
+       } while (!is_last_gpte(mmu, walker->level, pte));
+ 
+       pte_pkey = FNAME(gpte_pkeys)(vcpu, pte);
+-      errcode = permission_fault(vcpu, mmu, pte_access, pte_pkey, access);
++      accessed_dirty = pte_access & PT_GUEST_ACCESSED_MASK;
++
++      /* Convert to ACC_*_MASK flags for struct guest_walker.  */
++      walker->pte_access = FNAME(gpte_access)(vcpu, pte_access ^ walk_nx_mask);
++      errcode = permission_fault(vcpu, mmu, walker->pte_access, pte_pkey, access);
+       if (unlikely(errcode))
+               goto error;
+ 
+@@ -395,7 +407,7 @@ retry_walk:
+       walker->gfn = real_gpa >> PAGE_SHIFT;
+ 
+       if (!write_fault)
+-              FNAME(protect_clean_gpte)(&pte_access, pte);
++              FNAME(protect_clean_gpte)(&walker->pte_access, pte);
+       else
+               /*
+                * On a write fault, fold the dirty bit into accessed_dirty.
+@@ -413,10 +425,9 @@ retry_walk:
+                       goto retry_walk;
+       }
+ 
+-      walker->pt_access = pt_access;
+-      walker->pte_access = pte_access;
+       pgprintk("%s: pte %llx pte_access %x pt_access %x\n",
+-               __func__, (u64)pte, pte_access, pt_access);
++               __func__, (u64)pte, walker->pte_access,
++               walker->pt_access[walker->level - 1]);
+       return 1;
+ 
+ error:
+@@ -444,7 +455,7 @@ error:
+        */
+       if (!(errcode & PFERR_RSVD_MASK)) {
+               vcpu->arch.exit_qualification &= 0x187;
+-              vcpu->arch.exit_qualification |= ((pt_access & pte) & 0x7) << 3;
++              vcpu->arch.exit_qualification |= (pte_access & 0x7) << 3;
+       }
+ #endif
+       walker->fault.address = addr;
+@@ -578,7 +589,7 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
+ {
+       struct kvm_mmu_page *sp = NULL;
+       struct kvm_shadow_walk_iterator it;
+-      unsigned direct_access, access = gw->pt_access;
++      unsigned int direct_access, access;
+       int top_level, ret;
+       gfn_t gfn, base_gfn;
+ 
+@@ -610,6 +621,7 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
+               sp = NULL;
+               if (!is_shadow_present_pte(*it.sptep)) {
+                       table_gfn = gw->table_gfn[it.level - 2];
++                      access = gw->pt_access[it.level - 2];
+                       sp = kvm_mmu_get_page(vcpu, table_gfn, addr, it.level-1,
+                                             false, access);
+               }
+diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
+index c4f155663ca9a..14cd0a742e794 100644
+--- a/drivers/gpu/drm/i915/i915_drv.h
++++ b/drivers/gpu/drm/i915/i915_drv.h
+@@ -1763,6 +1763,8 @@ struct drm_i915_private {
+ 
+       struct intel_uncore uncore;
+ 
++      struct mutex tlb_invalidate_lock;
++
+       struct i915_virtual_gpu vgpu;
+ 
+       struct intel_gvt gvt;
+@@ -2211,7 +2213,8 @@ struct drm_i915_gem_object {
+        * rendering and so a non-zero seqno), and is not set if it i s on
+        * inactive (ready to be unbound) list.
+        */
+-#define I915_BO_ACTIVE_SHIFT 0
++#define I915_BO_WAS_BOUND_BIT    0
++#define I915_BO_ACTIVE_SHIFT 1
+ #define I915_BO_ACTIVE_MASK ((1 << I915_NUM_ENGINES) - 1)
+ #define __I915_BO_ACTIVE(bo) \
+       ((READ_ONCE((bo)->flags) >> I915_BO_ACTIVE_SHIFT) & I915_BO_ACTIVE_MASK)
+diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
+index 3fb4f9acacba0..9265ac5774c25 100644
+--- a/drivers/gpu/drm/i915/i915_gem.c
++++ b/drivers/gpu/drm/i915/i915_gem.c
+@@ -2185,6 +2185,67 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
+       kfree(obj->pages);
+ }
+ 
++static int
++__intel_wait_for_register_fw(struct drm_i915_private *dev_priv,
++                           i915_reg_t reg,
++                           const u32 mask,
++                           const u32 value,
++                           const unsigned int timeout_us,
++                           const unsigned int timeout_ms)
++{
++#define done ((I915_READ_FW(reg) & mask) == value)
++      int ret = wait_for_us(done, timeout_us);
++      if (ret)
++              ret = wait_for(done, timeout_ms);
++      return ret;
++#undef done
++}
++
++static void invalidate_tlbs(struct drm_i915_private *dev_priv)
++{
++      static const i915_reg_t gen8_regs[] = {
++              [RCS]  = GEN8_RTCR,
++              [VCS]  = GEN8_M1TCR,
++              [VCS2] = GEN8_M2TCR,
++              [VECS] = GEN8_VTCR,
++              [BCS]  = GEN8_BTCR,
++      };
++      struct intel_engine_cs *engine;
++
++      if (INTEL_GEN(dev_priv) < 8)
++              return;
++
++      assert_rpm_wakelock_held(dev_priv);
++
++      mutex_lock(&dev_priv->tlb_invalidate_lock);
++      intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
++
++      for_each_engine(engine, dev_priv) {
++              /*
++               * HW architecture suggest typical invalidation time at 40us,
++               * with pessimistic cases up to 100us and a recommendation to
++               * cap at 1ms. We go a bit higher just in case.
++               */
++              const unsigned int timeout_us = 100;
++              const unsigned int timeout_ms = 4;
++              const enum intel_engine_id id = engine->id;
++
++              if (WARN_ON_ONCE(id >= ARRAY_SIZE(gen8_regs) ||
++                               !i915_mmio_reg_offset(gen8_regs[id])))
++                      continue;
++
++              I915_WRITE_FW(gen8_regs[id], 1);
++              if (__intel_wait_for_register_fw(dev_priv,
++                                               gen8_regs[id], 1, 0,
++                                               timeout_us, timeout_ms))
++                      DRM_ERROR_RATELIMITED("%s TLB invalidation did not 
complete in %ums!\n",
++                                            engine->name, timeout_ms);
++      }
++
++      intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
++      mutex_unlock(&dev_priv->tlb_invalidate_lock);
++}
++
+ int
+ i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
+ {
+@@ -2215,6 +2276,15 @@ i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
+               obj->mapping = NULL;
+       }
+ 
++      if (test_and_clear_bit(I915_BO_WAS_BOUND_BIT, &obj->flags)) {
++              struct drm_i915_private *i915 = to_i915(obj->base.dev);
++
++              if (intel_runtime_pm_get_if_in_use(i915)) {
++                      invalidate_tlbs(i915);
++                      intel_runtime_pm_put(i915);
++              }
++      }
++
+       ops->put_pages(obj);
+       obj->pages = NULL;
+ 
+@@ -4627,6 +4697,8 @@ i915_gem_load_init(struct drm_device *dev)
+ 
+       atomic_set(&dev_priv->mm.bsd_engine_dispatch_index, 0);
+ 
++      mutex_init(&dev_priv->tlb_invalidate_lock);
++
+       spin_lock_init(&dev_priv->fb_tracking.lock);
+ }
+ 
+diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
+index 16f56f14f4d06..edaff73b7aa9d 100644
+--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
++++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
+@@ -3685,6 +3685,10 @@ int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
+               return ret;
+ 
+       vma->flags |= bind_flags;
++
++      if (vma->obj)
++              set_bit(I915_BO_WAS_BOUND_BIT, &vma->obj->flags);
++
+       return 0;
+ }
+ 
+diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
+index 5468e69bf520a..1ff1e33df2c71 100644
+--- a/drivers/gpu/drm/i915/i915_reg.h
++++ b/drivers/gpu/drm/i915/i915_reg.h
+@@ -1698,6 +1698,12 @@ enum skl_disp_power_wells {
+ #define GAMT_CHKN_BIT_REG     _MMIO(0x4ab8)
+ #define   GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING    (1<<28)
+ 
++#define GEN8_RTCR     _MMIO(0x4260)
++#define GEN8_M1TCR    _MMIO(0x4264)
++#define GEN8_M2TCR    _MMIO(0x4268)
++#define GEN8_BTCR     _MMIO(0x426c)
++#define GEN8_VTCR     _MMIO(0x4270)
++
+ #if 0
+ #define PRB0_TAIL     _MMIO(0x2030)
+ #define PRB0_HEAD     _MMIO(0x2034)
+diff --git a/drivers/media/firewire/firedtv-avc.c b/drivers/media/firewire/firedtv-avc.c
+index 280b5ffea5922..3a373711f5ad9 100644
+--- a/drivers/media/firewire/firedtv-avc.c
++++ b/drivers/media/firewire/firedtv-avc.c
+@@ -1169,7 +1169,11 @@ int avc_ca_pmt(struct firedtv *fdtv, char *msg, int length)
+               read_pos += program_info_length;
+               write_pos += program_info_length;
+       }
+-      while (read_pos < length) {
++      while (read_pos + 4 < length) {
++              if (write_pos + 4 >= sizeof(c->operand) - 4) {
++                      ret = -EINVAL;
++                      goto out;
++              }
+               c->operand[write_pos++] = msg[read_pos++];
+               c->operand[write_pos++] = msg[read_pos++];
+               c->operand[write_pos++] = msg[read_pos++];
+@@ -1181,13 +1185,17 @@ int avc_ca_pmt(struct firedtv *fdtv, char *msg, int length)
+               c->operand[write_pos++] = es_info_length >> 8;
+               c->operand[write_pos++] = es_info_length & 0xff;
+               if (es_info_length > 0) {
++                      if (read_pos >= length) {
++                              ret = -EINVAL;
++                              goto out;
++                      }
+                       pmt_cmd_id = msg[read_pos++];
+                       if (pmt_cmd_id != 1 && pmt_cmd_id != 4)
+                               dev_err(fdtv->device, "invalid pmt_cmd_id %d "
+                                       "at stream level\n", pmt_cmd_id);
+ 
+-                      if (es_info_length > sizeof(c->operand) - 4 -
+-                                           write_pos) {
++                      if (es_info_length > sizeof(c->operand) - 4 - write_pos ||
++                          es_info_length > length - read_pos) {
+                               ret = -EINVAL;
+                               goto out;
+                       }
+diff --git a/drivers/media/firewire/firedtv-ci.c b/drivers/media/firewire/firedtv-ci.c
+index edbb30fdd9d95..93fb4b7312afc 100644
+--- a/drivers/media/firewire/firedtv-ci.c
++++ b/drivers/media/firewire/firedtv-ci.c
+@@ -138,6 +138,8 @@ static int fdtv_ca_pmt(struct firedtv *fdtv, void *arg)
+       } else {
+               data_length = msg->msg[3];
+       }
++      if (data_length > sizeof(msg->msg) - data_pos)
++              return -EINVAL;
+ 
+       return avc_ca_pmt(fdtv, &msg->msg[data_pos], data_length);
+ }
+diff --git a/drivers/staging/android/ion/ion-ioctl.c b/drivers/staging/android/ion/ion-ioctl.c
+index e3596855a7031..a27865b94416b 100644
+--- a/drivers/staging/android/ion/ion-ioctl.c
++++ b/drivers/staging/android/ion/ion-ioctl.c
+@@ -30,6 +30,69 @@ union ion_ioctl_arg {
+       struct ion_heap_query query;
+ };
+ 
++/* Must hold the client lock */
++static void user_ion_handle_get(struct ion_handle *handle)
++{
++      if (handle->user_ref_count++ == 0)
++              kref_get(&handle->ref);
++}
++
++/* Must hold the client lock */
++static struct ion_handle *user_ion_handle_get_check_overflow(
++      struct ion_handle *handle)
++{
++      if (handle->user_ref_count + 1 == 0)
++              return ERR_PTR(-EOVERFLOW);
++      user_ion_handle_get(handle);
++      return handle;
++}
++
++/* passes a kref to the user ref count.
++ * We know we're holding a kref to the object before and
++ * after this call, so no need to reverify handle.
++ */
++static struct ion_handle *pass_to_user(struct ion_handle *handle)
++{
++      struct ion_client *client = handle->client;
++      struct ion_handle *ret;
++
++      mutex_lock(&client->lock);
++      ret = user_ion_handle_get_check_overflow(handle);
++      ion_handle_put_nolock(handle);
++      mutex_unlock(&client->lock);
++      return ret;
++}
++
++/* Must hold the client lock */
++static int user_ion_handle_put_nolock(struct ion_handle *handle)
++{
++      int ret;
++
++      if (--handle->user_ref_count == 0)
++              ret = ion_handle_put_nolock(handle);
++
++      return ret;
++}
++
++static void user_ion_free_nolock(struct ion_client *client,
++                               struct ion_handle *handle)
++{
++      bool valid_handle;
++
++      WARN_ON(client != handle->client);
++
++      valid_handle = ion_handle_validate(client, handle);
++      if (!valid_handle) {
++              WARN(1, "%s: invalid handle passed to free.\n", __func__);
++              return;
++      }
++      if (handle->user_ref_count == 0) {
++              WARN(1, "%s: User does not have access!\n", __func__);
++              return;
++      }
++      user_ion_handle_put_nolock(handle);
++}
++
+ static int validate_ioctl_arg(unsigned int cmd, union ion_ioctl_arg *arg)
+ {
+       int ret = 0;
+@@ -96,16 +159,15 @@ long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+       {
+               struct ion_handle *handle;
+ 
+-              handle = ion_alloc(client, data.allocation.len,
+-                                              data.allocation.align,
+-                                              data.allocation.heap_id_mask,
+-                                              data.allocation.flags);
++              handle = __ion_alloc(client, data.allocation.len,
++                                   data.allocation.align,
++                                   data.allocation.heap_id_mask,
++                                   data.allocation.flags, true);
+               if (IS_ERR(handle))
+                       return PTR_ERR(handle);
+-
+               data.allocation.handle = handle->id;
+-
+               cleanup_handle = handle;
++              pass_to_user(handle);
+               break;
+       }
+       case ION_IOC_FREE:
+@@ -118,7 +180,7 @@ long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+                       mutex_unlock(&client->lock);
+                       return PTR_ERR(handle);
+               }
+-              ion_free_nolock(client, handle);
++              user_ion_free_nolock(client, handle);
+               ion_handle_put_nolock(handle);
+               mutex_unlock(&client->lock);
+               break;
+@@ -146,10 +208,16 @@ long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+               struct ion_handle *handle;
+ 
+               handle = ion_import_dma_buf_fd(client, data.fd.fd);
+-              if (IS_ERR(handle))
++              if (IS_ERR(handle)) {
+                       ret = PTR_ERR(handle);
+-              else
++              } else {
+                       data.handle.handle = handle->id;
++                      handle = pass_to_user(handle);
++                      if (IS_ERR(handle)) {
++                              ret = PTR_ERR(handle);
++                              data.handle.handle = 0;
++                      }
++              }
+               break;
+       }
+       case ION_IOC_SYNC:
+@@ -174,10 +242,16 @@ long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+ 
+       if (dir & _IOC_READ) {
+               if (copy_to_user((void __user *)arg, &data, _IOC_SIZE(cmd))) {
+-                      if (cleanup_handle)
+-                              ion_free(client, cleanup_handle);
++                      if (cleanup_handle) {
++                              mutex_lock(&client->lock);
++                              user_ion_free_nolock(client, cleanup_handle);
++                              ion_handle_put_nolock(cleanup_handle);
++                              mutex_unlock(&client->lock);
++                      }
+                       return -EFAULT;
+               }
+       }
++      if (cleanup_handle)
++              ion_handle_put(cleanup_handle);
+       return ret;
+ }
+diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c
+index aac9b38b8c25c..b272f2ab87e8f 100644
+--- a/drivers/staging/android/ion/ion.c
++++ b/drivers/staging/android/ion/ion.c
+@@ -363,8 +363,8 @@ struct ion_handle *ion_handle_get_by_id_nolock(struct ion_client *client,
+       return ERR_PTR(-EINVAL);
+ }
+ 
+-static bool ion_handle_validate(struct ion_client *client,
+-                              struct ion_handle *handle)
++bool ion_handle_validate(struct ion_client *client,
++                       struct ion_handle *handle)
+ {
+       WARN_ON(!mutex_is_locked(&client->lock));
+       return idr_find(&client->idr, handle->id) == handle;
+@@ -401,9 +401,9 @@ static int ion_handle_add(struct ion_client *client, struct ion_handle *handle)
+       return 0;
+ }
+ 
+-struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
+-                           size_t align, unsigned int heap_id_mask,
+-                           unsigned int flags)
++struct ion_handle *__ion_alloc(struct ion_client *client, size_t len,
++                             size_t align, unsigned int heap_id_mask,
++                             unsigned int flags, bool grab_handle)
+ {
+       struct ion_handle *handle;
+       struct ion_device *dev = client->dev;
+@@ -453,6 +453,8 @@ struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
+               return handle;
+ 
+       mutex_lock(&client->lock);
++      if (grab_handle)
++              ion_handle_get(handle);
+       ret = ion_handle_add(client, handle);
+       mutex_unlock(&client->lock);
+       if (ret) {
+@@ -462,6 +464,13 @@ struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
+ 
+       return handle;
+ }
++
++struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
++                           size_t align, unsigned int heap_id_mask,
++                           unsigned int flags)
++{
++      return __ion_alloc(client, len, align, heap_id_mask, flags, false);
++}
+ EXPORT_SYMBOL(ion_alloc);
+ 
+ void ion_free_nolock(struct ion_client *client,
+diff --git a/drivers/staging/android/ion/ion.h b/drivers/staging/android/ion/ion.h
+index 93dafb4586e43..cfa50dfb46edc 100644
+--- a/drivers/staging/android/ion/ion.h
++++ b/drivers/staging/android/ion/ion.h
+@@ -109,6 +109,10 @@ struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
+                            size_t align, unsigned int heap_id_mask,
+                            unsigned int flags);
+ 
++struct ion_handle *__ion_alloc(struct ion_client *client, size_t len,
++                             size_t align, unsigned int heap_id_mask,
++                             unsigned int flags, bool grab_handle);
++
+ /**
+  * ion_free - free a handle
+  * @client:   the client
+diff --git a/drivers/staging/android/ion/ion_priv.h b/drivers/staging/android/ion/ion_priv.h
+index 760e41885448a..e1dd25eab1dbd 100644
+--- a/drivers/staging/android/ion/ion_priv.h
++++ b/drivers/staging/android/ion/ion_priv.h
+@@ -149,6 +149,7 @@ struct ion_client {
+  */
+ struct ion_handle {
+       struct kref ref;
++      unsigned int user_ref_count;
+       struct ion_client *client;
+       struct ion_buffer *buffer;
+       struct rb_node node;
+@@ -459,6 +460,9 @@ int ion_sync_for_device(struct ion_client *client, int fd);
+ struct ion_handle *ion_handle_get_by_id_nolock(struct ion_client *client,
+                                               int id);
+ 
++bool ion_handle_validate(struct ion_client *client,
++                       struct ion_handle *handle);
++
+ void ion_free_nolock(struct ion_client *client, struct ion_handle *handle);
+ 
+ int ion_handle_put_nolock(struct ion_handle *handle);
+diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c
+index 3ee60c5332179..2fb4633897084 100644
+--- a/fs/nfs/nfs4client.c
++++ b/fs/nfs/nfs4client.c
+@@ -177,8 +177,11 @@ void nfs40_shutdown_client(struct nfs_client *clp)
+ 
+struct nfs_client *nfs4_alloc_client(const struct nfs_client_initdata *cl_init)
+ {
+-      int err;
++      char buf[INET6_ADDRSTRLEN + 1];
++      const char *ip_addr = cl_init->ip_addr;
+       struct nfs_client *clp = nfs_alloc_client(cl_init);
++      int err;
++
+       if (IS_ERR(clp))
+               return clp;
+ 
+@@ -202,6 +205,44 @@ struct nfs_client *nfs4_alloc_client(const struct nfs_client_initdata *cl_init)
+ #if IS_ENABLED(CONFIG_NFS_V4_1)
+       init_waitqueue_head(&clp->cl_lock_waitq);
+ #endif
++
++      if (cl_init->minorversion != 0)
++              __set_bit(NFS_CS_INFINITE_SLOTS, &clp->cl_flags);
++      __set_bit(NFS_CS_DISCRTRY, &clp->cl_flags);
++      __set_bit(NFS_CS_NO_RETRANS_TIMEOUT, &clp->cl_flags);
++
++      /*
++       * Set up the connection to the server before we add add to the
++       * global list.
++       */
++      err = nfs_create_rpc_client(clp, cl_init, RPC_AUTH_GSS_KRB5I);
++      if (err == -EINVAL)
++              err = nfs_create_rpc_client(clp, cl_init, RPC_AUTH_UNIX);
++      if (err < 0)
++              goto error;
++
++      /* If no clientaddr= option was specified, find a usable cb address */
++      if (ip_addr == NULL) {
++              struct sockaddr_storage cb_addr;
++              struct sockaddr *sap = (struct sockaddr *)&cb_addr;
++
++              err = rpc_localaddr(clp->cl_rpcclient, sap, sizeof(cb_addr));
++              if (err < 0)
++                      goto error;
++              err = rpc_ntop(sap, buf, sizeof(buf));
++              if (err < 0)
++                      goto error;
++              ip_addr = (const char *)buf;
++      }
++      strlcpy(clp->cl_ipaddr, ip_addr, sizeof(clp->cl_ipaddr));
++
++      err = nfs_idmap_new(clp);
++      if (err < 0) {
++              dprintk("%s: failed to create idmapper. Error = %d\n",
++                      __func__, err);
++              goto error;
++      }
++      __set_bit(NFS_CS_IDMAP, &clp->cl_res_state);
+       return clp;
+ 
+ error:
+@@ -354,8 +395,6 @@ static int nfs4_init_client_minor_version(struct nfs_client *clp)
+ struct nfs_client *nfs4_init_client(struct nfs_client *clp,
+                                   const struct nfs_client_initdata *cl_init)
+ {
+-      char buf[INET6_ADDRSTRLEN + 1];
+-      const char *ip_addr = cl_init->ip_addr;
+       struct nfs_client *old;
+       int error;
+ 
+@@ -365,43 +404,6 @@ struct nfs_client *nfs4_init_client(struct nfs_client *clp,
+               return clp;
+       }
+ 
+-      /* Check NFS protocol revision and initialize RPC op vector */
+-      clp->rpc_ops = &nfs_v4_clientops;
+-
+-      if (clp->cl_minorversion != 0)
+-              __set_bit(NFS_CS_INFINITE_SLOTS, &clp->cl_flags);
+-      __set_bit(NFS_CS_DISCRTRY, &clp->cl_flags);
+-      __set_bit(NFS_CS_NO_RETRANS_TIMEOUT, &clp->cl_flags);
+-
+-      error = nfs_create_rpc_client(clp, cl_init, RPC_AUTH_GSS_KRB5I);
+-      if (error == -EINVAL)
+-              error = nfs_create_rpc_client(clp, cl_init, RPC_AUTH_UNIX);
+-      if (error < 0)
+-              goto error;
+-
+-      /* If no clientaddr= option was specified, find a usable cb address */
+-      if (ip_addr == NULL) {
+-              struct sockaddr_storage cb_addr;
+-              struct sockaddr *sap = (struct sockaddr *)&cb_addr;
+-
+-              error = rpc_localaddr(clp->cl_rpcclient, sap, sizeof(cb_addr));
+-              if (error < 0)
+-                      goto error;
+-              error = rpc_ntop(sap, buf, sizeof(buf));
+-              if (error < 0)
+-                      goto error;
+-              ip_addr = (const char *)buf;
+-      }
+-      strlcpy(clp->cl_ipaddr, ip_addr, sizeof(clp->cl_ipaddr));
+-
+-      error = nfs_idmap_new(clp);
+-      if (error < 0) {
+-              dprintk("%s: failed to create idmapper. Error = %d\n",
+-                      __func__, error);
+-              goto error;
+-      }
+-      __set_bit(NFS_CS_IDMAP, &clp->cl_res_state);
+-
+       error = nfs4_init_client_minor_version(clp);
+       if (error < 0)
+               goto error;
+diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
+index bc5ff3a53d4a6..e7addfcd302f4 100644
+--- a/lib/Kconfig.debug
++++ b/lib/Kconfig.debug
+@@ -1091,7 +1091,7 @@ config LOCKDEP
+       bool
+       depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
+       select STACKTRACE
+-      select FRAME_POINTER if !MIPS && !PPC && !ARM_UNWIND && !S390 && !MICROBLAZE && !ARC && !SCORE
++      select FRAME_POINTER if !MIPS && !PPC && !ARM && !S390 && !MICROBLAZE && !ARC && !SCORE
+       select KALLSYMS
+       select KALLSYMS_ALL
+ 
+@@ -1670,7 +1670,7 @@ config FAULT_INJECTION_STACKTRACE_FILTER
+       depends on FAULT_INJECTION_DEBUG_FS && STACKTRACE_SUPPORT
+       depends on !X86_64
+       select STACKTRACE
+-      select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND && !ARC && !SCORE
++      select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM && !ARC && !SCORE
+       help
+         Provide stacktrace filter for fault-injection capabilities
+ 
+@@ -1679,7 +1679,7 @@ config LATENCYTOP
+       depends on DEBUG_KERNEL
+       depends on STACKTRACE_SUPPORT
+       depends on PROC_FS
+-      select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND && !ARC
++      select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM && !ARC
+       select KALLSYMS
+       select KALLSYMS_ALL
+       select STACKTRACE
