commit:     0dd711b119384f33b8fd38ccb24275ecea1b33d3
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Jul  3 13:02:31 2019 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Tue Oct 29 13:59:02 2019 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=0dd711b1

Linux patch 4.14.132

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README               |    4 +
 1131_linux-4.14.132.patch | 1558 +++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 1562 insertions(+)

diff --git a/0000_README b/0000_README
index 562acb4..f51c89c 100644
--- a/0000_README
+++ b/0000_README
@@ -567,6 +567,10 @@ Patch:  1130_linux-4.14.131.patch
 From:   https://www.kernel.org
 Desc:   Linux 4.14.131
 
+Patch:  1131_linux-4.14.132.patch
+From:   https://www.kernel.org
+Desc:   Linux 4.14.132
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1131_linux-4.14.132.patch b/1131_linux-4.14.132.patch
new file mode 100644
index 0000000..4119e06
--- /dev/null
+++ b/1131_linux-4.14.132.patch
@@ -0,0 +1,1558 @@
+diff --git a/Documentation/robust-futexes.txt b/Documentation/robust-futexes.txt
+index 6c42c75103eb..6361fb01c9c1 100644
+--- a/Documentation/robust-futexes.txt
++++ b/Documentation/robust-futexes.txt
+@@ -218,5 +218,4 @@ All other architectures should build just fine too - but they won't have
+ the new syscalls yet.
+ 
+ Architectures need to implement the new futex_atomic_cmpxchg_inatomic()
+-inline function before writing up the syscalls (that function returns
+--ENOSYS right now).
++inline function before writing up the syscalls.
+diff --git a/Makefile b/Makefile
+index 275343cf27f7..23b2916ef0ff 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 4
+ PATCHLEVEL = 14
+-SUBLEVEL = 131
++SUBLEVEL = 132
+ EXTRAVERSION =
+ NAME = Petit Gorille
+ 
+diff --git a/arch/arm64/include/asm/futex.h b/arch/arm64/include/asm/futex.h
+index c7e30a6ed56e..232917e9c1d9 100644
+--- a/arch/arm64/include/asm/futex.h
++++ b/arch/arm64/include/asm/futex.h
+@@ -134,7 +134,9 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *_uaddr,
+       : "memory");
+       uaccess_disable();
+ 
+-      *uval = val;
++      if (!ret)
++              *uval = val;
++
+       return ret;
+ }
+ 
+diff --git a/arch/arm64/include/asm/insn.h b/arch/arm64/include/asm/insn.h
+index 4214c38d016b..e3193fd39d8d 100644
+--- a/arch/arm64/include/asm/insn.h
++++ b/arch/arm64/include/asm/insn.h
+@@ -271,6 +271,7 @@ __AARCH64_INSN_FUNCS(adrp, 0x9F000000, 0x90000000)
+ __AARCH64_INSN_FUNCS(prfm,    0x3FC00000, 0x39800000)
+ __AARCH64_INSN_FUNCS(prfm_lit,        0xFF000000, 0xD8000000)
+ __AARCH64_INSN_FUNCS(str_reg, 0x3FE0EC00, 0x38206800)
++__AARCH64_INSN_FUNCS(ldadd,   0x3F20FC00, 0x38200000)
+ __AARCH64_INSN_FUNCS(ldr_reg, 0x3FE0EC00, 0x38606800)
+ __AARCH64_INSN_FUNCS(ldr_lit, 0xBF000000, 0x18000000)
+ __AARCH64_INSN_FUNCS(ldrsw_lit,       0xFF000000, 0x98000000)
+@@ -383,6 +384,13 @@ u32 aarch64_insn_gen_load_store_ex(enum aarch64_insn_register reg,
+                                  enum aarch64_insn_register state,
+                                  enum aarch64_insn_size_type size,
+                                  enum aarch64_insn_ldst_type type);
++u32 aarch64_insn_gen_ldadd(enum aarch64_insn_register result,
++                         enum aarch64_insn_register address,
++                         enum aarch64_insn_register value,
++                         enum aarch64_insn_size_type size);
++u32 aarch64_insn_gen_stadd(enum aarch64_insn_register address,
++                         enum aarch64_insn_register value,
++                         enum aarch64_insn_size_type size);
+ u32 aarch64_insn_gen_add_sub_imm(enum aarch64_insn_register dst,
+                                enum aarch64_insn_register src,
+                                int imm, enum aarch64_insn_variant variant,
+diff --git a/arch/arm64/kernel/insn.c b/arch/arm64/kernel/insn.c
+index 2718a77da165..4381aa7b071d 100644
+--- a/arch/arm64/kernel/insn.c
++++ b/arch/arm64/kernel/insn.c
+@@ -793,6 +793,46 @@ u32 aarch64_insn_gen_load_store_ex(enum aarch64_insn_register reg,
+                                           state);
+ }
+ 
++u32 aarch64_insn_gen_ldadd(enum aarch64_insn_register result,
++                         enum aarch64_insn_register address,
++                         enum aarch64_insn_register value,
++                         enum aarch64_insn_size_type size)
++{
++      u32 insn = aarch64_insn_get_ldadd_value();
++
++      switch (size) {
++      case AARCH64_INSN_SIZE_32:
++      case AARCH64_INSN_SIZE_64:
++              break;
++      default:
++              pr_err("%s: unimplemented size encoding %d\n", __func__, size);
++              return AARCH64_BREAK_FAULT;
++      }
++
++      insn = aarch64_insn_encode_ldst_size(size, insn);
++
++      insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn,
++                                          result);
++
++      insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
++                                          address);
++
++      return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RS, insn,
++                                          value);
++}
++
++u32 aarch64_insn_gen_stadd(enum aarch64_insn_register address,
++                         enum aarch64_insn_register value,
++                         enum aarch64_insn_size_type size)
++{
++      /*
++       * STADD is simply encoded as an alias for LDADD with XZR as
++       * the destination register.
++       */
++      return aarch64_insn_gen_ldadd(AARCH64_INSN_REG_ZR, address,
++                                    value, size);
++}
++
+ static u32 aarch64_insn_encode_prfm_imm(enum aarch64_insn_prfm_type type,
+                                       enum aarch64_insn_prfm_target target,
+                                       enum aarch64_insn_prfm_policy policy,
+diff --git a/arch/arm64/net/bpf_jit.h b/arch/arm64/net/bpf_jit.h
+index 6c881659ee8a..76606e87233f 100644
+--- a/arch/arm64/net/bpf_jit.h
++++ b/arch/arm64/net/bpf_jit.h
+@@ -100,6 +100,10 @@
+ #define A64_STXR(sf, Rt, Rn, Rs) \
+       A64_LSX(sf, Rt, Rn, Rs, STORE_EX)
+ 
++/* LSE atomics */
++#define A64_STADD(sf, Rn, Rs) \
++      aarch64_insn_gen_stadd(Rn, Rs, A64_SIZE(sf))
++
+ /* Add/subtract (immediate) */
+ #define A64_ADDSUB_IMM(sf, Rd, Rn, imm12, type) \
+       aarch64_insn_gen_add_sub_imm(Rd, Rn, imm12, \
+diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
+index 6110fe344368..b742171bfef7 100644
+--- a/arch/arm64/net/bpf_jit_comp.c
++++ b/arch/arm64/net/bpf_jit_comp.c
+@@ -330,7 +330,7 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
+       const int i = insn - ctx->prog->insnsi;
+       const bool is64 = BPF_CLASS(code) == BPF_ALU64;
+       const bool isdw = BPF_SIZE(code) == BPF_DW;
+-      u8 jmp_cond;
++      u8 jmp_cond, reg;
+       s32 jmp_offset;
+ 
+ #define check_imm(bits, imm) do {                             \
+@@ -706,18 +706,28 @@ emit_cond_jmp:
+                       break;
+               }
+               break;
++
+       /* STX XADD: lock *(u32 *)(dst + off) += src */
+       case BPF_STX | BPF_XADD | BPF_W:
+       /* STX XADD: lock *(u64 *)(dst + off) += src */
+       case BPF_STX | BPF_XADD | BPF_DW:
+-              emit_a64_mov_i(1, tmp, off, ctx);
+-              emit(A64_ADD(1, tmp, tmp, dst), ctx);
+-              emit(A64_LDXR(isdw, tmp2, tmp), ctx);
+-              emit(A64_ADD(isdw, tmp2, tmp2, src), ctx);
+-              emit(A64_STXR(isdw, tmp2, tmp, tmp3), ctx);
+-              jmp_offset = -3;
+-              check_imm19(jmp_offset);
+-              emit(A64_CBNZ(0, tmp3, jmp_offset), ctx);
++              if (!off) {
++                      reg = dst;
++              } else {
++                      emit_a64_mov_i(1, tmp, off, ctx);
++                      emit(A64_ADD(1, tmp, tmp, dst), ctx);
++                      reg = tmp;
++              }
++              if (cpus_have_cap(ARM64_HAS_LSE_ATOMICS)) {
++                      emit(A64_STADD(isdw, reg, src), ctx);
++              } else {
++                      emit(A64_LDXR(isdw, tmp2, reg), ctx);
++                      emit(A64_ADD(isdw, tmp2, tmp2, src), ctx);
++                      emit(A64_STXR(isdw, tmp2, reg, tmp3), ctx);
++                      jmp_offset = -3;
++                      check_imm19(jmp_offset);
++                      emit(A64_CBNZ(0, tmp3, jmp_offset), ctx);
++              }
+               break;
+ 
+       /* R0 = ntohx(*(size *)(((struct sk_buff *)R6)->data + imm)) */
+diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
+index 2769e0f5c686..3b44d39aca1d 100644
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -820,6 +820,16 @@ static enum ssb_mitigation __init __ssb_select_mitigation(void)
+               break;
+       }
+ 
++      /*
++       * If SSBD is controlled by the SPEC_CTRL MSR, then set the proper
++       * bit in the mask to allow guests to use the mitigation even in the
++       * case where the host does not enable it.
++       */
++      if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) ||
++          static_cpu_has(X86_FEATURE_AMD_SSBD)) {
++              x86_spec_ctrl_mask |= SPEC_CTRL_SSBD;
++      }
++
+       /*
+        * We have three CPU feature flags that are in play here:
+        *  - X86_BUG_SPEC_STORE_BYPASS - CPU is susceptible.
+@@ -837,7 +847,6 @@ static enum ssb_mitigation __init __ssb_select_mitigation(void)
+                       x86_amd_ssb_disable();
+               } else {
+                       x86_spec_ctrl_base |= SPEC_CTRL_SSBD;
+-                      x86_spec_ctrl_mask |= SPEC_CTRL_SSBD;
+                       wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
+               }
+       }
+diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c
+index 4a2100ac3423..93c22e7ee424 100644
+--- a/arch/x86/kernel/cpu/microcode/core.c
++++ b/arch/x86/kernel/cpu/microcode/core.c
+@@ -790,13 +790,16 @@ static struct syscore_ops mc_syscore_ops = {
+       .resume                 = mc_bp_resume,
+ };
+ 
+-static int mc_cpu_online(unsigned int cpu)
++static int mc_cpu_starting(unsigned int cpu)
+ {
+-      struct device *dev;
+-
+-      dev = get_cpu_device(cpu);
+       microcode_update_cpu(cpu);
+       pr_debug("CPU%d added\n", cpu);
++      return 0;
++}
++
++static int mc_cpu_online(unsigned int cpu)
++{
++      struct device *dev = get_cpu_device(cpu);
+ 
+       if (sysfs_create_group(&dev->kobj, &mc_attr_group))
+               pr_err("Failed to create group for CPU%d\n", cpu);
+@@ -873,7 +876,9 @@ int __init microcode_init(void)
+               goto out_ucode_group;
+ 
+       register_syscore_ops(&mc_syscore_ops);
+-      cpuhp_setup_state_nocalls(CPUHP_AP_MICROCODE_LOADER, "x86/microcode:online",
++      cpuhp_setup_state_nocalls(CPUHP_AP_MICROCODE_LOADER, "x86/microcode:starting",
++                                mc_cpu_starting, NULL);
++      cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "x86/microcode:online",
+                                 mc_cpu_online, mc_cpu_down_prep);
+ 
+       pr_info("Microcode Update Driver: v%s.", DRIVER_VERSION);
+diff --git a/block/bio.c b/block/bio.c
+index d01ab919b313..1384f9790882 100644
+--- a/block/bio.c
++++ b/block/bio.c
+@@ -773,7 +773,7 @@ int bio_add_pc_page(struct request_queue *q, struct bio *bio, struct page
+                       return 0;
+       }
+ 
+-      if (bio->bi_vcnt >= bio->bi_max_vecs)
++      if (bio_full(bio))
+               return 0;
+ 
+       /*
+@@ -821,65 +821,97 @@ int bio_add_pc_page(struct request_queue *q, struct bio *bio, struct page
+ EXPORT_SYMBOL(bio_add_pc_page);
+ 
+ /**
+- *    bio_add_page    -       attempt to add page to bio
+- *    @bio: destination bio
+- *    @page: page to add
+- *    @len: vec entry length
+- *    @offset: vec entry offset
++ * __bio_try_merge_page - try appending data to an existing bvec.
++ * @bio: destination bio
++ * @page: page to add
++ * @len: length of the data to add
++ * @off: offset of the data in @page
+  *
+- *    Attempt to add a page to the bio_vec maplist. This will only fail
+- *    if either bio->bi_vcnt == bio->bi_max_vecs or it's a cloned bio.
++ * Try to add the data at @page + @off to the last bvec of @bio.  This is a
++ * a useful optimisation for file systems with a block size smaller than the
++ * page size.
++ *
++ * Return %true on success or %false on failure.
+  */
+-int bio_add_page(struct bio *bio, struct page *page,
+-               unsigned int len, unsigned int offset)
++bool __bio_try_merge_page(struct bio *bio, struct page *page,
++              unsigned int len, unsigned int off)
+ {
+-      struct bio_vec *bv;
+-
+-      /*
+-       * cloned bio must not modify vec list
+-       */
+       if (WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)))
+-              return 0;
++              return false;
+ 
+-      /*
+-       * For filesystems with a blocksize smaller than the pagesize
+-       * we will often be called with the same page as last time and
+-       * a consecutive offset.  Optimize this special case.
+-       */
+       if (bio->bi_vcnt > 0) {
+-              bv = &bio->bi_io_vec[bio->bi_vcnt - 1];
++              struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt - 1];
+ 
+-              if (page == bv->bv_page &&
+-                  offset == bv->bv_offset + bv->bv_len) {
++              if (page == bv->bv_page && off == bv->bv_offset + bv->bv_len) {
+                       bv->bv_len += len;
+-                      goto done;
++                      bio->bi_iter.bi_size += len;
++                      return true;
+               }
+       }
++      return false;
++}
++EXPORT_SYMBOL_GPL(__bio_try_merge_page);
+ 
+-      if (bio->bi_vcnt >= bio->bi_max_vecs)
+-              return 0;
++/**
++ * __bio_add_page - add page to a bio in a new segment
++ * @bio: destination bio
++ * @page: page to add
++ * @len: length of the data to add
++ * @off: offset of the data in @page
++ *
++ * Add the data at @page + @off to @bio as a new bvec.  The caller must ensure
++ * that @bio has space for another bvec.
++ */
++void __bio_add_page(struct bio *bio, struct page *page,
++              unsigned int len, unsigned int off)
++{
++      struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt];
+ 
+-      bv              = &bio->bi_io_vec[bio->bi_vcnt];
+-      bv->bv_page     = page;
+-      bv->bv_len      = len;
+-      bv->bv_offset   = offset;
++      WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED));
++      WARN_ON_ONCE(bio_full(bio));
++
++      bv->bv_page = page;
++      bv->bv_offset = off;
++      bv->bv_len = len;
+ 
+-      bio->bi_vcnt++;
+-done:
+       bio->bi_iter.bi_size += len;
++      bio->bi_vcnt++;
++}
++EXPORT_SYMBOL_GPL(__bio_add_page);
++
++/**
++ *    bio_add_page    -       attempt to add page to bio
++ *    @bio: destination bio
++ *    @page: page to add
++ *    @len: vec entry length
++ *    @offset: vec entry offset
++ *
++ *    Attempt to add a page to the bio_vec maplist. This will only fail
++ *    if either bio->bi_vcnt == bio->bi_max_vecs or it's a cloned bio.
++ */
++int bio_add_page(struct bio *bio, struct page *page,
++               unsigned int len, unsigned int offset)
++{
++      if (!__bio_try_merge_page(bio, page, len, offset)) {
++              if (bio_full(bio))
++                      return 0;
++              __bio_add_page(bio, page, len, offset);
++      }
+       return len;
+ }
+ EXPORT_SYMBOL(bio_add_page);
+ 
+ /**
+- * bio_iov_iter_get_pages - pin user or kernel pages and add them to a bio
++ * __bio_iov_iter_get_pages - pin user or kernel pages and add them to a bio
+  * @bio: bio to add pages to
+  * @iter: iov iterator describing the region to be mapped
+  *
+- * Pins as many pages from *iter and appends them to @bio's bvec array. The
++ * Pins pages from *iter and appends them to @bio's bvec array. The
+  * pages will have to be released using put_page() when done.
++ * For multi-segment *iter, this function only adds pages from the
++ * the next non-empty segment of the iov iterator.
+  */
+-int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
++static int __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
+ {
+       unsigned short nr_pages = bio->bi_max_vecs - bio->bi_vcnt, idx;
+       struct bio_vec *bv = bio->bi_io_vec + bio->bi_vcnt;
+@@ -916,6 +948,33 @@ int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
+       iov_iter_advance(iter, size);
+       return 0;
+ }
++
++/**
++ * bio_iov_iter_get_pages - pin user or kernel pages and add them to a bio
++ * @bio: bio to add pages to
++ * @iter: iov iterator describing the region to be mapped
++ *
++ * Pins pages from *iter and appends them to @bio's bvec array. The
++ * pages will have to be released using put_page() when done.
++ * The function tries, but does not guarantee, to pin as many pages as
++ * fit into the bio, or are requested in *iter, whatever is smaller.
++ * If MM encounters an error pinning the requested pages, it stops.
++ * Error is returned only if 0 pages could be pinned.
++ */
++int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
++{
++      unsigned short orig_vcnt = bio->bi_vcnt;
++
++      do {
++              int ret = __bio_iov_iter_get_pages(bio, iter);
++
++              if (unlikely(ret))
++                      return bio->bi_vcnt > orig_vcnt ? 0 : ret;
++
++      } while (iov_iter_count(iter) && !bio_full(bio));
++
++      return 0;
++}
+ EXPORT_SYMBOL_GPL(bio_iov_iter_get_pages);
+ 
+ struct submit_bio_ret {
+diff --git a/drivers/infiniband/hw/hfi1/user_sdma.c b/drivers/infiniband/hw/hfi1/user_sdma.c
+index cbe5ab26d95b..75275f9e363d 100644
+--- a/drivers/infiniband/hw/hfi1/user_sdma.c
++++ b/drivers/infiniband/hw/hfi1/user_sdma.c
+@@ -132,25 +132,22 @@ static int defer_packet_queue(
+       struct hfi1_user_sdma_pkt_q *pq =
+               container_of(wait, struct hfi1_user_sdma_pkt_q, busy);
+       struct hfi1_ibdev *dev = &pq->dd->verbs_dev;
+-      struct user_sdma_txreq *tx =
+-              container_of(txreq, struct user_sdma_txreq, txreq);
+ 
+-      if (sdma_progress(sde, seq, txreq)) {
+-              if (tx->busycount++ < MAX_DEFER_RETRY_COUNT)
+-                      goto eagain;
+-      }
++      write_seqlock(&dev->iowait_lock);
++      if (sdma_progress(sde, seq, txreq))
++              goto eagain;
+       /*
+        * We are assuming that if the list is enqueued somewhere, it
+        * is to the dmawait list since that is the only place where
+        * it is supposed to be enqueued.
+        */
+       xchg(&pq->state, SDMA_PKT_Q_DEFERRED);
+-      write_seqlock(&dev->iowait_lock);
+       if (list_empty(&pq->busy.list))
+               iowait_queue(pkts_sent, &pq->busy, &sde->dmawait);
+       write_sequnlock(&dev->iowait_lock);
+       return -EBUSY;
+ eagain:
++      write_sequnlock(&dev->iowait_lock);
+       return -EAGAIN;
+ }
+ 
+@@ -803,7 +800,6 @@ static int user_sdma_send_pkts(struct user_sdma_request *req, unsigned maxpkts)
+ 
+               tx->flags = 0;
+               tx->req = req;
+-              tx->busycount = 0;
+               INIT_LIST_HEAD(&tx->list);
+ 
+               /*
+diff --git a/drivers/infiniband/hw/hfi1/user_sdma.h b/drivers/infiniband/hw/hfi1/user_sdma.h
+index 2b5326d6db53..87b0c567f442 100644
+--- a/drivers/infiniband/hw/hfi1/user_sdma.h
++++ b/drivers/infiniband/hw/hfi1/user_sdma.h
+@@ -236,7 +236,6 @@ struct user_sdma_txreq {
+       struct list_head list;
+       struct user_sdma_request *req;
+       u16 flags;
+-      unsigned int busycount;
+       u64 seqnum;
+ };
+ 
+diff --git a/drivers/md/dm-log-writes.c b/drivers/md/dm-log-writes.c
+index 8b80a9ce9ea9..dafedbc28bcc 100644
+--- a/drivers/md/dm-log-writes.c
++++ b/drivers/md/dm-log-writes.c
+@@ -57,6 +57,7 @@
+ 
+ #define WRITE_LOG_VERSION 1ULL
+ #define WRITE_LOG_MAGIC 0x6a736677736872ULL
++#define WRITE_LOG_SUPER_SECTOR 0
+ 
+ /*
+  * The disk format for this is braindead simple.
+@@ -112,6 +113,7 @@ struct log_writes_c {
+       struct list_head logging_blocks;
+       wait_queue_head_t wait;
+       struct task_struct *log_kthread;
++      struct completion super_done;
+ };
+ 
+ struct pending_block {
+@@ -177,6 +179,14 @@ static void log_end_io(struct bio *bio)
+       bio_put(bio);
+ }
+ 
++static void log_end_super(struct bio *bio)
++{
++      struct log_writes_c *lc = bio->bi_private;
++
++      complete(&lc->super_done);
++      log_end_io(bio);
++}
++
+ /*
+  * Meant to be called if there is an error, it will free all the pages
+  * associated with the block.
+@@ -212,7 +222,8 @@ static int write_metadata(struct log_writes_c *lc, void *entry,
+       bio->bi_iter.bi_size = 0;
+       bio->bi_iter.bi_sector = sector;
+       bio_set_dev(bio, lc->logdev->bdev);
+-      bio->bi_end_io = log_end_io;
++      bio->bi_end_io = (sector == WRITE_LOG_SUPER_SECTOR) ?
++                        log_end_super : log_end_io;
+       bio->bi_private = lc;
+       bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
+ 
+@@ -334,11 +345,18 @@ static int log_super(struct log_writes_c *lc)
+       super.nr_entries = cpu_to_le64(lc->logged_entries);
+       super.sectorsize = cpu_to_le32(lc->sectorsize);
+ 
+-      if (write_metadata(lc, &super, sizeof(super), NULL, 0, 0)) {
++      if (write_metadata(lc, &super, sizeof(super), NULL, 0,
++                         WRITE_LOG_SUPER_SECTOR)) {
+               DMERR("Couldn't write super");
+               return -1;
+       }
+ 
++      /*
++       * Super sector should be writen in-order, otherwise the
++       * nr_entries could be rewritten incorrectly by an old bio.
++       */
++      wait_for_completion_io(&lc->super_done);
++
+       return 0;
+ }
+ 
+@@ -447,6 +465,7 @@ static int log_writes_ctr(struct dm_target *ti, unsigned int argc, char **argv)
+       INIT_LIST_HEAD(&lc->unflushed_blocks);
+       INIT_LIST_HEAD(&lc->logging_blocks);
+       init_waitqueue_head(&lc->wait);
++      init_completion(&lc->super_done);
+       atomic_set(&lc->io_blocks, 0);
+       atomic_set(&lc->pending_blocks, 0);
+ 
+diff --git a/drivers/misc/eeprom/at24.c b/drivers/misc/eeprom/at24.c
+index 59dcd97ee3de..6b58ee2e2a25 100644
+--- a/drivers/misc/eeprom/at24.c
++++ b/drivers/misc/eeprom/at24.c
+@@ -113,22 +113,6 @@ MODULE_PARM_DESC(write_timeout, "Time (in ms) to try writes (default 25)");
+       ((1 << AT24_SIZE_FLAGS | (_flags))              \
+           << AT24_SIZE_BYTELEN | ilog2(_len))
+ 
+-/*
+- * Both reads and writes fail if the previous write didn't complete yet. This
+- * macro loops a few times waiting at least long enough for one entire page
+- * write to work while making sure that at least one iteration is run before
+- * checking the break condition.
+- *
+- * It takes two parameters: a variable in which the future timeout in jiffies
+- * will be stored and a temporary variable holding the time of the last
+- * iteration of processing the request. Both should be unsigned integers
+- * holding at least 32 bits.
+- */
+-#define loop_until_timeout(tout, op_time)                             \
+-      for (tout = jiffies + msecs_to_jiffies(write_timeout), op_time = 0; \
+-           op_time ? time_before(op_time, tout) : true;               \
+-           usleep_range(1000, 1500), op_time = jiffies)
+-
+ static const struct i2c_device_id at24_ids[] = {
+       /* needs 8 addresses as A0-A2 are ignored */
+       { "24c00",      AT24_DEVICE_MAGIC(128 / 8,      AT24_FLAG_TAKE8ADDR) },
+@@ -234,7 +218,14 @@ static ssize_t at24_eeprom_read_smbus(struct at24_data *at24, char *buf,
+       if (count > I2C_SMBUS_BLOCK_MAX)
+               count = I2C_SMBUS_BLOCK_MAX;
+ 
+-      loop_until_timeout(timeout, read_time) {
++      timeout = jiffies + msecs_to_jiffies(write_timeout);
++      do {
++              /*
++               * The timestamp shall be taken before the actual operation
++               * to avoid a premature timeout in case of high CPU load.
++               */
++              read_time = jiffies;
++
+               status = i2c_smbus_read_i2c_block_data_or_emulated(client,
+                                                                  offset,
+                                                                  count, buf);
+@@ -244,7 +235,9 @@ static ssize_t at24_eeprom_read_smbus(struct at24_data *at24, char *buf,
+ 
+               if (status == count)
+                       return count;
+-      }
++
++              usleep_range(1000, 1500);
++      } while (time_before(read_time, timeout));
+ 
+       return -ETIMEDOUT;
+ }
+@@ -284,7 +277,14 @@ static ssize_t at24_eeprom_read_i2c(struct at24_data *at24, char *buf,
+       msg[1].buf = buf;
+       msg[1].len = count;
+ 
+-      loop_until_timeout(timeout, read_time) {
++      timeout = jiffies + msecs_to_jiffies(write_timeout);
++      do {
++              /*
++               * The timestamp shall be taken before the actual operation
++               * to avoid a premature timeout in case of high CPU load.
++               */
++              read_time = jiffies;
++
+               status = i2c_transfer(client->adapter, msg, 2);
+               if (status == 2)
+                       status = count;
+@@ -294,7 +294,9 @@ static ssize_t at24_eeprom_read_i2c(struct at24_data *at24, char *buf,
+ 
+               if (status == count)
+                       return count;
+-      }
++
++              usleep_range(1000, 1500);
++      } while (time_before(read_time, timeout));
+ 
+       return -ETIMEDOUT;
+ }
+@@ -343,11 +345,20 @@ static ssize_t at24_eeprom_read_serial(struct at24_data *at24, char *buf,
+       msg[1].buf = buf;
+       msg[1].len = count;
+ 
+-      loop_until_timeout(timeout, read_time) {
++      timeout = jiffies + msecs_to_jiffies(write_timeout);
++      do {
++              /*
++               * The timestamp shall be taken before the actual operation
++               * to avoid a premature timeout in case of high CPU load.
++               */
++              read_time = jiffies;
++
+               status = i2c_transfer(client->adapter, msg, 2);
+               if (status == 2)
+                       return count;
+-      }
++
++              usleep_range(1000, 1500);
++      } while (time_before(read_time, timeout));
+ 
+       return -ETIMEDOUT;
+ }
+@@ -374,11 +385,20 @@ static ssize_t at24_eeprom_read_mac(struct at24_data *at24, char *buf,
+       msg[1].buf = buf;
+       msg[1].len = count;
+ 
+-      loop_until_timeout(timeout, read_time) {
++      timeout = jiffies + msecs_to_jiffies(write_timeout);
++      do {
++              /*
++               * The timestamp shall be taken before the actual operation
++               * to avoid a premature timeout in case of high CPU load.
++               */
++              read_time = jiffies;
++
+               status = i2c_transfer(client->adapter, msg, 2);
+               if (status == 2)
+                       return count;
+-      }
++
++              usleep_range(1000, 1500);
++      } while (time_before(read_time, timeout));
+ 
+       return -ETIMEDOUT;
+ }
+@@ -420,7 +440,14 @@ static ssize_t at24_eeprom_write_smbus_block(struct at24_data *at24,
+       client = at24_translate_offset(at24, &offset);
+       count = at24_adjust_write_count(at24, offset, count);
+ 
+-      loop_until_timeout(timeout, write_time) {
++      timeout = jiffies + msecs_to_jiffies(write_timeout);
++      do {
++              /*
++               * The timestamp shall be taken before the actual operation
++               * to avoid a premature timeout in case of high CPU load.
++               */
++              write_time = jiffies;
++
+               status = i2c_smbus_write_i2c_block_data(client,
+                                                       offset, count, buf);
+               if (status == 0)
+@@ -431,7 +458,9 @@ static ssize_t at24_eeprom_write_smbus_block(struct at24_data *at24,
+ 
+               if (status == count)
+                       return count;
+-      }
++
++              usleep_range(1000, 1500);
++      } while (time_before(write_time, timeout));
+ 
+       return -ETIMEDOUT;
+ }
+@@ -446,7 +475,14 @@ static ssize_t at24_eeprom_write_smbus_byte(struct at24_data *at24,
+ 
+       client = at24_translate_offset(at24, &offset);
+ 
+-      loop_until_timeout(timeout, write_time) {
++      timeout = jiffies + msecs_to_jiffies(write_timeout);
++      do {
++              /*
++               * The timestamp shall be taken before the actual operation
++               * to avoid a premature timeout in case of high CPU load.
++               */
++              write_time = jiffies;
++
+               status = i2c_smbus_write_byte_data(client, offset, buf[0]);
+               if (status == 0)
+                       status = count;
+@@ -456,7 +492,9 @@ static ssize_t at24_eeprom_write_smbus_byte(struct at24_data *at24,
+ 
+               if (status == count)
+                       return count;
+-      }
++
++              usleep_range(1000, 1500);
++      } while (time_before(write_time, timeout));
+ 
+       return -ETIMEDOUT;
+ }
+@@ -485,7 +523,14 @@ static ssize_t at24_eeprom_write_i2c(struct at24_data *at24, const char *buf,
+       memcpy(&msg.buf[i], buf, count);
+       msg.len = i + count;
+ 
+-      loop_until_timeout(timeout, write_time) {
++      timeout = jiffies + msecs_to_jiffies(write_timeout);
++      do {
++              /*
++               * The timestamp shall be taken before the actual operation
++               * to avoid a premature timeout in case of high CPU load.
++               */
++              write_time = jiffies;
++
+               status = i2c_transfer(client->adapter, &msg, 1);
+               if (status == 1)
+                       status = count;
+@@ -495,7 +540,9 @@ static ssize_t at24_eeprom_write_i2c(struct at24_data *at24, const char *buf,
+ 
+               if (status == count)
+                       return count;
+-      }
++
++              usleep_range(1000, 1500);
++      } while (time_before(write_time, timeout));
+ 
+       return -ETIMEDOUT;
+ }
+diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
+index 1edd4ff5382c..8536a75f32e3 100644
+--- a/drivers/net/bonding/bond_main.c
++++ b/drivers/net/bonding/bond_main.c
+@@ -4263,12 +4263,12 @@ void bond_setup(struct net_device *bond_dev)
+       bond_dev->features |= NETIF_F_NETNS_LOCAL;
+ 
+       bond_dev->hw_features = BOND_VLAN_FEATURES |
+-                              NETIF_F_HW_VLAN_CTAG_TX |
+                               NETIF_F_HW_VLAN_CTAG_RX |
+                               NETIF_F_HW_VLAN_CTAG_FILTER;
+ 
+       bond_dev->hw_features |= NETIF_F_GSO_ENCAP_ALL;
+       bond_dev->features |= bond_dev->hw_features;
++      bond_dev->features |= NETIF_F_HW_VLAN_CTAG_TX;
+ }
+ 
+ /* Destroy a bonding device.
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
+index 08c19ebd5306..41d528fbebb4 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
+@@ -121,7 +121,7 @@ static int stmmac_adjust_systime(void __iomem *ioaddr, u32 sec, u32 nsec,
+                * programmed with (2^32 – <new_sec_value>)
+                */
+               if (gmac4)
+-                      sec = (100000000ULL - sec);
++                      sec = -sec;
+ 
+               value = readl(ioaddr + PTP_TCR);
+               if (value & PTP_TCR_TSCTRLSSR)
+diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
+index e9a92ed5a308..f3e3e568311a 100644
+--- a/drivers/net/team/team.c
++++ b/drivers/net/team/team.c
+@@ -2131,12 +2131,12 @@ static void team_setup(struct net_device *dev)
+       dev->features |= NETIF_F_NETNS_LOCAL;
+ 
+       dev->hw_features = TEAM_VLAN_FEATURES |
+-                         NETIF_F_HW_VLAN_CTAG_TX |
+                          NETIF_F_HW_VLAN_CTAG_RX |
+                          NETIF_F_HW_VLAN_CTAG_FILTER;
+ 
+       dev->hw_features |= NETIF_F_GSO_ENCAP_ALL;
+       dev->features |= dev->hw_features;
++      dev->features |= NETIF_F_HW_VLAN_CTAG_TX;
+ }
+ 
+ static int team_newlink(struct net *src_net, struct net_device *dev,
+diff --git a/drivers/net/tun.c b/drivers/net/tun.c
+index 3b13d9e4030a..84a33c81b9b7 100644
+--- a/drivers/net/tun.c
++++ b/drivers/net/tun.c
+@@ -831,18 +831,8 @@ static void tun_net_uninit(struct net_device *dev)
+ /* Net device open. */
+ static int tun_net_open(struct net_device *dev)
+ {
+-      struct tun_struct *tun = netdev_priv(dev);
+-      int i;
+-
+       netif_tx_start_all_queues(dev);
+ 
+-      for (i = 0; i < tun->numqueues; i++) {
+-              struct tun_file *tfile;
+-
+-              tfile = rtnl_dereference(tun->tfiles[i]);
+-              tfile->socket.sk->sk_write_space(tfile->socket.sk);
+-      }
+-
+       return 0;
+ }
+ 
+@@ -2826,6 +2816,7 @@ static int tun_device_event(struct notifier_block *unused,
+ {
+       struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+       struct tun_struct *tun = netdev_priv(dev);
++      int i;
+ 
+       if (dev->rtnl_link_ops != &tun_link_ops)
+               return NOTIFY_DONE;
+@@ -2835,6 +2826,14 @@ static int tun_device_event(struct notifier_block *unused,
+               if (tun_queue_resize(tun))
+                       return NOTIFY_BAD;
+               break;
++      case NETDEV_UP:
++              for (i = 0; i < tun->numqueues; i++) {
++                      struct tun_file *tfile;
++
++                      tfile = rtnl_dereference(tun->tfiles[i]);
++                      tfile->socket.sk->sk_write_space(tfile->socket.sk);
++              }
++              break;
+       default:
+               break;
+       }
+diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
+index c2d6c501dd85..063daa3435e4 100644
+--- a/drivers/net/usb/qmi_wwan.c
++++ b/drivers/net/usb/qmi_wwan.c
+@@ -1395,14 +1395,14 @@ static int qmi_wwan_probe(struct usb_interface *intf,
+               return -ENODEV;
+       }
+ 
+-      info = (void *)&id->driver_info;
+-
+       /* Several Quectel modems supports dynamic interface configuration, so
+        * we need to match on class/subclass/protocol. These values are
+        * identical for the diagnostic- and QMI-interface, but bNumEndpoints is
+        * different. Ignore the current interface if the number of endpoints
+        * equals the number for the diag interface (two).
+        */
++      info = (void *)id->driver_info;
++
+       if (info->data & QMI_WWAN_QUIRK_QUECTEL_DYNCFG) {
+               if (desc->bNumEndpoints == 2)
+                       return -ENODEV;
+diff --git a/drivers/scsi/vmw_pvscsi.c b/drivers/scsi/vmw_pvscsi.c
+index 890b8aaf95e1..64eb8ffb2ddf 100644
+--- a/drivers/scsi/vmw_pvscsi.c
++++ b/drivers/scsi/vmw_pvscsi.c
+@@ -763,6 +763,7 @@ static int pvscsi_queue_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd
+       struct pvscsi_adapter *adapter = shost_priv(host);
+       struct pvscsi_ctx *ctx;
+       unsigned long flags;
++      unsigned char op;
+ 
+       spin_lock_irqsave(&adapter->hw_lock, flags);
+ 
+@@ -775,13 +776,14 @@ static int pvscsi_queue_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd
+       }
+ 
+       cmd->scsi_done = done;
++      op = cmd->cmnd[0];
+ 
+       dev_dbg(&cmd->device->sdev_gendev,
+-              "queued cmd %p, ctx %p, op=%x\n", cmd, ctx, cmd->cmnd[0]);
++              "queued cmd %p, ctx %p, op=%x\n", cmd, ctx, op);
+ 
+       spin_unlock_irqrestore(&adapter->hw_lock, flags);
+ 
+-      pvscsi_kick_io(adapter, cmd->cmnd[0]);
++      pvscsi_kick_io(adapter, op);
+ 
+       return 0;
+ }
+diff --git a/fs/9p/acl.c b/fs/9p/acl.c
+index 082d227fa56b..6261719f6f2a 100644
+--- a/fs/9p/acl.c
++++ b/fs/9p/acl.c
+@@ -276,7 +276,7 @@ static int v9fs_xattr_set_acl(const struct xattr_handler *handler,
+       switch (handler->flags) {
+       case ACL_TYPE_ACCESS:
+               if (acl) {
+-                      struct iattr iattr;
++                      struct iattr iattr = { 0 };
+                       struct posix_acl *old_acl = acl;
+ 
+                      retval = posix_acl_update_mode(inode, &iattr.ia_mode, &acl);
+diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c
+index 5d6b94475f27..78b5bac82559 100644
+--- a/fs/binfmt_flat.c
++++ b/fs/binfmt_flat.c
+@@ -856,9 +856,14 @@ err:
+ 
+ static int load_flat_shared_library(int id, struct lib_info *libs)
+ {
++      /*
++       * This is a fake bprm struct; only the members "buf", "file" and
++       * "filename" are actually used.
++       */
+       struct linux_binprm bprm;
+       int res;
+       char buf[16];
++      loff_t pos = 0;
+ 
+       memset(&bprm, 0, sizeof(bprm));
+ 
+@@ -872,25 +877,11 @@ static int load_flat_shared_library(int id, struct lib_info *libs)
+       if (IS_ERR(bprm.file))
+               return res;
+ 
+-      bprm.cred = prepare_exec_creds();
+-      res = -ENOMEM;
+-      if (!bprm.cred)
+-              goto out;
+-
+-      /* We don't really care about recalculating credentials at this point
+-       * as we're past the point of no return and are dealing with shared
+-       * libraries.
+-       */
+-      bprm.called_set_creds = 1;
++      res = kernel_read(bprm.file, bprm.buf, BINPRM_BUF_SIZE, &pos);
+ 
+-      res = prepare_binprm(&bprm);
+-
+-      if (!res)
++      if (res >= 0)
+               res = load_flat_file(&bprm, libs, id, NULL);
+ 
+-      abort_creds(bprm.cred);
+-
+-out:
+       allow_write_access(bprm.file);
+       fput(bprm.file);
+ 
+diff --git a/fs/nfs/flexfilelayout/flexfilelayoutdev.c b/fs/nfs/flexfilelayout/flexfilelayoutdev.c
+index 9f69e83810ca..2de6e87e5ee5 100644
+--- a/fs/nfs/flexfilelayout/flexfilelayoutdev.c
++++ b/fs/nfs/flexfilelayout/flexfilelayoutdev.c
+@@ -18,7 +18,7 @@
+ 
+ #define NFSDBG_FACILITY               NFSDBG_PNFS_LD
+ 
+-static unsigned int dataserver_timeo = NFS_DEF_TCP_RETRANS;
++static unsigned int dataserver_timeo = NFS_DEF_TCP_TIMEO;
+ static unsigned int dataserver_retrans;
+ 
+ static bool ff_layout_has_available_ds(struct pnfs_layout_segment *lseg);
+diff --git a/fs/proc/array.c b/fs/proc/array.c
+index 4ac811e1a26c..37c7ed0dc820 100644
+--- a/fs/proc/array.c
++++ b/fs/proc/array.c
+@@ -448,7 +448,7 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
+                * a program is not able to use ptrace(2) in that case. It is
+                * safe because the task has stopped executing permanently.
+                */
+-              if (permitted && (task->flags & PF_DUMPCORE)) {
++              if (permitted && (task->flags & (PF_EXITING|PF_DUMPCORE))) {
+                       if (try_get_task_stack(task)) {
+                               eip = KSTK_EIP(task);
+                               esp = KSTK_ESP(task);
+diff --git a/include/asm-generic/futex.h b/include/asm-generic/futex.h
+index fcb61b4659b3..8666fe7f35d7 100644
+--- a/include/asm-generic/futex.h
++++ b/include/asm-generic/futex.h
+@@ -23,7 +23,9 @@
+  *
+  * Return:
+  * 0 - On success
+- * <0 - On error
++ * -EFAULT - User access resulted in a page fault
++ * -EAGAIN - Atomic operation was unable to complete due to contention
++ * -ENOSYS - Operation not supported
+  */
+ static inline int
+ arch_futex_atomic_op_inuser(int op, u32 oparg, int *oval, u32 __user *uaddr)
+@@ -85,7 +87,9 @@ out_pagefault_enable:
+  *
+  * Return:
+  * 0 - On success
+- * <0 - On error
++ * -EFAULT - User access resulted in a page fault
++ * -EAGAIN - Atomic operation was unable to complete due to contention
++ * -ENOSYS - Function not implemented (only if !HAVE_FUTEX_CMPXCHG)
+  */
+ static inline int
+ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
+diff --git a/include/linux/bio.h b/include/linux/bio.h
+index d4b39caf081d..e260f000b9ac 100644
+--- a/include/linux/bio.h
++++ b/include/linux/bio.h
+@@ -123,6 +123,11 @@ static inline void *bio_data(struct bio *bio)
+       return NULL;
+ }
+ 
++static inline bool bio_full(struct bio *bio)
++{
++      return bio->bi_vcnt >= bio->bi_max_vecs;
++}
++
+ /*
+  * will die
+  */
+@@ -459,6 +464,10 @@ void bio_chain(struct bio *, struct bio *);
+ extern int bio_add_page(struct bio *, struct page *, unsigned int,unsigned int);
+ extern int bio_add_pc_page(struct request_queue *, struct bio *, struct page *,
+                          unsigned int, unsigned int);
++bool __bio_try_merge_page(struct bio *bio, struct page *page,
++              unsigned int len, unsigned int off);
++void __bio_add_page(struct bio *bio, struct page *page,
++              unsigned int len, unsigned int off);
+ int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter);
+ struct rq_map_data;
+ extern struct bio *bio_map_user_iov(struct request_queue *,
+diff --git a/include/linux/compiler.h b/include/linux/compiler.h
+index 67c3934fb9ed..a704d032713b 100644
+--- a/include/linux/compiler.h
++++ b/include/linux/compiler.h
+@@ -119,10 +119,7 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
+ # define ASM_UNREACHABLE
+ #endif
+ #ifndef unreachable
+-# define unreachable() do {           \
+-      annotate_unreachable();         \
+-      __builtin_unreachable();        \
+-} while (0)
++# define unreachable() do { annotate_reachable(); do { } while (1); } while (0)
+ #endif
+ 
+ /*
+diff --git a/kernel/cpu.c b/kernel/cpu.c
+index 127a69b8b192..f370a0f43005 100644
+--- a/kernel/cpu.c
++++ b/kernel/cpu.c
+@@ -2308,6 +2308,9 @@ static int __init mitigations_parse_cmdline(char *arg)
+               cpu_mitigations = CPU_MITIGATIONS_AUTO;
+       else if (!strcmp(arg, "auto,nosmt"))
+               cpu_mitigations = CPU_MITIGATIONS_AUTO_NOSMT;
++      else
++              pr_crit("Unsupported mitigations=%s, system may still be vulnerable\n",
++                      arg);
+ 
+       return 0;
+ }
+diff --git a/kernel/trace/trace_branch.c b/kernel/trace/trace_branch.c
+index 3ea65cdff30d..4ad967453b6f 100644
+--- a/kernel/trace/trace_branch.c
++++ b/kernel/trace/trace_branch.c
+@@ -205,8 +205,6 @@ void trace_likely_condition(struct ftrace_likely_data *f, int val, int expect)
+ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
+                         int expect, int is_constant)
+ {
+-      unsigned long flags = user_access_save();
+-
+       /* A constant is always correct */
+       if (is_constant) {
+               f->constant++;
+@@ -225,8 +223,6 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
+               f->data.correct++;
+       else
+               f->data.incorrect++;
+-
+-      user_access_restore(flags);
+ }
+ EXPORT_SYMBOL(ftrace_likely_update);
+ 
+diff --git a/mm/mempolicy.c b/mm/mempolicy.c
+index 6ca0225335eb..a37cfa88669e 100644
+--- a/mm/mempolicy.c
++++ b/mm/mempolicy.c
+@@ -305,7 +305,7 @@ static void mpol_rebind_nodemask(struct mempolicy *pol, const nodemask_t *nodes)
+       else {
+               nodes_remap(tmp, pol->v.nodes,pol->w.cpuset_mems_allowed,
+                                                               *nodes);
+-              pol->w.cpuset_mems_allowed = tmp;
++              pol->w.cpuset_mems_allowed = *nodes;
+       }
+ 
+       if (nodes_empty(tmp))
+diff --git a/mm/page_idle.c b/mm/page_idle.c
+index e412a63b2b74..504684181827 100644
+--- a/mm/page_idle.c
++++ b/mm/page_idle.c
+@@ -136,7 +136,7 @@ static ssize_t page_idle_bitmap_read(struct file *file, struct kobject *kobj,
+ 
+       end_pfn = pfn + count * BITS_PER_BYTE;
+       if (end_pfn > max_pfn)
+-              end_pfn = ALIGN(max_pfn, BITMAP_CHUNK_BITS);
++              end_pfn = max_pfn;
+ 
+       for (; pfn < end_pfn; pfn++) {
+               bit = pfn % BITMAP_CHUNK_BITS;
+@@ -181,7 +181,7 @@ static ssize_t page_idle_bitmap_write(struct file *file, struct kobject *kobj,
+ 
+       end_pfn = pfn + count * BITS_PER_BYTE;
+       if (end_pfn > max_pfn)
+-              end_pfn = ALIGN(max_pfn, BITMAP_CHUNK_BITS);
++              end_pfn = max_pfn;
+ 
+       for (; pfn < end_pfn; pfn++) {
+               bit = pfn % BITMAP_CHUNK_BITS;
+diff --git a/net/9p/protocol.c b/net/9p/protocol.c
+index 766d1ef4640a..1885403c9a3e 100644
+--- a/net/9p/protocol.c
++++ b/net/9p/protocol.c
+@@ -622,13 +622,19 @@ int p9dirent_read(struct p9_client *clnt, char *buf, int len,
+       if (ret) {
+               p9_debug(P9_DEBUG_9P, "<<< p9dirent_read failed: %d\n", ret);
+               trace_9p_protocol_dump(clnt, &fake_pdu);
+-              goto out;
++              return ret;
+       }
+ 
+-      strcpy(dirent->d_name, nameptr);
++      ret = strscpy(dirent->d_name, nameptr, sizeof(dirent->d_name));
++      if (ret < 0) {
++              p9_debug(P9_DEBUG_ERROR,
++                       "On the wire dirent name too long: %s\n",
++                       nameptr);
++              kfree(nameptr);
++              return ret;
++      }
+       kfree(nameptr);
+ 
+-out:
+       return fake_pdu.offset;
+ }
+ EXPORT_SYMBOL(p9dirent_read);
+diff --git a/net/9p/trans_common.c b/net/9p/trans_common.c
+index 38aa6345bdfa..9c0c894b56f8 100644
+--- a/net/9p/trans_common.c
++++ b/net/9p/trans_common.c
+@@ -14,6 +14,7 @@
+ 
+ #include <linux/mm.h>
+ #include <linux/module.h>
++#include "trans_common.h"
+ 
+ /**
+  *  p9_release_req_pages - Release pages after the transaction.
+diff --git a/net/9p/trans_rdma.c b/net/9p/trans_rdma.c
+index f58467a49090..16a4a31f16e0 100644
+--- a/net/9p/trans_rdma.c
++++ b/net/9p/trans_rdma.c
+@@ -276,8 +276,7 @@ p9_cm_event_handler(struct rdma_cm_id *id, struct rdma_cm_event *event)
+       case RDMA_CM_EVENT_DISCONNECTED:
+               if (rdma)
+                       rdma->state = P9_RDMA_CLOSED;
+-              if (c)
+-                      c->status = Disconnected;
++              c->status = Disconnected;
+               break;
+ 
+       case RDMA_CM_EVENT_TIMEWAIT_EXIT:
+@@ -476,7 +475,7 @@ static int rdma_request(struct p9_client *client, struct p9_req_t *req)
+ 
+       err = post_recv(client, rpl_context);
+       if (err) {
+-              p9_debug(P9_DEBUG_FCALL, "POST RECV failed\n");
++              p9_debug(P9_DEBUG_ERROR, "POST RECV failed: %d\n", err);
+               goto recv_error;
+       }
+       /* remove posted receive buffer from request structure */
+@@ -545,7 +544,7 @@ dont_need_post_recv:
+  recv_error:
+       kfree(rpl_context);
+       spin_lock_irqsave(&rdma->req_lock, flags);
+-      if (rdma->state < P9_RDMA_CLOSING) {
++      if (err != -EINTR && rdma->state < P9_RDMA_CLOSING) {
+               rdma->state = P9_RDMA_CLOSING;
+               spin_unlock_irqrestore(&rdma->req_lock, flags);
+               rdma_disconnect(rdma->cm_id);
+diff --git a/net/9p/trans_xen.c b/net/9p/trans_xen.c
+index c10bdf63eae7..389eb635ec2c 100644
+--- a/net/9p/trans_xen.c
++++ b/net/9p/trans_xen.c
+@@ -392,8 +392,8 @@ static int xen_9pfs_front_probe(struct xenbus_device *dev,
+       unsigned int max_rings, max_ring_order, len = 0;
+ 
+       versions = xenbus_read(XBT_NIL, dev->otherend, "versions", &len);
+-      if (!len)
+-              return -EINVAL;
++      if (IS_ERR(versions))
++              return PTR_ERR(versions);
+       if (strcmp(versions, "1")) {
+               kfree(versions);
+               return -EINVAL;
+diff --git a/net/core/sock.c b/net/core/sock.c
+index a88579589946..c8d39092e8bf 100644
+--- a/net/core/sock.c
++++ b/net/core/sock.c
+@@ -1358,9 +1358,6 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
+       {
+               u32 meminfo[SK_MEMINFO_VARS];
+ 
+-              if (get_user(len, optlen))
+-                      return -EFAULT;
+-
+               sk_get_meminfo(sk, meminfo);
+ 
+               len = min_t(unsigned int, len, sizeof(meminfo));
+diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
+index 115d9fd413e2..53a11894f9e4 100644
+--- a/net/ipv4/raw.c
++++ b/net/ipv4/raw.c
+@@ -202,7 +202,7 @@ static int raw_v4_input(struct sk_buff *skb, const struct iphdr *iph, int hash)
+               }
+               sk = __raw_v4_lookup(net, sk_next(sk), iph->protocol,
+                                    iph->saddr, iph->daddr,
+-                                   skb->dev->ifindex, sdif);
++                                   dif, sdif);
+       }
+ out:
+       read_unlock(&raw_v4_hashinfo.lock);
+diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
+index b89920c0f226..54343dc29cb4 100644
+--- a/net/ipv4/udp.c
++++ b/net/ipv4/udp.c
+@@ -563,7 +563,11 @@ static inline struct sock *__udp4_lib_lookup_skb(struct sk_buff *skb,
+ struct sock *udp4_lib_lookup_skb(struct sk_buff *skb,
+                                __be16 sport, __be16 dport)
+ {
+-      return __udp4_lib_lookup_skb(skb, sport, dport, &udp_table);
++      const struct iphdr *iph = ip_hdr(skb);
++
++      return __udp4_lib_lookup(dev_net(skb->dev), iph->saddr, sport,
++                               iph->daddr, dport, inet_iif(skb),
++                               inet_sdif(skb), &udp_table, NULL);
+ }
+ EXPORT_SYMBOL_GPL(udp4_lib_lookup_skb);
+ 
+diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
+index 8d185a0fc5af..9f7bfeb90fb0 100644
+--- a/net/ipv6/udp.c
++++ b/net/ipv6/udp.c
+@@ -308,7 +308,7 @@ struct sock *udp6_lib_lookup_skb(struct sk_buff *skb,
+ 
+       return __udp6_lib_lookup(dev_net(skb->dev), &iph->saddr, sport,
+                                &iph->daddr, dport, inet6_iif(skb),
+-                               inet6_sdif(skb), &udp_table, skb);
++                               inet6_sdif(skb), &udp_table, NULL);
+ }
+ EXPORT_SYMBOL_GPL(udp6_lib_lookup_skb);
+ 
+@@ -506,7 +506,7 @@ void __udp6_lib_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
+       struct net *net = dev_net(skb->dev);
+ 
+       sk = __udp6_lib_lookup(net, daddr, uh->dest, saddr, uh->source,
+-                             inet6_iif(skb), 0, udptable, skb);
++                             inet6_iif(skb), 0, udptable, NULL);
+       if (!sk) {
+               __ICMP6_INC_STATS(net, __in6_dev_get(skb->dev),
+                                 ICMP6_MIB_INERRORS);
+diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
+index a968e81d4c81..047ee7ff7038 100644
+--- a/net/packet/af_packet.c
++++ b/net/packet/af_packet.c
+@@ -2438,6 +2438,9 @@ static void tpacket_destruct_skb(struct sk_buff *skb)
+ 
+               ts = __packet_set_timestamp(po, ph, skb);
+               __packet_set_status(po, ph, TP_STATUS_AVAILABLE | ts);
++
++              if (!packet_read_pending(&po->tx_ring))
++                      complete(&po->skb_completion);
+       }
+ 
+       sock_wfree(skb);
+@@ -2632,7 +2635,7 @@ static int tpacket_parse_header(struct packet_sock *po, void *frame,
+ 
+ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
+ {
+-      struct sk_buff *skb;
++      struct sk_buff *skb = NULL;
+       struct net_device *dev;
+       struct virtio_net_hdr *vnet_hdr = NULL;
+       struct sockcm_cookie sockc;
+@@ -2647,6 +2650,7 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
+       int len_sum = 0;
+       int status = TP_STATUS_AVAILABLE;
+       int hlen, tlen, copylen = 0;
++      long timeo = 0;
+ 
+       mutex_lock(&po->pg_vec_lock);
+ 
+@@ -2693,12 +2697,21 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
+       if ((size_max > dev->mtu + reserve + VLAN_HLEN) && !po->has_vnet_hdr)
+               size_max = dev->mtu + reserve + VLAN_HLEN;
+ 
++      reinit_completion(&po->skb_completion);
++
+       do {
+               ph = packet_current_frame(po, &po->tx_ring,
+                                         TP_STATUS_SEND_REQUEST);
+               if (unlikely(ph == NULL)) {
+-                      if (need_wait && need_resched())
+-                              schedule();
++                      if (need_wait && skb) {
++                              timeo = sock_sndtimeo(&po->sk, msg->msg_flags & 
MSG_DONTWAIT);
++                              timeo = 
wait_for_completion_interruptible_timeout(&po->skb_completion, timeo);
++                              if (timeo <= 0) {
++                                      err = !timeo ? -ETIMEDOUT : 
-ERESTARTSYS;
++                                      goto out_put;
++                              }
++                      }
++                      /* check for additional frames */
+                       continue;
+               }
+ 
+@@ -3252,6 +3265,7 @@ static int packet_create(struct net *net, struct socket *sock, int protocol,
+       sock_init_data(sock, sk);
+ 
+       po = pkt_sk(sk);
++      init_completion(&po->skb_completion);
+       sk->sk_family = PF_PACKET;
+       po->num = proto;
+       po->xmit = dev_queue_xmit;
+@@ -4340,7 +4354,7 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
+                                   req3->tp_sizeof_priv ||
+                                   req3->tp_feature_req_word) {
+                                       err = -EINVAL;
+-                                      goto out;
++                                      goto out_free_pg_vec;
+                               }
+                       }
+                       break;
+@@ -4404,6 +4418,7 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
+                       prb_shutdown_retire_blk_timer(po, rb_queue);
+       }
+ 
++out_free_pg_vec:
+       if (pg_vec)
+               free_pg_vec(pg_vec, order, req->tp_block_nr);
+ out:
+diff --git a/net/packet/internal.h b/net/packet/internal.h
+index 3bb7c5fb3bff..c70a2794456f 100644
+--- a/net/packet/internal.h
++++ b/net/packet/internal.h
+@@ -128,6 +128,7 @@ struct packet_sock {
+       unsigned int            tp_hdrlen;
+       unsigned int            tp_reserve;
+       unsigned int            tp_tstamp;
++      struct completion       skb_completion;
+       struct net_device __rcu *cached_dev;
+       int                     (*xmit)(struct sk_buff *skb);
+       struct packet_type      prot_hook ____cacheline_aligned_in_smp;
+diff --git a/net/sctp/endpointola.c b/net/sctp/endpointola.c
+index ee1e601a0b11..5d4079ef3de6 100644
+--- a/net/sctp/endpointola.c
++++ b/net/sctp/endpointola.c
+@@ -126,10 +126,6 @@ static struct sctp_endpoint *sctp_endpoint_init(struct sctp_endpoint *ep,
+       /* Initialize the bind addr area */
+       sctp_bind_addr_init(&ep->base.bind_addr, 0);
+ 
+-      /* Remember who we are attached to.  */
+-      ep->base.sk = sk;
+-      sock_hold(ep->base.sk);
+-
+       /* Create the lists of associations.  */
+       INIT_LIST_HEAD(&ep->asocs);
+ 
+@@ -167,6 +163,10 @@ static struct sctp_endpoint *sctp_endpoint_init(struct sctp_endpoint *ep,
+       ep->prsctp_enable = net->sctp.prsctp_enable;
+       ep->reconf_enable = net->sctp.reconf_enable;
+ 
++      /* Remember who we are attached to.  */
++      ep->base.sk = sk;
++      sock_hold(ep->base.sk);
++
+       return ep;
+ 
+ nomem_hmacs:
+diff --git a/net/tipc/core.c b/net/tipc/core.c
+index 7561e64c220e..67ac10434ba2 100644
+--- a/net/tipc/core.c
++++ b/net/tipc/core.c
+@@ -128,7 +128,7 @@ static int __init tipc_init(void)
+       if (err)
+               goto out_sysctl;
+ 
+-      err = register_pernet_subsys(&tipc_net_ops);
++      err = register_pernet_device(&tipc_net_ops);
+       if (err)
+               goto out_pernet;
+ 
+@@ -136,7 +136,7 @@ static int __init tipc_init(void)
+       if (err)
+               goto out_socket;
+ 
+-      err = register_pernet_subsys(&tipc_topsrv_net_ops);
++      err = register_pernet_device(&tipc_topsrv_net_ops);
+       if (err)
+               goto out_pernet_topsrv;
+ 
+@@ -147,11 +147,11 @@ static int __init tipc_init(void)
+       pr_info("Started in single node mode\n");
+       return 0;
+ out_bearer:
+-      unregister_pernet_subsys(&tipc_topsrv_net_ops);
++      unregister_pernet_device(&tipc_topsrv_net_ops);
+ out_pernet_topsrv:
+       tipc_socket_stop();
+ out_socket:
+-      unregister_pernet_subsys(&tipc_net_ops);
++      unregister_pernet_device(&tipc_net_ops);
+ out_pernet:
+       tipc_unregister_sysctl();
+ out_sysctl:
+@@ -166,9 +166,9 @@ out_netlink:
+ static void __exit tipc_exit(void)
+ {
+       tipc_bearer_cleanup();
+-      unregister_pernet_subsys(&tipc_topsrv_net_ops);
++      unregister_pernet_device(&tipc_topsrv_net_ops);
+       tipc_socket_stop();
+-      unregister_pernet_subsys(&tipc_net_ops);
++      unregister_pernet_device(&tipc_net_ops);
+       tipc_netlink_stop();
+       tipc_netlink_compat_stop();
+       tipc_unregister_sysctl();
+diff --git a/net/tipc/netlink_compat.c b/net/tipc/netlink_compat.c
+index aa75bc8b158f..41954ed7ff51 100644
+--- a/net/tipc/netlink_compat.c
++++ b/net/tipc/netlink_compat.c
+@@ -436,7 +436,11 @@ static int tipc_nl_compat_bearer_disable(struct tipc_nl_compat_cmd_doit *cmd,
+       if (!bearer)
+               return -EMSGSIZE;
+ 
+-      len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_BEARER_NAME);
++      len = TLV_GET_DATA_LEN(msg->req);
++      if (len <= 0)
++              return -EINVAL;
++
++      len = min_t(int, len, TIPC_MAX_BEARER_NAME);
+       if (!string_is_valid(name, len))
+               return -EINVAL;
+ 
+@@ -528,7 +532,11 @@ static int tipc_nl_compat_link_stat_dump(struct tipc_nl_compat_msg *msg,
+ 
+       name = (char *)TLV_DATA(msg->req);
+ 
+-      len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_LINK_NAME);
++      len = TLV_GET_DATA_LEN(msg->req);
++      if (len <= 0)
++              return -EINVAL;
++
++      len = min_t(int, len, TIPC_MAX_BEARER_NAME);
+       if (!string_is_valid(name, len))
+               return -EINVAL;
+ 
+@@ -806,7 +814,11 @@ static int tipc_nl_compat_link_reset_stats(struct tipc_nl_compat_cmd_doit *cmd,
+       if (!link)
+               return -EMSGSIZE;
+ 
+-      len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_LINK_NAME);
++      len = TLV_GET_DATA_LEN(msg->req);
++      if (len <= 0)
++              return -EINVAL;
++
++      len = min_t(int, len, TIPC_MAX_BEARER_NAME);
+       if (!string_is_valid(name, len))
+               return -EINVAL;
+ 
+diff --git a/net/tipc/udp_media.c b/net/tipc/udp_media.c
+index e3cff9d6c092..de011fdd7964 100644
+--- a/net/tipc/udp_media.c
++++ b/net/tipc/udp_media.c
+@@ -174,7 +174,6 @@ static int tipc_udp_xmit(struct net *net, struct sk_buff 
*skb,
+                       goto tx_error;
+               }
+ 
+-              skb->dev = rt->dst.dev;
+               ttl = ip4_dst_hoplimit(&rt->dst);
+               udp_tunnel_xmit_skb(rt, ub->ubsock->sk, skb, src->ipv4.s_addr,
+                                   dst->ipv4.s_addr, 0, ttl, 0, src->port,
+@@ -193,10 +192,9 @@ static int tipc_udp_xmit(struct net *net, struct sk_buff *skb,
+               if (err)
+                       goto tx_error;
+               ttl = ip6_dst_hoplimit(ndst);
+-              err = udp_tunnel6_xmit_skb(ndst, ub->ubsock->sk, skb,
+-                                         ndst->dev, &src->ipv6,
+-                                         &dst->ipv6, 0, ttl, 0, src->port,
+-                                         dst->port, false);
++              err = udp_tunnel6_xmit_skb(ndst, ub->ubsock->sk, skb, NULL,
++                                         &src->ipv6, &dst->ipv6, 0, ttl, 0,
++                                         src->port, dst->port, false);
+ #endif
+       }
+       return err;
+diff --git a/tools/perf/builtin-help.c b/tools/perf/builtin-help.c
+index a0f7ed2b869b..3c364798093b 100644
+--- a/tools/perf/builtin-help.c
++++ b/tools/perf/builtin-help.c
+@@ -189,7 +189,7 @@ static void add_man_viewer(const char *name)
+       while (*p)
+               p = &((*p)->next);
+       *p = zalloc(sizeof(**p) + len + 1);
+-      strncpy((*p)->name, name, len);
++      strcpy((*p)->name, name);
+ }
+ 
+ static int supported_man_viewer(const char *name, size_t len)
+diff --git a/tools/perf/ui/tui/helpline.c b/tools/perf/ui/tui/helpline.c
+index 4ca799aadb4e..93d6b7240285 100644
+--- a/tools/perf/ui/tui/helpline.c
++++ b/tools/perf/ui/tui/helpline.c
+@@ -24,7 +24,7 @@ static void tui_helpline__push(const char *msg)
+       SLsmg_set_color(0);
+       SLsmg_write_nstring((char *)msg, SLtt_Screen_Cols);
+       SLsmg_refresh();
+-      strncpy(ui_helpline__current, msg, sz)[sz - 1] = '\0';
++      strlcpy(ui_helpline__current, msg, sz);
+ }
+ 
+ static int tui_helpline__show(const char *format, va_list ap)
+diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
+index 696f2654826b..f11cead6a151 100644
+--- a/tools/perf/util/header.c
++++ b/tools/perf/util/header.c
+@@ -3171,7 +3171,7 @@ perf_event__synthesize_event_update_name(struct perf_tool *tool,
+       if (ev == NULL)
+               return -ENOMEM;
+ 
+-      strncpy(ev->data, evsel->name, len);
++      strlcpy(ev->data, evsel->name, len + 1);
+       err = process(tool, (union perf_event*) ev, NULL, NULL);
+       free(ev);
+       return err;
