[PATCH AUTOSEL 4.19 097/206] ext4: fix a data race at inode->i_disksize

2020-09-17 Thread Sasha Levin
From: Qiujun Huang 

[ Upstream commit dce8e237100f60c28cc66effb526ba65a01d8cb3 ]

KCSAN found that inode->i_disksize could be accessed concurrently.

BUG: KCSAN: data-race in ext4_mark_iloc_dirty / ext4_write_end

write (marked) to 0x8b8932f40090 of 8 bytes by task 66792 on cpu 0:
 ext4_write_end+0x53f/0x5b0
 ext4_da_write_end+0x237/0x510
 generic_perform_write+0x1c4/0x2a0
 ext4_buffered_write_iter+0x13a/0x210
 ext4_file_write_iter+0xe2/0x9b0
 new_sync_write+0x29c/0x3a0
 __vfs_write+0x92/0xa0
 vfs_write+0xfc/0x2a0
 ksys_write+0xe8/0x140
 __x64_sys_write+0x4c/0x60
 do_syscall_64+0x8a/0x2a0
 entry_SYSCALL_64_after_hwframe+0x44/0xa9

read to 0x8b8932f40090 of 8 bytes by task 14414 on cpu 1:
 ext4_mark_iloc_dirty+0x716/0x1190
 ext4_mark_inode_dirty+0xc9/0x360
 ext4_convert_unwritten_extents+0x1bc/0x2a0
 ext4_convert_unwritten_io_end_vec+0xc5/0x150
 ext4_put_io_end+0x82/0x130
 ext4_writepages+0xae7/0x16f0
 do_writepages+0x64/0x120
 __writeback_single_inode+0x7d/0x650
 writeback_sb_inodes+0x3a4/0x860
 __writeback_inodes_wb+0xc4/0x150
 wb_writeback+0x43f/0x510
 wb_workfn+0x3b2/0x8a0
 process_one_work+0x39b/0x7e0
 worker_thread+0x88/0x650
 kthread+0x1d4/0x1f0
 ret_from_fork+0x35/0x40

The plain read is outside of the inode->i_data_sem critical section,
which results in a data race. Fix it by adding READ_ONCE().
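
As a rough sketch (made-up names, not the ext4 code) of the pattern KCSAN
complains about and the annotation that resolves it:

struct foo {
	struct rw_semaphore sem;
	loff_t size;		/* written under sem, also read locklessly */
};

static void foo_set_size(struct foo *f, loff_t new_size)
{
	down_write(&f->sem);
	WRITE_ONCE(f->size, new_size);	/* marked write inside the critical section */
	up_write(&f->sem);
}

static bool foo_needs_datasync(struct foo *f, loff_t on_disk)
{
	/* lockless read: READ_ONCE() prevents load tearing and tells KCSAN
	 * that the concurrent access is intentional
	 */
	return READ_ONCE(f->size) != on_disk;
}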

Signed-off-by: Qiujun Huang 
Link: 
https://lore.kernel.org/r/1582556566-3909-1-git-send-email-hqjag...@gmail.com
Signed-off-by: Theodore Ts'o 
Signed-off-by: Sasha Levin 
---
 fs/ext4/inode.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index cd833f4e64ef1..52be4c9650241 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -5315,7 +5315,7 @@ static int ext4_do_update_inode(handle_t *handle,
raw_inode->i_file_acl_high =
cpu_to_le16(ei->i_file_acl >> 32);
raw_inode->i_file_acl_lo = cpu_to_le32(ei->i_file_acl);
-   if (ei->i_disksize != ext4_isize(inode->i_sb, raw_inode)) {
+   if (READ_ONCE(ei->i_disksize) != ext4_isize(inode->i_sb, raw_inode)) {
ext4_isize_set(raw_inode, ei->i_disksize);
need_datasync = 1;
}
-- 
2.25.1



[PATCH AUTOSEL 4.19 071/206] skbuff: fix a data race in skb_queue_len()

2020-09-17 Thread Sasha Levin
From: Qian Cai 

[ Upstream commit 86b18aaa2b5b5bb48e609cd591b3d2d0fdbe0442 ]

sk_buff.qlen can be accessed concurrently as noticed by KCSAN,

 BUG: KCSAN: data-race in __skb_try_recv_from_queue / unix_dgram_sendmsg

 read to 0x8a1b1d8a81c0 of 4 bytes by task 5371 on cpu 96:
  unix_dgram_sendmsg+0x9a9/0xb70 include/linux/skbuff.h:1821
 net/unix/af_unix.c:1761
  sys_sendmsg+0x33e/0x370
  ___sys_sendmsg+0xa6/0xf0
  __sys_sendmsg+0x69/0xf0
  __x64_sys_sendmsg+0x51/0x70
  do_syscall_64+0x91/0xb47
  entry_SYSCALL_64_after_hwframe+0x49/0xbe

 write to 0x8a1b1d8a81c0 of 4 bytes by task 1 on cpu 99:
  __skb_try_recv_from_queue+0x327/0x410 include/linux/skbuff.h:2029
  __skb_try_recv_datagram+0xbe/0x220
  unix_dgram_recvmsg+0xee/0x850
  sys_recvmsg+0x1fb/0x210
  ___sys_recvmsg+0xa2/0xf0
  __sys_recvmsg+0x66/0xf0
  __x64_sys_recvmsg+0x51/0x70
  do_syscall_64+0x91/0xb47
  entry_SYSCALL_64_after_hwframe+0x49/0xbe

Since only the read operates locklessly, it could introduce a logic
bug in unix_recvq_full() due to load tearing. Fix it by adding
a lockless variant of skb_queue_len() and unix_recvq_full(), where
READ_ONCE() is used on the read and WRITE_ONCE() on the write, similar to
commit d7d16a89350a ("net: add skb_queue_empty_lockless()").
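
A rough sketch (made-up names, not the skbuff code) of why both sides need
annotations once one side runs without the lock:

struct queue {
	spinlock_t lock;
	__u32 qlen;
};

/* writer path, called with q->lock held: WRITE_ONCE() keeps the compiler
 * from splitting the update into multiple stores
 */
static void queue_dec(struct queue *q)
{
	WRITE_ONCE(q->qlen, q->qlen - 1);
}

/* lockless reader: READ_ONCE() prevents a torn load, so the caller never
 * sees a half-written length
 */
static __u32 queue_len_lockless(const struct queue *q)
{
	return READ_ONCE(q->qlen);
}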

Signed-off-by: Qian Cai 
Signed-off-by: David S. Miller 
Signed-off-by: Sasha Levin 
---
 include/linux/skbuff.h | 14 +-
 net/unix/af_unix.c | 11 +--
 2 files changed, 22 insertions(+), 3 deletions(-)

diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 25407c206e732..f0474ea735695 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -1688,6 +1688,18 @@ static inline __u32 skb_queue_len(const struct 
sk_buff_head *list_)
return list_->qlen;
 }
 
+/**
+ * skb_queue_len_lockless  - get queue length
+ * @list_: list to measure
+ *
+ * Return the length of an &sk_buff queue.
+ * This variant can be used in lockless contexts.
+ */
+static inline __u32 skb_queue_len_lockless(const struct sk_buff_head *list_)
+{
+   return READ_ONCE(list_->qlen);
+}
+
 /**
  * __skb_queue_head_init - initialize non-spinlock portions of sk_buff_head
  * @list: queue to initialize
@@ -1895,7 +1907,7 @@ static inline void __skb_unlink(struct sk_buff *skb, 
struct sk_buff_head *list)
 {
struct sk_buff *next, *prev;
 
-   list->qlen--;
+   WRITE_ONCE(list->qlen, list->qlen - 1);
next   = skb->next;
prev   = skb->prev;
skb->next  = skb->prev = NULL;
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 2318e2e2748f4..2020306468af4 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -192,11 +192,17 @@ static inline int unix_may_send(struct sock *sk, struct 
sock *osk)
return unix_peer(osk) == NULL || unix_our_peer(sk, osk);
 }
 
-static inline int unix_recvq_full(struct sock const *sk)
+static inline int unix_recvq_full(const struct sock *sk)
 {
return skb_queue_len(&sk->sk_receive_queue) > sk->sk_max_ack_backlog;
 }
 
+static inline int unix_recvq_full_lockless(const struct sock *sk)
+{
+   return skb_queue_len_lockless(&sk->sk_receive_queue) >
+   READ_ONCE(sk->sk_max_ack_backlog);
+}
+
 struct sock *unix_peer_get(struct sock *s)
 {
struct sock *peer;
@@ -1788,7 +1794,8 @@ restart_locked:
 * - unix_peer(sk) == sk by time of get but disconnected before lock
 */
if (other != sk &&
-   unlikely(unix_peer(other) != sk && unix_recvq_full(other))) {
+   unlikely(unix_peer(other) != sk &&
+   unix_recvq_full_lockless(other))) {
if (timeo) {
timeo = unix_wait_for_peer(other, timeo);
 
-- 
2.25.1



[PATCH AUTOSEL 4.19 063/206] selftests/ftrace: fix glob selftest

2020-09-17 Thread Sasha Levin
From: Sven Schnelle 

[ Upstream commit af4ddd607dff7aabd466a4a878e01b9f592a75ab ]

test.d/ftrace/func-filter-glob.tc is failing on s390 because it has
ARCH_INLINE_SPIN_LOCK and friends set to 'y'. So the usual
__raw_spin_lock symbol isn't in the ftrace function list. Change
'*aw*lock' to '*pin*lock' which would hopefully match some of the
locking functions on all platforms.

Reviewed-by: Steven Rostedt (VMware) 
Signed-off-by: Sven Schnelle 
Signed-off-by: Shuah Khan 
Signed-off-by: Sasha Levin 
---
 .../testing/selftests/ftrace/test.d/ftrace/func-filter-glob.tc  | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tools/testing/selftests/ftrace/test.d/ftrace/func-filter-glob.tc 
b/tools/testing/selftests/ftrace/test.d/ftrace/func-filter-glob.tc
index 27a54a17da65d..f4e92afab14b2 100644
--- a/tools/testing/selftests/ftrace/test.d/ftrace/func-filter-glob.tc
+++ b/tools/testing/selftests/ftrace/test.d/ftrace/func-filter-glob.tc
@@ -30,7 +30,7 @@ ftrace_filter_check '*schedule*' '^.*schedule.*$'
 ftrace_filter_check 'schedule*' '^schedule.*$'
 
 # filter by *mid*end
-ftrace_filter_check '*aw*lock' '.*aw.*lock$'
+ftrace_filter_check '*pin*lock' '.*pin.*lock$'
 
 # filter by start*mid*
 ftrace_filter_check 'mutex*try*' '^mutex.*try.*'
-- 
2.25.1



[PATCH AUTOSEL 4.19 076/206] drm/omap: fix possible object reference leak

2020-09-17 Thread Sasha Levin
From: Wen Yang 

[ Upstream commit 47340e46f34a3b1d80e40b43ae3d7a8da34a3541 ]

The call to of_find_matching_node returns a node pointer with refcount
incremented thus it must be explicitly decremented after the last
usage.

Detected by coccinelle with the following warnings:
drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c:212:2-8: ERROR: missing 
of_node_put; acquired a node pointer with refcount incremented on line 209, but 
without a corresponding object release within this function.
drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c:237:1-7: ERROR: missing 
of_node_put; acquired a node pointer with refcount incremented on line 209, but 
without a corresponding object release within this function.
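
The rule being applied, as a generic sketch (the match-table name is a
placeholder, not from this driver):

static int __init my_boot_init(void)
{
	struct device_node *np;

	np = of_find_matching_node(NULL, my_match_table);	/* takes a reference */
	if (!np || !of_device_is_available(np))
		goto put_node;			/* still drop the reference */

	/* ... walk and use np ... */

put_node:
	of_node_put(np);			/* of_node_put(NULL) is a no-op */
	return 0;
}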

Signed-off-by: Wen Yang 
Reviewed-by: Laurent Pinchart 
Reviewed-by: Mukesh Ojha 
Cc: Tomi Valkeinen 
Cc: David Airlie 
Cc: Daniel Vetter 
Cc: Sebastian Reichel 
Cc: Laurent Pinchart 
Cc: dri-de...@lists.freedesktop.org
Cc: linux-kernel@vger.kernel.org
Cc: Markus Elfring 
Signed-off-by: Tomi Valkeinen 
Link: 
https://patchwork.freedesktop.org/patch/msgid/1554692313-28882-2-git-send-email-wen.yan...@zte.com.cn
Signed-off-by: Sasha Levin 
---
 drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c 
b/drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c
index 3bfb95d230e0e..d8fb686c1fda9 100644
--- a/drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c
+++ b/drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c
@@ -193,7 +193,7 @@ static int __init omapdss_boot_init(void)
dss = of_find_matching_node(NULL, omapdss_of_match);
 
if (dss == NULL || !of_device_is_available(dss))
-   return 0;
+   goto put_node;
 
omapdss_walk_device(dss, true);
 
@@ -218,6 +218,8 @@ static int __init omapdss_boot_init(void)
kfree(n);
}
 
+put_node:
+   of_node_put(dss);
return 0;
 }
 
-- 
2.25.1



[PATCH AUTOSEL 4.19 048/206] sctp: move trace_sctp_probe_path into sctp_outq_sack

2020-09-17 Thread Sasha Levin
From: Kevin Kou 

[ Upstream commit f643ee295c1c63bc117fb052d4da681354d6f732 ]

The original patch that brought in the "SCTP ACK tracking trace event"
feature was committed on Dec 20, 2017. It replaced the jprobe usage
with trace events and brought in two trace events: one is
TRACE_EVENT(sctp_probe), the other is TRACE_EVENT(sctp_probe_path).
The original patch intended to trigger trace_sctp_probe_path from
TRACE_EVENT(sctp_probe), as in the code below:

+TRACE_EVENT(sctp_probe,
+
+   TP_PROTO(const struct sctp_endpoint *ep,
+const struct sctp_association *asoc,
+struct sctp_chunk *chunk),
+
+   TP_ARGS(ep, asoc, chunk),
+
+   TP_STRUCT__entry(
+   __field(__u64, asoc)
+   __field(__u32, mark)
+   __field(__u16, bind_port)
+   __field(__u16, peer_port)
+   __field(__u32, pathmtu)
+   __field(__u32, rwnd)
+   __field(__u16, unack_data)
+   ),
+
+   TP_fast_assign(
+   struct sk_buff *skb = chunk->skb;
+
+   __entry->asoc = (unsigned long)asoc;
+   __entry->mark = skb->mark;
+   __entry->bind_port = ep->base.bind_addr.port;
+   __entry->peer_port = asoc->peer.port;
+   __entry->pathmtu = asoc->pathmtu;
+   __entry->rwnd = asoc->peer.rwnd;
+   __entry->unack_data = asoc->unack_data;
+
+   if (trace_sctp_probe_path_enabled()) {
+   struct sctp_transport *sp;
+
+   list_for_each_entry(sp, &asoc->peer.transport_addr_list,
+   transports) {
+   trace_sctp_probe_path(sp, asoc);
+   }
+   }
+   ),

But I found during testing that it did not work and trace_sctp_probe_path
had no output. I finally found that there is a trace buffer lock
operation (trace_event_buffer_reserve) in include/trace/trace_events.h:

static notrace void \
trace_event_raw_event_##call(void *__data, proto)   \
{   \
struct trace_event_file *trace_file = __data;   \
struct trace_event_data_offsets_##call __maybe_unused __data_offsets;\
struct trace_event_buffer fbuffer;  \
struct trace_event_raw_##call *entry;   \
int __data_size;\
\
if (trace_trigger_soft_disabled(trace_file))\
return; \
\
__data_size = trace_event_get_offsets_##call(&__data_offsets, args); \
\
entry = trace_event_buffer_reserve(&fbuffer, trace_file,\
 sizeof(*entry) + __data_size); \
\
if (!entry) \
return; \
\
tstruct \
\
{ assign; } \
\
trace_event_buffer_commit(&fbuffer);\
}

The reason trace_sctp_probe_path produces no output is that it is
written in the TP_fast_assign part of TRACE_EVENT(sctp_probe), so it
is placed ( { assign; } ) after the trace_event_buffer_reserve() when
the compiler expands the macro,

entry = trace_event_buffer_reserve(&fbuffer, trace_file,\
 sizeof(*entry) + __data_size); \
\
if (!entry) \
return; \
\
tstruct \
\
{ assign; } \

so trace_sctp_probe_path ultimately cannot acquire the trace_event_buffer
and produces no output; that is to say, nesting of tracepoint entry functions
is not allowed. The function call flow is:

trace_sctp_probe()
-> trace_event_raw_event_sctp_probe()
 -> lock 
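
To emit the per-path event reliably it has to be triggered from ordinary
code such as sctp_outq_sack(), as the subject says, where it can reserve
its own trace buffer entry. A rough sketch of that approach (not the exact
upstream hunk):

	if (trace_sctp_probe_path_enabled()) {
		struct sctp_transport *sp;

		list_for_each_entry(sp, &asoc->peer.transport_addr_list,
				    transports) {
			trace_sctp_probe_path(sp, asoc);
		}
	}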

[PATCH AUTOSEL 4.19 043/206] neigh_stat_seq_next() should increase position index

2020-09-17 Thread Sasha Levin
From: Vasily Averin 

[ Upstream commit 1e3f9f073c47bee7c23e77316b07bc12338c5bba ]

If a seq_file .next function does not change the position index,
a read after some lseek can generate unexpected output.

https://bugzilla.kernel.org/show_bug.cgi?id=206283
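
A generic sketch of the expected .next contract (types and names below are
illustrative, not the neighbour code):

struct my_table {
	unsigned int nr_entries;
	unsigned long entries[64];
};

static void *my_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct my_table *t = seq->private;

	if (*pos < t->nr_entries) {
		void *entry = &t->entries[*pos];

		++*pos;			/* advance for the entry we return */
		return entry;
	}
	++*pos;				/* past the end: still advance the index */
	return NULL;
}
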
Signed-off-by: Vasily Averin 
Signed-off-by: David S. Miller 
Signed-off-by: Sasha Levin 
---
 net/core/neighbour.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index bf738ec68cb53..6e890f51b7d86 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -2844,6 +2844,7 @@ static void *neigh_stat_seq_next(struct seq_file *seq, 
void *v, loff_t *pos)
*pos = cpu+1;
return per_cpu_ptr(tbl->stats, cpu);
}
+   (*pos)++;
return NULL;
 }
 
-- 
2.25.1



RE: [PATCH v2 1/1] fpga: dfl: afu: harden port enable logic

2020-09-17 Thread Wu, Hao
> -Original Message-
> From: Russ Weight 
> Sent: Friday, September 18, 2020 2:32 AM
> To: m...@kernel.org; linux-f...@vger.kernel.org; linux-
> ker...@vger.kernel.org
> Cc: t...@redhat.com; lgonc...@redhat.com; Xu, Yilun ;
> Wu, Hao ; Gerlach, Matthew
> ; Weight, Russell H
> 
> Subject: [PATCH v2 1/1] fpga: dfl: afu: harden port enable logic
> 
> Port enable is not complete until ACK = 0. Change
> __afu_port_enable() to guarantee that the enable process
> is complete by polling for ACK == 0.

The description of this port reset ack bit is

" After initiating a Port soft reset, SW should monitor this bit. HW 
will set this bit when all outstanding requests initiated by this port
have been drained, and the minimum soft reset pulse width has 
elapsed. "

But there is no description of what to do when clearing a Port soft
reset to enable the port.

So we need to understand clearly why we need this change
(e.g. what may happen without it), and whether it applies to all
existing DFL devices and future ones, or just to one specific card.
Could you please help? : )

> 
> Signed-off-by: Russ Weight 
> ---
>  drivers/fpga/dfl-afu-error.c |  2 +-
>  drivers/fpga/dfl-afu-main.c  | 29 +
>  drivers/fpga/dfl-afu.h   |  2 +-
>  3 files changed, 23 insertions(+), 10 deletions(-)
> 
> diff --git a/drivers/fpga/dfl-afu-error.c b/drivers/fpga/dfl-afu-error.c
> index c4691187cca9..0806532a3e9f 100644
> --- a/drivers/fpga/dfl-afu-error.c
> +++ b/drivers/fpga/dfl-afu-error.c
> @@ -103,7 +103,7 @@ static int afu_port_err_clear(struct device *dev, u64
> err)
>   __afu_port_err_mask(dev, false);
> 
>   /* Enable the Port by clear the reset */
> - __afu_port_enable(pdev);
> + ret = __afu_port_enable(pdev);
> 
>  done:
>   mutex_unlock(&pdata->lock);
> diff --git a/drivers/fpga/dfl-afu-main.c b/drivers/fpga/dfl-afu-main.c
> index 753cda4b2568..f73b06cdf13c 100644
> --- a/drivers/fpga/dfl-afu-main.c
> +++ b/drivers/fpga/dfl-afu-main.c
> @@ -21,6 +21,9 @@
> 
>  #include "dfl-afu.h"
> 
> +#define RST_POLL_INVL 10 /* us */
> +#define RST_POLL_TIMEOUT 1000 /* us */
> +
>  /**
>   * __afu_port_enable - enable a port by clear reset
>   * @pdev: port platform device.
> @@ -32,7 +35,7 @@
>   *
>   * The caller needs to hold lock for protection.
>   */
> -void __afu_port_enable(struct platform_device *pdev)
> +int __afu_port_enable(struct platform_device *pdev)
>  {
>   struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
>   void __iomem *base;
> @@ -41,7 +44,7 @@ void __afu_port_enable(struct platform_device *pdev)
>   WARN_ON(!pdata->disable_count);
> 
>   if (--pdata->disable_count != 0)
> - return;
> + return 0;
> 
>   base = dfl_get_feature_ioaddr_by_id(&pdev->dev,
> PORT_FEATURE_ID_HEADER);
> 
> @@ -49,10 +52,20 @@ void __afu_port_enable(struct platform_device
> *pdev)
>   v = readq(base + PORT_HDR_CTRL);
>   v &= ~PORT_CTRL_SFTRST;
>   writeq(v, base + PORT_HDR_CTRL);
> -}
> 
> -#define RST_POLL_INVL 10 /* us */
> -#define RST_POLL_TIMEOUT 1000 /* us */
> + /*
> +  * HW clears the ack bit to indicate that the port is fully out
> +  * of reset.
> +  */
> + if (readq_poll_timeout(base + PORT_HDR_CTRL, v,
> +!(v & PORT_CTRL_SFTRST_ACK),
> +RST_POLL_INVL, RST_POLL_TIMEOUT)) {
> + dev_err(&pdev->dev, "timeout, failure to enable device\n");
> + return -ETIMEDOUT;
> + }
> +
> + return 0;
> +}
> 
>  /**
>   * __afu_port_disable - disable a port by hold reset
> @@ -111,7 +124,7 @@ static int __port_reset(struct platform_device *pdev)
> 
>   ret = __afu_port_disable(pdev);
>   if (!ret)
> - __afu_port_enable(pdev);
> + ret = __afu_port_enable(pdev);
> 
>   return ret;

What about:

ret = __afu_port_disable(pdev);
if (ret)
return ret;

return __afu_port_enable(pdev);

Thanks
Hao

>  }
> @@ -872,11 +885,11 @@ static int afu_dev_destroy(struct platform_device
> *pdev)
>  static int port_enable_set(struct platform_device *pdev, bool enable)
>  {
>   struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
> - int ret = 0;
> + int ret;
> 
>   mutex_lock(&pdata->lock);
>   if (enable)
> - __afu_port_enable(pdev);
> + ret = __afu_port_enable(pdev);
>   else
>   ret = __afu_port_disable(pdev);
>   mutex_unlock(&pdata->lock);
> diff --git a/drivers/fpga/dfl-afu.h b/drivers/fpga/dfl-afu.h
> index 576e94960086..e5020e2b1f3d 100644
> --- a/drivers/fpga/dfl-afu.h
> +++ b/drivers/fpga/dfl-afu.h
> @@ -80,7 +80,7 @@ struct dfl_afu {
>  };
> 
>  /* hold pdata->lock when call __afu_port_enable/disable */
> -void __afu_port_enable(struct platform_device *pdev);
> +int __afu_port_enable(struct platform_device *pdev);
>  int __afu_port_disable(struct platform_device *pdev);
> 
>  void 

[PATCH AUTOSEL 4.19 075/206] scsi: lpfc: Fix coverity errors in fmdi attribute handling

2020-09-17 Thread Sasha Levin
From: James Smart 

[ Upstream commit 4cb9e1ddaa145be9ed67b6a7de98ca705a43f998 ]

Coverity reported a memory corruption error for the fdmi attributes
routines:

  CID 15768 [Memory Corruption] Out-of-bounds access on FDMI

Sloppy coding of the fmdi structures. In both the lpfc_fdmi_attr_def and
lpfc_fdmi_reg_port_list structures, a field was placed at the start of
payload that may have variable content. The field was given an arbitrary
type (uint32_t). The code then uses the field name to derive an address,
which it used in things such as memset and memcpy. The memset sizes or
memcpy lengths were larger than the arbitrary type, thus coverity reported
an error.

Fix by replacing the arbitrary fields with the real field structures
describing the payload.
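
A simplified illustration of the bug class (invented names and sizes, not
the real lpfc layout):

struct attr_entry {
	union {
		uint8_t  AttrString[256];
		uint8_t  AttrWWN[8];
		uint32_t AttrInt;
	} un;
};

/* old style: a 4-byte placeholder stands in for the payload */
struct attr_def_old {
	uint32_t AttrType;
	uint32_t AttrLen;
	uint32_t AttrValue;		/* really variable-length data */
};

/* new style: the payload field has its real type */
struct attr_def_new {
	uint32_t AttrType;
	uint32_t AttrLen;
	struct attr_entry AttrValue;
};

static void clear_old(struct attr_def_old *ad)
{
	struct attr_entry *ae = (struct attr_entry *)&ad->AttrValue;

	memset(ae, 0, 256);		/* clears far past the 4-byte field */
}

static void clear_new(struct attr_def_new *ad)
{
	struct attr_entry *ae = &ad->AttrValue;

	memset(ae, 0, sizeof(*ae));	/* bounded by the real payload type */
}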

Link: https://lore.kernel.org/r/20200128002312.16346-8-jsmart2...@gmail.com
Signed-off-by: Dick Kennedy 
Signed-off-by: James Smart 
Signed-off-by: Martin K. Petersen 
Signed-off-by: Sasha Levin 
---
 drivers/scsi/lpfc/lpfc_ct.c | 137 ++--
 drivers/scsi/lpfc/lpfc_hw.h |  36 +-
 2 files changed, 85 insertions(+), 88 deletions(-)

diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index 384f5cd7c3c81..99b4ff78f9dce 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -1737,8 +1737,8 @@ lpfc_fdmi_hba_attr_wwnn(struct lpfc_vport *vport, struct 
lpfc_fdmi_attr_def *ad)
struct lpfc_fdmi_attr_entry *ae;
uint32_t size;
 
-   ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
-   memset(ae, 0, sizeof(struct lpfc_name));
+   ae = &ad->AttrValue;
+   memset(ae, 0, sizeof(*ae));
 
memcpy(&ae->un.AttrWWN, &vport->fc_sparam.nodeName,
   sizeof(struct lpfc_name));
@@ -1754,8 +1754,8 @@ lpfc_fdmi_hba_attr_manufacturer(struct lpfc_vport *vport,
struct lpfc_fdmi_attr_entry *ae;
uint32_t len, size;
 
-   ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
-   memset(ae, 0, 256);
+   ae = &ad->AttrValue;
+   memset(ae, 0, sizeof(*ae));
 
/* This string MUST be consistent with other FC platforms
 * supported by Broadcom.
@@ -1779,8 +1779,8 @@ lpfc_fdmi_hba_attr_sn(struct lpfc_vport *vport, struct 
lpfc_fdmi_attr_def *ad)
struct lpfc_fdmi_attr_entry *ae;
uint32_t len, size;
 
-   ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
-   memset(ae, 0, 256);
+   ae = &ad->AttrValue;
+   memset(ae, 0, sizeof(*ae));
 
strncpy(ae->un.AttrString, phba->SerialNumber,
sizeof(ae->un.AttrString));
@@ -1801,8 +1801,8 @@ lpfc_fdmi_hba_attr_model(struct lpfc_vport *vport,
struct lpfc_fdmi_attr_entry *ae;
uint32_t len, size;
 
-   ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
-   memset(ae, 0, 256);
+   ae = &ad->AttrValue;
+   memset(ae, 0, sizeof(*ae));
 
strncpy(ae->un.AttrString, phba->ModelName,
sizeof(ae->un.AttrString));
@@ -1822,8 +1822,8 @@ lpfc_fdmi_hba_attr_description(struct lpfc_vport *vport,
struct lpfc_fdmi_attr_entry *ae;
uint32_t len, size;
 
-   ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
-   memset(ae, 0, 256);
+   ae = &ad->AttrValue;
+   memset(ae, 0, sizeof(*ae));
 
strncpy(ae->un.AttrString, phba->ModelDesc,
sizeof(ae->un.AttrString));
@@ -1845,8 +1845,8 @@ lpfc_fdmi_hba_attr_hdw_ver(struct lpfc_vport *vport,
struct lpfc_fdmi_attr_entry *ae;
uint32_t i, j, incr, size;
 
-   ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
-   memset(ae, 0, 256);
+   ae = &ad->AttrValue;
+   memset(ae, 0, sizeof(*ae));
 
/* Convert JEDEC ID to ascii for hardware version */
incr = vp->rev.biuRev;
@@ -1875,8 +1875,8 @@ lpfc_fdmi_hba_attr_drvr_ver(struct lpfc_vport *vport,
struct lpfc_fdmi_attr_entry *ae;
uint32_t len, size;
 
-   ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
-   memset(ae, 0, 256);
+   ae = &ad->AttrValue;
+   memset(ae, 0, sizeof(*ae));
 
strncpy(ae->un.AttrString, lpfc_release_version,
sizeof(ae->un.AttrString));
@@ -1897,8 +1897,8 @@ lpfc_fdmi_hba_attr_rom_ver(struct lpfc_vport *vport,
struct lpfc_fdmi_attr_entry *ae;
uint32_t len, size;
 
-   ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
-   memset(ae, 0, 256);
+   ae = &ad->AttrValue;
+   memset(ae, 0, sizeof(*ae));
 
if (phba->sli_rev == LPFC_SLI_REV4)
lpfc_decode_firmware_rev(phba, ae->un.AttrString, 1);
@@ -1922,8 +1922,8 @@ lpfc_fdmi_hba_attr_fmw_ver(struct lpfc_vport *vport,
struct lpfc_fdmi_attr_entry *ae;
uint32_t len, size;
 
-   ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
-   memset(ae, 0, 256);
+   ae = &ad->AttrValue;
+   memset(ae, 0, sizeof(*ae));
 
lpfc_decode_firmware_rev(phba, ae->un.AttrString, 1);
len = strnlen(ae->un.AttrString,
@@ -1942,8 +1942,8 @@ lpfc_fdmi_hba_attr_os_ver(struct lpfc_vport 

[PATCH AUTOSEL 4.19 062/206] ceph: ensure we have a new cap before continuing in fill_inode

2020-09-17 Thread Sasha Levin
From: Jeff Layton 

[ Upstream commit 9a6bed4fe0c8bf57785cbc4db9f86086cb9b193d ]

If the caller passes in a NULL cap_reservation, and we can't allocate
one then ensure that we fail gracefully.

Signed-off-by: Jeff Layton 
Signed-off-by: Ilya Dryomov 
Signed-off-by: Sasha Levin 
---
 fs/ceph/inode.c | 5 -
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
index 1e438e0faf77e..3c24fb77ef325 100644
--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c
@@ -764,8 +764,11 @@ static int fill_inode(struct inode *inode, struct page 
*locked_page,
info_caps = le32_to_cpu(info->cap.caps);
 
/* prealloc new cap struct */
-   if (info_caps && ceph_snap(inode) == CEPH_NOSNAP)
+   if (info_caps && ceph_snap(inode) == CEPH_NOSNAP) {
new_cap = ceph_get_cap(mdsc, caps_reservation);
+   if (!new_cap)
+   return -ENOMEM;
+   }
 
/*
 * prealloc xattr data, if it looks like we'll need it.  only
-- 
2.25.1



[PATCH AUTOSEL 4.19 078/206] crypto: chelsio - This fixes the kernel panic which occurs during a libkcapi test

2020-09-17 Thread Sasha Levin
From: Ayush Sawal 

[ Upstream commit 9195189e00a7db55e7d448cee973cae87c5a3c71 ]

The libkcapi test which causes the kernel panic is the
aead asynchronous vmsplice multiple test.

./bin/kcapi  -v -d 4 -x 10   -c "ccm(aes)"
-q 4edb58e8d5eb6bc711c43a6f3693daebde2e5524f1b55297abb29f003236e43d
-t a7877c99 -n 674742abd0f5ba -k 2861fd0253705d7875c95ba8a53171b4
-a fb7bc304a3909e66e2e0c5ef952712dd884ce3e7324171369f2c5db1adc48c7d

This patch avoids DMA mapping of a zero-length sg, which causes the panic,
by using sg_nents_for_len(), which maps only up to a specific length.

Signed-off-by: Ayush Sawal 
Signed-off-by: Herbert Xu 
Signed-off-by: Sasha Levin 
---
 drivers/crypto/chelsio/chcr_algo.c | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/drivers/crypto/chelsio/chcr_algo.c 
b/drivers/crypto/chelsio/chcr_algo.c
index 9b3c259f081d3..ee5087504 100644
--- a/drivers/crypto/chelsio/chcr_algo.c
+++ b/drivers/crypto/chelsio/chcr_algo.c
@@ -2418,8 +2418,9 @@ int chcr_aead_dma_map(struct device *dev,
else
reqctx->b0_dma = 0;
if (req->src == req->dst) {
-   error = dma_map_sg(dev, req->src, sg_nents(req->src),
-  DMA_BIDIRECTIONAL);
+   error = dma_map_sg(dev, req->src,
+   sg_nents_for_len(req->src, dst_size),
+   DMA_BIDIRECTIONAL);
if (!error)
goto err;
} else {
-- 
2.25.1



[PATCH AUTOSEL 4.19 053/206] dmaengine: zynqmp_dma: fix burst length configuration

2020-09-17 Thread Sasha Levin
From: Matthias Fend 

[ Upstream commit cc88525ebffc757e00cc5a5d61da6271646c7f5f ]

Since the DMA engine expects the burst length register content as a
power-of-2 value, the burst length needs to be converted first.
Additionally, add a burst length range check to avoid corrupting unrelated
register bits.
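
As a worked illustration (values and helper name invented): a requested
maxburst of 16 beats is first range-checked and then encoded as its log2
before being written into the register field, so the field holds 4, not 16:

static u32 encode_burst_len(u32 maxburst)
{
	u32 burst = clamp(maxburst, 1U, 32768U);	/* range check */

	return __ilog2_u32(burst);			/* e.g. 16 -> 4 */
}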

Signed-off-by: Matthias Fend 
Link: 
https://lore.kernel.org/r/20200115102249.24398-1-matthias.f...@wolfvision.net
Signed-off-by: Vinod Koul 
Signed-off-by: Sasha Levin 
---
 drivers/dma/xilinx/zynqmp_dma.c | 24 +++-
 1 file changed, 15 insertions(+), 9 deletions(-)

diff --git a/drivers/dma/xilinx/zynqmp_dma.c b/drivers/dma/xilinx/zynqmp_dma.c
index 73de6a6179fcd..e002ff8413e2a 100644
--- a/drivers/dma/xilinx/zynqmp_dma.c
+++ b/drivers/dma/xilinx/zynqmp_dma.c
@@ -127,10 +127,12 @@
 /* Max transfer size per descriptor */
 #define ZYNQMP_DMA_MAX_TRANS_LEN   0x4000
 
+/* Max burst lengths */
+#define ZYNQMP_DMA_MAX_DST_BURST_LEN 32768U
+#define ZYNQMP_DMA_MAX_SRC_BURST_LEN 32768U
+
 /* Reset values for data attributes */
 #define ZYNQMP_DMA_AXCACHE_VAL 0xF
-#define ZYNQMP_DMA_ARLEN_RST_VAL   0xF
-#define ZYNQMP_DMA_AWLEN_RST_VAL   0xF
 
 #define ZYNQMP_DMA_SRC_ISSUE_RST_VAL   0x1F
 
@@ -536,17 +538,19 @@ static void zynqmp_dma_handle_ovfl_int(struct 
zynqmp_dma_chan *chan, u32 status)
 
 static void zynqmp_dma_config(struct zynqmp_dma_chan *chan)
 {
-   u32 val;
+   u32 val, burst_val;
 
val = readl(chan->regs + ZYNQMP_DMA_CTRL0);
val |= ZYNQMP_DMA_POINT_TYPE_SG;
writel(val, chan->regs + ZYNQMP_DMA_CTRL0);
 
val = readl(chan->regs + ZYNQMP_DMA_DATA_ATTR);
+   burst_val = __ilog2_u32(chan->src_burst_len);
val = (val & ~ZYNQMP_DMA_ARLEN) |
-   (chan->src_burst_len << ZYNQMP_DMA_ARLEN_OFST);
+   ((burst_val << ZYNQMP_DMA_ARLEN_OFST) & ZYNQMP_DMA_ARLEN);
+   burst_val = __ilog2_u32(chan->dst_burst_len);
val = (val & ~ZYNQMP_DMA_AWLEN) |
-   (chan->dst_burst_len << ZYNQMP_DMA_AWLEN_OFST);
+   ((burst_val << ZYNQMP_DMA_AWLEN_OFST) & ZYNQMP_DMA_AWLEN);
writel(val, chan->regs + ZYNQMP_DMA_DATA_ATTR);
 }
 
@@ -562,8 +566,10 @@ static int zynqmp_dma_device_config(struct dma_chan *dchan,
 {
struct zynqmp_dma_chan *chan = to_chan(dchan);
 
-   chan->src_burst_len = config->src_maxburst;
-   chan->dst_burst_len = config->dst_maxburst;
+   chan->src_burst_len = clamp(config->src_maxburst, 1U,
+   ZYNQMP_DMA_MAX_SRC_BURST_LEN);
+   chan->dst_burst_len = clamp(config->dst_maxburst, 1U,
+   ZYNQMP_DMA_MAX_DST_BURST_LEN);
 
return 0;
 }
@@ -884,8 +890,8 @@ static int zynqmp_dma_chan_probe(struct zynqmp_dma_device 
*zdev,
return PTR_ERR(chan->regs);
 
chan->bus_width = ZYNQMP_DMA_BUS_WIDTH_64;
-   chan->dst_burst_len = ZYNQMP_DMA_AWLEN_RST_VAL;
-   chan->src_burst_len = ZYNQMP_DMA_ARLEN_RST_VAL;
+   chan->dst_burst_len = ZYNQMP_DMA_MAX_DST_BURST_LEN;
+   chan->src_burst_len = ZYNQMP_DMA_MAX_SRC_BURST_LEN;
err = of_property_read_u32(node, "xlnx,bus-width", &chan->bus_width);
if (err < 0) {
dev_err(&pdev->dev, "missing xlnx,bus-width property\n");
-- 
2.25.1



[PATCH AUTOSEL 4.19 036/206] scsi: pm80xx: Cleanup command when a reset times out

2020-09-17 Thread Sasha Levin
From: peter chang 

[ Upstream commit 51c1c5f6ed64c2b65a8cf89dac136273d25ca540 ]

Added a fix so that if the driver properly sent the abort, it tries to remove
the command from the firmware's list of outstanding commands regardless of the
abort status. This means that the task gets freed 'now' rather than possibly
getting freed later, when the SCSI layer thinks it is leaked but it is still
valid.

Link: https://lore.kernel.org/r/20191114100910.6153-10-deepak.u...@microchip.com
Acked-by: Jack Wang 
Signed-off-by: peter chang 
Signed-off-by: Deepak Ukey 
Signed-off-by: Viswas G 
Signed-off-by: Martin K. Petersen 
Signed-off-by: Sasha Levin 
---
 drivers/scsi/pm8001/pm8001_sas.c | 50 +++-
 1 file changed, 37 insertions(+), 13 deletions(-)

diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c
index 5be4212312cb0..49cf2b65de223 100644
--- a/drivers/scsi/pm8001/pm8001_sas.c
+++ b/drivers/scsi/pm8001/pm8001_sas.c
@@ -1184,8 +1184,8 @@ int pm8001_abort_task(struct sas_task *task)
pm8001_ha = pm8001_find_ha_by_dev(dev);
device_id = pm8001_dev->device_id;
phy_id = pm8001_dev->attached_phy;
-   rc = pm8001_find_tag(task, &tag);
-   if (rc == 0) {
+   ret = pm8001_find_tag(task, &tag);
+   if (ret == 0) {
pm8001_printk("no tag for task:%p\n", task);
return TMF_RESP_FUNC_FAILED;
}
@@ -1223,26 +1223,50 @@ int pm8001_abort_task(struct sas_task *task)
 
/* 2. Send Phy Control Hard Reset */
reinit_completion(&completion);
+   phy->port_reset_status = PORT_RESET_TMO;
phy->reset_success = false;
phy->enable_completion = &completion;
phy->reset_completion = &completion_reset;
ret = PM8001_CHIP_DISP->phy_ctl_req(pm8001_ha, phy_id,
PHY_HARD_RESET);
-   if (ret)
-   goto out;
-   PM8001_MSG_DBG(pm8001_ha,
-   pm8001_printk("Waiting for local phy ctl\n"));
-   wait_for_completion(&completion);
-   if (!phy->reset_success)
+   if (ret) {
+   phy->enable_completion = NULL;
+   phy->reset_completion = NULL;
goto out;
+   }
 
-   /* 3. Wait for Port Reset complete / Port reset TMO */
+   /* In the case of the reset timeout/fail we still
+* abort the command at the firmware. The assumption
+* here is that the drive is off doing something so
+* that it's not processing requests, and we want to
+* avoid getting a completion for this and either
+* leaking the task in libsas or losing the race and
+* getting a double free.
+*/
PM8001_MSG_DBG(pm8001_ha,
+   pm8001_printk("Waiting for local phy ctl\n"));
+   ret = wait_for_completion_timeout(&completion,
+   PM8001_TASK_TIMEOUT * HZ);
+   if (!ret || !phy->reset_success) {
+   phy->enable_completion = NULL;
+   phy->reset_completion = NULL;
+   } else {
+   /* 3. Wait for Port Reset complete or
+* Port reset TMO
+*/
+   PM8001_MSG_DBG(pm8001_ha,
pm8001_printk("Waiting for Port reset\n"));
-   wait_for_completion(&completion_reset);
-   if (phy->port_reset_status) {
-   pm8001_dev_gone_notify(dev);
-   goto out;
+   ret = wait_for_completion_timeout(
+   &completion_reset,
+   PM8001_TASK_TIMEOUT * HZ);
+   if (!ret)
+   phy->reset_completion = NULL;
+   WARN_ON(phy->port_reset_status ==
+   PORT_RESET_TMO);
+   if (phy->port_reset_status == PORT_RESET_TMO) {
+   pm8001_dev_gone_notify(dev);
+   goto out;
+   }
}
 
/*
-- 
2.25.1



[PATCH AUTOSEL 4.19 052/206] efi/arm: Defer probe of PCIe backed efifb on DT systems

2020-09-17 Thread Sasha Levin
From: Ard Biesheuvel 

[ Upstream commit 64c8a0cd0a535891d5905c3a1651150f0f141439 ]

The new of_devlink support breaks PCIe probing on ARM platforms booting
via UEFI if the firmware exposes a EFI framebuffer that is backed by a
PCI device. The reason is that the probing order gets reversed,
resulting in a resource conflict on the framebuffer memory window when
the PCIe probes last, causing it to give up entirely.

Given that we rely on PCI quirks to deal with EFI framebuffers that get
moved around in memory, we cannot simply drop the memory reservation, so
instead, let's use the device link infrastructure to register this
dependency, and force the probing to occur in the expected order.

Co-developed-by: Saravana Kannan 
Signed-off-by: Ard Biesheuvel 
Signed-off-by: Saravana Kannan 
Signed-off-by: Ard Biesheuvel 
Signed-off-by: Ingo Molnar 
Link: https://lore.kernel.org/r/20200113172245.27925-9-a...@kernel.org
Signed-off-by: Sasha Levin 
---
 drivers/firmware/efi/arm-init.c | 107 ++--
 1 file changed, 103 insertions(+), 4 deletions(-)

diff --git a/drivers/firmware/efi/arm-init.c b/drivers/firmware/efi/arm-init.c
index 1a6a77df8a5e8..85533ec55396a 100644
--- a/drivers/firmware/efi/arm-init.c
+++ b/drivers/firmware/efi/arm-init.c
@@ -14,10 +14,12 @@
 #define pr_fmt(fmt)"efi: " fmt
 
 #include 
+#include 
 #include 
 #include 
 #include 
 #include 
+#include 
 #include 
 #include 
 #include 
@@ -271,15 +273,112 @@ void __init efi_init(void)
efi_memmap_unmap();
 }
 
+static bool efifb_overlaps_pci_range(const struct of_pci_range *range)
+{
+   u64 fb_base = screen_info.lfb_base;
+
+   if (screen_info.capabilities & VIDEO_CAPABILITY_64BIT_BASE)
+   fb_base |= (u64)(unsigned long)screen_info.ext_lfb_base << 32;
+
+   return fb_base >= range->cpu_addr &&
+  fb_base < (range->cpu_addr + range->size);
+}
+
+static struct device_node *find_pci_overlap_node(void)
+{
+   struct device_node *np;
+
+   for_each_node_by_type(np, "pci") {
+   struct of_pci_range_parser parser;
+   struct of_pci_range range;
+   int err;
+
+   err = of_pci_range_parser_init(, np);
+   err = of_pci_range_parser_init(&parser, np);
+   pr_warn("of_pci_range_parser_init() failed: %d\n", err);
+   continue;
+   }
+
+   for_each_of_pci_range(&parser, &range)
+   if (efifb_overlaps_pci_range(&range))
+   return np;
+   }
+   return NULL;
+}
+
+/*
+ * If the efifb framebuffer is backed by a PCI graphics controller, we have
+ * to ensure that this relation is expressed using a device link when
+ * running in DT mode, or the probe order may be reversed, resulting in a
+ * resource reservation conflict on the memory window that the efifb
+ * framebuffer steals from the PCIe host bridge.
+ */
+static int efifb_add_links(const struct fwnode_handle *fwnode,
+  struct device *dev)
+{
+   struct device_node *sup_np;
+   struct device *sup_dev;
+
+   sup_np = find_pci_overlap_node();
+
+   /*
+* If there's no PCI graphics controller backing the efifb, we are
+* done here.
+*/
+   if (!sup_np)
+   return 0;
+
+   sup_dev = get_dev_from_fwnode(&sup_np->fwnode);
+   of_node_put(sup_np);
+
+   /*
+* Return -ENODEV if the PCI graphics controller device hasn't been
+* registered yet.  This ensures that efifb isn't allowed to probe
+* and this function is retried again when new devices are
+* registered.
+*/
+   if (!sup_dev)
+   return -ENODEV;
+
+   /*
+* If this fails, retrying this function at a later point won't
+* change anything. So, don't return an error after this.
+*/
+   if (!device_link_add(dev, sup_dev, 0))
+   dev_warn(dev, "device_link_add() failed\n");
+
+   put_device(sup_dev);
+
+   return 0;
+}
+
+static const struct fwnode_operations efifb_fwnode_ops = {
+   .add_links = efifb_add_links,
+};
+
+static struct fwnode_handle efifb_fwnode = {
+   .ops = &efifb_fwnode_ops,
+};
+
 static int __init register_gop_device(void)
 {
-   void *pd;
+   struct platform_device *pd;
+   int err;
 
if (screen_info.orig_video_isVGA != VIDEO_TYPE_EFI)
return 0;
 
-   pd = platform_device_register_data(NULL, "efi-framebuffer", 0,
-  &screen_info, sizeof(screen_info));
-   return PTR_ERR_OR_ZERO(pd);
+   pd = platform_device_alloc("efi-framebuffer", 0);
+   if (!pd)
+   return -ENOMEM;
+
+   if (IS_ENABLED(CONFIG_PCI))
+   pd->dev.fwnode = &efifb_fwnode;
+
+   err = platform_device_add_data(pd, &screen_info, sizeof(screen_info));
+   if (err)
+   return err;
+
+   return platform_device_add(pd);
 }
 

[PATCH AUTOSEL 4.19 035/206] gfs2: clean up iopen glock mess in gfs2_create_inode

2020-09-17 Thread Sasha Levin
From: Bob Peterson 

[ Upstream commit 2c47c1be51fbded1f7baa2ceaed90f97932f79be ]

Before this patch, gfs2_create_inode had a use-after-free for the
iopen glock in some error paths because it did this:

gfs2_glock_put(io_gl);
fail_gunlock2:
if (io_gl)
clear_bit(GLF_INODE_CREATING, &io_gl->gl_flags);

In some cases, the io_gl was used for create and only had one
reference, so the glock might be freed before the clear_bit().
This patch tries to straighten it out by only jumping to the
error paths where iopen is properly set, and moving the
gfs2_glock_put after the clear_bit.

Signed-off-by: Bob Peterson 
Signed-off-by: Andreas Gruenbacher 
Signed-off-by: Sasha Levin 
---
 fs/gfs2/inode.c | 13 +++--
 1 file changed, 7 insertions(+), 6 deletions(-)

diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
index d968b5c5df217..a52b8b0dceeb9 100644
--- a/fs/gfs2/inode.c
+++ b/fs/gfs2/inode.c
@@ -715,7 +715,7 @@ static int gfs2_create_inode(struct inode *dir, struct 
dentry *dentry,
 
error = gfs2_trans_begin(sdp, blocks, 0);
if (error)
-   goto fail_gunlock2;
+   goto fail_free_inode;
 
if (blocks > 1) {
ip->i_eattr = ip->i_no_addr + 1;
@@ -726,7 +726,7 @@ static int gfs2_create_inode(struct inode *dir, struct 
dentry *dentry,
 
error = gfs2_glock_get(sdp, ip->i_no_addr, &gfs2_iopen_glops, CREATE, &io_gl);
if (error)
-   goto fail_gunlock2;
+   goto fail_free_inode;
 
BUG_ON(test_and_set_bit(GLF_INODE_CREATING, &io_gl->gl_flags));
 
@@ -735,7 +735,6 @@ static int gfs2_create_inode(struct inode *dir, struct 
dentry *dentry,
goto fail_gunlock2;
 
glock_set_object(ip->i_iopen_gh.gh_gl, ip);
-   gfs2_glock_put(io_gl);
gfs2_set_iop(inode);
insert_inode_hash(inode);
 
@@ -768,6 +767,8 @@ static int gfs2_create_inode(struct inode *dir, struct 
dentry *dentry,
 
mark_inode_dirty(inode);
d_instantiate(dentry, inode);
+   /* After instantiate, errors should result in evict which will destroy
+* both inode and iopen glocks properly. */
if (file) {
file->f_mode |= FMODE_CREATED;
error = finish_open(file, dentry, gfs2_open_common);
@@ -775,15 +776,15 @@ static int gfs2_create_inode(struct inode *dir, struct 
dentry *dentry,
gfs2_glock_dq_uninit(ghs);
gfs2_glock_dq_uninit(ghs + 1);
clear_bit(GLF_INODE_CREATING, &io_gl->gl_flags);
+   gfs2_glock_put(io_gl);
return error;
 
 fail_gunlock3:
glock_clear_object(io_gl, ip);
gfs2_glock_dq_uninit(&ip->i_iopen_gh);
-   gfs2_glock_put(io_gl);
 fail_gunlock2:
-   if (io_gl)
-   clear_bit(GLF_INODE_CREATING, &io_gl->gl_flags);
+   clear_bit(GLF_INODE_CREATING, &io_gl->gl_flags);
+   gfs2_glock_put(io_gl);
 fail_free_inode:
if (ip->i_gl) {
glock_clear_object(ip->i_gl, ip);
-- 
2.25.1



[PATCH AUTOSEL 4.19 021/206] mfd: mfd-core: Protect against NULL call-back function pointer

2020-09-17 Thread Sasha Levin
From: Lee Jones 

[ Upstream commit b195e101580db390f50b0d587b7f66f241d2bc88 ]

If a child device calls mfd_cell_{en,dis}able() without an appropriate
call-back being set, we are likely to encounter a panic.  Avoid this
by adding suitable checking.

Signed-off-by: Lee Jones 
Reviewed-by: Daniel Thompson 
Reviewed-by: Mark Brown 
Signed-off-by: Sasha Levin 
---
 drivers/mfd/mfd-core.c | 10 ++
 1 file changed, 10 insertions(+)

diff --git a/drivers/mfd/mfd-core.c b/drivers/mfd/mfd-core.c
index 182973df1aed4..77c965c6a65f1 100644
--- a/drivers/mfd/mfd-core.c
+++ b/drivers/mfd/mfd-core.c
@@ -32,6 +32,11 @@ int mfd_cell_enable(struct platform_device *pdev)
const struct mfd_cell *cell = mfd_get_cell(pdev);
int err = 0;
 
+   if (!cell->enable) {
+   dev_dbg(&pdev->dev, "No .enable() call-back registered\n");
+   return 0;
+   }
+
/* only call enable hook if the cell wasn't previously enabled */
if (atomic_inc_return(cell->usage_count) == 1)
err = cell->enable(pdev);
@@ -49,6 +54,11 @@ int mfd_cell_disable(struct platform_device *pdev)
const struct mfd_cell *cell = mfd_get_cell(pdev);
int err = 0;
 
+   if (!cell->disable) {
+   dev_dbg(&pdev->dev, "No .disable() call-back registered\n");
+   return 0;
+   }
+
/* only disable if no other clients are using it */
if (atomic_dec_return(cell->usage_count) == 0)
err = cell->disable(pdev);
-- 
2.25.1



[PATCH AUTOSEL 4.19 032/206] RDMA/iw_cgxb4: Fix an error handling path in 'c4iw_connect()'

2020-09-17 Thread Sasha Levin
From: Christophe JAILLET 

[ Upstream commit 9067f2f0b41d7e817fc8c5259bab1f17512b0147 ]

We should jump to fail3 in order to undo the 'xa_insert_irq()' call.

Link: 
https://lore.kernel.org/r/20190923190746.10964-1-christophe.jail...@wanadoo.fr
Signed-off-by: Christophe JAILLET 
Signed-off-by: Jason Gunthorpe 
Signed-off-by: Sasha Levin 
---
 drivers/infiniband/hw/cxgb4/cm.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index 16145b0a14583..3fd3dfa3478b7 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -3293,7 +3293,7 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct 
iw_cm_conn_param *conn_param)
if (raddr->sin_addr.s_addr == htonl(INADDR_ANY)) {
err = pick_local_ipaddrs(dev, cm_id);
if (err)
-   goto fail2;
+   goto fail3;
}
 
/* find a route */
@@ -3315,7 +3315,7 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct 
iw_cm_conn_param *conn_param)
if (ipv6_addr_type(&raddr6->sin6_addr) == IPV6_ADDR_ANY) {
err = pick_local_ip6addrs(dev, cm_id);
if (err)
-   goto fail2;
+   goto fail3;
}
 
/* find a route */
-- 
2.25.1



[PATCH AUTOSEL 4.19 037/206] debugfs: Fix !DEBUG_FS debugfs_create_automount

2020-09-17 Thread Sasha Levin
From: Kusanagi Kouichi 

[ Upstream commit 4250b047039d324e0ff65267c8beb5bad5052a86 ]

If DEBUG_FS=n, compile fails with the following error:

kernel/trace/trace.c: In function 'tracing_init_dentry':
kernel/trace/trace.c:8658:9: error: passing argument 3 of 
'debugfs_create_automount' from incompatible pointer type 
[-Werror=incompatible-pointer-types]
 8658 | trace_automount, NULL);
  | ^~~
  | |
  | struct vfsmount * (*)(struct dentry *, void *)
In file included from kernel/trace/trace.c:24:
./include/linux/debugfs.h:206:25: note: expected 'struct vfsmount * (*)(void 
*)' but argument is of type 'struct vfsmount * (*)(struct dentry *, void *)'
  206 |  struct vfsmount *(*f)(void *),
  |  ~~~^~

Signed-off-by: Kusanagi Kouichi 
Link: 
https://lore.kernel.org/r/20191121102021787.mlmy.25002.ppp.dion.ne...@dmta0003.auone-net.jp
Signed-off-by: Greg Kroah-Hartman 
Signed-off-by: Sasha Levin 
---
 include/linux/debugfs.h | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/include/linux/debugfs.h b/include/linux/debugfs.h
index 3b0ba54cc4d5b..3bc1034c57e66 100644
--- a/include/linux/debugfs.h
+++ b/include/linux/debugfs.h
@@ -54,6 +54,8 @@ static const struct file_operations __fops = {
\
.llseek  = no_llseek,   \
 }
 
+typedef struct vfsmount *(*debugfs_automount_t)(struct dentry *, void *);
+
 #if defined(CONFIG_DEBUG_FS)
 
 struct dentry *debugfs_lookup(const char *name, struct dentry *parent);
@@ -75,7 +77,6 @@ struct dentry *debugfs_create_dir(const char *name, struct 
dentry *parent);
 struct dentry *debugfs_create_symlink(const char *name, struct dentry *parent,
  const char *dest);
 
-typedef struct vfsmount *(*debugfs_automount_t)(struct dentry *, void *);
 struct dentry *debugfs_create_automount(const char *name,
struct dentry *parent,
debugfs_automount_t f,
@@ -204,7 +205,7 @@ static inline struct dentry *debugfs_create_symlink(const 
char *name,
 
 static inline struct dentry *debugfs_create_automount(const char *name,
struct dentry *parent,
-   struct vfsmount *(*f)(void *),
+   debugfs_automount_t f,
void *data)
 {
return ERR_PTR(-ENODEV);
-- 
2.25.1



[PATCH AUTOSEL 4.19 047/206] media: ti-vpe: cal: Restrict DMA to avoid memory corruption

2020-09-17 Thread Sasha Levin
From: Nikhil Devshatwar 

[ Upstream commit 6e72eab2e7b7a157d554b8f9faed7676047be7c1 ]

When setting DMA for video capture from CSI channel, if the DMA size
is not given, it ends up writing as much data as sent by the camera.

This may lead to overwriting the buffers causing memory corruption.
Observed green lines on the default framebuffer.

Restrict the DMA to maximum height as specified in the S_FMT ioctl.

Signed-off-by: Nikhil Devshatwar 
Signed-off-by: Benoit Parrot 
Signed-off-by: Hans Verkuil 
Signed-off-by: Mauro Carvalho Chehab 
Signed-off-by: Sasha Levin 
---
 drivers/media/platform/ti-vpe/cal.c | 6 --
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/drivers/media/platform/ti-vpe/cal.c 
b/drivers/media/platform/ti-vpe/cal.c
index be3155275a6ba..d945323fc437d 100644
--- a/drivers/media/platform/ti-vpe/cal.c
+++ b/drivers/media/platform/ti-vpe/cal.c
@@ -684,12 +684,13 @@ static void pix_proc_config(struct cal_ctx *ctx)
 }
 
 static void cal_wr_dma_config(struct cal_ctx *ctx,
- unsigned int width)
+ unsigned int width, unsigned int height)
 {
u32 val;
 
val = reg_read(ctx->dev, CAL_WR_DMA_CTRL(ctx->csi2_port));
set_field(&val, ctx->csi2_port, CAL_WR_DMA_CTRL_CPORT_MASK);
+   set_field(&val, height, CAL_WR_DMA_CTRL_YSIZE_MASK);
set_field(&val, CAL_WR_DMA_CTRL_DTAG_PIX_DAT,
  CAL_WR_DMA_CTRL_DTAG_MASK);
set_field(&val, CAL_WR_DMA_CTRL_MODE_CONST,
@@ -1315,7 +1316,8 @@ static int cal_start_streaming(struct vb2_queue *vq, 
unsigned int count)
csi2_lane_config(ctx);
csi2_ctx_config(ctx);
pix_proc_config(ctx);
-   cal_wr_dma_config(ctx, ctx->v_fmt.fmt.pix.bytesperline);
+   cal_wr_dma_config(ctx, ctx->v_fmt.fmt.pix.bytesperline,
+ ctx->v_fmt.fmt.pix.height);
cal_wr_dma_addr(ctx, addr);
csi2_ppi_enable(ctx);
 
-- 
2.25.1



[PATCH AUTOSEL 4.19 011/206] media: smiapp: Fix error handling at NVM reading

2020-09-17 Thread Sasha Levin
From: Sakari Ailus 

[ Upstream commit a5b1d5413534607b05fb34470ff62bf395f5c8d0 ]

If NVM reading failed, the device was left powered on. Fix that.

Signed-off-by: Sakari Ailus 
Signed-off-by: Mauro Carvalho Chehab 
Signed-off-by: Sasha Levin 
---
 drivers/media/i2c/smiapp/smiapp-core.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/drivers/media/i2c/smiapp/smiapp-core.c 
b/drivers/media/i2c/smiapp/smiapp-core.c
index 4731e1c72f960..0a434bdce3b3b 100644
--- a/drivers/media/i2c/smiapp/smiapp-core.c
+++ b/drivers/media/i2c/smiapp/smiapp-core.c
@@ -2337,11 +2337,12 @@ smiapp_sysfs_nvm_read(struct device *dev, struct 
device_attribute *attr,
if (rval < 0) {
if (rval != -EBUSY && rval != -EAGAIN)
pm_runtime_set_active(&client->dev);
-   pm_runtime_put(&client->dev);
+   pm_runtime_put_noidle(&client->dev);
return -ENODEV;
}
 
if (smiapp_read_nvm(sensor, sensor->nvm)) {
+   pm_runtime_put(&client->dev);
dev_err(&client->dev, "nvm read failed\n");
return -ENODEV;
}
-- 
2.25.1



[PATCH AUTOSEL 5.4 326/330] perf parse-events: Use strcmp() to compare the PMU name

2020-09-17 Thread Sasha Levin
From: Jin Yao 

[ Upstream commit 8510895bafdbf7c4dd24c22946d925691135c2b2 ]

A big uncore event group is split into multiple small groups which only
include the uncore events from the same PMU. This has been supported in
the commit 3cdc5c2cb924a ("perf parse-events: Handle uncore event
aliases in small groups properly").

If the event's PMU name starts to repeat, it must be a new event.
That can be used to distinguish the leader from other members.
But now it only compares the pointer of pmu_name
(leader->pmu_name == evsel->pmu_name).

If we use "perf stat -M LLC_MISSES.PCIE_WRITE -a" on cascadelakex,
the event list is:

  evsel->name   evsel->pmu_name
  ---
  unc_iio_data_req_of_cpu.mem_write.part0   uncore_iio_4 (as leader)
  unc_iio_data_req_of_cpu.mem_write.part0   uncore_iio_2
  unc_iio_data_req_of_cpu.mem_write.part0   uncore_iio_0
  unc_iio_data_req_of_cpu.mem_write.part0   uncore_iio_5
  unc_iio_data_req_of_cpu.mem_write.part0   uncore_iio_3
  unc_iio_data_req_of_cpu.mem_write.part0   uncore_iio_1
  unc_iio_data_req_of_cpu.mem_write.part1   uncore_iio_4
  ..

For the event "unc_iio_data_req_of_cpu.mem_write.part1" with
"uncore_iio_4", it should be the event from PMU "uncore_iio_4".
It's not a new leader for this PMU.

But if we use "(leader->pmu_name == evsel->pmu_name)", the check
would be failed and the event is stored to leaders[] as a new
PMU leader.

So this patch uses strcmp to compare the PMU name between events.
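
A minimal user-space illustration (not perf code) of why the pointer compare
misgroups events whose PMU names are equal strings in separate allocations:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	char *leader_pmu = strdup("uncore_iio_4");
	char *evsel_pmu  = strdup("uncore_iio_4");

	/* different allocations, same contents */
	printf("pointer compare matches: %d\n", leader_pmu == evsel_pmu);	  /* 0 */
	printf("strcmp compare matches:  %d\n", !strcmp(leader_pmu, evsel_pmu)); /* 1 */

	free(leader_pmu);
	free(evsel_pmu);
	return 0;
}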

Fixes: d4953f7ef1a2 ("perf parse-events: Fix 3 use after frees found with clang 
ASAN")
Signed-off-by: Jin Yao 
Acked-by: Jiri Olsa 
Cc: Alexander Shishkin 
Cc: Andi Kleen 
Cc: Jin Yao 
Cc: Kan Liang 
Cc: Peter Zijlstra 
Link: http://lore.kernel.org/lkml/20200430003618.17002-1-yao@linux.intel.com
Signed-off-by: Arnaldo Carvalho de Melo 
Signed-off-by: Sasha Levin 
---
 tools/perf/util/parse-events.c | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
index f16748cfcb262..3fb9d53666d15 100644
--- a/tools/perf/util/parse-events.c
+++ b/tools/perf/util/parse-events.c
@@ -1507,12 +1507,11 @@ parse_events__set_leader_for_uncore_aliase(char *name, 
struct list_head *list,
 * event. That can be used to distinguish the leader from
 * other members, even they have the same event name.
 */
-   if ((leader != evsel) && (leader->pmu_name == evsel->pmu_name)) 
{
+   if ((leader != evsel) &&
+   !strcmp(leader->pmu_name, evsel->pmu_name)) {
is_leader = false;
continue;
}
-   /* The name is always alias name */
-   WARN_ON(strcmp(leader->name, evsel->name));
 
/* Store the leader event for each PMU */
leaders[nr_pmu++] = (uintptr_t) evsel;
-- 
2.25.1



[PATCH AUTOSEL 4.19 015/206] scsi: fnic: fix use after free

2020-09-17 Thread Sasha Levin
From: Pan Bian 

[ Upstream commit ec990306f77fd4c58c3b27cc3b3c53032d6e6670 ]

The memory chunk io_req is released by mempool_free. Accessing
io_req->start_time will result in a use after free bug. The variable
start_time is a backup of the timestamp. So, use start_time here to
avoid use after free.
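
The rule the fix follows, sketched with invented names: snapshot what you
still need before handing the object back to its pool, then use only the
snapshot:

struct my_io_req {
	unsigned long start_time;
	/* ... */
};

static u64 my_io_duration_ms(struct my_io_req *io_req, mempool_t *pool)
{
	unsigned long start_time = io_req->start_time;	/* take the backup first */

	mempool_free(io_req, pool);	/* io_req must not be touched after this */

	return jiffies_to_msecs(jiffies) - jiffies_to_msecs(start_time);
}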

Link: 
https://lore.kernel.org/r/1572881182-37664-1-git-send-email-bianpan2...@163.com
Signed-off-by: Pan Bian 
Reviewed-by: Satish Kharat 
Signed-off-by: Martin K. Petersen 
Signed-off-by: Sasha Levin 
---
 drivers/scsi/fnic/fnic_scsi.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c
index 73ffc16ec0225..b521fc7650cb9 100644
--- a/drivers/scsi/fnic/fnic_scsi.c
+++ b/drivers/scsi/fnic/fnic_scsi.c
@@ -1034,7 +1034,8 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic 
*fnic,
atomic64_inc(&fnic_stats->io_stats.io_completions);
 
 
-   io_duration_time = jiffies_to_msecs(jiffies) - 
jiffies_to_msecs(io_req->start_time);
+   io_duration_time = jiffies_to_msecs(jiffies) -
+   jiffies_to_msecs(start_time);
 
if(io_duration_time <= 10)
atomic64_inc(&fnic_stats->io_stats.io_btw_0_to_10_msec);
-- 
2.25.1



[PATCH AUTOSEL 5.4 329/330] net: openvswitch: use div_u64() for 64-by-32 divisions

2020-09-17 Thread Sasha Levin
From: Tonghao Zhang 

[ Upstream commit 659d4587fe7233bfdff303744b20d6f41ad04362 ]

Compiling the kernel for the arm 32 platform produces the build warning
below. To fix that, use div_u64() for the division.
| net/openvswitch/meter.c:396: undefined reference to `__udivdi3'

[add more commit msg, change reported tag, and use div_u64 instead
of do_div by Tonghao]

Fixes: e57358873bb5d6ca ("net: openvswitch: use u64 for meter bucket")
Reported-by: kbuild test robot 
Signed-off-by: Tonghao Zhang 
Tested-by: Tonghao Zhang 
Signed-off-by: David S. Miller 
Signed-off-by: Sasha Levin 
---
 net/openvswitch/meter.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/net/openvswitch/meter.c b/net/openvswitch/meter.c
index b10734f18bbd6..541eea74ef7a6 100644
--- a/net/openvswitch/meter.c
+++ b/net/openvswitch/meter.c
@@ -252,7 +252,7 @@ static struct dp_meter *dp_meter_create(struct nlattr **a)
 * Start with a full bucket.
 */
band->bucket = (band->burst_size + band->rate) * 1000ULL;
-   band_max_delta_t = band->bucket / band->rate;
+   band_max_delta_t = div_u64(band->bucket, band->rate);
if (band_max_delta_t > meter->max_delta_t)
meter->max_delta_t = band_max_delta_t;
band++;
-- 
2.25.1



[PATCH AUTOSEL 5.4 327/330] ALSA: hda: Always use jackpoll helper for jack update after resume

2020-09-17 Thread Sasha Levin
From: Takashi Iwai 

[ Upstream commit 8d6762af302d69f76fa788a277a56a9d9cd275d5 ]

HD-audio codec driver applies a tricky procedure to forcibly perform
the runtime resume by mimicking the usage count even if the device has
been runtime-suspended beforehand.  This was needed to assure to
trigger the jack detection update after the system resume.

And recently we also applied the similar logic to the HD-audio
controller side.  However this seems leading to some inconsistency,
and eventually PCI controller gets screwed up.

This patch is an attempt to fix and clean up those behavior: instead
of the tricky runtime resume procedure, the existing jackpoll work is
scheduled when such a forced codec resume is required.  The jackpoll
work will power up the codec, and this alone should suffice for the
jack status update in usual cases.  If the extra polling is requested
(by checking codec->jackpoll_interval), the manual update is invoked
after that, and the codec is powered down again.

Also, we filter the spurious wake up of the codec from the controller
runtime resume by checking codec->relaxed_resume flag.  If this flag
is set, basically we don't need to wake up explicitly, but it's
supposed to be done via the audio component notifier.

Fixes: c4c8dd6ef807 ("ALSA: hda: Skip controller resume if not needed")
Link: https://lore.kernel.org/r/20200422203744.26299-1-ti...@suse.de
Signed-off-by: Takashi Iwai 
Signed-off-by: Sasha Levin 
---
 sound/pci/hda/hda_codec.c | 28 +---
 sound/pci/hda/hda_intel.c | 17 ++---
 2 files changed, 19 insertions(+), 26 deletions(-)

diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
index 12da263fb02ba..6da296def283e 100644
--- a/sound/pci/hda/hda_codec.c
+++ b/sound/pci/hda/hda_codec.c
@@ -641,8 +641,18 @@ static void hda_jackpoll_work(struct work_struct *work)
struct hda_codec *codec =
container_of(work, struct hda_codec, jackpoll_work.work);
 
-   snd_hda_jack_set_dirty_all(codec);
-   snd_hda_jack_poll_all(codec);
+   /* for non-polling trigger: we need nothing if already powered on */
+   if (!codec->jackpoll_interval && snd_hdac_is_power_on(&codec->core))
+   return;
+
+   /* the power-up/down sequence triggers the runtime resume */
+   snd_hda_power_up_pm(codec);
+   /* update jacks manually if polling is required, too */
+   if (codec->jackpoll_interval) {
+   snd_hda_jack_set_dirty_all(codec);
+   snd_hda_jack_poll_all(codec);
+   }
+   snd_hda_power_down_pm(codec);
 
if (!codec->jackpoll_interval)
return;
@@ -2958,18 +2968,14 @@ static int hda_codec_runtime_resume(struct device *dev)
 static int hda_codec_force_resume(struct device *dev)
 {
struct hda_codec *codec = dev_to_hda_codec(dev);
-   bool forced_resume = hda_codec_need_resume(codec);
int ret;
 
-   /* The get/put pair below enforces the runtime resume even if the
-* device hasn't been used at suspend time.  This trick is needed to
-* update the jack state change during the sleep.
-*/
-   if (forced_resume)
-   pm_runtime_get_noresume(dev);
ret = pm_runtime_force_resume(dev);
-   if (forced_resume)
-   pm_runtime_put(dev);
+   /* schedule jackpoll work for jack detection update */
+   if (codec->jackpoll_interval ||
+   (pm_runtime_suspended(dev) && hda_codec_need_resume(codec)))
+   schedule_delayed_work(&codec->jackpoll_work,
+ codec->jackpoll_interval);
return ret;
 }
 
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index a6e8aaa091c7d..754e4d1a86b57 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -1002,7 +1002,8 @@ static void __azx_runtime_resume(struct azx *chip, bool 
from_rt)
 
if (status && from_rt) {
list_for_each_codec(codec, &chip->bus)
-   if (status & (1 << codec->addr))
+   if (!codec->relaxed_resume &&
+   (status & (1 << codec->addr)))
schedule_delayed_work(&codec->jackpoll_work,
  codec->jackpoll_interval);
}
@@ -1041,9 +1042,7 @@ static int azx_suspend(struct device *dev)
 static int azx_resume(struct device *dev)
 {
struct snd_card *card = dev_get_drvdata(dev);
-   struct hda_codec *codec;
struct azx *chip;
-   bool forced_resume = false;
 
if (!azx_is_pm_ready(card))
return 0;
@@ -1055,19 +1054,7 @@ static int azx_resume(struct device *dev)
if (azx_acquire_irq(chip, 1) < 0)
return -EIO;
 
-   /* check for the forced resume */
-   list_for_each_codec(codec, &chip->bus) {
-   if (hda_codec_need_resume(codec)) {
-   forced_resume = true;
-   break;
-  

[PATCH AUTOSEL 5.4 330/330] nvme: explicitly update mpath disk capacity on revalidation

2020-09-17 Thread Sasha Levin
From: Anthony Iliopoulos 

[ Upstream commit 05b29021fba5e725dd385151ef00b6340229b500 ]

Commit 3b4b19721ec652 ("nvme: fix possible deadlock when I/O is
blocked") reverted multipath head disk revalidation due to deadlocks
caused by holding the bd_mutex during revalidate.

Updating the multipath disk blockdev size is still required though for
userspace to be able to observe any resizing while the device is
mounted. Directly update the bdev inode size to avoid unnecessarily
holding the bdev->bd_mutex.

Fixes: 3b4b19721ec652 ("nvme: fix possible deadlock when I/O is
blocked")

Signed-off-by: Anthony Iliopoulos 
Signed-off-by: Christoph Hellwig 
Signed-off-by: Sasha Levin 
---
 drivers/nvme/host/core.c |  1 +
 drivers/nvme/host/nvme.h | 13 +
 2 files changed, 14 insertions(+)

diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index bbf52e88f045a..038ef9d113388 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -1864,6 +1864,7 @@ static void __nvme_revalidate_disk(struct gendisk *disk, 
struct nvme_id_ns *id)
if (ns->head->disk) {
nvme_update_disk_info(ns->head->disk, ns, id);
blk_queue_stack_limits(ns->head->disk->queue, ns->queue);
+   nvme_mpath_update_disk_size(ns->head->disk);
}
 #endif
 }
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 7d57c42a641ca..aab976737f9a9 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -561,6 +561,16 @@ static inline void nvme_trace_bio_complete(struct request 
*req,
 req->bio, status);
 }
 
+static inline void nvme_mpath_update_disk_size(struct gendisk *disk)
+{
+   struct block_device *bdev = bdget_disk(disk, 0);
+
+   if (bdev) {
+   bd_set_size(bdev, get_capacity(disk) << SECTOR_SHIFT);
+   bdput(bdev);
+   }
+}
+
 extern struct device_attribute dev_attr_ana_grpid;
 extern struct device_attribute dev_attr_ana_state;
 extern struct device_attribute subsys_attr_iopolicy;
@@ -636,6 +646,9 @@ static inline void nvme_mpath_wait_freeze(struct 
nvme_subsystem *subsys)
 static inline void nvme_mpath_start_freeze(struct nvme_subsystem *subsys)
 {
 }
+static inline void nvme_mpath_update_disk_size(struct gendisk *disk)
+{
+}
 #endif /* CONFIG_NVME_MULTIPATH */
 
 #ifdef CONFIG_NVM
-- 
2.25.1



[PATCH AUTOSEL 5.4 300/330] wlcore: fix runtime pm imbalance in wl1271_tx_work

2020-09-17 Thread Sasha Levin
From: Dinghao Liu 

[ Upstream commit 9604617e998b49f7695fea1479ed82421ef8c9f0 ]

There are two error handling paths in this function. When
wlcore_tx_work_locked() returns an error code, we should
decrease the runtime PM usage counter the same way as the
error handling path beginning from pm_runtime_get_sync().
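
For reference, a minimal sketch of the balancing pattern the fix follows
(generic code; 'dev' and do_work() are placeholders, not this driver's API):

	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		pm_runtime_put_noidle(dev);	/* balance the usage counter */
		return ret;
	}

	ret = do_work(dev);
	if (ret < 0) {
		pm_runtime_put_noidle(dev);	/* same balance on this path */
		return ret;
	}

	pm_runtime_put(dev);
	return 0;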

Signed-off-by: Dinghao Liu 
Acked-by: Tony Lindgren 
Signed-off-by: Kalle Valo 
Link: https://lore.kernel.org/r/20200520124241.9931-1-dinghao@zju.edu.cn
Signed-off-by: Sasha Levin 
---
 drivers/net/wireless/ti/wlcore/tx.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/drivers/net/wireless/ti/wlcore/tx.c 
b/drivers/net/wireless/ti/wlcore/tx.c
index 90e56d4c3df3b..e20e18cd04aed 100644
--- a/drivers/net/wireless/ti/wlcore/tx.c
+++ b/drivers/net/wireless/ti/wlcore/tx.c
@@ -863,6 +863,7 @@ void wl1271_tx_work(struct work_struct *work)
 
ret = wlcore_tx_work_locked(wl);
if (ret < 0) {
+   pm_runtime_put_noidle(wl->dev);
wl12xx_queue_recovery_work(wl);
goto out;
}
-- 
2.25.1



[PATCH AUTOSEL 5.4 310/330] x86/speculation/mds: Mark mds_user_clear_cpu_buffers() __always_inline

2020-09-17 Thread Sasha Levin
From: Thomas Gleixner 

[ Upstream commit a7ef9ba986b5fae9d80f8a7b31db0423687efe4e ]

Prevent the compiler from uninlining and creating traceable/probable
functions as this is invoked _after_ context tracking switched to
CONTEXT_USER and rcu idle.

Signed-off-by: Thomas Gleixner 
Reviewed-by: Alexandre Chartre 
Acked-by: Peter Zijlstra 
Link: https://lkml.kernel.org/r/20200505134340.902709...@linutronix.de
Signed-off-by: Sasha Levin 
---
 arch/x86/include/asm/nospec-branch.h | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/arch/x86/include/asm/nospec-branch.h 
b/arch/x86/include/asm/nospec-branch.h
index 5c24a7b351665..b222a35959467 100644
--- a/arch/x86/include/asm/nospec-branch.h
+++ b/arch/x86/include/asm/nospec-branch.h
@@ -320,7 +320,7 @@ DECLARE_STATIC_KEY_FALSE(mds_idle_clear);
  * combination with microcode which triggers a CPU buffer flush when the
  * instruction is executed.
  */
-static inline void mds_clear_cpu_buffers(void)
+static __always_inline void mds_clear_cpu_buffers(void)
 {
static const u16 ds = __KERNEL_DS;
 
@@ -341,7 +341,7 @@ static inline void mds_clear_cpu_buffers(void)
  *
  * Clear CPU buffers if the corresponding static key is enabled
  */
-static inline void mds_user_clear_cpu_buffers(void)
+static __always_inline void mds_user_clear_cpu_buffers(void)
 {
if (static_branch_likely(&mds_user_clear))
mds_clear_cpu_buffers();
-- 
2.25.1



[PATCH AUTOSEL 5.4 308/330] rapidio: avoid data race between file operation callbacks and mport_cdev_add().

2020-09-17 Thread Sasha Levin
From: Madhuparna Bhowmik 

[ Upstream commit e1c3cdb26ab881b77486dc50370356a349077c74 ]

Fields of md (mport_dev) are set after cdev_device_add().  However, the
file operation callbacks can be called after cdev_device_add() and
therefore accesses to fields of md in the callbacks can race with the rest
of the mport_cdev_add() function.

One such example is INIT_LIST_HEAD(&md->portwrites) in mport_cdev_add();
the list is initialised after cdev_device_add().  This can race with
list_add_tail(&pw_filter->md_node, &md->portwrites) in
rio_mport_add_pw_filter(), which is called by unlocked_ioctl.

To avoid such data races use cdev_device_add() after initializing md.
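
As a simplified sketch of that rule (hypothetical fields, not the actual
mport_cdev code):

	/* 1. make the object fully usable first */
	INIT_LIST_HEAD(&md->portwrites);
	spin_lock_init(&md->pw_lock);		/* pw_lock is illustrative */
	/* ... every field the fops callbacks may touch ... */

	/* 2. only then publish the device: open()/ioctl() may run
	 *    as soon as this call returns
	 */
	ret = cdev_device_add(&md->cdev, &md->dev);
	if (ret)
		goto err_cdev;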

Found by Linux Driver Verification project (linuxtesting.org).

Signed-off-by: Madhuparna Bhowmik 
Signed-off-by: Andrew Morton 
Acked-by: Alexandre Bounine 
Cc: Matt Porter 
Cc: Dan Carpenter 
Cc: Mike Marshall 
Cc: Thomas Gleixner 
Cc: Ira Weiny 
Cc: Allison Randal 
Cc: Pavel Andrianov 
Link: 
http://lkml.kernel.org/r/20200426112950.1803-1-madhuparnabhowmi...@gmail.com
Signed-off-by: Linus Torvalds 
Signed-off-by: Sasha Levin 
---
 drivers/rapidio/devices/rio_mport_cdev.c | 14 +++---
 1 file changed, 7 insertions(+), 7 deletions(-)

diff --git a/drivers/rapidio/devices/rio_mport_cdev.c 
b/drivers/rapidio/devices/rio_mport_cdev.c
index 10af330153b5e..0b85a80ae7ef6 100644
--- a/drivers/rapidio/devices/rio_mport_cdev.c
+++ b/drivers/rapidio/devices/rio_mport_cdev.c
@@ -2384,13 +2384,6 @@ static struct mport_dev *mport_cdev_add(struct rio_mport 
*mport)
cdev_init(&md->cdev, &mport_fops);
md->cdev.owner = THIS_MODULE;
 
-   ret = cdev_device_add(&md->cdev, &md->dev);
-   if (ret) {
-   rmcd_error("Failed to register mport %d (err=%d)",
-  mport->id, ret);
-   goto err_cdev;
-   }
-
INIT_LIST_HEAD(&md->doorbells);
spin_lock_init(&md->db_lock);
INIT_LIST_HEAD(&md->portwrites);
@@ -2410,6 +2403,13 @@ static struct mport_dev *mport_cdev_add(struct rio_mport 
*mport)
 #else
md->properties.transfer_mode |= RIO_TRANSFER_MODE_TRANSFER;
 #endif
+
+   ret = cdev_device_add(&md->cdev, &md->dev);
+   if (ret) {
+   rmcd_error("Failed to register mport %d (err=%d)",
+  mport->id, ret);
+   goto err_cdev;
+   }
ret = rio_query_mport(mport, &attr);
if (!ret) {
md->properties.flags = attr.flags;
-- 
2.25.1



[PATCH AUTOSEL 5.4 312/330] vfio/pci: Clear error and request eventfd ctx after releasing

2020-09-17 Thread Sasha Levin
From: Alex Williamson 

[ Upstream commit 5c5866c593bbd444d0339ede6a8fb5f14ff66d72 ]

The next use of the device will generate an underflow from the
stale reference.
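
Sketched, the pattern applied by the fix (names illustrative): drop the
reference and clear the cached pointer together, so no later code path can
put the same context a second time.

	if (ctx) {
		eventfd_ctx_put(ctx);
		ctx = NULL;
	}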

Cc: Qian Cai 
Fixes: 1518ac272e78 ("vfio/pci: fix memory leaks of eventfd ctx")
Reported-by: Daniel Wagner 
Reviewed-by: Cornelia Huck 
Tested-by: Daniel Wagner 
Signed-off-by: Alex Williamson 
Signed-off-by: Sasha Levin 
---
 drivers/vfio/pci/vfio_pci.c | 8 ++--
 1 file changed, 6 insertions(+), 2 deletions(-)

diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
index 12f7691e8b6ca..b669be5a20066 100644
--- a/drivers/vfio/pci/vfio_pci.c
+++ b/drivers/vfio/pci/vfio_pci.c
@@ -474,10 +474,14 @@ static void vfio_pci_release(void *device_data)
if (!(--vdev->refcnt)) {
vfio_spapr_pci_eeh_release(vdev->pdev);
vfio_pci_disable(vdev);
-   if (vdev->err_trigger)
+   if (vdev->err_trigger) {
eventfd_ctx_put(vdev->err_trigger);
-   if (vdev->req_trigger)
+   vdev->err_trigger = NULL;
+   }
+   if (vdev->req_trigger) {
eventfd_ctx_put(vdev->req_trigger);
+   vdev->req_trigger = NULL;
+   }
}
 
mutex_unlock(&vdev->reflck->lock);
-- 
2.25.1



[PATCH AUTOSEL 5.4 309/330] mtd: parser: cmdline: Support MTD names containing one or more colons

2020-09-17 Thread Sasha Levin
From: Boris Brezillon 

[ Upstream commit eb13fa0227417e84aecc3bd9c029d376e33474d3 ]

Looks like some drivers define MTD names with a colon in it, thus
making mtdpart= parsing impossible. Let's fix the parser to gracefully
handle that case: the last ':' in a partition definition sequence is
considered instead of the first one.
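
To illustrate the difference, a stand-alone example with a made-up mtdparts
string (not parser code):

	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		/* hypothetical MTD name that contains colons */
		char s[] = "spi0.0:nand:chip0:1M(boot),-(rootfs)";

		printf("%s\n", strchr(s, ':') + 1);  /* "nand:chip0:1M(boot),-(rootfs)" */
		printf("%s\n", strrchr(s, ':') + 1); /* "1M(boot),-(rootfs)" */
		return 0;
	}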

Signed-off-by: Boris Brezillon 
Signed-off-by: Ron Minnich 
Tested-by: Ron Minnich 
Signed-off-by: Richard Weinberger 
Signed-off-by: Sasha Levin 
---
 drivers/mtd/parsers/cmdlinepart.c | 23 ---
 1 file changed, 20 insertions(+), 3 deletions(-)

diff --git a/drivers/mtd/parsers/cmdlinepart.c 
b/drivers/mtd/parsers/cmdlinepart.c
index c86f2db8c882d..0625b25620ca7 100644
--- a/drivers/mtd/parsers/cmdlinepart.c
+++ b/drivers/mtd/parsers/cmdlinepart.c
@@ -218,12 +218,29 @@ static int mtdpart_setup_real(char *s)
struct cmdline_mtd_partition *this_mtd;
struct mtd_partition *parts;
int mtd_id_len, num_parts;
-   char *p, *mtd_id;
+   char *p, *mtd_id, *semicol;
+
+   /*
+* Replace the first ';' by a NULL char so strrchr can work
+* properly.
+*/
+   semicol = strchr(s, ';');
+   if (semicol)
+   *semicol = '\0';
 
mtd_id = s;
 
-   /* fetch <mtd-id> */
-   p = strchr(s, ':');
+   /*
+* fetch <mtd-id>. We use strrchr to ignore all ':' that could
+* be present in the MTD name, only the last one is interpreted
+* as an <mtd-id>/<part-definition> separator.
+*/
+   p = strrchr(s, ':');
+
+   /* Restore the ';' now. */
+   if (semicol)
+   *semicol = ';';
+
if (!p) {
pr_err("no mtd-id\n");
return -EINVAL;
-- 
2.25.1



[PATCH AUTOSEL 5.4 313/330] cifs: Fix double add page to memcg when cifs_readpages

2020-09-17 Thread Sasha Levin
From: Zhang Xiaoxu 

[ Upstream commit 95a3d8f3af9b0d63b43f221b630beaab9739d13a ]

When running xfstests generic/451, there is a BUG at mm/memcontrol.c:
  page:ea000560f2c0 refcount:2 mapcount:0 mapping:8544e0ea
   index:0xf
  mapping->aops:cifs_addr_ops dentry name:"tst-aio-dio-cycle-write.451"
  flags: 0x2f8001(locked)
  raw: 002f8001 c90002023c50 ea0005280088 88815cda0210
  raw: 000f  0002 88817287d000
  page dumped because: VM_BUG_ON_PAGE(page->mem_cgroup)
  page->mem_cgroup:88817287d000
  [ cut here ]
  kernel BUG at mm/memcontrol.c:2659!
  invalid opcode:  [#1] SMP
  CPU: 2 PID: 2038 Comm: xfs_io Not tainted 5.8.0-rc1 #44
  Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS ?-20190727_
073836-buildvm-ppc64le-16.ppc.4
  RIP: 0010:commit_charge+0x35/0x50
  Code: 0d 48 83 05 54 b2 02 05 01 48 89 77 38 c3 48 c7
c6 78 4a ea ba 48 83 05 38 b2 02 05 01 e8 63 0d9
  RSP: 0018:c90002023a50 EFLAGS: 00010202
  RAX:  RBX: 88817287d000 RCX: 
  RDX:  RSI: 88817ac97ea0 RDI: 88817ac97ea0
  RBP: ea000560f2c0 R08: 0203 R09: 0005
  R10: 0030 R11: c900020237a8 R12: 
  R13: 0001 R14: 0001 R15: 88815a1272c0
  FS:  7f5071ab0800() GS:88817ac8() knlGS:
  CS:  0010 DS:  ES:  CR0: 80050033
  CR2: 55efcd5ca000 CR3: 00015d312000 CR4: 06e0
  DR0:  DR1:  DR2: 
  DR3:  DR6: fffe0ff0 DR7: 0400
  Call Trace:
   mem_cgroup_charge+0x166/0x4f0
   __add_to_page_cache_locked+0x4a9/0x710
   add_to_page_cache_locked+0x15/0x20
   cifs_readpages+0x217/0x1270
   read_pages+0x29a/0x670
   page_cache_readahead_unbounded+0x24f/0x390
   __do_page_cache_readahead+0x3f/0x60
   ondemand_readahead+0x1f1/0x470
   page_cache_async_readahead+0x14c/0x170
   generic_file_buffered_read+0x5df/0x1100
   generic_file_read_iter+0x10c/0x1d0
   cifs_strict_readv+0x139/0x170
   new_sync_read+0x164/0x250
   __vfs_read+0x39/0x60
   vfs_read+0xb5/0x1e0
   ksys_pread64+0x85/0xf0
   __x64_sys_pread64+0x22/0x30
   do_syscall_64+0x69/0x150
   entry_SYSCALL_64_after_hwframe+0x44/0xa9
  RIP: 0033:0x7f5071fcb1af
  Code: Bad RIP value.
  RSP: 002b:7ffde2cdb8e0 EFLAGS: 0293 ORIG_RAX: 0011
  RAX: ffda RBX: 7ffde2cdb990 RCX: 7f5071fcb1af
  RDX: 1000 RSI: 55efcd5ca000 RDI: 0003
  RBP: 0003 R08:  R09: 
  R10: 1000 R11: 0293 R12: 0001
  R13: 0009f000 R14:  R15: 1000
  Modules linked in:
  ---[ end trace 725fa14a3e1af65c ]---

Since commit 3fea5a499d57 ("mm: memcontrol: convert page cache to a new
mem_cgroup_charge() API") does not cancel the page charge, the page may be
added to the page cache twice:
thread1   | thread2
cifs_readpages
readpages_get_pages
 add_to_page_cache_locked(head,index=n)=0
  | readpages_get_pages
  | add_to_page_cache_locked(head,index=n+1)=0
 add_to_page_cache_locked(head, index=n+1)=-EEXIST
 then, will next loop with list head page's
 index=n+1 and the page->mapping not NULL
readpages_get_pages
add_to_page_cache_locked(head, index=n+1)
 commit_charge
  VM_BUG_ON_PAGE

So we should not continue with the next loop iteration once adding any
page to the page cache has failed.
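
A simplified sketch of the intended loop shape (illustrative only, not the
actual readpages_get_pages() code):

	list_for_each_entry_safe(page, tpage, page_list, lru) {
		rc = add_to_page_cache_locked(page, mapping, page->index, gfp);
		if (rc)
			break;	/* stop here instead of retrying the same
				 * index from the list head later on */
		list_move_tail(&page->lru, &tmplist);
	}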

Reported-by: Hulk Robot 
Signed-off-by: Zhang Xiaoxu 
Signed-off-by: Steve French 
Acked-by: Ronnie Sahlberg 
Signed-off-by: Sasha Levin 
---
 fs/cifs/file.c | 11 +++
 1 file changed, 7 insertions(+), 4 deletions(-)

diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index 14ae341755d47..31d578739341b 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -4269,7 +4269,8 @@ readpages_get_pages(struct address_space *mapping, struct 
list_head *page_list,
break;
 
__SetPageLocked(page);
-   if (add_to_page_cache_locked(page, mapping, page->index, gfp)) {
+   rc = add_to_page_cache_locked(page, mapping, page->index, gfp);
+   if (rc) {
__ClearPageLocked(page);
break;
}
@@ -4285,6 +4286,7 @@ static int cifs_readpages(struct file *file, struct 
address_space *mapping,
struct list_head *page_list, unsigned num_pages)
 {
int rc;
+   int err = 0;
struct list_head tmplist;
struct cifsFileInfo *open_file = file->private_data;
struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
@@ -4329,7 +4331,7 @@ static int cifs_readpages(struct file *file, struct 
address_space *mapping,
 * the order of declining indexes. When we put the pages in
 * the rdata->pages, 

[PATCH AUTOSEL 5.4 305/330] ceph: fix potential race in ceph_check_caps

2020-09-17 Thread Sasha Levin
From: Jeff Layton 

[ Upstream commit dc3da0461cc4b76f2d0c5b12247fcb3b520edbbf ]

Nothing ensures that session will still be valid by the time we
dereference the pointer. Take and put a reference.

In principle, we should always be able to get a reference here, but
throw a warning if that's ever not the case.

Signed-off-by: Jeff Layton 
Signed-off-by: Ilya Dryomov 
Signed-off-by: Sasha Levin 
---
 fs/ceph/caps.c | 14 +-
 1 file changed, 13 insertions(+), 1 deletion(-)

diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index b2695919435e8..af563d73d252c 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -2013,12 +2013,24 @@ ack:
if (mutex_trylock(&session->s_mutex) == 0) {
dout("inverting session/ino locks on %p\n",
 session);
+   session = ceph_get_mds_session(session);
spin_unlock(&ci->i_ceph_lock);
if (took_snap_rwsem) {
up_read(&mdsc->snap_rwsem);
took_snap_rwsem = 0;
}
-   mutex_lock(&session->s_mutex);
+   if (session) {
+   mutex_lock(&session->s_mutex);
+   ceph_put_mds_session(session);
+   } else {
+   /*
+* Because we take the reference while
+* holding the i_ceph_lock, it should
+* never be NULL. Throw a warning if it
+* ever is.
+*/
+   WARN_ON_ONCE(true);
+   }
goto retry;
}
}
-- 
2.25.1



[PATCH AUTOSEL 5.4 311/330] NFS: nfs_xdr_status should record the procedure name

2020-09-17 Thread Sasha Levin
From: Chuck Lever 

[ Upstream commit 5be5945864ea143fda628e8179c8474457af1f43 ]

When sunrpc trace points are not enabled, the recorded task ID
information alone is not helpful.

Signed-off-by: Chuck Lever 
Signed-off-by: Anna Schumaker 
Signed-off-by: Sasha Levin 
---
 fs/nfs/nfstrace.h | 15 +--
 1 file changed, 13 insertions(+), 2 deletions(-)

diff --git a/fs/nfs/nfstrace.h b/fs/nfs/nfstrace.h
index 361cc10d6f95d..c8081d2b4166a 100644
--- a/fs/nfs/nfstrace.h
+++ b/fs/nfs/nfstrace.h
@@ -1147,7 +1147,12 @@ TRACE_EVENT(nfs_xdr_status,
__field(unsigned int, task_id)
__field(unsigned int, client_id)
__field(u32, xid)
+   __field(int, version)
__field(unsigned long, error)
+   __string(program,
+
xdr->rqst->rq_task->tk_client->cl_program->name)
+   __string(procedure,
+xdr->rqst->rq_task->tk_msg.rpc_proc->p_name)
),
 
TP_fast_assign(
@@ -1157,13 +1162,19 @@ TRACE_EVENT(nfs_xdr_status,
__entry->task_id = task->tk_pid;
__entry->client_id = task->tk_client->cl_clid;
__entry->xid = be32_to_cpu(rqstp->rq_xid);
+   __entry->version = task->tk_client->cl_vers;
__entry->error = error;
+   __assign_str(program,
+task->tk_client->cl_program->name)
+   __assign_str(procedure, task->tk_msg.rpc_proc->p_name)
),
 
TP_printk(
-   "task:%u@%d xid=0x%08x error=%ld (%s)",
+   "task:%u@%d xid=0x%08x %sv%d %s error=%ld (%s)",
__entry->task_id, __entry->client_id, __entry->xid,
-   -__entry->error, nfs_show_status(__entry->error)
+   __get_str(program), __entry->version,
+   __get_str(procedure), -__entry->error,
+   nfs_show_status(__entry->error)
)
 );
 
-- 
2.25.1



[PATCH AUTOSEL 5.4 320/330] btrfs: qgroup: fix data leak caused by race between writeback and truncate

2020-09-17 Thread Sasha Levin
From: Qu Wenruo 

[ Upstream commit fa91e4aa1716004ea8096d5185ec0451e206aea0 ]

[BUG]
When running tests like generic/013 on test device with btrfs quota
enabled, it can normally lead to data leak, detected at unmount time:

  BTRFS warning (device dm-3): qgroup 0/5 has unreleased space, type 0 rsv 4096
  [ cut here ]
  WARNING: CPU: 11 PID: 16386 at fs/btrfs/disk-io.c:4142 
close_ctree+0x1dc/0x323 [btrfs]
  RIP: 0010:close_ctree+0x1dc/0x323 [btrfs]
  Call Trace:
   btrfs_put_super+0x15/0x17 [btrfs]
   generic_shutdown_super+0x72/0x110
   kill_anon_super+0x18/0x30
   btrfs_kill_super+0x17/0x30 [btrfs]
   deactivate_locked_super+0x3b/0xa0
   deactivate_super+0x40/0x50
   cleanup_mnt+0x135/0x190
   __cleanup_mnt+0x12/0x20
   task_work_run+0x64/0xb0
   __prepare_exit_to_usermode+0x1bc/0x1c0
   __syscall_return_slowpath+0x47/0x230
   do_syscall_64+0x64/0xb0
   entry_SYSCALL_64_after_hwframe+0x44/0xa9
  ---[ end trace caf08beafeca2392 ]---
  BTRFS error (device dm-3): qgroup reserved space leaked

[CAUSE]
In the offending case, the offending operations are:
2/6: writev f2X[269 1 0 0 0 0] [1006997,67,288] 0
2/7: truncate f2X[269 1 0 0 48 1026293] 18388 0

The following sequence of events could happen after the writev():
CPU1 (writeback)|   CPU2 (truncate)
-
btrfs_writepages()  |
|- extent_write_cache_pages()   |
   |- Got page for 1003520  |
   |  1003520 is Dirty, no writeback|
   |  So (!clear_page_dirty_for_io())   |
   |  gets called for it|
   |- Now page 1003520 is Clean.|
   || btrfs_setattr()
   || |- btrfs_setsize()
   |||- truncate_setsize()
   ||   New i_size is 18388
   |- __extent_writepage()  |
   |  |- page_offset() > i_size |
  |- btrfs_invalidatepage() |
 |- Page is clean, so no qgroup |
callback executed

This means, the qgroup reserved data space is not properly released in
btrfs_invalidatepage() as the page is Clean.

[FIX]
Instead of checking the dirty bit of a page, call
btrfs_qgroup_free_data() unconditionally in btrfs_invalidatepage().

As the qgroup rsv is completely bound to the QGROUP_RESERVED bit of the
io_tree, not to the page status, we won't cause double freeing anyway.

Fixes: 0b34c261e235 ("btrfs: qgroup: Prevent qgroup->reserved from going 
subzero")
CC: sta...@vger.kernel.org # 4.14+
Reviewed-by: Josef Bacik 
Signed-off-by: Qu Wenruo 
Signed-off-by: David Sterba 
Signed-off-by: Sasha Levin 
---
 fs/btrfs/inode.c | 23 ++-
 1 file changed, 10 insertions(+), 13 deletions(-)

diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index e9787b7b943a2..182e93a5b11d5 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -9044,20 +9044,17 @@ again:
/*
 * Qgroup reserved space handler
 * Page here will be either
-* 1) Already written to disk
-*In this case, its reserved space is released from data rsv map
-*and will be freed by delayed_ref handler finally.
-*So even we call qgroup_free_data(), it won't decrease reserved
-*space.
-* 2) Not written to disk
-*This means the reserved space should be freed here. However,
-*if a truncate invalidates the page (by clearing PageDirty)
-*and the page is accounted for while allocating extent
-*in btrfs_check_data_free_space() we let delayed_ref to
-*free the entire extent.
+* 1) Already written to disk or ordered extent already submitted
+*Then its QGROUP_RESERVED bit in io_tree is already cleaned.
+*Qgroup will be handled by its qgroup_record then.
+*btrfs_qgroup_free_data() call will do nothing here.
+*
+* 2) Not written to disk yet
+*Then btrfs_qgroup_free_data() call will clear the QGROUP_RESERVED
+*bit of its io_tree, and free the qgroup reserved data space.
+*Since the IO will never happen for this page.
 */
-   if (PageDirty(page))
-   btrfs_qgroup_free_data(inode, NULL, page_start, PAGE_SIZE);
+   btrfs_qgroup_free_data(inode, NULL, page_start, PAGE_SIZE);
if (!inode_evicting) {
clear_extent_bit(tree, page_start, page_end, EXTENT_LOCKED |
 EXTENT_DELALLOC | EXTENT_DELALLOC_NEW |
-- 
2.25.1



[PATCH AUTOSEL 5.4 302/330] mtd: rawnand: gpmi: Fix runtime PM imbalance on error

2020-09-17 Thread Sasha Levin
From: Dinghao Liu 

[ Upstream commit 550e68ea36a6671a96576c0531685ce6e6c0d19d ]

pm_runtime_get_sync() increments the runtime PM usage counter even
when it returns an error code. Thus a pairing decrement is needed on
the error handling path to keep the counter balanced.

Signed-off-by: Dinghao Liu 
Signed-off-by: Miquel Raynal 
Link: 
https://lore.kernel.org/linux-mtd/20200522095139.19653-1-dinghao@zju.edu.cn
Signed-off-by: Sasha Levin 
---
 drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c 
b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
index b9d5d55a5edb9..ef89947ee3191 100644
--- a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
+++ b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
@@ -540,8 +540,10 @@ static int bch_set_geometry(struct gpmi_nand_data *this)
return ret;
 
ret = pm_runtime_get_sync(this->dev);
-   if (ret < 0)
+   if (ret < 0) {
+   pm_runtime_put_autosuspend(this->dev);
return ret;
+   }
 
/*
* Due to erratum #2847 of the MX23, the BCH cannot be soft reset on this
-- 
2.25.1



[PATCH AUTOSEL 5.4 293/330] perf trace: Fix the selection for architectures to generate the errno name tables

2020-09-17 Thread Sasha Levin
From: Ian Rogers 

[ Upstream commit 7597ce89b3ed239f7a3408b930d2a6c7a4c938a1 ]

Make the architecture test directory agree with the code comment.

Committer notes:

This was split from a larger patch.

The code was assuming the developer always worked from tools/perf/, so make
sure we do the 'test -d' on $toolsdir/perf/arch/$arch, to match the intent
expressed in the comment just above that loop.

Signed-off-by: Ian Rogers 
Cc: Adrian Hunter 
Cc: Alexander Shishkin 
Cc: Alexios Zavras 
Cc: Andi Kleen 
Cc: Greg Kroah-Hartman 
Cc: Igor Lubashev 
Cc: Jiri Olsa 
Cc: Kan Liang 
Cc: Mark Rutland 
Cc: Mathieu Poirier 
Cc: Namhyung Kim 
Cc: Nick Desaulniers 
Cc: Peter Zijlstra 
Cc: Stephane Eranian 
Cc: Thomas Gleixner 
Cc: Wei Li 
Link: http://lore.kernel.org/lkml/20200306071110.130202-4-irog...@google.com
Signed-off-by: Arnaldo Carvalho de Melo 
Signed-off-by: Sasha Levin 
---
 tools/perf/trace/beauty/arch_errno_names.sh | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tools/perf/trace/beauty/arch_errno_names.sh 
b/tools/perf/trace/beauty/arch_errno_names.sh
index 22c9fc900c847..f8c44a85650be 100755
--- a/tools/perf/trace/beauty/arch_errno_names.sh
+++ b/tools/perf/trace/beauty/arch_errno_names.sh
@@ -91,7 +91,7 @@ EoHEADER
 # in tools/perf/arch
 archlist=""
 for arch in $(find $toolsdir/arch -maxdepth 1 -mindepth 1 -type d -printf 
"%f\n" | grep -v x86 | sort); do
-   test -d arch/$arch && archlist="$archlist $arch"
+   test -d $toolsdir/perf/arch/$arch && archlist="$archlist $arch"
 done
 
 for arch in x86 $archlist generic; do
-- 
2.25.1



[PATCH AUTOSEL 5.4 295/330] perf util: Fix memory leak of prefix_if_not_in

2020-09-17 Thread Sasha Levin
From: Xie XiuQi 

[ Upstream commit 07e9a6f538cbeecaf5c55b6f2991416f873cdcbd ]

We need to free "str" before returning when asprintf() fails, to avoid a
memory leak.

Signed-off-by: Xie XiuQi 
Cc: Alexander Shishkin 
Cc: Hongbo Yao 
Cc: Jiri Olsa 
Cc: Li Bin 
Cc: Mark Rutland 
Cc: Namhyung Kim 
Link: http://lore.kernel.org/lkml/20200521133218.30150-4-liwei...@huawei.com
Signed-off-by: Arnaldo Carvalho de Melo 
Signed-off-by: Sasha Levin 
---
 tools/perf/util/sort.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c
index 43d1d410854a3..4027906fd3e38 100644
--- a/tools/perf/util/sort.c
+++ b/tools/perf/util/sort.c
@@ -2788,7 +2788,7 @@ static char *prefix_if_not_in(const char *pre, char *str)
return str;
 
if (asprintf(&n, "%s,%s", pre, str) < 0)
-   return NULL;
+   n = NULL;
 
free(str);
return n;
-- 
2.25.1



[PATCH AUTOSEL 5.4 328/330] ALSA: hda: Workaround for spurious wakeups on some Intel platforms

2020-09-17 Thread Sasha Levin
From: Takashi Iwai 

[ Upstream commit a6630529aecb5a3e84370c376ed658e892e6261e ]

We've received a regression report on Intel HD-audio controller that
wakes up immediately after S3 suspend.  The bisection leads to the
commit c4c8dd6ef807 ("ALSA: hda: Skip controller resume if not
needed").  This commit replaces the system-suspend to use
pm_runtime_force_suspend() instead of the direct call of
__azx_runtime_suspend().  However, by some really mysterious reason,
pm_runtime_force_suspend() causes a spurious wakeup (although it calls
the same __azx_runtime_suspend() internally).

As an ugly workaround for now, revert the behavior to call
__azx_runtime_suspend() and __azx_runtime_resume() for those old Intel
platforms that may exhibit such a problem, while keeping the new
standard pm_runtime_force_suspend() and pm_runtime_force_resume()
pair for the remaining chips.

Fixes: c4c8dd6ef807 ("ALSA: hda: Skip controller resume if not needed")
BugLink: https://bugzilla.kernel.org/show_bug.cgi?id=208649
Cc: 
Link: https://lore.kernel.org/r/20200727164443.4233-1-ti...@suse.de
Signed-off-by: Takashi Iwai 
Signed-off-by: Sasha Levin 
---
 sound/pci/hda/hda_controller.h |  2 +-
 sound/pci/hda/hda_intel.c  | 17 ++---
 2 files changed, 15 insertions(+), 4 deletions(-)

diff --git a/sound/pci/hda/hda_controller.h b/sound/pci/hda/hda_controller.h
index 82e26442724ba..a356fb0e57738 100644
--- a/sound/pci/hda/hda_controller.h
+++ b/sound/pci/hda/hda_controller.h
@@ -41,7 +41,7 @@
 /* 24 unused */
 #define AZX_DCAPS_COUNT_LPIB_DELAY  (1 << 25)  /* Take LPIB as delay */
 #define AZX_DCAPS_PM_RUNTIME   (1 << 26)   /* runtime PM support */
-/* 27 unused */
+#define AZX_DCAPS_SUSPEND_SPURIOUS_WAKEUP (1 << 27) /* Workaround for spurious 
wakeups after suspend */
 #define AZX_DCAPS_CORBRP_SELF_CLEAR (1 << 28)  /* CORBRP clears itself after 
reset */
 #define AZX_DCAPS_NO_MSI64  (1 << 29)  /* Stick to 32-bit MSIs */
 #define AZX_DCAPS_SEPARATE_STREAM_TAG  (1 << 30) /* capture and playback use 
separate stream tag */
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index 754e4d1a86b57..590ea262f2e20 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -295,7 +295,8 @@ enum {
 /* PCH for HSW/BDW; with runtime PM */
 /* no i915 binding for this as HSW/BDW has another controller for HDMI */
 #define AZX_DCAPS_INTEL_PCH \
-   (AZX_DCAPS_INTEL_PCH_BASE | AZX_DCAPS_PM_RUNTIME)
+   (AZX_DCAPS_INTEL_PCH_BASE | AZX_DCAPS_PM_RUNTIME |\
+AZX_DCAPS_SUSPEND_SPURIOUS_WAKEUP)
 
 /* HSW HDMI */
 #define AZX_DCAPS_INTEL_HASWELL \
@@ -1026,7 +1027,14 @@ static int azx_suspend(struct device *dev)
chip = card->private_data;
bus = azx_bus(chip);
snd_power_change_state(card, SNDRV_CTL_POWER_D3hot);
-   pm_runtime_force_suspend(dev);
+   /* An ugly workaround: direct call of __azx_runtime_suspend() and
+* __azx_runtime_resume() for old Intel platforms that suffer from
+* spurious wakeups after S3 suspend
+*/
+   if (chip->driver_caps & AZX_DCAPS_SUSPEND_SPURIOUS_WAKEUP)
+   __azx_runtime_suspend(chip);
+   else
+   pm_runtime_force_suspend(dev);
if (bus->irq >= 0) {
free_irq(bus->irq, chip);
bus->irq = -1;
@@ -1054,7 +1062,10 @@ static int azx_resume(struct device *dev)
if (azx_acquire_irq(chip, 1) < 0)
return -EIO;
 
-   pm_runtime_force_resume(dev);
+   if (chip->driver_caps & AZX_DCAPS_SUSPEND_SPURIOUS_WAKEUP)
+   __azx_runtime_resume(chip, false);
+   else
+   pm_runtime_force_resume(dev);
snd_power_change_state(card, SNDRV_CTL_POWER_D0);
 
trace_azx_resume(chip);
-- 
2.25.1



[PATCH AUTOSEL 5.4 268/330] drm/exynos: dsi: Remove bridge node reference in error handling path in probe function

2020-09-17 Thread Sasha Levin
From: Christophe JAILLET 

[ Upstream commit 547a7348633b1f9923551f94ac3157a613d2c9f2 ]

'exynos_dsi_parse_dt()' takes a reference to 'dsi->in_bridge_node'.
This must be released in the error handling path.

In order to do that, add an error handling path and move the
'exynos_dsi_parse_dt()' call from the beginning to the end of the probe
function to ease the error handling path.
This function only sets some variables which are used only in the
'transfer' function.

The call chain is:
   .transfer
--> exynos_dsi_host_transfer
  --> exynos_dsi_init
--> exynos_dsi_enable_clock  (use burst_clk_rate and esc_clk_rate)
  --> exynos_dsi_set_pll (use pll_clk_rate)

While at it, also handle cases where 'component_add()' fails.

This patch is similar to commit 70505c2ef94b ("drm/exynos: dsi: Remove bridge 
node reference in removal")
which fixed the issue in the remove function.

Signed-off-by: Christophe JAILLET 
Signed-off-by: Inki Dae 
Signed-off-by: Sasha Levin 
---
 drivers/gpu/drm/exynos/exynos_drm_dsi.c | 20 +++-
 1 file changed, 15 insertions(+), 5 deletions(-)

diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c 
b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
index 8ed94c9948008..b83acd696774b 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
@@ -1741,10 +1741,6 @@ static int exynos_dsi_probe(struct platform_device *pdev)
dsi->dev = dev;
dsi->driver_data = of_device_get_match_data(dev);
 
-   ret = exynos_dsi_parse_dt(dsi);
-   if (ret)
-   return ret;
-
dsi->supplies[0].supply = "vddcore";
dsi->supplies[1].supply = "vddio";
ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(dsi->supplies),
@@ -1805,11 +1801,25 @@ static int exynos_dsi_probe(struct platform_device 
*pdev)
return ret;
}
 
+   ret = exynos_dsi_parse_dt(dsi);
+   if (ret)
+   return ret;
+
platform_set_drvdata(pdev, &dsi->encoder);
 
pm_runtime_enable(dev);
 
-   return component_add(dev, _dsi_component_ops);
+   ret = component_add(dev, _dsi_component_ops);
+   if (ret)
+   goto err_disable_runtime;
+
+   return 0;
+
+err_disable_runtime:
+   pm_runtime_disable(dev);
+   of_node_put(dsi->in_bridge_node);
+
+   return ret;
 }
 
 static int exynos_dsi_remove(struct platform_device *pdev)
-- 
2.25.1



[PATCH AUTOSEL 5.4 263/330] KVM: x86: handle wrap around 32-bit address space

2020-09-17 Thread Sasha Levin
From: Paolo Bonzini 

[ Upstream commit fede8076aab4c2280c673492f8f7a2e87712e8b4 ]

KVM is not handling the case where EIP wraps around the 32-bit address
space (that is, outside long mode).  This is needed both in vmx.c
and in emulate.c.  SVM with NRIPS is okay, but it can still print
an error to dmesg due to integer overflow.
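
A tiny stand-alone example of the carry check used in the vmx.c hunk below
(the values are made up):

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint64_t orig_rip = 0xfffffffeULL;	/* near the 4 GiB limit */
		uint64_t rip = orig_rip + 4;		/* 0x100000002 */

		/* bits 31 and 32 both flipped, i.e. a carry out of bit 31 */
		if (((rip ^ orig_rip) >> 31) == 3)
			rip = (uint32_t)rip;		/* wrap back: 0x2 */

		printf("rip = 0x%llx\n", (unsigned long long)rip);
		return 0;
	}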

Reported-by: Nick Peterson 
Signed-off-by: Paolo Bonzini 
Signed-off-by: Sasha Levin 
---
 arch/x86/kvm/emulate.c |  2 ++
 arch/x86/kvm/svm.c |  3 ---
 arch/x86/kvm/vmx/vmx.c | 15 ---
 3 files changed, 14 insertions(+), 6 deletions(-)

diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 128d3ad46e965..cc7823e7ef96c 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -5836,6 +5836,8 @@ writeback:
}
 
ctxt->eip = ctxt->_eip;
+   if (ctxt->mode != X86EMUL_MODE_PROT64)
+   ctxt->eip = (u32)ctxt->_eip;
 
 done:
if (rc == X86EMUL_PROPAGATE_FAULT) {
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 3243a80ea32c0..802b5f9ab7446 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -787,9 +787,6 @@ static int skip_emulated_instruction(struct kvm_vcpu *vcpu)
if (!kvm_emulate_instruction(vcpu, EMULTYPE_SKIP))
return 0;
} else {
-   if (svm->next_rip - kvm_rip_read(vcpu) > MAX_INST_SIZE)
-   pr_err("%s: ip 0x%lx next 0x%llx\n",
-  __func__, kvm_rip_read(vcpu), svm->next_rip);
kvm_rip_write(vcpu, svm->next_rip);
}
svm_set_interrupt_shadow(vcpu, 0);
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index a071eab3bab74..14b973990d5a8 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -1541,7 +1541,7 @@ static int vmx_rtit_ctl_check(struct kvm_vcpu *vcpu, u64 
data)
 
 static int skip_emulated_instruction(struct kvm_vcpu *vcpu)
 {
-   unsigned long rip;
+   unsigned long rip, orig_rip;
 
/*
 * Using VMCS.VM_EXIT_INSTRUCTION_LEN on EPT misconfig depends on
@@ -1553,8 +1553,17 @@ static int skip_emulated_instruction(struct kvm_vcpu 
*vcpu)
 */
if (!static_cpu_has(X86_FEATURE_HYPERVISOR) ||
to_vmx(vcpu)->exit_reason != EXIT_REASON_EPT_MISCONFIG) {
-   rip = kvm_rip_read(vcpu);
-   rip += vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
+   orig_rip = kvm_rip_read(vcpu);
+   rip = orig_rip + vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
+#ifdef CONFIG_X86_64
+   /*
+* We need to mask out the high 32 bits of RIP if not in 64-bit
+* mode, but just finding out that we are in 64-bit mode is
+* quite expensive.  Only do it if there was a carry.
+*/
+   if (unlikely(((rip ^ orig_rip) >> 31) == 3) && 
!is_64_bit_mode(vcpu))
+   rip = (u32)rip;
+#endif
kvm_rip_write(vcpu, rip);
} else {
if (!kvm_emulate_instruction(vcpu, EMULTYPE_SKIP))
-- 
2.25.1



[PATCH AUTOSEL 5.4 304/330] PCI: tegra: Fix runtime PM imbalance on error

2020-09-17 Thread Sasha Levin
From: Dinghao Liu 

[ Upstream commit fcee90cdf6f3a3a371add04d41528d5ba9c3b411 ]

pm_runtime_get_sync() increments the runtime PM usage counter even
when it returns an error code. Thus a pairing decrement is needed on
the error handling path to keep the counter balanced.

Also, call pm_runtime_disable() when pm_runtime_get_sync() returns
an error code.

Link: https://lore.kernel.org/r/20200521024709.2368-1-dinghao@zju.edu.cn
Signed-off-by: Dinghao Liu 
Signed-off-by: Lorenzo Pieralisi 
Acked-by: Thierry Reding 
Signed-off-by: Sasha Levin 
---
 drivers/pci/controller/pci-tegra.c | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/drivers/pci/controller/pci-tegra.c 
b/drivers/pci/controller/pci-tegra.c
index b71e753419c2d..cfa3c83d6cc74 100644
--- a/drivers/pci/controller/pci-tegra.c
+++ b/drivers/pci/controller/pci-tegra.c
@@ -2768,7 +2768,7 @@ static int tegra_pcie_probe(struct platform_device *pdev)
err = pm_runtime_get_sync(pcie->dev);
if (err < 0) {
dev_err(dev, "fail to enable pcie controller: %d\n", err);
-   goto teardown_msi;
+   goto pm_runtime_put;
}
 
err = tegra_pcie_request_resources(pcie);
@@ -2808,7 +2808,6 @@ free_resources:
 pm_runtime_put:
pm_runtime_put_sync(pcie->dev);
pm_runtime_disable(pcie->dev);
-teardown_msi:
tegra_pcie_msi_teardown(pcie);
 put_resources:
tegra_pcie_put_resources(pcie);
-- 
2.25.1



[PATCH AUTOSEL 5.4 242/330] KVM: arm64: vgic-v3: Retire all pending LPIs on vcpu destroy

2020-09-17 Thread Sasha Levin
From: Zenghui Yu 

[ Upstream commit 969ce8b5260d8ec01e6f1949d2927a86419663ce ]

It's likely that the vcpu fails to handle all virtual interrupts if
userspace decides to destroy it, leaving the pending ones in the
ap_list. If the un-handled one is an LPI, its vgic_irq structure will
be eventually leaked because of an extra refcount increment in
vgic_queue_irq_unlock().

This was detected by kmemleak on almost every guest destroy, the
backtrace is as follows:

unreferenced object 0x80725aed5500 (size 128):
comm "CPU 5/KVM", pid 40711, jiffies 4298024754 (age 166366.512s)
hex dump (first 32 bytes):
00 00 00 00 00 00 00 00 08 01 a9 73 6d 80 ff ff ...sm...
c8 61 ee a9 00 20 ff ff 28 1e 55 81 6c 80 ff ff .a... ..(.U.l...
backtrace:
[<4bcaa122>] kmem_cache_alloc_trace+0x2dc/0x418
[<69c7dabb>] vgic_add_lpi+0x88/0x418
[] vgic_its_cmd_handle_mapi+0x4dc/0x588
[] vgic_its_process_commands.part.5+0x484/0x1198
[<4bd3f8e3>] vgic_its_process_commands+0x50/0x80
[] vgic_mmio_write_its_cwriter+0xac/0x108
[<09641ebb>] dispatch_mmio_write+0xd0/0x188
[<8f79d288>] __kvm_io_bus_write+0x134/0x240
[<882f39ac>] kvm_io_bus_write+0xe0/0x150
[<78197602>] io_mem_abort+0x484/0x7b8
[<60954e3c>] kvm_handle_guest_abort+0x4cc/0xa58
[] handle_exit+0x24c/0x770
[] kvm_arch_vcpu_ioctl_run+0x460/0x1988
[<25fb897c>] kvm_vcpu_ioctl+0x4f8/0xee0
[<3271e317>] do_vfs_ioctl+0x160/0xcd8
[] ksys_ioctl+0x98/0xd8

Fix it by retiring all pending LPIs in the ap_list on the destroy path.

p.s. I can also reproduce it on a normal guest shutdown. It is because
userspace still sends LPIs to the vcpu (through the KVM_SIGNAL_MSI ioctl)
while the guest is being shut down and is unable to handle them. A little
strange, though, and I haven't dug further...

Reviewed-by: James Morse 
Signed-off-by: Zenghui Yu 
[maz: moved the distributor deallocation down to avoid an UAF splat]
Signed-off-by: Marc Zyngier 
Link: https://lore.kernel.org/r/20200414030349.625-2-yuzeng...@huawei.com
Signed-off-by: Sasha Levin 
---
 virt/kvm/arm/vgic/vgic-init.c | 10 --
 1 file changed, 8 insertions(+), 2 deletions(-)

diff --git a/virt/kvm/arm/vgic/vgic-init.c b/virt/kvm/arm/vgic/vgic-init.c
index 6d85c6d894c39..6899101538890 100644
--- a/virt/kvm/arm/vgic/vgic-init.c
+++ b/virt/kvm/arm/vgic/vgic-init.c
@@ -358,6 +358,12 @@ void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu)
 {
struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
 
+   /*
+* Retire all pending LPIs on this vcpu anyway as we're
+* going to destroy it.
+*/
+   vgic_flush_pending_lpis(vcpu);
+
INIT_LIST_HEAD(&vgic_cpu->ap_list_head);
 }
 
@@ -369,10 +375,10 @@ static void __kvm_vgic_destroy(struct kvm *kvm)
 
vgic_debug_destroy(kvm);
 
-   kvm_vgic_dist_destroy(kvm);
-
kvm_for_each_vcpu(i, vcpu, kvm)
kvm_vgic_vcpu_destroy(vcpu);
+
+   kvm_vgic_dist_destroy(kvm);
 }
 
 void kvm_vgic_destroy(struct kvm *kvm)
-- 
2.25.1



[PATCH AUTOSEL 5.4 267/330] ALSA: hda: Fix potential race in unsol event handler

2020-09-17 Thread Sasha Levin
From: Takashi Iwai 

[ Upstream commit c637fa151259c0f74665fde7cba5b7eac1417ae5 ]

The unsol event handling code has a loop retrieving the read/write
indices and the arrays without locking while the append to the array
may happen concurrently.  This may lead to some inconsistency.
Although there hasn't been any proof of bad results from this, it's still
safer to protect the racy accesses.

This patch adds the spinlock protection around the unsol handling loop
for addressing it.  Here we take bus->reg_lock as the writer side
snd_hdac_bus_queue_event() is also protected by that lock.
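
Reduced to the locking pattern, the resulting loop looks roughly like this
(illustrative, not the hdac_bus.c code): the lock is dropped around the
driver callback so the callback may take other locks, and retaken before
the ring indices are read again.

	spin_lock_irq(&bus->reg_lock);
	while (bus->unsol_rp != bus->unsol_wp) {
		/* pick the next queued event under the lock */
		spin_unlock_irq(&bus->reg_lock);
		/* invoke the codec driver's unsol_event() callback */
		spin_lock_irq(&bus->reg_lock);
	}
	spin_unlock_irq(&bus->reg_lock);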

Link: https://lore.kernel.org/r/20200516062556.30951-1-ti...@suse.de
Signed-off-by: Takashi Iwai 
Signed-off-by: Sasha Levin 
---
 sound/hda/hdac_bus.c | 4 
 1 file changed, 4 insertions(+)

diff --git a/sound/hda/hdac_bus.c b/sound/hda/hdac_bus.c
index 8f19876244ebe..53be2cac98e7c 100644
--- a/sound/hda/hdac_bus.c
+++ b/sound/hda/hdac_bus.c
@@ -158,6 +158,7 @@ static void snd_hdac_bus_process_unsol_events(struct 
work_struct *work)
struct hdac_driver *drv;
unsigned int rp, caddr, res;
 
+   spin_lock_irq(&bus->reg_lock);
while (bus->unsol_rp != bus->unsol_wp) {
rp = (bus->unsol_rp + 1) % HDA_UNSOL_QUEUE_SIZE;
bus->unsol_rp = rp;
@@ -169,10 +170,13 @@ static void snd_hdac_bus_process_unsol_events(struct 
work_struct *work)
codec = bus->caddr_tbl[caddr & 0x0f];
if (!codec || !codec->dev.driver)
continue;
+   spin_unlock_irq(&bus->reg_lock);
drv = drv_to_hdac_driver(codec->dev.driver);
if (drv->unsol_event)
drv->unsol_event(codec, res);
+   spin_lock_irq(&bus->reg_lock);
}
+   spin_unlock_irq(&bus->reg_lock);
 }
 
 /**
-- 
2.25.1



[PATCH AUTOSEL 5.4 270/330] powerpc/traps: Make unrecoverable NMIs die instead of panic

2020-09-17 Thread Sasha Levin
From: Nicholas Piggin 

[ Upstream commit 265d6e588d87194c2fe2d6c240247f0264e0c19b ]

System Reset and Machine Check interrupts that are not recoverable due
to being nested or interrupting when RI=0 currently panic. This is not
necessary, and can often just kill the current context and recover.

Signed-off-by: Nicholas Piggin 
Signed-off-by: Michael Ellerman 
Reviewed-by: Christophe Leroy 
Link: https://lore.kernel.org/r/20200508043408.886394-16-npig...@gmail.com
Signed-off-by: Sasha Levin 
---
 arch/powerpc/kernel/traps.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index 014ff0701f245..9432fc6af28a5 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -510,11 +510,11 @@ out:
 #ifdef CONFIG_PPC_BOOK3S_64
BUG_ON(get_paca()->in_nmi == 0);
if (get_paca()->in_nmi > 1)
-   nmi_panic(regs, "Unrecoverable nested System Reset");
+   die("Unrecoverable nested System Reset", regs, SIGABRT);
 #endif
/* Must die if the interrupt is not recoverable */
if (!(regs->msr & MSR_RI))
-   nmi_panic(regs, "Unrecoverable System Reset");
+   die("Unrecoverable System Reset", regs, SIGABRT);
 
if (saved_hsrrs) {
mtspr(SPRN_HSRR0, hsrr0);
@@ -858,7 +858,7 @@ void machine_check_exception(struct pt_regs *regs)
 
/* Must die if the interrupt is not recoverable */
if (!(regs->msr & MSR_RI))
-   nmi_panic(regs, "Unrecoverable Machine check");
+   die("Unrecoverable Machine check", regs, SIGBUS);
 
return;
 
-- 
2.25.1



[PATCH AUTOSEL 5.4 240/330] bdev: Reduce time holding bd_mutex in sync in blkdev_close()

2020-09-17 Thread Sasha Levin
From: Douglas Anderson 

[ Upstream commit b849dd84b6ccfe32622988b79b7b073861fcf9f7 ]

While trying to "dd" to the block device for a USB stick, I
encountered a hung task warning (blocked for > 120 seconds).  I
managed to come up with an easy way to reproduce this on my system
(where /dev/sdb is the block device for my USB stick) with:

  while true; do dd if=/dev/zero of=/dev/sdb bs=4M; done

With my reproduction here are the relevant bits from the hung task
detector:

 INFO: task udevd:294 blocked for more than 122 seconds.
 ...
 udevd   D0   294  1 0x0048
 Call trace:
  ...
  mutex_lock_nested+0x40/0x50
  __blkdev_get+0x7c/0x3d4
  blkdev_get+0x118/0x138
  blkdev_open+0x94/0xa8
  do_dentry_open+0x268/0x3a0
  vfs_open+0x34/0x40
  path_openat+0x39c/0xdf4
  do_filp_open+0x90/0x10c
  do_sys_open+0x150/0x3c8
  ...

 ...
 Showing all locks held in the system:
 ...
 1 lock held by dd/2798:
#0: ff814ac1a3b8 (&bdev->bd_mutex){+.+.}, at: __blkdev_put+0x50/0x204
 ...
 dd  D0  2798   2764 0x00400208
 Call trace:
  ...
  schedule+0x8c/0xbc
  io_schedule+0x1c/0x40
  wait_on_page_bit_common+0x238/0x338
  __lock_page+0x5c/0x68
  write_cache_pages+0x194/0x500
  generic_writepages+0x64/0xa4
  blkdev_writepages+0x24/0x30
  do_writepages+0x48/0xa8
  __filemap_fdatawrite_range+0xac/0xd8
  filemap_write_and_wait+0x30/0x84
  __blkdev_put+0x88/0x204
  blkdev_put+0xc4/0xe4
  blkdev_close+0x28/0x38
  __fput+0xe0/0x238
  fput+0x1c/0x28
  task_work_run+0xb0/0xe4
  do_notify_resume+0xfc0/0x14bc
  work_pending+0x8/0x14

The problem appears related to the fact that my USB disk is terribly
slow and that I have a lot of RAM in my system to cache things.
Specifically my writes seem to be happening at ~15 MB/s and I've got
~4 GB of RAM in my system that can be used for buffering.  To write 4
GB of buffer to disk thus takes ~4000 MB / ~15 MB/s = ~267 seconds.

The 267 second number is a problem because in __blkdev_put() we call
sync_blockdev() while holding the bd_mutex.  Any other callers who
want the bd_mutex will be blocked for the whole time.

The problem is made worse because I believe blkdev_put() specifically
tells other tasks (namely udev) to go try to access the device at right
around the same time we're going to hold the mutex for a long time.

Putting some traces around this (after disabling the hung task detector),
I could confirm:
 dd:437.608600: __blkdev_put() right before sync_blockdev() for sdb
 udevd: 437.623901: blkdev_open() right before blkdev_get() for sdb
 dd:661.468451: __blkdev_put() right after sync_blockdev() for sdb
 udevd: 663.820426: blkdev_open() right after blkdev_get() for sdb

A simple fix for this is to realize that sync_blockdev() works fine if
you're not holding the mutex.  Also, it's not the end of the world if
you sync a little early (though it can have performance impacts).
Thus we can make a guess that we're going to need to do the sync and
then do it without holding the mutex.  We still do one last sync with
the mutex but it should be much, much faster.

With this, my hung task warnings for my test case are gone.
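
Heavily simplified, the resulting shape of __blkdev_put() is (a sketch, not
the actual fs/block_dev.c code):

	if (bdev->bd_openers == 1)		/* probably the last opener */
		sync_blockdev(bdev);		/* long sync, bd_mutex not held */

	mutex_lock_nested(&bdev->bd_mutex, for_part);
	/* the existing sync on the final put still runs under the mutex, but
	 * now it only flushes what was dirtied in the small race window */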

Signed-off-by: Douglas Anderson 
Reviewed-by: Guenter Roeck 
Reviewed-by: Christoph Hellwig 
Signed-off-by: Jens Axboe 
Signed-off-by: Sasha Levin 
---
 fs/block_dev.c | 10 ++
 1 file changed, 10 insertions(+)

diff --git a/fs/block_dev.c b/fs/block_dev.c
index 2dc9c73a4cb29..79272cdbe8277 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -1857,6 +1857,16 @@ static void __blkdev_put(struct block_device *bdev, 
fmode_t mode, int for_part)
struct gendisk *disk = bdev->bd_disk;
struct block_device *victim = NULL;
 
+   /*
+* Sync early if it looks like we're the last one.  If someone else
+* opens the block device between now and the decrement of bd_openers
+* then we did a sync that we didn't need to, but that's not the end
+* of the world and we want to avoid long (could be several minute)
+* syncs while holding the mutex.
+*/
+   if (bdev->bd_openers == 1)
+   sync_blockdev(bdev);
+
mutex_lock_nested(&bdev->bd_mutex, for_part);
if (for_part)
bdev->bd_part_count--;
-- 
2.25.1



[PATCH AUTOSEL 5.4 266/330] tty: serial: samsung: Correct clock selection logic

2020-09-17 Thread Sasha Levin
From: Jonathan Bakker 

[ Upstream commit 7d31676a8d91dd18e08853efd1cb26961a38c6a6 ]

Some variants of the samsung tty driver can pick which clock
to use for their baud rate generation.  In the DT conversion,
a default clock was selected to be used if a specific one wasn't
assigned and then a comparison of which clock rate worked better
was done.  Unfortunately, the comparison was implemented in such
a way that only the default clock was ever actually compared.
Fix this by iterating through all possible clocks, except when a
specific clock has already been picked via clk_sel (which is
only possible via board files).
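
A stand-alone sketch of the selection logic after the fix (made-up helper,
not the driver code): iterate every candidate clock unless a board file
pinned one via clk_sel, and keep the candidate with the smallest deviation.

	/* returns the index of the best candidate, or -1 if none is usable */
	static int pick_best_clk(const long *rates, int num_clks,
				 long wanted, unsigned int clk_sel)
	{
		long best_dev = 0;
		int i, best = -1;

		for (i = 0; i < num_clks; i++) {
			long dev;

			/* keep an explicitly selected clock if one was given */
			if (clk_sel && !(clk_sel & (1U << i)))
				continue;

			dev = rates[i] > wanted ? rates[i] - wanted
						: wanted - rates[i];
			if (best < 0 || dev < best_dev) {
				best = i;
				best_dev = dev;
			}
		}
		return best;
	}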

Signed-off-by: Jonathan Bakker 
Reviewed-by: Krzysztof Kozlowski 
Link: 
https://lore.kernel.org/r/bn6pr04mb06604e63833ea41837ebf77ba3...@bn6pr04mb0660.namprd04.prod.outlook.com
Signed-off-by: Greg Kroah-Hartman 
Signed-off-by: Sasha Levin 
---
 drivers/tty/serial/samsung.c | 8 
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c
index 71f99e9217592..c7683beb3412a 100644
--- a/drivers/tty/serial/samsung.c
+++ b/drivers/tty/serial/samsung.c
@@ -1187,14 +1187,14 @@ static unsigned int s3c24xx_serial_getclk(struct 
s3c24xx_uart_port *ourport,
struct s3c24xx_uart_info *info = ourport->info;
struct clk *clk;
unsigned long rate;
-   unsigned int cnt, baud, quot, clk_sel, best_quot = 0;
+   unsigned int cnt, baud, quot, best_quot = 0;
char clkname[MAX_CLK_NAME_LENGTH];
int calc_deviation, deviation = (1 << 30) - 1;
 
-   clk_sel = (ourport->cfg->clk_sel) ? ourport->cfg->clk_sel :
-   ourport->info->def_clk_sel;
for (cnt = 0; cnt < info->num_clks; cnt++) {
-   if (!(clk_sel & (1 << cnt)))
+   /* Keep selected clock if provided */
+   if (ourport->cfg->clk_sel &&
+   !(ourport->cfg->clk_sel & (1 << cnt)))
continue;
 
sprintf(clkname, "clk_uart_baud%d", cnt);
-- 
2.25.1



[PATCH AUTOSEL 5.4 286/330] btrfs: don't force read-only after error in drop snapshot

2020-09-17 Thread Sasha Levin
From: David Sterba 

[ Upstream commit 7c09c03091ac562ddca2b393e5d65c1d37da79f1 ]

Deleting a subvolume on a full filesystem leads to ENOSPC followed by a
forced read-only. This is not a transaction abort and the filesystem is
otherwise ok, so the error should be just propagated to the callers.

This is caused by unnecessary call to btrfs_handle_fs_error for all
errors, except EAGAIN. This does not make sense as the standard
transaction abort mechanism is in btrfs_drop_snapshot so all relevant
failures are handled.

Originally in commit cb1b69f4508a ("Btrfs: forced readonly when
btrfs_drop_snapshot() fails") there was no return value at all, so the
btrfs_std_error made some sense but once the error handling and
propagation has been implemented we don't need it anymore.

Signed-off-by: David Sterba 
Signed-off-by: Sasha Levin 
---
 fs/btrfs/extent-tree.c | 2 --
 1 file changed, 2 deletions(-)

diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 541497036cc24..60c3a03203fae 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -5429,8 +5429,6 @@ out:
 */
if (!for_reloc && !root_dropped)
btrfs_add_dead_root(root);
-   if (err && err != -EAGAIN)
-   btrfs_handle_fs_error(fs_info, err, NULL);
return err;
 }
 
-- 
2.25.1



[PATCH AUTOSEL 5.4 243/330] KVM: arm64: vgic-its: Fix memory leak on the error path of vgic_add_lpi()

2020-09-17 Thread Sasha Levin
From: Zenghui Yu 

[ Upstream commit 57bdb436ce869a45881d8aa4bc5dac8e072dd2b6 ]

If we're going to fail out of vgic_add_lpi(), let's make sure the
allocated vgic_irq memory is also freed. Though it seems that both
cases are unlikely to fail.

Signed-off-by: Zenghui Yu 
Signed-off-by: Marc Zyngier 
Link: https://lore.kernel.org/r/20200414030349.625-3-yuzeng...@huawei.com
Signed-off-by: Sasha Levin 
---
 virt/kvm/arm/vgic/vgic-its.c | 11 +--
 1 file changed, 9 insertions(+), 2 deletions(-)

diff --git a/virt/kvm/arm/vgic/vgic-its.c b/virt/kvm/arm/vgic/vgic-its.c
index f8ad7096555d7..35be0e2a46393 100644
--- a/virt/kvm/arm/vgic/vgic-its.c
+++ b/virt/kvm/arm/vgic/vgic-its.c
@@ -96,14 +96,21 @@ out_unlock:
 * We "cache" the configuration table entries in our struct vgic_irq's.
 * However we only have those structs for mapped IRQs, so we read in
 * the respective config data from memory here upon mapping the LPI.
+*
+* Should any of these fail, behave as if we couldn't create the LPI
+* by dropping the refcount and returning the error.
 */
ret = update_lpi_config(kvm, irq, NULL, false);
-   if (ret)
+   if (ret) {
+   vgic_put_irq(kvm, irq);
return ERR_PTR(ret);
+   }
 
ret = vgic_v3_lpi_sync_pending_status(kvm, irq);
-   if (ret)
+   if (ret) {
+   vgic_put_irq(kvm, irq);
return ERR_PTR(ret);
+   }
 
return irq;
 }
-- 
2.25.1



[PATCH AUTOSEL 5.4 265/330] drm/amd/powerplay: try to do a graceful shutdown on SW CTF

2020-09-17 Thread Sasha Levin
From: Evan Quan 

[ Upstream commit 9495220577416632675959caf122e968469ffd16 ]

Normally this (SW CTF) should not happen. And by doing a graceful
shutdown we can prevent further damage.

Signed-off-by: Evan Quan 
Reviewed-by: Alex Deucher 
Signed-off-by: Alex Deucher 
Signed-off-by: Sasha Levin 
---
 .../gpu/drm/amd/powerplay/hwmgr/smu_helper.c  | 21 +++
 drivers/gpu/drm/amd/powerplay/smu_v11_0.c |  7 +++
 2 files changed, 24 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c 
b/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c
index d09690fca4520..414added3d02c 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c
@@ -22,6 +22,7 @@
  */
 
 #include 
+#include 
 
 #include "hwmgr.h"
 #include "pp_debug.h"
@@ -593,12 +594,18 @@ int phm_irq_process(struct amdgpu_device *adev,
uint32_t src_id = entry->src_id;
 
if (client_id == AMDGPU_IRQ_CLIENTID_LEGACY) {
-   if (src_id == VISLANDS30_IV_SRCID_CG_TSS_THERMAL_LOW_TO_HIGH)
+   if (src_id == VISLANDS30_IV_SRCID_CG_TSS_THERMAL_LOW_TO_HIGH) {
pr_warn("GPU over temperature range detected on PCIe 
%d:%d.%d!\n",
PCI_BUS_NUM(adev->pdev->devfn),
PCI_SLOT(adev->pdev->devfn),
PCI_FUNC(adev->pdev->devfn));
-   else if (src_id == 
VISLANDS30_IV_SRCID_CG_TSS_THERMAL_HIGH_TO_LOW)
+   /*
+* SW CTF just occurred.
+* Try to do a graceful shutdown to prevent further 
damage.
+*/
+   dev_emerg(adev->dev, "System is going to shutdown due 
to SW CTF!\n");
+   orderly_poweroff(true);
+   } else if (src_id == 
VISLANDS30_IV_SRCID_CG_TSS_THERMAL_HIGH_TO_LOW)
pr_warn("GPU under temperature range detected on PCIe 
%d:%d.%d!\n",
PCI_BUS_NUM(adev->pdev->devfn),
PCI_SLOT(adev->pdev->devfn),
@@ -609,12 +616,18 @@ int phm_irq_process(struct amdgpu_device *adev,
PCI_SLOT(adev->pdev->devfn),
PCI_FUNC(adev->pdev->devfn));
} else if (client_id == SOC15_IH_CLIENTID_THM) {
-   if (src_id == 0)
+   if (src_id == 0) {
pr_warn("GPU over temperature range detected on PCIe 
%d:%d.%d!\n",
PCI_BUS_NUM(adev->pdev->devfn),
PCI_SLOT(adev->pdev->devfn),
PCI_FUNC(adev->pdev->devfn));
-   else
+   /*
+* SW CTF just occurred.
+* Try to do a graceful shutdown to prevent further 
damage.
+*/
+   dev_emerg(adev->dev, "System is going to shutdown due 
to SW CTF!\n");
+   orderly_poweroff(true);
+   } else
pr_warn("GPU under temperature range detected on PCIe 
%d:%d.%d!\n",
PCI_BUS_NUM(adev->pdev->devfn),
PCI_SLOT(adev->pdev->devfn),
diff --git a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c 
b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
index c4d8c52c6b9ca..6c4405622c9bb 100644
--- a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
+++ b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
@@ -23,6 +23,7 @@
 #include 
 #include 
 #include 
+#include 
 
 #include "pp_debug.h"
 #include "amdgpu.h"
@@ -1538,6 +1539,12 @@ static int smu_v11_0_irq_process(struct amdgpu_device 
*adev,
PCI_BUS_NUM(adev->pdev->devfn),
PCI_SLOT(adev->pdev->devfn),
PCI_FUNC(adev->pdev->devfn));
+   /*
+* SW CTF just occurred.
+* Try to do a graceful shutdown to prevent further 
damage.
+*/
+   dev_emerg(adev->dev, "System is going to shutdown due 
to SW CTF!\n");
+   orderly_poweroff(true);
break;
case THM_11_0__SRCID__THM_DIG_THERM_H2L:
pr_warn("GPU under temperature range detected on PCIe 
%d:%d.%d!\n",
-- 
2.25.1



[PATCH AUTOSEL 5.4 241/330] drivers: char: tlclk.c: Avoid data race between init and interrupt handler

2020-09-17 Thread Sasha Levin
From: Madhuparna Bhowmik 

[ Upstream commit 44b8fb6eaa7c3fb770bf1e37619cdb3902cca1fc ]

After registering the character device, the file operation callbacks can be
called. The open callback registers the interrupt handler.
Therefore the interrupt handler can execute in parallel with the rest of the
init function. To avoid such a data race, initialize the telclk_interrupt
variable and struct alarm_events before registering the character device.

Found by Linux Driver Verification project (linuxtesting.org).

Signed-off-by: Madhuparna Bhowmik 
Link: 
https://lore.kernel.org/r/20200417153451.1551-1-madhuparnabhowmi...@gmail.com
Signed-off-by: Greg Kroah-Hartman 
Signed-off-by: Sasha Levin 
---
 drivers/char/tlclk.c | 17 ++---
 1 file changed, 10 insertions(+), 7 deletions(-)

diff --git a/drivers/char/tlclk.c b/drivers/char/tlclk.c
index 6d81bb3bb503f..896a3550fba9f 100644
--- a/drivers/char/tlclk.c
+++ b/drivers/char/tlclk.c
@@ -777,17 +777,21 @@ static int __init tlclk_init(void)
 {
int ret;
 
+   telclk_interrupt = (inb(TLCLK_REG7) & 0x0f);
+
+   alarm_events = kzalloc( sizeof(struct tlclk_alarms), GFP_KERNEL);
+   if (!alarm_events) {
+   ret = -ENOMEM;
+   goto out1;
+   }
+
ret = register_chrdev(tlclk_major, "telco_clock", &tlclk_fops);
if (ret < 0) {
printk(KERN_ERR "tlclk: can't get major %d.\n", tlclk_major);
+   kfree(alarm_events);
return ret;
}
tlclk_major = ret;
-   alarm_events = kzalloc( sizeof(struct tlclk_alarms), GFP_KERNEL);
-   if (!alarm_events) {
-   ret = -ENOMEM;
-   goto out1;
-   }
 
/* Read telecom clock IRQ number (Set by BIOS) */
if (!request_region(TLCLK_BASE, 8, "telco_clock")) {
@@ -796,7 +800,6 @@ static int __init tlclk_init(void)
ret = -EBUSY;
goto out2;
}
-   telclk_interrupt = (inb(TLCLK_REG7) & 0x0f);
 
if (0x0F == telclk_interrupt ) { /* not MCPBL0010 ? */
printk(KERN_ERR "telclk_interrupt = 0x%x non-mcpbl0010 hw.\n",
@@ -837,8 +840,8 @@ out3:
release_region(TLCLK_BASE, 8);
 out2:
kfree(alarm_events);
-out1:
unregister_chrdev(tlclk_major, "telco_clock");
+out1:
return ret;
 }
 
-- 
2.25.1



[PATCH AUTOSEL 5.4 264/330] tipc: fix memory leak in service subscripting

2020-09-17 Thread Sasha Levin
From: Tuong Lien 

[ Upstream commit 0771d7df819284d46cf5cfb57698621b503ec17f ]

Upon receipt of a service subscription request from the user via a topology
connection, one 'sub' object will be allocated in the kernel, so that it
will be able to send an event of the service, if any, back to the user.
Also, in case of any failure, the connection will be shut down and all the
pertaining 'sub' objects will be freed.

However, there is a race condition as follows resulting in memory leak:

   receive-work   connectionsend-work
  |||
sub-1 |<--//---||
sub-2 |<--//---||
  ||<---| evt for sub-x
sub-3 |<--//---||
  :::
  :::
  |   /||
  |   |* peer closed|
  |   |||
  |   ||<---X---| evt for sub-y
  |   ||<===|
sub-n |<--/Xshutdown|
-> orphan | |

That is, the 'receive-work' may get the last subscription request while
the 'send-work' is shutting down the connection due to peer close.

We hold a 'lock' on the connection, so the two actions cannot be carried
out simultaneously. If the last subscription, e.g. 'sub-n', is allocated
before the 'send-work' closes the connection, there is no issue at all and
the 'sub' objects are freed. Otherwise, that last subscription becomes an
orphan, since the connection has been closed and all references released.

This commit fixes the issue by simply adding one test of whether the
connection is still in 'connected' state right after we obtain the connection
lock: if so, a subscription object is created as usual; otherwise the request
is ignored.
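
In other words, the receive path now performs the check under the socket
callback lock; a sketch of the resulting logic, mirroring the hunk below:

    read_lock_bh(&sk->sk_callback_lock);
    /* the send-work may have shut the connection down in the meantime */
    if (likely(connected(con)))
            ret = tipc_conn_rcv_sub(srv, con, &s);
    read_unlock_bh(&sk->sk_callback_lock);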

Acked-by: Ying Xue 
Acked-by: Jon Maloy 
Reported-by: Thang Ngo 
Signed-off-by: Tuong Lien 
Signed-off-by: David S. Miller 
Signed-off-by: Sasha Levin 
---
 net/tipc/topsrv.c | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/net/tipc/topsrv.c b/net/tipc/topsrv.c
index 73dbed0c4b6b8..931c426673c02 100644
--- a/net/tipc/topsrv.c
+++ b/net/tipc/topsrv.c
@@ -400,7 +400,9 @@ static int tipc_conn_rcv_from_sock(struct tipc_conn *con)
return -EWOULDBLOCK;
if (ret == sizeof(s)) {
read_lock_bh(>sk_callback_lock);
-   ret = tipc_conn_rcv_sub(srv, con, );
+   /* RACE: the connection can be closed in the meantime */
+   if (likely(connected(con)))
+   ret = tipc_conn_rcv_sub(srv, con, );
read_unlock_bh(>sk_callback_lock);
if (!ret)
return 0;
-- 
2.25.1



[PATCH AUTOSEL 5.4 246/330] staging:r8188eu: avoid skb_clone for amsdu to msdu conversion

2020-09-17 Thread Sasha Levin
From: Ivan Safonov 

[ Upstream commit 628cbd971a927abe6388d44320e351c337b331e4 ]

skb clones share the same data buffer,
so the tail of one skb is corrupted by the beginning of the next skb.
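
Roughly, the difference is as follows (a sketch, not the driver code; orig,
pdata, len and pad are illustrative):

    /* skb_clone() duplicates only the metadata; both skbs share one data
     * buffer, so writing through the clone can clobber data owned by orig */
    struct sk_buff *clone = skb_clone(orig, GFP_ATOMIC);

    /* an independently allocated skb gets its own data buffer */
    struct sk_buff *copy = dev_alloc_skb(len + pad);
    if (copy) {
            skb_reserve(copy, pad);
            skb_put_data(copy, pdata, len);
    }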

Signed-off-by: Ivan Safonov 
Link: https://lore.kernel.org/r/20200423191404.12028-1-insafo...@gmail.com
Signed-off-by: Greg Kroah-Hartman 
Signed-off-by: Sasha Levin 
---
 drivers/staging/rtl8188eu/core/rtw_recv.c | 19 ++-
 1 file changed, 6 insertions(+), 13 deletions(-)

diff --git a/drivers/staging/rtl8188eu/core/rtw_recv.c 
b/drivers/staging/rtl8188eu/core/rtw_recv.c
index d4278361e0028..a036ef104198e 100644
--- a/drivers/staging/rtl8188eu/core/rtw_recv.c
+++ b/drivers/staging/rtl8188eu/core/rtw_recv.c
@@ -1525,21 +1525,14 @@ static int amsdu_to_msdu(struct adapter *padapter, 
struct recv_frame *prframe)
 
/* Allocate new skb for releasing to upper layer */
sub_skb = dev_alloc_skb(nSubframe_Length + 12);
-   if (sub_skb) {
-   skb_reserve(sub_skb, 12);
-   skb_put_data(sub_skb, pdata, nSubframe_Length);
-   } else {
-   sub_skb = skb_clone(prframe->pkt, GFP_ATOMIC);
-   if (sub_skb) {
-   sub_skb->data = pdata;
-   sub_skb->len = nSubframe_Length;
-   skb_set_tail_pointer(sub_skb, nSubframe_Length);
-   } else {
-   DBG_88E("skb_clone() Fail!!! , 
nr_subframes=%d\n", nr_subframes);
-   break;
-   }
+   if (!sub_skb) {
+   DBG_88E("dev_alloc_skb() Fail!!! , nr_subframes=%d\n", 
nr_subframes);
+   break;
}
 
+   skb_reserve(sub_skb, 12);
+   skb_put_data(sub_skb, pdata, nSubframe_Length);
+
subframes[nr_subframes++] = sub_skb;
 
if (nr_subframes >= MAX_SUBFRAME_COUNT) {
-- 
2.25.1



[PATCH AUTOSEL 5.4 236/330] serial: uartps: Wait for tx_empty in console setup

2020-09-17 Thread Sasha Levin
From: Raviteja Narayanam 

[ Upstream commit 42e11948ddf68b9f799cad8c0ddeab0a39da33e8 ]

On some platforms, the log is corrupted while the console is being
registered. It is observed that when set_termios is called, there
are still some bytes in the FIFO to be transmitted.

So, wait for tx_empty inside cdns_uart_console_setup before calling
set_termios.
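
The wait is a simple bounded busy-poll; a sketch of its shape, mirroring the
hunk below:

    unsigned long time_out = jiffies + usecs_to_jiffies(TX_TIMEOUT);

    while (time_before(jiffies, time_out) &&
           cdns_uart_tx_empty(port) != TIOCSER_TEMT)
            cpu_relax();    /* give the FIFO time to drain */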

Signed-off-by: Raviteja Narayanam 
Reviewed-by: Shubhrajyoti Datta 
Link: 
https://lore.kernel.org/r/1586413563-29125-2-git-send-email-raviteja.naraya...@xilinx.com
Signed-off-by: Greg Kroah-Hartman 
Signed-off-by: Sasha Levin 
---
 drivers/tty/serial/xilinx_uartps.c | 8 
 1 file changed, 8 insertions(+)

diff --git a/drivers/tty/serial/xilinx_uartps.c 
b/drivers/tty/serial/xilinx_uartps.c
index 8948970f795e6..9359c80fbb9f5 100644
--- a/drivers/tty/serial/xilinx_uartps.c
+++ b/drivers/tty/serial/xilinx_uartps.c
@@ -1248,6 +1248,7 @@ static int cdns_uart_console_setup(struct console *co, 
char *options)
int bits = 8;
int parity = 'n';
int flow = 'n';
+   unsigned long time_out;
 
if (!port->membase) {
pr_debug("console on " CDNS_UART_TTY_NAME "%i not present\n",
@@ -1258,6 +1259,13 @@ static int cdns_uart_console_setup(struct console *co, 
char *options)
if (options)
uart_parse_options(options, , , , );
 
+   /* Wait for tx_empty before setting up the console */
+   time_out = jiffies + usecs_to_jiffies(TX_TIMEOUT);
+
+   while (time_before(jiffies, time_out) &&
+  cdns_uart_tx_empty(port) != TIOCSER_TEMT)
+   cpu_relax();
+
return uart_set_options(port, co, baud, parity, bits, flow);
 }
 
-- 
2.25.1



[PATCH AUTOSEL 5.4 235/330] scsi: qedi: Fix termination timeouts in session logout

2020-09-17 Thread Sasha Levin
From: Nilesh Javali 

[ Upstream commit b9b97e6903032ec56e6dcbe137a9819b74a17fea ]

The destroy connection ramrod timed out during session logout.  Fix the
wait delay for graceful vs abortive termination as per the FW requirements.

Link: https://lore.kernel.org/r/20200408064332.19377-7-mrangan...@marvell.com
Reviewed-by: Lee Duncan 
Signed-off-by: Nilesh Javali 
Signed-off-by: Manish Rangankar 
Signed-off-by: Martin K. Petersen 
Signed-off-by: Sasha Levin 
---
 drivers/scsi/qedi/qedi_iscsi.c | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/drivers/scsi/qedi/qedi_iscsi.c b/drivers/scsi/qedi/qedi_iscsi.c
index 0f57c80734061..0f2622a48311c 100644
--- a/drivers/scsi/qedi/qedi_iscsi.c
+++ b/drivers/scsi/qedi/qedi_iscsi.c
@@ -1062,6 +1062,9 @@ static void qedi_ep_disconnect(struct iscsi_endpoint *ep)
break;
}
 
+   if (!abrt_conn)
+   wait_delay += qedi->pf_params.iscsi_pf_params.two_msl_timer;
+
qedi_ep->state = EP_STATE_DISCONN_START;
ret = qedi_ops->destroy_conn(qedi->cdev, qedi_ep->handle, abrt_conn);
if (ret) {
-- 
2.25.1



[PATCH AUTOSEL 5.4 245/330] scsi: aacraid: Fix error handling paths in aac_probe_one()

2020-09-17 Thread Sasha Levin
From: Christophe JAILLET 

[ Upstream commit f7854c382240c1686900b2f098b36430c6f5047e ]

If 'scsi_host_alloc()' or 'kcalloc()' fails, 'error' is known to be 0. Set
it explicitly to -ENOMEM before branching to the error handling path.

While at it, remove 2 useless assignments to 'error'. These values are
overwritten a few lines later.
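
The pattern applied in both hunks is simply (a sketch of one of them):

    shost = scsi_host_alloc(&aac_driver_template, sizeof(struct aac_dev));
    if (!shost) {
            error = -ENOMEM;        /* was silently left at 0 before this fix */
            goto out_disable_pdev;
    }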

Link: 
https://lore.kernel.org/r/20200412094039.8822-1-christophe.jail...@wanadoo.fr
Signed-off-by: Christophe JAILLET 
Signed-off-by: Martin K. Petersen 
Signed-off-by: Sasha Levin 
---
 drivers/scsi/aacraid/linit.c | 12 
 1 file changed, 8 insertions(+), 4 deletions(-)

diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index 514aed38b5afe..1035f947f1bcf 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -1607,7 +1607,7 @@ static int aac_probe_one(struct pci_dev *pdev, const 
struct pci_device_id *id)
struct Scsi_Host *shost;
struct aac_dev *aac;
struct list_head *insert = _devices;
-   int error = -ENODEV;
+   int error;
int unique_id = 0;
u64 dmamask;
int mask_bits = 0;
@@ -1632,7 +1632,6 @@ static int aac_probe_one(struct pci_dev *pdev, const 
struct pci_device_id *id)
error = pci_enable_device(pdev);
if (error)
goto out;
-   error = -ENODEV;
 
if (!(aac_drivers[index].quirks & AAC_QUIRK_SRC)) {
error = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
@@ -1664,8 +1663,10 @@ static int aac_probe_one(struct pci_dev *pdev, const 
struct pci_device_id *id)
pci_set_master(pdev);
 
shost = scsi_host_alloc(_driver_template, sizeof(struct aac_dev));
-   if (!shost)
+   if (!shost) {
+   error = -ENOMEM;
goto out_disable_pdev;
+   }
 
shost->irq = pdev->irq;
shost->unique_id = unique_id;
@@ -1690,8 +1691,11 @@ static int aac_probe_one(struct pci_dev *pdev, const 
struct pci_device_id *id)
aac->fibs = kcalloc(shost->can_queue + AAC_NUM_MGT_FIB,
sizeof(struct fib),
GFP_KERNEL);
-   if (!aac->fibs)
+   if (!aac->fibs) {
+   error = -ENOMEM;
goto out_free_host;
+   }
+
spin_lock_init(>fib_lock);
 
mutex_init(>ioctl_mutex);
-- 
2.25.1



[PATCH AUTOSEL 5.4 244/330] net: openvswitch: use u64 for meter bucket

2020-09-17 Thread Sasha Levin
From: Tonghao Zhang 

[ Upstream commit e57358873bb5d6caa882b9684f59140912b37dde ]

When setting the meter rate to 4+Gbps, there is an
overflow and the meters don't work as expected.
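
A rough illustration of the overflow, with made-up numbers: for a ~4.3 Gbps
meter the rate is about 4,300,000 (in kbps), so (burst_size + rate) * 1000
exceeds the 32-bit range and wraps, while forcing 64-bit arithmetic does not:

    u32 rate = 4300000, burst = 4300000;    /* illustrative values */
    u32 bad  = (burst + rate) * 1000;       /* wraps past UINT_MAX */
    u64 good = (burst + rate) * 1000ULL;    /* 8,600,000,000, as intended */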

Cc: Pravin B Shelar 
Cc: Andy Zhou 
Signed-off-by: Tonghao Zhang 
Acked-by: Pravin B Shelar 
Signed-off-by: David S. Miller 
Signed-off-by: Sasha Levin 
---
 net/openvswitch/meter.c | 2 +-
 net/openvswitch/meter.h | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/net/openvswitch/meter.c b/net/openvswitch/meter.c
index 3323b79ff548d..b10734f18bbd6 100644
--- a/net/openvswitch/meter.c
+++ b/net/openvswitch/meter.c
@@ -251,7 +251,7 @@ static struct dp_meter *dp_meter_create(struct nlattr **a)
 *
 * Start with a full bucket.
 */
-   band->bucket = (band->burst_size + band->rate) * 1000;
+   band->bucket = (band->burst_size + band->rate) * 1000ULL;
band_max_delta_t = band->bucket / band->rate;
if (band_max_delta_t > meter->max_delta_t)
meter->max_delta_t = band_max_delta_t;
diff --git a/net/openvswitch/meter.h b/net/openvswitch/meter.h
index f645913870bd2..2e3fd6f1d7ebe 100644
--- a/net/openvswitch/meter.h
+++ b/net/openvswitch/meter.h
@@ -23,7 +23,7 @@ struct dp_meter_band {
u32 type;
u32 rate;
u32 burst_size;
-   u32 bucket; /* 1/1000 packets, or in bits */
+   u64 bucket; /* 1/1000 packets, or in bits */
struct ovs_flow_stats stats;
 };
 
-- 
2.25.1



[PATCH AUTOSEL 5.4 234/330] ALSA: hda: Skip controller resume if not needed

2020-09-17 Thread Sasha Levin
From: Takashi Iwai 

[ Upstream commit c4c8dd6ef807663e42a5f04ea77cd62029eb99fa ]

The HD-audio controller does system-suspend and resume operations by
directly calling its helpers __azx_runtime_suspend() and
__azx_runtime_resume().  However, in general, we don't have to resume
always the device fully at the system resume; typically, if a device
has been runtime-suspended, we can leave it to runtime resume.

Usually for achieving this, the driver would call
pm_runtime_force_suspend() and pm_runtime_force_resume() pairs in the
system suspend and resume ops.  Unfortunately, this doesn't work for
the resume path in our case.  For handling the jack detection at the
system resume, a child codec device may need the (literally) forcible
resume even if it's been runtime-suspended, and for that, the
controller device must also be resumed even if it's been suspended.

This patch is an attempt to improve the situation.  It replaces the
direct __azx_runtime_suspend()/_resume() calls with
pm_runtime_force_suspend() and pm_runtime_force_resume() with a slight
trick as we've done for the codec side.  More exactly:

- azx_has_pm_runtime() check is dropped from azx_runtime_suspend() and
  azx_runtime_resume(), so that it can be properly executed from the
  system-suspend/resume path

- The WAKEEN handling depends on the card's power state now; it's set
  and cleared only for the runtime-suspend

- azx_resume() checks whether any codec may need the forcible resume
  beforehand.  If the forcible resume is required, it does a temporary
  PM refcount up/down to actually trigger the runtime resume (see the
  sketch after this list).

- A new helper function, hda_codec_need_resume(), is introduced for
  checking whether the codec needs a forcible runtime-resume, and the
  existing code is rewritten with that.
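
The refcount dance from the azx_resume() bullet above, in isolation (a sketch
mirroring the hunk below):

    if (forced_resume)
            pm_runtime_get_noresume(dev);   /* pin the device as active */
    pm_runtime_force_resume(dev);
    if (forced_resume)
            pm_runtime_put(dev);            /* drop the temporary reference */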

BugLink: https://bugzilla.kernel.org/show_bug.cgi?id=207043
Link: https://lore.kernel.org/r/20200413082034.25166-6-ti...@suse.de
Signed-off-by: Takashi Iwai 
Signed-off-by: Sasha Levin 
---
 include/sound/hda_codec.h |  5 +
 sound/pci/hda/hda_codec.c |  2 +-
 sound/pci/hda/hda_intel.c | 38 +++---
 3 files changed, 33 insertions(+), 12 deletions(-)

diff --git a/include/sound/hda_codec.h b/include/sound/hda_codec.h
index 9a0393cf024c2..65c056ce91128 100644
--- a/include/sound/hda_codec.h
+++ b/include/sound/hda_codec.h
@@ -494,6 +494,11 @@ void snd_hda_update_power_acct(struct hda_codec *codec);
 static inline void snd_hda_set_power_save(struct hda_bus *bus, int delay) {}
 #endif
 
+static inline bool hda_codec_need_resume(struct hda_codec *codec)
+{
+   return !codec->relaxed_resume && codec->jacktbl.used;
+}
+
 #ifdef CONFIG_SND_HDA_PATCH_LOADER
 /*
  * patch firmware
diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
index 103011e7285a3..12da263fb02ba 100644
--- a/sound/pci/hda/hda_codec.c
+++ b/sound/pci/hda/hda_codec.c
@@ -2958,7 +2958,7 @@ static int hda_codec_runtime_resume(struct device *dev)
 static int hda_codec_force_resume(struct device *dev)
 {
struct hda_codec *codec = dev_to_hda_codec(dev);
-   bool forced_resume = !codec->relaxed_resume && codec->jacktbl.used;
+   bool forced_resume = hda_codec_need_resume(codec);
int ret;
 
/* The get/put pair below enforces the runtime resume even if the
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index 7353d2ec359ae..a6e8aaa091c7d 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -1025,7 +1025,7 @@ static int azx_suspend(struct device *dev)
chip = card->private_data;
bus = azx_bus(chip);
snd_power_change_state(card, SNDRV_CTL_POWER_D3hot);
-   __azx_runtime_suspend(chip);
+   pm_runtime_force_suspend(dev);
if (bus->irq >= 0) {
free_irq(bus->irq, chip);
bus->irq = -1;
@@ -1041,7 +1041,9 @@ static int azx_suspend(struct device *dev)
 static int azx_resume(struct device *dev)
 {
struct snd_card *card = dev_get_drvdata(dev);
+   struct hda_codec *codec;
struct azx *chip;
+   bool forced_resume = false;
 
if (!azx_is_pm_ready(card))
return 0;
@@ -1052,7 +1054,20 @@ static int azx_resume(struct device *dev)
chip->msi = 0;
if (azx_acquire_irq(chip, 1) < 0)
return -EIO;
-   __azx_runtime_resume(chip, false);
+
+   /* check for the forced resume */
+   list_for_each_codec(codec, >bus) {
+   if (hda_codec_need_resume(codec)) {
+   forced_resume = true;
+   break;
+   }
+   }
+
+   if (forced_resume)
+   pm_runtime_get_noresume(dev);
+   pm_runtime_force_resume(dev);
+   if (forced_resume)
+   pm_runtime_put(dev);
snd_power_change_state(card, SNDRV_CTL_POWER_D0);
 
trace_azx_resume(chip);
@@ -1099,12 +1114,12 @@ static int azx_runtime_suspend(struct device *dev)
if 

[PATCH AUTOSEL 5.4 231/330] workqueue: Remove the warning in wq_worker_sleeping()

2020-09-17 Thread Sasha Levin
From: Sebastian Andrzej Siewior 

[ Upstream commit 62849a9612924a655c67cf6962920544aa5c20db ]

The kernel test robot triggered a warning with the following race:
              task-ctx A                       interrupt-ctx B
 worker
  -> process_one_work()
    -> work_item()
      -> schedule();
         -> sched_submit_work()
           -> wq_worker_sleeping()
             -> ->sleeping = 1
               atomic_dec_and_test(nr_running)
         __schedule();                         *interrupt*
                                               async_page_fault()
                                               -> local_irq_enable();
                                               -> schedule();
                                                  -> sched_submit_work()
                                                    -> wq_worker_sleeping()
                                                      -> if (WARN_ON(->sleeping)) return
                                                  -> __schedule()
                                                    ->  sched_update_worker()
                                                      -> wq_worker_running()
                                                         -> atomic_inc(nr_running);
                                                         -> ->sleeping = 0;

      ->  sched_update_worker()
        -> wq_worker_running()
             if (!->sleeping) return

In this context the warning is pointless; everything is fine.
An interrupt before wq_worker_sleeping() will perform the ->sleeping
assignment (0 -> 1 -> 0) twice.
An interrupt after wq_worker_sleeping() will trigger the warning, and
nr_running will be decremented (by A) and incremented once (only by B; A
will skip it). This is the case until ->sleeping is zeroed again in
wq_worker_running().

Remove the WARN statement because this condition may happen. Document
that preemption around wq_worker_sleeping() needs to be disabled to
protect ->sleeping and not just as an optimisation.
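
The documented calling convention boils down to the scheduler side doing
(as in the sched_submit_work() hunk below):

    if (tsk->flags & PF_WQ_WORKER) {
            preempt_disable();              /* protects worker->sleeping */
            wq_worker_sleeping(tsk);
            preempt_enable_no_resched();
    }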

Fixes: 6d25be5782e48 ("sched/core, workqueues: Distangle worker accounting from 
rq lock")
Reported-by: kernel test robot 
Signed-off-by: Sebastian Andrzej Siewior 
Signed-off-by: Peter Zijlstra (Intel) 
Signed-off-by: Ingo Molnar 
Cc: Tejun Heo 
Link: https://lkml.kernel.org/r/20200327074308.GY11705@shao2-debian
Signed-off-by: Sasha Levin 
---
 kernel/sched/core.c | 3 ++-
 kernel/workqueue.c  | 6 --
 2 files changed, 6 insertions(+), 3 deletions(-)

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 352239c411a44..79ce22de44095 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -4199,7 +4199,8 @@ static inline void sched_submit_work(struct task_struct 
*tsk)
 * it wants to wake up a task to maintain concurrency.
 * As this function is called inside the schedule() context,
 * we disable preemption to avoid it calling schedule() again
-* in the possible wakeup of a kworker.
+* in the possible wakeup of a kworker and because wq_worker_sleeping()
+* requires it.
 */
if (tsk->flags & PF_WQ_WORKER) {
preempt_disable();
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 1a0c224af6fb3..4aa268582a225 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -864,7 +864,8 @@ void wq_worker_running(struct task_struct *task)
  * @task: task going to sleep
  *
  * This function is called from schedule() when a busy worker is
- * going to sleep.
+ * going to sleep. Preemption needs to be disabled to protect ->sleeping
+ * assignment.
  */
 void wq_worker_sleeping(struct task_struct *task)
 {
@@ -881,7 +882,8 @@ void wq_worker_sleeping(struct task_struct *task)
 
pool = worker->pool;
 
-   if (WARN_ON_ONCE(worker->sleeping))
+   /* Return if preempted before wq_worker_running() was reached */
+   if (worker->sleeping)
return;
 
worker->sleeping = 1;
-- 
2.25.1



[PATCH AUTOSEL 5.4 211/330] IB/iser: Always check sig MR before putting it to the free pool

2020-09-17 Thread Sasha Levin
From: Sergey Gorenko 

[ Upstream commit 26e28deb813eed908cf31a6052870b6493ec0e86 ]

libiscsi calls the check_protection transport handler only if a SCSI-Response
is received. So the handler is never called if the iSCSI task is completed
for some other reason like a timeout or error handling, and this behavior
looks correct. But iSER does not handle this case properly, because it puts a
non-checked signature MR back to the free pool. An error then occurs when the
MR is reused, because it is not allowed to invalidate a signature MR without
checking it.

This commit adds an extra check to iser_unreg_mem_fastreg(), which is a
part of the task cleanup flow. Now the signature MR is checked there if it
is needed.
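
A sketch of the rule enforced in the cleanup path (reg_desc_put stands in for
the driver's registration-descriptor release helper):

    if (desc->sig_protected) {
            /* a signature MR must be checked before it can be invalidated */
            desc->sig_protected = false;
            ib_check_mr_status(desc->rsc.sig_mr, IB_MR_CHECK_SIG_STATUS,
                               &mr_status);
    }
    reg_desc_put(ib_conn, desc);            /* only now is reuse safe */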

Link: https://lore.kernel.org/r/20200325151210.1548-1-serge...@mellanox.com
Signed-off-by: Sergey Gorenko 
Reviewed-by: Max Gurtovoy 
Signed-off-by: Jason Gunthorpe 
Signed-off-by: Sasha Levin 
---
 drivers/infiniband/ulp/iser/iser_memory.c | 21 ++---
 1 file changed, 18 insertions(+), 3 deletions(-)

diff --git a/drivers/infiniband/ulp/iser/iser_memory.c 
b/drivers/infiniband/ulp/iser/iser_memory.c
index 2cc89a9b9e9bb..ea8e611397a3b 100644
--- a/drivers/infiniband/ulp/iser/iser_memory.c
+++ b/drivers/infiniband/ulp/iser/iser_memory.c
@@ -292,12 +292,27 @@ void iser_unreg_mem_fastreg(struct iscsi_iser_task 
*iser_task,
 {
struct iser_device *device = iser_task->iser_conn->ib_conn.device;
struct iser_mem_reg *reg = _task->rdma_reg[cmd_dir];
+   struct iser_fr_desc *desc;
+   struct ib_mr_status mr_status;
 
-   if (!reg->mem_h)
+   desc = reg->mem_h;
+   if (!desc)
return;
 
-   device->reg_ops->reg_desc_put(_task->iser_conn->ib_conn,
-reg->mem_h);
+   /*
+* The signature MR cannot be invalidated and reused without checking.
+* libiscsi calls the check_protection transport handler only if
+* SCSI-Response is received. And the signature MR is not checked if
+* the task is completed for some other reason like a timeout or error
+* handling. That's why we must check the signature MR here before
+* putting it to the free pool.
+*/
+   if (unlikely(desc->sig_protected)) {
+   desc->sig_protected = false;
+   ib_check_mr_status(desc->rsc.sig_mr, IB_MR_CHECK_SIG_STATUS,
+  _status);
+   }
+   device->reg_ops->reg_desc_put(_task->iser_conn->ib_conn, desc);
reg->mem_h = NULL;
 }
 
-- 
2.25.1



[PATCH AUTOSEL 5.4 200/330] selftests/ptrace: add test cases for dead-locks

2020-09-17 Thread Sasha Levin
From: Bernd Edlinger 

[ Upstream commit 2de4e82318c7f9d34f4b08599a612cd4cd10bf0b ]

This adds test cases for ptrace deadlocks.

Additionally fixes a compile problem in get_syscall_info.c,
observed with gcc-4.8.4:

get_syscall_info.c: In function 'get_syscall_info':
get_syscall_info.c:93:3: error: 'for' loop initial declarations are only
 allowed in C99 mode
   for (unsigned int i = 0; i < ARRAY_SIZE(args); ++i) {
   ^
get_syscall_info.c:93:3: note: use option -std=c99 or -std=gnu99 to compile
   your code

Signed-off-by: Bernd Edlinger 
Reviewed-by: Kees Cook 
Signed-off-by: Eric W. Biederman 
Signed-off-by: Sasha Levin 
---
 tools/testing/selftests/ptrace/Makefile   |  4 +-
 tools/testing/selftests/ptrace/vmaccess.c | 86 +++
 2 files changed, 88 insertions(+), 2 deletions(-)
 create mode 100644 tools/testing/selftests/ptrace/vmaccess.c

diff --git a/tools/testing/selftests/ptrace/Makefile 
b/tools/testing/selftests/ptrace/Makefile
index c0b7f89f09300..2f1f532c39dbc 100644
--- a/tools/testing/selftests/ptrace/Makefile
+++ b/tools/testing/selftests/ptrace/Makefile
@@ -1,6 +1,6 @@
 # SPDX-License-Identifier: GPL-2.0-only
-CFLAGS += -iquote../../../../include/uapi -Wall
+CFLAGS += -std=c99 -pthread -iquote../../../../include/uapi -Wall
 
-TEST_GEN_PROGS := get_syscall_info peeksiginfo
+TEST_GEN_PROGS := get_syscall_info peeksiginfo vmaccess
 
 include ../lib.mk
diff --git a/tools/testing/selftests/ptrace/vmaccess.c 
b/tools/testing/selftests/ptrace/vmaccess.c
new file mode 100644
index 0..4db327b445862
--- /dev/null
+++ b/tools/testing/selftests/ptrace/vmaccess.c
@@ -0,0 +1,86 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (c) 2020 Bernd Edlinger 
+ * All rights reserved.
+ *
+ * Check whether /proc/$pid/mem can be accessed without causing deadlocks
+ * when de_thread is blocked with ->cred_guard_mutex held.
+ */
+
+#include "../kselftest_harness.h"
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+
+static void *thread(void *arg)
+{
+   ptrace(PTRACE_TRACEME, 0, 0L, 0L);
+   return NULL;
+}
+
+TEST(vmaccess)
+{
+   int f, pid = fork();
+   char mm[64];
+
+   if (!pid) {
+   pthread_t pt;
+
+   pthread_create(, NULL, thread, NULL);
+   pthread_join(pt, NULL);
+   execlp("true", "true", NULL);
+   }
+
+   sleep(1);
+   sprintf(mm, "/proc/%d/mem", pid);
+   f = open(mm, O_RDONLY);
+   ASSERT_GE(f, 0);
+   close(f);
+   f = kill(pid, SIGCONT);
+   ASSERT_EQ(f, 0);
+}
+
+TEST(attach)
+{
+   int s, k, pid = fork();
+
+   if (!pid) {
+   pthread_t pt;
+
+   pthread_create(, NULL, thread, NULL);
+   pthread_join(pt, NULL);
+   execlp("sleep", "sleep", "2", NULL);
+   }
+
+   sleep(1);
+   k = ptrace(PTRACE_ATTACH, pid, 0L, 0L);
+   ASSERT_EQ(errno, EAGAIN);
+   ASSERT_EQ(k, -1);
+   k = waitpid(-1, , WNOHANG);
+   ASSERT_NE(k, -1);
+   ASSERT_NE(k, 0);
+   ASSERT_NE(k, pid);
+   ASSERT_EQ(WIFEXITED(s), 1);
+   ASSERT_EQ(WEXITSTATUS(s), 0);
+   sleep(1);
+   k = ptrace(PTRACE_ATTACH, pid, 0L, 0L);
+   ASSERT_EQ(k, 0);
+   k = waitpid(-1, , 0);
+   ASSERT_EQ(k, pid);
+   ASSERT_EQ(WIFSTOPPED(s), 1);
+   ASSERT_EQ(WSTOPSIG(s), SIGSTOP);
+   k = ptrace(PTRACE_DETACH, pid, 0L, 0L);
+   ASSERT_EQ(k, 0);
+   k = waitpid(-1, , 0);
+   ASSERT_EQ(k, pid);
+   ASSERT_EQ(WIFEXITED(s), 1);
+   ASSERT_EQ(WEXITSTATUS(s), 0);
+   k = waitpid(-1, NULL, 0);
+   ASSERT_EQ(k, -1);
+   ASSERT_EQ(errno, ECHILD);
+}
+
+TEST_HARNESS_MAIN
-- 
2.25.1



[PATCH AUTOSEL 5.4 223/330] drm/amdgpu/vcn2.0: stall DPG when WPTR/RPTR reset

2020-09-17 Thread Sasha Levin
From: James Zhu 

[ Upstream commit ef563ff403404ef2f234abe79bdd9f04ab6481c9 ]

Add vcn dpg hardware synchronization to fix a race condition
between the vcn driver and the hardware.

Signed-off-by: James Zhu 
Reviewed-by: Leo Liu 
Signed-off-by: Alex Deucher 
Signed-off-by: Sasha Levin 
---
 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c | 16 
 1 file changed, 16 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 
b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
index 36ad0c0e8efbc..cd2cbe760e883 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
@@ -1026,6 +1026,10 @@ static int vcn_v2_0_start_dpg_mode(struct amdgpu_device 
*adev, bool indirect)
tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_CNTL, tmp);
 
+   /* Stall DPG before WPTR/RPTR reset */
+   WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_POWER_STATUS),
+   UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK,
+   ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);
/* set the write pointer delay */
WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR_CNTL, 0);
 
@@ -1048,6 +1052,9 @@ static int vcn_v2_0_start_dpg_mode(struct amdgpu_device 
*adev, bool indirect)
WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
lower_32_bits(ring->wptr));
 
+   /* Unstall DPG */
+   WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_POWER_STATUS),
+   0, ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);
return 0;
 }
 
@@ -1357,8 +1364,13 @@ static int vcn_v2_0_pause_dpg_mode(struct amdgpu_device 
*adev,
   UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK,
   
UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK, ret_code);
 
+   /* Stall DPG before WPTR/RPTR reset */
+   WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_POWER_STATUS),
+  UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK,
+  ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);
/* Restore */
ring = >vcn.inst->ring_enc[0];
+   ring->wptr = 0;
WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO, 
ring->gpu_addr);
WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI, 
upper_32_bits(ring->gpu_addr));
WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE, 
ring->ring_size / 4);
@@ -1366,6 +1378,7 @@ static int vcn_v2_0_pause_dpg_mode(struct amdgpu_device 
*adev,
WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR, 
lower_32_bits(ring->wptr));
 
ring = >vcn.inst->ring_enc[1];
+   ring->wptr = 0;
WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO2, 
ring->gpu_addr);
WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI2, 
upper_32_bits(ring->gpu_addr));
WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE2, 
ring->ring_size / 4);
@@ -1374,6 +1387,9 @@ static int vcn_v2_0_pause_dpg_mode(struct amdgpu_device 
*adev,
 
WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
   RREG32_SOC15(UVD, 0, mmUVD_SCRATCH2) 
& 0x7FFF);
+   /* Unstall DPG */
+   WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_POWER_STATUS),
+  0, ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);
 
SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
   UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON,
-- 
2.25.1



[PATCH AUTOSEL 5.4 201/330] kernel/kcmp.c: Use new infrastructure to fix deadlocks in execve

2020-09-17 Thread Sasha Levin
From: Bernd Edlinger 

[ Upstream commit 454e3126cb842388e22df6b3ac3da44062c00765 ]

This changes kcmp_epoll_target to use the new exec_update_mutex
instead of cred_guard_mutex.

This should be safe, as the credentials are only used for reading,
and furthermore ->mm and ->sighand are updated on execve,
but only under the new exec_update_mutex.

Signed-off-by: Bernd Edlinger 
Signed-off-by: Eric W. Biederman 
Signed-off-by: Sasha Levin 
---
 kernel/kcmp.c | 8 
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/kernel/kcmp.c b/kernel/kcmp.c
index a0e3d7a0e8b81..b3ff9288c6cc9 100644
--- a/kernel/kcmp.c
+++ b/kernel/kcmp.c
@@ -173,8 +173,8 @@ SYSCALL_DEFINE5(kcmp, pid_t, pid1, pid_t, pid2, int, type,
/*
 * One should have enough rights to inspect task details.
 */
-   ret = kcmp_lock(>signal->cred_guard_mutex,
-   >signal->cred_guard_mutex);
+   ret = kcmp_lock(>signal->exec_update_mutex,
+   >signal->exec_update_mutex);
if (ret)
goto err;
if (!ptrace_may_access(task1, PTRACE_MODE_READ_REALCREDS) ||
@@ -229,8 +229,8 @@ SYSCALL_DEFINE5(kcmp, pid_t, pid1, pid_t, pid2, int, type,
}
 
 err_unlock:
-   kcmp_unlock(>signal->cred_guard_mutex,
-   >signal->cred_guard_mutex);
+   kcmp_unlock(>signal->exec_update_mutex,
+   >signal->exec_update_mutex);
 err:
put_task_struct(task1);
put_task_struct(task2);
-- 
2.25.1



[PATCH AUTOSEL 5.4 222/330] NFS: Fix races nfs_page_group_destroy() vs nfs_destroy_unlinked_subrequests()

2020-09-17 Thread Sasha Levin
From: Trond Myklebust 

[ Upstream commit 08ca8b21f760c0ed5034a5c122092eec22ccf8f4 ]

When a subrequest is being detached from the subgroup, we want to
ensure that it is not holding the group lock, or in the process
of waiting for the group lock.

Fixes: 5b2b5187fa85 ("NFS: Fix nfs_page_group_destroy() and 
nfs_lock_and_join_requests() race cases")
Signed-off-by: Trond Myklebust 
Signed-off-by: Sasha Levin 
---
 fs/nfs/pagelist.c| 67 +++-
 fs/nfs/write.c   | 10 --
 include/linux/nfs_page.h |  2 ++
 3 files changed, 55 insertions(+), 24 deletions(-)

diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
index b736912098eee..f4407dd426bf0 100644
--- a/fs/nfs/pagelist.c
+++ b/fs/nfs/pagelist.c
@@ -133,47 +133,70 @@ nfs_async_iocounter_wait(struct rpc_task *task, struct 
nfs_lock_context *l_ctx)
 EXPORT_SYMBOL_GPL(nfs_async_iocounter_wait);
 
 /*
- * nfs_page_group_lock - lock the head of the page group
- * @req - request in group that is to be locked
+ * nfs_page_set_headlock - set the request PG_HEADLOCK
+ * @req: request that is to be locked
  *
- * this lock must be held when traversing or modifying the page
- * group list
+ * this lock must be held when modifying req->wb_head
  *
  * return 0 on success, < 0 on error
  */
 int
-nfs_page_group_lock(struct nfs_page *req)
+nfs_page_set_headlock(struct nfs_page *req)
 {
-   struct nfs_page *head = req->wb_head;
-
-   WARN_ON_ONCE(head != head->wb_head);
-
-   if (!test_and_set_bit(PG_HEADLOCK, >wb_flags))
+   if (!test_and_set_bit(PG_HEADLOCK, >wb_flags))
return 0;
 
-   set_bit(PG_CONTENDED1, >wb_flags);
+   set_bit(PG_CONTENDED1, >wb_flags);
smp_mb__after_atomic();
-   return wait_on_bit_lock(>wb_flags, PG_HEADLOCK,
+   return wait_on_bit_lock(>wb_flags, PG_HEADLOCK,
TASK_UNINTERRUPTIBLE);
 }
 
 /*
- * nfs_page_group_unlock - unlock the head of the page group
- * @req - request in group that is to be unlocked
+ * nfs_page_clear_headlock - clear the request PG_HEADLOCK
+ * @req: request that is to be locked
  */
 void
-nfs_page_group_unlock(struct nfs_page *req)
+nfs_page_clear_headlock(struct nfs_page *req)
 {
-   struct nfs_page *head = req->wb_head;
-
-   WARN_ON_ONCE(head != head->wb_head);
-
smp_mb__before_atomic();
-   clear_bit(PG_HEADLOCK, >wb_flags);
+   clear_bit(PG_HEADLOCK, >wb_flags);
smp_mb__after_atomic();
-   if (!test_bit(PG_CONTENDED1, >wb_flags))
+   if (!test_bit(PG_CONTENDED1, >wb_flags))
return;
-   wake_up_bit(>wb_flags, PG_HEADLOCK);
+   wake_up_bit(>wb_flags, PG_HEADLOCK);
+}
+
+/*
+ * nfs_page_group_lock - lock the head of the page group
+ * @req: request in group that is to be locked
+ *
+ * this lock must be held when traversing or modifying the page
+ * group list
+ *
+ * return 0 on success, < 0 on error
+ */
+int
+nfs_page_group_lock(struct nfs_page *req)
+{
+   int ret;
+
+   ret = nfs_page_set_headlock(req);
+   if (ret || req->wb_head == req)
+   return ret;
+   return nfs_page_set_headlock(req->wb_head);
+}
+
+/*
+ * nfs_page_group_unlock - unlock the head of the page group
+ * @req: request in group that is to be unlocked
+ */
+void
+nfs_page_group_unlock(struct nfs_page *req)
+{
+   if (req != req->wb_head)
+   nfs_page_clear_headlock(req->wb_head);
+   nfs_page_clear_headlock(req);
 }
 
 /*
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 58c8317dd7d88..613c3ef23e07b 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -425,22 +425,28 @@ nfs_destroy_unlinked_subrequests(struct nfs_page 
*destroy_list,
destroy_list = (subreq->wb_this_page == old_head) ?
   NULL : subreq->wb_this_page;
 
+   /* Note: lock subreq in order to change subreq->wb_head */
+   nfs_page_set_headlock(subreq);
WARN_ON_ONCE(old_head != subreq->wb_head);
 
/* make sure old group is not used */
subreq->wb_this_page = subreq;
+   subreq->wb_head = subreq;
 
clear_bit(PG_REMOVE, >wb_flags);
 
/* Note: races with nfs_page_group_destroy() */
if (!kref_read(>wb_kref)) {
/* Check if we raced with nfs_page_group_destroy() */
-   if (test_and_clear_bit(PG_TEARDOWN, >wb_flags))
+   if (test_and_clear_bit(PG_TEARDOWN, >wb_flags)) 
{
+   nfs_page_clear_headlock(subreq);
nfs_free_request(subreq);
+   } else
+   nfs_page_clear_headlock(subreq);
continue;
}
+   nfs_page_clear_headlock(subreq);
 
-   subreq->wb_head = subreq;
nfs_release_request(old_head);
 
if 

[PATCH AUTOSEL 5.4 167/330] scsi: aacraid: Disabling TM path and only processing IOP reset

2020-09-17 Thread Sasha Levin
From: Sagar Biradar 

[ Upstream commit bef18d308a2215eff8c3411a23d7f34604ce56c3 ]

Fix the occasional adapter panic when sg_reset is issued with the -d, -t, -b
and -H flags.  Remove the command type HBA_IU_TYPE_SCSI_TM_REQ handling in
aac_hba_send() since iu_type, request_id and fib_flags are not populated for
it.  Device and target reset handlers are made to send TMF commands only when
reset_state is 0.

Link: 
https://lore.kernel.org/r/1581553771-25796-1-git-send-email-sagar.bira...@microchip.com
Reviewed-by: Sagar Biradar 
Signed-off-by: Sagar Biradar 
Signed-off-by: Balsundar P 
Signed-off-by: Martin K. Petersen 
Signed-off-by: Sasha Levin 
---
 drivers/scsi/aacraid/commsup.c |  2 +-
 drivers/scsi/aacraid/linit.c   | 34 +-
 2 files changed, 26 insertions(+), 10 deletions(-)

diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
index 2142a649e865b..90fb17c5dd69c 100644
--- a/drivers/scsi/aacraid/commsup.c
+++ b/drivers/scsi/aacraid/commsup.c
@@ -728,7 +728,7 @@ int aac_hba_send(u8 command, struct fib *fibptr, 
fib_callback callback,
hbacmd->request_id =
cpu_to_le32u32)(fibptr - dev->fibs)) << 2) + 1);
fibptr->flags |= FIB_CONTEXT_FLAG_SCSI_CMD;
-   } else if (command != HBA_IU_TYPE_SCSI_TM_REQ)
+   } else
return -EINVAL;
 
 
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index 4a858789e6c5e..514aed38b5afe 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -723,7 +723,11 @@ static int aac_eh_abort(struct scsi_cmnd* cmd)
status = aac_hba_send(HBA_IU_TYPE_SCSI_TM_REQ, fib,
  (fib_callback) aac_hba_callback,
  (void *) cmd);
-
+   if (status != -EINPROGRESS) {
+   aac_fib_complete(fib);
+   aac_fib_free(fib);
+   return ret;
+   }
/* Wait up to 15 secs for completion */
for (count = 0; count < 15; ++count) {
if (cmd->SCp.sent_command) {
@@ -902,11 +906,11 @@ static int aac_eh_dev_reset(struct scsi_cmnd *cmd)
 
info = >hba_map[bus][cid];
 
-   if (info->devtype != AAC_DEVTYPE_NATIVE_RAW &&
-   info->reset_state > 0)
+   if (!(info->devtype == AAC_DEVTYPE_NATIVE_RAW &&
+!(info->reset_state > 0)))
return FAILED;
 
-   pr_err("%s: Host adapter reset request. SCSI hang ?\n",
+   pr_err("%s: Host device reset request. SCSI hang ?\n",
   AAC_DRIVERNAME);
 
fib = aac_fib_alloc(aac);
@@ -921,7 +925,12 @@ static int aac_eh_dev_reset(struct scsi_cmnd *cmd)
status = aac_hba_send(command, fib,
  (fib_callback) aac_tmf_callback,
  (void *) info);
-
+   if (status != -EINPROGRESS) {
+   info->reset_state = 0;
+   aac_fib_complete(fib);
+   aac_fib_free(fib);
+   return ret;
+   }
/* Wait up to 15 seconds for completion */
for (count = 0; count < 15; ++count) {
if (info->reset_state == 0) {
@@ -960,11 +969,11 @@ static int aac_eh_target_reset(struct scsi_cmnd *cmd)
 
info = >hba_map[bus][cid];
 
-   if (info->devtype != AAC_DEVTYPE_NATIVE_RAW &&
-   info->reset_state > 0)
+   if (!(info->devtype == AAC_DEVTYPE_NATIVE_RAW &&
+!(info->reset_state > 0)))
return FAILED;
 
-   pr_err("%s: Host adapter reset request. SCSI hang ?\n",
+   pr_err("%s: Host target reset request. SCSI hang ?\n",
   AAC_DRIVERNAME);
 
fib = aac_fib_alloc(aac);
@@ -981,6 +990,13 @@ static int aac_eh_target_reset(struct scsi_cmnd *cmd)
  (fib_callback) aac_tmf_callback,
  (void *) info);
 
+   if (status != -EINPROGRESS) {
+   info->reset_state = 0;
+   aac_fib_complete(fib);
+   aac_fib_free(fib);
+   return ret;
+   }
+
/* Wait up to 15 seconds for completion */
for (count = 0; count < 15; ++count) {
if (info->reset_state <= 0) {
@@ -1033,7 +1049,7 @@ static int aac_eh_bus_reset(struct scsi_cmnd* cmd)
}
}
 
-   pr_err("%s: Host adapter reset request. SCSI hang ?\n", AAC_DRIVERNAME);
+   pr_err("%s: Host bus reset request. SCSI hang ?\n", AAC_DRIVERNAME);
 
/*
 * Check the health of the controller
-- 
2.25.1



[PATCH AUTOSEL 5.4 176/330] rtc: ds1374: fix possible race condition

2020-09-17 Thread Sasha Levin
From: Alexandre Belloni 

[ Upstream commit c11af8131a4e7ba1960faed731ee7e84c2c13c94 ]

The RTC IRQ is requested before the struct rtc_device is allocated, which
may lead to a NULL pointer dereference in the IRQ handler.

To fix this issue, allocate the rtc_device struct with
devm_rtc_allocate_device before requesting the RTC IRQ, and use
rtc_register_device to register the RTC device.
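
A sketch of the allocate-early / register-late pattern (foo_irq_handler and
foo_rtc_ops are illustrative names):

    ds->rtc = devm_rtc_allocate_device(&client->dev);
    if (IS_ERR(ds->rtc))
            return PTR_ERR(ds->rtc);

    /* the handler may fire from here on, but ds->rtc is already valid */
    ret = devm_request_irq(&client->dev, client->irq, foo_irq_handler,
                           IRQF_SHARED, "rtc-foo", client);
    if (ret)
            return ret;

    ds->rtc->ops = &foo_rtc_ops;
    return rtc_register_device(ds->rtc);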

Link: 
https://lore.kernel.org/r/20200306073404.56921-1-alexandre.bell...@bootlin.com
Signed-off-by: Alexandre Belloni 
Signed-off-by: Sasha Levin 
---
 drivers/rtc/rtc-ds1374.c | 15 +--
 1 file changed, 9 insertions(+), 6 deletions(-)

diff --git a/drivers/rtc/rtc-ds1374.c b/drivers/rtc/rtc-ds1374.c
index 367497914c100..28eb96cbaf98b 100644
--- a/drivers/rtc/rtc-ds1374.c
+++ b/drivers/rtc/rtc-ds1374.c
@@ -620,6 +620,10 @@ static int ds1374_probe(struct i2c_client *client,
if (!ds1374)
return -ENOMEM;
 
+   ds1374->rtc = devm_rtc_allocate_device(>dev);
+   if (IS_ERR(ds1374->rtc))
+   return PTR_ERR(ds1374->rtc);
+
ds1374->client = client;
i2c_set_clientdata(client, ds1374);
 
@@ -641,12 +645,11 @@ static int ds1374_probe(struct i2c_client *client,
device_set_wakeup_capable(>dev, 1);
}
 
-   ds1374->rtc = devm_rtc_device_register(>dev, client->name,
-   _rtc_ops, THIS_MODULE);
-   if (IS_ERR(ds1374->rtc)) {
-   dev_err(>dev, "unable to register the class device\n");
-   return PTR_ERR(ds1374->rtc);
-   }
+   ds1374->rtc->ops = _rtc_ops;
+
+   ret = rtc_register_device(ds1374->rtc);
+   if (ret)
+   return ret;
 
 #ifdef CONFIG_RTC_DRV_DS1374_WDT
save_client = client;
-- 
2.25.1



[PATCH AUTOSEL 5.4 209/330] xfs: prohibit fs freezing when using empty transactions

2020-09-17 Thread Sasha Levin
From: "Darrick J. Wong" 

[ Upstream commit 27fb5a72f50aa770dd38b0478c07acacef97e3e7 ]

I noticed that fsfreeze can take a very long time to freeze an XFS filesystem if
there happens to be a GETFSMAP caller running in the background.  I also
happened to notice the following in dmesg:

[ cut here ]
WARNING: CPU: 2 PID: 43492 at fs/xfs/xfs_super.c:853 xfs_quiesce_attr+0x83/0x90 
[xfs]
Modules linked in: xfs libcrc32c ip6t_REJECT nf_reject_ipv6 ipt_REJECT 
nf_reject_ipv4 ip_set_hash_ip ip_set_hash_net xt_tcpudp xt_set ip_set_hash_mac 
ip_set nfnetlink ip6table_filter ip6_tables bfq iptable_filter sch_fq_codel 
ip_tables x_tables nfsv4 af_packet [last unloaded: xfs]
CPU: 2 PID: 43492 Comm: xfs_io Not tainted 5.6.0-rc4-djw #rc4
Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS 1.10.2-1ubuntu1 
04/01/2014
RIP: 0010:xfs_quiesce_attr+0x83/0x90 [xfs]
Code: 7c 07 00 00 85 c0 75 22 48 89 df 5b e9 96 c1 00 00 48 c7 c6 b0 2d 38 a0 
48 89 df e8 57 64 ff ff 8b 83 7c 07 00 00 85 c0 74 de <0f> 0b 48 89 df 5b e9 72 
c1 00 00 66 90 0f 1f 44 00 00 41 55 41 54
RSP: 0018:c900030f3e28 EFLAGS: 00010202
RAX: 0001 RBX: 88802ac54000 RCX: 
RDX:  RSI: 81e4a6f0 RDI: 
RBP: 88807859f070 R08: 0001 R09: 
R10:  R11: 0010 R12: 
R13: 88807859f388 R14: 88807859f4b8 R15: 88807859f5e8
FS:  7fad1c6c0fc0() GS:88807e00() knlGS:
CS:  0010 DS:  ES:  CR0: 80050033
CR2: 7f0c7d237000 CR3: 77f01003 CR4: 001606a0
Call Trace:
 xfs_fs_freeze+0x25/0x40 [xfs]
 freeze_super+0xc8/0x180
 do_vfs_ioctl+0x70b/0x750
 ? __fget_files+0x135/0x210
 ksys_ioctl+0x3a/0xb0
 __x64_sys_ioctl+0x16/0x20
 do_syscall_64+0x50/0x1a0
 entry_SYSCALL_64_after_hwframe+0x49/0xbe

These two things appear to be related.  The assertion trips when another
thread initiates a fsmap request (which uses an empty transaction) after
the freezer waited for m_active_trans to hit zero but before the
freezer executes the WARN_ON just prior to calling xfs_log_quiesce.

The lengthy delays in freezing happen because the freezer calls
xfs_wait_buftarg to clean out the buffer lru list.  Meanwhile, the
GETFSMAP caller is continuing to grab and release buffers, which means
that it can take a very long time for the buffer lru list to empty out.

We fix both of these races by calling sb_start_write to obtain freeze
protection while using empty transactions for GETFSMAP and for metadata
scrubbing.  The other two users occur during mount, during which time we
cannot fs freeze.
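
Schematically, each empty-transaction user now brackets its walk with freeze
protection (a sketch, not the exact hunks):

    sb_start_write(mp->m_super);            /* hold off fsfreeze */

    error = xfs_trans_alloc_empty(mp, &tp);
    if (!error) {
            /* ... walk metadata and grab/release buffers ... */
            xfs_trans_cancel(tp);
    }

    sb_end_write(mp->m_super);              /* let the freezer proceed */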

Signed-off-by: Darrick J. Wong 
Reviewed-by: Dave Chinner 
Signed-off-by: Sasha Levin 
---
 fs/xfs/scrub/scrub.c | 9 +
 fs/xfs/xfs_fsmap.c   | 9 +
 fs/xfs/xfs_trans.c   | 5 +
 3 files changed, 23 insertions(+)

diff --git a/fs/xfs/scrub/scrub.c b/fs/xfs/scrub/scrub.c
index 15c8c5f3f688d..720bef5779989 100644
--- a/fs/xfs/scrub/scrub.c
+++ b/fs/xfs/scrub/scrub.c
@@ -167,6 +167,7 @@ xchk_teardown(
xfs_irele(sc->ip);
sc->ip = NULL;
}
+   sb_end_write(sc->mp->m_super);
if (sc->flags & XCHK_REAPING_DISABLED)
xchk_start_reaping(sc);
if (sc->flags & XCHK_HAS_QUOTAOFFLOCK) {
@@ -489,6 +490,14 @@ xfs_scrub_metadata(
sc.ops = _scrub_ops[sm->sm_type];
sc.sick_mask = xchk_health_mask_for_scrub_type(sm->sm_type);
 retry_op:
+   /*
+* If freeze runs concurrently with a scrub, the freeze can be delayed
+* indefinitely as we walk the filesystem and iterate over metadata
+* buffers.  Freeze quiesces the log (which waits for the buffer LRU to
+* be emptied) and that won't happen while checking is running.
+*/
+   sb_start_write(mp->m_super);
+
/* Set up for the operation. */
error = sc.ops->setup(, ip);
if (error)
diff --git a/fs/xfs/xfs_fsmap.c b/fs/xfs/xfs_fsmap.c
index d082143feb5ab..c13754e119be1 100644
--- a/fs/xfs/xfs_fsmap.c
+++ b/fs/xfs/xfs_fsmap.c
@@ -895,6 +895,14 @@ xfs_getfsmap(
info.format_arg = arg;
info.head = head;
 
+   /*
+* If fsmap runs concurrently with a scrub, the freeze can be delayed
+* indefinitely as we walk the rmapbt and iterate over metadata
+* buffers.  Freeze quiesces the log (which waits for the buffer LRU to
+* be emptied) and that won't happen while we're reading buffers.
+*/
+   sb_start_write(mp->m_super);
+
/* For each device we support... */
for (i = 0; i < XFS_GETFSMAP_DEVS; i++) {
/* Is this device within the range the user asked for? */
@@ -934,6 +942,7 @@ xfs_getfsmap(
 
if (tp)
xfs_trans_cancel(tp);
+   sb_end_write(mp->m_super);
head->fmh_oflags = FMH_OF_DEV_T;
return error;
 }
diff --git a/fs/xfs/xfs_trans.c b/fs/xfs/xfs_trans.c

[PATCH AUTOSEL 5.4 198/330] exec: Add exec_update_mutex to replace cred_guard_mutex

2020-09-17 Thread Sasha Levin
From: "Eric W. Biederman" 

[ Upstream commit eea9673250db4e854e9998ef9da6d4584857f0ea ]

The cred_guard_mutex is problematic as it is held over possibly
indefinite waits for userspace.  The possible indefinite waits for
userspace that I have identified are: The cred_guard_mutex is held in
PTRACE_EVENT_EXIT waiting for the tracer.  The cred_guard_mutex is
held over "put_user(0, tsk->clear_child_tid)" in exit_mm().  The
cred_guard_mutex is held over "get_user(futex_offset, ...")  in
exit_robust_list.  The cred_guard_mutex is held over copy_strings.

The functions get_user and put_user can trigger a page fault which can
potentially wait indefinitely in the case of userfaultfd or if
userspace implements part of the page fault path.

In any of those cases the userspace process that the kernel is waiting
for might make a different system call that winds up taking the
cred_guard_mutex and result in deadlock.

Holding a mutex over any of those possibly indefinite waits for
userspace does not appear necessary.  Add exec_update_mutex that will
just cover updating the process during exec where the permissions and
the objects pointed to by the task struct may be out of sync.

The plan is to switch the users of cred_guard_mutex to
exec_update_mutex one by one.  This lets us move forward while still
being careful and not introducing any regressions.
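
For a reader such as mm_access() (converted by a later patch in this series),
the switch looks like this sketch:

    err = mutex_lock_killable(&task->signal->exec_update_mutex);
    if (err)
            return ERR_PTR(err);

    mm = get_task_mm(task);
    if (mm && mm != current->mm &&
        !ptrace_may_access(task, mode)) {
            mmput(mm);
            mm = ERR_PTR(-EACCES);
    }
    mutex_unlock(&task->signal->exec_update_mutex);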

Link: https://lore.kernel.org/lkml/20160921152946.ga24...@dhcp22.suse.cz/
Link: 
https://lore.kernel.org/lkml/am6pr03mb5170b06f3a2b75efb98d071ae4...@am6pr03mb5170.eurprd03.prod.outlook.com/
Link: https://lore.kernel.org/linux-fsdevel/20161102181806.gb1...@redhat.com/
Link: https://lore.kernel.org/lkml/20160923095031.ga14...@redhat.com/
Link: https://lore.kernel.org/lkml/20170213141452.ga30...@redhat.com/
Ref: 45c1a159b85b ("Add PTRACE_O_TRACEVFORKDONE and PTRACE_O_TRACEEXIT 
facilities.")
Ref: 456f17cd1a28 ("[PATCH] user-vm-unlock-2.5.31-A2")
Reviewed-by: Kirill Tkhai 
Signed-off-by: "Eric W. Biederman" 
Signed-off-by: Bernd Edlinger 
Signed-off-by: Eric W. Biederman 
Signed-off-by: Sasha Levin 
---
 fs/exec.c| 22 +++---
 include/linux/binfmts.h  |  8 +++-
 include/linux/sched/signal.h |  9 -
 init/init_task.c |  1 +
 kernel/fork.c|  1 +
 5 files changed, 36 insertions(+), 5 deletions(-)

diff --git a/fs/exec.c b/fs/exec.c
index d62cd1d71098f..de833553ae27d 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -1007,16 +1007,26 @@ ssize_t read_code(struct file *file, unsigned long 
addr, loff_t pos, size_t len)
 }
 EXPORT_SYMBOL(read_code);
 
+/*
+ * Maps the mm_struct mm into the current task struct.
+ * On success, this function returns with the mutex
+ * exec_update_mutex locked.
+ */
 static int exec_mmap(struct mm_struct *mm)
 {
struct task_struct *tsk;
struct mm_struct *old_mm, *active_mm;
+   int ret;
 
/* Notify parent that we're no longer interested in the old VM */
tsk = current;
old_mm = current->mm;
exec_mm_release(tsk, old_mm);
 
+   ret = mutex_lock_killable(>signal->exec_update_mutex);
+   if (ret)
+   return ret;
+
if (old_mm) {
sync_mm_rss(old_mm);
/*
@@ -1028,9 +1038,11 @@ static int exec_mmap(struct mm_struct *mm)
down_read(_mm->mmap_sem);
if (unlikely(old_mm->core_state)) {
up_read(_mm->mmap_sem);
+   mutex_unlock(>signal->exec_update_mutex);
return -EINTR;
}
}
+
task_lock(tsk);
active_mm = tsk->active_mm;
membarrier_exec_mmap(mm);
@@ -1285,11 +1297,12 @@ int flush_old_exec(struct linux_binprm * bprm)
goto out;
 
/*
-* After clearing bprm->mm (to mark that current is using the
-* prepared mm now), we have nothing left of the original
+* After setting bprm->called_exec_mmap (to mark that current is
+* using the prepared mm now), we have nothing left of the original
 * process. If anything from here on returns an error, the check
 * in search_binary_handler() will SEGV current.
 */
+   bprm->called_exec_mmap = 1;
bprm->mm = NULL;
 
set_fs(USER_DS);
@@ -1423,6 +1436,8 @@ static void free_bprm(struct linux_binprm *bprm)
 {
free_arg_pages(bprm);
if (bprm->cred) {
+   if (bprm->called_exec_mmap)
+   mutex_unlock(>signal->exec_update_mutex);
mutex_unlock(>signal->cred_guard_mutex);
abort_creds(bprm->cred);
}
@@ -1472,6 +1487,7 @@ void install_exec_creds(struct linux_binprm *bprm)
 * credentials; any time after this it may be unlocked.
 */
security_bprm_committed_creds(bprm);
+   mutex_unlock(>signal->exec_update_mutex);
mutex_unlock(>signal->cred_guard_mutex);
 }
 EXPORT_SYMBOL(install_exec_creds);
@@ -1663,7 +1679,7 @@ 

[PATCH AUTOSEL 5.4 221/330] PCI: pciehp: Fix MSI interrupt race

2020-09-17 Thread Sasha Levin
From: Stuart Hayes 

[ Upstream commit 8edf5332c39340b9583cf9cba659eb7ec71f75b5 ]

Without this commit, a PCIe hotplug port can stop generating interrupts on
hotplug events, so device adds and removals will not be seen:

The pciehp interrupt handler pciehp_isr() reads the Slot Status register
and then writes back to it to clear the bits that caused the interrupt.  If
a different interrupt event bit gets set between the read and the write,
pciehp_isr() returns without having cleared all of the interrupt event
bits.  If this happens when the MSI isn't masked (which by default it isn't
in handle_edge_irq(), and which it will never be when MSI per-vector
masking is not supported), we won't get any more hotplug interrupts from
that device.

That is expected behavior, according to the PCIe Base Spec r5.0, section
6.7.3.4, "Software Notification of Hot-Plug Events".

Because the Presence Detect Changed and Data Link Layer State Changed event
bits can both get set at nearly the same time when a device is added or
removed, this is more likely to happen than it might seem.  The issue was
found (and can be reproduced rather easily) by connecting and disconnecting
an NVMe storage device on at least one system model where the NVMe devices
were being connected to an AMD PCIe port (PCI device 0x1022/0x1483).

Fix the issue by modifying pciehp_isr() to loop back and re-read the Slot
Status register immediately after writing to it, until it sees that all of
the event status bits have been cleared.
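
The resulting loop has this shape (a sketch; EVENT_BITS stands for the mask
of Slot Status event bits that pciehp handles):

    u16 status, events = 0;

    do {
            pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &status);
            status &= EVENT_BITS;
            events |= status;
            if (status)
                    pcie_capability_write_word(pdev, PCI_EXP_SLTSTA, status);
    } while (status);   /* stop once no event bit was set between read and write */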

[lukas: drop loop count limitation, write "events" instead of "status",
don't loop back in INTx and poll modes, tweak code comment & commit msg]
Link: 
https://lore.kernel.org/r/78b4ced5072bfe6e369d20e8b47c279b8c7af12e.1582121613.git.lu...@wunner.de
Tested-by: Stuart Hayes 
Signed-off-by: Stuart Hayes 
Signed-off-by: Lukas Wunner 
Signed-off-by: Bjorn Helgaas 
Reviewed-by: Joerg Roedel 
Signed-off-by: Sasha Levin 
---
 drivers/pci/hotplug/pciehp_hpc.c | 26 --
 1 file changed, 20 insertions(+), 6 deletions(-)

diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
index 356786a3b7f4b..88b996764ff95 100644
--- a/drivers/pci/hotplug/pciehp_hpc.c
+++ b/drivers/pci/hotplug/pciehp_hpc.c
@@ -529,7 +529,7 @@ static irqreturn_t pciehp_isr(int irq, void *dev_id)
struct controller *ctrl = (struct controller *)dev_id;
struct pci_dev *pdev = ctrl_dev(ctrl);
struct device *parent = pdev->dev.parent;
-   u16 status, events;
+   u16 status, events = 0;
 
/*
 * Interrupts only occur in D3hot or shallower and only if enabled
@@ -554,6 +554,7 @@ static irqreturn_t pciehp_isr(int irq, void *dev_id)
}
}
 
+read_status:
pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, );
if (status == (u16) ~0) {
ctrl_info(ctrl, "%s: no response from device\n", __func__);
@@ -566,24 +567,37 @@ static irqreturn_t pciehp_isr(int irq, void *dev_id)
 * Slot Status contains plain status bits as well as event
 * notification bits; right now we only want the event bits.
 */
-   events = status & (PCI_EXP_SLTSTA_ABP | PCI_EXP_SLTSTA_PFD |
-  PCI_EXP_SLTSTA_PDC | PCI_EXP_SLTSTA_CC |
-  PCI_EXP_SLTSTA_DLLSC);
+   status &= PCI_EXP_SLTSTA_ABP | PCI_EXP_SLTSTA_PFD |
+ PCI_EXP_SLTSTA_PDC | PCI_EXP_SLTSTA_CC |
+ PCI_EXP_SLTSTA_DLLSC;
 
/*
 * If we've already reported a power fault, don't report it again
 * until we've done something to handle it.
 */
if (ctrl->power_fault_detected)
-   events &= ~PCI_EXP_SLTSTA_PFD;
+   status &= ~PCI_EXP_SLTSTA_PFD;
 
+   events |= status;
if (!events) {
if (parent)
pm_runtime_put(parent);
return IRQ_NONE;
}
 
-   pcie_capability_write_word(pdev, PCI_EXP_SLTSTA, events);
+   if (status) {
+   pcie_capability_write_word(pdev, PCI_EXP_SLTSTA, events);
+
+   /*
+* In MSI mode, all event bits must be zero before the port
+* will send a new interrupt (PCIe Base Spec r5.0 sec 6.7.3.4).
+* So re-read the Slot Status register in case a bit was set
+* between read and write.
+*/
+   if (pci_dev_msi_enabled(pdev) && !pciehp_poll_mode)
+   goto read_status;
+   }
+
ctrl_dbg(ctrl, "pending interrupts %#06x from Slot Status\n", events);
if (parent)
pm_runtime_put(parent);
-- 
2.25.1



[PATCH AUTOSEL 5.4 199/330] exec: Fix a deadlock in strace

2020-09-17 Thread Sasha Levin
From: Bernd Edlinger 

[ Upstream commit 3e74fabd39710ee29fa25618d2c2b40cfa7d76c7 ]

This fixes a deadlock in the tracer when tracing a multi-threaded
application that calls execve while more than one thread are running.

I observed that when running strace on the gcc test suite, it always
blocks after a while, when expect calls execve, because other threads
have to be terminated.  They send ptrace events, but the strace is no
longer able to respond, since it is blocked in vm_access.

The deadlock is always happening when strace needs to access the
tracees process mmap, while another thread in the tracee starts to
execve a child process, but that cannot continue until the
PTRACE_EVENT_EXIT is handled and the WIFEXITED event is received:

strace  D0 30614  30584 0x
Call Trace:
__schedule+0x3ce/0x6e0
schedule+0x5c/0xd0
schedule_preempt_disabled+0x15/0x20
__mutex_lock.isra.13+0x1ec/0x520
__mutex_lock_killable_slowpath+0x13/0x20
mutex_lock_killable+0x28/0x30
mm_access+0x27/0xa0
process_vm_rw_core.isra.3+0xff/0x550
process_vm_rw+0xdd/0xf0
__x64_sys_process_vm_readv+0x31/0x40
do_syscall_64+0x64/0x220
entry_SYSCALL_64_after_hwframe+0x44/0xa9

expect  D0 31933  30876 0x80004003
Call Trace:
__schedule+0x3ce/0x6e0
schedule+0x5c/0xd0
flush_old_exec+0xc4/0x770
load_elf_binary+0x35a/0x16c0
search_binary_handler+0x97/0x1d0
__do_execve_file.isra.40+0x5d4/0x8a0
__x64_sys_execve+0x49/0x60
do_syscall_64+0x64/0x220
entry_SYSCALL_64_after_hwframe+0x44/0xa9

This changes mm_access to use the new exec_update_mutex
instead of cred_guard_mutex.

This patch is based on the following patch by Eric W. Biederman:
"[PATCH 0/5] Infrastructure to allow fixing exec deadlocks"
Link: https://lore.kernel.org/lkml/87v9ne5y4y.fsf...@x220.int.ebiederm.org/

Signed-off-by: Bernd Edlinger 
Reviewed-by: Kees Cook 
Signed-off-by: Eric W. Biederman 
Signed-off-by: Sasha Levin 
---
 kernel/fork.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/kernel/fork.c b/kernel/fork.c
index cfdc57658ad88..594272569a80f 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1221,7 +1221,7 @@ struct mm_struct *mm_access(struct task_struct *task, 
unsigned int mode)
struct mm_struct *mm;
int err;
 
-   err =  mutex_lock_killable(>signal->cred_guard_mutex);
+   err =  mutex_lock_killable(>signal->exec_update_mutex);
if (err)
return ERR_PTR(err);
 
@@ -1231,7 +1231,7 @@ struct mm_struct *mm_access(struct task_struct *task, 
unsigned int mode)
mmput(mm);
mm = ERR_PTR(-EACCES);
}
-   mutex_unlock(>signal->cred_guard_mutex);
+   mutex_unlock(>signal->exec_update_mutex);
 
return mm;
 }
-- 
2.25.1



[PATCH AUTOSEL 5.4 194/330] net: axienet: Convert DMA error handler to a work queue

2020-09-17 Thread Sasha Levin
From: Andre Przywara 

[ Upstream commit 24201a64770afe2e17050b2ab9e8c0e24e9c23b2 ]

The DMA error handler routine is currently a tasklet, scheduled to run
after the DMA error IRQ was handled.
However it needs to take the MDIO mutex, which it is not allowed to do in a
tasklet. A kernel (with debug options) complains consequently:
[  614.050361] net eth0: DMA Tx error 0x174019
[  614.064002] net eth0: Current BD is at: 0x8f84aa0ce
[  614.080195] BUG: sleeping function called from invalid context at 
kernel/locking/mutex.c:935
[  614.109484] in_atomic(): 1, irqs_disabled(): 0, non_block: 0, pid: 40, name: 
kworker/u4:4
[  614.135428] 3 locks held by kworker/u4:4/40:
[  614.149075]  #0: 000879863328 ((wq_completion)rpciod){}, at: 
process_one_work+0x1f0/0x6a8
[  614.177528]  #1: 80001251bdf8 
((work_completion)(>u.tk_work)){}, at: process_one_work+0x1f0/0x6a8
[  614.209033]  #2: 0008784e0110 (sk_lock-AF_INET-RPC){}, at: 
tcp_sendmsg+0x24/0x58
[  614.235429] CPU: 0 PID: 40 Comm: kworker/u4:4 Not tainted 
5.6.0-rc3-00926-g4a165a9d5921 #26
[  614.260854] Hardware name: ARM Test FPGA (DT)
[  614.274734] Workqueue: rpciod rpc_async_schedule
[  614.289022] Call trace:
[  614.296871]  dump_backtrace+0x0/0x1a0
[  614.308311]  show_stack+0x14/0x20
[  614.318751]  dump_stack+0xbc/0x100
[  614.329403]  ___might_sleep+0xf0/0x140
[  614.341018]  __might_sleep+0x4c/0x80
[  614.352201]  __mutex_lock+0x5c/0x8a8
[  614.363348]  mutex_lock_nested+0x1c/0x28
[  614.375654]  axienet_dma_err_handler+0x38/0x388
[  614.38]  tasklet_action_common.isra.15+0x160/0x1a8
[  614.405894]  tasklet_action+0x24/0x30
[  614.417297]  efi_header_end+0xe0/0x494
[  614.429020]  irq_exit+0xd0/0xd8
[  614.439047]  __handle_domain_irq+0x60/0xb0
[  614.451877]  gic_handle_irq+0xdc/0x2d0
[  614.463486]  el1_irq+0xcc/0x180
[  614.473451]  __tcp_transmit_skb+0x41c/0xb58
[  614.486513]  tcp_write_xmit+0x224/0x10a0
[  614.498792]  __tcp_push_pending_frames+0x38/0xc8
[  614.513126]  tcp_rcv_established+0x41c/0x820
[  614.526301]  tcp_v4_do_rcv+0x8c/0x218
[  614.537784]  __release_sock+0x5c/0x108
[  614.549466]  release_sock+0x34/0xa0
[  614.560318]  tcp_sendmsg+0x40/0x58
[  614.571053]  inet_sendmsg+0x40/0x68
[  614.582061]  sock_sendmsg+0x18/0x30
[  614.593074]  xs_sendpages+0x218/0x328
[  614.604506]  xs_tcp_send_request+0xa0/0x1b8
[  614.617461]  xprt_transmit+0xc8/0x4f0
[  614.628943]  call_transmit+0x8c/0xa0
[  614.640028]  __rpc_execute+0xbc/0x6f8
[  614.651380]  rpc_async_schedule+0x28/0x48
[  614.663846]  process_one_work+0x298/0x6a8
[  614.676299]  worker_thread+0x40/0x490
[  614.687687]  kthread+0x134/0x138
[  614.697804]  ret_from_fork+0x10/0x18
[  614.717319] xilinx_axienet 7fe0.ethernet eth0: Link is Down
[  615.748343] xilinx_axienet 7fe0.ethernet eth0: Link is Up - 1Gbps/Full - 
flow control off

Since tasklets are not really popular anymore anyway, let's convert this
over to a work queue, which can sleep and thus can take the MDIO mutex.
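
For reference, this follows the usual tasklet-to-workqueue conversion
pattern; a minimal sketch with a hypothetical my_dev structure (not the
axienet code itself) looks like this:

#include <linux/workqueue.h>
#include <linux/mutex.h>

/* Hypothetical device illustrating the tasklet -> work_struct conversion. */
struct my_dev {
    struct work_struct dma_err_task;    /* was: struct tasklet_struct */
    struct mutex mdio_lock;
};

static void my_dev_dma_err_handler(struct work_struct *work)
{
    struct my_dev *md = container_of(work, struct my_dev, dma_err_task);

    mutex_lock(&md->mdio_lock);         /* sleeping is allowed in a workqueue */
    /* ... reset the DMA engine and reprogram descriptors ... */
    mutex_unlock(&md->mdio_lock);
}

/* probe():      INIT_WORK(&md->dma_err_task, my_dev_dma_err_handler); */
/* IRQ handler:  schedule_work(&md->dma_err_task);  (was tasklet_schedule()) */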

Signed-off-by: Andre Przywara 
Signed-off-by: David S. Miller 
Signed-off-by: Sasha Levin 
---
 drivers/net/ethernet/xilinx/xilinx_axienet.h  |  2 +-
 .../net/ethernet/xilinx/xilinx_axienet_main.c | 24 +--
 2 files changed, 13 insertions(+), 13 deletions(-)

diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet.h 
b/drivers/net/ethernet/xilinx/xilinx_axienet.h
index 2dacfc85b3baa..04e51af32178c 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet.h
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet.h
@@ -435,7 +435,7 @@ struct axienet_local {
void __iomem *regs;
void __iomem *dma_regs;
 
-   struct tasklet_struct dma_err_tasklet;
+   struct work_struct dma_err_task;
 
int tx_irq;
int rx_irq;
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c 
b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index 479325eeaf8a0..345a795666e92 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -806,7 +806,7 @@ static irqreturn_t axienet_tx_irq(int irq, void *_ndev)
/* Write to the Rx channel control register */
axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
 
-   tasklet_schedule(&lp->dma_err_tasklet);
+   schedule_work(&lp->dma_err_task);
axienet_dma_out32(lp, XAXIDMA_TX_SR_OFFSET, status);
}
 out:
@@ -855,7 +855,7 @@ static irqreturn_t axienet_rx_irq(int irq, void *_ndev)
/* write to the Rx channel control register */
axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
 
-   tasklet_schedule(&lp->dma_err_tasklet);
+   schedule_work(&lp->dma_err_task);
axienet_dma_out32(lp, XAXIDMA_RX_SR_OFFSET, status);
}
 out:
@@ -891,7 +891,7 @@ static irqreturn_t axienet_eth_irq(int irq, void *_ndev)
return IRQ_HANDLED;
 }
 
-static void axienet_dma_err_handler(unsigned long data);
+static void 

[PATCH AUTOSEL 5.4 161/330] clk: imx: Fix division by zero warning on pfdv2

2020-09-17 Thread Sasha Levin
From: Anson Huang 

[ Upstream commit 28b2f82e0383e27476be8a5e13d2aea07ebeb275 ]

Fix below division by zero warning:

[3.176443] Division by zero in kernel.
[3.181809] CPU: 0 PID: 88 Comm: kworker/0:2 Not tainted 
5.3.0-rc2-next-20190730-63758-ge08da51-dirty #124
[3.191817] Hardware name: Freescale i.MX7ULP (Device Tree)
[3.197821] Workqueue: events dbs_work_handler
[3.202849] [] (unwind_backtrace) from [] 
(show_stack+0x10/0x14)
[3.211058] [] (show_stack) from [] 
(dump_stack+0xd8/0x110)
[3.218820] [] (dump_stack) from [] (Ldiv0_64+0x8/0x18)
[3.226263] [] (Ldiv0_64) from [] 
(clk_pfdv2_set_rate+0x54/0xac)
[3.234487] [] (clk_pfdv2_set_rate) from [] 
(clk_change_rate+0x1a4/0x698)
[3.243468] [] (clk_change_rate) from [] 
(clk_change_rate+0x280/0x698)
[3.252180] [] (clk_change_rate) from [] 
(clk_core_set_rate_nolock+0x1a0/0x278)
[3.261679] [] (clk_core_set_rate_nolock) from [] 
(clk_set_rate+0x30/0x64)
[3.270743] [] (clk_set_rate) from [] 
(imx7ulp_set_target+0x184/0x2a4)
[3.279501] [] (imx7ulp_set_target) from [] 
(__cpufreq_driver_target+0x188/0x514)
[3.289196] [] (__cpufreq_driver_target) from [] 
(od_dbs_update+0x130/0x15c)
[3.298438] [] (od_dbs_update) from [] 
(dbs_work_handler+0x2c/0x5c)
[3.306914] [] (dbs_work_handler) from [] 
(process_one_work+0x2ac/0x704)
[3.315826] [] (process_one_work) from [] 
(worker_thread+0x2c/0x574)
[3.324404] [] (worker_thread) from [] 
(kthread+0x134/0x148)
[3.332278] [] (kthread) from [] 
(ret_from_fork+0x14/0x20)
[3.339858] Exception stack(0xe82d5fb0 to 0xe82d5ff8)
[3.345314] 5fa0:   
 
[3.353926] 5fc0:       
 
[3.362519] 5fe0:     0013 

Signed-off-by: Anson Huang 
Signed-off-by: Peng Fan 
Signed-off-by: Shawn Guo 
Signed-off-by: Sasha Levin 
---
 drivers/clk/imx/clk-pfdv2.c | 6 ++
 1 file changed, 6 insertions(+)

diff --git a/drivers/clk/imx/clk-pfdv2.c b/drivers/clk/imx/clk-pfdv2.c
index a03bbed662c6b..2a46b9b61b466 100644
--- a/drivers/clk/imx/clk-pfdv2.c
+++ b/drivers/clk/imx/clk-pfdv2.c
@@ -139,6 +139,12 @@ static int clk_pfdv2_set_rate(struct clk_hw *hw, unsigned 
long rate,
u32 val;
u8 frac;
 
+   if (!rate)
+   return -EINVAL;
+
+   /* PFD can NOT change rate without gating */
+   WARN_ON(clk_pfdv2_is_enabled(hw));
+
tmp = tmp * 18 + rate / 2;
do_div(tmp, rate);
frac = tmp;
-- 
2.25.1



[PATCH AUTOSEL 5.4 179/330] intel_th: Disallow multi mode on devices where it's broken

2020-09-17 Thread Sasha Levin
From: Alexander Shishkin 

[ Upstream commit 397c7729665a3b07a7b4ce7215173df8e9112809 ]

Some versions of Intel TH have an issue that prevents the multi mode of
MSU from working correctly, resulting in no trace data and potentially
stuck MSU pipeline.

Disable multi mode on such devices.

Signed-off-by: Alexander Shishkin 
Reviewed-by: Andy Shevchenko 
Link: 
https://lore.kernel.org/r/20200317062215.15598-2-alexander.shish...@linux.intel.com
Signed-off-by: Greg Kroah-Hartman 
Signed-off-by: Sasha Levin 
---
 drivers/hwtracing/intel_th/intel_th.h |  2 ++
 drivers/hwtracing/intel_th/msu.c  | 11 +--
 drivers/hwtracing/intel_th/pci.c  |  8 ++--
 3 files changed, 17 insertions(+), 4 deletions(-)

diff --git a/drivers/hwtracing/intel_th/intel_th.h 
b/drivers/hwtracing/intel_th/intel_th.h
index 6f4f5486fe6dc..5fe694708b7a3 100644
--- a/drivers/hwtracing/intel_th/intel_th.h
+++ b/drivers/hwtracing/intel_th/intel_th.h
@@ -47,11 +47,13 @@ struct intel_th_output {
 /**
  * struct intel_th_drvdata - describes hardware capabilities and quirks
  * @tscu_enable:   device needs SW to enable time stamping unit
+ * @multi_is_broken:   device has multiblock mode is broken
  * @has_mintctl:   device has interrupt control (MINTCTL) register
  * @host_mode_only:device can only operate in 'host debugger' mode
  */
 struct intel_th_drvdata {
unsigned int    tscu_enable : 1,
+   multi_is_broken: 1,
has_mintctl: 1,
host_mode_only : 1;
 };
diff --git a/drivers/hwtracing/intel_th/msu.c b/drivers/hwtracing/intel_th/msu.c
index 255f8f41c8ff7..3cd2489d398c5 100644
--- a/drivers/hwtracing/intel_th/msu.c
+++ b/drivers/hwtracing/intel_th/msu.c
@@ -157,7 +157,8 @@ struct msc {
/* config */
unsigned int    enabled : 1,
wrap: 1,
-   do_irq  : 1;
+   do_irq  : 1,
+   multi_is_broken : 1;
unsigned int    mode;
unsigned int    burst_len;
unsigned int    index;
@@ -1665,7 +1666,7 @@ static int intel_th_msc_init(struct msc *msc)
 {
atomic_set(&msc->user_count, -1);
 
-   msc->mode = MSC_MODE_MULTI;
+   msc->mode = msc->multi_is_broken ? MSC_MODE_SINGLE : MSC_MODE_MULTI;
mutex_init(&msc->buf_mutex);
INIT_LIST_HEAD(&msc->win_list);
INIT_LIST_HEAD(&msc->iter_list);
@@ -1877,6 +1878,9 @@ mode_store(struct device *dev, struct device_attribute 
*attr, const char *buf,
return -EINVAL;
 
 found:
+   if (i == MSC_MODE_MULTI && msc->multi_is_broken)
+   return -EOPNOTSUPP;
+
mutex_lock(&msc->buf_mutex);
ret = 0;
 
@@ -2083,6 +2087,9 @@ static int intel_th_msc_probe(struct intel_th_device 
*thdev)
if (!res)
msc->do_irq = 1;
 
+   if (INTEL_TH_CAP(to_intel_th(thdev), multi_is_broken))
+   msc->multi_is_broken = 1;
+
msc->index = thdev->id;
 
msc->thdev = thdev;
diff --git a/drivers/hwtracing/intel_th/pci.c b/drivers/hwtracing/intel_th/pci.c
index 0d26484d67955..21fdf0b935166 100644
--- a/drivers/hwtracing/intel_th/pci.c
+++ b/drivers/hwtracing/intel_th/pci.c
@@ -120,6 +120,10 @@ static void intel_th_pci_remove(struct pci_dev *pdev)
pci_free_irq_vectors(pdev);
 }
 
+static const struct intel_th_drvdata intel_th_1x_multi_is_broken = {
+   .multi_is_broken= 1,
+};
+
 static const struct intel_th_drvdata intel_th_2x = {
.tscu_enable= 1,
.has_mintctl= 1,
@@ -152,7 +156,7 @@ static const struct pci_device_id intel_th_pci_id_table[] = 
{
{
/* Kaby Lake PCH-H */
PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xa2a6),
-   .driver_data = (kernel_ulong_t)0,
+   .driver_data = (kernel_ulong_t)&intel_th_1x_multi_is_broken,
},
{
/* Denverton */
@@ -207,7 +211,7 @@ static const struct pci_device_id intel_th_pci_id_table[] = 
{
{
/* Comet Lake PCH-V */
PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xa3a6),
-   .driver_data = (kernel_ulong_t)&intel_th_2x,
+   .driver_data = (kernel_ulong_t)&intel_th_1x_multi_is_broken,
},
{
/* Ice Lake NNPI */
-- 
2.25.1



[PATCH AUTOSEL 5.4 187/330] btrfs: do not init a reloc root if we aren't relocating

2020-09-17 Thread Sasha Levin
From: Josef Bacik 

[ Upstream commit 2abc726ab4b83db774e315c660ab8da21477092f ]

We previously were checking if the root had a dead root before accessing
root->reloc_root in order to avoid a use-after-free type bug.  However
this scenario happens after we've unset the reloc control, so we would
have been saved if we'd simply checked for fs_info->reloc_control.  At
this point during relocation we no longer need to be creating new reloc
roots, so simply move this check above the reloc_root checks to avoid
any future races and confusion.

Reviewed-by: Qu Wenruo 
Signed-off-by: Josef Bacik 
Reviewed-by: David Sterba 
Signed-off-by: David Sterba 
Signed-off-by: Sasha Levin 
---
 fs/btrfs/relocation.c | 20 
 1 file changed, 16 insertions(+), 4 deletions(-)

diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index af3605a0bf2e0..1313506a7ecb5 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -1468,6 +1468,10 @@ int btrfs_init_reloc_root(struct btrfs_trans_handle 
*trans,
int clear_rsv = 0;
int ret;
 
+   if (!rc || !rc->create_reloc_tree ||
+   root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
+   return 0;
+
/*
 * The subvolume has reloc tree but the swap is finished, no need to
 * create/update the dead reloc tree
@@ -1481,10 +1485,6 @@ int btrfs_init_reloc_root(struct btrfs_trans_handle 
*trans,
return 0;
}
 
-   if (!rc || !rc->create_reloc_tree ||
-   root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
-   return 0;
-
if (!trans->reloc_reserved) {
rsv = trans->block_rsv;
trans->block_rsv = rc->block_rsv;
@@ -2336,6 +2336,18 @@ static noinline_for_stack int merge_reloc_root(struct 
reloc_control *rc,
trans = NULL;
goto out;
}
+
+   /*
+* At this point we no longer have a reloc_control, so we can't
+* depend on btrfs_init_reloc_root to update our last_trans.
+*
+* But that's ok, we started the trans handle on our
+* corresponding fs_root, which means it's been added to the
+* dirty list.  At commit time we'll still call
+* btrfs_update_reloc_root() and update our root item
+* appropriately.
+*/
+   reloc_root->last_trans = trans->transid;
trans->block_rsv = rc->block_rsv;
 
replaced = 0;
-- 
2.25.1



[PATCH AUTOSEL 5.4 166/330] ath10k: use kzalloc to read for ath10k_sdio_hif_diag_read

2020-09-17 Thread Sasha Levin
From: Wen Gong 

[ Upstream commit 402f2992b4d62760cce7c689ff216ea3bf4d6e8a ]

When using the following command to read values, it crashed.

command:
dd if=/sys/kernel/debug/ieee80211/phy0/ath10k/mem_value count=1 bs=4 
skip=$((0x100233))

It will call ath10k_sdio_hif_diag_read() with address = 0x4008cc and
buf_len = 4.

Then the system crashes:
[ 1786.013258] Unable to handle kernel paging request at virtual address 
ffc00bd45000
[ 1786.013273] Mem abort info:
[ 1786.013281]   ESR = 0x9645
[ 1786.013291]   Exception class = DABT (current EL), IL = 32 bits
[ 1786.013299]   SET = 0, FnV = 0
[ 1786.013307]   EA = 0, S1PTW = 0
[ 1786.013314] Data abort info:
[ 1786.013322]   ISV = 0, ISS = 0x0045
[ 1786.013330]   CM = 0, WnR = 1
[ 1786.013342] swapper pgtable: 4k pages, 39-bit VAs, pgdp = 8542a60e
[ 1786.013350] [ffc00bd45000] pgd=, pud=
[ 1786.013368] Internal error: Oops: 9645 [#1] PREEMPT SMP
[ 1786.013609] Process swapper/0 (pid: 0, stack limit = 0x84b153c6)
[ 1786.013623] CPU: 0 PID: 0 Comm: swapper/0 Not tainted 4.19.86 #137
[ 1786.013631] Hardware name: MediaTek krane sku176 board (DT)
[ 1786.013643] pstate: 8085 (Nzcv daIf -PAN -UAO)
[ 1786.013662] pc : __memcpy+0x94/0x180
[ 1786.013678] lr : swiotlb_tbl_unmap_single+0x84/0x150
[ 1786.013686] sp : ff8008003c60
[ 1786.013694] x29: ff8008003c90 x28: ffae96411f80
[ 1786.013708] x27: ffae960d2018 x26: ff8019a4b9a8
[ 1786.013721] x25:  x24: 0001
[ 1786.013734] x23: ffae96567000 x22: 51d4
[ 1786.013747] x21:  x20: fe6e9000
[ 1786.013760] x19: 0004 x18: 0020
[ 1786.013773] x17: 0001 x16: 
[ 1786.013787] x15:  x14: 44c0
[ 1786.013800] x13: 00365ba4 x12: 
[ 1786.013813] x11: 0001 x10: 0037be6e9000
[ 1786.013826] x9 : ffc94000 x8 : 0bd45000
[ 1786.013839] x7 :  x6 : ffc00bd45000
[ 1786.013852] x5 :  x4 : 
[ 1786.013865] x3 : 0c00 x2 : 0004
[ 1786.013878] x1 : fff7be6e9004 x0 : ffc00bd45000
[ 1786.013891] Call trace:
[ 1786.013903]  __memcpy+0x94/0x180
[ 1786.013914]  unmap_single+0x6c/0x84
[ 1786.013925]  swiotlb_unmap_sg_attrs+0x54/0x80
[ 1786.013938]  __swiotlb_unmap_sg_attrs+0x8c/0xa4
[ 1786.013952]  msdc_unprepare_data+0x6c/0x84
[ 1786.013963]  msdc_request_done+0x58/0x84
[ 1786.013974]  msdc_data_xfer_done+0x1a0/0x1c8
[ 1786.013985]  msdc_irq+0x12c/0x17c
[ 1786.013996]  __handle_irq_event_percpu+0xe4/0x250
[ 1786.014006]  handle_irq_event_percpu+0x28/0x68
[ 1786.014015]  handle_irq_event+0x48/0x78
[ 1786.014026]  handle_fasteoi_irq+0xd0/0x1a0
[ 1786.014039]  __handle_domain_irq+0x84/0xc4
[ 1786.014050]  gic_handle_irq+0x124/0x1a4
[ 1786.014059]  el1_irq+0xb0/0x128
[ 1786.014072]  cpuidle_enter_state+0x298/0x328
[ 1786.014082]  cpuidle_enter+0x30/0x40
[ 1786.014094]  do_idle+0x190/0x268
[ 1786.014104]  cpu_startup_entry+0x24/0x28
[ 1786.014116]  rest_init+0xd4/0xe0
[ 1786.014126]  start_kernel+0x30c/0x38c
[ 1786.014139] Code: f8408423 f80084c3 36100062 b8404423 (b80044c3)
[ 1786.014150] ---[ end trace 3b02ddb698ea69ee ]---
[ 1786.015415] Kernel panic - not syncing: Fatal exception in interrupt
[ 1786.015433] SMP: stopping secondary CPUs
[ 1786.015447] Kernel Offset: 0x2e8d20 from 0xff800800
[ 1786.015458] CPU features: 0x0,2188200c
[ 1786.015466] Memory Limit: none

The SDIO chip needs memory allocated with kmalloc; if the buffer is
vmalloc'ed, as it is in ath10k_mem_value_read, a memory error results.
The kzalloc'ed buffer used by ath10k_sdio_hif_diag_read32 is the correct
type, so add a kzalloc'ed bounce buffer in ath10k_sdio_hif_diag_read
instead of writing into the vmalloc'ed buffer passed down from
ath10k_mem_value_read.

This patch only affects the SDIO chip.

Tested with QCA6174 SDIO with firmware WLAN.RMH.4.4.1-00029.
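
As an aside, the general bounce-buffer pattern applied here can be sketched
as follows; hw_read() is a hypothetical stand-in for the SDIO transfer that
needs DMA-safe (kmalloc'ed) memory:

#include <linux/slab.h>
#include <linux/string.h>

static int diag_read_bounce(void *caller_buf, size_t len,
                            int (*hw_read)(void *dst, size_t len))
{
    void *bounce;
    int ret;

    bounce = kzalloc(len, GFP_KERNEL);
    if (!bounce)
        return -ENOMEM;

    ret = hw_read(bounce, len);              /* DMA into kmalloc'ed memory */
    if (!ret)
        memcpy(caller_buf, bounce, len);     /* caller_buf may be vmalloc'ed */

    kfree(bounce);
    return ret;
}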

Signed-off-by: Wen Gong 
Signed-off-by: Kalle Valo 
Signed-off-by: Sasha Levin 
---
 drivers/net/wireless/ath/ath10k/sdio.c | 18 ++
 1 file changed, 14 insertions(+), 4 deletions(-)

diff --git a/drivers/net/wireless/ath/ath10k/sdio.c 
b/drivers/net/wireless/ath/ath10k/sdio.c
index 9870d2d095c87..8fe626deadeb0 100644
--- a/drivers/net/wireless/ath/ath10k/sdio.c
+++ b/drivers/net/wireless/ath/ath10k/sdio.c
@@ -1582,23 +1582,33 @@ static int ath10k_sdio_hif_diag_read(struct ath10k *ar, 
u32 address, void *buf,
 size_t buf_len)
 {
int ret;
+   void *mem;
+
+   mem = kzalloc(buf_len, GFP_KERNEL);
+   if (!mem)
+   return -ENOMEM;
 
/* set window register to start read cycle */
ret = ath10k_sdio_write32(ar, MBOX_WINDOW_READ_ADDR_ADDRESS, address);
if (ret) {
ath10k_warn(ar, "failed to set mbox window read address: %d", 
ret);
-   return ret;
+   goto out;
}
 
/* read the data */
-   

[PATCH AUTOSEL 5.4 192/330] serial: 8250: 8250_omap: Terminate DMA before pushing data on RX timeout

2020-09-17 Thread Sasha Levin
From: Vignesh Raghavendra 

[ Upstream commit 7cf4df30a98175033e9849f7f16c46e96ba47f41 ]

Terminate and flush the DMA internal buffers before pushing RX data to
the higher layer. Otherwise this leads to data corruption, as the driver
would end up pushing stale buffer data to the higher layer while the
actual data is still stuck inside the DMA hardware and has not yet
arrived in memory.
While at it, replace the deprecated dmaengine_terminate_all() with
dmaengine_terminate_async().
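
A simplified sketch of the fixed RX completion order (names are
illustrative, not the exact 8250_omap code):

#include <linux/dmaengine.h>
#include <linux/tty_flip.h>

static void rx_complete_sketch(struct dma_chan *rxchan, dma_cookie_t cookie,
                               struct tty_port *port, unsigned char *rx_buf,
                               size_t rx_size)
{
    struct dma_tx_state state;
    size_t count;

    dmaengine_tx_status(rxchan, cookie, &state);
    count = rx_size - state.residue;

    /* Partial transfer: stop the channel first so stale data cannot be
     * pushed while the remainder is still inside the DMA hardware. */
    if (count < rx_size)
        dmaengine_terminate_async(rxchan);
    if (!count)
        return;

    tty_insert_flip_string(port, rx_buf, count);
    tty_flip_buffer_push(port);
}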

Signed-off-by: Vignesh Raghavendra 
Link: https://lore.kernel.org/r/20200319110344.21348-2-vigne...@ti.com
Signed-off-by: Greg Kroah-Hartman 
Signed-off-by: Sasha Levin 
---
 drivers/tty/serial/8250/8250_omap.c | 6 --
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/drivers/tty/serial/8250/8250_omap.c 
b/drivers/tty/serial/8250/8250_omap.c
index 2624b5d083366..f2c6d9d3bb28f 100644
--- a/drivers/tty/serial/8250/8250_omap.c
+++ b/drivers/tty/serial/8250/8250_omap.c
@@ -790,7 +790,10 @@ static void __dma_rx_do_complete(struct uart_8250_port *p)
dmaengine_tx_status(dma->rxchan, dma->rx_cookie, &state);
 
count = dma->rx_size - state.residue;
-
+   if (count < dma->rx_size)
+   dmaengine_terminate_async(dma->rxchan);
+   if (!count)
+   goto unlock;
ret = tty_insert_flip_string(tty_port, dma->rx_buf, count);
 
p->port.icount.rx += ret;
@@ -852,7 +855,6 @@ static void omap_8250_rx_dma_flush(struct uart_8250_port *p)
spin_unlock_irqrestore(&priv->rx_dma_lock, flags);
 
__dma_rx_do_complete(p);
-   dmaengine_terminate_all(dma->rxchan);
 }
 
 static int omap_8250_rx_dma(struct uart_8250_port *p)
-- 
2.25.1



[PATCH AUTOSEL 5.4 160/330] drm/amd/display: Stop if retimer is not available

2020-09-17 Thread Sasha Levin
From: Rodrigo Siqueira 

[ Upstream commit a0e40018dcc3f59a10ca21d58f8ea8ceb1b035ac ]

Raven provides retimer feature support that requires i2c interaction in
order to make it work well, all settings required for this configuration
are loaded from the Atom bios which include the i2c address. If the
retimer feature is not available, we should abort the attempt to set
this feature, otherwise, it makes the following line return
I2C_CHANNEL_OPERATION_NO_RESPONSE:

 i2c_success = i2c_write(pipe_ctx, slave_address, buffer, sizeof(buffer));
 ...
 if (!i2c_success)
   ASSERT(i2c_success);

This ends up causing problems with hotplugging HDMI displays on Raven,
and causes retimer settings to warn like so:

WARNING: CPU: 1 PID: 429 at
drivers/gpu/drm/amd/amdgpu/../dal/dc/core/dc_link.c:1998
write_i2c_retimer_setting+0xc2/0x3c0 [amdgpu] Modules linked in:
edac_mce_amd ccp kvm irqbypass binfmt_misc crct10dif_pclmul crc32_pclmul
ghash_clmulni_intel snd_hda_codec_realtek snd_hda_codec_generic
ledtrig_audio snd_hda_codec_hdmi snd_hda_intel amdgpu(+) snd_hda_codec
snd_hda_core snd_hwdep snd_pcm snd_seq_midi snd_seq_midi_event
snd_rawmidi aesni_intel snd_seq amd_iommu_v2 gpu_sched aes_x86_64
crypto_simd cryptd glue_helper snd_seq_device ttm drm_kms_helper
snd_timer eeepc_wmi wmi_bmof asus_wmi sparse_keymap drm mxm_wmi snd
k10temp fb_sys_fops syscopyarea sysfillrect sysimgblt soundcore joydev
input_leds mac_hid sch_fq_codel parport_pc ppdev lp parport ip_tables
x_tables autofs4 igb i2c_algo_bit hid_generic usbhid i2c_piix4 dca ahci
hid libahci video wmi gpio_amdpt gpio_generic CPU: 1 PID: 429 Comm:
systemd-udevd Tainted: GW 5.2.0-rc1sept162019+ #1
Hardware name: System manufacturer System Product Name/ROG STRIX B450-F
GAMING, BIOS 2605 08/06/2019
RIP: 0010:write_i2c_retimer_setting+0xc2/0x3c0 [amdgpu]
Code: ff 0f b6 4d ce 44 0f b6 45 cf 44 0f b6 c8 45 89 cf 44 89 e2 48 c7
c6 f0 34 bc c0 bf 04 00 00 00 e8 63 b0 90 ff 45 84 ff 75 02 <0f> 0b 42
0f b6 04 73 8d 50 f6 80 fa 02 77 8c 3c 0a 0f 85 c8 00 00 RSP:
0018:a99d02726fd0 EFLAGS: 00010246
RAX:  RBX: a99d02727035 RCX: 0006
RDX:  RSI: 0002 RDI: 976acc857440
RBP: a99d02727018 R08: 0002 R09: 0002a600
R10: e90610193680 R11: 05e3 R12: 005d
R13: 976ac4b201b8 R14: 0001 R15: 
FS:  7f14f99e1680() GS:976acc84() knlGS:
CS:  0010 DS:  ES:  CR0: 80050033
CR2: 7fdf212843b8 CR3: 000408906000 CR4: 003406e0
Call Trace:
 core_link_enable_stream+0x626/0x680 [amdgpu]
 dce110_apply_ctx_to_hw+0x414/0x4e0 [amdgpu]
 dc_commit_state+0x331/0x5e0 [amdgpu]
 ? drm_calc_timestamping_constants+0xf9/0x150 [drm]
 amdgpu_dm_atomic_commit_tail+0x395/0x1e00 [amdgpu]
 ? dm_plane_helper_prepare_fb+0x20c/0x280 [amdgpu]
 commit_tail+0x42/0x70 [drm_kms_helper]
 drm_atomic_helper_commit+0x10c/0x120 [drm_kms_helper]
 amdgpu_dm_atomic_commit+0x95/0xa0 [amdgpu]
 drm_atomic_commit+0x4a/0x50 [drm]
 restore_fbdev_mode_atomic+0x1c0/0x1e0 [drm_kms_helper]
 restore_fbdev_mode+0x4c/0x160 [drm_kms_helper]
 ? _cond_resched+0x19/0x40
 drm_fb_helper_restore_fbdev_mode_unlocked+0x4e/0xa0 [drm_kms_helper]
 drm_fb_helper_set_par+0x2d/0x50 [drm_kms_helper]
 fbcon_init+0x471/0x630
 visual_init+0xd5/0x130
 do_bind_con_driver+0x20a/0x430
 do_take_over_console+0x7d/0x1b0
 do_fbcon_takeover+0x5c/0xb0
 fbcon_event_notify+0x6cd/0x8a0
 notifier_call_chain+0x4c/0x70
 blocking_notifier_call_chain+0x43/0x60
 fb_notifier_call_chain+0x1b/0x20
 register_framebuffer+0x254/0x360
 __drm_fb_helper_initial_config_and_unlock+0x2c5/0x510 [drm_kms_helper]
 drm_fb_helper_initial_config+0x35/0x40 [drm_kms_helper]
 amdgpu_fbdev_init+0xcd/0x100 [amdgpu]
 amdgpu_device_init+0x1156/0x1930 [amdgpu]
 amdgpu_driver_load_kms+0x8d/0x2e0 [amdgpu]
 drm_dev_register+0x12b/0x1c0 [drm]
 amdgpu_pci_probe+0xd3/0x160 [amdgpu]
 local_pci_probe+0x47/0xa0
 pci_device_probe+0x142/0x1b0
 really_probe+0xf5/0x3d0
 driver_probe_device+0x11b/0x130
 device_driver_attach+0x58/0x60
 __driver_attach+0xa3/0x140
 ? device_driver_attach+0x60/0x60
 ? device_driver_attach+0x60/0x60
 bus_for_each_dev+0x74/0xb0
 ? kmem_cache_alloc_trace+0x1a3/0x1c0
 driver_attach+0x1e/0x20
 bus_add_driver+0x147/0x220
 ? 0xc0cb9000
 driver_register+0x60/0x100
 ? 0xc0cb9000
 __pci_register_driver+0x5a/0x60
 amdgpu_init+0x74/0x83 [amdgpu]
 do_one_initcall+0x4a/0x1fa
 ? _cond_resched+0x19/0x40
 ? kmem_cache_alloc_trace+0x3f/0x1c0
 ? __vunmap+0x1cc/0x200
 do_init_module+0x5f/0x227
 load_module+0x2330/0x2b40
 __do_sys_finit_module+0xfc/0x120
 ? __do_sys_finit_module+0xfc/0x120
 __x64_sys_finit_module+0x1a/0x20
 do_syscall_64+0x5a/0x130
 entry_SYSCALL_64_after_hwframe+0x44/0xa9
RIP: 0033:0x7f14f9500839
Code: 00 f3 c3 66 2e 0f 1f 84 00 00 00 00 00 0f 1f 40 00 48 89 f8 48 89
f7 48 89 d6 48 89 ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 05 <48> 3d 01
f0 ff ff 73 01 c3 48 8b 0d 1f 

[PATCH AUTOSEL 5.4 178/330] RDMA/cm: Remove a race freeing timewait_info

2020-09-17 Thread Sasha Levin
From: Jason Gunthorpe 

[ Upstream commit bede86a39d9dc3387ac00dcb8e1ac221676b2f25 ]

When creating a cm_id during REQ, the id immediately becomes visible to the
other MAD handlers, and shortly after the state is moved to IB_CM_REQ_RCVD.

This allows cm_rej_handler() to run concurrently and free the work:

 CPU 0                                CPU 1
 cm_req_handler()
   ib_create_cm_id()
   cm_match_req()
     id_priv->state = IB_CM_REQ_RCVD
                                      cm_rej_handler()
                                        cm_acquire_id()
                                        spin_lock(&id_priv->lock)
                                        switch (id_priv->state)
                                        case IB_CM_REQ_RCVD:
                                          cm_reset_to_idle()
                                            kfree(id_priv->timewait_info);
                                          goto destroy
 destroy:
   kfree(id_priv->timewait_info);
   id_priv->timewait_info = NULL

Causing a double free or worse.

Do not free the timewait_info without also holding the
id_priv->lock. Simplify this entire flow by making the free unconditional
during cm_destroy_id() and removing the confusing special case error
unwind during creation of the timewait_info.

This also fixes a leak of the timewait if cm_destroy_id() is called in
IB_CM_ESTABLISHED with an XRC TGT QP. The state machine will be left in
ESTABLISHED while it needed to transition through IB_CM_TIMEWAIT to
release the timewait pointer.

Also fix a leak of the timewait_info if the caller mis-uses the API and
does ib_send_cm_reqs().

Fixes: a977049dacde ("[PATCH] IB: Add the kernel CM implementation")
Link: https://lore.kernel.org/r/20200310092545.251365-4-l...@kernel.org
Signed-off-by: Leon Romanovsky 
Signed-off-by: Jason Gunthorpe 
Signed-off-by: Sasha Levin 
---
 drivers/infiniband/core/cm.c | 25 +++--
 1 file changed, 15 insertions(+), 10 deletions(-)

diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index 09af96ec41dd6..c1d6a068f50fe 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -1092,14 +1092,22 @@ retest:
break;
}
 
-   spin_lock_irq(&cm.lock);
+   spin_lock_irq(&cm_id_priv->lock);
+   spin_lock(&cm.lock);
+   /* Required for cleanup paths related cm_req_handler() */
+   if (cm_id_priv->timewait_info) {
+   cm_cleanup_timewait(cm_id_priv->timewait_info);
+   kfree(cm_id_priv->timewait_info);
+   cm_id_priv->timewait_info = NULL;
+   }
if (!list_empty(&cm_id_priv->altr_list) &&
(!cm_id_priv->altr_send_port_not_ready))
list_del(&cm_id_priv->altr_list);
if (!list_empty(&cm_id_priv->prim_list) &&
(!cm_id_priv->prim_send_port_not_ready))
list_del(&cm_id_priv->prim_list);
-   spin_unlock_irq(&cm.lock);
+   spin_unlock(&cm.lock);
+   spin_unlock_irq(&cm_id_priv->lock);
 
cm_free_id(cm_id->local_id);
cm_deref_id(cm_id_priv);
@@ -1416,7 +1424,7 @@ int ib_send_cm_req(struct ib_cm_id *cm_id,
/* Verify that we're not in timewait. */
cm_id_priv = container_of(cm_id, struct cm_id_private, id);
spin_lock_irqsave(&cm_id_priv->lock, flags);
-   if (cm_id->state != IB_CM_IDLE) {
+   if (cm_id->state != IB_CM_IDLE || WARN_ON(cm_id_priv->timewait_info)) {
spin_unlock_irqrestore(&cm_id_priv->lock, flags);
ret = -EINVAL;
goto out;
@@ -1434,12 +1442,12 @@ int ib_send_cm_req(struct ib_cm_id *cm_id,
param->ppath_sgid_attr, &cm_id_priv->av,
 cm_id_priv);
if (ret)
-   goto error1;
+   goto out;
if (param->alternate_path) {
ret = cm_init_av_by_path(param->alternate_path, NULL,
&cm_id_priv->alt_av, cm_id_priv);
if (ret)
-   goto error1;
+   goto out;
}
cm_id->service_id = param->service_id;
cm_id->service_mask = ~cpu_to_be64(0);
@@ -1457,7 +1465,7 @@ int ib_send_cm_req(struct ib_cm_id *cm_id,
 
ret = cm_alloc_msg(cm_id_priv, &cm_id_priv->msg);
if (ret)
-   goto error1;
+   goto out;
 
req_msg = (struct cm_req_msg *) cm_id_priv->msg->mad;
cm_format_req(req_msg, cm_id_priv, param);
@@ -1480,7 +1488,6 @@ int ib_send_cm_req(struct ib_cm_id *cm_id,
return 0;
 
 error2:cm_free_msg(cm_id_priv->msg);
-error1:kfree(cm_id_priv->timewait_info);
 out:   return ret;
 }
 EXPORT_SYMBOL(ib_send_cm_req);
@@ -1965,7 +1972,7 @@ static int cm_req_handler(struct cm_work *work)
pr_debug("%s: local_id %d, no listen_cm_id_priv\n", __func__,
 be32_to_cpu(cm_id->local_id));
ret = -EINVAL;
-   goto free_timeinfo;

[PATCH AUTOSEL 5.4 188/330] btrfs: free the reloc_control in a consistent way

2020-09-17 Thread Sasha Levin
From: Josef Bacik 

[ Upstream commit 1a0afa0ecfc4dbc8d7583d03cafd3f68f781df0c ]

If we have an error while processing the reloc roots we could leak roots
that were added to rc->reloc_roots before we hit the error.  We could
have also not removed the reloc tree mapping from our rb_tree, so clean
up any remaining nodes in the reloc root rb_tree.

Signed-off-by: Josef Bacik 
Reviewed-by: David Sterba 
[ use rbtree_postorder_for_each_entry_safe ]
Signed-off-by: David Sterba 
Signed-off-by: Sasha Levin 
---
 fs/btrfs/relocation.c | 16 ++--
 1 file changed, 14 insertions(+), 2 deletions(-)

diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index 1313506a7ecb5..ece53d2f55ae3 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -4354,6 +4354,18 @@ static struct reloc_control *alloc_reloc_control(struct 
btrfs_fs_info *fs_info)
return rc;
 }
 
+static void free_reloc_control(struct reloc_control *rc)
+{
+   struct mapping_node *node, *tmp;
+
+   free_reloc_roots(&rc->reloc_roots);
+   rbtree_postorder_for_each_entry_safe(node, tmp,
+   &rc->reloc_root_tree.rb_root, rb_node)
+   kfree(node);
+
+   kfree(rc);
+}
+
 /*
  * Print the block group being relocated
  */
@@ -4486,7 +4498,7 @@ out:
btrfs_dec_block_group_ro(rc->block_group);
iput(rc->data_inode);
btrfs_put_block_group(rc->block_group);
-   kfree(rc);
+   free_reloc_control(rc);
return err;
 }
 
@@ -4659,7 +4671,7 @@ out_clean:
err = ret;
 out_unset:
unset_reloc_control(rc);
-   kfree(rc);
+   free_reloc_control(rc);
 out:
if (!list_empty(&reloc_roots))
free_reloc_roots(&reloc_roots);
-- 
2.25.1



[PATCH AUTOSEL 5.4 191/330] serial: 8250_omap: Fix sleeping function called from invalid context during probe

2020-09-17 Thread Sasha Levin
From: Peter Ujfalusi 

[ Upstream commit 4ce35a3617c0ac758c61122b2218b6c8c9ac9398 ]

When booting j721e the following bug is printed:

[1.154821] BUG: sleeping function called from invalid context at 
kernel/sched/completion.c:99
[1.154827] in_atomic(): 0, irqs_disabled(): 128, non_block: 0, pid: 12, 
name: kworker/0:1
[1.154832] 3 locks held by kworker/0:1/12:
[1.154836]  #0: 000840030728 ((wq_completion)events){+.+.}, at: 
process_one_work+0x1d4/0x6e8
[1.154852]  #1: 80001214fdd8 (deferred_probe_work){+.+.}, at: 
process_one_work+0x1d4/0x6e8
[1.154860]  #2: 00084060b170 (>mutex){}, at: 
__device_attach+0x38/0x138
[1.154872] irq event stamp: 63096
[1.154881] hardirqs last  enabled at (63095): [] 
_raw_spin_unlock_irqrestore+0x70/0x78
[1.154887] hardirqs last disabled at (63096): [] 
_raw_spin_lock_irqsave+0x28/0x80
[1.154893] softirqs last  enabled at (62254): [] 
_stext+0x488/0x564
[1.154899] softirqs last disabled at (62247): [] 
irq_exit+0x114/0x140
[1.154906] CPU: 0 PID: 12 Comm: kworker/0:1 Not tainted 
5.6.0-rc6-next-20200318-00094-g45e4089b0bd3 #221
[1.154911] Hardware name: Texas Instruments K3 J721E SoC (DT)
[1.154917] Workqueue: events deferred_probe_work_func
[1.154923] Call trace:
[1.154928]  dump_backtrace+0x0/0x190
[1.154933]  show_stack+0x14/0x20
[1.154940]  dump_stack+0xe0/0x148
[1.154946]  ___might_sleep+0x150/0x1f0
[1.154952]  __might_sleep+0x4c/0x80
[1.154957]  wait_for_completion_timeout+0x40/0x140
[1.154964]  ti_sci_set_device_state+0xa0/0x158
[1.154969]  ti_sci_cmd_get_device_exclusive+0x14/0x20
[1.154977]  ti_sci_dev_start+0x34/0x50
[1.154984]  genpd_runtime_resume+0x78/0x1f8
[1.154991]  __rpm_callback+0x3c/0x140
[1.154996]  rpm_callback+0x20/0x80
[1.155001]  rpm_resume+0x568/0x758
[1.155007]  __pm_runtime_resume+0x44/0xb0
[1.155013]  omap8250_probe+0x2b4/0x508
[1.155019]  platform_drv_probe+0x50/0xa0
[1.155023]  really_probe+0xd4/0x318
[1.155028]  driver_probe_device+0x54/0xe8
[1.155033]  __device_attach_driver+0x80/0xb8
[1.155039]  bus_for_each_drv+0x74/0xc0
[1.155044]  __device_attach+0xdc/0x138
[1.155049]  device_initial_probe+0x10/0x18
[1.155053]  bus_probe_device+0x98/0xa0
[1.155058]  deferred_probe_work_func+0x74/0xb0
[1.155063]  process_one_work+0x280/0x6e8
[1.155068]  worker_thread+0x48/0x430
[1.155073]  kthread+0x108/0x138
[1.155079]  ret_from_fork+0x10/0x18

To fix the bug, pm_runtime_enable() needs to be called before any other
pm_runtime call.
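
A sketch of the corrected probe() ordering (illustrative only, not the full
omap8250_probe() sequence):

#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

static int probe_order_sketch(struct platform_device *pdev)
{
    struct device *dev = &pdev->dev;

    pm_runtime_enable(dev);       /* must precede any other pm_runtime_* call */
    pm_runtime_use_autosuspend(dev);
    pm_runtime_irq_safe(dev);

    pm_runtime_get_sync(dev);     /* resume now runs against an enabled PM domain */
    /* ... register the UART port ... */
    pm_runtime_mark_last_busy(dev);
    pm_runtime_put_autosuspend(dev);
    return 0;
}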

Reported-by: Tomi Valkeinen 
Signed-off-by: Peter Ujfalusi 
Link: https://lore.kernel.org/r/20200320125200.6772-1-peter.ujfal...@ti.com
Signed-off-by: Greg Kroah-Hartman 
Signed-off-by: Sasha Levin 
---
 drivers/tty/serial/8250/8250_omap.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/tty/serial/8250/8250_omap.c 
b/drivers/tty/serial/8250/8250_omap.c
index 836e736ae188b..2624b5d083366 100644
--- a/drivers/tty/serial/8250/8250_omap.c
+++ b/drivers/tty/serial/8250/8250_omap.c
@@ -1234,6 +1234,7 @@ static int omap8250_probe(struct platform_device *pdev)
spin_lock_init(&priv->rx_dma_lock);

device_init_wakeup(&pdev->dev, true);
+   pm_runtime_enable(&pdev->dev);
pm_runtime_use_autosuspend(&pdev->dev);
 
/*
@@ -1247,7 +1248,6 @@ static int omap8250_probe(struct platform_device *pdev)
pm_runtime_set_autosuspend_delay(&pdev->dev, -1);

pm_runtime_irq_safe(&pdev->dev);
-   pm_runtime_enable(&pdev->dev);

pm_runtime_get_sync(&pdev->dev);
 
-- 
2.25.1



[PATCH AUTOSEL 5.4 162/330] cpu-topology: Fix the potential data corruption

2020-09-17 Thread Sasha Levin
From: Zeng Tao 

[ Upstream commit 4a33691c4cea9eb0a7c66e87248be4637e14b180 ]

Currently there are only 10 bytes to store a cpu-topology 'name', so only
10 bytes are copied into the cluster/thread/core names.

If the cluster ID exceeds a 2-digit number, it will result in data
corruption and end up in a dead loop in the parsing routines. The same
applies to thread names with more than a 3-digit number.

This issue was found using the boundary tests under virtualised
environment like QEMU.

Let us increase the buffer to fix such potential issues.
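
A minimal illustration of why 10 bytes is no longer enough once the IDs grow
(sketch, not the actual parser):

#include <linux/kernel.h>

static void build_cluster_name(char *name, size_t len, int cluster_id)
{
    /* "cluster" (7 chars) + up to 10 digits + NUL needs about 20 bytes */
    snprintf(name, len, "cluster%d", cluster_id);
}

/* Example: char name[20]; build_cluster_name(name, sizeof(name), 123);
 * With char name[10], "cluster123" no longer fits and gets truncated,
 * which is the kind of failure the larger buffer avoids. */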

Reviewed-by: Sudeep Holla 
Signed-off-by: Zeng Tao 

Link: 
https://lore.kernel.org/r/1583294092-5929-1-git-send-email-prime.z...@hisilicon.com
Signed-off-by: Greg Kroah-Hartman 
Signed-off-by: Sasha Levin 
---
 drivers/base/arch_topology.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/base/arch_topology.c b/drivers/base/arch_topology.c
index 1eb81f113786f..83e26fd188cc9 100644
--- a/drivers/base/arch_topology.c
+++ b/drivers/base/arch_topology.c
@@ -270,7 +270,7 @@ static int __init get_cpu_for_node(struct device_node *node)
 static int __init parse_core(struct device_node *core, int package_id,
 int core_id)
 {
-   char name[10];
+   char name[20];
bool leaf = true;
int i = 0;
int cpu;
@@ -317,7 +317,7 @@ static int __init parse_core(struct device_node *core, int 
package_id,
 
 static int __init parse_cluster(struct device_node *cluster, int depth)
 {
-   char name[10];
+   char name[20];
bool leaf = true;
bool has_cores = false;
struct device_node *c;
-- 
2.25.1



[PATCH AUTOSEL 5.4 151/330] Bluetooth: guard against controllers sending zero'd events

2020-09-17 Thread Sasha Levin
From: Alain Michaud 

[ Upstream commit 08bb4da90150e2a225f35e0f642cdc463958d696 ]

Some controllers have been observed to send zero'd events under some
conditions.  This change guards against it, as well as adding a trace to
facilitate diagnosing the condition.

Signed-off-by: Alain Michaud 
Signed-off-by: Marcel Holtmann 
Signed-off-by: Sasha Levin 
---
 net/bluetooth/hci_event.c | 6 ++
 1 file changed, 6 insertions(+)

diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index 7bf6860fed783..1bbeb14b8b64e 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -5853,6 +5853,11 @@ void hci_event_packet(struct hci_dev *hdev, struct 
sk_buff *skb)
u8 status = 0, event = hdr->evt, req_evt = 0;
u16 opcode = HCI_OP_NOP;
 
+   if (!event) {
+   bt_dev_warn(hdev, "Received unexpected HCI Event ");
+   goto done;
+   }
+
if (hdev->sent_cmd && bt_cb(hdev->sent_cmd)->hci.req_event == event) {
struct hci_command_hdr *cmd_hdr = (void *) hdev->sent_cmd->data;
opcode = __le16_to_cpu(cmd_hdr->opcode);
@@ -6064,6 +6069,7 @@ void hci_event_packet(struct hci_dev *hdev, struct 
sk_buff *skb)
req_complete_skb(hdev, status, opcode, orig_skb);
}
 
+done:
kfree_skb(orig_skb);
kfree_skb(skb);
hdev->stat.evt_rx++;
-- 
2.25.1



[PATCH AUTOSEL 5.4 109/330] selftests/ftrace: fix glob selftest

2020-09-17 Thread Sasha Levin
From: Sven Schnelle 

[ Upstream commit af4ddd607dff7aabd466a4a878e01b9f592a75ab ]

test.d/ftrace/func-filter-glob.tc is failing on s390 because it has
ARCH_INLINE_SPIN_LOCK and friends set to 'y'. So the usual
__raw_spin_lock symbol isn't in the ftrace function list. Change
'*aw*lock' to '*spin*lock' which would hopefully match some of the
locking functions on all platforms.

Reviewed-by: Steven Rostedt (VMware) 
Signed-off-by: Sven Schnelle 
Signed-off-by: Shuah Khan 
Signed-off-by: Sasha Levin 
---
 .../testing/selftests/ftrace/test.d/ftrace/func-filter-glob.tc  | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tools/testing/selftests/ftrace/test.d/ftrace/func-filter-glob.tc 
b/tools/testing/selftests/ftrace/test.d/ftrace/func-filter-glob.tc
index 27a54a17da65d..f4e92afab14b2 100644
--- a/tools/testing/selftests/ftrace/test.d/ftrace/func-filter-glob.tc
+++ b/tools/testing/selftests/ftrace/test.d/ftrace/func-filter-glob.tc
@@ -30,7 +30,7 @@ ftrace_filter_check '*schedule*' '^.*schedule.*$'
 ftrace_filter_check 'schedule*' '^schedule.*$'
 
 # filter by *mid*end
-ftrace_filter_check '*aw*lock' '.*aw.*lock$'
+ftrace_filter_check '*pin*lock' '.*pin.*lock$'
 
 # filter by start*mid*
 ftrace_filter_check 'mutex*try*' '^mutex.*try.*'
-- 
2.25.1



[PATCH AUTOSEL 5.4 126/330] locking/lockdep: Decrement IRQ context counters when removing lock chain

2020-09-17 Thread Sasha Levin
From: Waiman Long 

[ Upstream commit b3b9c187dc2544923a601733a85352b9ddaba9b3 ]

There are currently three counters to track the IRQ context of a lock
chain - nr_hardirq_chains, nr_softirq_chains and nr_process_chains.
They are incremented when a new lock chain is added, but they are
not decremented when a lock chain is removed. That causes some of the
statistic counts reported by /proc/lockdep_stats to be incorrect.
Fix that by decrementing the right counter when a lock chain is removed.

Since inc_chains() no longer accesses hardirq_context and softirq_context
directly, it is moved out from the CONFIG_TRACE_IRQFLAGS conditional
compilation block.

Fixes: a0b0fd53e1e6 ("locking/lockdep: Free lock classes that are no longer in 
use")
Signed-off-by: Waiman Long 
Signed-off-by: Peter Zijlstra (Intel) 
Signed-off-by: Ingo Molnar 
Link: https://lkml.kernel.org/r/20200206152408.24165-2-long...@redhat.com
Signed-off-by: Sasha Levin 
---
 kernel/locking/lockdep.c   | 40 +-
 kernel/locking/lockdep_internals.h |  6 +
 2 files changed, 29 insertions(+), 17 deletions(-)

diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 9ab1a965c3b92..bca0f7f71cde4 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -2302,18 +2302,6 @@ static int check_irq_usage(struct task_struct *curr, 
struct held_lock *prev,
return 0;
 }
 
-static void inc_chains(void)
-{
-   if (current->hardirq_context)
-   nr_hardirq_chains++;
-   else {
-   if (current->softirq_context)
-   nr_softirq_chains++;
-   else
-   nr_process_chains++;
-   }
-}
-
 #else
 
 static inline int check_irq_usage(struct task_struct *curr,
@@ -2321,13 +2309,27 @@ static inline int check_irq_usage(struct task_struct 
*curr,
 {
return 1;
 }
+#endif /* CONFIG_TRACE_IRQFLAGS */
 
-static inline void inc_chains(void)
+static void inc_chains(int irq_context)
 {
-   nr_process_chains++;
+   if (irq_context & LOCK_CHAIN_HARDIRQ_CONTEXT)
+   nr_hardirq_chains++;
+   else if (irq_context & LOCK_CHAIN_SOFTIRQ_CONTEXT)
+   nr_softirq_chains++;
+   else
+   nr_process_chains++;
 }
 
-#endif /* CONFIG_TRACE_IRQFLAGS */
+static void dec_chains(int irq_context)
+{
+   if (irq_context & LOCK_CHAIN_HARDIRQ_CONTEXT)
+   nr_hardirq_chains--;
+   else if (irq_context & LOCK_CHAIN_SOFTIRQ_CONTEXT)
+   nr_softirq_chains--;
+   else
+   nr_process_chains--;
+}
 
 static void
 print_deadlock_scenario(struct held_lock *nxt, struct held_lock *prv)
@@ -2847,7 +2849,7 @@ static inline int add_chain_cache(struct task_struct 
*curr,
 
hlist_add_head_rcu(&chain->entry, hash_head);
debug_atomic_inc(chain_lookup_misses);
-   inc_chains();
+   inc_chains(chain->irq_context);
 
return 1;
 }
@@ -3600,7 +3602,8 @@ lock_used:
 
 static inline unsigned int task_irq_context(struct task_struct *task)
 {
-   return 2 * !!task->hardirq_context + !!task->softirq_context;
+   return LOCK_CHAIN_HARDIRQ_CONTEXT * !!task->hardirq_context +
+  LOCK_CHAIN_SOFTIRQ_CONTEXT * !!task->softirq_context;
 }
 
 static int separate_irq_context(struct task_struct *curr,
@@ -4805,6 +4808,8 @@ recalc:
return;
/* Overwrite the chain key for concurrent RCU readers. */
WRITE_ONCE(chain->chain_key, chain_key);
+   dec_chains(chain->irq_context);
+
/*
 * Note: calling hlist_del_rcu() from inside a
 * hlist_for_each_entry_rcu() loop is safe.
@@ -4826,6 +4831,7 @@ recalc:
}
*new_chain = *chain;
hlist_add_head_rcu(&new_chain->entry, chainhashentry(chain_key));
+   inc_chains(new_chain->irq_context);
 #endif
 }
 
diff --git a/kernel/locking/lockdep_internals.h 
b/kernel/locking/lockdep_internals.h
index 18d85aebbb57f..a525368b8cf61 100644
--- a/kernel/locking/lockdep_internals.h
+++ b/kernel/locking/lockdep_internals.h
@@ -106,6 +106,12 @@ static const unsigned long LOCKF_USED_IN_IRQ_READ =
 #define STACK_TRACE_HASH_SIZE  16384
 #endif
 
+/*
+ * Bit definitions for lock_chain.irq_context
+ */
+#define LOCK_CHAIN_SOFTIRQ_CONTEXT (1 << 0)
+#define LOCK_CHAIN_HARDIRQ_CONTEXT (1 << 1)
+
 #define MAX_LOCKDEP_CHAINS (1UL << MAX_LOCKDEP_CHAINS_BITS)
 
 #define MAX_LOCKDEP_CHAIN_HLOCKS (MAX_LOCKDEP_CHAINS*5)
-- 
2.25.1



[PATCH AUTOSEL 5.4 139/330] bpf: Remove recursion prevention from rcu free callback

2020-09-17 Thread Sasha Levin
From: Thomas Gleixner 

[ Upstream commit 8a37963c7ac9ecb7f86f8ebda020e3f8d6d7b8a0 ]

If an element is freed via RCU then recursion into BPF instrumentation
functions is not a concern. The element is already detached from the map
and the RCU callback does not hold any locks on which a kprobe, perf event
or tracepoint attached BPF program could deadlock.

Signed-off-by: Thomas Gleixner 
Signed-off-by: Alexei Starovoitov 
Link: https://lore.kernel.org/bpf/20200224145643.259118...@linutronix.de
Signed-off-by: Sasha Levin 
---
 kernel/bpf/hashtab.c | 8 
 1 file changed, 8 deletions(-)

diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
index 039d64b1bfb7d..728ffec52cf36 100644
--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -664,15 +664,7 @@ static void htab_elem_free_rcu(struct rcu_head *head)
struct htab_elem *l = container_of(head, struct htab_elem, rcu);
struct bpf_htab *htab = l->htab;
 
-   /* must increment bpf_prog_active to avoid kprobe+bpf triggering while
-* we're calling kfree, otherwise deadlock is possible if kprobes
-* are placed somewhere inside of slub
-*/
-   preempt_disable();
-   __this_cpu_inc(bpf_prog_active);
htab_elem_free(htab, l);
-   __this_cpu_dec(bpf_prog_active);
-   preempt_enable();
 }
 
 static void htab_put_fd_value(struct bpf_htab *htab, struct htab_elem *l)
-- 
2.25.1



[PATCH AUTOSEL 5.4 149/330] ASoC: SOF: ipc: check ipc return value before data copy

2020-09-17 Thread Sasha Levin
From: Jaska Uimonen 

[ Upstream commit 1919b42ca4ad75a2397081164661af3ce5a7b8f4 ]

In tx_wait_done the ipc payload is copied before the DSP transaction
error code is checked. This might lead to corrupted data on the kernel side
even though the error would be handled later. It is also pointless to
copy the data in case of error. So change the order of error check and
copy.
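
The resulting pattern is the usual check-then-copy; a generic sketch with a
hypothetical reply structure (not the SOF IPC types):

#include <linux/types.h>
#include <linux/string.h>
#include <linux/errno.h>

struct reply_sketch {
    int error;               /* status reported by the DSP */
    size_t size;             /* payload size */
    unsigned char data[64];  /* payload */
};

static int consume_reply(const struct reply_sketch *msg, void *out,
                         size_t out_len)
{
    if (msg->error < 0)
        return msg->error;       /* leave 'out' untouched on error */
    if (msg->size > out_len)
        return -EINVAL;
    if (msg->size)
        memcpy(out, msg->data, msg->size);
    return 0;
}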

Signed-off-by: Pierre-Louis Bossart 
Signed-off-by: Jaska Uimonen 
Link: 
https://lore.kernel.org/r/20200228231850.9226-3-pierre-louis.boss...@linux.intel.com
Signed-off-by: Mark Brown 
Signed-off-by: Sasha Levin 
---
 sound/soc/sof/ipc.c | 12 +++-
 1 file changed, 7 insertions(+), 5 deletions(-)

diff --git a/sound/soc/sof/ipc.c b/sound/soc/sof/ipc.c
index e7b1a80e2a14c..f38f651da2246 100644
--- a/sound/soc/sof/ipc.c
+++ b/sound/soc/sof/ipc.c
@@ -215,15 +215,17 @@ static int tx_wait_done(struct snd_sof_ipc *ipc, struct 
snd_sof_ipc_msg *msg,
snd_sof_trace_notify_for_error(ipc->sdev);
ret = -ETIMEDOUT;
} else {
-   /* copy the data returned from DSP */
ret = msg->reply_error;
-   if (msg->reply_size)
-   memcpy(reply_data, msg->reply_data, msg->reply_size);
-   if (ret < 0)
+   if (ret < 0) {
dev_err(sdev->dev, "error: ipc error for 0x%x size 
%zu\n",
hdr->cmd, msg->reply_size);
-   else
+   } else {
ipc_log_header(sdev->dev, "ipc tx succeeded", hdr->cmd);
+   if (msg->reply_size)
+   /* copy the data returned from DSP */
+   memcpy(reply_data, msg->reply_data,
+  msg->reply_size);
+   }
}
 
return ret;
-- 
2.25.1



[PATCH AUTOSEL 5.4 141/330] dmaengine: tegra-apb: Prevent race conditions on channel's freeing

2020-09-17 Thread Sasha Levin
From: Dmitry Osipenko 

[ Upstream commit 8e84172e372bdca20c305d92d51d33640d2da431 ]

It's incorrect to check the channel's "busy" state without taking a lock.
That shouldn't cause any real troubles, nevertheless it's always better
not to have any race conditions in the code.

Signed-off-by: Dmitry Osipenko 
Acked-by: Jon Hunter 
Link: https://lore.kernel.org/r/20200209163356.6439-5-dig...@gmail.com
Signed-off-by: Vinod Koul 
Signed-off-by: Sasha Levin 
---
 drivers/dma/tegra20-apb-dma.c | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c
index 4a750e29bfb53..3fe27dbde5b2b 100644
--- a/drivers/dma/tegra20-apb-dma.c
+++ b/drivers/dma/tegra20-apb-dma.c
@@ -1287,8 +1287,7 @@ static void tegra_dma_free_chan_resources(struct dma_chan 
*dc)
 
dev_dbg(tdc2dev(tdc), "Freeing channel %d\n", tdc->id);
 
-   if (tdc->busy)
-   tegra_dma_terminate_all(dc);
+   tegra_dma_terminate_all(dc);
 
spin_lock_irqsave(&tdc->lock, flags);
list_splice_init(&tdc->pending_sg_req, &sg_req_list);
-- 
2.25.1



[PATCH AUTOSEL 5.4 119/330] drm/amd/display: fix workaround for incorrect double buffer register for DLG ADL and TTU

2020-09-17 Thread Sasha Levin
From: Tony Cheng 

[ Upstream commit 85e148fb963d27152a14e6d399a47aed9bc99c15 ]

[Why]
These registers should have been double buffered. As a SW workaround, have SW
program the more aggressive (lower) values whenever these registers are
updated, so we will not underflow, at the expense of a less optimal request
pattern.

[How]
There is a driver bug where we don't check for 0, which is the uninitialized
HW default. Since 0 is smaller than any value we need to program, the driver
ends up not programming these registers.
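
In isolation, the per-register comparison can be sketched as (illustrative,
using the same naming as the patch):

#include <linux/types.h>

/* 0 means the HW still holds its uninitialized default, so it must be
 * treated as "needs programming" even though it is numerically smaller
 * than any value we would write. */
static bool hostvm_deadline_needs_update(u32 hw_value, u32 new_value)
{
    const u32 uninitialized_hw_default = 0;

    return hw_value == uninitialized_hw_default || hw_value > new_value;
}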

Signed-off-by: Tony Cheng 
Reviewed-by: Yongqiang Sun 
Acked-by: Bhawanpreet Lakha 
Signed-off-by: Alex Deucher 
Signed-off-by: Sasha Levin 
---
 .../gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c | 35 +--
 1 file changed, 25 insertions(+), 10 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c 
b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c
index a00af513aa2b0..c8f77bd0ce8a6 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c
@@ -73,32 +73,47 @@ void apply_DEDCN21_142_wa_for_hostvm_deadline(
struct _vcs_dpi_display_dlg_regs_st *dlg_attr)
 {
struct dcn21_hubp *hubp21 = TO_DCN21_HUBP(hubp);
-   uint32_t cur_value;
+   uint32_t refcyc_per_vm_group_vblank;
+   uint32_t refcyc_per_vm_req_vblank;
+   uint32_t refcyc_per_vm_group_flip;
+   uint32_t refcyc_per_vm_req_flip;
+   const uint32_t uninitialized_hw_default = 0;
 
-   REG_GET(VBLANK_PARAMETERS_5, REFCYC_PER_VM_GROUP_VBLANK, &cur_value);
-   if (cur_value > dlg_attr->refcyc_per_vm_group_vblank)
+   REG_GET(VBLANK_PARAMETERS_5,
+   REFCYC_PER_VM_GROUP_VBLANK, 
&refcyc_per_vm_group_vblank);
+
+   if (refcyc_per_vm_group_vblank == uninitialized_hw_default ||
+   refcyc_per_vm_group_vblank > 
dlg_attr->refcyc_per_vm_group_vblank)
REG_SET(VBLANK_PARAMETERS_5, 0,
REFCYC_PER_VM_GROUP_VBLANK, 
dlg_attr->refcyc_per_vm_group_vblank);
 
REG_GET(VBLANK_PARAMETERS_6,
-   REFCYC_PER_VM_REQ_VBLANK,
-   &cur_value);
-   if (cur_value > dlg_attr->refcyc_per_vm_req_vblank)
+   REFCYC_PER_VM_REQ_VBLANK, &refcyc_per_vm_req_vblank);
+
+   if (refcyc_per_vm_req_vblank == uninitialized_hw_default ||
+   refcyc_per_vm_req_vblank > 
dlg_attr->refcyc_per_vm_req_vblank)
REG_SET(VBLANK_PARAMETERS_6, 0,
REFCYC_PER_VM_REQ_VBLANK, 
dlg_attr->refcyc_per_vm_req_vblank);
 
-   REG_GET(FLIP_PARAMETERS_3, REFCYC_PER_VM_GROUP_FLIP, &cur_value);
-   if (cur_value > dlg_attr->refcyc_per_vm_group_flip)
+   REG_GET(FLIP_PARAMETERS_3,
+   REFCYC_PER_VM_GROUP_FLIP, &refcyc_per_vm_group_flip);
+
+   if (refcyc_per_vm_group_flip == uninitialized_hw_default ||
+   refcyc_per_vm_group_flip > 
dlg_attr->refcyc_per_vm_group_flip)
REG_SET(FLIP_PARAMETERS_3, 0,
REFCYC_PER_VM_GROUP_FLIP, 
dlg_attr->refcyc_per_vm_group_flip);
 
-   REG_GET(FLIP_PARAMETERS_4, REFCYC_PER_VM_REQ_FLIP, &cur_value);
-   if (cur_value > dlg_attr->refcyc_per_vm_req_flip)
+   REG_GET(FLIP_PARAMETERS_4,
+   REFCYC_PER_VM_REQ_FLIP, &refcyc_per_vm_req_flip);
+
+   if (refcyc_per_vm_req_flip == uninitialized_hw_default ||
+   refcyc_per_vm_req_flip > 
dlg_attr->refcyc_per_vm_req_flip)
REG_SET(FLIP_PARAMETERS_4, 0,
REFCYC_PER_VM_REQ_FLIP, 
dlg_attr->refcyc_per_vm_req_flip);
 
REG_SET(FLIP_PARAMETERS_5, 0,
REFCYC_PER_PTE_GROUP_FLIP_C, 
dlg_attr->refcyc_per_pte_group_flip_c);
+
REG_SET(FLIP_PARAMETERS_6, 0,
REFCYC_PER_META_CHUNK_FLIP_C, 
dlg_attr->refcyc_per_meta_chunk_flip_c);
 }
-- 
2.25.1



[PATCH AUTOSEL 5.4 150/330] media: go7007: Fix URB type for interrupt handling

2020-09-17 Thread Sasha Levin
From: Takashi Iwai 

[ Upstream commit a3ea410cac41b19a5490aad7fe6d9a9a772e646e ]

Josef reported that his old-and-good Plextor ConvertX M402U video
converter spews lots of WARNINGs on the recent kernels, and it turned
out that the device uses a bulk endpoint for interrupt handling just
like 2250 board.

To fix it, generalize the check with proper verification of the endpoint
instead of the hard-coded board type check.
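
Generalized, the endpoint-driven choice looks roughly like this (sketch; the
helper name is hypothetical, and the use of endpoint 4 mirrors this driver):

#include <linux/usb.h>

static void fill_status_urb_sketch(struct urb *urb, struct usb_device *udev,
                                   void *buf, int len,
                                   usb_complete_t cb, void *ctx)
{
    struct usb_host_endpoint *ep = udev->ep_in[4];

    if (usb_endpoint_type(&ep->desc) == USB_ENDPOINT_XFER_BULK)
        usb_fill_bulk_urb(urb, udev, usb_rcvbulkpipe(udev, 4),
                          buf, len, cb, ctx);
    else
        usb_fill_int_urb(urb, udev, usb_rcvintpipe(udev, 4),
                         buf, len, cb, ctx, ep->desc.bInterval);
}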

Fixes: 7e5219d18e93 ("[media] go7007: Fix 2250 urb type")
Reported-and-tested-by: Josef Möllers 
BugLink: https://bugzilla.suse.com/show_bug.cgi?id=1162583
BugLink: https://bugzilla.kernel.org/show_bug.cgi?id=206427

Signed-off-by: Takashi Iwai 
Signed-off-by: Hans Verkuil 
Signed-off-by: Mauro Carvalho Chehab 
Signed-off-by: Sasha Levin 
---
 drivers/media/usb/go7007/go7007-usb.c | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/drivers/media/usb/go7007/go7007-usb.c 
b/drivers/media/usb/go7007/go7007-usb.c
index ff2aa057c1fbc..f889c9d740cd1 100644
--- a/drivers/media/usb/go7007/go7007-usb.c
+++ b/drivers/media/usb/go7007/go7007-usb.c
@@ -1044,6 +1044,7 @@ static int go7007_usb_probe(struct usb_interface *intf,
struct go7007_usb *usb;
const struct go7007_usb_board *board;
struct usb_device *usbdev = interface_to_usbdev(intf);
+   struct usb_host_endpoint *ep;
unsigned num_i2c_devs;
char *name;
int video_pipe, i, v_urb_len;
@@ -1140,7 +1141,8 @@ static int go7007_usb_probe(struct usb_interface *intf,
if (usb->intr_urb->transfer_buffer == NULL)
goto allocfail;
 
-   if (go->board_id == GO7007_BOARDID_SENSORAY_2250)
+   ep = usb->usbdev->ep_in[4];
+   if (usb_endpoint_type(&ep->desc) == USB_ENDPOINT_XFER_BULK)
usb_fill_bulk_urb(usb->intr_urb, usb->usbdev,
usb_rcvbulkpipe(usb->usbdev, 4),
usb->intr_urb->transfer_buffer, 2*sizeof(u16),
-- 
2.25.1



[PATCH AUTOSEL 5.4 125/330] drm/omap: fix possible object reference leak

2020-09-17 Thread Sasha Levin
From: Wen Yang 

[ Upstream commit 47340e46f34a3b1d80e40b43ae3d7a8da34a3541 ]

The call to of_find_matching_node returns a node pointer with refcount
incremented thus it must be explicitly decremented after the last
usage.
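
The generic shape of the fix (sketch with a hypothetical lookup function):

#include <linux/of.h>

static int scan_matching_node(const struct of_device_id *matches)
{
    struct device_node *np;
    int ret = 0;

    np = of_find_matching_node(NULL, matches);
    if (!np || !of_device_is_available(np))
        goto put_node;          /* still drop any reference we hold */

    /* ... walk the node ... */

put_node:
    of_node_put(np);            /* of_node_put(NULL) is a no-op */
    return ret;
}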

Detected by coccinelle with the following warnings:
drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c:212:2-8: ERROR: missing 
of_node_put; acquired a node pointer with refcount incremented on line 209, but 
without a corresponding object release within this function.
drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c:237:1-7: ERROR: missing 
of_node_put; acquired a node pointer with refcount incremented on line 209, but 
without a corresponding object release within this function.

Signed-off-by: Wen Yang 
Reviewed-by: Laurent Pinchart 
Reviewed-by: Mukesh Ojha 
Cc: Tomi Valkeinen 
Cc: David Airlie 
Cc: Daniel Vetter 
Cc: Sebastian Reichel 
Cc: Laurent Pinchart 
Cc: dri-de...@lists.freedesktop.org
Cc: linux-kernel@vger.kernel.org
Cc: Markus Elfring 
Signed-off-by: Tomi Valkeinen 
Link: 
https://patchwork.freedesktop.org/patch/msgid/1554692313-28882-2-git-send-email-wen.yan...@zte.com.cn
Signed-off-by: Sasha Levin 
---
 drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c 
b/drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c
index 31502857f013d..ce67891eedd46 100644
--- a/drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c
+++ b/drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c
@@ -192,7 +192,7 @@ static int __init omapdss_boot_init(void)
dss = of_find_matching_node(NULL, omapdss_of_match);
 
if (dss == NULL || !of_device_is_available(dss))
-   return 0;
+   goto put_node;
 
omapdss_walk_device(dss, true);
 
@@ -217,6 +217,8 @@ static int __init omapdss_boot_init(void)
kfree(n);
}
 
+put_node:
+   of_node_put(dss);
return 0;
 }
 
-- 
2.25.1



[PATCH AUTOSEL 5.4 124/330] scsi: lpfc: Fix coverity errors in fmdi attribute handling

2020-09-17 Thread Sasha Levin
From: James Smart 

[ Upstream commit 4cb9e1ddaa145be9ed67b6a7de98ca705a43f998 ]

Coverity reported a memory corruption error for the fdmi attributes
routines:

  CID 15768 [Memory Corruption] Out-of-bounds access on FDMI

Sloppy coding of the fdmi structures. In both the lpfc_fdmi_attr_def and
lpfc_fdmi_reg_port_list structures, a field was placed at the start of a
payload that may have variable content. The field was given an arbitrary
type (uint32_t). The code then used the field name to derive an address,
which it used in things such as memset and memcpy. The memset sizes or
memcpy lengths were larger than the arbitrary type, thus coverity reported
an error.

Fix by replacing the arbitrary fields with the real field structures
describing the payload.
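
A minimal illustration of the problem with hypothetical structures (not the
real lpfc definitions):

#include <linux/string.h>

struct attr_entry {
    char string[256];
};

struct attr_def_bad {
    unsigned int type_len;
    unsigned int value;          /* arbitrary 4-byte placeholder for the payload */
};

struct attr_def_good {
    unsigned int type_len;
    struct attr_entry value;     /* real payload type, correctly sized */
};

static void zero_payload(struct attr_def_bad *bad, struct attr_def_good *good)
{
    /* Out of bounds: &bad->value is only 4 bytes wide.
     * memset(&bad->value, 0, sizeof(struct attr_entry));
     */
    memset(&good->value, 0, sizeof(good->value));   /* bounded by the real type */
}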

Link: https://lore.kernel.org/r/20200128002312.16346-8-jsmart2...@gmail.com
Signed-off-by: Dick Kennedy 
Signed-off-by: James Smart 
Signed-off-by: Martin K. Petersen 
Signed-off-by: Sasha Levin 
---
 drivers/scsi/lpfc/lpfc_ct.c | 137 ++--
 drivers/scsi/lpfc/lpfc_hw.h |  36 +-
 2 files changed, 85 insertions(+), 88 deletions(-)

diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index 4a09f21cb235f..e672fa9e842c9 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -2056,8 +2056,8 @@ lpfc_fdmi_hba_attr_wwnn(struct lpfc_vport *vport, struct 
lpfc_fdmi_attr_def *ad)
struct lpfc_fdmi_attr_entry *ae;
uint32_t size;
 
-   ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
-   memset(ae, 0, sizeof(struct lpfc_name));
+   ae = &ad->AttrValue;
+   memset(ae, 0, sizeof(*ae));
 
memcpy(&ae->un.AttrWWN, &vport->fc_sparam.nodeName,
   sizeof(struct lpfc_name));
@@ -2073,8 +2073,8 @@ lpfc_fdmi_hba_attr_manufacturer(struct lpfc_vport *vport,
struct lpfc_fdmi_attr_entry *ae;
uint32_t len, size;
 
-   ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
-   memset(ae, 0, 256);
+   ae = &ad->AttrValue;
+   memset(ae, 0, sizeof(*ae));
 
/* This string MUST be consistent with other FC platforms
 * supported by Broadcom.
@@ -2098,8 +2098,8 @@ lpfc_fdmi_hba_attr_sn(struct lpfc_vport *vport, struct 
lpfc_fdmi_attr_def *ad)
struct lpfc_fdmi_attr_entry *ae;
uint32_t len, size;
 
-   ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
-   memset(ae, 0, 256);
+   ae = &ad->AttrValue;
+   memset(ae, 0, sizeof(*ae));
 
strncpy(ae->un.AttrString, phba->SerialNumber,
sizeof(ae->un.AttrString));
@@ -2120,8 +2120,8 @@ lpfc_fdmi_hba_attr_model(struct lpfc_vport *vport,
struct lpfc_fdmi_attr_entry *ae;
uint32_t len, size;
 
-   ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
-   memset(ae, 0, 256);
+   ae = &ad->AttrValue;
+   memset(ae, 0, sizeof(*ae));
 
strncpy(ae->un.AttrString, phba->ModelName,
sizeof(ae->un.AttrString));
@@ -2141,8 +2141,8 @@ lpfc_fdmi_hba_attr_description(struct lpfc_vport *vport,
struct lpfc_fdmi_attr_entry *ae;
uint32_t len, size;
 
-   ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
-   memset(ae, 0, 256);
+   ae = &ad->AttrValue;
+   memset(ae, 0, sizeof(*ae));
 
strncpy(ae->un.AttrString, phba->ModelDesc,
sizeof(ae->un.AttrString));
@@ -2164,8 +2164,8 @@ lpfc_fdmi_hba_attr_hdw_ver(struct lpfc_vport *vport,
struct lpfc_fdmi_attr_entry *ae;
uint32_t i, j, incr, size;
 
-   ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
-   memset(ae, 0, 256);
+   ae = &ad->AttrValue;
+   memset(ae, 0, sizeof(*ae));
 
/* Convert JEDEC ID to ascii for hardware version */
incr = vp->rev.biuRev;
@@ -2194,8 +2194,8 @@ lpfc_fdmi_hba_attr_drvr_ver(struct lpfc_vport *vport,
struct lpfc_fdmi_attr_entry *ae;
uint32_t len, size;
 
-   ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
-   memset(ae, 0, 256);
+   ae = &ad->AttrValue;
+   memset(ae, 0, sizeof(*ae));
 
strncpy(ae->un.AttrString, lpfc_release_version,
sizeof(ae->un.AttrString));
@@ -2216,8 +2216,8 @@ lpfc_fdmi_hba_attr_rom_ver(struct lpfc_vport *vport,
struct lpfc_fdmi_attr_entry *ae;
uint32_t len, size;
 
-   ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
-   memset(ae, 0, 256);
+   ae = &ad->AttrValue;
+   memset(ae, 0, sizeof(*ae));
 
if (phba->sli_rev == LPFC_SLI_REV4)
lpfc_decode_firmware_rev(phba, ae->un.AttrString, 1);
@@ -2241,8 +2241,8 @@ lpfc_fdmi_hba_attr_fmw_ver(struct lpfc_vport *vport,
struct lpfc_fdmi_attr_entry *ae;
uint32_t len, size;
 
-   ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
-   memset(ae, 0, 256);
+   ae = &ad->AttrValue;
+   memset(ae, 0, sizeof(*ae));
 
lpfc_decode_firmware_rev(phba, ae->un.AttrString, 1);
len = strnlen(ae->un.AttrString,
@@ -2261,8 +2261,8 @@ lpfc_fdmi_hba_attr_os_ver(struct lpfc_vport 

[PATCH AUTOSEL 5.4 120/330] audit: CONFIG_CHANGE don't log internal bookkeeping as an event

2020-09-17 Thread Sasha Levin
From: Steve Grubb 

[ Upstream commit 70b3eeed49e8190d97139806f6fbaf8964306cdb ]

Common Criteria calls out for any action that modifies the audit trail to
be recorded. That usually is interpreted to mean insertion or removal of
rules. It is not required to log modification of the inode information
since the watch is still in effect. Additionally, if the rule is a never
rule and the underlying file is one they do not want events for, they
get an event for this bookkeeping update against their wishes.

Since no device/inode info is logged at insertion and no device/inode
information is logged on update, there is nothing meaningful being
communicated to the admin by the CONFIG_CHANGE updated_rules event. One
can assume that the rule was not "modified" because it is still watching
the intended target. If the device or inode cannot be resolved, then
audit_panic is called which is sufficient.

The correct resolution is to drop logging config_update events since
the watch is still in effect but just on another unknown inode.

Signed-off-by: Steve Grubb 
Signed-off-by: Paul Moore 
Signed-off-by: Sasha Levin 
---
 kernel/audit_watch.c | 2 --
 1 file changed, 2 deletions(-)

diff --git a/kernel/audit_watch.c b/kernel/audit_watch.c
index 4508d5e0cf696..8a8fd732ff6d0 100644
--- a/kernel/audit_watch.c
+++ b/kernel/audit_watch.c
@@ -302,8 +302,6 @@ static void audit_update_watch(struct audit_parent *parent,
if (oentry->rule.exe)
audit_remove_mark(oentry->rule.exe);
 
-   audit_watch_log_rule_change(r, owatch, "updated_rules");
-
call_rcu(&oentry->rcu, audit_free_rule_rcu);
}
 
-- 
2.25.1



[PATCH AUTOSEL 5.4 136/330] dmaengine: stm32-mdma: use vchan_terminate_vdesc() in .terminate_all

2020-09-17 Thread Sasha Levin
From: Amelie Delaunay 

[ Upstream commit dfc708812a2acfc0ca56f56233b3c3e7b0d4ffe7 ]

To avoid a race with vchan_complete, use the race-free way to terminate
the running transfer.

Move the vdesc->node list_del into stm32_mdma_start_transfer instead of
stm32_mdma_xfer_end to avoid another race in vchan_dma_desc_free_list.
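
As a rough illustration of the pattern (a sketch only, not the driver
code itself; "my_chan", "my_desc", "to_my_chan" and "my_stop_hw" are
made-up names), a virt-dma based .terminate_all typically detaches the
in-flight descriptor with vchan_terminate_vdesc() under the vchan lock,
so vchan_complete() can no longer race with it, and then frees the
remaining descriptors via vchan_dma_desc_free_list():

/* Sketch only: "my_chan"/"my_desc" stand in for a virt-dma based driver's
 * channel/descriptor types (a struct virt_dma_chan 'vchan', an in-flight
 * 'desc' pointer and a 'busy' flag); my_stop_hw() stands in for the
 * driver's hardware stop helper.
 */
static int my_terminate_all(struct dma_chan *c)
{
	struct my_chan *chan = to_my_chan(c);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&chan->vchan.lock, flags);
	if (chan->desc) {
		/* Detach the in-flight descriptor under the vchan lock so
		 * vchan_complete() cannot race with its completion/free.
		 */
		vchan_terminate_vdesc(&chan->desc->vdesc);
		if (chan->busy)
			my_stop_hw(chan);
		chan->desc = NULL;
	}
	vchan_get_all_descriptors(&chan->vchan, &head);
	spin_unlock_irqrestore(&chan->vchan.lock, flags);

	vchan_dma_desc_free_list(&chan->vchan, &head);

	return 0;
}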

Signed-off-by: Amelie Delaunay 
Link: https://lore.kernel.org/r/20200127085334.13163-7-amelie.delau...@st.com
Signed-off-by: Vinod Koul 
Signed-off-by: Sasha Levin 
---
 drivers/dma/stm32-mdma.c | 9 ++---
 1 file changed, 6 insertions(+), 3 deletions(-)

diff --git a/drivers/dma/stm32-mdma.c b/drivers/dma/stm32-mdma.c
index 5838311cf9900..ee1cbf3be75d5 100644
--- a/drivers/dma/stm32-mdma.c
+++ b/drivers/dma/stm32-mdma.c
@@ -1127,6 +1127,8 @@ static void stm32_mdma_start_transfer(struct stm32_mdma_chan *chan)
return;
}
 
+   list_del(&vdesc->node);
+
chan->desc = to_stm32_mdma_desc(vdesc);
hwdesc = chan->desc->node[0].hwdesc;
chan->curr_hwdesc = 0;
@@ -1242,8 +1244,10 @@ static int stm32_mdma_terminate_all(struct dma_chan *c)
LIST_HEAD(head);
 
spin_lock_irqsave(&chan->vchan.lock, flags);
-   if (chan->busy) {
-   stm32_mdma_stop(chan);
+   if (chan->desc) {
+   vchan_terminate_vdesc(&chan->desc->vdesc);
+   if (chan->busy)
+   stm32_mdma_stop(chan);
chan->desc = NULL;
}
vchan_get_all_descriptors(&chan->vchan, &head);
@@ -1331,7 +1335,6 @@ static enum dma_status stm32_mdma_tx_status(struct dma_chan *c,
 
 static void stm32_mdma_xfer_end(struct stm32_mdma_chan *chan)
 {
-   list_del(&chan->desc->vdesc.node);
vchan_cookie_complete(&chan->desc->vdesc);
chan->desc = NULL;
chan->busy = false;
-- 
2.25.1



[PATCH AUTOSEL 5.4 105/330] ext4: make dioread_nolock the default

2020-09-17 Thread Sasha Levin
From: Theodore Ts'o 

[ Upstream commit 244adf6426ee31a83f397b700d964cff12a247d3 ]

This fixes the direct I/O versus writeback race which can reveal stale
data, and it improves the tail latency of commits on slow devices.

Link: https://lore.kernel.org/r/20200125022254.1101588-1-ty...@mit.edu
Signed-off-by: Theodore Ts'o 
Signed-off-by: Sasha Levin 
---
 fs/ext4/super.c | 7 ---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 4aae7e3e89a12..c32b8161ad3e9 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -1546,6 +1546,7 @@ static const match_table_t tokens = {
{Opt_auto_da_alloc, "auto_da_alloc"},
{Opt_noauto_da_alloc, "noauto_da_alloc"},
{Opt_dioread_nolock, "dioread_nolock"},
+   {Opt_dioread_lock, "nodioread_nolock"},
{Opt_dioread_lock, "dioread_lock"},
{Opt_discard, "discard"},
{Opt_nodiscard, "nodiscard"},
@@ -3750,6 +3751,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
set_opt(sb, NO_UID32);
/* xattr user namespace & acls are now defaulted on */
set_opt(sb, XATTR_USER);
+   set_opt(sb, DIOREAD_NOLOCK);
 #ifdef CONFIG_EXT4_FS_POSIX_ACL
set_opt(sb, POSIX_ACL);
 #endif
@@ -3927,9 +3929,8 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
 #endif
 
if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA) {
-   printk_once(KERN_WARNING "EXT4-fs: Warning: mounting "
-   "with data=journal disables delayed "
-   "allocation and O_DIRECT support!\n");
+   printk_once(KERN_WARNING "EXT4-fs: Warning: mounting with data=journal disables delayed allocation, dioread_nolock, and O_DIRECT support!\n");
+   clear_opt(sb, DIOREAD_NOLOCK);
if (test_opt2(sb, EXPLICIT_DELALLOC)) {
ext4_msg(sb, KERN_ERR, "can't mount with "
 "both data=journal and delalloc");
-- 
2.25.1



[PATCH AUTOSEL 5.4 099/330] s390/cpum_sf: Use kzalloc and minor changes

2020-09-17 Thread Sasha Levin
From: Thomas Richter 

[ Upstream commit 32dab6828c42f087439d3e2617dc7283546bd8f7 ]

Use kzalloc() to allocate the auxiliary buffer structure initialized
with all zeroes, to avoid random values in the trace output.

Avoid double access to the SDB hardware flags.

Signed-off-by: Thomas Richter 
Signed-off-by: Vasily Gorbik 
Signed-off-by: Sasha Levin 
---
 arch/s390/kernel/perf_cpum_sf.c | 9 -
 1 file changed, 4 insertions(+), 5 deletions(-)

diff --git a/arch/s390/kernel/perf_cpum_sf.c b/arch/s390/kernel/perf_cpum_sf.c
index 229e1e2f8253a..996e447ead3a6 100644
--- a/arch/s390/kernel/perf_cpum_sf.c
+++ b/arch/s390/kernel/perf_cpum_sf.c
@@ -1429,8 +1429,8 @@ static int aux_output_begin(struct perf_output_handle *handle,
idx = aux->empty_mark + 1;
for (i = 0; i < range_scan; i++, idx++) {
te = aux_sdb_trailer(aux, idx);
-   te->flags = te->flags & ~SDB_TE_BUFFER_FULL_MASK;
-   te->flags = te->flags & ~SDB_TE_ALERT_REQ_MASK;
+   te->flags &= ~(SDB_TE_BUFFER_FULL_MASK |
+  SDB_TE_ALERT_REQ_MASK);
te->overflow = 0;
}
/* Save the position of empty SDBs */
@@ -1477,8 +1477,7 @@ static bool aux_set_alert(struct aux_buffer *aux, unsigned long alert_index,
te = aux_sdb_trailer(aux, alert_index);
do {
orig_flags = te->flags;
-   orig_overflow = te->overflow;
-   *overflow = orig_overflow;
+   *overflow = orig_overflow = te->overflow;
if (orig_flags & SDB_TE_BUFFER_FULL_MASK) {
/*
 * SDB is already set by hardware.
@@ -1712,7 +1711,7 @@ static void *aux_buffer_setup(struct perf_event *event, void **pages,
}
 
/* Allocate aux_buffer struct for the event */
-   aux = kmalloc(sizeof(struct aux_buffer), GFP_KERNEL);
+   aux = kzalloc(sizeof(struct aux_buffer), GFP_KERNEL);
if (!aux)
goto no_aux;
sfb = &aux->sfb;
-- 
2.25.1



[PATCH AUTOSEL 5.4 101/330] powerpc/powernv/ioda: Fix ref count for devices with their own PE

2020-09-17 Thread Sasha Levin
From: Frederic Barrat 

[ Upstream commit 05dd7da76986937fb288b4213b1fa10dbe0d1b33 ]

The pci_dn structure used to store a pointer to the struct pci_dev, so
taking a reference on the device was required. However, the pci_dev
pointer was later removed from the pci_dn structure, but the reference
was kept for the npu device.
See commit 902bdc57451c ("powerpc/powernv/idoa: Remove unnecessary
pcidev from pci_dn").

We don't need to take a reference on the device when assigning the PE
as the struct pnv_ioda_pe is cleaned up at the same time as
the (physical) device is released. Doing so prevents the device from
being released, which is a problem for opencapi devices, since we want
to be able to remove them through PCI hotplug.

Now the ugly part: nvlink npu devices are not meant to be
released. Because of the above, we've always leaked a reference and
simply removing it now is dangerous and would likely require more
work. There's currently no release device callback for nvlink devices
for example. So to be safe, this patch leaks a reference on the npu
device, but only for nvlink and not opencapi.

Signed-off-by: Frederic Barrat 
Reviewed-by: Andrew Donnellan 
Signed-off-by: Michael Ellerman 
Link: https://lore.kernel.org/r/20191121134918.7155-2-fbar...@linux.ibm.com
Signed-off-by: Sasha Levin 
---
 arch/powerpc/platforms/powernv/pci-ioda.c | 19 ---
 1 file changed, 12 insertions(+), 7 deletions(-)

diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
index 058223233088e..e9cda7e316a50 100644
--- a/arch/powerpc/platforms/powernv/pci-ioda.c
+++ b/arch/powerpc/platforms/powernv/pci-ioda.c
@@ -1062,14 +1062,13 @@ static struct pnv_ioda_pe *pnv_ioda_setup_dev_PE(struct pci_dev *dev)
return NULL;
}
 
-   /* NOTE: We get only one ref to the pci_dev for the pdn, not for the
-* pointer in the PE data structure, both should be destroyed at the
-* same time. However, this needs to be looked at more closely again
-* once we actually start removing things (Hotplug, SR-IOV, ...)
+   /* NOTE: We don't get a reference for the pointer in the PE
+* data structure, both the device and PE structures should be
+* destroyed at the same time. However, removing nvlink
+* devices will need some work.
 *
 * At some point we want to remove the PDN completely anyways
 */
-   pci_dev_get(dev);
pdn->pe_number = pe->pe_number;
pe->flags = PNV_IODA_PE_DEV;
pe->pdev = dev;
@@ -1084,7 +1083,6 @@ static struct pnv_ioda_pe *pnv_ioda_setup_dev_PE(struct pci_dev *dev)
pnv_ioda_free_pe(pe);
pdn->pe_number = IODA_INVALID_PE;
pe->pdev = NULL;
-   pci_dev_put(dev);
return NULL;
}
 
@@ -1205,6 +1203,14 @@ static struct pnv_ioda_pe *pnv_ioda_setup_npu_PE(struct pci_dev *npu_pdev)
struct pci_controller *hose = pci_bus_to_host(npu_pdev->bus);
struct pnv_phb *phb = hose->private_data;
 
+   /*
+* Intentionally leak a reference on the npu device (for
+* nvlink only; this is not an opencapi path) to make sure it
+* never goes away, as it's been the case all along and some
+* work is needed otherwise.
+*/
+   pci_dev_get(npu_pdev);
+
/*
 * Due to a hardware errata PE#0 on the NPU is reserved for
 * error handling. This means we only have three PEs remaining
@@ -1228,7 +1234,6 @@ static struct pnv_ioda_pe *pnv_ioda_setup_npu_PE(struct pci_dev *npu_pdev)
 */
dev_info(&npu_pdev->dev,
"Associating to existing PE %x\n", pe_num);
-   pci_dev_get(npu_pdev);
npu_pdn = pci_get_pdn(npu_pdev);
rid = npu_pdev->bus->number << 8 | npu_pdn->devfn;
npu_pdn->pe_number = pe_num;
-- 
2.25.1



[PATCH AUTOSEL 5.4 111/330] Bluetooth: Fix refcount use-after-free issue

2020-09-17 Thread Sasha Levin
From: Manish Mandlik 

[ Upstream commit 6c08fc896b60893c5d673764b0668015d76df462 ]

There is no lock preventing both l2cap_sock_release() and
chan->ops->close() from running at the same time.

If we consider Thread A running l2cap_chan_timeout() and Thread B running
l2cap_sock_release(), expected behavior is:
  A::l2cap_chan_timeout()->l2cap_chan_close()->l2cap_sock_teardown_cb()
  A::l2cap_chan_timeout()->l2cap_sock_close_cb()->l2cap_sock_kill()
  B::l2cap_sock_release()->sock_orphan()
  B::l2cap_sock_release()->l2cap_sock_kill()

where,
sock_orphan() clears "sk->sk_socket" and l2cap_sock_teardown_cb() marks
socket as SOCK_ZAPPED.

In l2cap_sock_kill(), there is an "if-statement" that checks whether both
sock_orphan() and sock_teardown() have been run, i.e. sk->sk_socket is NULL
and the socket is marked as SOCK_ZAPPED. The socket is killed if the
condition is satisfied.

In the race condition, following occurs:
  A::l2cap_chan_timeout()->l2cap_chan_close()->l2cap_sock_teardown_cb()
  B::l2cap_sock_release()->sock_orphan()
  B::l2cap_sock_release()->l2cap_sock_kill()
  A::l2cap_chan_timeout()->l2cap_sock_close_cb()->l2cap_sock_kill()

In this scenario, "if-statement" is true in both B::l2cap_sock_kill() and
A::l2cap_sock_kill() and we hit "refcount: underflow; use-after-free" bug.

Similar condition occurs at other places where teardown/sock_kill is
happening:
  l2cap_disconnect_rsp()->l2cap_chan_del()->l2cap_sock_teardown_cb()
  l2cap_disconnect_rsp()->l2cap_sock_close_cb()->l2cap_sock_kill()

  l2cap_conn_del()->l2cap_chan_del()->l2cap_sock_teardown_cb()
  l2cap_conn_del()->l2cap_sock_close_cb()->l2cap_sock_kill()

  l2cap_disconnect_req()->l2cap_chan_del()->l2cap_sock_teardown_cb()
  l2cap_disconnect_req()->l2cap_sock_close_cb()->l2cap_sock_kill()

  l2cap_sock_cleanup_listen()->l2cap_chan_close()->l2cap_sock_teardown_cb()
  l2cap_sock_cleanup_listen()->l2cap_sock_kill()

Protect teardown/sock_kill and orphan/sock_kill by adding hold_lock on the
l2cap channel to ensure that the socket is killed only after it has been
marked as zapped and orphaned.
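
A minimal sketch of the resulting hold/put ordering (illustrative only,
simplified from the actual l2cap code paths; "my_teardown_and_kill" is a
made-up helper name, and the caller is assumed to hold conn->chan_lock as
in the real disconnect paths):

static void my_teardown_and_kill(struct l2cap_chan *chan)
{
	l2cap_chan_hold(chan);            /* keep the channel alive across close */
	l2cap_chan_lock(chan);

	l2cap_chan_del(chan, ECONNRESET);     /* teardown marks the sock zapped */
	chan->ops->close(chan);               /* may end up in l2cap_sock_kill() */

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);             /* final put only after close finished */
}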

Signed-off-by: Manish Mandlik 
Signed-off-by: Marcel Holtmann 
Signed-off-by: Sasha Levin 
---
 net/bluetooth/l2cap_core.c | 26 +++---
 net/bluetooth/l2cap_sock.c | 16 +---
 2 files changed, 28 insertions(+), 14 deletions(-)

diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index a845786258a0b..eb2804ac50756 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -419,6 +419,9 @@ static void l2cap_chan_timeout(struct work_struct *work)
BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
 
mutex_lock(&conn->chan_lock);
+   /* __set_chan_timer() calls l2cap_chan_hold(chan) while scheduling
+* this work. No need to call l2cap_chan_hold(chan) here again.
+*/
l2cap_chan_lock(chan);
 
if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
@@ -431,12 +434,12 @@ static void l2cap_chan_timeout(struct work_struct *work)
 
l2cap_chan_close(chan, reason);
 
-   l2cap_chan_unlock(chan);
-
chan->ops->close(chan);
-   mutex_unlock(&conn->chan_lock);
 
+   l2cap_chan_unlock(chan);
l2cap_chan_put(chan);
+
+   mutex_unlock(&conn->chan_lock);
 }
 
 struct l2cap_chan *l2cap_chan_create(void)
@@ -1734,9 +1737,9 @@ static void l2cap_conn_del(struct hci_conn *hcon, int err)
 
l2cap_chan_del(chan, err);
 
-   l2cap_chan_unlock(chan);
-
chan->ops->close(chan);
+
+   l2cap_chan_unlock(chan);
l2cap_chan_put(chan);
}
 
@@ -4355,6 +4358,7 @@ static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
return 0;
}
 
+   l2cap_chan_hold(chan);
l2cap_chan_lock(chan);
 
rsp.dcid = cpu_to_le16(chan->scid);
@@ -4363,12 +4367,11 @@ static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
 
chan->ops->set_shutdown(chan);
 
-   l2cap_chan_hold(chan);
l2cap_chan_del(chan, ECONNRESET);
 
-   l2cap_chan_unlock(chan);
-
chan->ops->close(chan);
+
+   l2cap_chan_unlock(chan);
l2cap_chan_put(chan);
 
mutex_unlock(&conn->chan_lock);
@@ -4400,20 +4403,21 @@ static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
return 0;
}
 
+   l2cap_chan_hold(chan);
l2cap_chan_lock(chan);
 
if (chan->state != BT_DISCONN) {
l2cap_chan_unlock(chan);
+   l2cap_chan_put(chan);
mutex_unlock(&conn->chan_lock);
return 0;
}
 
-   l2cap_chan_hold(chan);
l2cap_chan_del(chan, 0);
 
-   l2cap_chan_unlock(chan);
-
chan->ops->close(chan);
+
+   l2cap_chan_unlock(chan);
l2cap_chan_put(chan);
 
mutex_unlock(&conn->chan_lock);
diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
index a7be8b59b3c28..ab65304f3f637 100644
--- a/net/bluetooth/l2cap_sock.c
+++ 

[PATCH AUTOSEL 5.4 076/330] xfs: fix log reservation overflows when allocating large rt extents

2020-09-17 Thread Sasha Levin
From: "Darrick J. Wong" 

[ Upstream commit b1de6fc7520fe12949c070af0e8c0e4044cd3420 ]

Omar Sandoval reported that a 4G fallocate on the realtime device causes
filesystem shutdowns due to a log reservation overflow that happens when
we log the rtbitmap updates.  Factor the rtbitmap/rtsummary updates into
the tr_write and tr_itruncate log reservation calculations.

"The following reproducer results in a transaction log overrun warning
for me:

mkfs.xfs -f -r rtdev=/dev/vdc -d rtinherit=1 -m reflink=0 /dev/vdb
mount -o rtdev=/dev/vdc /dev/vdb /mnt
fallocate -l 4G /mnt/foo"
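
For a rough sense of scale (assuming, for illustration only, a 4k
filesystem block size and a realtime extent size of one block): MAXEXTLEN
is 2^21 - 1 blocks, so marking that many realtime extents in use touches
about (2^21 / 8) bytes ~= 256 KiB of rtbitmap, i.e. roughly 64 rtbitmap
blocks plus one rtsummary block per allocation.  That is the shortfall
this patch folds into the tr_write and tr_itruncate reservations.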

Reported-by: Omar Sandoval 
Tested-by: Omar Sandoval 
Signed-off-by: Darrick J. Wong 
Reviewed-by: Brian Foster 
Signed-off-by: Sasha Levin 
---
 fs/xfs/libxfs/xfs_trans_resv.c | 96 +++---
 1 file changed, 77 insertions(+), 19 deletions(-)

diff --git a/fs/xfs/libxfs/xfs_trans_resv.c b/fs/xfs/libxfs/xfs_trans_resv.c
index d12bbd526e7c0..b3584cd2cc164 100644
--- a/fs/xfs/libxfs/xfs_trans_resv.c
+++ b/fs/xfs/libxfs/xfs_trans_resv.c
@@ -196,6 +196,24 @@ xfs_calc_inode_chunk_res(
return res;
 }
 
+/*
+ * Per-extent log reservation for the btree changes involved in freeing or
+ * allocating a realtime extent.  We have to be able to log as many rtbitmap
+ * blocks as needed to mark inuse MAXEXTLEN blocks' worth of realtime extents,
+ * as well as the realtime summary block.
+ */
+unsigned int
+xfs_rtalloc_log_count(
+   struct xfs_mount        *mp,
+   unsigned int            num_ops)
+{
+   unsigned int            blksz = XFS_FSB_TO_B(mp, 1);
+   unsigned int            rtbmp_bytes;
+
+   rtbmp_bytes = (MAXEXTLEN / mp->m_sb.sb_rextsize) / NBBY;
+   return (howmany(rtbmp_bytes, blksz) + 1) * num_ops;
+}
+
 /*
  * Various log reservation values.
  *
@@ -218,13 +236,21 @@ xfs_calc_inode_chunk_res(
 
 /*
  * In a write transaction we can allocate a maximum of 2
- * extents.  This gives:
+ * extents.  This gives (t1):
  *the inode getting the new extents: inode size
  *the inode's bmap btree: max depth * block size
  *the agfs of the ags from which the extents are allocated: 2 * sector
  *the superblock free block counter: sector size
 *the allocation btrees: 2 exts * 2 trees * (2 * max depth - 1) * block size
- * And the bmap_finish transaction can free bmap blocks in a join:
+ * Or, if we're writing to a realtime file (t2):
+ *the inode getting the new extents: inode size
+ *the inode's bmap btree: max depth * block size
+ *the agfs of the ags from which the extents are allocated: 2 * sector
+ *the superblock free block counter: sector size
+ *the realtime bitmap: ((MAXEXTLEN / rtextsize) / NBBY) bytes
+ *the realtime summary: 1 block
+ *the allocation btrees: 2 trees * (2 * max depth - 1) * block size
+ * And the bmap_finish transaction can free bmap blocks in a join (t3):
  *the agfs of the ags containing the blocks: 2 * sector size
  *the agfls of the ags containing the blocks: 2 * sector size
  *the super block free block counter: sector size
@@ -234,40 +260,72 @@ STATIC uint
 xfs_calc_write_reservation(
struct xfs_mount        *mp)
 {
-   return XFS_DQUOT_LOGRES(mp) +
-   max((xfs_calc_inode_res(mp, 1) +
+   unsigned int            t1, t2, t3;
+   unsigned int            blksz = XFS_FSB_TO_B(mp, 1);
+
+   t1 = xfs_calc_inode_res(mp, 1) +
+xfs_calc_buf_res(XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK), blksz) +
+xfs_calc_buf_res(3, mp->m_sb.sb_sectsize) +
+xfs_calc_buf_res(xfs_allocfree_log_count(mp, 2), blksz);
+
+   if (xfs_sb_version_hasrealtime(&mp->m_sb)) {
+   t2 = xfs_calc_inode_res(mp, 1) +
 xfs_calc_buf_res(XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK),
- XFS_FSB_TO_B(mp, 1)) +
+blksz) +
 xfs_calc_buf_res(3, mp->m_sb.sb_sectsize) +
-xfs_calc_buf_res(xfs_allocfree_log_count(mp, 2),
- XFS_FSB_TO_B(mp, 1))),
-   (xfs_calc_buf_res(5, mp->m_sb.sb_sectsize) +
-xfs_calc_buf_res(xfs_allocfree_log_count(mp, 2),
- XFS_FSB_TO_B(mp, 1))));
+xfs_calc_buf_res(xfs_rtalloc_log_count(mp, 1), blksz) +
+xfs_calc_buf_res(xfs_allocfree_log_count(mp, 1), blksz);
+   } else {
+   t2 = 0;
+   }
+
+   t3 = xfs_calc_buf_res(5, mp->m_sb.sb_sectsize) +
+xfs_calc_buf_res(xfs_allocfree_log_count(mp, 2), blksz);
+
+   return XFS_DQUOT_LOGRES(mp) + max3(t1, t2, t3);
 }
 
 /*
- * In truncating a file we free up to two extents at once.  We can modify:
+ * In truncating a file we free up to two extents at once.  We can modify (t1):
  *the inode being truncated: inode size
  *the inode's bmap btree: (max depth + 1) * block size
- * And the bmap_finish 

[PATCH AUTOSEL 5.4 108/330] ceph: ensure we have a new cap before continuing in fill_inode

2020-09-17 Thread Sasha Levin
From: Jeff Layton 

[ Upstream commit 9a6bed4fe0c8bf57785cbc4db9f86086cb9b193d ]

If the caller passes in a NULL cap_reservation and we can't allocate
one, then ensure that we fail gracefully.

Signed-off-by: Jeff Layton 
Signed-off-by: Ilya Dryomov 
Signed-off-by: Sasha Levin 
---
 fs/ceph/inode.c | 5 -
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
index c07407586ce87..660a878e20ef2 100644
--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c
@@ -754,8 +754,8 @@ static int fill_inode(struct inode *inode, struct page *locked_page,
info_caps = le32_to_cpu(info->cap.caps);
 
/* prealloc new cap struct */
-   if (info_caps && ceph_snap(inode) == CEPH_NOSNAP)
+   if (info_caps && ceph_snap(inode) == CEPH_NOSNAP) {
new_cap = ceph_get_cap(mdsc, caps_reservation);
+   if (!new_cap)
+   return -ENOMEM;
+   }
 
/*
 * prealloc xattr data, if it looks like we'll need it.  only
-- 
2.25.1



[PATCH AUTOSEL 5.4 112/330] mm/swapfile.c: swap_next should increase position index

2020-09-17 Thread Sasha Levin
From: Vasily Averin 

[ Upstream commit 10c8d69f314d557d94d74ec492575ae6a4f1eb1c ]

If a seq_file .next function does not change the position index, a read
after some lseek can generate unexpected output.

In Aug 2018 NeilBrown noticed commit 1f4aace60b0e ("fs/seq_file.c:
simplify seq_file iteration code and interface") "Some ->next functions
do not increment *pos when they return NULL...  Note that such ->next
functions are buggy and should be fixed.  A simple demonstration is

  dd if=/proc/swaps bs=1000 skip=1

Choose any block size larger than the size of /proc/swaps.  This will
always show the whole last line of /proc/swaps"

The described problem is still present.  If you lseek into the middle of
the last output line, the following read will output the end of the last
line and then the whole last line once again.

  $ dd if=/proc/swaps bs=1  # usual output
  Filename  TypeSizeUsedPriority
  /dev/dm-0   partition 4194812 97536   -2
  104+0 records in
  104+0 records out
  104 bytes copied

  $ dd if=/proc/swaps bs=40 skip=1# last line was generated twice
  dd: /proc/swaps: cannot skip to specified offset
  v/dm-0   partition4194812 97536   -2
  /dev/dm-0   partition 4194812 97536   -2
  3+1 records in
  3+1 records out
  131 bytes copied

https://bugzilla.kernel.org/show_bug.cgi?id=206283
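
For reference, a minimal sketch of the ->next() contract this relies on
(illustrative only, not the swapfile code; "my_items", "my_nr_items" and
"my_seq_next" are made-up names): the position index is advanced even on
the final call that returns NULL, so a later lseek()+read() does not
replay the last record.

static int my_items[16];
static const loff_t my_nr_items = ARRAY_SIZE(my_items);

static void *my_seq_next(struct seq_file *m, void *v, loff_t *pos)
{
	++(*pos);                        /* always advance the position index */
	if (*pos >= my_nr_items)
		return NULL;             /* end of sequence; pos already moved */
	return &my_items[*pos];
}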

Link: 
http://lkml.kernel.org/r/bd8cfd7b-ac95-9b91-f9e7-e8438bd50...@virtuozzo.com
Signed-off-by: Vasily Averin 
Reviewed-by: Andrew Morton 
Cc: Jann Horn 
Cc: Alexander Viro 
Cc: Kees Cook 
Cc: Hugh Dickins 
Signed-off-by: Andrew Morton 
Signed-off-by: Linus Torvalds 
Signed-off-by: Sasha Levin 
---
 mm/swapfile.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/mm/swapfile.c b/mm/swapfile.c
index 891a3ef486511..646fd0a8e3202 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -2737,10 +2737,10 @@ static void *swap_next(struct seq_file *swap, void *v, loff_t *pos)
else
type = si->type + 1;
 
+   ++(*pos);
for (; (si = swap_type_to_swap_info(type)); type++) {
if (!(si->flags & SWP_USED) || !si->swap_map)
continue;
-   ++*pos;
return si;
}
 
-- 
2.25.1



[PATCH AUTOSEL 5.4 065/330] ASoC: max98090: remove msleep in PLL unlocked workaround

2020-09-17 Thread Sasha Levin
From: Tzung-Bi Shih 

[ Upstream commit acb874a7c049ec49d8fc66c893170fb42c01bdf7 ]

It was observed that Baytrail-based chromebooks could hit a continuous
PLL-unlocked state when using a playback stream and a capture stream
simultaneously; specifically, when starting a capture stream after a
playback stream has already started.  As a result, the audio data could
become corrupted or turn completely silent.

As the datasheet suggested, the maximum PLL lock time should be 7 msec.
The workaround resets the codec softly by toggling SHDN off and on if
PLL failed to lock for 10 msec.  Notably, there is no suggested hold
time for SHDN off.

On Baytrail-based chromebooks, the continuous PLL-unlocked state easily
recurs if there is a 10 msec delay between SHDN off and on.  Remove
the msleep().

Signed-off-by: Tzung-Bi Shih 
Link: https://lore.kernel.org/r/20191122073114.219945-2-tzun...@google.com
Reviewed-by: Pierre-Louis Bossart 
Signed-off-by: Mark Brown 
Signed-off-by: Sasha Levin 
---
 sound/soc/codecs/max98090.c | 8 +++-
 1 file changed, 7 insertions(+), 1 deletion(-)

diff --git a/sound/soc/codecs/max98090.c b/sound/soc/codecs/max98090.c
index 45da2b51543e7..6b9d326e11b07 100644
--- a/sound/soc/codecs/max98090.c
+++ b/sound/soc/codecs/max98090.c
@@ -2112,10 +2112,16 @@ static void max98090_pll_work(struct max98090_priv *max98090)
 
dev_info_ratelimited(component->dev, "PLL unlocked\n");
 
+   /*
+* As the datasheet suggested, the maximum PLL lock time should be
+* 7 msec.  The workaround resets the codec softly by toggling SHDN
+* off and on if PLL failed to lock for 10 msec.  Notably, there is
+* no suggested hold time for SHDN off.
+*/
+
/* Toggle shutdown OFF then ON */
snd_soc_component_update_bits(component, M98090_REG_DEVICE_SHUTDOWN,
M98090_SHDNN_MASK, 0);
-   msleep(10);
snd_soc_component_update_bits(component, M98090_REG_DEVICE_SHUTDOWN,
M98090_SHDNN_MASK, M98090_SHDNN_MASK);
 
-- 
2.25.1



[PATCH AUTOSEL 5.4 094/330] drm/amd/display: Initialize DSC PPS variables to 0

2020-09-17 Thread Sasha Levin
From: David Francis 

[ Upstream commit b6adc57cff616da18ff8cff028d2ddf585c97334 ]

For DSC MST, sometimes monitors would break out
in full-screen static. The issue was traced back to the
PPS generation code, where these variables were being used
uninitialized and were picking up garbage.

memset them to 0 to avoid this.

Reviewed-by: Nicholas Kazlauskas 
Signed-off-by: David Francis 
Signed-off-by: Mikita Lipski 
Signed-off-by: Alex Deucher 
Signed-off-by: Sasha Levin 
---
 drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c | 3 +++
 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c   | 3 +++
 2 files changed, 6 insertions(+)

diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c
index a519dbc5ecb65..5d6cbaebebc03 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c
@@ -496,6 +496,9 @@ bool dp_set_dsc_pps_sdp(struct pipe_ctx *pipe_ctx, bool enable)
struct dsc_config dsc_cfg;
uint8_t dsc_packed_pps[128];
 
+   memset(&dsc_cfg, 0, sizeof(dsc_cfg));
+   memset(dsc_packed_pps, 0, 128);
+
/* Enable DSC hw block */
dsc_cfg.pic_width = stream->timing.h_addressable + stream->timing.h_border_left + stream->timing.h_border_right;
dsc_cfg.pic_height = stream->timing.v_addressable + stream->timing.v_border_top + stream->timing.v_border_bottom;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c
index 1b419407af942..01040501d40e3 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c
@@ -207,6 +207,9 @@ static bool dsc2_get_packed_pps(struct display_stream_compressor *dsc, const str
struct dsc_reg_values dsc_reg_vals;
struct dsc_optc_config dsc_optc_cfg;
 
+   memset(&dsc_reg_vals, 0, sizeof(dsc_reg_vals));
+   memset(&dsc_optc_cfg, 0, sizeof(dsc_optc_cfg));
+
DC_LOG_DSC("Getting packed DSC PPS for DSC Config:");
dsc_config_log(dsc, dsc_cfg);
DC_LOG_DSC("DSC Picture Parameter Set (PPS):");
-- 
2.25.1


