[ANNOUNCE] v4.14.20-rt17

2018-02-22, Sebastian Andrzej Siewior
Dear RT folks!

I'm pleased to announce the v4.14.20-rt17 patch set. 

Changes since v4.14.20-rt16:

  - An RCU warning is disabled if we schedule() while acquiring a
    sleeping lock. The warning was still seen on UP-only kernels and is
    now disabled there, too. Reported by Grygorii Strashko.

  - The recording of the recursion limit in networking was changed from
    per-CPU to per-task on RT. This was done because BH context can be
    preempted on RT, so multiple tasks may attempt to send a packet and
    would wrongly increase the counter. However, the queue lock owner
    was still recorded as the CPU holding the lock instead of the task,
    which leads to a false recursion warning if the same transmit queue
    is already locked by another task (see the sketch after this list).
    Reported by Kurt Kanzenbach.
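
Not kernel code, just an illustration: below is a minimal userspace
sketch (all names made up) of why the recorded owner has to be the task
rather than the CPU once the transmit path can be preempted. A pthread
mutex stands in for the queue's xmit lock and a pthread_t for the
owning task.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* Stand-in for struct netdev_queue: the owner is a task (thread),
 * as in the RT variant, not a CPU number. */
struct txq {
	pthread_mutex_t lock;
	pthread_t owner;
	bool owned;
};

/* Only a queue owned by *ourselves* indicates deadlocking recursion.
 * With a CPU-number owner, a different task that preempted the lock
 * holder on the same CPU would wrongly match here. */
static bool txq_is_recursion(struct txq *q)
{
	return q->owned && pthread_equal(q->owner, pthread_self());
}

static void txq_lock(struct txq *q)
{
	pthread_mutex_lock(&q->lock);
	q->owner = pthread_self();
	q->owned = true;
}

static void txq_unlock(struct txq *q)
{
	q->owned = false;
	pthread_mutex_unlock(&q->lock);
}

int main(void)
{
	struct txq q = { .lock = PTHREAD_MUTEX_INITIALIZER };

	txq_lock(&q);
	/* Prints 1: we hold the queue ourselves. */
	printf("recursion by owning task: %d\n", txq_is_recursion(&q));
	txq_unlock(&q);
	/* Prints 0: the queue is free again. */
	printf("recursion after unlock: %d\n", txq_is_recursion(&q));
	return 0;
}

The netdevice.h hunk appended below does the same in the kernel by
storing a struct task_struct pointer in xmit_lock_owner on
CONFIG_PREEMPT_RT_FULL instead of a CPU number.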

Known issues
 - A warning triggered in "rcu_note_context_switch" originated from
   SyS_timer_gettime(). The issue was always there; it is just now
   visible. Reported by Grygorii Strashko and Daniel Wagner.

The delta patch against v4.14.20-rt16 is appended below and can be found here:

  https://cdn.kernel.org/pub/linux/kernel/projects/rt/4.14/incr/patch-4.14.20-rt16-rt17.patch.xz

You can get this release via the git tree at:

  git://git.kernel.org/pub/scm/linux/kernel/git/rt/linux-rt-devel.git v4.14.20-rt17

The RT patch against v4.14.20 can be found here:

  https://cdn.kernel.org/pub/linux/kernel/projects/rt/4.14/older/patch-4.14.20-rt17.patch.xz

The split quilt queue is available at:

  https://cdn.kernel.org/pub/linux/kernel/projects/rt/4.14/older/patches-4.14.20-rt17.tar.xz

Sebastian

diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index dea370d31dc3..cd0f34dfc5f4 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -583,7 +583,11 @@ struct netdev_queue {
  * write-mostly part
  */
	spinlock_t	_xmit_lock ____cacheline_aligned_in_smp;
+#ifdef CONFIG_PREEMPT_RT_FULL
+   struct task_struct  *xmit_lock_owner;
+#else
int xmit_lock_owner;
+#endif
/*
 * Time (in jiffies) of last Tx
 */
@@ -3547,10 +3551,48 @@ static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits)
return (1 << debug_value) - 1;
 }
 
+#ifdef CONFIG_PREEMPT_RT_FULL
+static inline void netdev_queue_set_owner(struct netdev_queue *txq, int cpu)
+{
+   txq->xmit_lock_owner = current;
+}
+
+static inline void netdev_queue_clear_owner(struct netdev_queue *txq)
+{
+   txq->xmit_lock_owner = NULL;
+}
+
+static inline bool netdev_queue_has_owner(struct netdev_queue *txq)
+{
+   if (txq->xmit_lock_owner != NULL)
+   return true;
+   return false;
+}
+
+#else
+
+static inline void netdev_queue_set_owner(struct netdev_queue *txq, int cpu)
+{
+   txq->xmit_lock_owner = cpu;
+}
+
+static inline void netdev_queue_clear_owner(struct netdev_queue *txq)
+{
+   txq->xmit_lock_owner = -1;
+}
+
+static inline bool netdev_queue_has_owner(struct netdev_queue *txq)
+{
+   if (txq->xmit_lock_owner != -1)
+   return true;
+   return false;
+}
+#endif
+
 static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu)
 {
	spin_lock(&txq->_xmit_lock);
-   txq->xmit_lock_owner = cpu;
+   netdev_queue_set_owner(txq, cpu);
 }
 
 static inline bool __netif_tx_acquire(struct netdev_queue *txq)
@@ -3567,32 +3609,32 @@ static inline void __netif_tx_release(struct netdev_queue *txq)
 static inline void __netif_tx_lock_bh(struct netdev_queue *txq)
 {
	spin_lock_bh(&txq->_xmit_lock);
-   txq->xmit_lock_owner = smp_processor_id();
+   netdev_queue_set_owner(txq, smp_processor_id());
 }
 
 static inline bool __netif_tx_trylock(struct netdev_queue *txq)
 {
	bool ok = spin_trylock(&txq->_xmit_lock);
if (likely(ok))
-   txq->xmit_lock_owner = smp_processor_id();
+   netdev_queue_set_owner(txq, smp_processor_id());
return ok;
 }
 
 static inline void __netif_tx_unlock(struct netdev_queue *txq)
 {
-   txq->xmit_lock_owner = -1;
+   netdev_queue_clear_owner(txq);
	spin_unlock(&txq->_xmit_lock);
 }
 
 static inline void __netif_tx_unlock_bh(struct netdev_queue *txq)
 {
-   txq->xmit_lock_owner = -1;
+   netdev_queue_clear_owner(txq);
	spin_unlock_bh(&txq->_xmit_lock);
 }
 
 static inline void txq_trans_update(struct netdev_queue *txq)
 {
-   if (txq->xmit_lock_owner != -1)
+   if (netdev_queue_has_owner(txq))
txq->trans_start = jiffies;
 }
 
diff --git a/include/linux/preempt.h b/include/linux/preempt.h
index b0e6248c8a3c..0591df500e9d 100644
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
@@ -231,6 +231,15 @@ extern void migrate_enable(void);
 
 int __migrate_disabled(struct task_struct *p);
 
+#elif !defined(CONFIG_SMP) && defined(CONFIG_PREEMPT_RT_BASE)
+
+extern void migrate_disable(void);
+extern void migrate_enable(void);
+static inline int __migrate_disabled(struct task_struct *p)
+{
+	return 0;
+}
+