Under synflood conditions, queueing defense_work on system_long_wq may
pin it to a saturated CPU: system_long_wq is a bound (per-CPU)
workqueue, so the work executes on the CPU it was queued from.

We've observed improved throughput on a DPDK/VPP application with this
change, which we attribute to reduced context switching.

The defense_work handler has no per-CPU data dependencies and no
cache-locality requirements that would justify keeping it on a bound,
per-CPU workqueue.

Signed-off-by: Ismael Luceno <[email protected]>
---
Depends-on: wq/for-7.1 c116737e972e ("workqueue: Add system_dfl_long_wq for long unbound works")

 net/netfilter/ipvs/ip_vs_ctl.c | 6 +++---
 net/netfilter/ipvs/ip_vs_est.c | 2 +-
 2 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index 35642de2a0fe..948ae5882a70 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -234,7 +234,7 @@ #define DEFENSE_TIMER_PERIOD        1*HZ
        update_defense_level(ipvs);
        if (atomic_read(&ipvs->dropentry))
                ip_vs_random_dropentry(ipvs);
-       queue_delayed_work(system_long_wq, &ipvs->defense_work,
+       queue_delayed_work(system_dfl_long_wq, &ipvs->defense_work,
                           DEFENSE_TIMER_PERIOD);
 }
 #endif
@@ -273,7 +273,7 @@ #define DEFENSE_TIMER_PERIOD        1*HZ
        atomic_set(&ipvs->est_genid_done, genid);
 
        if (repeat)
-               queue_delayed_work(system_long_wq, &ipvs->est_reload_work,
+               queue_delayed_work(system_dfl_long_wq, &ipvs->est_reload_work,
                                   delay);
 
 unlock:
@@ -4377,7 +4377,7 @@ do_ip_vs_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
                goto err;
 
        /* Schedule defense work */
-       queue_delayed_work(system_long_wq, &ipvs->defense_work,
+       queue_delayed_work(system_dfl_long_wq, &ipvs->defense_work,
                           DEFENSE_TIMER_PERIOD);
 
        return 0;
diff --git a/net/netfilter/ipvs/ip_vs_est.c b/net/netfilter/ipvs/ip_vs_est.c
index b17de33314da..454ea24828cc 100644
--- a/net/netfilter/ipvs/ip_vs_est.c
+++ b/net/netfilter/ipvs/ip_vs_est.c
@@ -235,7 +235,7 @@ #define pr_fmt(fmt) "IPVS: " fmt
        ip_vs_est_stopped_recalc(ipvs);
        /* Bump the kthread configuration genid */
        atomic_inc(&ipvs->est_genid);
-       queue_delayed_work(system_long_wq, &ipvs->est_reload_work, 0);
+       queue_delayed_work(system_dfl_long_wq, &ipvs->est_reload_work, 0);
 }
 
 /* Start kthread task with current configuration */
