Use rcu_read_lock_dont_migrate() and rcu_read_unlock_migrate() in
trampoline.c to obtain better performance when PREEMPT_RCU is not
enabled. With PREEMPT_RCU disabled, rcu_read_lock() already prevents
migration by disabling preemption, so the separate
migrate_disable()/migrate_enable() pair is redundant overhead.
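
For reference, a minimal sketch of the helpers' intended behavior
(illustrative only, not copied from rcupdate.h; it assumes the migrate
calls are gated on CONFIG_PREEMPT_RCU):

	/*
	 * Sketch: when PREEMPT_RCU is off, rcu_read_lock() disables
	 * preemption, which already prevents migration, so the
	 * migrate_disable()/migrate_enable() pair can be elided.
	 */
	static inline void rcu_read_lock_dont_migrate(void)
	{
		if (IS_ENABLED(CONFIG_PREEMPT_RCU))
			migrate_disable();
		rcu_read_lock();
	}

	static inline void rcu_read_unlock_migrate(void)
	{
		rcu_read_unlock();
		if (IS_ENABLED(CONFIG_PREEMPT_RCU))
			migrate_enable();
	}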

Signed-off-by: Menglong Dong <dong...@chinatelecom.cn>
---
v2:
- use rcu_read_lock_dont_migrate() instead of rcu_migrate_disable()
---
 kernel/bpf/trampoline.c | 18 ++++++------------
 1 file changed, 6 insertions(+), 12 deletions(-)

diff --git a/kernel/bpf/trampoline.c b/kernel/bpf/trampoline.c
index 0e364614c3a2..5949095e51c3 100644
--- a/kernel/bpf/trampoline.c
+++ b/kernel/bpf/trampoline.c
@@ -899,8 +899,7 @@ static __always_inline u64 notrace bpf_prog_start_time(void)
 static u64 notrace __bpf_prog_enter_recur(struct bpf_prog *prog, struct bpf_tramp_run_ctx *run_ctx)
        __acquires(RCU)
 {
-       rcu_read_lock();
-       migrate_disable();
+       rcu_read_lock_dont_migrate();
 
        run_ctx->saved_run_ctx = bpf_set_run_ctx(&run_ctx->run_ctx);
 
@@ -949,8 +948,7 @@ static void notrace __bpf_prog_exit_recur(struct bpf_prog *prog, u64 start,
 
        update_prog_stats(prog, start);
        this_cpu_dec(*(prog->active));
-       migrate_enable();
-       rcu_read_unlock();
+       rcu_read_unlock_migrate();
 }
 
 static u64 notrace __bpf_prog_enter_lsm_cgroup(struct bpf_prog *prog,
@@ -960,8 +958,7 @@ static u64 notrace __bpf_prog_enter_lsm_cgroup(struct bpf_prog *prog,
        /* Runtime stats are exported via actual BPF_LSM_CGROUP
         * programs, not the shims.
         */
-       rcu_read_lock();
-       migrate_disable();
+       rcu_read_lock_dont_migrate();
 
        run_ctx->saved_run_ctx = bpf_set_run_ctx(&run_ctx->run_ctx);
 
@@ -974,8 +971,7 @@ static void notrace __bpf_prog_exit_lsm_cgroup(struct bpf_prog *prog, u64 start,
 {
        bpf_reset_run_ctx(run_ctx->saved_run_ctx);
 
-       migrate_enable();
-       rcu_read_unlock();
+       rcu_read_unlock_migrate();
 }
 
 u64 notrace __bpf_prog_enter_sleepable_recur(struct bpf_prog *prog,
@@ -1033,8 +1029,7 @@ static u64 notrace __bpf_prog_enter(struct bpf_prog *prog,
                                    struct bpf_tramp_run_ctx *run_ctx)
        __acquires(RCU)
 {
-       rcu_read_lock();
-       migrate_disable();
+       rcu_read_lock_dont_migrate();
 
        run_ctx->saved_run_ctx = bpf_set_run_ctx(&run_ctx->run_ctx);
 
@@ -1048,8 +1043,7 @@ static void notrace __bpf_prog_exit(struct bpf_prog *prog, u64 start,
        bpf_reset_run_ctx(run_ctx->saved_run_ctx);
 
        update_prog_stats(prog, start);
-       migrate_enable();
-       rcu_read_unlock();
+       rcu_read_unlock_migrate();
 }
 
 void notrace __bpf_tramp_enter(struct bpf_tramp_image *tr)
-- 
2.50.1

