From: Ruiqiang Hao <ruiqiang....@windriver.com>

Commit e91e87b7adf9 ("octeontx2-pf: disable preemption while using
per_cpu pointer") introduced the following issue, which was fixed
upstream by commit 29e9c67bf327 ("octeontx2-pf: Fix the use of
GFP_KERNEL in atomic context on rt") and commit 2827c4eb429d
("octeontx2-pf: Avoid use of GFP_KERNEL in atomic context").
Drop the get_cpu()/put_cpu() pairs to fix this.

BUG: sleeping function called from invalid context at 
kernel/locking/spinlock_rt.c:46
in_atomic(): 1, irqs_disabled(): 0, non_block: 0, pid: 1, name: swapper/0
preempt_count: 1, expected: 0
RCU nest depth: 0, expected: 0
Preemption disabled at:
[<ffff80000894737c>] otx2_rq_aura_pool_init+0x14c/0x290
CPU: 1 PID: 1 Comm: swapper/0 Not tainted 5.15.124-rt61-yocto-preempt-rt #2
Hardware name: Marvell OcteonTX CN96XX board (DT)
Call trace:
 dump_backtrace+0x0/0x1c0
 show_stack+0x20/0x30
 dump_stack_lvl+0x68/0x84
 dump_stack+0x18/0x34
 __might_resched+0x154/0x1b0
 rt_spin_lock+0x38/0xd0
 __alloc_pages_bulk+0x2e0/0x5e0
 __page_pool_alloc_pages_slow+0x68/0x39c
 page_pool_alloc_frag+0x158/0x240
 __otx2_alloc_rbuf+0x60/0xc0
 otx2_rq_aura_pool_init+0x1cc/0x290
 otx2_open+0x238/0xa00
 __dev_open+0x100/0x220
 __dev_change_flags+0x1a8/0x230
 dev_change_flags+0x2c/0x70
 ip_auto_config+0x274/0xf58
 do_one_initcall+0x4c/0x2c0
 kernel_init_freeable+0x238/0x2a8
 kernel_init+0x2c/0x12c
 ret_from_fork+0x10/0x20

Signed-off-by: Ruiqiang Hao <ruiqiang....@windriver.com>
---
 .../net/ethernet/marvell/octeontx2/nic/otx2_common.c  | 11 ++---------
 1 file changed, 2 insertions(+), 9 deletions(-)

diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c 
b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
index c3797d623977..e792b0ae399e 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
@@ -1144,7 +1144,6 @@ static void otx2_pool_refill_task(struct work_struct 
*work)
        rbpool = cq->rbpool;
        free_ptrs = cq->pool_ptrs;
 
-       get_cpu();
        while (cq->pool_ptrs) {
                if (otx2_alloc_rbuf(pfvf, rbpool, &bufptr)) {
                        /* Schedule a WQ if we fails to free atleast half of the
@@ -1164,7 +1163,6 @@ static void otx2_pool_refill_task(struct work_struct 
*work)
                pfvf->hw_ops->aura_freeptr(pfvf, qidx, bufptr + OTX2_HEAD_ROOM);
                cq->pool_ptrs--;
        }
-       put_cpu();
        cq->refill_task_sched = false;
 }
 
@@ -1541,7 +1539,6 @@ int otx2_sq_aura_pool_init(struct otx2_nic *pfvf)
        if (err)
                goto fail;
 
-       get_cpu();
        /* Allocate pointers and free them to aura/pool */
        for (qidx = 0; qidx < hw->tot_tx_queues; qidx++) {
                pool_id = otx2_get_pool_idx(pfvf, AURA_NIX_SQ, qidx);
@@ -1565,7 +1562,6 @@ int otx2_sq_aura_pool_init(struct otx2_nic *pfvf)
        }
 
 err_mem:
-       put_cpu();
        return err ? -ENOMEM : 0;
 
 fail:
@@ -1606,21 +1602,18 @@ int otx2_rq_aura_pool_init(struct otx2_nic *pfvf)
        if (err)
                goto fail;
 
-       get_cpu();
        /* Allocate pointers and free them to aura/pool */
        for (pool_id = 0; pool_id < hw->rqpool_cnt; pool_id++) {
                pool = &pfvf->qset.pool[pool_id];
                for (ptr = 0; ptr < num_ptrs; ptr++) {
                        err = otx2_alloc_rbuf(pfvf, pool, &bufptr);
                        if (err)
-                               goto err_mem;
+                               return -ENOMEM;
                        pfvf->hw_ops->aura_freeptr(pfvf, pool_id,
                                                   bufptr + OTX2_HEAD_ROOM);
                }
        }
-err_mem:
-       put_cpu();
-       return err ? -ENOMEM : 0;
+       return 0;
 fail:
        otx2_mbox_reset(&pfvf->mbox.mbox, 0);
        otx2_aura_pool_free(pfvf);
-- 
2.35.5

-=-=-=-=-=-=-=-=-=-=-=-
Links: You receive all messages sent to this group.
View/Reply Online (#12953): 
https://lists.yoctoproject.org/g/linux-yocto/message/12953
Mute This Topic: https://lists.yoctoproject.org/mt/100639429/21656
Group Owner: linux-yocto+ow...@lists.yoctoproject.org
Unsubscribe: https://lists.yoctoproject.org/g/linux-yocto/unsub 
[arch...@mail-archive.com]
-=-=-=-=-=-=-=-=-=-=-=-

Reply via email to