Noticed that we can shuffle the code around in fcoe_percpu_receive_thread a bit
and avoid taking the fcoe_rx_list lock twice per iteration.  This should improve
throughput somewhat.  With this change we take the lock and check for new
frames in a single critical section.  Only if the list is empty do we drop the
lock and re-acquire it after being signaled to wake up.

Signed-off-by: Neil Horman <nhor...@tuxdriver.com>
CC: Robert Love <robert.w.l...@intel.com>
---
 drivers/scsi/fcoe/fcoe.c |   18 ++++++++++--------
 1 files changed, 10 insertions(+), 8 deletions(-)

diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index 0b48c7d..082951f 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -1820,19 +1820,21 @@ static int fcoe_percpu_receive_thread(void *arg)
 
                spin_lock_bh(&p->fcoe_rx_list.lock);
                skb_queue_splice_init(&p->fcoe_rx_list, &tmp);
-               spin_unlock_bh(&p->fcoe_rx_list.lock);
-
-               while ((skb = __skb_dequeue(&tmp)) != NULL)
-                       fcoe_recv_frame(skb);
 
-               spin_lock_bh(&p->fcoe_rx_list.lock);
-               if (!skb_queue_len(&p->fcoe_rx_list)) {
+               while (!skb_queue_len(&tmp) && !kthread_should_stop()) {
                        set_current_state(TASK_INTERRUPTIBLE);
                        spin_unlock_bh(&p->fcoe_rx_list.lock);
                        schedule();
                        set_current_state(TASK_RUNNING);
-               } else
-                       spin_unlock_bh(&p->fcoe_rx_list.lock);
+                       spin_lock_bh(&p->fcoe_rx_list.lock);
+                       skb_queue_splice_init(&p->fcoe_rx_list, &tmp);
+               }
+
+               spin_unlock_bh(&p->fcoe_rx_list.lock);
+
+               while ((skb = __skb_dequeue(&tmp)) != NULL)
+                       fcoe_recv_frame(skb);
+
        }
        return 0;
 }
-- 
1.7.7.6

_______________________________________________
devel mailing list
devel@open-fcoe.org
https://lists.open-fcoe.org/mailman/listinfo/devel

Reply via email to