From: Neil Horman <nhor...@redhat.com>

Missed this in my initial review.  The usage counter that guards buffer
use against the kthread task is horribly racy.  The atomic is
self-consistent, but there's nothing that prevents the
service_resp_queue function from re-incrementing it immediately after
the check in disable_with_timeout completes.  It's just an improper use
of atomics as a serialization mechanism.

Instead, use kthread_park to pause the thread's activity so that
buffers can be properly freed without racing against their use in
service_resp_queue.

Signed-off-by: Neil Horman <nhor...@redhat.com>
Signed-off-by: Benjamin Romer <benjamin.ro...@unisys.com>
---
 drivers/staging/unisys/visornic/visornic_main.c | 23 ++++++-----------------
 kernel/kthread.c                                |  4 ++++
 2 files changed, 10 insertions(+), 17 deletions(-)

diff --git a/drivers/staging/unisys/visornic/visornic_main.c 
b/drivers/staging/unisys/visornic/visornic_main.c
index 4d49937..aeecb14 100644
--- a/drivers/staging/unisys/visornic/visornic_main.c
+++ b/drivers/staging/unisys/visornic/visornic_main.c
@@ -126,7 +126,6 @@ struct visornic_devdata {
        unsigned short old_flags;       /* flags as they were prior to
                                         * set_multicast_list
                                         */
-       atomic_t usage;                 /* count of users */
        int num_rcv_bufs;               /* indicates how many rcv buffers
                                         * the vnic will post
                                         */
@@ -565,19 +564,7 @@ visornic_disable_with_timeout(struct net_device *netdev, 
const int timeout)
                spin_lock_irqsave(&devdata->priv_lock, flags);
        }
 
-       /* Wait for usage to go to 1 (no other users) before freeing
-        * rcv buffers
-        */
-       if (atomic_read(&devdata->usage) > 1) {
-               while (1) {
-                       set_current_state(TASK_INTERRUPTIBLE);
-                       spin_unlock_irqrestore(&devdata->priv_lock, flags);
-                       schedule_timeout(msecs_to_jiffies(10));
-                       spin_lock_irqsave(&devdata->priv_lock, flags);
-                       if (atomic_read(&devdata->usage))
-                               break;
-               }
-       }
+       kthread_park(devdata->threadinfo.task);
 
        /* we've set enabled to 0, so we can give up the lock. */
        spin_unlock_irqrestore(&devdata->priv_lock, flags);
@@ -594,6 +581,7 @@ visornic_disable_with_timeout(struct net_device *netdev, 
const int timeout)
                }
        }
 
+       kthread_unpark(devdata->threadinfo.task);
        return 0;
 }
 
@@ -1622,7 +1610,7 @@ send_rcv_posts_if_needed(struct visornic_devdata *devdata)
  *     Returns when response queue is empty or when the threadd stops.
  */
 static void
-drain_queue(struct uiscmdrsp *cmdrsp, struct visornic_devdata *devdata)
+service_resp_queue(struct uiscmdrsp *cmdrsp, struct visornic_devdata *devdata)
 {
        unsigned long flags;
        struct net_device *netdev;
@@ -1742,6 +1730,8 @@ process_incoming_rsps(void *v)
                        devdata->rsp_queue, (atomic_read(
                                             &devdata->interrupt_rcvd) == 1),
                                msecs_to_jiffies(devdata->thread_wait_ms));
+               if (kthread_should_park())
+                       kthread_parkme();
 
                /* periodically check to see if there are any rcf bufs which
                 * need to get sent to the IOSP. This can only happen if
@@ -1749,7 +1739,7 @@ process_incoming_rsps(void *v)
                 */
                atomic_set(&devdata->interrupt_rcvd, 0);
                send_rcv_posts_if_needed(devdata);
-               drain_queue(cmdrsp, devdata);
+               service_resp_queue(cmdrsp, devdata);
        }
 
        kfree(cmdrsp);
@@ -1809,7 +1799,6 @@ static int visornic_probe(struct visor_device *dev)
        init_waitqueue_head(&devdata->rsp_queue);
        spin_lock_init(&devdata->priv_lock);
        devdata->enabled = 0; /* not yet */
-       atomic_set(&devdata->usage, 1);
 
        /* Setup rcv bufs */
        channel_offset = offsetof(struct spar_io_channel_protocol,
diff --git a/kernel/kthread.c b/kernel/kthread.c
index 10e489c..bad80c1 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -97,6 +97,7 @@ bool kthread_should_park(void)
 {
        return test_bit(KTHREAD_SHOULD_PARK, &to_kthread(current)->flags);
 }
+EXPORT_SYMBOL(kthread_should_park);
 
 /**
  * kthread_freezable_should_stop - should this freezable kthread return now?
@@ -171,6 +172,7 @@ void kthread_parkme(void)
 {
        __kthread_parkme(to_kthread(current));
 }
+EXPORT_SYMBOL(kthread_parkme);
 
 static int kthread(void *_create)
 {
@@ -411,6 +413,7 @@ void kthread_unpark(struct task_struct *k)
        if (kthread)
                __kthread_unpark(k, kthread);
 }
+EXPORT_SYMBOL(kthread_unpark);
 
 /**
  * kthread_park - park a thread created by kthread_create().
@@ -441,6 +444,7 @@ int kthread_park(struct task_struct *k)
        }
        return ret;
 }
+EXPORT_SYMBOL(kthread_park);
 
 /**
  * kthread_stop - stop a thread created by kthread_create().
-- 
2.1.4

_______________________________________________
devel mailing list
de...@linuxdriverproject.org
http://driverdev.linuxdriverproject.org/mailman/listinfo/driverdev-devel

Reply via email to