Make SDT able to track dependencies caused by waitqueue waits.
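
With this patch, waits on a waitqueue and the corresponding wake-ups
are annotated through the SDT API: a dept_map is embedded in struct
wait_queue_head, prepare_to_wait*() reports a wait via
sdt_wait_prepare() for TASK_NORMAL sleeps, finish_wait() ends it via
sdt_wait_finish(), and __wake_up_common() reports the wake-up via
sdt_event().

For example, a typical waiter/waker pair ends up annotated as sketched
below. This is only an illustrative sketch; my_wq, my_cond, waiter()
and waker() are hypothetical names, and the sdt_*() calls mentioned in
the comments are the ones this patch adds inside the waitqueue code
itself, not calls the user has to make.

  #include <linux/wait.h>
  #include <linux/sched.h>

  /* dmap is set up via DEPT_SDT_MAP_INIT() in the initializer. */
  static DECLARE_WAIT_QUEUE_HEAD(my_wq);
  static bool my_cond;

  static void waiter(void)
  {
          DEFINE_WAIT(wait);

          /*
           * prepare_to_wait() now calls sdt_wait_prepare(&my_wq.dmap)
           * since TASK_INTERRUPTIBLE is part of TASK_NORMAL.
           */
          prepare_to_wait(&my_wq, &wait, TASK_INTERRUPTIBLE);
          if (!my_cond)
                  schedule();
          /* finish_wait() now calls sdt_wait_finish(). */
          finish_wait(&my_wq, &wait);
  }

  static void waker(void)
  {
          my_cond = true;
          /*
           * __wake_up_common() now calls sdt_event(&my_wq.dmap) before
           * invoking each entry's wake function.
           */
          wake_up(&my_wq);
  }

With that, DEPT can connect the wait on my_wq in waiter() with the
wake-up event issued from waker() and include the waitqueue in its
dependency tracking.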

Signed-off-by: Byungchul Park <byungchul.p...@lge.com>
---
 include/linux/wait.h |  6 +++++-
 kernel/sched/wait.c  | 16 ++++++++++++++++
 2 files changed, 21 insertions(+), 1 deletion(-)

diff --git a/include/linux/wait.h b/include/linux/wait.h
index 851e07d..2133998 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -7,6 +7,7 @@
 #include <linux/list.h>
 #include <linux/stddef.h>
 #include <linux/spinlock.h>
+#include <linux/dept_sdt.h>
 
 #include <asm/current.h>
 #include <uapi/linux/wait.h>
@@ -37,6 +38,7 @@ struct wait_queue_entry {
 struct wait_queue_head {
        spinlock_t              lock;
        struct list_head        head;
+       struct dept_map         dmap;
 };
 typedef struct wait_queue_head wait_queue_head_t;
 
@@ -56,7 +58,8 @@ struct wait_queue_head {
 
 #define __WAIT_QUEUE_HEAD_INITIALIZER(name) {                                  \
        .lock           = __SPIN_LOCK_UNLOCKED(name.lock),                      \
-       .head           = LIST_HEAD_INIT(name.head) }
+       .head           = LIST_HEAD_INIT(name.head),                            \
+       .dmap           = DEPT_SDT_MAP_INIT(name) }
 
 #define DECLARE_WAIT_QUEUE_HEAD(name) \
        struct wait_queue_head name = __WAIT_QUEUE_HEAD_INITIALIZER(name)
@@ -67,6 +70,7 @@ struct wait_queue_head {
        do {                                                                    \
                static struct lock_class_key __key;                             \
                                                                                \
+               sdt_map_init(&(wq_head)->dmap);                                 \
                __init_waitqueue_head((wq_head), #wq_head, &__key);             \
        } while (0)
 
diff --git a/kernel/sched/wait.c b/kernel/sched/wait.c
index eca3810..fc5a16a 100644
--- a/kernel/sched/wait.c
+++ b/kernel/sched/wait.c
@@ -105,6 +105,7 @@ static int __wake_up_common(struct wait_queue_head *wq_head, unsigned int mode,
                if (flags & WQ_FLAG_BOOKMARK)
                        continue;
 
+               sdt_event(&wq_head->dmap);
                ret = curr->func(curr, mode, wake_flags, key);
                if (ret < 0)
                        break;
@@ -268,6 +269,9 @@ void __wake_up_pollfree(struct wait_queue_head *wq_head)
                __add_wait_queue(wq_head, wq_entry);
        set_current_state(state);
        spin_unlock_irqrestore(&wq_head->lock, flags);
+
+       if (state & TASK_NORMAL)
+               sdt_wait_prepare(&wq_head->dmap);
 }
 EXPORT_SYMBOL(prepare_to_wait);
 
@@ -286,6 +290,10 @@ void __wake_up_pollfree(struct wait_queue_head *wq_head)
        }
        set_current_state(state);
        spin_unlock_irqrestore(&wq_head->lock, flags);
+
+       if (state & TASK_NORMAL)
+               sdt_wait_prepare(&wq_head->dmap);
+
        return was_empty;
 }
 EXPORT_SYMBOL(prepare_to_wait_exclusive);
@@ -331,6 +339,9 @@ long prepare_to_wait_event(struct wait_queue_head *wq_head, struct wait_queue_en
        }
        spin_unlock_irqrestore(&wq_head->lock, flags);
 
+       if (!ret && state & TASK_NORMAL)
+               sdt_wait_prepare(&wq_head->dmap);
+
        return ret;
 }
 EXPORT_SYMBOL(prepare_to_wait_event);
@@ -352,7 +363,9 @@ int do_wait_intr(wait_queue_head_t *wq, wait_queue_entry_t *wait)
                return -ERESTARTSYS;
 
        spin_unlock(&wq->lock);
+       sdt_wait_prepare(&wq->dmap);
        schedule();
+       sdt_wait_finish();
        spin_lock(&wq->lock);
 
        return 0;
@@ -369,7 +382,9 @@ int do_wait_intr_irq(wait_queue_head_t *wq, wait_queue_entry_t *wait)
                return -ERESTARTSYS;
 
        spin_unlock_irq(&wq->lock);
+       sdt_wait_prepare(&wq->dmap);
        schedule();
+       sdt_wait_finish();
        spin_lock_irq(&wq->lock);
 
        return 0;
@@ -389,6 +404,7 @@ void finish_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_en
 {
        unsigned long flags;
 
+       sdt_wait_finish();
        __set_current_state(TASK_RUNNING);
        /*
         * We can check for list emptiness outside the lock
-- 
1.9.1
