Re: [PATCH 1/2] clk: abstract locking out into helper functions

2013-04-02 Thread Ulf Hansson
On 28 March 2013 21:59, Mike Turquette  wrote:
> Create locking helpers for the global mutex and global spinlock.  The
> definitions of these helpers will be expanded upon in the next patch
> which introduces reentrancy into the locking scheme.
>
> Signed-off-by: Mike Turquette 
> Cc: Rajagopal Venkat 
> Cc: David Brown 
> Cc: Ulf Hansson 
> Tested-by: Laurent Pinchart 
> Reviewed-by: Thomas Gleixner 
> ---
> Changes since v5:
>  * pass flags by value instead of by reference in clk_enable_{un}lock
>
>  drivers/clk/clk.c |   99 +
>  1 file changed, 61 insertions(+), 38 deletions(-)
>
> diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
> index 5e8..0b5d612 100644
> --- a/drivers/clk/clk.c
> +++ b/drivers/clk/clk.c
> @@ -27,6 +27,29 @@ static HLIST_HEAD(clk_root_list);
>  static HLIST_HEAD(clk_orphan_list);
>  static LIST_HEAD(clk_notifier_list);
>
> +/***   locking ***/
> +static void clk_prepare_lock(void)
> +{
> +   mutex_lock(&prepare_lock);
> +}
> +
> +static void clk_prepare_unlock(void)
> +{
> +   mutex_unlock(&prepare_lock);
> +}
> +
> +static unsigned long clk_enable_lock(void)
> +{
> +   unsigned long flags;
> +   spin_lock_irqsave(&enable_lock, flags);
> +   return flags;
> +}
> +
> +static void clk_enable_unlock(unsigned long flags)
> +{
> +   spin_unlock_irqrestore(&enable_lock, flags);
> +}
> +
>  /***debugfs support***/
>
>  #ifdef CONFIG_COMMON_CLK_DEBUG
> @@ -69,7 +92,7 @@ static int clk_summary_show(struct seq_file *s, void *data)
> seq_printf(s, "   clock                        enable_cnt  prepare_cnt  rate\n");
> seq_printf(s, "---------------------------------------------------------------------\n");
>
> -   mutex_lock(&prepare_lock);
> +   clk_prepare_lock();
>
> hlist_for_each_entry(c, &clk_root_list, child_node)
> clk_summary_show_subtree(s, c, 0);
> @@ -77,7 +100,7 @@ static int clk_summary_show(struct seq_file *s, void *data)
> hlist_for_each_entry(c, &clk_orphan_list, child_node)
> clk_summary_show_subtree(s, c, 0);
>
> -   mutex_unlock(&prepare_lock);
> +   clk_prepare_unlock();
>
> return 0;
>  }
> @@ -130,7 +153,7 @@ static int clk_dump(struct seq_file *s, void *data)
>
> seq_printf(s, "{");
>
> -   mutex_lock(&prepare_lock);
> +   clk_prepare_lock();
>
> hlist_for_each_entry(c, &clk_root_list, child_node) {
> if (!first_node)
> @@ -144,7 +167,7 @@ static int clk_dump(struct seq_file *s, void *data)
> clk_dump_subtree(s, c, 0);
> }
>
> -   mutex_unlock(&prepare_lock);
> +   clk_prepare_unlock();
>
> seq_printf(s, "}");
> return 0;
> @@ -316,7 +339,7 @@ static int __init clk_debug_init(void)
> if (!orphandir)
> return -ENOMEM;
>
> -   mutex_lock(&prepare_lock);
> +   clk_prepare_lock();
>
> hlist_for_each_entry(clk, &clk_root_list, child_node)
> clk_debug_create_subtree(clk, rootdir);
> @@ -326,7 +349,7 @@ static int __init clk_debug_init(void)
>
> inited = 1;
>
> -   mutex_unlock(&prepare_lock);
> +   clk_prepare_unlock();
>
> return 0;
>  }
> @@ -372,7 +395,7 @@ static void clk_disable_unused_subtree(struct clk *clk)
> hlist_for_each_entry(child, &clk->children, child_node)
> clk_disable_unused_subtree(child);
>
> -   spin_lock_irqsave(&enable_lock, flags);
> +   flags = clk_enable_lock();
>
> if (clk->enable_count)
> goto unlock_out;
> @@ -393,7 +416,7 @@ static void clk_disable_unused_subtree(struct clk *clk)
> }
>
>  unlock_out:
> -   spin_unlock_irqrestore(&enable_lock, flags);
> +   clk_enable_unlock(flags);
>
>  out:
> return;
> @@ -403,7 +426,7 @@ static int clk_disable_unused(void)
>  {
> struct clk *clk;
>
> -   mutex_lock(&prepare_lock);
> +   clk_prepare_lock();
>
> hlist_for_each_entry(clk, &clk_root_list, child_node)
> clk_disable_unused_subtree(clk);
> @@ -417,7 +440,7 @@ static int clk_disable_unused(void)
> hlist_for_each_entry(clk, &clk_orphan_list, child_node)
> clk_unprepare_unused_subtree(clk);
>
> -   mutex_unlock(&prepare_lock);
> +   clk_prepare_unlock();
>
> return 0;
>  }
> @@ -600,9 +623,9 @@ void __clk_unprepare(struct clk *clk)
>   */
>  void clk_unprepare(struct clk *clk)
>  {
> -   mutex_lock(&prepare_lock);
> +   clk_prepare_lock();
> __clk_unprepare(clk);
> -   mutex_unlock(&prepare_lock);
> +   clk_prepare_unlock();
>  }
>  EXPORT_SYMBOL_GPL(clk_unprepare);
>
> @@ -648,9 +671,9 @@ int clk_prepare(struct clk *clk)
>  {
> int ret;
>
> -   mutex_lock(&prepare_lock);
> +   clk_prepare_lock();
> ret = __clk_prepare(clk);
> -   mutex_unlock(&prepare_lock);
> +   clk_prepare_unlock();

[PATCH 1/2] clk: abstract locking out into helper functions

2013-03-28 Thread Mike Turquette
Create locking helpers for the global mutex and global spinlock.  The
definitions of these helpers will be expanded upon in the next patch
which introduces reentrancy into the locking scheme.
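
For reference, one way reentrancy could be layered on top of these helpers is an
owner/refcount scheme, sketched below for the prepare lock. This is only an
illustration under assumed field names (prepare_owner, prepare_refcnt) and relies
on the existing prepare_lock mutex plus current from <linux/sched.h>; it is not
necessarily what the follow-up patch implements.

/*
 * Illustrative sketch only: a reentrant variant of the prepare lock
 * using owner/refcount tracking.
 */
static struct task_struct *prepare_owner;
static int prepare_refcnt;

static void clk_prepare_lock(void)
{
        if (!mutex_trylock(&prepare_lock)) {
                if (prepare_owner == current) {
                        /* same task already holds the lock: just nest */
                        prepare_refcnt++;
                        return;
                }
                mutex_lock(&prepare_lock);
        }
        prepare_owner = current;
        prepare_refcnt = 1;
}

static void clk_prepare_unlock(void)
{
        /* drop one nesting level; release only on the outermost unlock */
        if (--prepare_refcnt)
                return;
        prepare_owner = NULL;
        mutex_unlock(&prepare_lock);
}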

Signed-off-by: Mike Turquette 
Cc: Rajagopal Venkat 
Cc: David Brown 
Cc: Ulf Hansson 
Tested-by: Laurent Pinchart 
Reviewed-by: Thomas Gleixner 
---
Changes since v5:
 * pass flags by value instead of by reference in clk_enable_{un}lock

 drivers/clk/clk.c |   99 +
 1 file changed, 61 insertions(+), 38 deletions(-)

diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index 5e8..0b5d612 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -27,6 +27,29 @@ static HLIST_HEAD(clk_root_list);
 static HLIST_HEAD(clk_orphan_list);
 static LIST_HEAD(clk_notifier_list);
 
+/***   locking ***/
+static void clk_prepare_lock(void)
+{
+   mutex_lock(&prepare_lock);
+}
+
+static void clk_prepare_unlock(void)
+{
+   mutex_unlock(&prepare_lock);
+}
+
+static unsigned long clk_enable_lock(void)
+{
+   unsigned long flags;
+   spin_lock_irqsave(&enable_lock, flags);
+   return flags;
+}
+
+static void clk_enable_unlock(unsigned long flags)
+{
+   spin_unlock_irqrestore(&enable_lock, flags);
+}
+
 /***debugfs support***/
 
 #ifdef CONFIG_COMMON_CLK_DEBUG
@@ -69,7 +92,7 @@ static int clk_summary_show(struct seq_file *s, void *data)
seq_printf(s, "   clockenable_cnt  prepare_cnt  
rate\n");
seq_printf(s, 
"-\n");
 
-   mutex_lock(&prepare_lock);
+   clk_prepare_lock();
 
hlist_for_each_entry(c, &clk_root_list, child_node)
clk_summary_show_subtree(s, c, 0);
@@ -77,7 +100,7 @@ static int clk_summary_show(struct seq_file *s, void *data)
hlist_for_each_entry(c, &clk_orphan_list, child_node)
clk_summary_show_subtree(s, c, 0);
 
-   mutex_unlock(&prepare_lock);
+   clk_prepare_unlock();
 
return 0;
 }
@@ -130,7 +153,7 @@ static int clk_dump(struct seq_file *s, void *data)
 
seq_printf(s, "{");
 
-   mutex_lock(&prepare_lock);
+   clk_prepare_lock();
 
hlist_for_each_entry(c, &clk_root_list, child_node) {
if (!first_node)
@@ -144,7 +167,7 @@ static int clk_dump(struct seq_file *s, void *data)
clk_dump_subtree(s, c, 0);
}
 
-   mutex_unlock(&prepare_lock);
+   clk_prepare_unlock();
 
seq_printf(s, "}");
return 0;
@@ -316,7 +339,7 @@ static int __init clk_debug_init(void)
if (!orphandir)
return -ENOMEM;
 
-   mutex_lock(&prepare_lock);
+   clk_prepare_lock();
 
hlist_for_each_entry(clk, &clk_root_list, child_node)
clk_debug_create_subtree(clk, rootdir);
@@ -326,7 +349,7 @@ static int __init clk_debug_init(void)
 
inited = 1;
 
-   mutex_unlock(&prepare_lock);
+   clk_prepare_unlock();
 
return 0;
 }
@@ -372,7 +395,7 @@ static void clk_disable_unused_subtree(struct clk *clk)
hlist_for_each_entry(child, &clk->children, child_node)
clk_disable_unused_subtree(child);
 
-   spin_lock_irqsave(&enable_lock, flags);
+   flags = clk_enable_lock();
 
if (clk->enable_count)
goto unlock_out;
@@ -393,7 +416,7 @@ static void clk_disable_unused_subtree(struct clk *clk)
}
 
 unlock_out:
-   spin_unlock_irqrestore(&enable_lock, flags);
+   clk_enable_unlock(flags);
 
 out:
return;
@@ -403,7 +426,7 @@ static int clk_disable_unused(void)
 {
struct clk *clk;
 
-   mutex_lock(&prepare_lock);
+   clk_prepare_lock();
 
hlist_for_each_entry(clk, &clk_root_list, child_node)
clk_disable_unused_subtree(clk);
@@ -417,7 +440,7 @@ static int clk_disable_unused(void)
hlist_for_each_entry(clk, &clk_orphan_list, child_node)
clk_unprepare_unused_subtree(clk);
 
-   mutex_unlock(&prepare_lock);
+   clk_prepare_unlock();
 
return 0;
 }
@@ -600,9 +623,9 @@ void __clk_unprepare(struct clk *clk)
  */
 void clk_unprepare(struct clk *clk)
 {
-   mutex_lock(&prepare_lock);
+   clk_prepare_lock();
__clk_unprepare(clk);
-   mutex_unlock(&prepare_lock);
+   clk_prepare_unlock();
 }
 EXPORT_SYMBOL_GPL(clk_unprepare);
 
@@ -648,9 +671,9 @@ int clk_prepare(struct clk *clk)
 {
int ret;
 
-   mutex_lock(&prepare_lock);
+   clk_prepare_lock();
ret = __clk_prepare(clk);
-   mutex_unlock(&prepare_lock);
+   clk_prepare_unlock();
 
return ret;
 }
@@ -692,9 +715,9 @@ void clk_disable(struct clk *clk)
 {
unsigned long flags;
 
-   spin_lock_irqsave(&enable_lock, flags);
+   flags = clk_enable_lock();
__clk_disable(clk);
-   spin_unlock_irqrestore(&enable_lock, flags);
+   clk_enable_unlock(flags);

Re: [PATCH 1/2] clk: abstract locking out into helper functions

2013-03-28 Thread Thomas Gleixner
On Wed, 27 Mar 2013, Mike Turquette wrote:

> Create locking helpers for the global mutex and global spinlock.  The
> definitions of these helpers will be expanded upon in the next patch
> which introduces reentrancy into the locking scheme.

This looks way better. Nitpick below.
 
> +static void clk_enable_lock(unsigned long *flags)
> +{
> + spin_lock_irqsave(&enable_lock, *flags);
> +}

> +static void clk_enable_unlock(unsigned long *flags)

Please just hand in the flags, no need for indirection.

> +{
> + spin_unlock_irqrestore(&enable_lock, *flags);
> +}
> +
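
For comparison, the two calling conventions look like this at a call site. The
_v5/_v6 function names are just labels for the comparison; the bodies mirror the
clk_disable() hunks in the v1 patch below and the v6 patch above.

/* v5 and earlier: flags passed by reference */
void clk_disable_v5(struct clk *clk)
{
        unsigned long flags;

        clk_enable_lock(&flags);
        __clk_disable(clk);
        clk_enable_unlock(&flags);
}

/* v6: flags returned by value, no indirection needed */
void clk_disable_v6(struct clk *clk)
{
        unsigned long flags;

        flags = clk_enable_lock();
        __clk_disable(clk);
        clk_enable_unlock(flags);
}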

Reviewed-by: Thomas Gleixner 



[PATCH 1/2] clk: abstract locking out into helper functions

2013-03-27 Thread Mike Turquette
Create locking helpers for the global mutex and global spinlock.  The
definitions of these helpers will be expanded upon in the next patch
which introduces reentrancy into the locking scheme.

Signed-off-by: Mike Turquette 
Cc: Rajagopal Venkat 
Cc: David Brown 
Cc: Ulf Hansson 
Cc: Laurent Pinchart 
Cc: Thomas Gleixner 
---
 drivers/clk/clk.c |   97 -
 1 file changed, 59 insertions(+), 38 deletions(-)

diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index 5e8..bea47d5 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -27,6 +27,27 @@ static HLIST_HEAD(clk_root_list);
 static HLIST_HEAD(clk_orphan_list);
 static LIST_HEAD(clk_notifier_list);
 
+/***   locking ***/
+static void clk_prepare_lock(void)
+{
+   mutex_lock(&prepare_lock);
+}
+
+static void clk_prepare_unlock(void)
+{
+   mutex_unlock(&prepare_lock);
+}
+
+static void clk_enable_lock(unsigned long *flags)
+{
+   spin_lock_irqsave(&enable_lock, *flags);
+}
+
+static void clk_enable_unlock(unsigned long *flags)
+{
+   spin_unlock_irqrestore(&enable_lock, *flags);
+}
+
 /***debugfs support***/
 
 #ifdef CONFIG_COMMON_CLK_DEBUG
@@ -69,7 +90,7 @@ static int clk_summary_show(struct seq_file *s, void *data)
seq_printf(s, "   clockenable_cnt  prepare_cnt  
rate\n");
seq_printf(s, 
"-\n");
 
-   mutex_lock(&prepare_lock);
+   clk_prepare_lock();
 
hlist_for_each_entry(c, &clk_root_list, child_node)
clk_summary_show_subtree(s, c, 0);
@@ -77,7 +98,7 @@ static int clk_summary_show(struct seq_file *s, void *data)
hlist_for_each_entry(c, &clk_orphan_list, child_node)
clk_summary_show_subtree(s, c, 0);
 
-   mutex_unlock(&prepare_lock);
+   clk_prepare_unlock();
 
return 0;
 }
@@ -130,7 +151,7 @@ static int clk_dump(struct seq_file *s, void *data)
 
seq_printf(s, "{");
 
-   mutex_lock(&prepare_lock);
+   clk_prepare_lock();
 
hlist_for_each_entry(c, &clk_root_list, child_node) {
if (!first_node)
@@ -144,7 +165,7 @@ static int clk_dump(struct seq_file *s, void *data)
clk_dump_subtree(s, c, 0);
}
 
-   mutex_unlock(&prepare_lock);
+   clk_prepare_unlock();
 
seq_printf(s, "}");
return 0;
@@ -316,7 +337,7 @@ static int __init clk_debug_init(void)
if (!orphandir)
return -ENOMEM;
 
-   mutex_lock(&prepare_lock);
+   clk_prepare_lock();
 
hlist_for_each_entry(clk, &clk_root_list, child_node)
clk_debug_create_subtree(clk, rootdir);
@@ -326,7 +347,7 @@ static int __init clk_debug_init(void)
 
inited = 1;
 
-   mutex_unlock(&prepare_lock);
+   clk_prepare_unlock();
 
return 0;
 }
@@ -372,7 +393,7 @@ static void clk_disable_unused_subtree(struct clk *clk)
hlist_for_each_entry(child, &clk->children, child_node)
clk_disable_unused_subtree(child);
 
-   spin_lock_irqsave(&enable_lock, flags);
+   clk_enable_lock(&flags);
 
if (clk->enable_count)
goto unlock_out;
@@ -393,7 +414,7 @@ static void clk_disable_unused_subtree(struct clk *clk)
}
 
 unlock_out:
-   spin_unlock_irqrestore(&enable_lock, flags);
+   clk_enable_unlock(&flags);
 
 out:
return;
@@ -403,7 +424,7 @@ static int clk_disable_unused(void)
 {
struct clk *clk;
 
-   mutex_lock(&prepare_lock);
+   clk_prepare_lock();
 
hlist_for_each_entry(clk, &clk_root_list, child_node)
clk_disable_unused_subtree(clk);
@@ -417,7 +438,7 @@ static int clk_disable_unused(void)
hlist_for_each_entry(clk, &clk_orphan_list, child_node)
clk_unprepare_unused_subtree(clk);
 
-   mutex_unlock(&prepare_lock);
+   clk_prepare_unlock();
 
return 0;
 }
@@ -600,9 +621,9 @@ void __clk_unprepare(struct clk *clk)
  */
 void clk_unprepare(struct clk *clk)
 {
-   mutex_lock(&prepare_lock);
+   clk_prepare_lock();
__clk_unprepare(clk);
-   mutex_unlock(&prepare_lock);
+   clk_prepare_unlock();
 }
 EXPORT_SYMBOL_GPL(clk_unprepare);
 
@@ -648,9 +669,9 @@ int clk_prepare(struct clk *clk)
 {
int ret;
 
-   mutex_lock(&prepare_lock);
+   clk_prepare_lock();
ret = __clk_prepare(clk);
-   mutex_unlock(&prepare_lock);
+   clk_prepare_unlock();
 
return ret;
 }
@@ -692,9 +713,9 @@ void clk_disable(struct clk *clk)
 {
unsigned long flags;
 
-   spin_lock_irqsave(&enable_lock, flags);
+   clk_enable_lock(&flags);
__clk_disable(clk);
-   spin_unlock_irqrestore(&enable_lock, flags);
+   clk_enable_unlock(&flags);
 }
 EXPORT_SYMBOL_GPL(clk_disable);
 
@@ -745,9 +766,9 @@ int clk_enable(struct clk *clk)
unsigned long flags;
int ret;
 
-   spin_lock_irqsave(&enable_lock, flags);