Re: [PATCH] kernel/kprobes.c: move kretprobe implementation to CONFIG_KRETPROBES area

2014-02-04 Thread Chen Gang
On 02/05/2014 01:00 PM, Masami Hiramatsu wrote:
> (2014/02/05 12:36), Chen Gang wrote:
>> > When CONFIG_KRETPROBES is disabled, the kretprobe implementation is
>> > useless, so move it into a CONFIG_KRETPROBES-guarded area.
>> > 
>> >  - Move all kretprobe* code into the CONFIG_KRETPROBES area and provide
>> >    dummy stubs outside of it.
>> >  - Define kretprobe_flush_task() so that kprobe_flush_task() can call it.
>> >  - Define init_kretprobes() so that init_kprobes() can call it.
>> > 
>> > 
> Looks good to me ;)
> 
> Acked-by: Masami Hiramatsu <masami.hiramatsu...@hitachi.com>
> 

Thank you very much!!

:-)


>> > Signed-off-by: Chen Gang <gang.chen.5...@gmail.com>
>> > ---
>> >  kernel/kprobes.c | 323 +++
>> >  1 file changed, 181 insertions(+), 142 deletions(-)
>> > 
>> > diff --git a/kernel/kprobes.c b/kernel/kprobes.c
>> > index ceeadfc..0619536 100644
>> > --- a/kernel/kprobes.c
>> > +++ b/kernel/kprobes.c
>> > @@ -69,7 +69,6 @@
>> >  
>> >  static int kprobes_initialized;
>> >  static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
>> > -static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];
>> >  
>> >  /* NOTE: change this value only with kprobe_mutex held */
>> >  static bool kprobes_all_disarmed;
>> > @@ -77,14 +76,6 @@ static bool kprobes_all_disarmed;
>> >  /* This protects kprobe_table and optimizing_list */
>> >  static DEFINE_MUTEX(kprobe_mutex);
>> >  static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;
>> > -static struct {
>> > -  raw_spinlock_t lock ____cacheline_aligned_in_smp;
>> > -} kretprobe_table_locks[KPROBE_TABLE_SIZE];
>> > -
>> > -static raw_spinlock_t *kretprobe_table_lock_ptr(unsigned long hash)
>> > -{
>> > -  return &(kretprobe_table_locks[hash].lock);
>> > -}
>> >  
>> >  /*
>> >   * Normally, functions that we'd want to prohibit kprobes in, are marked
>> > @@ -1079,125 +1070,6 @@ void __kprobes kprobes_inc_nmissed_count(struct kprobe *p)
>> >return;
>> >  }
>> >  
>> > -void __kprobes recycle_rp_inst(struct kretprobe_instance *ri,
>> > -  struct hlist_head *head)
>> > -{
>> > -  struct kretprobe *rp = ri->rp;
>> > -
>> > -  /* remove rp inst off the rprobe_inst_table */
>> > -  hlist_del(&ri->hlist);
>> > -  INIT_HLIST_NODE(&ri->hlist);
>> > -  if (likely(rp)) {
>> > -  raw_spin_lock(&rp->lock);
>> > -  hlist_add_head(&ri->hlist, &rp->free_instances);
>> > -  raw_spin_unlock(&rp->lock);
>> > -  } else
>> > -  /* Unregistering */
>> > -  hlist_add_head(&ri->hlist, head);
>> > -}
>> > -
>> > -void __kprobes kretprobe_hash_lock(struct task_struct *tsk,
>> > -   struct hlist_head **head, unsigned long *flags)
>> > -__acquires(hlist_lock)
>> > -{
>> > -  unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
>> > -  raw_spinlock_t *hlist_lock;
>> > -
>> > -  *head = &kretprobe_inst_table[hash];
>> > -  hlist_lock = kretprobe_table_lock_ptr(hash);
>> > -  raw_spin_lock_irqsave(hlist_lock, *flags);
>> > -}
>> > -
>> > -static void __kprobes kretprobe_table_lock(unsigned long hash,
>> > -  unsigned long *flags)
>> > -__acquires(hlist_lock)
>> > -{
>> > -  raw_spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
>> > -  raw_spin_lock_irqsave(hlist_lock, *flags);
>> > -}
>> > -
>> > -void __kprobes kretprobe_hash_unlock(struct task_struct *tsk,
>> > -  unsigned long *flags)
>> > -__releases(hlist_lock)
>> > -{
>> > -  unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
>> > -  raw_spinlock_t *hlist_lock;
>> > -
>> > -  hlist_lock = kretprobe_table_lock_ptr(hash);
>> > -  raw_spin_unlock_irqrestore(hlist_lock, *flags);
>> > -}
>> > -
>> > -static void __kprobes kretprobe_table_unlock(unsigned long hash,
>> > -   unsigned long *flags)
>> > -__releases(hlist_lock)
>> > -{
>> > -  raw_spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
>> > -  raw_spin_unlock_irqrestore(hlist_lock, *flags);
>> > -}
>> > -
>> > -/*
>> > - * This function is called from finish_task_switch when task tk becomes dead,
>> > - * so that we can recycle any function-return probe instances associated
>> > - * with this task. These left over instances represent probed functions
>> > - * that have been called but will never return.
>> > - */
>> > -void __kprobes kprobe_flush_task(struct task_struct *tk)
>> > -{
>> > -  struct kretprobe_instance *ri;
>> > -  struct hlist_head *head, empty_rp;
>> > -  struct hlist_node *tmp;
>> > -  unsigned long hash, flags = 0;
>> > -
>> > -  if (unlikely(!kprobes_initialized))
>> > -  /* Early boot.  kretprobe_table_locks not yet initialized. */
>> > -  return;
>> > -
>> > -  INIT_HLIST_HEAD(&empty_rp);
>> > -  hash = hash_ptr(tk, KPROBE_HASH_BITS);
>> > -  head = &kretprobe_inst_table[hash];
>> > -  kretprobe_table_lock(hash, &flags);
>> > -  hlist_for_each_entry_safe(ri, tmp, head, hlist) {
>> > -  if (ri->task == tk)
>> > -  recycle_rp_inst(ri, &empty_rp);
>> > -  }
>> > -  kretprobe_table_unlock(hash, &flags);
>> > -  hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
>> > -  hlist_del(&ri->hlist);
>> > -  kfree(ri);
>> > -  }

Re: [PATCH] kernel/kprobes.c: move kretprobe implementation to CONFIG_KRETPROBES area

2014-02-04 Thread Masami Hiramatsu
(2014/02/05 12:36), Chen Gang wrote:
> When CONFIG_KRETPROBES is disabled, the kretprobe implementation is
> useless, so move it into a CONFIG_KRETPROBES-guarded area.
> 
>  - Move all kretprobe* code into the CONFIG_KRETPROBES area and provide
>    dummy stubs outside of it.
>  - Define kretprobe_flush_task() so that kprobe_flush_task() can call it.
>  - Define init_kretprobes() so that init_kprobes() can call it.
> 
> 

Looks good to me ;)

Acked-by: Masami Hiramatsu <masami.hiramatsu...@hitachi.com>

> Signed-off-by: Chen Gang <gang.chen.5...@gmail.com>
> ---
>  kernel/kprobes.c | 323 +++
>  1 file changed, 181 insertions(+), 142 deletions(-)
> 
> diff --git a/kernel/kprobes.c b/kernel/kprobes.c
> index ceeadfc..0619536 100644
> --- a/kernel/kprobes.c
> +++ b/kernel/kprobes.c
> @@ -69,7 +69,6 @@
>  
>  static int kprobes_initialized;
>  static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
> -static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];
>  
>  /* NOTE: change this value only with kprobe_mutex held */
>  static bool kprobes_all_disarmed;
> @@ -77,14 +76,6 @@ static bool kprobes_all_disarmed;
>  /* This protects kprobe_table and optimizing_list */
>  static DEFINE_MUTEX(kprobe_mutex);
>  static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;
> -static struct {
> - raw_spinlock_t lock ____cacheline_aligned_in_smp;
> -} kretprobe_table_locks[KPROBE_TABLE_SIZE];
> -
> -static raw_spinlock_t *kretprobe_table_lock_ptr(unsigned long hash)
> -{
> - return &(kretprobe_table_locks[hash].lock);
> -}
>  
>  /*
>   * Normally, functions that we'd want to prohibit kprobes in, are marked
> @@ -1079,125 +1070,6 @@ void __kprobes kprobes_inc_nmissed_count(struct kprobe *p)
>   return;
>  }
>  
> -void __kprobes recycle_rp_inst(struct kretprobe_instance *ri,
> - struct hlist_head *head)
> -{
> - struct kretprobe *rp = ri->rp;
> -
> - /* remove rp inst off the rprobe_inst_table */
> - hlist_del(&ri->hlist);
> - INIT_HLIST_NODE(&ri->hlist);
> - if (likely(rp)) {
> - raw_spin_lock(&rp->lock);
> - hlist_add_head(&ri->hlist, &rp->free_instances);
> - raw_spin_unlock(&rp->lock);
> - } else
> - /* Unregistering */
> - hlist_add_head(&ri->hlist, head);
> -}
> -
> -void __kprobes kretprobe_hash_lock(struct task_struct *tsk,
> -  struct hlist_head **head, unsigned long *flags)
> -__acquires(hlist_lock)
> -{
> - unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
> - raw_spinlock_t *hlist_lock;
> -
> - *head = &kretprobe_inst_table[hash];
> - hlist_lock = kretprobe_table_lock_ptr(hash);
> - raw_spin_lock_irqsave(hlist_lock, *flags);
> -}
> -
> -static void __kprobes kretprobe_table_lock(unsigned long hash,
> - unsigned long *flags)
> -__acquires(hlist_lock)
> -{
> - raw_spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
> - raw_spin_lock_irqsave(hlist_lock, *flags);
> -}
> -
> -void __kprobes kretprobe_hash_unlock(struct task_struct *tsk,
> - unsigned long *flags)
> -__releases(hlist_lock)
> -{
> - unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
> - raw_spinlock_t *hlist_lock;
> -
> - hlist_lock = kretprobe_table_lock_ptr(hash);
> - raw_spin_unlock_irqrestore(hlist_lock, *flags);
> -}
> -
> -static void __kprobes kretprobe_table_unlock(unsigned long hash,
> -   unsigned long *flags)
> -__releases(hlist_lock)
> -{
> - raw_spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
> - raw_spin_unlock_irqrestore(hlist_lock, *flags);
> -}
> -
> -/*
> - * This function is called from finish_task_switch when task tk becomes dead,
> - * so that we can recycle any function-return probe instances associated
> - * with this task. These left over instances represent probed functions
> - * that have been called but will never return.
> - */
> -void __kprobes kprobe_flush_task(struct task_struct *tk)
> -{
> - struct kretprobe_instance *ri;
> - struct hlist_head *head, empty_rp;
> - struct hlist_node *tmp;
> - unsigned long hash, flags = 0;
> -
> - if (unlikely(!kprobes_initialized))
> - /* Early boot.  kretprobe_table_locks not yet initialized. */
> - return;
> -
> - INIT_HLIST_HEAD(&empty_rp);
> - hash = hash_ptr(tk, KPROBE_HASH_BITS);
> - head = &kretprobe_inst_table[hash];
> - kretprobe_table_lock(hash, &flags);
> - hlist_for_each_entry_safe(ri, tmp, head, hlist) {
> - if (ri->task == tk)
> - recycle_rp_inst(ri, &empty_rp);
> - }
> - kretprobe_table_unlock(hash, &flags);
> - hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
> - hlist_del(&ri->hlist);
> - kfree(ri);
> - }
> -}
> -
> -static inline void free_rp_inst(struct kretprobe *rp)
> -{
> - struct kretprobe_instance *ri;
> - struct hlist_node *next;
> -
> - hlist_for_each_entry_safe(ri, next, &rp->free_instances, hlist) {
> - hlist_del(&ri->hlist);
> - kfree(ri);
> - }
> -}
> -
> -static void __kprobes 

[PATCH] kernel/kprobes.c: move kretprobe implementation to CONFIG_KRETPROBES area

2014-02-04 Thread Chen Gang
When CONFIG_KRETPROBES is disabled, the kretprobe implementation is useless,
so move it into a CONFIG_KRETPROBES-guarded area.

 - Move all kretprobe* code into the CONFIG_KRETPROBES area and provide
   dummy stubs outside of it.
 - Define kretprobe_flush_task() so that kprobe_flush_task() can call it.
 - Define init_kretprobes() so that init_kprobes() can call it.


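For readers skimming the (truncated) diff below, here is a minimal sketch of
the layout the changelog describes for kernel/kprobes.c: the kretprobe data
and code grouped under #ifdef CONFIG_KRETPROBES, with dummy stubs outside it
so that kprobe_flush_task() and init_kprobes() can call kretprobe_flush_task()
and init_kretprobes() unconditionally. This sketch is not taken from the patch
itself; the stub bodies, the initialization loop, and the -ENOSYS fallback for
register_kretprobe() are illustrative assumptions.

#ifdef CONFIG_KRETPROBES

static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];

static struct {
	raw_spinlock_t lock ____cacheline_aligned_in_smp;
} kretprobe_table_locks[KPROBE_TABLE_SIZE];

/* recycle_rp_inst(), kretprobe_hash_lock()/unlock(), register_kretprobe(),
 * and the rest of the kretprobe code removed in the diff below live here. */

/* Called from kprobe_flush_task() to recycle the return-probe instances of a
 * dying task (the kretprobe half of the old kprobe_flush_task()). */
static void kretprobe_flush_task(struct task_struct *tk)
{
	/* walk kretprobe_inst_table[hash_ptr(tk, KPROBE_HASH_BITS)] ... */
}

/* Called from init_kprobes() to set up the kretprobe hash table and locks. */
static void __init init_kretprobes(void)
{
	int i;

	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		INIT_HLIST_HEAD(&kretprobe_inst_table[i]);
		raw_spin_lock_init(&kretprobe_table_locks[i].lock);
	}
}

#else /* !CONFIG_KRETPROBES */

/* Dummy stubs so the callers stay free of #ifdefs. */
static void kretprobe_flush_task(struct task_struct *tk)
{
}

static void __init init_kretprobes(void)
{
}

int __kprobes register_kretprobe(struct kretprobe *rp)
{
	return -ENOSYS;
}

#endif /* CONFIG_KRETPROBES */
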
Signed-off-by: Chen Gang <gang.chen.5...@gmail.com>
---
 kernel/kprobes.c | 323 +++
 1 file changed, 181 insertions(+), 142 deletions(-)

diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index ceeadfc..0619536 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -69,7 +69,6 @@
 
 static int kprobes_initialized;
 static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
-static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];
 
 /* NOTE: change this value only with kprobe_mutex held */
 static bool kprobes_all_disarmed;
@@ -77,14 +76,6 @@ static bool kprobes_all_disarmed;
 /* This protects kprobe_table and optimizing_list */
 static DEFINE_MUTEX(kprobe_mutex);
 static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;
-static struct {
-   raw_spinlock_t lock ____cacheline_aligned_in_smp;
-} kretprobe_table_locks[KPROBE_TABLE_SIZE];
-
-static raw_spinlock_t *kretprobe_table_lock_ptr(unsigned long hash)
-{
-   return &(kretprobe_table_locks[hash].lock);
-}
 
 /*
  * Normally, functions that we'd want to prohibit kprobes in, are marked
@@ -1079,125 +1070,6 @@ void __kprobes kprobes_inc_nmissed_count(struct kprobe *p)
return;
 }
 
-void __kprobes recycle_rp_inst(struct kretprobe_instance *ri,
-   struct hlist_head *head)
-{
-   struct kretprobe *rp = ri->rp;
-
-   /* remove rp inst off the rprobe_inst_table */
-   hlist_del(&ri->hlist);
-   INIT_HLIST_NODE(&ri->hlist);
-   if (likely(rp)) {
-   raw_spin_lock(&rp->lock);
-   hlist_add_head(&ri->hlist, &rp->free_instances);
-   raw_spin_unlock(&rp->lock);
-   } else
-   /* Unregistering */
-   hlist_add_head(&ri->hlist, head);
-}
-
-void __kprobes kretprobe_hash_lock(struct task_struct *tsk,
-struct hlist_head **head, unsigned long *flags)
-__acquires(hlist_lock)
-{
-   unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
-   raw_spinlock_t *hlist_lock;
-
-   *head = &kretprobe_inst_table[hash];
-   hlist_lock = kretprobe_table_lock_ptr(hash);
-   raw_spin_lock_irqsave(hlist_lock, *flags);
-}
-
-static void __kprobes kretprobe_table_lock(unsigned long hash,
-   unsigned long *flags)
-__acquires(hlist_lock)
-{
-   raw_spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
-   raw_spin_lock_irqsave(hlist_lock, *flags);
-}
-
-void __kprobes kretprobe_hash_unlock(struct task_struct *tsk,
-   unsigned long *flags)
-__releases(hlist_lock)
-{
-   unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
-   raw_spinlock_t *hlist_lock;
-
-   hlist_lock = kretprobe_table_lock_ptr(hash);
-   raw_spin_unlock_irqrestore(hlist_lock, *flags);
-}
-
-static void __kprobes kretprobe_table_unlock(unsigned long hash,
-   unsigned long *flags)
-__releases(hlist_lock)
-{
-   raw_spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
-   raw_spin_unlock_irqrestore(hlist_lock, *flags);
-}
-
-/*
- * This function is called from finish_task_switch when task tk becomes dead,
- * so that we can recycle any function-return probe instances associated
- * with this task. These left over instances represent probed functions
- * that have been called but will never return.
- */
-void __kprobes kprobe_flush_task(struct task_struct *tk)
-{
-   struct kretprobe_instance *ri;
-   struct hlist_head *head, empty_rp;
-   struct hlist_node *tmp;
-   unsigned long hash, flags = 0;
-
-   if (unlikely(!kprobes_initialized))
-   /* Early boot.  kretprobe_table_locks not yet initialized. */
-   return;
-
-   INIT_HLIST_HEAD(&empty_rp);
-   hash = hash_ptr(tk, KPROBE_HASH_BITS);
-   head = &kretprobe_inst_table[hash];
-   kretprobe_table_lock(hash, &flags);
-   hlist_for_each_entry_safe(ri, tmp, head, hlist) {
-   if (ri->task == tk)
-   recycle_rp_inst(ri, &empty_rp);
-   }
-   kretprobe_table_unlock(hash, &flags);
-   hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
-   hlist_del(&ri->hlist);
-   kfree(ri);
-   }
-}
-
-static inline void free_rp_inst(struct kretprobe *rp)
-{
-   struct kretprobe_instance *ri;
-   struct hlist_node *next;
-
-   hlist_for_each_entry_safe(ri, next, &rp->free_instances, hlist) {
-   hlist_del(&ri->hlist);
-   kfree(ri);
-   }
-}
-
-static void __kprobes cleanup_rp_inst(struct kretprobe *rp)
-{
-   unsigned long flags, hash;
-   struct kretprobe_instance *ri;
-   struct hlist_node *next;
-   struct hlist_head *head;
-
-   /* No race here */
-   for (hash = 0; hash < KPROBE_TABLE_SIZE; hash++) {
- 
