Currently, perf_event_period() is only usable by user tools via ioctl. To
follow the existing naming convention, export perf_event_period() for
kernel users (such as KVM) that may need to recalibrate the event period
for their assigned counters according to their requirements.

perf_event_period() is an external accessor, just like
perf_event_{en,dis}able(), and should thus use perf_event_ctx_lock().

Suggested-by: Kan Liang <kan.li...@linux.intel.com>
Signed-off-by: Like Xu <like...@linux.intel.com>
---
 include/linux/perf_event.h |  5 +++++
 kernel/events/core.c       | 28 +++++++++++++++++++++-------
 2 files changed, 26 insertions(+), 7 deletions(-)

diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 61448c19a132..d601df36e671 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -1336,6 +1336,7 @@ extern void perf_event_disable_local(struct perf_event *event);
 extern void perf_event_disable_inatomic(struct perf_event *event);
 extern void perf_event_task_tick(void);
 extern int perf_event_account_interrupt(struct perf_event *event);
+extern int perf_event_period(struct perf_event *event, u64 value);
 #else /* !CONFIG_PERF_EVENTS: */
 static inline void *
 perf_aux_output_begin(struct perf_output_handle *handle,
@@ -1415,6 +1416,10 @@ static inline void perf_event_disable(struct perf_event *event)          { }
 static inline int __perf_event_disable(void *info)                     { return -1; }
 static inline void perf_event_task_tick(void)                          { }
 static inline int perf_event_release_kernel(struct perf_event *event)  { return 0; }
+static inline int perf_event_period(struct perf_event *event, u64 value)
+{
+       return -EINVAL;
+}
 #endif
 
 #if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_INTEL)
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 9ec0b0bfddbd..e1b83d2731da 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -5106,16 +5106,11 @@ static int perf_event_check_period(struct perf_event *event, u64 value)
        return event->pmu->check_period(event, value);
 }
 
-static int perf_event_period(struct perf_event *event, u64 __user *arg)
+static int _perf_event_period(struct perf_event *event, u64 value)
 {
-       u64 value;
-
        if (!is_sampling_event(event))
                return -EINVAL;
 
-       if (copy_from_user(&value, arg, sizeof(value)))
-               return -EFAULT;
-
        if (!value)
                return -EINVAL;
 
@@ -5133,6 +5128,19 @@ static int perf_event_period(struct perf_event *event, u64 __user *arg)
        return 0;
 }
 
+int perf_event_period(struct perf_event *event, u64 value)
+{
+       struct perf_event_context *ctx;
+       int ret;
+
+       ctx = perf_event_ctx_lock(event);
+       ret = _perf_event_period(event, value);
+       perf_event_ctx_unlock(event, ctx);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(perf_event_period);
+
 static const struct file_operations perf_fops;
 
 static inline int perf_fget_light(int fd, struct fd *p)
@@ -5176,8 +5184,14 @@ static long _perf_ioctl(struct perf_event *event, unsigned int cmd, unsigned long arg)
                return _perf_event_refresh(event, arg);
 
        case PERF_EVENT_IOC_PERIOD:
-               return perf_event_period(event, (u64 __user *)arg);
+       {
+               u64 value;
+
+               if (copy_from_user(&value, (u64 __user *)arg, sizeof(value)))
+                       return -EFAULT;
 
+               return _perf_event_period(event, value);
+       }
        case PERF_EVENT_IOC_ID:
        {
                u64 id = primary_event_id(event);
-- 
2.21.0

Reply via email to