On 18/07/19 20:38, Eric Hankland wrote:
> From: ehankland <[email protected]>
> 
> Updates KVM_CAP_PMU_EVENT_FILTER so it can also whitelist or blacklist
> fixed counters.
> 
> Signed-off-by: ehankland <[email protected]>

Very nice, thanks.

There's no need to check the padding for zero (flags can be defined to
give specific meanings in the future to the padding fields), so I removed
that loop and queued the patch.

Paolo

> ---
>  Documentation/virtual/kvm/api.txt | 13 ++++++++-----
>  arch/x86/include/uapi/asm/kvm.h   |  9 ++++++---
>  arch/x86/kvm/pmu.c                | 30 +++++++++++++++++++++++++-----
>  3 files changed, 39 insertions(+), 13 deletions(-)
> 
> diff --git a/Documentation/virtual/kvm/api.txt 
> b/Documentation/virtual/kvm/api.txt
> index 2cd6250b2896..96bcf1aa1931 100644
> --- a/Documentation/virtual/kvm/api.txt
> +++ b/Documentation/virtual/kvm/api.txt
> @@ -4090,17 +4090,20 @@ Parameters: struct kvm_pmu_event_filter (in)
>  Returns: 0 on success, -1 on error
>  
>  struct kvm_pmu_event_filter {
> -       __u32 action;
> -       __u32 nevents;
> -       __u64 events[0];
> +     __u32 action;
> +     __u32 nevents;
> +     __u32 fixed_counter_bitmap;
> +     __u32 flags;
> +     __u32 pad[4];
> +     __u64 events[0];
>  };
>  
>  This ioctl restricts the set of PMU events that the guest can program.
>  The argument holds a list of events which will be allowed or denied.
>  The eventsel+umask of each event the guest attempts to program is compared
>  against the events field to determine whether the guest should have access.
> -This only affects general purpose counters; fixed purpose counters can
> -be disabled by changing the perfmon CPUID leaf.
> +The events field only controls general purpose counters; fixed purpose
> +counters are controlled by the fixed_counter_bitmap.
>  
>  Valid values for 'action':
>  #define KVM_PMU_EVENT_ALLOW 0
> diff --git a/arch/x86/include/uapi/asm/kvm.h b/arch/x86/include/uapi/asm/kvm.h
> index e901b0ab116f..503d3f42da16 100644
> --- a/arch/x86/include/uapi/asm/kvm.h
> +++ b/arch/x86/include/uapi/asm/kvm.h
> @@ -435,9 +435,12 @@ struct kvm_nested_state {
>  
>  /* for KVM_CAP_PMU_EVENT_FILTER */
>  struct kvm_pmu_event_filter {
> -       __u32 action;
> -       __u32 nevents;
> -       __u64 events[0];
> +     __u32 action;
> +     __u32 nevents;
> +     __u32 fixed_counter_bitmap;
> +     __u32 flags;
> +     __u32 pad[4];
> +     __u64 events[0];
>  };
>  
>  #define KVM_PMU_EVENT_ALLOW 0
> diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c
> index aa5a2597305a..ae5cd1b02086 100644
> --- a/arch/x86/kvm/pmu.c
> +++ b/arch/x86/kvm/pmu.c
> @@ -19,8 +19,8 @@
>  #include "lapic.h"
>  #include "pmu.h"
>  
> -/* This keeps the total size of the filter under 4k. */
> -#define KVM_PMU_EVENT_FILTER_MAX_EVENTS 63
> +/* This is enough to filter the vast majority of currently defined events. */
> +#define KVM_PMU_EVENT_FILTER_MAX_EVENTS 300
>  
>  /* NOTE:
>   * - Each perf counter is defined as "struct kvm_pmc";
> @@ -206,12 +206,25 @@ void reprogram_fixed_counter(struct kvm_pmc *pmc, u8 
> ctrl, int idx)
>  {
>       unsigned en_field = ctrl & 0x3;
>       bool pmi = ctrl & 0x8;
> +     struct kvm_pmu_event_filter *filter;
> +     struct kvm *kvm = pmc->vcpu->kvm;
> +
>  
>       pmc_stop_counter(pmc);
>  
>       if (!en_field || !pmc_is_enabled(pmc))
>               return;
>  
> +     filter = srcu_dereference(kvm->arch.pmu_event_filter, &kvm->srcu);
> +     if (filter) {
> +             if (filter->action == KVM_PMU_EVENT_DENY &&
> +                 test_bit(idx, (ulong *)&filter->fixed_counter_bitmap))
> +                     return;
> +             if (filter->action == KVM_PMU_EVENT_ALLOW &&
> +                 !test_bit(idx, (ulong *)&filter->fixed_counter_bitmap))
> +                     return;
> +     }
> +
>       pmc_reprogram_counter(pmc, PERF_TYPE_HARDWARE,
>                             kvm_x86_ops->pmu_ops->find_fixed_event(idx),
>                             !(en_field & 0x2), /* exclude user */
> @@ -376,7 +389,7 @@ int kvm_vm_ioctl_set_pmu_event_filter(struct kvm *kvm, 
> void __user *argp)
>  {
>       struct kvm_pmu_event_filter tmp, *filter;
>       size_t size;
> -     int r;
> +     int r, i;
>  
>       if (copy_from_user(&tmp, argp, sizeof(tmp)))
>               return -EFAULT;
> @@ -385,6 +398,13 @@ int kvm_vm_ioctl_set_pmu_event_filter(struct kvm *kvm, 
> void __user *argp)
>           tmp.action != KVM_PMU_EVENT_DENY)
>               return -EINVAL;
>  
> +     if (tmp.flags != 0)
> +             return -EINVAL;
> +
> +     for (i = 0; i < ARRAY_SIZE(tmp.pad); i++)
> +             if (tmp.pad[i] != 0)
> +                     return -EINVAL;
> +
>       if (tmp.nevents > KVM_PMU_EVENT_FILTER_MAX_EVENTS)
>               return -E2BIG;
>  
> @@ -406,8 +426,8 @@ int kvm_vm_ioctl_set_pmu_event_filter(struct kvm *kvm, 
> void __user *argp)
>       mutex_unlock(&kvm->lock);
>  
>       synchronize_srcu_expedited(&kvm->srcu);
> -     r = 0;
> +     r = 0;
>  cleanup:
>       kfree(filter);
> -     return r;
> +     return r;
>  }
> 

Reply via email to