Re: [rt-patch 3/3] arm, KVM: convert vgic_irq.irq_lock to raw_spinlock_t

2018-07-30 Thread Mike Galbraith
On Mon, 2018-07-30 at 11:27 +0200, Peter Zijlstra wrote:
> 
> The thing missing from the Changelog is the analysis that all the work
> done under these locks is indeed properly bounded and cannot cause
> excessive latencies.

True, I have no idea what worst case hold times are.  Nothing poked me
dead in the eye when looking around in completely alien code, nor did
cyclictest inspire concern running on box with no base of comparison.

I do know that latency is now < infinity, a modest improvement ;-)

-Mike


Re: [rt-patch 3/3] arm, KVM: convert vgic_irq.irq_lock to raw_spinlock_t

2018-07-30 Thread Mike Galbraith
On Mon, 2018-07-30 at 11:27 +0200, Peter Zijlstra wrote:
> 
> The thing missing from the Changelog is the analysis that all the work
> done under these locks is indeed properly bounded and cannot cause
> excessive latencies.

True, I have no idea what worst case hold times are.  Nothing poked me
dead in the eye when looking around in completely alien code, nor did
cyclictest inspire concern running on box with no base of comparison.

I do know that latency is now < infinity, a modest improvement ;-)

-Mike


Re: [rt-patch 3/3] arm, KVM: convert vgic_irq.irq_lock to raw_spinlock_t

2018-07-30 Thread Peter Zijlstra
On Sat, Jul 28, 2018 at 11:07:33AM +0200, Mike Galbraith wrote:
> 
> b103cc3f10c0 ("KVM: arm/arm64: Avoid timer save/restore in vcpu entry/exit")
> requires vgic_irq.irq_lock be converted to raw_spinlock_t.
> 
> Problem: kvm_preempt_ops.sched_in = kvm_sched_in;
>kvm_sched_in()
>   kvm_arch_vcpu_load()
>  kvm_timer_vcpu_load() <- b103cc3f10c0 addition
> kvm_timer_vcpu_load_gic()
>kvm_vgic_map_is_active()
>   spin_lock_irqsave(&irq->irq_lock, flags);
> 
> Quoting virt/kvm/arm/vgic/vgic.c, locking order is...
> 
>   kvm->lock (mutex)
> its->cmd_lock (mutex)
>   its->its_lock (mutex)
> vgic_cpu->ap_list_lock must be taken with IRQs disabled
>   kvm->lpi_list_lock   must be taken with IRQs disabled
> vgic_irq->irq_lock must be taken with IRQs disabled
> 
> ...meaning vgic_dist.lpi_list_lock and vgic_cpu.ap_list_lock must be
> converted as well.

The thing missing from the Changelog is the analysis that all the work
done under these locks is indeed properly bounded and cannot cause
excessive latencies.


Re: [rt-patch 3/3] arm, KVM: convert vgic_irq.irq_lock to raw_spinlock_t

2018-07-30 Thread Peter Zijlstra
On Sat, Jul 28, 2018 at 11:07:33AM +0200, Mike Galbraith wrote:
> 
> b103cc3f10c0 ("KVM: arm/arm64: Avoid timer save/restore in vcpu entry/exit")
> requires vgic_irq.irq_lock be converted to raw_spinlock_t.
> 
> Problem: kvm_preempt_ops.sched_in = kvm_sched_in;
>kvm_sched_in()
>   kvm_arch_vcpu_load()
>  kvm_timer_vcpu_load() <- b103cc3f10c0 addition
> kvm_timer_vcpu_load_gic()
>kvm_vgic_map_is_active()
>   spin_lock_irqsave(&irq->irq_lock, flags);
> 
> Quoting virt/kvm/arm/vgic/vgic.c, locking order is...
> 
>   kvm->lock (mutex)
> its->cmd_lock (mutex)
>   its->its_lock (mutex)
> vgic_cpu->ap_list_lock must be taken with IRQs disabled
>   kvm->lpi_list_lock   must be taken with IRQs disabled
> vgic_irq->irq_lock must be taken with IRQs disabled
> 
> ...meaning vgic_dist.lpi_list_lock and vgic_cpu.ap_list_lock must be
> converted as well.

The thing missing from the Changelog is the analysis that all the work
done under these locks is indeed properly bounded and cannot cause
excessive latencies.


[rt-patch 3/3] arm, KVM: convert vgic_irq.irq_lock to raw_spinlock_t

2018-07-28 Thread Mike Galbraith


b103cc3f10c0 ("KVM: arm/arm64: Avoid timer save/restore in vcpu entry/exit")
requires vgic_irq.irq_lock be converted to raw_spinlock_t.

Problem: kvm_preempt_ops.sched_in = kvm_sched_in;
   kvm_sched_in()
  kvm_arch_vcpu_load()
 kvm_timer_vcpu_load() <- b103cc3f10c0 addition
kvm_timer_vcpu_load_gic()
   kvm_vgic_map_is_active()
  spin_lock_irqsave(&irq->irq_lock, flags);

Quoting virt/kvm/arm/vgic/vgic.c, locking order is...

  kvm->lock (mutex)
its->cmd_lock (mutex)
  its->its_lock (mutex)
vgic_cpu->ap_list_lock must be taken with IRQs disabled
  kvm->lpi_list_lock   must be taken with IRQs disabled
vgic_irq->irq_lock must be taken with IRQs disabled

...meaning vgic_dist.lpi_list_lock and vgic_cpu.ap_list_lock must be
converted as well.

Signed-off-by: Mike Galbraith 
---
 include/kvm/arm_vgic.h   |6 -
 virt/kvm/arm/vgic/vgic-debug.c   |4 -
 virt/kvm/arm/vgic/vgic-init.c|8 +-
 virt/kvm/arm/vgic/vgic-its.c |   22 +++
 virt/kvm/arm/vgic/vgic-mmio-v2.c |   14 ++--
 virt/kvm/arm/vgic/vgic-mmio-v3.c |   10 +--
 virt/kvm/arm/vgic/vgic-mmio.c|   34 +--
 virt/kvm/arm/vgic/vgic-v2.c  |4 -
 virt/kvm/arm/vgic/vgic-v3.c  |8 +-
 virt/kvm/arm/vgic/vgic.c |  120 +++
 10 files changed, 115 insertions(+), 115 deletions(-)

--- a/include/kvm/arm_vgic.h
+++ b/include/kvm/arm_vgic.h
@@ -96,7 +96,7 @@ enum vgic_irq_config {
 };
 
 struct vgic_irq {
-   spinlock_t irq_lock;/* Protects the content of the struct */
+   raw_spinlock_t irq_lock;/* Protects the content of the struct */
struct list_head lpi_list;  /* Used to link all LPIs together */
struct list_head ap_list;
 
@@ -243,7 +243,7 @@ struct vgic_dist {
u64 propbaser;
 
/* Protects the lpi_list and the count value below. */
-   spinlock_t  lpi_list_lock;
+   raw_spinlock_t  lpi_list_lock;
struct list_headlpi_list_head;
int lpi_list_count;
 
@@ -296,7 +296,7 @@ struct vgic_cpu {
unsigned int used_lrs;
struct vgic_irq private_irqs[VGIC_NR_PRIVATE_IRQS];
 
-   spinlock_t ap_list_lock;/* Protects the ap_list */
+   raw_spinlock_t ap_list_lock;/* Protects the ap_list */
 
/*
 * List of IRQs that this VCPU should consider because they are either
--- a/virt/kvm/arm/vgic/vgic-debug.c
+++ b/virt/kvm/arm/vgic/vgic-debug.c
@@ -228,9 +228,9 @@ static int vgic_debug_show(struct seq_fi
	irq = &kvm->arch.vgic.spis[iter->intid - VGIC_NR_PRIVATE_IRQS];
}
 
-   spin_lock_irqsave(&irq->irq_lock, flags);
+   raw_spin_lock_irqsave(&irq->irq_lock, flags);
	print_irq_state(s, irq, vcpu);
-   spin_unlock_irqrestore(&irq->irq_lock, flags);
+   raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
 
return 0;
 }
--- a/virt/kvm/arm/vgic/vgic-init.c
+++ b/virt/kvm/arm/vgic/vgic-init.c
@@ -64,7 +64,7 @@ void kvm_vgic_early_init(struct kvm *kvm
	struct vgic_dist *dist = &kvm->arch.vgic;

	INIT_LIST_HEAD(&dist->lpi_list_head);
-   spin_lock_init(&dist->lpi_list_lock);
+   raw_spin_lock_init(&dist->lpi_list_lock);
 }
 
 /**
@@ -80,7 +80,7 @@ void kvm_vgic_vcpu_early_init(struct kvm
int i;
 
	INIT_LIST_HEAD(&vgic_cpu->ap_list_head);
-   spin_lock_init(&vgic_cpu->ap_list_lock);
+   raw_spin_lock_init(&vgic_cpu->ap_list_lock);
 
/*
 * Enable and configure all SGIs to be edge-triggered and
@@ -90,7 +90,7 @@ void kvm_vgic_vcpu_early_init(struct kvm
	struct vgic_irq *irq = &vgic_cpu->private_irqs[i];

	INIT_LIST_HEAD(&irq->ap_list);
-		spin_lock_init(&irq->irq_lock);
+		raw_spin_lock_init(&irq->irq_lock);
irq->intid = i;
irq->vcpu = NULL;
irq->target_vcpu = vcpu;
@@ -214,7 +214,7 @@ static int kvm_vgic_dist_init(struct kvm
 
irq->intid = i + VGIC_NR_PRIVATE_IRQS;
	INIT_LIST_HEAD(&irq->ap_list);
-   spin_lock_init(&irq->irq_lock);
+   raw_spin_lock_init(&irq->irq_lock);
	irq->vcpu = NULL;
	irq->target_vcpu = vcpu0;
	kref_init(&irq->refcount);
--- a/virt/kvm/arm/vgic/vgic-its.c
+++ b/virt/kvm/arm/vgic/vgic-its.c
@@ -65,14 +65,14 @@ static struct vgic_irq *vgic_add_lpi(str
 
	INIT_LIST_HEAD(&irq->lpi_list);
	INIT_LIST_HEAD(&irq->ap_list);
-   spin_lock_init(&irq->irq_lock);
+   raw_spin_lock_init(&irq->irq_lock);
 
irq->config = VGIC_CONFIG_EDGE;
	kref_init(&irq->refcount);
	irq->intid = intid;
	irq->target_vcpu = vcpu;

-   spin_lock_irqsave(&dist->lpi_list_lock, flags);
+   raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);
 
/*
 * There could be a race with another vgic_add_lpi(), so we need to
@@ -100,7 +100,7 @@ static struct vgic_irq 

[rt-patch 3/3] arm, KVM: convert vgic_irq.irq_lock to raw_spinlock_t

2018-07-28 Thread Mike Galbraith


b103cc3f10c0 ("KVM: arm/arm64: Avoid timer save/restore in vcpu entry/exit")
requires vgic_irq.irq_lock be converted to raw_spinlock_t.

Problem: kvm_preempt_ops.sched_in = kvm_sched_in;
   kvm_sched_in()
  kvm_arch_vcpu_load()
 kvm_timer_vcpu_load() <- b103cc3f10c0 addition
kvm_timer_vcpu_load_gic()
   kvm_vgic_map_is_active()
>   spin_lock_irqsave(&irq->irq_lock, flags);

Quoting virt/kvm/arm/vgic/vgic.c, locking order is...

  kvm->lock (mutex)
its->cmd_lock (mutex)
  its->its_lock (mutex)
vgic_cpu->ap_list_lock must be taken with IRQs disabled
  kvm->lpi_list_lock   must be taken with IRQs disabled
vgic_irq->irq_lock must be taken with IRQs disabled

...meaning vgic_dist.lpi_list_lock and vgic_cpu.ap_list_lock must be
converted as well.

Signed-off-by: Mike Galbraith 
---
 include/kvm/arm_vgic.h   |6 -
 virt/kvm/arm/vgic/vgic-debug.c   |4 -
 virt/kvm/arm/vgic/vgic-init.c|8 +-
 virt/kvm/arm/vgic/vgic-its.c |   22 +++
 virt/kvm/arm/vgic/vgic-mmio-v2.c |   14 ++--
 virt/kvm/arm/vgic/vgic-mmio-v3.c |   10 +--
 virt/kvm/arm/vgic/vgic-mmio.c|   34 +--
 virt/kvm/arm/vgic/vgic-v2.c  |4 -
 virt/kvm/arm/vgic/vgic-v3.c  |8 +-
 virt/kvm/arm/vgic/vgic.c |  120 +++
 10 files changed, 115 insertions(+), 115 deletions(-)

--- a/include/kvm/arm_vgic.h
+++ b/include/kvm/arm_vgic.h
@@ -96,7 +96,7 @@ enum vgic_irq_config {
 };
 
 struct vgic_irq {
-   spinlock_t irq_lock;/* Protects the content of the struct */
+   raw_spinlock_t irq_lock;/* Protects the content of the struct */
struct list_head lpi_list;  /* Used to link all LPIs together */
struct list_head ap_list;
 
@@ -243,7 +243,7 @@ struct vgic_dist {
u64 propbaser;
 
/* Protects the lpi_list and the count value below. */
-   spinlock_t  lpi_list_lock;
+   raw_spinlock_t  lpi_list_lock;
struct list_headlpi_list_head;
int lpi_list_count;
 
@@ -296,7 +296,7 @@ struct vgic_cpu {
unsigned int used_lrs;
struct vgic_irq private_irqs[VGIC_NR_PRIVATE_IRQS];
 
-   spinlock_t ap_list_lock;/* Protects the ap_list */
+   raw_spinlock_t ap_list_lock;/* Protects the ap_list */
 
/*
 * List of IRQs that this VCPU should consider because they are either
--- a/virt/kvm/arm/vgic/vgic-debug.c
+++ b/virt/kvm/arm/vgic/vgic-debug.c
@@ -228,9 +228,9 @@ static int vgic_debug_show(struct seq_fi
	irq = &kvm->arch.vgic.spis[iter->intid - VGIC_NR_PRIVATE_IRQS];
}
 
-   spin_lock_irqsave(&irq->irq_lock, flags);
+   raw_spin_lock_irqsave(&irq->irq_lock, flags);
	print_irq_state(s, irq, vcpu);
-   spin_unlock_irqrestore(&irq->irq_lock, flags);
+   raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
 
return 0;
 }
--- a/virt/kvm/arm/vgic/vgic-init.c
+++ b/virt/kvm/arm/vgic/vgic-init.c
@@ -64,7 +64,7 @@ void kvm_vgic_early_init(struct kvm *kvm
	struct vgic_dist *dist = &kvm->arch.vgic;

	INIT_LIST_HEAD(&dist->lpi_list_head);
-   spin_lock_init(&dist->lpi_list_lock);
+   raw_spin_lock_init(&dist->lpi_list_lock);
 }
 
 /**
@@ -80,7 +80,7 @@ void kvm_vgic_vcpu_early_init(struct kvm
int i;
 
	INIT_LIST_HEAD(&vgic_cpu->ap_list_head);
-   spin_lock_init(&vgic_cpu->ap_list_lock);
+   raw_spin_lock_init(&vgic_cpu->ap_list_lock);
 
/*
 * Enable and configure all SGIs to be edge-triggered and
@@ -90,7 +90,7 @@ void kvm_vgic_vcpu_early_init(struct kvm
	struct vgic_irq *irq = &vgic_cpu->private_irqs[i];

	INIT_LIST_HEAD(&irq->ap_list);
-		spin_lock_init(&irq->irq_lock);
+		raw_spin_lock_init(&irq->irq_lock);
irq->intid = i;
irq->vcpu = NULL;
irq->target_vcpu = vcpu;
@@ -214,7 +214,7 @@ static int kvm_vgic_dist_init(struct kvm
 
irq->intid = i + VGIC_NR_PRIVATE_IRQS;
	INIT_LIST_HEAD(&irq->ap_list);
-   spin_lock_init(&irq->irq_lock);
+   raw_spin_lock_init(&irq->irq_lock);
	irq->vcpu = NULL;
	irq->target_vcpu = vcpu0;
	kref_init(&irq->refcount);
--- a/virt/kvm/arm/vgic/vgic-its.c
+++ b/virt/kvm/arm/vgic/vgic-its.c
@@ -65,14 +65,14 @@ static struct vgic_irq *vgic_add_lpi(str
 
	INIT_LIST_HEAD(&irq->lpi_list);
	INIT_LIST_HEAD(&irq->ap_list);
-   spin_lock_init(&irq->irq_lock);
+   raw_spin_lock_init(&irq->irq_lock);
 
irq->config = VGIC_CONFIG_EDGE;
	kref_init(&irq->refcount);
	irq->intid = intid;
	irq->target_vcpu = vcpu;

-   spin_lock_irqsave(&dist->lpi_list_lock, flags);
+   raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);
 
/*
 * There could be a race with another vgic_add_lpi(), so we need to
@@ -100,7 +100,7 @@ static struct vgic_irq