Re: [PATCH v3 2/2] arm64: Add software workaround for Falkor erratum 1041

2017-11-14 Thread Stephen Boyd
On 11/12, Shanker Donthineni wrote:
> The ARM architecture defines the memory locations that are permitted
> to be accessed as the result of a speculative instruction fetch from
> an exception level for which all stages of translation are disabled.
> Specifically, the core is permitted to speculatively fetch from the
> 4KB region containing the current program counter and the next 4KB region.
> 
> When translation is changed from enabled to disabled for the running
> exception level (SCTLR_ELn[M] changed from a value of 1 to 0), the
> Falkor core may errantly speculatively access memory locations outside
> of the 4KB region permitted by the architecture. The errant memory
> access may lead to one of the following unexpected behaviors.
> 
> 1) A System Error Interrupt (SEI) being raised by the Falkor core due
>to the errant memory access attempting to access a region of memory
>that is protected by a slave-side memory protection unit.
> 2) Unpredictable device behavior due to a speculative read from device
>memory. This behavior may only occur if the instruction cache is
>disabled prior to or coincident with translation being changed from
>enabled to disabled.
> 
> The conditions leading to this erratum will not occur when either of the
> following occur:
>  1) A higher exception level disables translation of a lower exception level
>(e.g. EL2 changing SCTLR_EL1[M] from a value of 1 to 0).
>  2) An exception level disabling its stage-1 translation if its stage-2
> translation is enabled (e.g. EL1 changing SCTLR_EL1[M] from a value of 1
> to 0 when HCR_EL2[VM] has a value of 1).
> 
> To avoid the errant behavior, software must execute an ISB immediately
> prior to executing the MSR that will change SCTLR_ELn[M] from 1 to 0.
> 

This also applies to Kryo CPUs. I have a patch[1] for the 1003
Falkor errata that adds the Kryo MIDR check which can also be
used for this errata.

[1] https://patchwork.kernel.org/patch/10048987/

-- 
Qualcomm Innovation Center, Inc. is a member of Code Aurora Forum,
a Linux Foundation Collaborative Project
___
kvmarm mailing list
kvmarm@lists.cs.columbia.edu
https://lists.cs.columbia.edu/mailman/listinfo/kvmarm


[PATCH] arm64: KVM: fix VTTBR_BADDR_MASK BUG_ON off-by-one

2017-11-14 Thread Kristina Martsenko
VTTBR_BADDR_MASK is used to sanity check the size and alignment of the
VTTBR address. It seems to currently be off by one, thereby only
allowing up to 47-bit addresses (instead of 48-bit) and also
insufficiently checking the alignment. This patch fixes it.

As an example, with 4k pages, before this patch we have:

  PHYS_MASK_SHIFT = 48
  VTTBR_X = 37 - 24 = 13
  VTTBR_BADDR_SHIFT = 13 - 1 = 12
  VTTBR_BADDR_MASK = ((1 << 35) - 1) << 12 = 0x00007ffffffff000

Which is wrong, because the mask doesn't allow bit 47 of the VTTBR
address to be set, and only requires the address to be 12-bit (4k)
aligned, while it actually needs to be 13-bit (8k) aligned because we
concatenate two 4k tables.

With this patch, the mask becomes 0x0000ffffffffe000, which is what we
want.

Fixes: 0369f6a34b9f ("arm64: KVM: EL2 register definitions")
Cc:  # 3.11.x
Reviewed-by: Suzuki K Poulose 
Signed-off-by: Kristina Martsenko 
---
 arch/arm64/include/asm/kvm_arm.h | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
index 61d694c2eae5..555d463c0eaa 100644
--- a/arch/arm64/include/asm/kvm_arm.h
+++ b/arch/arm64/include/asm/kvm_arm.h
@@ -170,8 +170,7 @@
 #define VTCR_EL2_FLAGS (VTCR_EL2_COMMON_BITS | 
VTCR_EL2_TGRAN_FLAGS)
 #define VTTBR_X(VTTBR_X_TGRAN_MAGIC - 
VTCR_EL2_T0SZ_IPA)
 
-#define VTTBR_BADDR_SHIFT (VTTBR_X - 1)
-#define VTTBR_BADDR_MASK  (((UL(1) << (PHYS_MASK_SHIFT - VTTBR_X)) - 1) << 
VTTBR_BADDR_SHIFT)
+#define VTTBR_BADDR_MASK  (((UL(1) << (PHYS_MASK_SHIFT - VTTBR_X)) - 1) << 
VTTBR_X)
 #define VTTBR_VMID_SHIFT  (UL(48))
 #define VTTBR_VMID_MASK(size) (_AT(u64, (1 << size) - 1) << VTTBR_VMID_SHIFT)
 
-- 
2.1.4

___
kvmarm mailing list
kvmarm@lists.cs.columbia.edu
https://lists.cs.columbia.edu/mailman/listinfo/kvmarm


Re: [PATCH 10/37] KVM: arm64: Slightly improve debug save/restore functions

2017-11-14 Thread Julien Thierry

Hi Christopher,

On 12/10/17 11:41, Christoffer Dall wrote:

The debug save/restore functions can be improved by using the has_vhe()
static key instead of the instruction alternative.  Using the static key
uses the same paradigm as we're going to use elsewhere, it makes the
code more readable, and it generates slightly better code (no
stack setups and function calls unless necessary).

We also use a static key on the restore path, because it will be
marginally faster than loading a value from memory.

Finally, we don't have to conditionally clear the debug dirty flag if
it's set, we can just clear it.

Signed-off-by: Christoffer Dall 
---
  arch/arm64/kvm/hyp/debug-sr.c | 22 +-
  1 file changed, 9 insertions(+), 13 deletions(-)

diff --git a/arch/arm64/kvm/hyp/debug-sr.c b/arch/arm64/kvm/hyp/debug-sr.c
index 0fc0758..a2291b6 100644
--- a/arch/arm64/kvm/hyp/debug-sr.c
+++ b/arch/arm64/kvm/hyp/debug-sr.c
@@ -75,11 +75,6 @@
  
  #define psb_csync()		asm volatile("hint #17")
  
-static void __hyp_text __debug_save_spe_vhe(u64 *pmscr_el1)

-{
-   /* The vcpu can run. but it can't hide. */
-}
-
  static void __hyp_text __debug_save_spe_nvhe(u64 *pmscr_el1)
  {
u64 reg;
@@ -109,10 +104,6 @@ static void __hyp_text __debug_save_spe_nvhe(u64 
*pmscr_el1)
dsb(nsh);
  }
  
-static hyp_alternate_select(__debug_save_spe,

-   __debug_save_spe_nvhe, __debug_save_spe_vhe,
-   ARM64_HAS_VIRT_HOST_EXTN);
-
  static void __hyp_text __debug_restore_spe(u64 pmscr_el1)
  {
if (!pmscr_el1)
@@ -174,17 +165,22 @@ void __hyp_text __debug_cond_save_host_state(struct 
kvm_vcpu *vcpu)
  {
__debug_save_state(vcpu, >arch.host_debug_state.regs,
   kern_hyp_va(vcpu->arch.host_cpu_context));
-   __debug_save_spe()(>arch.host_debug_state.pmscr_el1);
+
+   /* Non-VHE: Disable and flush SPE data generation
+* VHE: The vcpu can run. but it can't hide. */
+   if (!has_vhe())
+   __debug_save_spe_nvhe(>arch.host_debug_state.pmscr_el1);
  }
  
  void __hyp_text __debug_cond_restore_host_state(struct kvm_vcpu *vcpu)

  {
-   __debug_restore_spe(vcpu->arch.host_debug_state.pmscr_el1);
+   if (!has_vhe())
+   __debug_restore_spe(vcpu->arch.host_debug_state.pmscr_el1);


For consistency, would it be worth naming that function 
'__debug_restore_spe_nvhe' ?


Also, looking at __debug_save_spe_nvhe, I'm not sure how we guarantee 
that we might not end up using stale data during the restore_spe 
(though, if this is an issue, it existed before this change).

The save function might exit without setting a value to saved pmscr_el1.

Basically I'm wondering if the following scenario (in non VHE) is 
possible and/or whether it is problematic:


- save spe
- restore spe
- host starts using spe -> !(PMBLIMITR_EL1 & PMBLIMITR_EL1_E)
- save spe -> returns early without setting pmscr_el1
- restore spe with old save instead of doing nothing


Cheers,

--
Julien Thierry
___
kvmarm mailing list
kvmarm@lists.cs.columbia.edu
https://lists.cs.columbia.edu/mailman/listinfo/kvmarm


Re: [PATCH v4 00/21] SError rework + RAS for firmware first support

2017-11-14 Thread James Morse
Hi Drew,

On 13/11/17 16:14, Andrew Jones wrote:
> On Mon, Nov 13, 2017 at 12:29:46PM +0100, Christoffer Dall wrote:
>> On Thu, Nov 09, 2017 at 06:14:56PM +, James Morse wrote:
>>> On 19/10/17 15:57, James Morse wrote:
 Known issues:
  * KVM-Migration: VDISR_EL2 is exposed to userspace as DISR_EL1, but how 
 should
HCR_EL2.VSE or VSESR_EL2 be migrated when the guest has an SError 
 pending but
hasn't taken it yet...?
>>>
>>> I've been trying to work out how this pending-SError-migration could work.

[..]

>>> To get out of this corner: why not declare pending-SError-migration an 
>>> invalid
>>> thing to do?
>>
>> To answer that question we'd have to know if that is generally a valid
>> thing to require.  How will higher level tools in the stack deal with
>> this (e.g. libvirt, and OpenStack).  Is it really valid to tell them
>> "nope, can't migrate right now".  I'm thinking if you have a failing
>> host and want to signal some error to the guest, that's probably a
>> really good time to migrate your mission-critical VM away to a different
>> host, and being told, "sorry, cannot do this" would be painful.  I'm
>> cc'ing Drew for his insight into libvirt and how this is done on x86,
>> but I'm not really crazy about this idea.

> Without actually confirming, I'm pretty sure it's handled with a best
> effort to cancel the migration, continuing/restoring execution on the
> source host (or there may be other policies that could be set as well).
> Naturally, if the source host is going down and the migration is
> cancelled, then the VM goes down too...

> Anyway, I don't think we would generally want to introduce guest
> controlled migration blockers. IIUC, this migration blocker would remain
> until the guest handled the SError, which it may never unmask.

Yes, given the guest can influence this it needs exposing so it can be migrated.


[...]

>> My suggestion would be to add some set of VCPU exception state,
>> potentially as flags, which can be migrated along with the VM, or at
>> least used by userspace to query the state of the VM, if there exists a
>> reliable mechanism to restore the state again without any side effects.
>>
>> I think we have to comb through Documentation/virtual/kvm/api.txt to see
>> if we can reuse anything, and if not, add something.  We could also
> 
> Maybe KVM_GET/SET_VCPU_EVENTS? Looks like the doc mistakenly states it's
> a VM ioctl, but it's a VCPU ioctl.

Hmm, if I suppress my register-size pedantry we can put the lower 32 bits of
VSESR_EL2 in exception.error_code and use has_error_code to mark it valid.
'exception' in this struct ends up meaning SError on arm64.

(While VSESR_EL2 is 64bit[0], the value gets written into the ESR, which is
32bit, so I doubt the top 32bits can be used, currently they are all reserved.)

I'll go dig into how x86 uses this...


Thanks!

James


[0]
https://static.docs.arm.com/ddi0587/a/RAS%20Extension-release%20candidate_march_29.pdf
___
kvmarm mailing list
kvmarm@lists.cs.columbia.edu
https://lists.cs.columbia.edu/mailman/listinfo/kvmarm


Re: [PATCH v4 00/21] SError rework + RAS for firmware first support

2017-11-14 Thread James Morse
Hi Christoffer,

On 13/11/17 11:29, Christoffer Dall wrote:
> On Thu, Nov 09, 2017 at 06:14:56PM +, James Morse wrote:
>> On 19/10/17 15:57, James Morse wrote:
>>> Known issues:
>> [...]
>>>  * KVM-Migration: VDISR_EL2 is exposed to userspace as DISR_EL1, but how 
>>> should
>>>HCR_EL2.VSE or VSESR_EL2 be migrated when the guest has an SError 
>>> pending but
>>>hasn't taken it yet...?
>>
>> I've been trying to work out how this pending-SError-migration could work.
>>
>> If HCR_EL2.VSE is set then the guest will take a virtual SError when it next
>> unmasks SError. Today this doesn't get migrated, but only KVM sets this bit 
>> as
>> an attempt to kill the guest.
>>
>> This will be more of a problem with GengDongjiu's SError CAP for triggering
>> guest SError from user-space, which will also allow the VSESR_EL2 to be
>> specified. (this register becomes the guest ESR_EL1 when the virtual SError 
>> is
>> taken and is used to emulate firmware-first's NOTIFY_SEI and eventually
>> kernel-first RAS). These errors are likely to be handled by the guest.
>>
>>
>> We don't want to expose VSESR_EL2 to user-space, and for migration it isn't
>> enough as a value of '0' doesn't tell us if HCR_EL2.VSE is set.
>>
>> To get out of this corner: why not declare pending-SError-migration an 
>> invalid
>> thing to do?

> To answer that question we'd have to know if that is generally a valid
> thing to require.  How will higher level tools in the stack deal with
> this (e.g. libvirt, and OpenStack).  Is it really valid to tell them
> "nope, can't migrate right now".  I'm thinking if you have a failing
> host and want to signal some error to the guest, that's probably a
> really good time to migrate your mission-critical VM away to a different
> host, and being told, "sorry, cannot do this" would be painful.  I'm
> cc'ing Drew for his insight into libvirt and how this is done on x86,

Thanks,


> but I'm not really crazy about this idea.

Excellent, so at the other extreme we could have an API to query all of this
state, and another to set it. On systems without the RAS extensions this just
moves the HCR_EL2.VSE bit. On systems with the RAS extensions it moves VSESR_EL2
too.

I was hoping to avoid exposing different information. I need to look into how
that works. (and this is all while avoiding adding an EL2 register to
vcpu_sysreg [0])


>> We can give Qemu a way to query if a virtual SError is (still) pending. Qemu
>> would need to check this on each vcpu after migration, just before it throws 
>> the
>> switch and the guest runs on the new host. This way the VSESR_EL2 value 
>> doesn't
>> need migrating at all.
>>
>> In the ideal world, Qemu could re-inject the last SError it triggered if 
>> there
>> is still one pending when it migrates... but because KVM injects errors too, 
>> it
>> would need to block migration until this flag is cleared.

> I don't understand your conclusion here.

I was trying to reduce it to exposing just HCR_EL2.VSE as 'bool
serror_still_pending()', then let Qemu re-inject whatever SError it injected
last. This then behaves the same regardless of the RAS support.
But KVM's kvm_inject_vabt() breaks this, Qemu can't know whether this pending
SError was from Qemu, or from KVM.

... So we need VSESR_EL2 on systems which have that register ...

(or, get rid of kvm_inject_vabt(), but that would involve a new exit type, and
some trickery for existing user-space)

> If QEMU can query the virtual SError pending state, it can also inject
> that before running the VM after a restore, and we should have preserved
> the same state.

[..]

>> Can anyone suggest a better way?

> I'm thinking this is analogous to migrating a VM that uses an irqchip in
> userspace and has set the IRQ or FIQ lines using KVM_IRQ_LINE.  My
> feeling is that this is also not supported today.

Does KVM change/update these values behind Qemu's back? It's kvm_inject_vabt()
that is making this tricky. (or at least confusing me)


> My suggestion would be to add some set of VCPU exception state,
> potentially as flags, which can be migrated along with the VM, or at
> least used by userspace to query the state of the VM, if there exists a
> reliable mechanism to restore the state again without any side effects.
> 
> I think we have to comb through Documentation/virtual/kvm/api.txt to see
> if we can reuse anything, and if not, add something.  We could also
> consider adding something to Documentation/virtual/kvm/devices/vcpu.txt,
> where I think we have a large number space to use from.
> 
> Hope this helps?

Yes, I'll go looking for a way to expose VSESR_EL2 to user-space.


Thanks!

James


[0] https://patchwork.kernel.org/patch/9886019/
___
kvmarm mailing list
kvmarm@lists.cs.columbia.edu
https://lists.cs.columbia.edu/mailman/listinfo/kvmarm


Re: [PATCH v8 0/7] Support RAS virtualization in KVM

2017-11-14 Thread James Morse
Hi Dongjiu Geng,

On 10/11/17 19:54, Dongjiu Geng wrote:
> This series patches mainly do below things:
> 
> 1. Trap RAS ERR* registers Accesses to EL2 from Non-secure EL1,
>KVM will do a minimum simulation, these registers are simulated
>to RAZ/WI in KVM.
> 2. Route synchronous External Abort exceptions from Non-secure EL0
>and EL1 to EL2. When exception EL3 routing is enabled by firmware,
>system will trap to EL3 firmware instead of EL2 KVM, then firmware
>judges whether El2 routing is enabled, if enabled, jump to EL2 KVM, 
>otherwise jump to EL1 host kernel.
> 3. Enable APEI ARv8 SEI notification to parse the CPER records for SError
>in the ACPI GHES driver, KVM will call handle_guest_sei() to let ACPI
>driver to parse the CPER record for SError which happened in the guest
> 4. Although we can use APEI driver to handle the guest SError, but not all
>system support SEI notification, such as kernel-first. So here KVM will
>also classify the Error through Exception Syndrome Register and do 
> different
>approaches according to Asynchronous Error Type

> 5. If the guest SError error is not propagated and not consumed, then KVM 
> return
>recoverable error status to user-space, user-space will specify the guest 
> ESR

I thought we'd gone over this. There should be no RAS errors/notifications in
user space. Only the symptoms should be sent, using the SIGBUS_MCEERR_A{O,R} if
the kernel has handled as much as it can. This hides the actual mechanisms the
kernel and firmware used.

User-space should not have to know how to handle RAS errors directly. This is a
service the operating system provides for it. This abstraction means the same
user-space code is portable between x86, arm64, powerpc etc.

What if the firmware uses another notification method? User space should expect
the kernel to hide things like this from it.

If the kernel has no information to interpret a notification, how is user space
supposed to know?

I understand you are trying to work around your 'memory corruption at an unknown
address'[0] problem, but if the kernel can't know where this corrupt memory is
it should really reboot. What stops this corrupt data being swapped to disk?

Killing 'the thing' that was running at the time is not sufficient because we
don't know that this 'got' all the users of the corrupt memory. KSM can merge
pages between guests. This is the difference between the error persisting
forever killing off all the VMs one by one, and the corrupt page being silently
re-read from disk clearing the error.


>and inject a virtual SError. For other Asynchronous Error Type, KVM 
> directly
>injects virtual SError with IMPLEMENTATION DEFINED ESR or KVM is panic if 
> the
>error is fatal. In the RAS extension, guest virtual ESR must be set, 
> because
>all-zero  means 'RAS error: Uncategorized' instead of 'no valid ISS', so 
> set
>this ESR to IMPLEMENTATION DEFINED by default if user space does not 
> specify it.


Thanks,

James


[0] https://www.spinics.net/lists/arm-kernel/msg605345.html
___
kvmarm mailing list
kvmarm@lists.cs.columbia.edu
https://lists.cs.columbia.edu/mailman/listinfo/kvmarm


Re: [PATCH v8 7/7] arm64: kvm: handle SError Interrupt by categorization

2017-11-14 Thread James Morse
Hi Dongjiu Geng,

On 10/11/17 19:54, Dongjiu Geng wrote:
> If it is not RAS SError, directly inject virtual SError,
> which will keep the old way. If it is RAS SError, firstly
> let host ACPI module to handle it.

> For the ACPI handling,
> if the error address is invalid, APEI driver will not
> identify the address to hwpoison memory and can not notify
> guest to do the recovery.

The guest can't do any recover either. There is no recovery you can do without
some information about what the error is.

This is your memory corruption at an unknown address? We should reboot.

(I agree memory_failure.c's::me_kernel() is ignoring kernel errors, we should
try and fix this. It makes some sense for polled or irq notifications, but not
SEA/SEI).


> To be safe, KVM continues
> categorizing errors and handle it separately.

> If the RAS error is not propagated, let host user space to
> handle it. 

No. Host user space should not know anything about the kernel or platform RAS
support. Doing so creates an ABI link between EL3 firmware and Qemu. This is
totally unmaintainable.

This thing needs to be portable. The kernel should handle the error, and report
any symptoms to user-space. e.g. 'this memory is gone'.

We shouldn't special case KVM.


> The reason is that sometimes we can only kill the
> guest affected application instead of panicking the whole guest OS.
> Host user space specifies a valid ESR and inject virtual
> SError, guest can just kill the current application if the
> non-consumed error coming from guest application.
> 
> Signed-off-by: Dongjiu Geng 
> Signed-off-by: Quanming Wu 

The last Signed-off-by should match the person posting the patch. It's a chain
of custody for GPL-signoff purposes, not a 'partially-written-by'. If you want
to credit Quanming Wu you can add CC and they can Ack/Review your patch.


> diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c
> index 7debb74..1afdc87 100644
> --- a/arch/arm64/kvm/handle_exit.c
> +++ b/arch/arm64/kvm/handle_exit.c
> @@ -178,6 +179,66 @@ static exit_handle_fn kvm_get_exit_handler(struct 
> kvm_vcpu *vcpu)
>   return arm_exit_handlers[hsr_ec];
>  }
>  
> +/**
> + * kvm_handle_guest_sei - handles SError interrupt or asynchronous aborts
> + * @vcpu:the VCPU pointer
> + *
> + * For RAS SError interrupt, firstly let host kernel handle it.
> + * If the AET is [ESR_ELx_AET_UER], then let user space handle it,
> + */
> +static int kvm_handle_guest_sei(struct kvm_vcpu *vcpu, struct kvm_run *run)
> +{
> + unsigned int esr = kvm_vcpu_get_hsr(vcpu);
> + bool impdef_syndrome =  esr & ESR_ELx_ISV;  /* aka IDS */
> + unsigned int aet = esr & ESR_ELx_AET;
> +
> + /*
> +  * This is not RAS SError
> +  */
> + if (!cpus_have_const_cap(ARM64_HAS_RAS_EXTN)) {
> + kvm_inject_vabt(vcpu);
> + return 1;
> + }

> + /* The host kernel may handle this abort. */
> + handle_guest_sei();

This has to claim the SError as a notification. If APEI claims the error, KVM
doesn't need to do anything more. You ignore its return code.


> +
> + /*
> +  * In below two conditions, it will directly inject the
> +  * virtual SError:
> +  * 1. The Syndrome is IMPLEMENTATION DEFINED
> +  * 2. It is Uncategorized SEI
> +  */
> + if (impdef_syndrome ||
> + ((esr & ESR_ELx_FSC) != ESR_ELx_FSC_SERROR)) {
> + kvm_inject_vabt(vcpu);
> + return 1;
> + }
> +
> + switch (aet) {
> + case ESR_ELx_AET_CE:/* corrected error */
> + case ESR_ELx_AET_UEO:   /* restartable error, not yet consumed */
> + return 1;   /* continue processing the guest exit */

> + case ESR_ELx_AET_UER:   /* The error has not been propagated */
> + /*
> +  * Userspace only handle the guest SError Interrupt(SEI) if the
> +  * error has not been propagated
> +  */
> + run->exit_reason = KVM_EXIT_EXCEPTION;
> + run->ex.exception = ESR_ELx_EC_SERROR;
> + run->ex.error_code = KVM_SEI_SEV_RECOVERABLE;
> + return 0;

We should not pass RAS notifications to user space. The kernel either handles
them, or it panics(). User space shouldn't even know if the kernel supports RAS
until it gets an MCEERR signal.

You're making your firmware-first notification an EL3->EL0 signal, bypassing 
the OS.

If we get a RAS SError and there are no CPER records or values in the ERR nodes,
we should panic as it looks like the CPU/firmware is broken. (spurious RAS 
errors)


> + default:
> + /*
> +  * Until now, the CPU supports RAS and SEI is fatal, or host
> +  * does not support to handle the SError.
> +  */
> + panic("This Asynchronous SError interrupt is dangerous, panic");
> + }
> +
> + return 0;
> +}
> +
>  /*
>   * Return > 0 to return to guest, < 0 

Re: [PATCH 04/37] KVM: arm/arm64: Get rid of vcpu->arch.irq_lines

2017-11-14 Thread Julien Thierry

Hi Christoffer,

On 12/10/17 11:41, Christoffer Dall wrote:

We currently have a separate read-modify-write of the HCR_EL2 on entry
to the guest for the sole purpose of setting the VF and VI bits, if set.
Since this is most rarely the case (only when using userspace IRQ chip
and interrupts are in flight), let's get rid of this operation and
instead modify the bits in the vcpu->arch.hcr[_el2] directly when
needed.

Signed-off-by: Christoffer Dall 
---
  arch/arm/include/asm/kvm_emulate.h   |  9 ++---
  arch/arm/include/asm/kvm_host.h  |  3 ---
  arch/arm/kvm/emulate.c   |  2 +-
  arch/arm/kvm/hyp/switch.c|  2 +-
  arch/arm64/include/asm/kvm_emulate.h |  9 ++---
  arch/arm64/include/asm/kvm_host.h|  3 ---
  arch/arm64/kvm/hyp/switch.c  |  6 --
  arch/arm64/kvm/inject_fault.c|  2 +-
  virt/kvm/arm/arm.c   | 11 ++-
  virt/kvm/arm/mmu.c   |  6 +++---
  10 files changed, 16 insertions(+), 37 deletions(-)

diff --git a/arch/arm/include/asm/kvm_emulate.h 
b/arch/arm/include/asm/kvm_emulate.h
index 98089ff..34663a8 100644
--- a/arch/arm/include/asm/kvm_emulate.h
+++ b/arch/arm/include/asm/kvm_emulate.h
@@ -62,14 +62,9 @@ static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
vcpu->arch.hcr = HCR_GUEST_MASK;
  }
  
-static inline unsigned long vcpu_get_hcr(const struct kvm_vcpu *vcpu)

+static inline unsigned long *vcpu_hcr(const struct kvm_vcpu *vcpu)
  {
-   return vcpu->arch.hcr;
-}
-
-static inline void vcpu_set_hcr(struct kvm_vcpu *vcpu, unsigned long hcr)
-{
-   vcpu->arch.hcr = hcr;
+   return (unsigned long *)>arch.hcr;
  }
  
  static inline bool vcpu_mode_is_32bit(const struct kvm_vcpu *vcpu)

diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
index 4a879f6..1100170 100644
--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -153,9 +153,6 @@ struct kvm_vcpu_arch {
/* HYP trapping configuration */
u32 hcr;
  
-	/* Interrupt related fields */

-   u32 irq_lines;  /* IRQ and FIQ levels */
-
/* Exception Information */
struct kvm_vcpu_fault_info fault;
  
diff --git a/arch/arm/kvm/emulate.c b/arch/arm/kvm/emulate.c

index 0064b86..4286a89 100644
--- a/arch/arm/kvm/emulate.c
+++ b/arch/arm/kvm/emulate.c
@@ -313,5 +313,5 @@ void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long 
addr)
   */
  void kvm_inject_vabt(struct kvm_vcpu *vcpu)
  {
-   vcpu_set_hcr(vcpu, vcpu_get_hcr(vcpu) | HCR_VA);
+   *vcpu_hcr(vcpu) |= HCR_VA;
  }
diff --git a/arch/arm/kvm/hyp/switch.c b/arch/arm/kvm/hyp/switch.c
index 330c9ce..c3b9799 100644
--- a/arch/arm/kvm/hyp/switch.c
+++ b/arch/arm/kvm/hyp/switch.c
@@ -43,7 +43,7 @@ static void __hyp_text __activate_traps(struct kvm_vcpu 
*vcpu, u32 *fpexc_host)
isb();
}
  
-	write_sysreg(vcpu->arch.hcr | vcpu->arch.irq_lines, HCR);

+   write_sysreg(vcpu->arch.hcr, HCR);
/* Trap on AArch32 cp15 c15 accesses (EL1 or EL0) */
write_sysreg(HSTR_T(15), HSTR);
write_sysreg(HCPTR_TTA | HCPTR_TCP(10) | HCPTR_TCP(11), HCPTR);
diff --git a/arch/arm64/include/asm/kvm_emulate.h 
b/arch/arm64/include/asm/kvm_emulate.h
index e5df3fc..1fbfe96 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -51,14 +51,9 @@ static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
vcpu->arch.hcr_el2 &= ~HCR_RW;
  }
  
-static inline unsigned long vcpu_get_hcr(struct kvm_vcpu *vcpu)

+static inline unsigned long *vcpu_hcr(struct kvm_vcpu *vcpu)
  {
-   return vcpu->arch.hcr_el2;
-}
-
-static inline void vcpu_set_hcr(struct kvm_vcpu *vcpu, unsigned long hcr)
-{
-   vcpu->arch.hcr_el2 = hcr;
+   return (unsigned long *)>arch.hcr_el2;
  }
  
  static inline unsigned long *vcpu_pc(const struct kvm_vcpu *vcpu)

diff --git a/arch/arm64/include/asm/kvm_host.h 
b/arch/arm64/include/asm/kvm_host.h
index 806ccef..27305e7 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -266,9 +266,6 @@ struct kvm_vcpu_arch {
/* IO related fields */
struct kvm_decode mmio_decode;
  
-	/* Interrupt related fields */

-   u64 irq_lines;  /* IRQ and FIQ levels */
-
/* Cache some mmu pages needed inside spinlock regions */
struct kvm_mmu_memory_cache mmu_page_cache;
  
diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c

index bcf1a79..7703d63 100644
--- a/arch/arm64/kvm/hyp/switch.c
+++ b/arch/arm64/kvm/hyp/switch.c
@@ -168,12 +168,6 @@ static void __hyp_text __vgic_save_state(struct kvm_vcpu 
*vcpu)
  
  static void __hyp_text __vgic_restore_state(struct kvm_vcpu *vcpu)

  {
-   u64 val;
-
-   val = read_sysreg(hcr_el2);
-   val |= vcpu->arch.irq_lines;
-   write_sysreg(val, hcr_el2);
-
if (static_branch_unlikely(_vgic_global_state.gicv3_cpuif))