Re: [PATCH v7 2/3] arm64: kvm: Introduce MTE VCPU feature

2021-02-04 Thread Steven Price

On 02/02/2021 17:12, Marc Zyngier wrote:

On 2021-01-15 15:28, Steven Price wrote:

Add a new VM feature 'KVM_ARM_CAP_MTE' which enables memory tagging
for a VM. This exposes the feature to the guest and automatically tags
memory pages touched by the VM as PG_mte_tagged (and clears the tags
storage) to ensure that the guest cannot see stale tags, and so that the
tags are correctly saved/restored across swap.

Signed-off-by: Steven Price 
---
 arch/arm64/include/asm/kvm_emulate.h |  3 +++
 arch/arm64/include/asm/kvm_host.h    |  3 +++
 arch/arm64/include/asm/pgtable.h |  2 +-
 arch/arm64/kernel/mte.c  | 36 +---
 arch/arm64/kvm/arm.c |  9 +++
 arch/arm64/kvm/hyp/exception.c   |  3 ++-
 arch/arm64/kvm/mmu.c | 16 +
 arch/arm64/kvm/sys_regs.c    |  6 -
 include/uapi/linux/kvm.h |  1 +
 9 files changed, 62 insertions(+), 17 deletions(-)

diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index f612c090f2e4..6bf776c2399c 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -84,6 +84,9 @@ static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)

 if (cpus_have_const_cap(ARM64_MISMATCHED_CACHE_TYPE) ||
 vcpu_el1_is_32bit(vcpu))
 vcpu->arch.hcr_el2 |= HCR_TID2;
+
+    if (kvm_has_mte(vcpu->kvm))
+    vcpu->arch.hcr_el2 |= HCR_ATA;
 }

 static inline unsigned long *vcpu_hcr(struct kvm_vcpu *vcpu)
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 51590a397e4b..1ca5785fb0e9 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -132,6 +132,8 @@ struct kvm_arch {

 u8 pfr0_csv2;
 u8 pfr0_csv3;
+    /* Memory Tagging Extension enabled for the guest */
+    bool mte_enabled;
 };

 struct kvm_vcpu_fault_info {
@@ -749,6 +751,7 @@ bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu);

 #define kvm_arm_vcpu_sve_finalized(vcpu) \
 ((vcpu)->arch.flags & KVM_ARM64_VCPU_SVE_FINALIZED)

+#define kvm_has_mte(kvm) (system_supports_mte() && (kvm)->arch.mte_enabled)

 #define kvm_vcpu_has_pmu(vcpu)    \
 (test_bit(KVM_ARM_VCPU_PMU_V3, (vcpu)->arch.features))

diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 501562793ce2..27416d52f6a9 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -312,7 +312,7 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
 __sync_icache_dcache(pte);

 if (system_supports_mte() &&
-    pte_present(pte) && pte_tagged(pte) && !pte_special(pte))
+    pte_present(pte) && pte_valid_user(pte) && !pte_special(pte))
 mte_sync_tags(ptep, pte);


Care to elaborate on this change?


Sorry, I should have called this out in the commit message. The change 
here means that, instead of only calling mte_sync_tags() on pages which 
are already tagged in the PTE, it is now called for all (normal) user 
pages. See below for why.
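
For reference, the resulting check in set_pte_at() is roughly (a 
condensed restatement of the hunk quoted above, not new code):

	if (system_supports_mte() &&
	    pte_present(pte) && pte_valid_user(pte) && !pte_special(pte))
		mte_sync_tags(ptep, pte);

so tag restore from swap can now happen for any user page, while the 
actual clearing of tags (further below) is gated on the PTE really 
being tagged.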




 __check_racy_pte_update(mm, ptep, pte);
diff --git a/arch/arm64/kernel/mte.c b/arch/arm64/kernel/mte.c
index dc9ada64feed..f9e089be1603 100644
--- a/arch/arm64/kernel/mte.c
+++ b/arch/arm64/kernel/mte.c
@@ -25,27 +25,33 @@

 u64 gcr_kernel_excl __ro_after_init;

-static void mte_sync_page_tags(struct page *page, pte_t *ptep, bool check_swap)
+static void mte_sync_page_tags(struct page *page, pte_t *ptep, bool check_swap,
+   bool pte_is_tagged)
 {
 pte_t old_pte = READ_ONCE(*ptep);

 if (check_swap && is_swap_pte(old_pte)) {
 swp_entry_t entry = pte_to_swp_entry(old_pte);

-    if (!non_swap_entry(entry) && mte_restore_tags(entry, page))
+    if (!non_swap_entry(entry) && mte_restore_tags(entry, page)) {
+    set_bit(PG_mte_tagged, &page->flags);
     return;
+    }
 }

-    page_kasan_tag_reset(page);
-    /*
- * We need smp_wmb() in between setting the flags and clearing the
- * tags because if another thread reads page->flags and builds a
- * tagged address out of it, there is an actual dependency to the
- * memory access, but on the current thread we do not guarantee that
- * the new page->flags are visible before the tags were updated.
- */
-    smp_wmb();
-    mte_clear_page_tags(page_address(page));
+    if (pte_is_tagged) {
+        set_bit(PG_mte_tagged, &page->flags);
+        page_kasan_tag_reset(page);
+        /*
+         * We need smp_wmb() in between setting the flags and clearing the
+         * tags because if another thread reads page->flags and builds a
+         * tagged address out of it, there is an actual dependency to the
+         * memory access, but on the current thread we do not guarantee that
+         * the new page->flags are visible before the tags were updated.
+         */
+        smp_wmb();
+        mte_clear_page_tags(page_address(page));
+    }
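
For completeness, a VMM would opt in to this per-VM. A minimal sketch, 
assuming the capability is enabled through the standard KVM_ENABLE_CAP 
ioctl on the VM fd, and using the capability name from the commit 
message (the exact constant is defined by this patch's 
include/uapi/linux/kvm.h hunk, which the archive does not show):

	#include <fcntl.h>
	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	static int enable_mte_for_vm(void)
	{
		int kvm = open("/dev/kvm", O_RDWR);
		int vm = ioctl(kvm, KVM_CREATE_VM, 0);
		/* Name taken from the commit message; the actual
		 * definition lives in this patch's uapi hunk. */
		struct kvm_enable_cap cap = { .cap = KVM_ARM_CAP_MTE };

		if (kvm < 0 || vm < 0)
			return -1;
		return ioctl(vm, KVM_ENABLE_CAP, &cap);
	}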

Re: [PATCH v7 2/3] arm64: kvm: Introduce MTE VCPU feature

2021-02-02 Thread Marc Zyngier

On 2021-01-15 15:28, Steven Price wrote:

Add a new VM feature 'KVM_ARM_CAP_MTE' which enables memory tagging
for a VM. This exposes the feature to the guest and automatically tags
memory pages touched by the VM as PG_mte_tagged (and clears the tags
storage) to ensure that the guest cannot see stale tags, and so that the
tags are correctly saved/restored across swap.

Signed-off-by: Steven Price 
---
 arch/arm64/include/asm/kvm_emulate.h |  3 +++
 arch/arm64/include/asm/kvm_host.h|  3 +++
 arch/arm64/include/asm/pgtable.h |  2 +-
 arch/arm64/kernel/mte.c  | 36 +---
 arch/arm64/kvm/arm.c |  9 +++
 arch/arm64/kvm/hyp/exception.c   |  3 ++-
 arch/arm64/kvm/mmu.c | 16 +
 arch/arm64/kvm/sys_regs.c|  6 -
 include/uapi/linux/kvm.h |  1 +
 9 files changed, 62 insertions(+), 17 deletions(-)

diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index f612c090f2e4..6bf776c2399c 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -84,6 +84,9 @@ static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)

if (cpus_have_const_cap(ARM64_MISMATCHED_CACHE_TYPE) ||
vcpu_el1_is_32bit(vcpu))
vcpu->arch.hcr_el2 |= HCR_TID2;
+
+   if (kvm_has_mte(vcpu->kvm))
+   vcpu->arch.hcr_el2 |= HCR_ATA;
 }

 static inline unsigned long *vcpu_hcr(struct kvm_vcpu *vcpu)
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 51590a397e4b..1ca5785fb0e9 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -132,6 +132,8 @@ struct kvm_arch {

u8 pfr0_csv2;
u8 pfr0_csv3;
+   /* Memory Tagging Extension enabled for the guest */
+   bool mte_enabled;
 };

 struct kvm_vcpu_fault_info {
@@ -749,6 +751,7 @@ bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu);

 #define kvm_arm_vcpu_sve_finalized(vcpu) \
((vcpu)->arch.flags & KVM_ARM64_VCPU_SVE_FINALIZED)

+#define kvm_has_mte(kvm) (system_supports_mte() && (kvm)->arch.mte_enabled)

 #define kvm_vcpu_has_pmu(vcpu) \
(test_bit(KVM_ARM_VCPU_PMU_V3, (vcpu)->arch.features))

diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 501562793ce2..27416d52f6a9 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -312,7 +312,7 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
__sync_icache_dcache(pte);

if (system_supports_mte() &&
-   pte_present(pte) && pte_tagged(pte) && !pte_special(pte))
+   pte_present(pte) && pte_valid_user(pte) && !pte_special(pte))
mte_sync_tags(ptep, pte);


Care to elaborate on this change?



__check_racy_pte_update(mm, ptep, pte);
diff --git a/arch/arm64/kernel/mte.c b/arch/arm64/kernel/mte.c
index dc9ada64feed..f9e089be1603 100644
--- a/arch/arm64/kernel/mte.c
+++ b/arch/arm64/kernel/mte.c
@@ -25,27 +25,33 @@

 u64 gcr_kernel_excl __ro_after_init;

-static void mte_sync_page_tags(struct page *page, pte_t *ptep, bool check_swap)
+static void mte_sync_page_tags(struct page *page, pte_t *ptep, bool check_swap,
+  bool pte_is_tagged)
 {
pte_t old_pte = READ_ONCE(*ptep);

if (check_swap && is_swap_pte(old_pte)) {
swp_entry_t entry = pte_to_swp_entry(old_pte);

-   if (!non_swap_entry(entry) && mte_restore_tags(entry, page))
+   if (!non_swap_entry(entry) && mte_restore_tags(entry, page)) {
+   set_bit(PG_mte_tagged, &page->flags);
    return;
+   }
}

-   page_kasan_tag_reset(page);
-   /*
-* We need smp_wmb() in between setting the flags and clearing the
-* tags because if another thread reads page->flags and builds a
-* tagged address out of it, there is an actual dependency to the
-* memory access, but on the current thread we do not guarantee that
-* the new page->flags are visible before the tags were updated.
-*/
-   smp_wmb();
-   mte_clear_page_tags(page_address(page));
+   if (pte_is_tagged) {
+       set_bit(PG_mte_tagged, &page->flags);
+       page_kasan_tag_reset(page);
+       /*
+        * We need smp_wmb() in between setting the flags and clearing the
+        * tags because if another thread reads page->flags and builds a
+        * tagged address out of it, there is an actual dependency to the
+        * memory access, but on the current thread we do not guarantee that
+        * the new page->flags are visible before the tags were updated.
+        */
+       smp_wmb();
+       mte_clear_page_tags(page_address(page));
+   }
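
As an aside, the ordering that comment describes can be modelled in 
plain C11. A standalone illustrative analogue only, not kernel code: 
atomic_thread_fence(memory_order_release) stands in for smp_wmb(), and 
the reader relies on the address dependency from the flags load, 
exactly as the comment says:

	#include <stdatomic.h>
	#include <stdio.h>

	struct fake_page {
		_Atomic unsigned long flags;    /* stands in for page->flags */
		_Atomic unsigned char tags[16]; /* stands in for the tag storage */
	};

	/* Writer: publish the new flags, then update the tags. */
	static void publish(struct fake_page *p, unsigned long new_flags)
	{
		atomic_store_explicit(&p->flags, new_flags, memory_order_relaxed);
		atomic_thread_fence(memory_order_release); /* ~ smp_wmb() */
		for (int i = 0; i < 16; i++)
			atomic_store_explicit(&p->tags[i], 0, memory_order_relaxed);
	}

	/* Reader: the tag access is address-dependent on the flags load,
	 * so it is ordered after it without an explicit barrier. */
	static unsigned char read_tag(struct fake_page *p)
	{
		unsigned long f = atomic_load_explicit(&p->flags, memory_order_consume);
		return atomic_load_explicit(&p->tags[f & 15], memory_order_relaxed);
	}

	int main(void)
	{
		struct fake_page pg = { .flags = 1 };
		publish(&pg, 5);
		printf("tag = %u\n", (unsigned)read_tag(&pg));
		return 0;
	}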

[PATCH v7 2/3] arm64: kvm: Introduce MTE VCPU feature

2021-01-15 Thread Steven Price
Add a new VM feature 'KVM_ARM_CAP_MTE' which enables memory tagging
for a VM. This exposes the feature to the guest and automatically tags
memory pages touched by the VM as PG_mte_tagged (and clears the tags
storage) to ensure that the guest cannot see stale tags, and so that the
tags are correctly saved/restored across swap.

Signed-off-by: Steven Price 
---
 arch/arm64/include/asm/kvm_emulate.h |  3 +++
 arch/arm64/include/asm/kvm_host.h|  3 +++
 arch/arm64/include/asm/pgtable.h |  2 +-
 arch/arm64/kernel/mte.c  | 36 +---
 arch/arm64/kvm/arm.c |  9 +++
 arch/arm64/kvm/hyp/exception.c   |  3 ++-
 arch/arm64/kvm/mmu.c | 16 +
 arch/arm64/kvm/sys_regs.c|  6 -
 include/uapi/linux/kvm.h |  1 +
 9 files changed, 62 insertions(+), 17 deletions(-)

diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index f612c090f2e4..6bf776c2399c 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -84,6 +84,9 @@ static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
if (cpus_have_const_cap(ARM64_MISMATCHED_CACHE_TYPE) ||
vcpu_el1_is_32bit(vcpu))
vcpu->arch.hcr_el2 |= HCR_TID2;
+
+   if (kvm_has_mte(vcpu->kvm))
+   vcpu->arch.hcr_el2 |= HCR_ATA;
 }
 
 static inline unsigned long *vcpu_hcr(struct kvm_vcpu *vcpu)
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 51590a397e4b..1ca5785fb0e9 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -132,6 +132,8 @@ struct kvm_arch {
 
u8 pfr0_csv2;
u8 pfr0_csv3;
+   /* Memory Tagging Extension enabled for the guest */
+   bool mte_enabled;
 };
 
 struct kvm_vcpu_fault_info {
@@ -749,6 +751,7 @@ bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu);
 #define kvm_arm_vcpu_sve_finalized(vcpu) \
((vcpu)->arch.flags & KVM_ARM64_VCPU_SVE_FINALIZED)
 
+#define kvm_has_mte(kvm) (system_supports_mte() && (kvm)->arch.mte_enabled)
 #define kvm_vcpu_has_pmu(vcpu) \
(test_bit(KVM_ARM_VCPU_PMU_V3, (vcpu)->arch.features))
 
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 501562793ce2..27416d52f6a9 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -312,7 +312,7 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
__sync_icache_dcache(pte);
 
if (system_supports_mte() &&
-   pte_present(pte) && pte_tagged(pte) && !pte_special(pte))
+   pte_present(pte) && pte_valid_user(pte) && !pte_special(pte))
mte_sync_tags(ptep, pte);
 
__check_racy_pte_update(mm, ptep, pte);
diff --git a/arch/arm64/kernel/mte.c b/arch/arm64/kernel/mte.c
index dc9ada64feed..f9e089be1603 100644
--- a/arch/arm64/kernel/mte.c
+++ b/arch/arm64/kernel/mte.c
@@ -25,27 +25,33 @@
 
 u64 gcr_kernel_excl __ro_after_init;
 
-static void mte_sync_page_tags(struct page *page, pte_t *ptep, bool check_swap)
+static void mte_sync_page_tags(struct page *page, pte_t *ptep, bool check_swap,
+  bool pte_is_tagged)
 {
pte_t old_pte = READ_ONCE(*ptep);
 
if (check_swap && is_swap_pte(old_pte)) {
swp_entry_t entry = pte_to_swp_entry(old_pte);
 
-   if (!non_swap_entry(entry) && mte_restore_tags(entry, page))
+   if (!non_swap_entry(entry) && mte_restore_tags(entry, page)) {
+   set_bit(PG_mte_tagged, &page->flags);
    return;
+   }
}
 
-   page_kasan_tag_reset(page);
-   /*
-* We need smp_wmb() in between setting the flags and clearing the
-* tags because if another thread reads page->flags and builds a
-* tagged address out of it, there is an actual dependency to the
-* memory access, but on the current thread we do not guarantee that
-* the new page->flags are visible before the tags were updated.
-*/
-   smp_wmb();
-   mte_clear_page_tags(page_address(page));
+   if (pte_is_tagged) {
+       set_bit(PG_mte_tagged, &page->flags);
+       page_kasan_tag_reset(page);
+       /*
+        * We need smp_wmb() in between setting the flags and clearing the
+        * tags because if another thread reads page->flags and builds a
+        * tagged address out of it, there is an actual dependency to the
+        * memory access, but on the current thread we do not guarantee that
+        * the new page->flags are visible before the tags were updated.
+        */
+       smp_wmb();
+       mte_clear_page_tags(page_address(page));
+   }
 }
 
 void mte_sync_tags(pte_t *ptep, pte_t pte)
@@ -53,11 +59,13 @@ void
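
The final hunk is cut off in this archive. For reference, a hedged 
sketch of what the updated mte_sync_tags() presumably looks like after 
this change, reconstructed from the callee changes above; the 
pte_is_tagged derivation and the test_bit() guard are assumptions, not 
quoted patch text:

	void mte_sync_tags(pte_t *ptep, pte_t pte)
	{
		struct page *page = pte_page(pte);
		long i, nr_pages = compound_nr(page);
		bool check_swap = nr_pages == 1;
		/* assumed: the PTE decides whether tags must be initialised */
		bool pte_is_tagged = pte_tagged(pte);

		/* if PG_mte_tagged is already set, tags are initialised */
		for (i = 0; i < nr_pages; i++, page++) {
			if (!test_bit(PG_mte_tagged, &page->flags))
				mte_sync_page_tags(page, ptep, check_swap,
						   pte_is_tagged);
		}
	}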