The page table walker does not currently visit block PTEs in post-order.
However, there are cases where doing so would be beneficial. For example,
when breaking a 1G block PTE into a full tree, doing it in post-order
avoids visiting the newly created tree.

Allow post order visits of block PTEs. This will be used in a subsequent
commit for eagerly breaking huge pages.

Signed-off-by: Ricardo Koller <ricar...@google.com>
---
 arch/arm64/include/asm/kvm_pgtable.h |  4 ++--
 arch/arm64/kvm/hyp/nvhe/setup.c      |  2 +-
 arch/arm64/kvm/hyp/pgtable.c         | 25 ++++++++++++-------------
 3 files changed, 15 insertions(+), 16 deletions(-)

diff --git a/arch/arm64/include/asm/kvm_pgtable.h b/arch/arm64/include/asm/kvm_pgtable.h
index e2edeed462e8..d2e4a5032146 100644
--- a/arch/arm64/include/asm/kvm_pgtable.h
+++ b/arch/arm64/include/asm/kvm_pgtable.h
@@ -255,7 +255,7 @@ struct kvm_pgtable {
  *                                     entries.
  * @KVM_PGTABLE_WALK_TABLE_PRE:                Visit table entries before their
  *                                     children.
- * @KVM_PGTABLE_WALK_TABLE_POST:       Visit table entries after their
+ * @KVM_PGTABLE_WALK_POST:             Visit leaf or table entries after their
  *                                     children.
  * @KVM_PGTABLE_WALK_SHARED:           Indicates the page-tables may be shared
  *                                     with other software walkers.
@@ -263,7 +263,7 @@ struct kvm_pgtable {
 enum kvm_pgtable_walk_flags {
        KVM_PGTABLE_WALK_LEAF                   = BIT(0),
        KVM_PGTABLE_WALK_TABLE_PRE              = BIT(1),
-       KVM_PGTABLE_WALK_TABLE_POST             = BIT(2),
+       KVM_PGTABLE_WALK_POST                   = BIT(2),
        KVM_PGTABLE_WALK_SHARED                 = BIT(3),
 };
 
diff --git a/arch/arm64/kvm/hyp/nvhe/setup.c b/arch/arm64/kvm/hyp/nvhe/setup.c
index b47d969ae4d3..b0c1618d053b 100644
--- a/arch/arm64/kvm/hyp/nvhe/setup.c
+++ b/arch/arm64/kvm/hyp/nvhe/setup.c
@@ -265,7 +265,7 @@ static int fix_hyp_pgtable_refcnt(void)
 {
        struct kvm_pgtable_walker walker = {
                .cb     = fix_hyp_pgtable_refcnt_walker,
-               .flags  = KVM_PGTABLE_WALK_LEAF | KVM_PGTABLE_WALK_TABLE_POST,
+               .flags  = KVM_PGTABLE_WALK_LEAF | KVM_PGTABLE_WALK_POST,
                .arg    = pkvm_pgtable.mm_ops,
        };
 
diff --git a/arch/arm64/kvm/hyp/pgtable.c b/arch/arm64/kvm/hyp/pgtable.c
index b16107bf917c..1b371f6dbac2 100644
--- a/arch/arm64/kvm/hyp/pgtable.c
+++ b/arch/arm64/kvm/hyp/pgtable.c
@@ -206,16 +206,15 @@ static inline int __kvm_pgtable_visit(struct kvm_pgtable_walk_data *data,
        if (!table) {
                data->addr = ALIGN_DOWN(data->addr, kvm_granule_size(level));
                data->addr += kvm_granule_size(level);
-               goto out;
+       } else {
+               childp = (kvm_pteref_t)kvm_pte_follow(ctx.old, mm_ops);
+               ret = __kvm_pgtable_walk(data, mm_ops, childp, level + 1);
+               if (ret)
+                       goto out;
        }
 
-       childp = (kvm_pteref_t)kvm_pte_follow(ctx.old, mm_ops);
-       ret = __kvm_pgtable_walk(data, mm_ops, childp, level + 1);
-       if (ret)
-               goto out;
-
-       if (ctx.flags & KVM_PGTABLE_WALK_TABLE_POST)
-               ret = kvm_pgtable_visitor_cb(data, &ctx, KVM_PGTABLE_WALK_TABLE_POST);
+       if (ctx.flags & KVM_PGTABLE_WALK_POST)
+               ret = kvm_pgtable_visitor_cb(data, &ctx, KVM_PGTABLE_WALK_POST);
 
 out:
        return ret;
@@ -494,7 +493,7 @@ u64 kvm_pgtable_hyp_unmap(struct kvm_pgtable *pgt, u64 addr, u64 size)
        struct kvm_pgtable_walker walker = {
                .cb     = hyp_unmap_walker,
                .arg    = &unmapped,
-               .flags  = KVM_PGTABLE_WALK_LEAF | KVM_PGTABLE_WALK_TABLE_POST,
+               .flags  = KVM_PGTABLE_WALK_LEAF | KVM_PGTABLE_WALK_POST,
        };
 
        if (!pgt->mm_ops->page_count)
@@ -542,7 +541,7 @@ void kvm_pgtable_hyp_destroy(struct kvm_pgtable *pgt)
 {
        struct kvm_pgtable_walker walker = {
                .cb     = hyp_free_walker,
-               .flags  = KVM_PGTABLE_WALK_LEAF | KVM_PGTABLE_WALK_TABLE_POST,
+               .flags  = KVM_PGTABLE_WALK_LEAF | KVM_PGTABLE_WALK_POST,
        };
 
        WARN_ON(kvm_pgtable_walk(pgt, 0, BIT(pgt->ia_bits), &walker));
@@ -1003,7 +1002,7 @@ int kvm_pgtable_stage2_unmap(struct kvm_pgtable *pgt, u64 addr, u64 size)
        struct kvm_pgtable_walker walker = {
                .cb     = stage2_unmap_walker,
                .arg    = pgt,
-               .flags  = KVM_PGTABLE_WALK_LEAF | KVM_PGTABLE_WALK_TABLE_POST,
+               .flags  = KVM_PGTABLE_WALK_LEAF | KVM_PGTABLE_WALK_POST,
        };
 
        return kvm_pgtable_walk(pgt, addr, size, &walker);
@@ -1234,7 +1233,7 @@ void kvm_pgtable_stage2_destroy(struct kvm_pgtable *pgt)
        struct kvm_pgtable_walker walker = {
                .cb     = stage2_free_walker,
                .flags  = KVM_PGTABLE_WALK_LEAF |
-                         KVM_PGTABLE_WALK_TABLE_POST,
+                         KVM_PGTABLE_WALK_POST,
        };
 
        WARN_ON(kvm_pgtable_walk(pgt, 0, BIT(pgt->ia_bits), &walker));
@@ -1249,7 +1248,7 @@ void kvm_pgtable_stage2_free_removed(struct kvm_pgtable_mm_ops *mm_ops, void *pg
        struct kvm_pgtable_walker walker = {
                .cb     = stage2_free_walker,
                .flags  = KVM_PGTABLE_WALK_LEAF |
-                         KVM_PGTABLE_WALK_TABLE_POST,
+                         KVM_PGTABLE_WALK_POST,
        };
        struct kvm_pgtable_walk_data data = {
                .walker = &walker,
-- 
2.38.1.431.g37b22c650d-goog

_______________________________________________
kvmarm mailing list
kvmarm@lists.cs.columbia.edu
https://lists.cs.columbia.edu/mailman/listinfo/kvmarm

Reply via email to