On Monday 26 Jul 2021 at 11:35:10 (+0100), Marc Zyngier wrote:
> On Mon, 26 Jul 2021 10:28:53 +0100,
> Quentin Perret <[email protected]> wrote:
> > +static int host_stage2_find_range(u64 addr, struct kvm_mem_range *range)
> 
> nit: I find 'find_range' a bit odd. We already have found a
> range. We're just trying to narrow it down to something that fits in a
> single block mapping. How about 'host_stage2_adjust_range'?

Ack.

> > +{
> > +   u64 granule, start, end;
> > +   kvm_pte_t pte;
> > +   u32 level;
> > +   int ret;
> > +
> > +   ret = kvm_pgtable_get_leaf(&host_kvm.pgt, addr, &pte, &level);
> > +   if (ret)
> > +           return ret;
> > +
> > +   if (kvm_pte_valid(pte))
> > +           return -EAGAIN;
> > +
> > +   if (pte)
> > +           return -EPERM;
> > +
> > +   do {
> > +           granule = kvm_granule_size(level);
> > +           start = ALIGN_DOWN(addr, granule);
> > +           end = start + granule;
> > +           level++;
> > +   } while ((level < KVM_PGTABLE_MAX_LEVELS) &&
> > +                   (!kvm_level_supports_block_mapping(level) ||
> > +                    start < range->start || range->end < end));
> > +
> 
> This expression does my head in. You are trying to find the largest
> block mapping that entirely fits in range, right? Can we just express
> that directly (with a global negation for the purpose of the loop)?
> 
>       do {
>               [...]
>       } while (level < KVM_PGTABLE_MAX_LEVELS &&
>                !(kvm_level_supports_block_mapping(level) &&
>                  start >= range->start &&
>                  end <= range->end));
> 
> I personally find this much more readable, because it expresses the
> condition we are looking for rather than a lot of conditions forcing
> us to continue.
> 
> You could also use a kvm_mem_range for the iteration, and add a helper
> that checks for the inclusion.

Something like this (untested)?

diff --git a/arch/arm64/kvm/hyp/nvhe/mem_protect.c b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
index 75273166d2c5..07d228163090 100644
--- a/arch/arm64/kvm/hyp/nvhe/mem_protect.c
+++ b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
@@ -234,9 +234,15 @@ static inline int __host_stage2_idmap(u64 start, u64 end,
                __ret;                                                  \
         })

+static inline bool range_included(struct kvm_mem_range *child,
+                                 struct kvm_mem_range *parent)
+{
+       return parent->start <= child->start && child->end <= parent->end;
+}
+
 static int host_stage2_find_range(u64 addr, struct kvm_mem_range *range)
 {
-       u64 granule, start, end;
+       struct kvm_mem_range cur;
        kvm_pte_t pte;
        u32 level;
        int ret;
@@ -252,16 +258,15 @@ static int host_stage2_find_range(u64 addr, struct kvm_mem_range *range)
                return -EPERM;

        do {
-               granule = kvm_granule_size(level);
-               start = ALIGN_DOWN(addr, granule);
-               end = start + granule;
+               u64 granule = kvm_granule_size(level);
+               cur.start = ALIGN_DOWN(addr, granule);
+               cur.end = cur.start + granule;
                level++;
        } while ((level < KVM_PGTABLE_MAX_LEVELS) &&
-                       (!kvm_level_supports_block_mapping(level) ||
-                        start < range->start || range->end < end));
+                       !(kvm_level_supports_block_mapping(level) &&
+                         range_included(&cur, range)));

-       range->start = start;
-       range->end = end;
+       *range = cur;

        return 0;
 }
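For what it's worth, here is a small standalone userspace sketch of the same
narrowing loop, purely to illustrate the intent -- kvm_granule_size(),
kvm_level_supports_block_mapping() and the starting level are stubbed/made up
here for an assumed 4K granule, 4-level configuration, so this is not kernel
code:

/*
 * Illustrative mock of the narrowing loop above, not kernel code.
 * Assumes a 4K granule, 4-level configuration.
 */
#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define KVM_PGTABLE_MAX_LEVELS	4
#define ALIGN_DOWN(x, a)	((x) & ~((uint64_t)(a) - 1))

struct kvm_mem_range {
	uint64_t start;
	uint64_t end;
};

/* Stub: level 3 = 4K, level 2 = 2M, level 1 = 1G, level 0 = 512G. */
static uint64_t kvm_granule_size(uint32_t level)
{
	return 1ULL << (12 + 9 * (KVM_PGTABLE_MAX_LEVELS - 1 - level));
}

/* Stub: with 4K pages, only level 0 cannot be mapped as a block. */
static bool kvm_level_supports_block_mapping(uint32_t level)
{
	return level != 0;
}

static bool range_included(struct kvm_mem_range *child,
			   struct kvm_mem_range *parent)
{
	return parent->start <= child->start && child->end <= parent->end;
}

/* Same loop as in the patch, with the starting level hard-coded to 0. */
static void adjust_range(uint64_t addr, struct kvm_mem_range *range)
{
	struct kvm_mem_range cur;
	uint32_t level = 0;

	do {
		uint64_t granule = kvm_granule_size(level);
		cur.start = ALIGN_DOWN(addr, granule);
		cur.end = cur.start + granule;
		level++;
	} while (level < KVM_PGTABLE_MAX_LEVELS &&
		 !(kvm_level_supports_block_mapping(level) &&
		   range_included(&cur, range)));

	*range = cur;
}

int main(void)
{
	/* A 1GiB range; an address inside it narrows to that 1GiB block. */
	struct kvm_mem_range range = { 0x40000000, 0x80000000 };

	adjust_range(0x42345678, &range);
	printf("[%#" PRIx64 " - %#" PRIx64 "]\n", range.start, range.end);
	return 0;
}

With that input it prints [0x40000000 - 0x80000000], i.e. the largest
block-sized range containing addr that still fits in the original range.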