On Mon, Jun 20, 2011 at 11:29:47PM +0900, Takuya Yoshikawa wrote:
> From: Takuya Yoshikawa <[email protected]>
> 
> Avoid two step jump to the error handling part.  This eliminates the use
> of the variables present and rsvd_fault.
> 
> We also use the const type qualifier to show that write/user/fetch_fault
> do not change in the function.
> 
> Both of these were suggested by Ingo Molnar.
> 
> Cc: Ingo Molnar <[email protected]>
> Signed-off-by: Takuya Yoshikawa <[email protected]>
> ---
>  v2-v3: only changelog update
> 
>  arch/x86/kvm/paging_tmpl.h |   64 +++++++++++++++++++------------------------
>  1 files changed, 28 insertions(+), 36 deletions(-)
> 
> diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
> index 1caeb4d..137aa45 100644
> --- a/arch/x86/kvm/paging_tmpl.h
> +++ b/arch/x86/kvm/paging_tmpl.h
> @@ -125,18 +125,17 @@ static int FNAME(walk_addr_generic)(struct guest_walker 
> *walker,
>       gfn_t table_gfn;
>       unsigned index, pt_access, uninitialized_var(pte_access);
>       gpa_t pte_gpa;
> -     bool eperm, present, rsvd_fault;
> -     int offset, write_fault, user_fault, fetch_fault;
> -
> -     write_fault = access & PFERR_WRITE_MASK;
> -     user_fault = access & PFERR_USER_MASK;
> -     fetch_fault = access & PFERR_FETCH_MASK;
> +     bool eperm;
> +     int offset;
> +     const int write_fault = access & PFERR_WRITE_MASK;
> +     const int user_fault  = access & PFERR_USER_MASK;
> +     const int fetch_fault = access & PFERR_FETCH_MASK;
> +     u16 errcode = 0;
>  
>       trace_kvm_mmu_pagetable_walk(addr, write_fault, user_fault,
>                                    fetch_fault);
>  walk:
> -     present = true;
> -     eperm = rsvd_fault = false;
> +     eperm = false;
>       walker->level = mmu->root_level;
>       pte           = mmu->get_cr3(vcpu);
>  
> @@ -145,7 +144,7 @@ walk:
>               pte = kvm_pdptr_read_mmu(vcpu, mmu, (addr >> 30) & 3);
>               trace_kvm_mmu_paging_element(pte, walker->level);
>               if (!is_present_gpte(pte)) {
> -                     present = false;
> +                     errcode |= PFERR_PRESENT_MASK;
>                       goto error;
>               }
>               --walker->level;
> @@ -171,34 +170,34 @@ walk:
>               real_gfn = mmu->translate_gpa(vcpu, gfn_to_gpa(table_gfn),
>                                             PFERR_USER_MASK|PFERR_WRITE_MASK);
>               if (unlikely(real_gfn == UNMAPPED_GVA)) {
> -                     present = false;
> -                     break;
> +                     errcode |= PFERR_PRESENT_MASK;
> +                     goto error;
>               }
>               real_gfn = gpa_to_gfn(real_gfn);
>  
>               host_addr = gfn_to_hva(vcpu->kvm, real_gfn);
>               if (unlikely(kvm_is_error_hva(host_addr))) {
> -                     present = false;
> -                     break;
> +                     errcode |= PFERR_PRESENT_MASK;
> +                     goto error;
>               }
>  
>               ptep_user = (pt_element_t __user *)((void *)host_addr + offset);
>               if (unlikely(__copy_from_user(&pte, ptep_user, sizeof(pte)))) {
> -                     present = false;
> -                     break;
> +                     errcode |= PFERR_PRESENT_MASK;
> +                     goto error;
>               }
>  
>               trace_kvm_mmu_paging_element(pte, walker->level);
>  
>               if (unlikely(!is_present_gpte(pte))) {
> -                     present = false;
> -                     break;
> +                     errcode |= PFERR_PRESENT_MASK;
> +                     goto error;
>               }

Assignment of PFERR_PRESENT_MASK is inverted: the P bit in the page-fault
error code is supposed to be *set* when the fault occurred on a present
page, but here you OR it in exactly on the !is_present_gpte() /
unmapped-gfn paths — i.e. when the entry is *not* present. Those paths
should report the bit clear (not-present fault), and only the permission
(eperm) path should set PFERR_PRESENT_MASK.

--
To unsubscribe from this list: send the line "unsubscribe kvm" in
the body of a message to [email protected]
More majordomo info at  http://vger.kernel.org/majordomo-info.html

Reply via email to