On 08/11/2012 01:51 AM, Marcelo Tosatti wrote:
> On Tue, Aug 07, 2012 at 05:51:05PM +0800, Xiao Guangrong wrote:
>> We do too many things in hva_to_pfn; this patch reorganizes the code
>> to make it more readable.
>>
>> Signed-off-by: Xiao Guangrong <xiaoguangr...@linux.vnet.ibm.com>
>> ---
>>  virt/kvm/kvm_main.c |  159 +++++++++++++++++++++++++++++++--------------------
>>  1 files changed, 97 insertions(+), 62 deletions(-)
>>
>> diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
>> index 26ffc87..dd01bcb 100644
>> --- a/virt/kvm/kvm_main.c
>> +++ b/virt/kvm/kvm_main.c
>> @@ -1043,83 +1043,118 @@ static inline int check_user_page_hwpoison(unsigned long addr)
>>      return rc == -EHWPOISON;
>>  }
>>
>> -static pfn_t hva_to_pfn(unsigned long addr, bool atomic, bool *async,
>> -                    bool write_fault, bool *writable)
>> +/*
>> + * The atomic path to get the writable pfn which will be stored in @pfn,
>> + * true indicates success, otherwise false is returned.
>> + */
>> +static bool hva_to_pfn_fast(unsigned long addr, bool atomic, bool *async,
>> +                        bool write_fault, bool *writable, pfn_t *pfn)
>>  {
>>      struct page *page[1];
>> -    int npages = 0;
>> -    pfn_t pfn;
>> +    int npages;
>>
>> -    /* we can do it either atomically or asynchronously, not both */
>> -    BUG_ON(atomic && async);
>> +    if (!(async || atomic))
>> +            return false;
>>
>> -    BUG_ON(!write_fault && !writable);
>> +    npages = __get_user_pages_fast(addr, 1, 1, page);
>> +    if (npages == 1) {
>> +            *pfn = page_to_pfn(page[0]);
>>
>> -    if (writable)
>> -            *writable = true;
>> +            if (writable)
>> +                    *writable = true;
>> +            return true;
>> +    }
>> +
>> +    return false;
>> +}
>>
>> -    if (atomic || async)
>> -            npages = __get_user_pages_fast(addr, 1, 1, page);
>> +/*
>> + * The slow path to get the pfn of the specified host virtual address,
>> + * 1 indicates success, -errno is returned if error is detected.
>> + */
>> +static int hva_to_pfn_slow(unsigned long addr, bool *async, bool write_fault,
>> +                       bool *writable, pfn_t *pfn)
>> +{
>> +    struct page *page[1];
>> +    int npages = 0;
>>
>> -    if (unlikely(npages != 1) && !atomic) {
>> -            might_sleep();
>> +    might_sleep();
>>
>> -            if (writable)
>> -                    *writable = write_fault;
>> -
>> -            if (async) {
>> -                    down_read(&current->mm->mmap_sem);
>> -                    npages = get_user_page_nowait(current, current->mm,
>> -                                                 addr, write_fault, page);
>> -                    up_read(&current->mm->mmap_sem);
>> -            } else
>> -                    npages = get_user_pages_fast(addr, 1, write_fault,
>> -                                                 page);
>> -
>> -            /* map read fault as writable if possible */
>> -            if (unlikely(!write_fault) && npages == 1) {
>> -                    struct page *wpage[1];
>> -
>> -                    npages = __get_user_pages_fast(addr, 1, 1, wpage);
>> -                    if (npages == 1) {
>> -                            *writable = true;
>> -                            put_page(page[0]);
>> -                            page[0] = wpage[0];
>> -                    }
>> -                    npages = 1;
>> +    if (writable)
>> +            *writable = write_fault;
>> +
>> +    if (async) {
>> +            down_read(&current->mm->mmap_sem);
>> +            npages = get_user_page_nowait(current, current->mm,
>> +                                          addr, write_fault, page);
>> +            up_read(&current->mm->mmap_sem);
>> +    } else
>> +            npages = get_user_pages_fast(addr, 1, write_fault,
>> +                                         page);
>> +    if (npages != 1)
>> +            return npages;
> 
>  * Returns number of pages pinned. This may be fewer than the number
>  * requested. If nr_pages is 0 or negative, returns 0. If no pages
>  * were pinned, returns -errno.
>  */
> int get_user_pages_fast(unsigned long start, int nr_pages, int write,
>                         struct page **pages)
> 
> 
> Current behaviour is
> 
>         if (atomic || async)
>                 npages = __get_user_pages_fast(addr, 1, 1, page);
> 
>         if (npages != 1)
>                 slow path retry;
> 
> The changes above alter this behaviour, don't they?

Marcelo,

Sorry, I do not see why you think the logic has changed. In this patch,
the logic is:

        /* returns true on success. */
        if (hva_to_pfn_fast(addr, atomic, async, write_fault, writable, &pfn))
                return pfn;

        /* the atomic path cannot fall back to the slow path. */
        if (atomic)
                return KVM_PFN_ERR_FAULT;

        /* get the pfn via the slow path */
        npages = hva_to_pfn_slow(addr, async, write_fault, writable, &pfn);
        if (npages == 1)
                return pfn;

        /* the error-handling path. */
        ......
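
For comparison, the old flow condenses to the same shape (taken from the
lines removed above):

        if (atomic || async)
                npages = __get_user_pages_fast(addr, 1, 1, page);

        if (unlikely(npages != 1) && !atomic) {
                /* slow path: get_user_page_nowait() or get_user_pages_fast() */
                ...
        }

In both versions the fast path is tried if and only if (atomic || async),
and the slow path runs if and only if the fast path did not pin the page
and we are not atomic.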


Did I miss something?
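
To spell it out: BUG_ON(atomic && async) rules out the fourth combination,
and the three legal cases take the same paths before and after the patch.
The last two columns below are exactly the old conditions quoted above,
(atomic || async) and (npages != 1 && !atomic):

        atomic  async | fast path tried? | slow path on fast-path miss?
        ------  ----- | ---------------- | ----------------------------
          0       0   |        no        | yes
          0       1   |       yes        | yes
          1       0   |       yes        | no, return KVM_PFN_ERR_FAULT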

