On Tue, Oct 19, 2010 at 01:05:22PM +0200, Gleb Natapov wrote:
> On Mon, Oct 18, 2010 at 08:24:03PM -0200, Marcelo Tosatti wrote:
> >
> > Only attempt async pagefault if address is contained within vma.
> >
> > Signed-off-by: Marcelo Tosatti <[email protected]>
> >
> > diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
> > index 5d57ec9..a9cfbd2 100644
> > --- a/virt/kvm/kvm_main.c
> > +++ b/virt/kvm/kvm_main.c
> > @@ -996,8 +996,9 @@ static pfn_t hva_to_pfn(struct kvm *kvm, unsigned long
> > addr, bool atomic,
> >
> > if (vma == NULL || addr < vma->vm_start ||
> > !(vma->vm_flags & VM_PFNMAP)) {
> > - if (async && !(vma->vm_flags & VM_PFNMAP) &&
> > - (vma->vm_flags & VM_WRITE))
> > + if (async && vma && !(vma->vm_flags & VM_PFNMAP) &&
> actually we should check for addr < vma->vm_start here too. find_vma()
> is very strange function.
>
> What about patch below (only compile tested for now)?
>
Works for me.
> diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
> index 796b06e..29e40e5 100644
> --- a/virt/kvm/kvm_main.c
> +++ b/virt/kvm/kvm_main.c
> @@ -954,6 +954,12 @@ unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
> }
> EXPORT_SYMBOL_GPL(gfn_to_hva);
>
> +static pfn_t get_fault_pfn(void)
> +{
> + get_page(fault_page);
> + return fault_pfn;
> +}
> +
> static pfn_t hva_to_pfn(struct kvm *kvm, unsigned long addr, bool atomic,
> bool *async)
> {
> @@ -983,7 +989,7 @@ static pfn_t hva_to_pfn(struct kvm *kvm, unsigned long
> addr, bool atomic,
> struct vm_area_struct *vma;
>
> if (atomic)
> - goto return_fault_page;
> + return get_fault_pfn();
>
> 	down_read(&current->mm->mmap_sem);
> if (is_hwpoison_address(addr)) {
> @@ -992,22 +998,20 @@ static pfn_t hva_to_pfn(struct kvm *kvm, unsigned long
> addr, bool atomic,
> return page_to_pfn(hwpoison_page);
> }
>
> - vma = find_vma(current->mm, addr);
> + vma = find_vma_intersection(current->mm, addr, addr+1);
>
> - if (vma == NULL || addr < vma->vm_start ||
> - !(vma->vm_flags & VM_PFNMAP)) {
> - if (async && !(vma->vm_flags & VM_PFNMAP) &&
> - (vma->vm_flags & VM_WRITE))
> + if (vma == NULL)
> + return get_fault_pfn();
> + if ((vma->vm_flags & VM_PFNMAP)) {
> + pfn = ((addr - vma->vm_start) >> PAGE_SHIFT) +
> + vma->vm_pgoff;
> + BUG_ON(!kvm_is_mmio_pfn(pfn));
> + } else {
> + if (async && (vma->vm_flags & VM_WRITE))
> *async = true;
> -		up_read(&current->mm->mmap_sem);
> -return_fault_page:
> - get_page(fault_page);
> - return page_to_pfn(fault_page);
> + pfn = get_fault_pfn();
> }
> -
> - pfn = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
> 	up_read(&current->mm->mmap_sem);
> - BUG_ON(!kvm_is_mmio_pfn(pfn));
> } else
> pfn = page_to_pfn(page[0]);
>
> --
> Gleb.
> --
> To unsubscribe from this list: send the line "unsubscribe kvm" in
> the body of a message to [email protected]
> More majordomo info at http://vger.kernel.org/majordomo-info.html
--
Gleb.
--
To unsubscribe from this list: send the line "unsubscribe kvm" in
the body of a message to [email protected]
More majordomo info at http://vger.kernel.org/majordomo-info.html