Re: [Xen-devel] [PATCH v4] xen/privcmd: add IOCTL_PRIVCMD_MMAP_RESOURCE

2018-04-11  Paul Durrant
> -----Original Message-----
> From: Julien Grall [mailto:julien.gr...@arm.com]
> Sent: 11 April 2018 10:46
> To: Paul Durrant ; xen-devel@lists.xenproject.org;
> linux-arm-ker...@lists.infradead.org; linux-ker...@vger.kernel.org
> Cc: Juergen Gross ; Thomas Gleixner
> ; Stefano Stabellini ; Ingo
> Molnar 
> Subject: Re: [PATCH v4] xen/privcmd: add
> IOCTL_PRIVCMD_MMAP_RESOURCE
> 
> Hi,
> 
> On 10/04/18 08:58, Paul Durrant wrote:
> > +static long privcmd_ioctl_mmap_resource(struct file *file, void __user *udata)
> > +{
> > +   struct privcmd_data *data = file->private_data;
> > +   struct mm_struct *mm = current->mm;
> > +   struct vm_area_struct *vma;
> > +   struct privcmd_mmap_resource kdata;
> > +   xen_pfn_t *pfns = NULL;
> > +   struct xen_mem_acquire_resource xdata;
> > +   int rc;
> > +
> > +   if (copy_from_user(&kdata, udata, sizeof(kdata)))
> > +   return -EFAULT;
> > +
> > +   /* If restriction is in place, check the domid matches */
> > +   if (data->domid != DOMID_INVALID && data->domid != kdata.dom)
> > +   return -EPERM;
> > +
> > +   down_write(&mm->mmap_sem);
> > +
> > +   vma = find_vma(mm, kdata.addr);
> > +   if (!vma || vma->vm_ops != &privcmd_vm_ops) {
> > +   rc = -EINVAL;
> > +   goto out;
> > +   }
> > +
> > +   pfns = kcalloc(kdata.num, sizeof(*pfns), GFP_KERNEL);
> > +   if (!pfns) {
> > +   rc = -ENOMEM;
> > +   goto out;
> > +   }
> > +
> > +   if (xen_feature(XENFEAT_auto_translated_physmap)) {
> > +   struct page **pages;
> > +   unsigned int i;
> > +
> > +   rc = alloc_empty_pages(vma, kdata.num);
> > +   if (rc < 0)
> > +   goto out;
> > +
> > +   pages = vma->vm_private_data;
> > +   for (i = 0; i < kdata.num; i++)
> > +   pfns[i] = page_to_pfn(pages[i]);
> 
> I don't think this is going to work well if the hypervisor is using a
> different page granularity than the kernel.
> 
> Imagine Xen is using 4K pages but the kernel 64K. You would end up with
> the resource not mapped contiguously in memory.

Good point. I do need to take account of the kernel page size in this case.
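
Perhaps something like this (an untested sketch, leaning on the
page_to_xen_pfn() and XEN_PFN_PER_PAGE helpers from include/xen/page.h)
would cover a kernel page size larger than Xen's:

	if (xen_feature(XENFEAT_auto_translated_physmap)) {
		unsigned int nr = DIV_ROUND_UP(kdata.num, XEN_PFN_PER_PAGE);
		struct page **pages;
		unsigned int i;

		/* Allocate whole kernel pages; each one covers
		 * XEN_PFN_PER_PAGE frame-sized (4K) chunks. */
		rc = alloc_empty_pages(vma, nr);
		if (rc < 0)
			goto out;

		pages = vma->vm_private_data;

		for (i = 0; i < kdata.num; i++) {
			xen_pfn_t pfn =
				page_to_xen_pfn(pages[i / XEN_PFN_PER_PAGE]);

			/* Hand Xen frame-granular pfns, contiguous
			 * within each kernel page. */
			pfns[i] = pfn + (i % XEN_PFN_PER_PAGE);
		}
	}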

  Paul

> 
> Cheers,
> 
> --
> Julien Grall

Re: [Xen-devel] [PATCH v4] xen/privcmd: add IOCTL_PRIVCMD_MMAP_RESOURCE

2018-04-11  Julien Grall

Hi,

On 10/04/18 08:58, Paul Durrant wrote:

+static long privcmd_ioctl_mmap_resource(struct file *file, void __user *udata)
+{
+   struct privcmd_data *data = file->private_data;
+   struct mm_struct *mm = current->mm;
+   struct vm_area_struct *vma;
+   struct privcmd_mmap_resource kdata;
+   xen_pfn_t *pfns = NULL;
+   struct xen_mem_acquire_resource xdata;
+   int rc;
+
+   if (copy_from_user(&kdata, udata, sizeof(kdata)))
+   return -EFAULT;
+
+   /* If restriction is in place, check the domid matches */
+   if (data->domid != DOMID_INVALID && data->domid != kdata.dom)
+   return -EPERM;
+
+   down_write(&mm->mmap_sem);
+
+   vma = find_vma(mm, kdata.addr);
+   if (!vma || vma->vm_ops != &privcmd_vm_ops) {
+   rc = -EINVAL;
+   goto out;
+   }
+
+   pfns = kcalloc(kdata.num, sizeof(*pfns), GFP_KERNEL);
+   if (!pfns) {
+   rc = -ENOMEM;
+   goto out;
+   }
+
+   if (xen_feature(XENFEAT_auto_translated_physmap)) {
+   struct page **pages;
+   unsigned int i;
+
+   rc = alloc_empty_pages(vma, kdata.num);
+   if (rc < 0)
+   goto out;
+
+   pages = vma->vm_private_data;
+   for (i = 0; i < kdata.num; i++)
+   pfns[i] = page_to_pfn(pages[i]);


I don't think this is going to work well if the hypervisor is using a
different page granularity than the kernel.

Imagine Xen is using 4K pages but the kernel 64K. You would end up with
the resource not mapped contiguously in memory.

Cheers,

--
Julien Grall


[Xen-devel] [PATCH v4] xen/privcmd: add IOCTL_PRIVCMD_MMAP_RESOURCE

2018-04-10  Paul Durrant
My recent Xen patch series introduces a new HYPERVISOR_memory_op to
support direct priv-mapping of certain guest resources (such as ioreq
pages, used by emulators) by a tools domain, rather than having to access
such resources via the guest P2M.

This patch adds the necessary infrastructure to the privcmd driver and
Xen MMU code to support direct resource mapping.
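
For illustration, a tools-domain caller might drive the new ioctl roughly
as follows. This is only a sketch: the header paths, the 4K frame size
and the resource type/id values are assumptions, not part of this patch.

#include <fcntl.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <xen/privcmd.h>	/* IOCTL_PRIVCMD_MMAP_RESOURCE */

/* Map 'num' frames of a guest resource into our address space. */
static void *map_resource(int fd, domid_t dom, uint32_t type,
			  uint32_t id, uint64_t num)
{
	struct privcmd_mmap_resource mr = { 0 };
	size_t len = num << 12;		/* 4K Xen frames */
	void *addr;

	/* Reserve a privcmd-backed VA range to receive the mapping. */
	addr = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (addr == MAP_FAILED)
		return NULL;

	mr.dom = dom;		/* target domain */
	mr.type = type;		/* e.g. ioreq server pages */
	mr.id = id;		/* which instance of the resource */
	mr.idx = 0;		/* start from the first frame */
	mr.num = num;
	mr.addr = (uint64_t)(uintptr_t)addr;

	if (ioctl(fd, IOCTL_PRIVCMD_MMAP_RESOURCE, &mr)) {
		munmap(addr, len);
		return NULL;
	}

	return addr;
}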

NOTE: The adjustment in the MMU code is partially cosmetic. Xen will now
  allow a PV tools domain to map guest pages either by GFN or MFN, thus
  the term 'mfn' has been swapped for 'pfn' in the lower layers of the
  remap code.
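
The distinction surfaces as two thin wrappers differing only in the
no_translate flag passed to do_remap_pfn() (a sketch of the shape, based
on the hunks below; the real functions also carry the auto-translate
feature checks):

int xen_remap_domain_gfn_array(struct vm_area_struct *vma,
			       unsigned long addr,
			       xen_pfn_t *gfn, int nr,
			       int *err_ptr, pgprot_t prot,
			       unsigned int domid, struct page **pages)
{
	/* gfn: Xen translates through the guest P2M, as before */
	return do_remap_pfn(vma, addr, gfn, nr, err_ptr, prot, domid,
			    false, pages);
}

int xen_remap_domain_mfn_array(struct vm_area_struct *vma,
			       unsigned long addr,
			       xen_pfn_t *mfn, int nr,
			       int *err_ptr, pgprot_t prot,
			       unsigned int domid, struct page **pages)
{
	/* mfn: request MMU_PT_UPDATE_NO_TRANSLATE, i.e. map raw MFNs */
	return do_remap_pfn(vma, addr, mfn, nr, err_ptr, prot, domid,
			    true, pages);
}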

Signed-off-by: Paul Durrant 
Reviewed-by: Boris Ostrovsky 
---
Cc: Juergen Gross 
Cc: Thomas Gleixner 
Cc: Ingo Molnar 
Cc: Stefano Stabellini 

v4:
 - Remove stray line of debug that causes a build warning on ARM 32-bit

v3:
 - Address comments from Boris
 - Fix ARM build

v2:
 - Fix bug when mapping multiple pages of a resource
---
 arch/arm/xen/enlighten.c   |  11 
 arch/x86/xen/mmu.c |  60 +--
 drivers/xen/privcmd.c  | 128 +
 include/uapi/xen/privcmd.h |  11 
 include/xen/interface/memory.h |  66 +
 include/xen/interface/xen.h|   7 ++-
 include/xen/xen-ops.h  |  24 +++-
 7 files changed, 286 insertions(+), 21 deletions(-)

diff --git a/arch/arm/xen/enlighten.c b/arch/arm/xen/enlighten.c
index ba7f4c8f5c3e..8073625371f5 100644
--- a/arch/arm/xen/enlighten.c
+++ b/arch/arm/xen/enlighten.c
@@ -89,6 +89,17 @@ int xen_unmap_domain_gfn_range(struct vm_area_struct *vma,
 }
 EXPORT_SYMBOL_GPL(xen_unmap_domain_gfn_range);
 
+/* Not used by XENFEAT_auto_translated guests. */
+int xen_remap_domain_mfn_array(struct vm_area_struct *vma,
+  unsigned long addr,
+  xen_pfn_t *mfn, int nr,
+  int *err_ptr, pgprot_t prot,
+  unsigned int domid, struct page **pages)
+{
+   return -ENOSYS;
+}
+EXPORT_SYMBOL_GPL(xen_remap_domain_mfn_array);
+
 static void xen_read_wallclock(struct timespec64 *ts)
 {
u32 version;
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index d33e7dbe3129..af2960cb7a3e 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -65,37 +65,44 @@ static void xen_flush_tlb_all(void)
 #define REMAP_BATCH_SIZE 16
 
 struct remap_data {
-   xen_pfn_t *mfn;
+   xen_pfn_t *pfn;
bool contiguous;
+   bool no_translate;
pgprot_t prot;
struct mmu_update *mmu_update;
 };
 
-static int remap_area_mfn_pte_fn(pte_t *ptep, pgtable_t token,
+static int remap_area_pfn_pte_fn(pte_t *ptep, pgtable_t token,
 unsigned long addr, void *data)
 {
struct remap_data *rmd = data;
-   pte_t pte = pte_mkspecial(mfn_pte(*rmd->mfn, rmd->prot));
+   pte_t pte = pte_mkspecial(mfn_pte(*rmd->pfn, rmd->prot));
 
-   /* If we have a contiguous range, just update the mfn itself,
-  else update pointer to be "next mfn". */
+   /*
+* If we have a contiguous range, just update the pfn itself,
+* else update pointer to be "next pfn".
+*/
if (rmd->contiguous)
-   (*rmd->mfn)++;
+   (*rmd->pfn)++;
else
-   rmd->mfn++;
+   rmd->pfn++;
 
-   rmd->mmu_update->ptr = virt_to_machine(ptep).maddr | MMU_NORMAL_PT_UPDATE;
+   rmd->mmu_update->ptr = virt_to_machine(ptep).maddr;
+   rmd->mmu_update->ptr |= rmd->no_translate ?
+   MMU_PT_UPDATE_NO_TRANSLATE :
+   MMU_NORMAL_PT_UPDATE;
rmd->mmu_update->val = pte_val_ma(pte);
rmd->mmu_update++;
 
return 0;
 }
 
-static int do_remap_gfn(struct vm_area_struct *vma,
+static int do_remap_pfn(struct vm_area_struct *vma,
unsigned long addr,
-   xen_pfn_t *gfn, int nr,
+   xen_pfn_t *pfn, int nr,
int *err_ptr, pgprot_t prot,
-   unsigned domid,
+   unsigned int domid,
+   bool no_translate,
struct page **pages)
 {
int err = 0;
@@ -106,11 +113,14 @@ static int do_remap_gfn(struct vm_area_struct *vma,
 
BUG_ON(!((vma->vm_flags & (VM_PFNMAP | VM_IO)) == (VM_PFNMAP | VM_IO)));
 
-   rmd.mfn = gfn;
+   rmd.pfn = pfn;
rmd.prot = prot;
-   /* We use the err_ptr to indicate if there we are doing a contiguous
-* mapping or a discontigious mapping. */
+   /*
+* We use the err_ptr to indicate if we are doing a contiguous
+* mapping or a discontiguous mapping.
+*/
rmd.contiguous = !err_ptr;
+   rmd.no_translate = no_translate;
 
while (nr) {
int index = 0;
@@ -121,7 +131,7 @@ static int do_remap_gfn(struct vm_area_struct *vma,
 
rmd.mmu_