We cannot access protected pages directly. Use ioremap() to create
a temporary mapping of such a page whenever KVM has to access it;
the mapping is destroyed on __kvm_unmap_gfn().
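
Callers are not expected to change: the existing kvm_vcpu_map() and
kvm_vcpu_unmap() helpers keep working, only the mapping mechanism
behind them differs. A rough sketch of the flow (vcpu, gfn, data,
offset and len are made-up names for illustration):

        struct kvm_host_map map;

        /* maps via ioremap_cache_force() if the page is protected */
        if (kvm_vcpu_map(vcpu, gfn, &map))
                return -EFAULT;

        memcpy(data, map.hva + offset, len);

        /* for a protected page, this tears the mapping down via iounmap() */
        kvm_vcpu_unmap(vcpu, &map, false);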

The new gfn_to_pfn_memslot_protected() interface is used to detect
whether the page is protected.
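
The interface is introduced separately in this series; the shape this
patch assumes is roughly the following sketch (not the actual
definition):

        /*
         * Like gfn_to_pfn_memslot(), but also reports through @protected
         * whether the pfn refers to protected memory that must not be
         * accessed through the direct mapping.
         */
        kvm_pfn_t gfn_to_pfn_memslot_protected(struct kvm_memory_slot *slot,
                                               gfn_t gfn, bool *protected);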

ioremap_cache_force() is a hack to bypass the IORES_MAP_SYSTEM_RAM check
in the x86 ioremap code. We need a better solution.

Signed-off-by: Kirill A. Shutemov <[email protected]>
---
 arch/x86/include/asm/io.h            |  2 ++
 arch/x86/include/asm/pgtable_types.h |  1 +
 arch/x86/mm/ioremap.c                | 16 +++++++++++++---
 include/linux/kvm_host.h             |  1 +
 virt/kvm/kvm_main.c                  | 14 +++++++++++---
 5 files changed, 28 insertions(+), 6 deletions(-)

diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
index c58d52fd7bf2..a3e1bfad1026 100644
--- a/arch/x86/include/asm/io.h
+++ b/arch/x86/include/asm/io.h
@@ -184,6 +184,8 @@ extern void __iomem *ioremap_uc(resource_size_t offset, unsigned long size);
 #define ioremap_uc ioremap_uc
 extern void __iomem *ioremap_cache(resource_size_t offset, unsigned long size);
 #define ioremap_cache ioremap_cache
+extern void __iomem *ioremap_cache_force(resource_size_t offset, unsigned long size);
+#define ioremap_cache_force ioremap_cache_force
 extern void __iomem *ioremap_prot(resource_size_t offset, unsigned long size, unsigned long prot_val);
 #define ioremap_prot ioremap_prot
 extern void __iomem *ioremap_encrypted(resource_size_t phys_addr, unsigned long size);
diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
index b6606fe6cfdf..66cc22abda7b 100644
--- a/arch/x86/include/asm/pgtable_types.h
+++ b/arch/x86/include/asm/pgtable_types.h
@@ -147,6 +147,7 @@ enum page_cache_mode {
        _PAGE_CACHE_MODE_UC       = 3,
        _PAGE_CACHE_MODE_WT       = 4,
        _PAGE_CACHE_MODE_WP       = 5,
+       _PAGE_CACHE_MODE_WB_FORCE = 6,
 
        _PAGE_CACHE_MODE_NUM      = 8
 };
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index 18c637c0dc6f..e48fc0e130b2 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -202,9 +202,12 @@ __ioremap_caller(resource_size_t phys_addr, unsigned long size,
        __ioremap_check_mem(phys_addr, size, &io_desc);
 
        /*
-        * Don't allow anybody to remap normal RAM that we're using..
+        * Don't allow anybody to remap normal RAM that we're using, unless
+        * _PAGE_CACHE_MODE_WB_FORCE is used.
         */
-       if (io_desc.flags & IORES_MAP_SYSTEM_RAM) {
+       if (pcm == _PAGE_CACHE_MODE_WB_FORCE) {
+               pcm = _PAGE_CACHE_MODE_WB;
+       } else if (io_desc.flags & IORES_MAP_SYSTEM_RAM) {
                WARN_ONCE(1, "ioremap on RAM at %pa - %pa\n",
                          &phys_addr, &last_addr);
                return NULL;
@@ -419,6 +422,13 @@ void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size)
 }
 EXPORT_SYMBOL(ioremap_cache);
 
+void __iomem *ioremap_cache_force(resource_size_t phys_addr, unsigned long size)
+{
+       return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WB_FORCE,
+                               __builtin_return_address(0), false);
+}
+EXPORT_SYMBOL(ioremap_cache_force);
+
 void __iomem *ioremap_prot(resource_size_t phys_addr, unsigned long size,
                                unsigned long prot_val)
 {
@@ -467,7 +477,7 @@ void iounmap(volatile void __iomem *addr)
        p = find_vm_area((void __force *)addr);
 
        if (!p) {
-               printk(KERN_ERR "iounmap: bad address %p\n", addr);
+               printk(KERN_ERR "iounmap: bad address %px\n", addr);
                dump_stack();
                return;
        }
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index eca18ef9b1f4..b6944f88033d 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -237,6 +237,7 @@ struct kvm_host_map {
        void *hva;
        kvm_pfn_t pfn;
        kvm_pfn_t gfn;
+       bool protected;
 };
 
 /*
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 8bcf3201304a..71aac117357f 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -2091,6 +2091,7 @@ static int __kvm_map_gfn(struct kvm_memslots *slots, gfn_t gfn,
        void *hva = NULL;
        struct page *page = KVM_UNMAPPED_PAGE;
        struct kvm_memory_slot *slot = __gfn_to_memslot(slots, gfn);
+       bool protected = false;
        u64 gen = slots->generation;
 
        if (!map)
@@ -2107,12 +2108,16 @@ static int __kvm_map_gfn(struct kvm_memslots *slots, gfn_t gfn,
        } else {
                if (atomic)
                        return -EAGAIN;
-               pfn = gfn_to_pfn_memslot(slot, gfn);
+               pfn = gfn_to_pfn_memslot_protected(slot, gfn, &protected);
        }
        if (is_error_noslot_pfn(pfn))
                return -EINVAL;
 
-       if (pfn_valid(pfn)) {
+       if (protected) {
+               if (atomic)
+                       return -EAGAIN;
+               hva = ioremap_cache_force(pfn_to_hpa(pfn), PAGE_SIZE);
+       } else if (pfn_valid(pfn)) {
                page = pfn_to_page(pfn);
                if (atomic)
                        hva = kmap_atomic(page);
@@ -2133,6 +2138,7 @@ static int __kvm_map_gfn(struct kvm_memslots *slots, gfn_t gfn,
        map->hva = hva;
        map->pfn = pfn;
        map->gfn = gfn;
+       map->protected = protected;
 
        return 0;
 }
@@ -2163,7 +2169,9 @@ static void __kvm_unmap_gfn(struct kvm_memory_slot *memslot,
        if (!map->hva)
                return;
 
-       if (map->page != KVM_UNMAPPED_PAGE) {
+       if (map->protected) {
+               iounmap(map->hva);
+       } else if (map->page != KVM_UNMAPPED_PAGE) {
                if (atomic)
                        kunmap_atomic(map->hva);
                else
-- 
2.26.2
