From: Juerg Haefliger <juerg.haefli...@hpe.com>

swiotlb: Map the buffer if it was unmapped by XPFO

A page that XPFO has unmapped from the kernel's linear mapping cannot be
accessed through phys_to_virt(), so swiotlb_bounce() must treat it like a
highmem page: map it in temporarily and copy. Add a helper,
xpfo_page_is_unmapped(), that reports whether XPFO currently has the page
unmapped (always false when CONFIG_XPFO is disabled), and use it in
swiotlb_bounce() alongside the existing PageHighMem() check.

Signed-off-by: Juerg Haefliger <juerg.haefli...@canonical.com>
Tested-by: Tycho Andersen <ty...@docker.com>
---
 include/linux/xpfo.h | 4 ++++
 lib/swiotlb.c        | 3 ++-
 mm/xpfo.c            | 9 +++++++++
 3 files changed, 15 insertions(+), 1 deletion(-)

diff --git a/include/linux/xpfo.h b/include/linux/xpfo.h
index 1ff2d1976837..6b61f7b820f4 100644
--- a/include/linux/xpfo.h
+++ b/include/linux/xpfo.h
@@ -27,6 +27,8 @@ void xpfo_kunmap(void *kaddr, struct page *page);
 void xpfo_alloc_pages(struct page *page, int order, gfp_t gfp);
 void xpfo_free_pages(struct page *page, int order);
 
+bool xpfo_page_is_unmapped(struct page *page);
+
 #else /* !CONFIG_XPFO */
 
 static inline void xpfo_kmap(void *kaddr, struct page *page) { }
@@ -34,6 +36,8 @@ static inline void xpfo_kunmap(void *kaddr, struct page *page) { }
 static inline void xpfo_alloc_pages(struct page *page, int order, gfp_t gfp) { }
 static inline void xpfo_free_pages(struct page *page, int order) { }
 
+static inline bool xpfo_page_is_unmapped(struct page *page) { return false; }
+
 #endif /* CONFIG_XPFO */
 
 #endif /* _LINUX_XPFO_H */
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index a8d74a733a38..d4fee5ca2d9e 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -420,8 +420,9 @@ static void swiotlb_bounce(phys_addr_t orig_addr, phys_addr_t tlb_addr,
 {
        unsigned long pfn = PFN_DOWN(orig_addr);
        unsigned char *vaddr = phys_to_virt(tlb_addr);
+       struct page *page = pfn_to_page(pfn);
 
-       if (PageHighMem(pfn_to_page(pfn))) {
+       if (PageHighMem(page) || xpfo_page_is_unmapped(page)) {
                /* The buffer does not have a mapping.  Map it in and copy */
                unsigned int offset = orig_addr & ~PAGE_MASK;
                char *buffer;
diff --git a/mm/xpfo.c b/mm/xpfo.c
index 3cd45f68b5ad..3f305f31a072 100644
--- a/mm/xpfo.c
+++ b/mm/xpfo.c
@@ -206,3 +206,12 @@ void xpfo_kunmap(void *kaddr, struct page *page)
        spin_unlock_irqrestore(&xpfo->maplock, flags);
 }
 EXPORT_SYMBOL(xpfo_kunmap);
+
+inline bool xpfo_page_is_unmapped(struct page *page)
+{
+       if (!static_branch_unlikely(&xpfo_inited))
+               return false;
+
+       return test_bit(XPFO_PAGE_UNMAPPED, &lookup_xpfo(page)->flags);
+}
+EXPORT_SYMBOL(xpfo_page_is_unmapped);
-- 
2.11.0

Reply via email to