From: Julian Stecklina <jstec...@amazon.de>

Only the xpfo_kunmap call that actually needs to unmap the page has to
be serialized. We need to be careful to handle the case where, after
the atomic decrement of the mapcount, an xpfo_kmap has increased the
mapcount again. In that case we can safely skip modifying the page
table.
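
For context, the mapping side this races against looks roughly like the
following. This is not part of the diff; it is a simplified sketch of the
existing xpfo_kmap, assuming it keeps taking the per-page lock around the
increment-and-map (the TestClearPageXpfoUnmapped helper and PAGE_KERNEL
protection are assumed here, not shown in this patch):

static inline void xpfo_kmap(void *kaddr, struct page *page)
{
        unsigned long flags;

        if (!static_branch_unlikely(&xpfo_inited))
                return;

        if (!PageXpfoUser(page))
                return;

        /*
         * The page was previously allocated to user space, so map it
         * back into the kernel; no TLB flush is needed for that.
         */
        spin_lock_irqsave(&page->xpfo_lock, flags);

        if (atomic_inc_return(&page->xpfo_mapcount) == 1 &&
            TestClearPageXpfoUnmapped(page))
                set_kpte(kaddr, page, PAGE_KERNEL);

        spin_unlock_irqrestore(&page->xpfo_lock, flags);
}

A kunmap caller that sees a non-zero mapcount after its decrement, or that
finds the count raised again once it holds the lock, leaves the page table
entry alone; only the caller that still observes zero under the lock clears
the PTE, and it flushes the TLB after dropping the lock.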

Model-checked with Spin, using up to 4 concurrent callers.

Signed-off-by: Julian Stecklina <jstec...@amazon.de>
Signed-off-by: Khalid Aziz <khalid.a...@oracle.com>
Cc: Khalid Aziz <kha...@gonehiking.org>
Cc: x...@kernel.org
Cc: kernel-harden...@lists.openwall.com
Cc: Vasileios P. Kemerlis <v...@cs.columbia.edu>
Cc: Juerg Haefliger <juerg.haefli...@canonical.com>
Cc: Tycho Andersen <ty...@tycho.ws>
Cc: Marco Benatto <marco.antonio....@gmail.com>
Cc: David Woodhouse <dw...@infradead.org>
---
 include/linux/xpfo.h | 24 +++++++++++++++---------
 1 file changed, 15 insertions(+), 9 deletions(-)

diff --git a/include/linux/xpfo.h b/include/linux/xpfo.h
index 2318c7eb5fb7..37e7f52fa6ce 100644
--- a/include/linux/xpfo.h
+++ b/include/linux/xpfo.h
@@ -61,6 +61,7 @@ static inline void xpfo_kmap(void *kaddr, struct page *page)
 static inline void xpfo_kunmap(void *kaddr, struct page *page)
 {
        unsigned long flags;
+       bool flush_tlb = false;
 
        if (!static_branch_unlikely(&xpfo_inited))
                return;
@@ -72,18 +73,23 @@ static inline void xpfo_kunmap(void *kaddr, struct page *page)
         * The page is to be allocated back to user space, so unmap it from
         * the kernel, flush the TLB and tag it as a user page.
         */
-       spin_lock_irqsave(&page->xpfo_lock, flags);
-
        if (atomic_dec_return(&page->xpfo_mapcount) == 0) {
-#ifdef CONFIG_XPFO_DEBUG
-               WARN_ON(PageXpfoUnmapped(page));
-#endif
-               SetPageXpfoUnmapped(page);
-               set_kpte(kaddr, page, __pgprot(0));
-               xpfo_flush_kernel_tlb(page, 0);
+               spin_lock_irqsave(&page->xpfo_lock, flags);
+
+               /*
+                * If we raced with xpfo_kmap after the atomic_dec_return,
+                * the page is mapped again and we must not nuke the mapping.
+                */
+               if (atomic_read(&page->xpfo_mapcount) == 0) {
+                       SetPageXpfoUnmapped(page);
+                       set_kpte(kaddr, page, __pgprot(0));
+                       flush_tlb = true;
+               }
+               spin_unlock_irqrestore(&page->xpfo_lock, flags);
        }
 
-       spin_unlock_irqrestore(&page->xpfo_lock, flags);
+       if (flush_tlb)
+               xpfo_flush_kernel_tlb(page, 0);
 }
 
 void xpfo_alloc_pages(struct page *page, int order, gfp_t gfp, bool will_map);
-- 
2.17.1
