Author: luporl
Date: Fri Jun  7 17:58:59 2019
New Revision: 348783
URL: https://svnweb.freebsd.org/changeset/base/348783

Log:
  [PPC64] Support QEMU/KVM pseries without hugepages
  
  This set of changes makes it possible to run FreeBSD for PowerPC64/pseries
  under QEMU/KVM without requiring the host to make hugepages available to
  the guest.
  
  Running without hugepages was in principle already possible, by setting
  hw_direct_map to 0, but on PowerPC64 a couple of issues and wrong
  assumptions prevented it from working before this change (see the sketch
  below).
  
  Reviewed by:  jhibbits
  Differential Revision:        https://reviews.freebsd.org/D20522
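
A minimal stand-alone sketch of the addressing convention the change relies
on when hw_direct_map is 0: physical pages are still reached through virtual
addresses in the direct-map range, but each mapping has to be entered
explicitly, as the mmu_oea64.c and slb.c hunks below do.  The constant
value, the stub and the helper names here are illustrative, not the
kernel's real definitions.

/*
 * Illustrative model of mapping a physical page at its direct-map
 * virtual address when there is no hardware direct map.
 */
#include <stdint.h>
#include <stdio.h>

#define DMAP_BASE_ADDRESS 0xc000000000000000ULL /* illustrative value */
#define PAGE_SIZE 4096ULL
#define PAGE_MASK (PAGE_SIZE - 1)

static int hw_direct_map = 0;   /* pretend the host offers no hugepages */

/* Stand-in for moea64_kenter(mmup, va, pa). */
static void
kenter_stub(uint64_t va, uint64_t pa)
{
        printf("map va 0x%016jx -> pa 0x%016jx\n", (uintmax_t)va,
            (uintmax_t)pa);
}

/* Map one physical page at its direct-map virtual address, as the
 * mmu_oea64.c hunk does for the kernel image and the exception vectors. */
static void
map_page_in_dmap(uint64_t pa)
{
        pa &= ~PAGE_MASK;
        if (!hw_direct_map)
                kenter_stub(pa | DMAP_BASE_ADDRESS, pa);
}

int
main(void)
{
        map_page_in_dmap(0x100000);     /* e.g. a page of the kernel image */
        return (0);
}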

Modified:
  head/sys/powerpc/aim/mmu_oea64.c
  head/sys/powerpc/aim/mmu_oea64.h
  head/sys/powerpc/aim/slb.c
  head/sys/powerpc/pseries/mmu_phyp.c

Modified: head/sys/powerpc/aim/mmu_oea64.c
==============================================================================
--- head/sys/powerpc/aim/mmu_oea64.c    Fri Jun  7 17:54:26 2019        (r348782)
+++ head/sys/powerpc/aim/mmu_oea64.c    Fri Jun  7 17:58:59 2019        (r348783)
@@ -638,7 +638,7 @@ moea64_setup_direct_map(mmu_t mmup, vm_offset_t kernel
 {
        struct pvo_entry *pvo;
        register_t msr;
-       vm_paddr_t pa;
+       vm_paddr_t pa, pkernelstart, pkernelend;
        vm_offset_t size, off;
        uint64_t pte_lo;
        int i;
@@ -686,9 +686,11 @@ moea64_setup_direct_map(mmu_t mmup, vm_offset_t kernel
         */
 
        if (!hw_direct_map || kernelstart < DMAP_BASE_ADDRESS) {
-               for (pa = kernelstart & ~PAGE_MASK; pa < kernelend;
+               pkernelstart = kernelstart & ~DMAP_BASE_ADDRESS;
+               pkernelend = kernelend & ~DMAP_BASE_ADDRESS;
+               for (pa = pkernelstart & ~PAGE_MASK; pa < pkernelend;
                    pa += PAGE_SIZE)
-                       moea64_kenter(mmup, pa, pa);
+                       moea64_kenter(mmup, pa | DMAP_BASE_ADDRESS, pa);
        }
 
        if (!hw_direct_map) {
@@ -696,6 +698,10 @@ moea64_setup_direct_map(mmu_t mmup, vm_offset_t kernel
                off = (vm_offset_t)(moea64_bpvo_pool);
                for (pa = off; pa < off + size; pa += PAGE_SIZE)
                        moea64_kenter(mmup, pa, pa);
+
+               /* Map exception vectors */
+               for (pa = EXC_RSVD; pa < EXC_LAST; pa += PAGE_SIZE)
+                       moea64_kenter(mmup, pa | DMAP_BASE_ADDRESS, pa);
        }
        ENABLE_TRANS(msr);
 
@@ -875,7 +881,7 @@ moea64_mid_bootstrap(mmu_t mmup, vm_offset_t kernelsta
         * Initialise the bootstrap pvo pool.
         */
        moea64_bpvo_pool = (struct pvo_entry *)moea64_bootstrap_alloc(
-               moea64_bpvo_pool_size*sizeof(struct pvo_entry), 0);
+               moea64_bpvo_pool_size*sizeof(struct pvo_entry), PAGE_SIZE);
        moea64_bpvo_pool_index = 0;
 
        /* Place at address usable through the direct map */
@@ -1169,15 +1175,19 @@ moea64_unwire(mmu_t mmu, pmap_t pm, vm_offset_t sva, v
  */
 
 static __inline
-void moea64_set_scratchpage_pa(mmu_t mmup, int which, vm_paddr_t pa) {
+void moea64_set_scratchpage_pa(mmu_t mmup, int which, vm_paddr_t pa)
+{
+       struct pvo_entry *pvo;
 
        KASSERT(!hw_direct_map, ("Using OEA64 scratchpage with a direct map!"));
        mtx_assert(&moea64_scratchpage_mtx, MA_OWNED);
 
-       moea64_scratchpage_pvo[which]->pvo_pte.pa =
+       pvo = moea64_scratchpage_pvo[which];
+       PMAP_LOCK(pvo->pvo_pmap);
+       pvo->pvo_pte.pa =
            moea64_calc_wimg(pa, VM_MEMATTR_DEFAULT) | (uint64_t)pa;
-       MOEA64_PTE_REPLACE(mmup, moea64_scratchpage_pvo[which],
-           MOEA64_PTE_INVALIDATE);
+       MOEA64_PTE_REPLACE(mmup, pvo, MOEA64_PTE_INVALIDATE);
+       PMAP_UNLOCK(pvo->pvo_pmap);
        isync();
 }
 

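For context on the moea64_set_scratchpage_pa() change above: without a
direct map, page copies go through two reserved kernel virtual addresses
whose PTEs are retargeted under moea64_scratchpage_mtx, and the added
PMAP_LOCK keeps the PTE replacement consistent with other users of the
pmap.  Below is a stand-alone user-space model of that lock/retarget/copy
ordering; the buffer-backed "pages", the stubs and the function names are
hypothetical, only the ordering mirrors the !hw_direct_map copy path in
mmu_oea64.c.

/*
 * Model of the scratch-page pattern: two windows are pointed at arbitrary
 * pages while a mutex is held, then data is copied through them.
 */
#include <pthread.h>
#include <stdio.h>
#include <string.h>

#define PAGE_SIZE 4096

static pthread_mutex_t scratchpage_mtx = PTHREAD_MUTEX_INITIALIZER;
static char *scratchpage_va[2];         /* stand-ins for the reserved VAs */

/* Stand-in for moea64_set_scratchpage_pa(): point window 'which' at a page. */
static void
set_scratchpage(int which, char *page)
{
        scratchpage_va[which] = page;
}

/* Copy one page through the scratch windows, roughly as the kernel does
 * when hw_direct_map is 0. */
static void
copy_page_via_scratch(char *src, char *dst)
{
        pthread_mutex_lock(&scratchpage_mtx);
        set_scratchpage(0, src);
        set_scratchpage(1, dst);
        memcpy(scratchpage_va[1], scratchpage_va[0], PAGE_SIZE);
        pthread_mutex_unlock(&scratchpage_mtx);
}

int
main(void)
{
        static char src[PAGE_SIZE] = "hello", dst[PAGE_SIZE];

        copy_page_via_scratch(src, dst);
        printf("%s\n", dst);
        return (0);
}
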
Modified: head/sys/powerpc/aim/mmu_oea64.h
==============================================================================
--- head/sys/powerpc/aim/mmu_oea64.h    Fri Jun  7 17:54:26 2019        (r348782)
+++ head/sys/powerpc/aim/mmu_oea64.h    Fri Jun  7 17:58:59 2019        (r348783)
@@ -81,6 +81,7 @@ extern u_int  moea64_pte_overflow;
 
 extern int             moea64_large_page_shift;
 extern uint64_t                moea64_large_page_size;
+extern uint64_t                moea64_large_page_mask;
 extern u_long          moea64_pteg_count;
 extern u_long          moea64_pteg_mask;
 extern int             n_slbs;

Modified: head/sys/powerpc/aim/slb.c
==============================================================================
--- head/sys/powerpc/aim/slb.c  Fri Jun  7 17:54:26 2019        (r348782)
+++ head/sys/powerpc/aim/slb.c  Fri Jun  7 17:58:59 2019        (r348783)
@@ -500,10 +500,12 @@ slb_uma_real_alloc(uma_zone_t zone, vm_size_t bytes, i
        if (m == NULL)
                return (NULL);
 
-       va = (void *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
-
-       if (!hw_direct_map)
+       if (hw_direct_map)
+               va = (void *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
+       else {
+               va = (void *)(VM_PAGE_TO_PHYS(m) | DMAP_BASE_ADDRESS);
                pmap_kenter((vm_offset_t)va, VM_PAGE_TO_PHYS(m));
+       }
 
        if ((wait & M_ZERO) && (m->flags & PG_ZERO) == 0)
                bzero(va, PAGE_SIZE);

Modified: head/sys/powerpc/pseries/mmu_phyp.c
==============================================================================
--- head/sys/powerpc/pseries/mmu_phyp.c Fri Jun  7 17:54:26 2019        (r348782)
+++ head/sys/powerpc/pseries/mmu_phyp.c Fri Jun  7 17:58:59 2019        (r348783)
@@ -59,6 +59,16 @@ __FBSDID("$FreeBSD$");
 
 #include "phyp-hvcall.h"
 
+#define MMU_PHYP_DEBUG 0
+#define MMU_PHYP_ID "mmu_phyp: "
+#if MMU_PHYP_DEBUG
+#define dprintf(fmt, ...) printf(fmt, ## __VA_ARGS__)
+#define dprintf0(fmt, ...) dprintf(MMU_PHYP_ID fmt, ## __VA_ARGS__)
+#else
+#define dprintf(fmt, args...) do { ; } while(0)
+#define dprintf0(fmt, args...) do { ; } while(0)
+#endif
+
 static struct rmlock mphyp_eviction_lock;
 
 /*
@@ -149,6 +159,7 @@ mphyp_bootstrap(mmu_t mmup, vm_offset_t kernelstart, v
        res = OF_getencprop(node, "ibm,slb-size", prop, sizeof(prop[0]));
        if (res > 0)
                n_slbs = prop[0];
+       dprintf0("slb-size=%i\n", n_slbs);
 
        moea64_pteg_count = final_pteg_count / sizeof(struct lpteg);
 
@@ -185,11 +196,22 @@ mphyp_bootstrap(mmu_t mmup, vm_offset_t kernelstart, v
                        shift = arr[idx];
                        slb_encoding = arr[idx + 1];
                        nptlp = arr[idx + 2];
+
+                       dprintf0("Segment Page Size: "
+                           "%uKB, slb_enc=0x%X: {size, encoding}[%u] =",
+                           shift > 10? 1 << (shift-10) : 0,
+                           slb_encoding, nptlp);
+
                        idx += 3;
                        len -= 3;
                        while (len > 0 && nptlp) {
                                lp_size = arr[idx];
                                lp_encoding = arr[idx+1];
+
+                               dprintf(" {%uKB, 0x%X}",
+                                   lp_size > 10? 1 << (lp_size-10) : 0,
+                                   lp_encoding);
+
                                if (slb_encoding == SLBV_L && lp_encoding == 0)
                                        break;
 
@@ -197,17 +219,28 @@ mphyp_bootstrap(mmu_t mmup, vm_offset_t kernelstart, v
                                len -= 2;
                                nptlp--;
                        }
+                       dprintf("\n");
                        if (nptlp && slb_encoding == SLBV_L && lp_encoding == 0)
                                break;
                }
 
-               if (len == 0)
-                       panic("Standard large pages (SLB[L] = 1, PTE[LP] = 0) "
-                           "not supported by this system. Please enable huge "
-                           "page backing if running under PowerKVM.");
-
-               moea64_large_page_shift = shift;
-               moea64_large_page_size = 1ULL << lp_size;
+               if (len > 0) {
+                       moea64_large_page_shift = shift;
+                       moea64_large_page_size = 1ULL << lp_size;
+                       moea64_large_page_mask = moea64_large_page_size - 1;
+                       hw_direct_map = 1;
+                       printf(MMU_PHYP_ID
+                           "Support for hugepages of %uKB detected\n",
+                           moea64_large_page_shift > 10?
+                               1 << (moea64_large_page_shift-10) : 0);
+               } else {
+                       moea64_large_page_size = 0;
+                       moea64_large_page_shift = 0;
+                       moea64_large_page_mask = 0;
+                       hw_direct_map = 0;
+                       printf(MMU_PHYP_ID
+                           "Support for hugepages not found\n");
+               }
        }
 
        moea64_mid_bootstrap(mmup, kernelstart, kernelend);
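
The hunk above replaces the old panic with a fallback: when the hypervisor
reports a usable large-page size, the large-page globals are filled in and
hw_direct_map is enabled; otherwise they are zeroed and the MMU code runs
without a direct map.  A stand-alone sketch of that decision, simplified to
a single shift parameter (the helper and main() are hypothetical, the
variable names mirror the commit):

/*
 * Sketch of the hugepage-detection fallback added in mphyp_bootstrap().
 */
#include <stdint.h>
#include <stdio.h>

static int hw_direct_map;
static int moea64_large_page_shift;
static uint64_t moea64_large_page_size;
static uint64_t moea64_large_page_mask;

static void
setup_large_pages(int lp_shift_found)   /* <= 0 means "no hugepages" */
{
        if (lp_shift_found > 0) {
                moea64_large_page_shift = lp_shift_found;
                moea64_large_page_size = 1ULL << lp_shift_found;
                moea64_large_page_mask = moea64_large_page_size - 1;
                hw_direct_map = 1;
        } else {
                moea64_large_page_shift = 0;
                moea64_large_page_size = 0;
                moea64_large_page_mask = 0;
                hw_direct_map = 0;
        }
}

int
main(void)
{
        setup_large_pages(24);          /* 16MB hugepages reported */
        printf("direct map %d, large page shift %d, mask 0x%jx\n",
            hw_direct_map, moea64_large_page_shift,
            (uintmax_t)moea64_large_page_mask);

        setup_large_pages(0);           /* no hugepages: plain QEMU/KVM guest */
        printf("direct map %d, large page shift %d, mask 0x%jx\n",
            hw_direct_map, moea64_large_page_shift,
            (uintmax_t)moea64_large_page_mask);
        return (0);
}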