Mike Larkin and I came up with the following diff that keeps mapping the
framebuffer early. We tested this on a small number of machines here
that have the framebuffer < 4GB.
It'd be great if we can confirm this also works on machines where it is >
4GB.
Thanks,
Mark
? arch/amd64/compile/GENERIC.MP/obj
Index: arch/amd64/amd64/efifb.c
===================================================================
RCS file: /cvs/src/sys/arch/amd64/amd64/efifb.c,v
retrieving revision 1.26
diff -u -p -r1.26 efifb.c
--- arch/amd64/amd64/efifb.c 26 Nov 2019 02:20:50 -0000 1.26
+++ arch/amd64/amd64/efifb.c 24 Jan 2020 00:42:15 -0000
@@ -105,6 +105,8 @@ int efifb_load_font(void *, void *, str
void efifb_scrollback(void *, void *, int lines);
void efifb_efiinfo_init(struct efifb *);
void efifb_cnattach_common(void);
+vaddr_t efifb_early_map(paddr_t);
+void efifb_early_cleanup(void);
struct cb_framebuffer *cb_find_fb(paddr_t);
@@ -430,7 +432,7 @@ efifb_cnattach_common(void)
struct rasops_info *ri = &fb->rinfo;
long defattr = 0;
- ri->ri_bits = (u_char *)PMAP_DIRECT_MAP(fb->paddr);
+ ri->ri_bits = (u_char *)efifb_early_map(fb->paddr);
efifb_rasops_preinit(fb);
@@ -459,14 +461,17 @@ efifb_cnremap(void)
return;
if (_bus_space_map(iot, fb->paddr, fb->psize,
- BUS_SPACE_MAP_PREFETCHABLE | BUS_SPACE_MAP_LINEAR, &ioh) == 0)
- ri->ri_origbits = bus_space_vaddr(iot, ioh);
+ BUS_SPACE_MAP_PREFETCHABLE | BUS_SPACE_MAP_LINEAR, &ioh))
+ panic("can't remap framebuffer");
+ ri->ri_origbits = bus_space_vaddr(iot, ioh);
efifb_rasops_preinit(fb);
ri->ri_flg &= ~RI_CLEAR;
ri->ri_flg |= RI_CENTER | RI_WRONLY;
rasops_init(ri, efifb_std_descr.nrows, efifb_std_descr.ncols);
+
+ efifb_early_cleanup();
}
int
@@ -636,4 +641,16 @@ efifb_stolen(void)
{
struct efifb *fb = &efifb_console;
return fb->psize;
+}
+
+vaddr_t
+efifb_early_map(paddr_t pa)
+{
+ return pmap_set_pml4_early(pa);
+}
+
+void
+efifb_early_cleanup(void)
+{
+ pmap_clear_pml4_early();
}
Index: arch/amd64/amd64/locore0.S
===================================================================
RCS file: /cvs/src/sys/arch/amd64/amd64/locore0.S,v
retrieving revision 1.17
diff -u -p -r1.17 locore0.S
--- arch/amd64/amd64/locore0.S 18 Feb 2019 08:26:20 -0000 1.17
+++ arch/amd64/amd64/locore0.S 24 Jan 2020 00:42:15 -0000
@@ -321,7 +321,7 @@ cont:
#define PROC0_DMP2_OFF (PROC0_DMP3_OFF + NDML3_ENTRIES * NBPG)
#define TABLESIZE \
((NKL4_KIMG_ENTRIES + TABLE_L3_ENTRIES + TABLE_L2_ENTRIES + 1 + UPAGES + \
- NDML3_ENTRIES + NDML2_ENTRIES) * NBPG)
+ NDML3_ENTRIES + NDML2_ENTRIES + 3) * NBPG)
#define fillkpt \
1: movl %eax,(%ebx) ; /* store phys addr */ \
@@ -669,10 +669,11 @@ longmode_hi:
movw %ax,%gs
movw %ax,%fs
- /* XXX merge these */
leaq TABLESIZE(%rsi),%rdi
- call _C_LABEL(init_x86_64)
+ subq $(NBPG*3), %rdi
+ /* XXX merge these */
+ call _C_LABEL(init_x86_64)
call _C_LABEL(main)
.section .codepatch,"a"
Index: arch/amd64/amd64/machdep.c
===================================================================
RCS file: /cvs/src/sys/arch/amd64/amd64/machdep.c,v
retrieving revision 1.260
diff -u -p -r1.260 machdep.c
--- arch/amd64/amd64/machdep.c 20 Dec 2019 07:49:31 -0000 1.260
+++ arch/amd64/amd64/machdep.c 24 Jan 2020 00:42:15 -0000
@@ -1365,6 +1365,8 @@ map_tramps(void)
typedef void (vector)(void);
extern vector *IDTVEC(exceptions)[];
+paddr_t early_pte_pages;
+
void
init_x86_64(paddr_t first_avail)
{
@@ -1372,6 +1374,15 @@ init_x86_64(paddr_t first_avail)
bios_memmap_t *bmp;
int x, ist;
uint64_t max_dm_size = ((uint64_t)512 * NUM_L4_SLOT_DIRECT) << 30;
+
+ /*
+ * locore0 mapped 3 pages for use before the pmap is initialized
+ * starting at first_avail. These pages are currently used by
+ * efifb to create early-use VAs for the framebuffer before efifb
+ * is attached.
+ */
+ early_pte_pages = first_avail;
+ first_avail += 3 * NBPG;
cpu_init_msrs(&cpu_info_primary);
Index: arch/amd64/amd64/pmap.c
===================================================================
RCS file: /cvs/src/sys/arch/amd64/amd64/pmap.c,v
retrieving revision 1.137
diff -u -p -r1.137 pmap.c
--- arch/amd64/amd64/pmap.c 19 Dec 2019 17:42:17 -0000 1.137
+++ arch/amd64/amd64/pmap.c 24 Jan 2020 00:42:15 -0000
@@ -544,6 +544,78 @@ pmap_kremove(vaddr_t sva, vsize_t len)
}
/*
+ * pmap_set_pml4_early
+ *
+ * Utility function to map 2GB of 2MB pages to 'pa'. The VA that is assigned
+ * is the pml4 entry for 'early mappings' (see pmap.h). This function is used
+ * by display drivers that need to map their framebuffers early, before the
+ * pmap is fully initialized (eg, to show panic messages).
+ *
+ * Users of this function must call pmap_clear_pml4_early to remove the
+ * mapping when finished.
+ *
+ * Parameters:
+ * pa: phys addr to map
+ *
+ * Return value:
+ * VA mapping to 'pa'. This mapping is 2GB in size and starts at the base
+ * of the 2MB region containing 'pa'.
+ */
+vaddr_t
+pmap_set_pml4_early(paddr_t pa)
+{
+ extern paddr_t early_pte_pages;
+ pt_entry_t *pml4e, *pte;
+ int i, j, off;
+ paddr_t curpa;
+ vaddr_t va;
+
+ pml4e = (pt_entry_t *)(proc0.p_addr->u_pcb.pcb_cr3 + KERNBASE);
+ pml4e[PDIR_SLOT_EARLY] = (pd_entry_t)early_pte_pages | PG_V | PG_RW;
+
+ off = pa & PAGE_MASK_L2;
+ curpa = pa & L2_FRAME;
+
+ pte = (pt_entry_t *)PMAP_DIRECT_MAP(early_pte_pages);
+ memset(pte, 0, 3 * NBPG);
+
+ pte[0] = (early_pte_pages + NBPG) | PG_V | PG_RW;
+ pte[1] = (early_pte_pages + 2 * NBPG) | PG_V | PG_RW;
+
+ pte = (pt_entry_t *)PMAP_DIRECT_MAP(early_pte_pages + NBPG);
+ for (i = 0; i < 2; i++) {
+ /* 2 early pages of mappings */
+ for (j = 0; j < 512; j++) {
+ /* j[0..511] : 2MB mappings per page */
+ pte[(i * 512) + j] = curpa | PG_V | PG_RW | PG_PS;
+ curpa += (2 * 1024 * 1024);
+ }
+ }
+
+ va = (vaddr_t)((PDIR_SLOT_EARLY * 512ULL) << L3_SHIFT) + off;
+ return VA_SIGN_NEG(va);
+}
+
+/*
+ * pmap_clear_pml4_early
+ *
+ * Clears the mapping previously established with pmap_set_pml4_early.
+ */
+void
+pmap_clear_pml4_early(void)
+{
+ extern paddr_t early_pte_pages;
+ pt_entry_t *pml4e, *pte;
+
+ pte = (pt_entry_t *)PMAP_DIRECT_MAP(early_pte_pages);
+ memset(pte, 0, 3 * NBPG);
+
+ pml4e = (pd_entry_t *)pmap_kernel()->pm_pdir;
+ pml4e[PDIR_SLOT_EARLY] = 0;
+ tlbflush();
+}
+
+/*
* p m a p i n i t f u n c t i o n s
*
* pmap_bootstrap and pmap_init are called during system startup
Index: arch/amd64/include/pmap.h
===================================================================
RCS file: /cvs/src/sys/arch/amd64/include/pmap.h,v
retrieving revision 1.76
diff -u -p -r1.76 pmap.h
--- arch/amd64/include/pmap.h 19 Dec 2019 17:42:17 -0000 1.76
+++ arch/amd64/include/pmap.h 24 Jan 2020 00:42:15 -0000
@@ -143,10 +143,12 @@
#define L4_SLOT_KERNBASE 511
#define NUM_L4_SLOT_DIRECT 4
#define L4_SLOT_DIRECT (L4_SLOT_KERNBASE - NUM_L4_SLOT_DIRECT)
+#define L4_SLOT_EARLY (L4_SLOT_DIRECT - 1)
#define PDIR_SLOT_KERN L4_SLOT_KERN
#define PDIR_SLOT_PTE L4_SLOT_PTE
#define PDIR_SLOT_DIRECT L4_SLOT_DIRECT
+#define PDIR_SLOT_EARLY L4_SLOT_EARLY
/*
* the following defines give the virtual addresses of various MMU
@@ -401,6 +403,8 @@ void pagezero(vaddr_t);
int pmap_convert(struct pmap *, int);
void pmap_enter_special(vaddr_t, paddr_t, vm_prot_t);
+vaddr_t pmap_set_pml4_early(paddr_t pa);
+void pmap_clear_pml4_early(void);
/*
* functions for flushing the cache for vaddrs and pages.