On Wed, Jul 05, 2017 at 09:40:41PM +0300, Artturi Alm wrote:
> 
> diff --git a/sys/arch/armv7/armv7/armv7_machdep.c b/sys/arch/armv7/armv7/armv7_machdep.c
> index aa1c549b29b..105fb3333f1 100644
> --- a/sys/arch/armv7/armv7/armv7_machdep.c
> +++ b/sys/arch/armv7/armv7/armv7_machdep.c
> @@ -356,6 +356,30 @@ copy_io_area_map(pd_entry_t *new_pd)
>          }
>  }
>  
> +static inline paddr_t
> +_bs_alloc(size_t sz)
> +{
> +        paddr_t addr, pa = 0;
> +
> +        for (sz = round_page(sz); sz > 0; sz -= PAGE_SIZE) {
> +                if (uvm_page_physget(&addr) == FALSE)
> +                        panic("uvm_page_physget() failed");
> +                memset((char *)addr, 0, PAGE_SIZE);
> +                if (pa == 0)
> +                        pa = addr;
> +        }
> +        return pa;
> +}
> +
> +/* RelativePA 2 KVA */
> +#define _BS_RPA2KVA(x, y)        (KERNEL_BASE + (x) - (y))
> +static inline void
> +_bs_valloc(pv_addr_t *pv, vsize_t sz, paddr_t off)
> +{
> +        pv->pv_pa = _bs_alloc(sz);
> +        pv->pv_va = _BS_RPA2KVA(pv->pv_pa, off);
> +}
> +
>  /*
>   * u_int initarm(...)
>   *
> @@ -379,7 +403,7 @@ initarm(void *arg0, void *arg1, void *arg2, paddr_t loadaddr)
>          paddr_t memstart;
>          psize_t memsize;
>          paddr_t memend;
> -        void *config;
> +        void *config = arg2;
>          size_t size;
>          void *node;
>          extern uint32_t esym;        /* &_end if no symbols are loaded */
> @@ -420,18 +444,8 @@ initarm(void *arg0, void *arg1, void *arg2, paddr_t loadaddr)
>          tmp_bs_tag.bs_map = bootstrap_bs_map;
>  
>          /*
> -         * Now, map the FDT area.
> -         *
> -         * As we don't know the size of a possible FDT, map the size of a
> -         * typical bootstrap bs map. The FDT might not be aligned, so this
> -         * might take up to two L1_S_SIZEd mappings.
> -         *
> -         * XXX: There's (currently) no way to unmap a bootstrap mapping, so
> -         * we might lose a bit of the bootstrap address space.
> +         * Now, init the FDT @ PA, reloc and reinit to KVA later.
>           */
> -        bootstrap_bs_map(NULL, (bus_addr_t)arg2, L1_S_SIZE, 0,
> -            (bus_space_handle_t *)&config);
> -
>          if (!fdt_init(config) || fdt_get_size(config) == 0)
>                  panic("initarm: no FDT");
>  
> @@ -477,6 +491,33 @@ initarm(void *arg0, void *arg1, void *arg2, paddr_t loadaddr)
>  
>          physmem = (physical_end - physical_start) / PAGE_SIZE;
>  
> +        /* Load memory into UVM. */
> +#ifdef VERBOSE_INIT_ARM
> +        printf("page ");
> +#endif
> +        uvm_setpagesize();        /* initialize PAGE_SIZE-dependent variables */
> +        uvm_page_physload(atop(physical_freestart), atop(physical_freeend),
> +            atop(physical_freestart), atop(physical_freeend), 0);
> +
> +        if (physical_start < loadaddr) {
> +                uvm_page_physload(atop(physical_start), atop(loadaddr),
> +                    atop(physical_start), atop(loadaddr), 0);
> +                physsegs--;
> +        }
> +
> +        for (i = 1; i < physsegs; i++) {
> +                if (fdt_get_reg(node, i, &reg))
> +                        break;
> +                if (reg.size == 0)
> +                        continue;
> +
> +                memstart = reg.addr;
> +                memend = MIN(reg.addr + reg.size, (paddr_t)-PAGE_SIZE);
> +                physmem += atop(memend - memstart);
> +                uvm_page_physload(atop(memstart), atop(memend),
> +                    atop(memstart), atop(memend), 0);
> +        }
> +
>  #ifdef DEBUG
>          /* Tell the user about the memory */
>          printf("physmemory: %d pages at 0x%08lx -> 0x%08lx\n", physmem,
> @@ -514,27 +555,27 @@ initarm(void *arg0, void *arg1, void *arg2, paddr_t loadaddr)
>  
>          /* Define a macro to simplify memory allocation */
>  #define valloc_pages(var, np)                                \
> -        alloc_pages((var).pv_pa, (np));                        \
> -        (var).pv_va = KERNEL_BASE + (var).pv_pa - loadaddr;
> +        _bs_valloc(&(var), ptoa((np)), loadaddr)
>  
>  #define alloc_pages(var, np)                                \
> -        (var) = physical_freestart;                        \
> -        physical_freestart += ((np) * PAGE_SIZE);        \
> -        if (physical_freeend < physical_freestart)        \
> -                panic("initarm: out of memory");        \
> -        free_pages -= (np);                                \
> -        memset((char *)(var), 0, ((np) * PAGE_SIZE));
> +        (var) = _bs_alloc(ptoa((np)))
>  
>          loop1 = 0;
>          kernel_l1pt.pv_pa = 0;
> +        physical_freestart = _bs_alloc(ptoa(NUM_KERNEL_PTS) + L1_TABLE_SIZE);
>          for (loop = 0; loop <= NUM_KERNEL_PTS; ++loop) {
>                  /* Are we 16KB aligned for an L1 ? */
>                  if (((physical_freestart) & (L1_TABLE_SIZE - 1)) == 0
>                      && kernel_l1pt.pv_pa == 0) {
> -                        valloc_pages(kernel_l1pt, L1_TABLE_SIZE / PAGE_SIZE);
> +                        kernel_l1pt.pv_pa = physical_freestart;
> +                        kernel_l1pt.pv_va =
> +                            _BS_RPA2KVA(physical_freestart, loadaddr);
> +                        physical_freestart += L1_TABLE_SIZE;
>                  } else {
> -                        valloc_pages(kernel_pt_table[loop1],
> -                            L2_TABLE_SIZE / PAGE_SIZE);
> +                        kernel_pt_table[loop1].pv_pa = physical_freestart;
> +                        kernel_pt_table[loop1].pv_va =
> +                            _BS_RPA2KVA(physical_freestart, loadaddr);
> +                        physical_freestart += L2_TABLE_SIZE;
>                          ++loop1;
>                  }
>          }
> @@ -572,11 +613,15 @@ initarm(void *arg0, void *arg1, void *arg2, paddr_t loadaddr)
>  #endif
>  
>          /*
> -         * Allocate pages for an FDT copy.
> +         * Allocate pages for FDT, copy it there, and zero the original.
>           */
>          size = fdt_get_size(config);
>          valloc_pages(fdt, round_page(size) / PAGE_SIZE);
>          memcpy((void *)fdt.pv_pa, config, size);
> +        memset(config, 0, size);
> +
> +        /* Now we must reinit the FDT, using the virtual address. */
> +        fdt_init((void *)fdt.pv_va);
>  
>          /*
>           * XXX Defer this to later so that we can reclaim the memory
> @@ -726,43 +771,12 @@ initarm(void *arg0, void *arg1, void *arg2, paddr_t loadaddr)
>          prefetch_abort_handler_address = (u_int)prefetch_abort_handler;
>          undefined_handler_address = (u_int)undefinedinstruction_bounce;
>  
> -        /* Now we can reinit the FDT, using the virtual address. */
> -        fdt_init((void *)fdt.pv_va);
> -
>          /* Initialise the undefined instruction handlers */
>  #ifdef VERBOSE_INIT_ARM
>          printf("undefined ");
>  #endif
>          undefined_init();
>  
> -        /* Load memory into UVM. */
> -#ifdef VERBOSE_INIT_ARM
> -        printf("page ");
> -#endif
> -        uvm_setpagesize();        /* initialize PAGE_SIZE-dependent variables */
> -        uvm_page_physload(atop(physical_freestart), atop(physical_freeend),
> -            atop(physical_freestart), atop(physical_freeend), 0);
> -
> -        if (physical_start < loadaddr) {
> -                uvm_page_physload(atop(physical_start), atop(loadaddr),
> -                    atop(physical_start), atop(loadaddr), 0);
> -                physsegs--;
> -        }
> -
> -        node = fdt_find_node("/memory");
> -        for (i = 1; i < physsegs; i++) {
> -                if (fdt_get_reg(node, i, &reg))
> -                        break;
> -                if (reg.size == 0)
> -                        continue;
> -
> -                memstart = reg.addr;
> -                memend = MIN(reg.addr + reg.size, (paddr_t)-PAGE_SIZE);
> -                physmem += (memend - memstart) / PAGE_SIZE;
> -                uvm_page_physload(atop(memstart), atop(memend),
> -                    atop(memstart), atop(memend), 0);
> -        }
> -
>          /* Boot strap pmap telling it where the kernel page table is */
>  #ifdef VERBOSE_INIT_ARM
>          printf("pmap ");
> diff --git a/sys/arch/armv7/armv7/locore0.S b/sys/arch/armv7/armv7/locore0.S
> index 2a4e98cbe8c..f889dfeb204 100644
> --- a/sys/arch/armv7/armv7/locore0.S
> +++ b/sys/arch/armv7/armv7/locore0.S
> @@ -136,6 +136,14 @@ _C_LABEL(bootstrap_start):
>          str     r3, [r4, #8]
>          str     r3, [r4, #0x14]         // ram address for 0xc0000000
>  
> +        mov     r1, r8, lsr #20
> +        mov     r2, r1, lsl #2
> +        str     r2, [r4, #(24 + 4)]
> +        mov     r1, r2, lsl #18
> +        ldr     r3, [r4, #(24 + 8)]
> +        orr     r3, r3, r1
> +        str     r3, [r4, #(24 + 8)]
> +
>          /*
>           * the first entry has two fields that need to be updated for
>           * specific ram configuration of this board.
> @@ -191,6 +199,9 @@ mmu_init_table:
>          /* map VA 0xc0000000..0xc3ffffff to PA */
>          MMU_INIT(0xc0000000, 0x00000000, 64,
>              L1_TYPE_S|L1_S_C|L1_S_V7_AP(AP_KRW)|L1_S_V7_AF)
> +        /* map SDRAM VA==PA, WT cacheable @ pa = trunc_section(r8(=fdt)) */
> +        MMU_INIT(0x00000000, 0x00000000, 4,
> +            L1_TYPE_S|L1_S_C|L1_S_V7_AP(AP_KRW)|L1_S_V7_AF)
>  
>          .word   0       /* end of table */
> 
Hi,

would it be a waste of time to work on a minimal version of the next step
to the above?  The next step would be making uvm_pageboot_alloc() usable,
which might mean a visit to pmap7.c would be needed, but seeing how the
kernel L1 is being built in initarm() nowadays, I don't think I would be
crossing any uncrossed-yet boundaries there.  A rough sketch of the
direction I have in mind is below.

-Artturi
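
The sketch is only an illustration, not a working diff: it assumes
uvm_pageboot_alloc(), pmap_kernel() and pmap_extract() already work at that
point in initarm() (exactly the part that needs the visit to pmap7.c), and
the helper name bs_valloc_uvm() is made up here.

#include <sys/param.h>
#include <sys/systm.h>
#include <uvm/uvm_extern.h>
#include <machine/pmap.h>

/*
 * Sketch of the idea only: let uvm_pageboot_alloc() replace the
 * hand-rolled _bs_alloc()/_BS_RPA2KVA() pair once the physical
 * segments have been loaded with uvm_page_physload().  Like
 * _bs_alloc() above, this assumes a multi-page allocation comes out
 * physically contiguous, which the L1 table needs.
 */
static void
bs_valloc_uvm(pv_addr_t *pv, vsize_t sz)
{
        sz = round_page(sz);

        /* Wired, mapped kernel VA backed by pages taken off vm_physmem. */
        pv->pv_va = uvm_pageboot_alloc(sz);

        /* The page tables are handed to the MMU by PA, so look it up. */
        if (pmap_extract(pmap_kernel(), pv->pv_va, &pv->pv_pa) == FALSE)
                panic("bs_valloc_uvm: no pa for va 0x%lx", pv->pv_va);

        /* Don't rely on the allocator zeroing the pages for us. */
        memset((void *)pv->pv_va, 0, sz);
}

With something like that in place, valloc_pages()/alloc_pages() in initarm()
would reduce to calls into this helper, and the relative-PA-to-KVA arithmetic
would no longer be needed.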