Module Name:	src
Committed By:	skrll
Date:		Wed Mar  7 22:07:13 UTC 2012
Modified Files:
	src/sys/arch/hppa/hppa: trap.c vm_machdep.c

Log Message:
Deal with setting of space registers and protection ids for posix_spawn.

Thanks to martin for the help.


To generate a diff of this commit:
cvs rdiff -u -r1.98 -r1.99 src/sys/arch/hppa/hppa/trap.c
cvs rdiff -u -r1.50 -r1.51 src/sys/arch/hppa/hppa/vm_machdep.c

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.
Modified files: Index: src/sys/arch/hppa/hppa/trap.c diff -u src/sys/arch/hppa/hppa/trap.c:1.98 src/sys/arch/hppa/hppa/trap.c:1.99 --- src/sys/arch/hppa/hppa/trap.c:1.98 Sun Feb 19 21:06:08 2012 +++ src/sys/arch/hppa/hppa/trap.c Wed Mar 7 22:07:13 2012 @@ -1,4 +1,4 @@ -/* $NetBSD: trap.c,v 1.98 2012/02/19 21:06:08 rmind Exp $ */ +/* $NetBSD: trap.c,v 1.99 2012/03/07 22:07:13 skrll Exp $ */ /*- * Copyright (c) 2001, 2002 The NetBSD Foundation, Inc. @@ -58,7 +58,7 @@ */ #include <sys/cdefs.h> -__KERNEL_RCSID(0, "$NetBSD: trap.c,v 1.98 2012/02/19 21:06:08 rmind Exp $"); +__KERNEL_RCSID(0, "$NetBSD: trap.c,v 1.99 2012/03/07 22:07:13 skrll Exp $"); /* #define INTRDEBUG */ /* #define TRAPDEBUG */ @@ -998,7 +998,28 @@ child_return(void *arg) void cpu_spawn_return(struct lwp *l) { - + struct proc *p = l->l_proc; + pmap_t pmap = p->p_vmspace->vm_map.pmap; + pa_space_t space = pmap->pm_space; + struct trapframe *tf = l->l_md.md_regs; + + /* Load all of the user's space registers. */ + tf->tf_sr0 = tf->tf_sr1 = tf->tf_sr3 = tf->tf_sr2 = + tf->tf_sr4 = tf->tf_sr5 = tf->tf_sr6 = space; + tf->tf_iisq_head = tf->tf_iisq_tail = space; + + /* Load the protection registers */ + tf->tf_pidr1 = tf->tf_pidr2 = pmap->pm_pid; + + /* + * theoretically these could be inherited from the father, + * but just in case. 
+ */ + tf->tf_sr7 = HPPA_SID_KERNEL; + mfctl(CR_EIEM, tf->tf_eiem); + tf->tf_ipsw = PSW_C | PSW_Q | PSW_P | PSW_D | PSW_I /* | PSW_L */ | + (curcpu()->ci_psw & PSW_O); + userret(l, l->l_md.md_regs->tf_iioq_head, 0); #ifdef DEBUG frame_sanity_check(__func__, __LINE__, 0, l->l_md.md_regs, l); Index: src/sys/arch/hppa/hppa/vm_machdep.c diff -u src/sys/arch/hppa/hppa/vm_machdep.c:1.50 src/sys/arch/hppa/hppa/vm_machdep.c:1.51 --- src/sys/arch/hppa/hppa/vm_machdep.c:1.50 Sun Feb 19 21:06:08 2012 +++ src/sys/arch/hppa/hppa/vm_machdep.c Wed Mar 7 22:07:13 2012 @@ -1,4 +1,4 @@ -/* $NetBSD: vm_machdep.c,v 1.50 2012/02/19 21:06:08 rmind Exp $ */ +/* $NetBSD: vm_machdep.c,v 1.51 2012/03/07 22:07:13 skrll Exp $ */ /* $OpenBSD: vm_machdep.c,v 1.64 2008/09/30 18:54:26 miod Exp $ */ @@ -29,7 +29,7 @@ */ #include <sys/cdefs.h> -__KERNEL_RCSID(0, "$NetBSD: vm_machdep.c,v 1.50 2012/02/19 21:06:08 rmind Exp $"); +__KERNEL_RCSID(0, "$NetBSD: vm_machdep.c,v 1.51 2012/03/07 22:07:13 skrll Exp $"); #include <sys/param.h> #include <sys/systm.h> @@ -81,9 +81,6 @@ void cpu_lwp_fork(struct lwp *l1, struct lwp *l2, void *stack, size_t stacksize, void (*func)(void *), void *arg) { - struct proc *p = l2->l_proc; - pmap_t pmap = p->p_vmspace->vm_map.pmap; - pa_space_t space = pmap->pm_space; struct pcb *pcb1, *pcb2; struct trapframe *tf; register_t sp, osp; @@ -128,22 +125,28 @@ cpu_lwp_fork(struct lwp *l1, struct lwp /* Fill out all the PAs we are going to need in locore. */ cpu_activate_pcb(l2); - /* Load all of the user's space registers. */ - tf->tf_sr0 = tf->tf_sr1 = tf->tf_sr3 = tf->tf_sr2 = - tf->tf_sr4 = tf->tf_sr5 = tf->tf_sr6 = space; - tf->tf_iisq_head = tf->tf_iisq_tail = space; - - /* Load the protection registers */ - tf->tf_pidr1 = tf->tf_pidr2 = pmap->pm_pid; - - /* - * theoretically these could be inherited from the father, - * but just in case. 
- */ - tf->tf_sr7 = HPPA_SID_KERNEL; - mfctl(CR_EIEM, tf->tf_eiem); - tf->tf_ipsw = PSW_C | PSW_Q | PSW_P | PSW_D | PSW_I /* | PSW_L */ | - (curcpu()->ci_psw & PSW_O); + if (__predict_true(l2->l_proc->p_vmspace != NULL)) { + struct proc *p = l2->l_proc; + pmap_t pmap = p->p_vmspace->vm_map.pmap; + pa_space_t space = pmap->pm_space; + + /* Load all of the user's space registers. */ + tf->tf_sr0 = tf->tf_sr1 = tf->tf_sr3 = tf->tf_sr2 = + tf->tf_sr4 = tf->tf_sr5 = tf->tf_sr6 = space; + tf->tf_iisq_head = tf->tf_iisq_tail = space; + + /* Load the protection registers */ + tf->tf_pidr1 = tf->tf_pidr2 = pmap->pm_pid; + + /* + * theoretically these could be inherited from the father, + * but just in case. + */ + tf->tf_sr7 = HPPA_SID_KERNEL; + mfctl(CR_EIEM, tf->tf_eiem); + tf->tf_ipsw = PSW_C | PSW_Q | PSW_P | PSW_D | PSW_I /* | PSW_L */ | + (curcpu()->ci_psw & PSW_O); + } /* * Set up return value registers as libc:fork() expects @@ -275,3 +278,130 @@ cpu_lwp_setprivate(lwp_t *l, void *addr) mtctl(addr, CR_TLS); return 0; } + + +#ifdef __HAVE_CPU_UAREA_ROUTINES +void * +cpu_uarea_alloc(bool system) +{ +#ifdef PMAP_MAP_POOLPAGE + struct pglist pglist; + int error; + + /* + * Allocate a new physically contiguous uarea which can be + * direct-mapped. + */ + error = uvm_pglistalloc(USPACE, 0, ptoa(physmem), 0, 0, &pglist, 1, 1); + if (error) { + if (!system) + return NULL; + panic("%s: uvm_pglistalloc failed: %d", __func__, error); + } + + /* + * Get the physical address from the first page. + */ + const struct vm_page * const pg = TAILQ_FIRST(&pglist); + KASSERT(pg != NULL); + const paddr_t pa = VM_PAGE_TO_PHYS(pg); + + /* + * We need to return a direct-mapped VA for the pa. + */ + + return (void *)PMAP_MAP_POOLPAGE(pa); +#else + return NULL; +#endif +} + +/* + * Return true if we freed it, false if we didn't. 
+ */ +bool +cpu_uarea_free(void *vva) +{ +#ifdef PMAP_UNMAP_POOLPAGE + vaddr_t va = (vaddr_t) vva; + if (va >= VM_MIN_KERNEL_ADDRESS && va < VM_MAX_KERNEL_ADDRESS) + return false; + + /* + * Since the pages are physically contiguous, the vm_page structure + * will be as well. + */ + struct vm_page *pg = PHYS_TO_VM_PAGE(PMAP_UNMAP_POOLPAGE(va)); + KASSERT(pg != NULL); + for (size_t i = 0; i < UPAGES; i++, pg++) { + uvm_pagefree(pg); + } + return true; +#else + return false; +#endif +} +#endif /* __HAVE_CPU_UAREA_ROUTINES */ + +#ifdef __HAVE_CPU_UAREA_ROUTINES +void * +cpu_uarea_alloc(bool system) +{ +#ifdef PMAP_MAP_POOLPAGE + struct pglist pglist; + int error; + + /* + * Allocate a new physically contiguous uarea which can be + * direct-mapped. + */ + error = uvm_pglistalloc(USPACE, 0, ptoa(physmem), 0, 0, &pglist, 1, 1); + if (error) { + if (!system) + return NULL; + panic("%s: uvm_pglistalloc failed: %d", __func__, error); + } + + /* + * Get the physical address from the first page. + */ + const struct vm_page * const pg = TAILQ_FIRST(&pglist); + KASSERT(pg != NULL); + const paddr_t pa = VM_PAGE_TO_PHYS(pg); + + /* + * We need to return a direct-mapped VA for the pa. + */ + + return (void *)PMAP_MAP_POOLPAGE(pa); +#else + return NULL; +#endif +} + +/* + * Return true if we freed it, false if we didn't. + */ +bool +cpu_uarea_free(void *vva) +{ +#ifdef PMAP_UNMAP_POOLPAGE + vaddr_t va = (vaddr_t) vva; + if (va >= VM_MIN_KERNEL_ADDRESS && va < VM_MAX_KERNEL_ADDRESS) + return false; + + /* + * Since the pages are physically contiguous, the vm_page structure + * will be as well. + */ + struct vm_page *pg = PHYS_TO_VM_PAGE(PMAP_UNMAP_POOLPAGE(va)); + KASSERT(pg != NULL); + for (size_t i = 0; i < UPAGES; i++, pg++) { + uvm_pagefree(pg); + } + return true; +#else + return false; +#endif +} +#endif /* __HAVE_CPU_UAREA_ROUTINES */ \ No newline at end of file