This commit just splits out the state into two different states, but does not differentiate between them yet. Everywhere that was checking DYING before now checks DYING and DYING_ABORT equally.
We will need this split to deal with issues closing FDs when processes are DYING. In short, if the chan release methods attempt to block on a rendez, the syscalls will abort immediately, since DYING is set. DYING_ABORT is a DYING state, but specifically one where we want all syscalls to abort. Signed-off-by: Barret Rhoden <[email protected]> --- Documentation/processes.txt | 1 + kern/include/process.h | 36 +++++++++++++++++++++++++----------- kern/src/arsc.c | 2 +- kern/src/event.c | 2 +- kern/src/mm.c | 1 + kern/src/process.c | 14 +++++++++++++- kern/src/schedule.c | 12 ++++++------ kern/src/syscall.c | 6 +++--- 8 files changed, 51 insertions(+), 23 deletions(-) diff --git a/Documentation/processes.txt b/Documentation/processes.txt index a843890fbdaa..43335edb67b2 100644 --- a/Documentation/processes.txt +++ b/Documentation/processes.txt @@ -112,6 +112,7 @@ PROC_RUNNABLE_S PROC_RUNNING_S PROC_WAITING PROC_DYING +PROC_DYING_ABORT PROC_RUNNABLE_M PROC_RUNNING_M diff --git a/kern/include/process.h b/kern/include/process.h index 6c2537eb0c94..1ccab3b5600f 100644 --- a/kern/include/process.h +++ b/kern/include/process.h @@ -30,6 +30,13 @@ * a while. Possibly even using its local APIC timer. * - A process in an _M state will be informed about changes to its state, e.g., * will have a handler run in the event of a page fault + * + * DYING vs. DYING_ABORT: + * - DYING is the initial stage when a process is dying, but before all of its + * syscalls should abort. At this point, we start closing FDs and blocking + * certain new operations. + * - DYING_ABORT is after all FDs were closed and all outstanding syscalls are + * aborted. */ #define PROC_CREATED 0x01 @@ -37,22 +44,29 @@ #define PROC_RUNNING_S 0x04 #define PROC_WAITING 0x08 // can split out to INT and UINT #define PROC_DYING 0x10 -#define PROC_RUNNABLE_M 0x20 -#define PROC_RUNNING_M 0x40 - -#define procstate2str(state) ((state)==PROC_CREATED ? "CREATED" : \ - (state)==PROC_RUNNABLE_S ? 
"RUNNABLE_S" : \ - (state)==PROC_RUNNING_S ? "RUNNING_S" : \ - (state)==PROC_WAITING ? "WAITING" : \ - (state)==PROC_DYING ? "DYING" : \ - (state)==PROC_RUNNABLE_M ? "RUNNABLE_M" : \ - (state)==PROC_RUNNING_M ? "RUNNING_M" : \ - "UNKNOWN") +#define PROC_DYING_ABORT 0x20 +#define PROC_RUNNABLE_M 0x40 +#define PROC_RUNNING_M 0x80 + +#define procstate2str(state) ((state) == PROC_CREATED ? "CREATED" : \ + (state) == PROC_RUNNABLE_S ? "RUNNABLE_S" : \ + (state) == PROC_RUNNING_S ? "RUNNING_S" : \ + (state) == PROC_WAITING ? "WAITING" : \ + (state) == PROC_DYING ? "DYING" : \ + (state) == PROC_DYING_ABORT ? "DYING_ABORT" : \ + (state) == PROC_RUNNABLE_M ? "RUNNABLE_M" : \ + (state) == PROC_RUNNING_M ? "RUNNING_M" : \ + "UNKNOWN") #define DEFAULT_PROGNAME "" #include <env.h> +static bool proc_is_dying(struct proc *p) +{ + return (p->state == PROC_DYING) || (p->state == PROC_DYING_ABORT); +} + struct process_set { size_t num_processes; size_t size; diff --git a/kern/src/arsc.c b/kern/src/arsc.c index 85ea1281f120..d2f0b1ccc592 100644 --- a/kern/src/arsc.c +++ b/kern/src/arsc.c @@ -67,7 +67,7 @@ void arsc_server(uint32_t srcid, long a0, long a1, long a2) /* Probably want to try to process a dying process's syscalls. If * not, just move it to an else case */ process_generic_syscalls (p, MAX_ASRC_BATCH); - if (p->state == PROC_DYING) { + if (proc_is_dying(p)) { TAILQ_REMOVE(&arsc_proc_list, p, proc_arsc_link); proc_decref(p); /* Need to break out, so the TAILQ_FOREACH doesn't flip out. 
diff --git a/kern/src/event.c b/kern/src/event.c index 5386b9c09efa..ed5fe3943ff2 100644 --- a/kern/src/event.c +++ b/kern/src/event.c @@ -367,7 +367,7 @@ void send_event(struct proc *p, struct event_queue *ev_q, struct event_msg *msg, assert(!in_irq_ctx(&per_cpu_info[core_id()])); assert(p); - if (p->state == PROC_DYING) + if (proc_is_dying(p)) return; printd("[kernel] sending msg to proc %p, ev_q %p\n", p, ev_q); if (!ev_q) { diff --git a/kern/src/mm.c b/kern/src/mm.c index 80c0d7e63693..1202f0f52e15 100644 --- a/kern/src/mm.c +++ b/kern/src/mm.c @@ -951,6 +951,7 @@ static int __hpf_load_page(struct proc *p, struct page_map *pm, spin_unlock(&p->proc_lock); return -EAGAIN; /* will get reflected back to userspace */ case (PROC_DYING): + case (PROC_DYING_ABORT): spin_unlock(&p->proc_lock); return -EINVAL; default: diff --git a/kern/src/process.c b/kern/src/process.c index 22b26e78a032..ce190cec43d8 100644 --- a/kern/src/process.c +++ b/kern/src/process.c @@ -128,6 +128,7 @@ int __proc_set_state(struct proc *p, uint32_t state) * RGM -> RBS * RGS -> D * RGM -> D + * D -> DA * * These ought to be implemented later (allowed, not thought through yet). * RBS -> D @@ -154,9 +155,12 @@ int __proc_set_state(struct proc *p, uint32_t state) panic("Invalid State Transition! PROC_WAITING to %02x", state); break; case PROC_DYING: - if (state != PROC_CREATED) // when it is reused (TODO) + if (state != PROC_DYING_ABORT) panic("Invalid State Transition! PROC_DYING to %02x", state); break; + case PROC_DYING_ABORT: + panic("Invalid State Transition! PROC_DYING_ABORT to %02x", state); + break; case PROC_RUNNABLE_M: if (!(state & (PROC_RUNNING_M | PROC_DYING))) panic("Invalid State Transition! 
PROC_RUNNABLE_M to %02x", state); @@ -589,6 +593,7 @@ void proc_run_s(struct proc *p) spin_lock(&p->proc_lock); switch (p->state) { case (PROC_DYING): + case (PROC_DYING_ABORT): spin_unlock(&p->proc_lock); printk("[kernel] _S %d not starting due to async death\n", p->pid); return; @@ -691,6 +696,7 @@ void __proc_run_m(struct proc *p) switch (p->state) { case (PROC_WAITING): case (PROC_DYING): + case (PROC_DYING_ABORT): warn("ksched tried to run proc %d in state %s\n", p->pid, procstate2str(p->state)); return; @@ -816,6 +822,7 @@ void proc_destroy(struct proc *p) uint32_t pc_arr[p->procinfo->num_vcores]; switch (p->state) { case PROC_DYING: /* someone else killed this already. */ + case (PROC_DYING_ABORT): spin_unlock(&p->proc_lock); return; case PROC_CREATED: @@ -976,6 +983,7 @@ int proc_change_to_m(struct proc *p) warn("Not supporting RUNNABLE_S -> RUNNABLE_M yet.\n"); goto error_out; case (PROC_DYING): + case (PROC_DYING_ABORT): warn("Dying, core request coming from %d\n", core_id()); goto error_out; default: @@ -1162,6 +1170,7 @@ void proc_yield(struct proc *p, bool being_nice) case (PROC_RUNNING_M): break; /* will handle this stuff below */ case (PROC_DYING): /* incoming __death */ + case (PROC_DYING_ABORT): case (PROC_RUNNABLE_M): /* incoming (bulk) preempt/myield TODO:(BULK) */ goto out_failed; default: @@ -1329,6 +1338,7 @@ void proc_wakeup(struct proc *p) case (PROC_RUNNABLE_S): case (PROC_RUNNING_S): case (PROC_DYING): + case (PROC_DYING_ABORT): spin_unlock(&p->proc_lock); return; case (PROC_RUNNABLE_M): @@ -1618,6 +1628,7 @@ int __proc_give_cores(struct proc *p, uint32_t *pc_arr, uint32_t num) warn("Don't give cores to a process in a *_S state!\n"); return -1; case (PROC_DYING): + case (PROC_DYING_ABORT): case (PROC_WAITING): /* can't accept, just fail */ return -1; @@ -1990,6 +2001,7 @@ int proc_change_to_vcore(struct proc *p, uint32_t new_vcoreid, break; /* the only case we can proceed */ case (PROC_RUNNING_S): /* user bug, just return */ case 
(PROC_DYING): /* incoming __death */ + case (PROC_DYING_ABORT): case (PROC_RUNNABLE_M): /* incoming (bulk) preempt/myield TODO:(BULK) */ goto out_locked; default: diff --git a/kern/src/schedule.c b/kern/src/schedule.c index aeceb1b769bf..7590dcd27d67 100644 --- a/kern/src/schedule.c +++ b/kern/src/schedule.c @@ -159,7 +159,7 @@ static void remove_from_any_list(struct proc *p) * DYING */ void __sched_proc_register(struct proc *p) { - assert(p->state != PROC_DYING); /* shouldn't be abel to happen yet */ + assert(!proc_is_dying(p)); /* shouldn't be able to happen yet */ /* one ref for the proc's existence, cradle-to-grave */ proc_incref(p, 1); /* need at least this OR the 'one for existing' */ spin_lock(&sched_lock); @@ -175,7 +175,7 @@ void __sched_proc_change_to_m(struct proc *p) /* Need to make sure they aren't dying. if so, we already dealt with their * list membership, etc (or soon will). taking advantage of the 'immutable * state' of dying (so long as refs are held). */ - if (p->state == PROC_DYING) { + if (proc_is_dying(p)) { spin_unlock(&sched_lock); return; } @@ -219,7 +219,7 @@ void __sched_proc_destroy(struct proc *p, uint32_t *pc_arr, uint32_t nr_cores) void __sched_mcp_wakeup(struct proc *p) { spin_lock(&sched_lock); - if (p->state == PROC_DYING) { + if (proc_is_dying(p)) { spin_unlock(&sched_lock); return; } @@ -233,7 +233,7 @@ void __sched_mcp_wakeup(struct proc *p) void __sched_scp_wakeup(struct proc *p) { spin_lock(&sched_lock); - if (p->state == PROC_DYING) { + if (proc_is_dying(p)) { spin_unlock(&sched_lock); return; } @@ -298,7 +298,7 @@ static bool __schedule_scp(void) spin_lock(&pcpui->owning_proc->proc_lock); /* process might be dying, with a KMSG to clean it up waiting on * this core. 
can't do much, so we'll attempt to restart */ - if (pcpui->owning_proc->state == PROC_DYING) { + if (proc_is_dying(pcpui->owning_proc)) { send_kernel_message(core_id(), __just_sched, 0, 0, 0, KMSG_ROUTINE); spin_unlock(&pcpui->owning_proc->proc_lock); @@ -418,7 +418,7 @@ static void __run_mcp_ksched(void *arg) * DYING, it'll remain DYING until we decref. And if there is a * concurrent death, that will spin on the ksched lock (which we * hold, and which protects the proc lists). */ - if (p->state != PROC_DYING) + if (!proc_is_dying(p)) add_to_list(p, secondary_mcps); proc_decref(p); /* fyi, this may trigger __proc_free */ /* need to break: the proc lists may have changed when we unlocked diff --git a/kern/src/syscall.c b/kern/src/syscall.c index 43ce6926b998..593dfee78fab 100644 --- a/kern/src/syscall.c +++ b/kern/src/syscall.c @@ -1068,7 +1068,7 @@ all_out: static pid_t try_wait(struct proc *parent, struct proc *child, int *ret_status, int options) { - if (child->state == PROC_DYING) { + if (proc_is_dying(child)) { /* Disown returns -1 if it's already been disowned or we should o/w * abort. This can happen if we have concurrent waiters, both with * pointers to the child (only one should reap). Note that if we don't @@ -1130,7 +1130,7 @@ static pid_t wait_one(struct proc *parent, struct proc *child, int *ret_status, /* If we're dying, then we don't need to worry about waiting. We don't * do this yet, but we'll need this outlet when we deal with orphaned * children and having init inherit them. */ - if (parent->state == PROC_DYING) + if (proc_is_dying(parent)) goto out_unlock; /* Any child can wake us up, but we check for the particular child we * care about */ @@ -1160,7 +1160,7 @@ static pid_t wait_any(struct proc *parent, int *ret_status, int options) while (!retval) { cpu_relax(); cv_wait(&parent->child_wait); - if (parent->state == PROC_DYING) + if (proc_is_dying(parent)) goto out_unlock; /* Any child can wake us up from the CV. This is a linear try_wait * scan. 
If we have a lot of children, we could optimize this. */ -- 2.8.0.rc3.226.g39d4020 -- You received this message because you are subscribed to the Google Groups "Akaros" group. To unsubscribe from this group and stop receiving emails from it, send an email to [email protected]. To post to this group, send email to [email protected]. For more options, visit https://groups.google.com/d/optout.
