Partial contexts are those where the full context is split between the
parts in the struct and the parts still loaded in hardware.

When entering the kernel, the kernel only needs to save enough of the
context so that it can perform its work.  The hardware may have other
bits of context that remain, and we'll copy out that state once we think
we are not going to return to the (previously) current context.

The rule is that if we ever either:
        1) run the context somewhere else, or
        2) run another context here

then we need to finalize the context, by ripping it off the core
completely.

Signed-off-by: Barret Rhoden <[email protected]>
---
 kern/arch/riscv/trap.h |  9 +++++++++
 kern/arch/x86/trap64.h | 33 +++++++++++++++++++++++++++++++++
 kern/include/trap.h    | 28 ++++++++++++++++++++++++++++
 kern/src/process.c     | 11 +++++++++--
 kern/src/trap.c        |  3 +++
 5 files changed, 82 insertions(+), 2 deletions(-)

diff --git a/kern/arch/riscv/trap.h b/kern/arch/riscv/trap.h
index 9ff985b51de4..28c313adc1dd 100644
--- a/kern/arch/riscv/trap.h
+++ b/kern/arch/riscv/trap.h
@@ -72,4 +72,13 @@ set_frame_pointer(uintptr_t fp)
 void handle_trap(struct hw_trapframe *hw_tf);
 int emulate_fpu(struct hw_trapframe *hw_tf);
 
+static inline bool arch_ctx_is_partial(struct user_context *ctx)
+{
+       return FALSE;
+}
+
+static inline void arch_finalize_ctx(struct user_context *ctx)
+{
+}
+
 #endif
diff --git a/kern/arch/x86/trap64.h b/kern/arch/x86/trap64.h
index 4e1e6b22f110..73bc420d310e 100644
--- a/kern/arch/x86/trap64.h
+++ b/kern/arch/x86/trap64.h
@@ -107,3 +107,36 @@ static inline void x86_set_stacktop_tss(struct taskstate *tss, uintptr_t top)
 {
        tss->ts_rsp0 = top;
 }
+
+static inline bool x86_hwtf_is_partial(struct hw_trapframe *tf)
+{
+       return FALSE;
+}
+
+static inline bool x86_swtf_is_partial(struct sw_trapframe *tf)
+{
+       return FALSE;
+}
+
+static inline bool arch_ctx_is_partial(struct user_context *ctx)
+{
+       switch (ctx->type) {
+       case (ROS_HW_CTX):
+               return x86_hwtf_is_partial(&ctx->tf.hw_tf);
+       case (ROS_SW_CTX):
+               return x86_swtf_is_partial(&ctx->tf.sw_tf);
+       }
+       return FALSE;
+}
+
+/* Makes sure that the user context is fully saved into ctx and not split across
+ * the struct and HW, meaning it is not a "partial context". */
+static inline void arch_finalize_ctx(struct user_context *ctx)
+{
+       switch (ctx->type) {
+       case (ROS_HW_CTX):
+               break;
+       case (ROS_SW_CTX):
+               break;
+       }
+}
diff --git a/kern/include/trap.h b/kern/include/trap.h
index 3fef422ef0dc..118c81d1a8da 100644
--- a/kern/include/trap.h
+++ b/kern/include/trap.h
@@ -40,6 +40,34 @@ void __arch_reflect_trap_hwtf(struct hw_trapframe *hw_tf, unsigned int trap_nr,
 uintptr_t get_user_ctx_pc(struct user_context *ctx);
 uintptr_t get_user_ctx_fp(struct user_context *ctx);
 
+/* Partial contexts are those where the full context is split between the parts
+ * in the struct and the parts still loaded in hardware.
+ *
+ * Finalizing a context ensures that the full context is saved in the struct and
+ * nothing remains in hardware.  Finalize does two things: makes sure the
+ * context can be run again on another core and makes sure the core can run
+ * another context.
+ *
+ * arch_finalize_ctx() must be idempotent and have no effect on a full context.
+ * It is up to the architecture to keep track of whether or not a context is
+ * full or partial and handle finalize calls on a context that might not be
+ * partial.  They can do so in the ctx itself, in their own arch-dependent
+ * manner.
+ *
+ * The kernel's guarantee to the arches is that:
+ * - finalize will be called after proc_pop_ctx (i.e. after it runs) at least
+ * once, before that context is used again on another core or before another
+ * context is used on this core.
+ * - the arches can store the partial status and anything else it wants in the
+ * *ctx without fear of it being tampered with.
+ * - user-provided contexts will be passed to proc_secure_ctx, and those
+ * contexts are full/finalized already.  Anything else is a user bug.  The
+ * arches enforce this.
+ * - an arch will never be asked to pop a partial context that was not already
+ * loaded onto the current core.
+ * - contexts will be finalized before handing them back to the user. */
+extern inline void arch_finalize_ctx(struct user_context *ctx);
+extern inline bool arch_ctx_is_partial(struct user_context *ctx);
 void copy_current_ctx_to(struct user_context *to_ctx);
 
 /* Kernel messages.  This is an in-order 'active message' style messaging
diff --git a/kern/src/process.c b/kern/src/process.c
index 5624b32912f9..ebd043b3dea3 100644
--- a/kern/src/process.c
+++ b/kern/src/process.c
@@ -1251,6 +1251,8 @@ void proc_yield(struct proc *p, bool being_nice)
                __proc_set_state(p, PROC_WAITING);
        }
        spin_unlock(&p->proc_lock);
+       /* We discard the current context, but we still need to restore the core */
+       arch_finalize_ctx(pcpui->cur_ctx);
        /* Hand the now-idle core to the ksched */
        __sched_put_idle_core(p, pcoreid);
        goto out_yield_core;
@@ -2001,7 +2003,11 @@ int proc_change_to_vcore(struct proc *p, uint32_t new_vcoreid,
                 * and we don't care about either the uthread_ctx or the vcore_ctx. */
                caller_vcpd->notif_disabled = FALSE;
                /* Don't need to save the FPU.  There should be no uthread or other
-                * reason to return to the FPU state. */
+                * reason to return to the FPU state.  But we do need to finalize the
+                * context, even though we are throwing it away.  We need to return the
+                * pcore to a state where it can run any context and not be bound to
+                * the old context. */
+               arch_finalize_ctx(pcpui->cur_ctx);
        } else {
                /* need to set up the calling vcore's ctx so that it'll get restarted by
                 * __startcore, to make the caller look like it was preempted. */
@@ -2211,7 +2217,8 @@ void __death(uint32_t srcid, long a0, long a1, long a2)
                vcore_account_offline(p, vcoreid);      /* in case anyone is counting */
                /* We won't restart the process later.  current gets cleared later when
                 * we notice there is no owning_proc and we have nothing to do
-                * (smp_idle, restartcore, etc) */
+                * (smp_idle, restartcore, etc). */
+               arch_finalize_ctx(pcpui->cur_ctx);
                clear_owning_proc(coreid);
        }
 }
diff --git a/kern/src/trap.c b/kern/src/trap.c
index c228ba2bbaa5..bb58a567f537 100644
--- a/kern/src/trap.c
+++ b/kern/src/trap.c
@@ -108,6 +108,9 @@ void copy_current_ctx_to(struct user_context *to_ctx)
 {
        struct user_context *cur_ctx = current_ctx;
 
+       /* Be sure to finalize into cur_ctx, not the to_ctx.  o/w the arch could get
+        * confused by other calls to finalize. */
+       arch_finalize_ctx(cur_ctx);
        *to_ctx = *cur_ctx;
 }
 
-- 
2.6.0.rc2.230.g3dd15c0

-- 
You received this message because you are subscribed to the Google Groups 
"Akaros" group.
To unsubscribe from this group and stop receiving emails from it, send an email 
to [email protected].
To post to this group, send email to [email protected].
For more options, visit https://groups.google.com/d/optout.

Reply via email to