Shadow stack accesses are those performed by the CPU where it expects
to encounter a shadow stack mapping.  They are made implicitly by
CALL/RET at the location pointed to by the shadow stack pointer, or
explicitly by shadow stack management instructions such as WRUSSQ.
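
As an illustration (not part of this patch), an explicit shadow stack
write from the kernel might look like the sketch below; the helper name
write_user_shstk_64 and the omission of exception-table fixup are
assumptions made here for brevity:

    #include <linux/types.h>

    /*
     * Illustrative sketch: write one 64-bit value to a user shadow
     * stack page with WRUSSQ.  A fault taken here arrives with
     * X86_PF_SHSTK set in the page fault error code, which the
     * changes below learn to handle.
     */
    static inline void write_user_shstk_64(u64 *addr, u64 val)
    {
            asm volatile("wrussq %0, (%1)"
                         : : "r" (val), "r" (addr) : "memory");
    }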

Shadow stack accesses to shadow stack mappings can fault in normal,
valid operation, just like regular accesses to regular mappings.
Shadow stacks need some of the same features, such as delayed
allocation, swap and copy-on-write.

Shadow stack accesses can also result in errors, such as when a shadow
stack overflows, or when a shadow stack access lands on a
non-shadow-stack mapping.

When handling a shadow stack page fault, verify that it occurred within
a shadow stack mapping; it is always an error otherwise.  For a valid
shadow stack access, set FAULT_FLAG_WRITE to effect copy-on-write.
Because the fault is triggered by clearing _PAGE_DIRTY_HW (vs.
_PAGE_RW), a shadow stack read fault cannot be distinguished from a
write fault, and both are handled as a write access.
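
Read together, the two hunks below amount to the following condensed
sketch (paraphrased here for readability; the diff is authoritative):

    /* In access_error(): a shadow stack access outside a shadow stack
     * VMA is always an error; within one, it is allowed. */
    if (error_code & X86_PF_SHSTK)
            return (vma->vm_flags & VM_SHSTK) ? 0 : 1;

    /* In do_user_addr_fault(): read and write shadow stack faults look
     * the same, so treat both as writes to get copy-on-write. */
    if (hw_error_code & X86_PF_SHSTK)
            flags |= FAULT_FLAG_WRITE;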

Signed-off-by: Yu-cheng Yu <yu-cheng...@intel.com>
Reviewed-by: Kees Cook <keesc...@chromium.org>
---
v10:
-Revise commit log.

 arch/x86/include/asm/traps.h |  2 ++
 arch/x86/mm/fault.c          | 19 +++++++++++++++++++
 2 files changed, 21 insertions(+)

diff --git a/arch/x86/include/asm/traps.h b/arch/x86/include/asm/traps.h
index 9bf804709ee6..b4f4c725a350 100644
--- a/arch/x86/include/asm/traps.h
+++ b/arch/x86/include/asm/traps.h
@@ -168,6 +168,7 @@ enum {
  *   bit 3 ==                          1: use of reserved bit detected
  *   bit 4 ==                          1: fault was an instruction fetch
  *   bit 5 ==                          1: protection keys block access
+ *   bit 6 ==                          1: shadow stack access fault
  */
 enum x86_pf_error_code {
        X86_PF_PROT     =               1 << 0,
@@ -176,5 +177,6 @@ enum x86_pf_error_code {
        X86_PF_RSVD     =               1 << 3,
        X86_PF_INSTR    =               1 << 4,
        X86_PF_PK       =               1 << 5,
+       X86_PF_SHSTK    =               1 << 6,
 };
 #endif /* _ASM_X86_TRAPS_H */
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index a51df516b87b..a4a3c8f016f0 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -1210,6 +1210,17 @@ access_error(unsigned long error_code, struct vm_area_struct *vma)
                                       (error_code & X86_PF_INSTR), foreign))
                return 1;
 
+       /*
+        * Verify a shadow stack access is within a shadow stack VMA.
+        * It is always an error otherwise.  An ordinary data access
+        * to a shadow stack area is checked in the cases below.
+        */
+       if (error_code & X86_PF_SHSTK) {
+               if (!(vma->vm_flags & VM_SHSTK))
+                       return 1;
+               return 0;
+       }
+
        if (error_code & X86_PF_WRITE) {
                /* write, present and write, not present: */
                if (unlikely(!(vma->vm_flags & VM_WRITE)))
@@ -1367,6 +1378,14 @@ void do_user_addr_fault(struct pt_regs *regs,
 
        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 
+       /*
+        * Clearing _PAGE_DIRTY_HW is used to detect shadow stack access.
+        * This method cannot distinguish shadow stack read vs. write.
+        * For valid shadow stack accesses, set FAULT_FLAG_WRITE to effect
+        * copy-on-write.
+        */
+       if (hw_error_code & X86_PF_SHSTK)
+               flags |= FAULT_FLAG_WRITE;
        if (hw_error_code & X86_PF_WRITE)
                flags |= FAULT_FLAG_WRITE;
        if (hw_error_code & X86_PF_INSTR)
-- 
2.21.0
