raiden00pl commented on code in PR #14957:
URL: https://github.com/apache/nuttx/pull/14957#discussion_r1860884466
##########
arch/x86_64/src/intel64/intel64_head.S:
##########
@@ -517,6 +531,176 @@ __enable_pcid:
.size __enable_pcid, . - __enable_pcid
+#ifdef CONFIG_LIB_SYSCALL
+ /****************************************************************************
+ * Name: x86_64_syscall_entry
+ *
+ * Description:
+ * Landing point for syscall instruction.
+ *
+ * At this point RFLAGS are masked with MSR_FMASK
+ * - IF=0 (interrupts are disabled)
+ * - DF=0
+ *
+ ****************************************************************************/
+
+ .type x86_64_syscall_entry, @function
+x86_64_syscall_entry:
+ /* Store current RSP on CPU private data first */
+ movq %rsp, %gs:X86_64_CPUPRIV_USTACK_OFFSET
+
+#ifdef CONFIG_ARCH_KERNEL_STACK
+ /* If current RSP is greater than kernel stack, we have to switch stack.
+ * Otherwise we are in nested syscall and we can't modify stack pointer
+ */
+ cmp %gs:X86_64_CPUPRIV_UVBASE_OFFSET, %rsp
+ jb no_kstack_switch
+
+ /* Change to kernel stack */
+ movq %gs:X86_64_CPUPRIV_KTOPSTK_OFFSET, %rsp
+no_kstack_switch:
+#endif
+
+ /* Store some registers on stack.
+ * We need some free registers here to handle stored registers alignment
+ * and kernel stack for nested syscalls but at this point we don't have any
+ *
+ * RDI is needed only for CONFIG_ARCH_KERNEL_STACK=y but to simplify
+ * the logic here - we always release it.
+ */
+ pushq %rcx
+ pushq %rdi
+
+ /* Get original kernel stack for this call */
+
+ movq %rsp, %rdi
+ add $16, %rdi
+
+ /* Get aligned registers area */
+ movq %rsp, %rcx
+ sub $8, %rcx
+ sub $((XCPTCONTEXT_REGS + 8) * 8), %rcx
+ add $(0x3f), %rcx
+ and $(0xffffffffffffffc0), %rcx
+
+ /* Syscall arguments */
+ movq %rax, (8*REG_RAX)(%rcx)
+ movq %rsi, (8*REG_RSI)(%rcx)
+ movq %rdx, (8*REG_RDX)(%rcx)
+ movq %r10, (8*REG_R10)(%rcx)
+ movq %r8, (8*REG_R8)(%rcx)
+ movq %r9, (8*REG_R9)(%rcx)
+
+ /* Callee registers */
+ movq %rbx, (8*REG_RBX)(%rcx)
+ movq %r11, (8*REG_R11)(%rcx)
+ movq %r12, (8*REG_R12)(%rcx)
+ movq %r13, (8*REG_R13)(%rcx)
+ movq %r14, (8*REG_R14)(%rcx)
+ movq %r15, (8*REG_R15)(%rcx)
+ movq %rbp, (8*REG_RBP)(%rcx)
+
+#ifndef CONFIG_ARCH_X86_64_HAVE_XSAVE
+ /* Save xmm registers */
+ fxsaveq (%rcx)
+#else
+# ifdef CONFIG_ARCH_CHIP_INTEL64_QEMU
+ /* BUGFIX for QEMU: make sure that xsave header is zeroed!
+ * QEMU doesn't clear these fields during xsave, so if the memory region
+ * for xsave state was not cleared before use, there may be junk data there,
+ * which causes xrstor to crash later.
+ */
+ movq $0, (X86_XSAVE_XSTATEBV_OFFSET)(%rcx)
+ movq $0, (X86_XSAVE_XCOMPBC_OFFSET)(%rcx)
+ movq $0, (X86_XSAVE_RESERVED0_OFFSET)(%rcx)
+# endif
+
+ movl $XSAVE_STATE_COMPONENTS, %eax
+ xor %edx, %edx
+ xsave (%rcx)
+#endif
+
+ /* Save RDI and RCX which are on stack now */
+ popq (8*REG_RDI)(%rcx)
+ popq (8*REG_RCX)(%rcx)
+
+ /* Store user stack pointer. We can't movq directly here.
+ * NOTE: for nested syscalls this value points to kernel stack.
+ */
+ pushq %gs:X86_64_CPUPRIV_USTACK_OFFSET
+ popq (8*REG_RSP)(%rcx)
+
+ /* Move stack pointer after registers area */
+ movq %rcx, %rsp
+
+#ifdef CONFIG_ARCH_KERNEL_STACK
+ /* Update stored kernel stack */
+ movq %rsp, %gs:X86_64_CPUPRIV_KTOPSTK_OFFSET
+
+ /* Store original kernel stack on stack which is now in RDI */
+ pushq %rdi
+#endif
+
+ /* Re-enable interrupts */
+ sti
Review Comment:
Syscalls in this implementation use the `SYSCALL` instruction
(Fast System Call, https://www.felixcloutier.com/x86/syscall), which is not
an exception but more like a "jump to a predefined address and set up the
kernel execution environment". So it's not affected by any masking mechanism.
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]