arch/x86_64: add kernel stack support

Signed-off-by: p-szafonimateusz <p-szafonimateusz@xiaomi.com>

commit ce22c28e88 (parent 712e8d9cc7)
15 changed files with 377 additions and 18 deletions
@@ -528,6 +528,15 @@ struct xcptcontext
   uint64_t saved_rflags;
+  uint64_t saved_rsp;
+
+#ifdef CONFIG_ARCH_KERNEL_STACK
+  /* With the kernel stack enabled we cannot use tcb->xcp.regs[REG_RSP],
+   * because it may point to the kernel stack if the signaled task is
+   * currently waiting in up_switch_context().
+   */
+
+  uint64_t saved_ursp;
+#endif

   /* Register save area - allocated from stack in up_initial_state() */

   uint64_t *regs;
@@ -540,6 +549,23 @@ struct xcptcontext
   uint8_t nsyscalls;
   struct xcpt_syscall_s syscall[CONFIG_SYS_NNEST];
 #endif
+
+#ifdef CONFIG_ARCH_ADDRENV
+# ifdef CONFIG_ARCH_KERNEL_STACK
+  /* In this configuration, all syscalls execute from an internal kernel
+   * stack.  Why?  Because when we instantiate and initialize the address
+   * environment of the new user process, we will temporarily lose the
+   * address environment of the old user process, including its stack
+   * contents.  The kernel C logic will crash immediately with no valid
+   * stack in place.
+   */
+
+  uintptr_t *ustkptr;  /* Saved user stack pointer */
+  uintptr_t *kstack;   /* Allocated base of the (aligned) kernel stack */
+  uintptr_t *ktopstk;  /* Top of kernel stack */
+  uintptr_t *kstkptr;  /* Saved kernel stack pointer */
+# endif
+#endif
 };
 #endif
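Note: the four pointers above are tied together by up_initial_state() further down in this diff. A minimal sketch of that relationship (the standalone helper below is illustrative only, not part of the commit):

/* Illustrative helper, assuming the xcptcontext fields and
 * ARCH_KERNEL_STACKSIZE introduced by this patch; "topstack" stands for
 * the task's initial user stack top.
 */

static void init_kstack_fields(struct xcptcontext *xcp, uintptr_t topstack)
{
  if (xcp->kstack != NULL)
    {
      /* Remember the user stack top... */

      xcp->ustkptr = (uintptr_t *)topstack;

      /* ...and start the kernel stack empty, at its aligned top */

      xcp->ktopstk = (uintptr_t *)((uintptr_t)xcp->kstack +
                                   ARCH_KERNEL_STACKSIZE);
      xcp->kstkptr = xcp->ktopstk;
    }
}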
@@ -49,8 +49,9 @@
  * Pre-processor Definitions
  ****************************************************************************/

 #define X86_64_CPUPRIV_USTACK_OFFSET  (16)
 #define X86_64_CPUPRIV_UVBASE_OFFSET  (24)
+#define X86_64_CPUPRIV_KTOPSTK_OFFSET (32)

 /****************************************************************************
  * Public Data
@@ -82,6 +83,19 @@ struct intel64_cpu_s

   uint64_t *uvbase;
 #endif
+
+#ifdef CONFIG_ARCH_KERNEL_STACK
+  /* Kernel stack pointer.
+   *
+   * We have to track the current kernel stack pointer to handle
+   * syscalls in kernel mode.  All registers are occupied when entering
+   * a syscall, so we cannot get this value from the TCB in the syscall
+   * handler.  We keep a reference to the kernel stack in CPU private
+   * data and update it at each context switch.
+   */
+
+  uint64_t *ktopstk;
+#endif
 };

 /****************************************************************************
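Note: the rest of this diff keeps this per-CPU field in sync at every scheduling point and reads it at syscall entry. Roughly (a sketch only; the real changes are in up_switch_context(), up_exit() and the syscall entry code below):

/* Sketch only: the pattern used by up_switch_context()/up_exit() below.
 * x86_64_set_ktopstk() is the %gs-relative store added by this patch.
 */

void example_switch_to(struct tcb_s *tcb)  /* hypothetical wrapper */
{
#ifdef CONFIG_ARCH_KERNEL_STACK
  /* Publish the incoming task's kernel stack top for syscall entry */

  x86_64_set_ktopstk(tcb->xcp.ktopstk);
#endif

  /* ...then restore the task's register context */

  x86_64_fullcontextrestore(tcb->xcp.regs);
}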
@@ -68,4 +68,13 @@ if(NOT CONFIG_ALARM_ARCH)
   list(APPEND SRCS x86_64_udelay.c x86_64_mdelay.c)
 endif()

+if(CONFIG_ARCH_KERNEL_STACK)
+  list(APPEND SRCS x86_64_addrenv_kstack.c)
+endif()
+
+if(NOT CONFIG_BUILD_FLAT)
+  list(APPEND SRCS x86_64_task_start.c x86_64_pthread_start.c
+       x86_64_signal_dispatch.c)
+endif()
+
 target_sources(arch PRIVATE ${SRCS})
@@ -58,7 +58,18 @@ CMN_CSRCS += x86_64_mmu.c
 endif

 ifeq ($(CONFIG_ARCH_ADDRENV),y)
-CMN_CSRCS += x86_64_addrenv.c x86_64_addrenv_perms.c
+CMN_CSRCS += x86_64_addrenv.c x86_64_pgalloc.c x86_64_addrenv_perms.c
 endif

+ifeq ($(CONFIG_ARCH_KERNEL_STACK),y)
+CMN_CSRCS += x86_64_addrenv_kstack.c
+endif
+
+ifneq ($(CONFIG_BUILD_FLAT),y)
+CMN_CSRCS += x86_64_task_start.c
+CMN_CSRCS += x86_64_pthread_start.c
+CMN_CSRCS += x86_64_signal_dispatch.c
+CMN_UASRCS += x86_64_signal_handler.S
+endif
+
 ifndef CONFIG_ALARM_ARCH
arch/x86_64/src/common/x86_64_addrenv_kstack.c (new file, 109 lines)
@@ -0,0 +1,109 @@
/****************************************************************************
 * arch/x86_64/src/common/x86_64_addrenv_kstack.c
 *
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.  The
 * ASF licenses this file to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance with the
 * License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
 * License for the specific language governing permissions and limitations
 * under the License.
 *
 ****************************************************************************/

/****************************************************************************
 * Included Files
 ****************************************************************************/

#include <nuttx/config.h>

#include <errno.h>
#include <assert.h>
#include <debug.h>

#include <nuttx/sched.h>
#include <nuttx/kmalloc.h>
#include <nuttx/addrenv.h>
#include <nuttx/arch.h>

#include "addrenv.h"

#if defined(CONFIG_ARCH_ADDRENV) && defined(CONFIG_ARCH_KERNEL_STACK)

/****************************************************************************
 * Public Functions
 ****************************************************************************/

/****************************************************************************
 * Name: up_addrenv_kstackalloc
 *
 * Description:
 *   This function is called when a new thread is created to allocate
 *   the new thread's kernel stack.  This function may be called for certain
 *   terminating threads which have no kernel stack.  It must be tolerant of
 *   that case.
 *
 * Input Parameters:
 *   tcb - The TCB of the thread that requires the kernel stack.
 *
 * Returned Value:
 *   Zero (OK) on success; a negated errno value on failure.
 *
 ****************************************************************************/

int up_addrenv_kstackalloc(struct tcb_s *tcb)
{
  DEBUGASSERT(tcb && tcb->xcp.kstack == NULL);

  /* Allocate the kernel stack */

  tcb->xcp.kstack = kmm_memalign(STACK_ALIGNMENT, ARCH_KERNEL_STACKSIZE);
  if (!tcb->xcp.kstack)
    {
      berr("ERROR: Failed to allocate the kernel stack\n");
      return -ENOMEM;
    }

  return OK;
}

/****************************************************************************
 * Name: up_addrenv_kstackfree
 *
 * Description:
 *   This function is called when any thread exits.  This function frees
 *   the kernel stack.
 *
 * Input Parameters:
 *   tcb - The TCB of the thread that no longer requires the kernel stack.
 *
 * Returned Value:
 *   Zero (OK) on success; a negated errno value on failure.
 *
 ****************************************************************************/

int up_addrenv_kstackfree(struct tcb_s *tcb)
{
  DEBUGASSERT(tcb);

  /* Does the exiting thread have a kernel stack? */

  if (tcb->xcp.kstack)
    {
      /* Yes.. Free the kernel stack */

      kmm_free(tcb->xcp.kstack);
      tcb->xcp.kstack = NULL;
    }

  return OK;
}

#endif /* CONFIG_ARCH_ADDRENV && CONFIG_ARCH_KERNEL_STACK */
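Note: a minimal sketch of the intended alloc/free lifecycle. The caller names below are hypothetical; in practice the scheduler's address-environment logic (not part of this diff) invokes these interfaces.

/* Hypothetical callers, shown only to illustrate the alloc/free contract */

static int example_thread_create(struct tcb_s *tcb)
{
  int ret = up_addrenv_kstackalloc(tcb);  /* sets tcb->xcp.kstack */
  if (ret < 0)
    {
      return ret;                         /* -ENOMEM if allocation failed */
    }

  /* ... remaining thread setup ... */

  return OK;
}

static void example_thread_exit(struct tcb_s *tcb)
{
  /* Tolerant of a missing or already-freed kernel stack */

  up_addrenv_kstackfree(tcb);
}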
@@ -89,6 +89,12 @@ void up_exit(int status)

   restore_critical_section(tcb, this_cpu());

+#ifdef CONFIG_ARCH_KERNEL_STACK
+  /* Update kernel stack top pointer */
+
+  x86_64_set_ktopstk(tcb->xcp.ktopstk);
+#endif
+
   /* Then switch contexts */

   x86_64_fullcontextrestore(tcb->xcp.regs);
@@ -32,6 +32,7 @@
+# include <nuttx/sched.h>
 # include <stdint.h>
 # include <arch/io.h>
 # include <arch/irq.h>
 # include <arch/multiboot2.h>
 #endif

@@ -191,15 +192,34 @@ extern uint8_t _stbss[]; /* Start of .tbss */
 extern uint8_t _etbss[]; /* End+1 of .tbss */
 #endif

 #ifndef __ASSEMBLY__

+/****************************************************************************
+ * Inline Functions
+ ****************************************************************************/
+
+#ifdef CONFIG_ARCH_KERNEL_STACK
+static inline_function uint64_t *x86_64_get_ktopstk(void)
+{
+  uint64_t *ktopstk;
+  __asm__ volatile("movq %%gs:(%c1), %0"
+                   : "=rm" (ktopstk)
+                   : "i" (offsetof(struct intel64_cpu_s, ktopstk)));
+  return ktopstk;
+}
+
+static inline_function void x86_64_set_ktopstk(uint64_t *ktopstk)
+{
+  __asm__ volatile("movq %0, %%gs:(%c1)"
+                   :: "r" (ktopstk), "i" (offsetof(struct intel64_cpu_s,
+                                                   ktopstk)));
+}
+#endif
+
 /****************************************************************************
  * Public Function Prototypes
  ****************************************************************************/

 #ifndef __ASSEMBLY__
 /* Atomic modification of registers */

 void modifyreg8(unsigned int addr, uint8_t clearbits, uint8_t setbits);
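Note: the "%c1" operand just splices the field offset into a %gs-relative address. With ktopstk at offset 32 in struct intel64_cpu_s (matching X86_64_CPUPRIV_KTOPSTK_OFFSET above), the accessors are expected to expand to roughly the following; the register choice is up to the compiler and shown here only as an illustration.

/* Expected expansion (illustration only):
 *
 *   movq %gs:(32), %rax      ; x86_64_get_ktopstk()
 *   movq %rdi, %gs:(32)      ; x86_64_set_ktopstk(ktopstk)
 *
 * i.e. plain per-CPU loads/stores through %gs, which is why the assembly
 * syscall entry below can use the same offset constants directly.
 */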
@@ -27,6 +27,8 @@
 #include <nuttx/arch.h>
 #include <assert.h>

+#include "sched/sched.h"
+
 #include <arch/syscall.h>

 #include "x86_64_internal.h"
@@ -66,6 +68,17 @@
 void up_pthread_start(pthread_trampoline_t startup,
                       pthread_startroutine_t entrypt, pthread_addr_t arg)
 {
+#ifdef CONFIG_ARCH_KERNEL_STACK
+  struct tcb_s *tcb = this_task();
+
+  /* Make sure that the kernel stack is set for the current CPU */
+
+  if (x86_64_get_ktopstk() == NULL)
+    {
+      x86_64_set_ktopstk(tcb->xcp.ktopstk);
+    }
+#endif
+
   /* Let sys_call3() do all of the work */

   sys_call3(SYS_pthread_start, (uintptr_t)startup, (uintptr_t)entrypt,
@@ -59,6 +59,12 @@ void up_switch_context(struct tcb_s *tcb, struct tcb_s *rtcb)
 {
   int cpu;

+#ifdef CONFIG_ARCH_KERNEL_STACK
+  /* Update kernel stack top pointer */
+
+  x86_64_set_ktopstk(tcb->xcp.ktopstk);
+#endif
+
   /* Are we in an interrupt handler? */

   if (up_interrupt_context())
@@ -218,6 +218,34 @@ uint64_t *x86_64_syscall(uint64_t *regs)
           regs[REG_RDX] = arg4;  /* ucontext */
           regs[REG_R10] = arg1;  /* sighand */

+#ifdef CONFIG_ARCH_KERNEL_STACK
+          /* If we are signalling a user process, then we must be operating
+           * on the kernel stack now.  We need to switch back to the user
+           * stack before dispatching the signal handler to the user code.
+           * The existence of an allocated kernel stack is sufficient
+           * information to make this decision.
+           */
+
+          if (rtcb->xcp.kstack != NULL)
+            {
+              uint64_t usp;
+
+              /* Copy "info" into user stack */
+
+              usp = rtcb->xcp.saved_ursp - 8;
+
+              /* Create a frame for info and copy the kernel info */
+
+              usp = usp - sizeof(siginfo_t);
+              memcpy((void *)usp, (void *)regs[REG_RSI], sizeof(siginfo_t));
+
+              /* Now set the updated SP and user copy of "info" to RSI */
+
+              regs[REG_RSP] = usp;
+              regs[REG_RSI] = usp;
+            }
+#endif
+
           break;
         }
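Note: after this block the user stack, just before the signal handler runs, looks roughly like this (illustration only):

/* User stack layout built above (addresses grow downward):
 *
 *   saved_ursp                          user RSP captured at syscall entry
 *   saved_ursp - 8                      8-byte slot skipped below it
 *   saved_ursp - 8 - sizeof(siginfo_t)  user-space copy of the siginfo_t;
 *                                       becomes both REG_RSP and REG_RSI
 *                                       for the signal handler dispatch
 */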
@@ -276,6 +304,12 @@ uint64_t *x86_64_syscall(uint64_t *regs)
       rtcb->xcp.syscall[rtcb->xcp.nsyscalls].sysreturn = regs[REG_RCX];
       rtcb->xcp.nsyscalls += 1;

+#ifdef CONFIG_ARCH_KERNEL_STACK
+      /* Store reference to user RSP for signals */
+
+      rtcb->xcp.saved_ursp = regs[REG_RSP];
+#endif
+
       /* Call syscall function */

       ret = stub(nbr, arg1, arg2, arg3, arg4, arg5, arg6);
@@ -26,6 +26,8 @@
 #include <nuttx/arch.h>
 #include <assert.h>

+#include "sched/sched.h"
+
 #include <arch/syscall.h>

 #include "x86_64_internal.h"
@@ -63,6 +65,17 @@

 void up_task_start(main_t taskentry, int argc, char *argv[])
 {
+#ifdef CONFIG_ARCH_KERNEL_STACK
+  struct tcb_s *tcb = this_task();
+
+  /* Make sure that the kernel stack is set for the current CPU */
+
+  if (x86_64_get_ktopstk() == NULL)
+    {
+      x86_64_set_ktopstk(tcb->xcp.ktopstk);
+    }
+#endif
+
   /* Let sys_call3() do all of the work */

   sys_call3(SYS_task_start, (uintptr_t)taskentry, (uintptr_t)argc,
@@ -246,6 +246,9 @@ void x86_64_cpu_init(void)
       g_cpu_priv[i].ustack = NULL;
       g_cpu_priv[i].uvbase = (uint64_t *)CONFIG_ARCH_TEXT_VBASE;
 #endif
+#ifdef CONFIG_ARCH_KERNEL_STACK
+      g_cpu_priv[i].ktopstk = NULL;
+#endif

       /* Store private CPU in TSS */

@@ -552,10 +552,32 @@ x86_64_syscall_entry:
 	/* Store current RSP on CPU private data first */
 	movq    %rsp, %gs:X86_64_CPUPRIV_USTACK_OFFSET

-	/* Store return address - we need free register to store
-	 * CPU context but at this point we don't have any.
-	 */
+#ifdef CONFIG_ARCH_KERNEL_STACK
+	/* If the current RSP is at or above the user virtual base, we are
+	 * still on the user stack and have to switch to the kernel stack.
+	 * Otherwise we are in a nested syscall and must not modify the
+	 * stack pointer.
+	 */
+	cmp     %gs:X86_64_CPUPRIV_UVBASE_OFFSET, %rsp
+	jb      no_kstack_switch
+
+	/* Change to kernel stack */
+	movq    %gs:X86_64_CPUPRIV_KTOPSTK_OFFSET, %rsp
+no_kstack_switch:
+#endif
+
+	/* Store some registers on stack.
+	 * We need some free registers here to handle the stored-register
+	 * alignment and the kernel stack for nested syscalls, but at this
+	 * point we don't have any.
+	 *
+	 * RDI is needed only for CONFIG_ARCH_KERNEL_STACK=y, but to simplify
+	 * the logic here we always free it.
+	 */
 	pushq   %rcx
+	pushq   %rdi
+
+	/* Get original kernel stack for this call */
+
+	movq    %rsp, %rdi
+	add     $16, %rdi

 	/* Get aligned registers area */
 	movq    %rsp, %rcx
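Note: in C terms the stack-switch decision above is roughly the following (sketch only; "uvbase" and "ktopstk" stand for the per-CPU fields accessed through %gs):

#include <stdint.h>

/* Sketch of the syscall-entry stack selection done in assembly above */

static inline uint64_t *pick_syscall_stack(uint64_t *rsp,
                                           uint64_t *uvbase,
                                           uint64_t *ktopstk)
{
  if (rsp >= uvbase)
    {
      /* Entered from user code: run this syscall on the kernel stack */

      return ktopstk;
    }

  /* Nested syscall from kernel code: keep the current stack untouched */

  return rsp;
}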
@@ -566,7 +588,6 @@ x86_64_syscall_entry:

 	/* Syscall arguments */
 	movq    %rax, (8*REG_RAX)(%rcx)
-	movq    %rdi, (8*REG_RDI)(%rcx)
 	movq    %rsi, (8*REG_RSI)(%rcx)
 	movq    %rdx, (8*REG_RDX)(%rcx)
 	movq    %r10, (8*REG_R10)(%rcx)
@@ -602,16 +623,27 @@
 	xsave   (%rcx)
 #endif

-	/* Save RCX */
+	/* Save RDI and RCX which are on the stack now */
+	popq    (8*REG_RDI)(%rcx)
 	popq    (8*REG_RCX)(%rcx)

-	/* Store user stack pointer */
+	/* Store user stack pointer.  We can't movq directly here
+	 * (there is no memory-to-memory move).
+	 * NOTE: for nested syscalls this value points to the kernel stack.
+	 */
 	pushq   %gs:X86_64_CPUPRIV_USTACK_OFFSET
 	popq    (8*REG_RSP)(%rcx)

 	/* Move stack pointer after registers area */
 	movq    %rcx, %rsp

+#ifdef CONFIG_ARCH_KERNEL_STACK
+	/* Update stored kernel stack */
+	movq    %rsp, %gs:X86_64_CPUPRIV_KTOPSTK_OFFSET
+
+	/* Store the original kernel stack pointer (now in RDI) on the stack */
+	pushq   %rdi
+#endif
+
 	/* Return value from syscall stored in rax */
 	movq    %rcx, %rdi
 	call    x86_64_syscall
@@ -626,6 +658,11 @@
 	xrstor  (%rdi)
 #endif

+#ifdef CONFIG_ARCH_KERNEL_STACK
+	/* Restore original kernel stack */
+	popq    %gs:X86_64_CPUPRIV_KTOPSTK_OFFSET
+#endif
+
 	/* Restore original user RSP */
 	movq    (8*REG_RSP)(%rdi), %rsp

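Note: taken together with the entry path above, the per-CPU KTOPSTK value follows roughly this lifecycle across one syscall (illustration only, not part of the patch):

/* Rough lifecycle of %gs:KTOPSTK across a syscall:
 *
 *   entry:  RDI     <- stack top in effect for this call
 *           KTOPSTK <- RSP below the saved-register area, so that signal
 *                      delivery (up_schedule_sigaction) sees how much of
 *                      the kernel stack is already in use
 *           push RDI
 *   exit:   KTOPSTK <- pop            restore the entry-time value
 */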
@@ -45,6 +45,12 @@
 # error XCPTCONTEXT_SIZE must be aligned to XCPTCONTEXT_ALIGN !
 #endif

+/* Aligned size of the kernel stack */
+
+#ifdef CONFIG_ARCH_KERNEL_STACK
+# define ARCH_KERNEL_STACKSIZE STACK_ALIGN_UP(CONFIG_ARCH_KERNEL_STACKSIZE)
+#endif
+
 /****************************************************************************
  * Public Functions
 ****************************************************************************/
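Note: STACK_ALIGN_UP() simply rounds the configured size up to the stack alignment (16 bytes on x86_64). A worked example with an assumed, non-default configuration value:

/* Example (values are assumptions, not defaults):
 *
 *   CONFIG_ARCH_KERNEL_STACKSIZE = 3000
 *   STACK_ALIGNMENT              = 16
 *   ARCH_KERNEL_STACKSIZE        = STACK_ALIGN_UP(3000) = 3008
 *
 * so xcp->ktopstk = xcp->kstack + ARCH_KERNEL_STACKSIZE stays 16-byte
 * aligned as long as the allocation itself is aligned.
 */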
@@ -65,6 +71,10 @@
 void up_initial_state(struct tcb_s *tcb)
 {
   struct xcptcontext *xcp = &tcb->xcp;
+  uint64_t topstack;
+#ifdef CONFIG_ARCH_KERNEL_STACK
+  uintptr_t *kstack = xcp->kstack;
+#endif

   /* Initialize the idle thread stack */

@@ -102,11 +112,11 @@ void up_initial_state(struct tcb_s *tcb)
 #ifndef CONFIG_ARCH_X86_64_HAVE_XSAVE
   /* Set the FCW to 1f80 */

   xcp->regs[1] = (uint64_t)0x0000037f00000000;

   /* Set the MXCSR to 1f80 */

   xcp->regs[3] = (uint64_t)0x0000000000001f80;
 #else
   /* Initialize XSAVE region with a valid state */

@@ -116,25 +126,49 @@ void up_initial_state(struct tcb_s *tcb)
                    : "memory");
 #endif

+  topstack = (uint64_t)xcp->regs - 8;
+
   /* Save the initial stack pointer... the value of the stackpointer before
    * the "interrupt occurs."
    */

-  xcp->regs[REG_RSP] = (uint64_t)xcp->regs - 8;
+  xcp->regs[REG_RSP] = topstack;
   xcp->regs[REG_RBP] = 0;

+#ifdef CONFIG_ARCH_KERNEL_STACK
+  /* Use the process kernel stack to store context for user processes
+   * in syscalls.
+   */
+
+  if (kstack)
+    {
+      xcp->kstack = kstack;
+      xcp->ustkptr = (uintptr_t *)topstack;
+      topstack = (uintptr_t)kstack + ARCH_KERNEL_STACKSIZE;
+      xcp->ktopstk = (uintptr_t *)topstack;
+      xcp->kstkptr = xcp->ktopstk;
+
+      /* Initialize kernel stack top reference */
+
+      if (x86_64_get_ktopstk() == NULL)
+        {
+          x86_64_set_ktopstk(tcb->xcp.ktopstk);
+        }
+    }
+#endif
+
   /* Save the task entry point */

   xcp->regs[REG_RIP] = (uint64_t)tcb->start;

   /* Set up the segment registers... assume the same segment as the caller.
    * That is not a good assumption in the long run.
    */

   xcp->regs[REG_DS] = up_getds();
   xcp->regs[REG_CS] = up_getcs();
   xcp->regs[REG_SS] = up_getss();
   xcp->regs[REG_ES] = up_getes();

   /* FS used by for TLS
    * used by some libc for TLS and segment reference
@@ -126,6 +126,18 @@ void up_schedule_sigaction(struct tcb_s *tcb)
           up_current_regs()[REG_RIP] = (uint64_t)x86_64_sigdeliver;
           up_current_regs()[REG_RSP] = up_current_regs()[REG_RSP] - 8;
           up_current_regs()[REG_RFLAGS] = 0;
+
+#ifdef CONFIG_ARCH_KERNEL_STACK
+          /* Update segments to kernel segments */
+
+          up_current_regs()[REG_SS] = tcb->xcp.regs[REG_SS];
+          up_current_regs()[REG_CS] = tcb->xcp.regs[REG_CS];
+          up_current_regs()[REG_DS] = tcb->xcp.regs[REG_DS];
+
+          /* Update RSP to kernel stack */
+
+          up_current_regs()[REG_RSP] = (uint64_t)x86_64_get_ktopstk();
+#endif
         }
     }
@@ -216,6 +228,18 @@ void up_schedule_sigaction(struct tcb_s *tcb)
           up_current_regs()[REG_RSP] = up_current_regs()[REG_RSP] - 8;
           up_current_regs()[REG_RFLAGS] = 0;

+#ifdef CONFIG_ARCH_KERNEL_STACK
+          /* Update segments to kernel segments */
+
+          up_current_regs()[REG_SS] = tcb->xcp.regs[REG_SS];
+          up_current_regs()[REG_CS] = tcb->xcp.regs[REG_CS];
+          up_current_regs()[REG_DS] = tcb->xcp.regs[REG_DS];
+
+          /* Update RSP to kernel stack */
+
+          up_current_regs()[REG_RSP] =
+            (uint64_t)x86_64_get_ktopstk();
+#endif
           /* Mark that full context switch is necessary when we
            * return from interrupt handler.
            * In that case RIP, RSP and RFLAGS are changed, but