From d22e6d74892c5156beff8edda4ab1f3ab289b07d Mon Sep 17 00:00:00 2001
From: wangmingrong1
Date: Mon, 6 Jan 2025 20:17:55 +0800
Subject: [PATCH] arm64/sctlr: Allow each thread to independently control its
 SCTLR_EL1 configuration

The method is the same as the one used to save the thread's current DAIF
state. This paves the way for a future implementation of HWASan memory
management, and allows each thread to independently control the MTE
switch.

Signed-off-by: wangmingrong1
---
 arch/arm64/include/irq.h                   | 3 ++-
 arch/arm64/src/common/arm64_initialstate.c | 5 +++++
 arch/arm64/src/common/arm64_vector_table.S | 6 ++++++
 arch/arm64/src/common/arm64_vectors.S      | 5 +++++
 4 files changed, 18 insertions(+), 1 deletion(-)

diff --git a/arch/arm64/include/irq.h b/arch/arm64/include/irq.h
index d3e1ece7f8..7467691a28 100644
--- a/arch/arm64/include/irq.h
+++ b/arch/arm64/include/irq.h
@@ -153,10 +153,11 @@
 #define REG_SPSR            (33)
 #define REG_SP_EL0          (34)
 #define REG_EXE_DEPTH       (35)
+#define REG_SCTLR_EL1       (36)
 
 /* In Armv8-A Architecture, the stack must align with 16 byte */
 
-#define ARM64_CONTEXT_REGS  (36)
+#define ARM64_CONTEXT_REGS  (37)
 #define ARM64_CONTEXT_SIZE  (8 * ARM64_CONTEXT_REGS)
 
 #ifdef CONFIG_ARCH_FPU
diff --git a/arch/arm64/src/common/arm64_initialstate.c b/arch/arm64/src/common/arm64_initialstate.c
index bc51df5750..ff2d9c5cff 100644
--- a/arch/arm64/src/common/arm64_initialstate.c
+++ b/arch/arm64/src/common/arm64_initialstate.c
@@ -86,6 +86,11 @@ void arm64_new_task(struct tcb_s * tcb)
   xcp->regs[REG_SPSR] = SPSR_MODE_EL1H;
 #endif
 
+  xcp->regs[REG_SCTLR_EL1] = read_sysreg(sctlr_el1);
+#ifdef CONFIG_ARM64_MTE
+  xcp->regs[REG_SCTLR_EL1] |= SCTLR_TCF1_BIT;
+#endif
+
 #ifdef CONFIG_SUPPRESS_INTERRUPTS
   xcp->regs[REG_SPSR] |= (DAIF_IRQ_BIT | DAIF_FIQ_BIT);
 #endif /* CONFIG_SUPPRESS_INTERRUPTS */
diff --git a/arch/arm64/src/common/arm64_vector_table.S b/arch/arm64/src/common/arm64_vector_table.S
index e98bd9b0ba..e4708cbb8a 100644
--- a/arch/arm64/src/common/arm64_vector_table.S
+++ b/arch/arm64/src/common/arm64_vector_table.S
@@ -80,9 +80,12 @@
 #endif
     stp    \xreg0, \xreg1, [sp, #8 * REG_ELR]
 
+    mrs    \xreg0, sctlr_el1
+    str    \xreg0, [sp, #8 * REG_SCTLR_EL1]
+
     mrs    \xreg0, sp_el0
     mrs    \xreg1, tpidrro_el0
     stp    \xreg0, \xreg1, [sp, #8 * REG_SP_EL0]
 
     /* Increment exception depth */
 
@@ -268,6 +271,9 @@ SECTION_FUNC(text, arm64_exit_exception)
     msr    spsr_el1, x1
 #endif
 
+    ldr    x0, [sp, #8 * REG_SCTLR_EL1]
+    msr    sctlr_el1, x0
+
     ldp    x0, x1, [sp, #8 * REG_SP_EL0]
     msr    sp_el0, x0
     msr    tpidrro_el0, x1
diff --git a/arch/arm64/src/common/arm64_vectors.S b/arch/arm64/src/common/arm64_vectors.S
index bbc904b586..a18e0a9838 100644
--- a/arch/arm64/src/common/arm64_vectors.S
+++ b/arch/arm64/src/common/arm64_vectors.S
@@ -84,6 +84,9 @@ SECTION_FUNC(text, up_saveusercontext)
 #endif
     stp    x4, x5, [x0, #8 * REG_ELR]
 
+    mrs    x4, sctlr_el1
+    str    x4, [x0, #8 * REG_SCTLR_EL1]
+
     ret
 
 /****************************************************************************
@@ -115,6 +118,8 @@ SECTION_FUNC(text, arm64_jump_to_user)
     and    x0, x0, #~SPSR_MODE_MASK
 #orr x0, x0, #SPSR_MODE_EL0T # EL0T=0x00, out of range for orr
     str    x0, [sp, #8 * REG_SPSR]
+    mrs    x0, sctlr_el1
+    str    x0, [sp, #8 * REG_SCTLR_EL1]
     b      arm64_exit_exception
 #endif
 