arch: arm64: ARMv8-A support for NuttX

N/A

Summary:

Arm64 support for NuttX, Features supported:

1. Cortex-A53 single-core and SMP support: it can boot into the nsh shell
   on the QEMU virt machine.

2. qemu-a53 board configuration support: it is intended for evaluation purposes only

3. FPU support for armv8-a: FPU context switching at NEON/floating-point
  TRAP is supported.

4. psci interface, armv8 cache operation(data cache) and smccc support.

5. fix many code style issues; thanks to @xiaoxiang781216, @hartmannathan, @pkarashchenko

Please refer to boards/arm64/qemu/qemu-a53/README.txt for details

Note:
1. GCC MACOS issue
The GCC 11.2 toolchain for macOS may crash while compiling
floating-point functions; the following link describes and
analyzes the issue:

https://bugs.linaro.org/show_bug.cgi?id=5825

It seems GCC generates a wrong instruction on certain machines
that lack the required architecture features.

A fixed toolchain is not yet available, so the macOS CI build
check is disabled for now.

Signed-off-by: qinwei1 <qinwei1@xiaomi.com>
This commit is contained in:
qinwei1 2022-06-18 04:26:10 -07:00 committed by hartmannathan
parent 8dbceb77f7
commit e77b06721b
97 changed files with 16520 additions and 0 deletions

View file

@ -22,6 +22,20 @@ config ARCH_ARM
---help---
The ARM architectures
config ARCH_ARM64
bool "ARM64"
select ARCH_HAVE_BACKTRACE
select ARCH_HAVE_INTERRUPTSTACK
select ARCH_HAVE_VFORK
select ARCH_HAVE_STACKCHECK
select ARCH_HAVE_CUSTOMOPT
select ARCH_HAVE_STDARG_H
select ARCH_HAVE_SYSCALL_HOOKS
select ARCH_HAVE_RDWR_MEM_CPU_RUN
select ARCH_HAVE_THREAD_LOCAL
---help---
The ARM64 architectures
config ARCH_AVR
bool "AVR"
select ARCH_NOINTC
@ -147,6 +161,7 @@ endchoice
config ARCH
string
default "arm" if ARCH_ARM
default "arm64" if ARCH_ARM64
default "avr" if ARCH_AVR
default "hc" if ARCH_HC
default "mips" if ARCH_MIPS
@ -162,6 +177,7 @@ config ARCH
default "sparc" if ARCH_SPARC
source "arch/arm/Kconfig"
source "arch/arm64/Kconfig"
source "arch/avr/Kconfig"
source "arch/hc/Kconfig"
source "arch/mips/Kconfig"

78
arch/arm64/Kconfig Normal file
View file

@ -0,0 +1,78 @@
#
# For a description of the syntax of this configuration file,
# see the file kconfig-language.txt in the NuttX tools repository.
#
if ARCH_ARM64
comment "ARM64 Options"
choice
prompt "ARM64 chip selection"
default ARCH_CHIP_QEMU
config ARCH_CHIP_QEMU
bool "QEMU virt platform (cortex-a53)"
select ARCH_CORTEX_A53
select ARCH_HAVE_ADDRENV
select ARCH_NEED_ADDRENV_MAPPING
---help---
QEMU virt platform (cortex-a53)
endchoice
config ARCH_ARMV8A
bool
default n
config ARCH_ARMV8R
bool
default n
config ARCH_CORTEX_A53
bool
default n
select ARCH_ARMV8A
select ARM_HAVE_NEON
select ARCH_HAVE_TRUSTZONE
select ARCH_DCACHE
select ARCH_ICACHE
select ARCH_HAVE_MMU
select ARCH_HAVE_FPU
select ARCH_HAVE_TESTSET
config ARCH_CORTEX_R82
bool
default n
select ARCH_ARMV8R
select ARCH_DCACHE
select ARCH_ICACHE
select ARCH_HAVE_MPU
select ARCH_HAVE_TESTSET
config ARCH_FAMILY
string
default "armv8-a" if ARCH_ARMV8A
default "armv8-r" if ARCH_ARMV8R
config ARCH_CHIP
string
default "qemu" if ARCH_CHIP_QEMU
config ARCH_HAVE_TRUSTZONE
bool
default n
---help---
Automatically selected to indicate that the ARM CPU supports
TrustZone.
config ARM_HAVE_NEON
bool
default n
---help---
Decide whether to support the NEON instruction set
if ARCH_CHIP_QEMU
source "arch/arm64/src/qemu/Kconfig"
endif
endif # ARCH_ARM64

2
arch/arm64/include/.gitignore vendored Normal file
View file

@ -0,0 +1,2 @@
/board
/chip

139
arch/arm64/include/arch.h Normal file
View file

@ -0,0 +1,139 @@
/****************************************************************************
* arch/arm64/include/arch.h
*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership. The
* ASF licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the
* License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
****************************************************************************/
/* This file should never be included directly but, rather,
* only indirectly through nuttx/arch.h
*/
#ifndef __ARCH_ARM64_INCLUDE_ARCH_H
#define __ARCH_ARM64_INCLUDE_ARCH_H
/****************************************************************************
* Included Files
****************************************************************************/
#include <nuttx/config.h>
#ifndef __ASSEMBLY__
# include <stdint.h>
# include <nuttx/pgalloc.h>
# include <nuttx/addrenv.h>
#endif
/****************************************************************************
* Pre-processor Prototypes
****************************************************************************/
#ifdef CONFIG_ARCH_ADDRENV
#if CONFIG_MM_PGSIZE != 4096
# error Only pages sizes of 4096 are currently supported (CONFIG_ARCH_ADDRENV)
#endif
#endif /* CONFIG_ARCH_ADDRENV */
/****************************************************************************
* Inline functions
****************************************************************************/
/****************************************************************************
* Public Types
****************************************************************************/
#ifdef CONFIG_ARCH_ADDRENV
/* The task group resources are retained in a single structure, task_group_s
* that is defined in the header file nuttx/include/nuttx/sched.h. The type
* group_addrenv_t must be defined by platform specific logic in
* nuttx/arch/<architecture>/include/arch.h.
*
* These tables would hold the physical address of the level 2 page tables.
* All would be initially NULL and would not be backed up with physical
* memory until mappings in the level 2 page table are required.
*/
struct group_addrenv_s
{
/* Level 1 page table entries for each group section */
uintptr_t *text[ARCH_TEXT_NSECTS];
uintptr_t *data[ARCH_DATA_NSECTS];
#ifdef CONFIG_BUILD_KERNEL
uintptr_t *heap[ARCH_HEAP_NSECTS];
#ifdef CONFIG_MM_SHM
uintptr_t *shm[ARCH_SHM_NSECTS];
#endif
/* Initial heap allocation (in bytes). This exists only provide an
* indirect path for passing the size of the initial heap to the heap
* initialization logic. These operations are separated in time and
* architecture. REVISIT: I would like a better way to do this.
*/
size_t heapsize;
#endif
};
typedef struct group_addrenv_s group_addrenv_t;
/* This type is used when the OS needs to temporarily instantiate a
* different address environment. Used in the implementation of
*
* int up_addrenv_select(group_addrenv_t addrenv, save_addrenv_t *oldenv);
* int up_addrenv_restore(save_addrenv_t oldenv);
*
* In this case, the saved value in the L1 page table are returned
*/
struct save_addrenv_s
{
uint32_t text[ARCH_TEXT_NSECTS];
uint32_t data[ARCH_DATA_NSECTS];
#ifdef CONFIG_BUILD_KERNEL
uint32_t heap[ARCH_HEAP_NSECTS];
#ifdef CONFIG_MM_SHM
uint32_t shm[ARCH_SHM_NSECTS];
#endif
#endif
};
typedef struct save_addrenv_s save_addrenv_t;
#endif
/****************************************************************************
* Public Data
****************************************************************************/
/****************************************************************************
* Public Function Prototypes
****************************************************************************/
#ifdef __cplusplus
#define EXTERN extern "C"
extern "C"
{
#else
#define EXTERN extern
#endif
#undef EXTERN
#ifdef __cplusplus
}
#endif
#endif /* __ARCH_ARM64_INCLUDE_ARCH_H */

View file

@ -0,0 +1,119 @@
/****************************************************************************
* arch/arm64/include/inttypes.h
*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership. The
* ASF licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the
* License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
****************************************************************************/
#ifndef __ARCH_ARM64_INCLUDE_INTTYPES_H
#define __ARCH_ARM64_INCLUDE_INTTYPES_H
/****************************************************************************
* Included Files
****************************************************************************/
/****************************************************************************
* Pre-processor Definitions
****************************************************************************/
#define PRId8 "d"
#define PRId16 "d"
#define PRId32 "d"
#define PRId64 "ld"
#define PRIdPTR "ld"
#define PRIi8 "i"
#define PRIi16 "i"
#define PRIi32 "i"
#define PRIi64 "li"
#define PRIiPTR "li"
#define PRIo8 "o"
#define PRIo16 "o"
#define PRIo32 "o"
#define PRIo64 "lo"
#define PRIoPTR "lo"
#define PRIu8 "u"
#define PRIu16 "u"
#define PRIu32 "u"
#define PRIu64 "lu"
#define PRIuPTR "lu"
#define PRIx8 "x"
#define PRIx16 "x"
#define PRIx32 "x"
#define PRIx64 "lx"
#define PRIxPTR "lx"
#define PRIX8 "X"
#define PRIX16 "X"
#define PRIX32 "X"
#define PRIX64 "lX"
#define PRIXPTR "lX"
#define SCNd8 "hhd"
#define SCNd16 "hd"
#define SCNd32 "d"
#define SCNd64 "ld"
#define SCNdPTR "ld"
#define SCNi8 "hhi"
#define SCNi16 "hi"
#define SCNi32 "i"
#define SCNi64 "li"
#define SCNiPTR "li"
#define SCNo8 "hho"
#define SCNo16 "ho"
#define SCNo32 "o"
#define SCNo64 "lo"
#define SCNoPTR "lo"
#define SCNu8 "hhu"
#define SCNu16 "hu"
#define SCNu32 "u"
#define SCNu64 "lu"
#define SCNuPTR "lu"
#define SCNx8 "hhx"
#define SCNx16 "hx"
#define SCNx32 "x"
#define SCNx64 "lx"
#define SCNxPTR "lx"
#define INT8_C(x) x
#define INT16_C(x) x
#define INT32_C(x) x
#define INT64_C(x) x ## l
#define UINT8_C(x) x
#define UINT16_C(x) x
#define UINT32_C(x) x ## u
#define UINT64_C(x) x ## ul
#endif /* __ARCH_ARM64_INCLUDE_INTTYPES_H */

406
arch/arm64/include/irq.h Normal file
View file

@ -0,0 +1,406 @@
/****************************************************************************
* arch/arm64/include/irq.h
*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership. The
* ASF licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the
* License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
****************************************************************************/
/* This file should never be included directly but, rather, only indirectly
* through nuttx/irq.h
*/
#ifndef __ARCH_ARM64_INCLUDE_IRQ_H
#define __ARCH_ARM64_INCLUDE_IRQ_H
/****************************************************************************
* Included Files
****************************************************************************/
#include <nuttx/config.h>
#include <sys/types.h>
/* <stdint.h> provides the fixed-width types used below; it is only
 * meaningful (and only needed) when not preprocessing assembly sources.
 * NOTE: the original hunk included this guarded block twice; the
 * duplicate has been removed.
 */

#ifndef __ASSEMBLY__
#  include <stdint.h>
#endif

/* Include NuttX-specific IRQ definitions */

#include <nuttx/irq.h>

/* Include chip-specific IRQ definitions (including IRQ numbers) */

#include <arch/chip/irq.h>
/****************************************************************************
* Pre-processor Prototypes
****************************************************************************/
#define up_getsp() (uintptr_t)__builtin_frame_address(0)
/****************************************************************************
* Exception stack frame format:
*
* x0 ~ x18, x30 (lr), spsr and elr
* Corruptible Registers and exception context
* reference to Armv8-A Instruction Set Architecture
* (ARM062-948681440-3280, Issue 1.1), chapter 11 PCS
* need to be saved in all exception
*
* x19 ~ x29, sp_el0, sp_elx
* Callee-saved Registers and SP pointer
* reference to Armv8-A Instruction Set Architecture
* (ARM062-948681440-3280, Issue 1.1), chapter 11 PCS
*    The frame for these registers is allocated on the stack
*    when an exception occurs, and they are saved at task switch
*    or on a crash exception
* check arm64_vectors.S for detail
*
****************************************************************************/
/****************************************************************************
* Registers and exception context
* Note:
* REG_EXEC_DEPTH indicate the task's exception depth
*
****************************************************************************/
#define REG_X0 (0)
#define REG_X1 (1)
#define REG_X2 (2)
#define REG_X3 (3)
#define REG_X4 (4)
#define REG_X5 (5)
#define REG_X6 (6)
#define REG_X7 (7)
#define REG_X8 (8)
#define REG_X9 (9)
#define REG_X10 (10)
#define REG_X11 (11)
#define REG_X12 (12)
#define REG_X13 (13)
#define REG_X14 (14)
#define REG_X15 (15)
#define REG_X16 (16)
#define REG_X17 (17)
#define REG_X18 (18)
#define REG_X19 (19)
#define REG_X20 (20)
#define REG_X21 (21)
#define REG_X22 (22)
#define REG_X23 (23)
#define REG_X24 (24)
#define REG_X25 (25)
#define REG_X26 (26)
#define REG_X27 (27)
#define REG_X28 (28)
#define REG_X29 (29)
#define REG_X30 (30)
#define REG_SP_ELX (31)
#define REG_ELR (32)
#define REG_SPSR (33)
#define REG_SP_EL0 (34)
#define REG_EXE_DEPTH (35)
#define REG_TPIDR_EL0 (36)
#define REG_TPIDR_EL1 (37)
/* In Armv8-A Architecture, the stack must align with 16 byte */
#define XCPTCONTEXT_GP_REGS (38)
#define XCPTCONTEXT_GP_SIZE (8 * XCPTCONTEXT_GP_REGS)
#ifdef CONFIG_ARCH_FPU
/****************************************************************************
* q0 ~ q31(128bit), fpsr, fpcr
* armv8 fpu registers and context
* With CONFIG_ARCH_FPU is enabled, armv8 fpu registers context
* is allocated on stack frame at exception and store/restore
* when switching FPU context
* check arm64_fpu.c for detail
*
****************************************************************************/
/* 128bit registers */
#define FPU_REG_Q0 (0)
#define FPU_REG_Q1 (1)
#define FPU_REG_Q2 (2)
#define FPU_REG_Q3 (3)
#define FPU_REG_Q4 (4)
#define FPU_REG_Q5 (5)
#define FPU_REG_Q6 (6)
#define FPU_REG_Q7 (7)
#define FPU_REG_Q8 (8)
#define FPU_REG_Q9 (9)
#define FPU_REG_Q10 (10)
#define FPU_REG_Q11 (11)
#define FPU_REG_Q12 (12)
#define FPU_REG_Q13 (13)
#define FPU_REG_Q14 (14)
#define FPU_REG_Q15 (15)
#define FPU_REG_Q16 (16)
#define FPU_REG_Q17 (17)
#define FPU_REG_Q18 (18)
#define FPU_REG_Q19 (19)
#define FPU_REG_Q20 (20)
#define FPU_REG_Q21 (21)
#define FPU_REG_Q22 (22)
#define FPU_REG_Q23 (23)
#define FPU_REG_Q24 (24)
#define FPU_REG_Q25 (25)
#define FPU_REG_Q26 (26)
#define FPU_REG_Q27 (27)
#define FPU_REG_Q28 (28)
#define FPU_REG_Q29 (29)
#define FPU_REG_Q30 (30)
#define FPU_REG_Q31 (31)
/* 32 bit registers
*/
#define FPU_REG_FPSR (0)
#define FPU_REG_FPCR (1)
/* FPU registers(Q0~Q31, 128bit): 32x2 = 64
* FPU FPSR/SPSR(32 bit) : 1
* FPU TRAP: 1
* 64 + 1 + 1 = 66
*/
#define XCPTCONTEXT_FPU_REGS (66)
#else
#define XCPTCONTEXT_FPU_REGS (0)
#endif
#define FPUCONTEXT_SIZE (8 * XCPTCONTEXT_FPU_REGS)
#define XCPTCONTEXT_REGS (XCPTCONTEXT_GP_REGS + XCPTCONTEXT_FPU_REGS)
#define XCPTCONTEXT_SIZE (8 * XCPTCONTEXT_REGS)
#ifndef __ASSEMBLY__
#ifdef __cplusplus
#define EXTERN extern "C"
extern "C"
{
#else
#define EXTERN extern
#endif
/****************************************************************************
* Public Data
****************************************************************************/
/* g_current_regs[] holds a references to the current interrupt level
* register storage structure. If is non-NULL only during interrupt
* processing. Access to g_current_regs[] must be through the macro
* CURRENT_REGS for portability.
*/
/* For the case of architectures with multiple CPUs, then there must be one
* such value for each processor that can receive an interrupt.
*/
EXTERN volatile uint64_t *g_current_regs[CONFIG_SMP_NCPUS];
#define CURRENT_REGS (g_current_regs[up_cpu_index()])
struct xcptcontext
{
/* The following function pointer is non-zero if there are pending signals
* to be processed.
*/
void *sigdeliver; /* Actual type is sig_deliver_t */
#ifdef CONFIG_BUILD_KERNEL
/* This is the saved address to use when returning from a user-space
* signal handler.
*/
uintptr_t sigreturn;
#endif
/* task stack reg context */
uint64_t *regs;
/* task context, for signal process */
uint64_t *saved_reg;
#ifdef CONFIG_ARCH_FPU
uint64_t *fpu_regs;
uint64_t *saved_fpu_regs;
#endif
/* Extra fault address register saved for common paging logic. In the
* case of the pre-fetch abort, this value is the same as regs[REG_ELR];
* For the case of the data abort, this value is the value of the fault
* address register (FAR) at the time of data abort exception.
*/
#ifdef CONFIG_PAGING
uintptr_t far;
#endif
#ifdef CONFIG_ARCH_ADDRENV
# ifdef CONFIG_ARCH_STACK_DYNAMIC
/* This array holds the physical address of the level 2 page table used
to map the thread's stack memory. This array will initially be
zeroed and will be backed with pages during page fault exception
handling to support dynamically sized stacks for each thread.
*/
uintptr_t *ustack[ARCH_STACK_NSECTS];
# endif
# ifdef CONFIG_ARCH_KERNEL_STACK
/* In this configuration, all syscalls execute from an internal kernel
* stack. Why? Because when we instantiate and initialize the address
* environment of the new user process, we will temporarily lose the
* address environment of the old user process, including its stack
* contents. The kernel C logic will crash immediately with no valid
* stack in place.
*/
uintptr_t *ustkptr; /* Saved user stack pointer */
uintptr_t *kstack; /* Allocate base of the (aligned) kernel stack */
uintptr_t *kstkptr; /* Saved kernel stack pointer */
# endif
#endif
};
/* Name: up_irq_save, up_irq_restore, and friends.
*
* NOTE: This function should never be called from application code and,
* as a general rule unless you really know what you are doing, this
* function should not be called directly from operation system code either:
* Typically, the wrapper functions, enter_critical_section() and
* leave_critical section(), are probably what you really want.
*/
/* Return the current IRQ state without modifying it.
 *
 * Reads the AArch64 DAIF system register, which holds the
 * Debug / SError (A) / IRQ (I) / FIQ (F) exception-mask bits.
 */

static inline irqstate_t irqstate(void)
{
irqstate_t flags;

/* mrs: move the DAIF system register into a general-purpose register */

__asm__ __volatile__("mrs %0, daif" : "=r" (flags):: "memory");
return flags;
}
/* Disable IRQs and return the previous IRQ state.
 *
 * "msr daifset, #2" sets the I bit of DAIF, masking normal interrupts
 * (IRQ) only; the Debug, SError and FIQ masks are left unchanged.
 * The DAIF value sampled before masking is returned so it can later be
 * handed to up_irq_restore().
 */

static inline irqstate_t up_irq_save(void)
{
irqstate_t flags;

/* Read the current DAIF state, then mask IRQs */

__asm__ __volatile__
(
"mrs %0, daif\n"
"msr daifset, #2\n"
: "=r" (flags)
:
: "memory"
);
return flags;
}
/* Enable IRQs and return the previous IRQ state.
 *
 * "msr daifclr, #2" clears the I bit of DAIF, unmasking normal
 * interrupts (IRQ) only; Debug, SError and FIQ masks are unchanged.
 * The DAIF value sampled before unmasking is returned.
 */

static inline irqstate_t up_irq_enable(void)
{
irqstate_t flags;

/* Read the current DAIF state, then unmask IRQs */

__asm__ __volatile__
(
"mrs %0, daif\n"
"msr daifclr, #2\n"
: "=r" (flags)
:
: "memory"
);
return flags;
}
/* Restore a saved IRQ & FIQ state.
 *
 * Writes the whole DAIF register with the value previously returned by
 * up_irq_save()/up_irq_enable(), restoring all four mask bits at once.
 */

static inline void up_irq_restore(irqstate_t flags)
{
__asm__ __volatile__("msr daif, %0" :: "r" (flags): "memory");
}
/****************************************************************************
* Public Function Prototypes
****************************************************************************/
/****************************************************************************
* Name: up_cpu_index
*
* Description:
* Return an index in the range of 0 through (CONFIG_SMP_NCPUS-1) that
* corresponds to the currently executing CPU.
*
* Input Parameters:
* None
*
* Returned Value:
* An integer index in the range of 0 through (CONFIG_SMP_NCPUS-1) that
* corresponds to the currently executing CPU.
*
****************************************************************************/
#ifdef CONFIG_SMP
int up_cpu_index(void);
#else
# define up_cpu_index() (0)
#endif
/****************************************************************************
* Name: up_interrupt_context
*
* Description: Return true is we are currently executing in
* the interrupt handler context.
*
****************************************************************************/
/* Return true if we are currently executing in interrupt context,
 * i.e. if CURRENT_REGS (this CPU's saved-register pointer) is non-NULL.
 */

static inline bool up_interrupt_context(void)
{
#ifdef CONFIG_SMP
/* In SMP mode, disable interrupts so this task cannot migrate to
 * another CPU between reading the CPU index and testing its entry in
 * g_current_regs[].
 */

irqstate_t flags = up_irq_save();
#endif
bool ret = (CURRENT_REGS != NULL);
#ifdef CONFIG_SMP
up_irq_restore(flags);
#endif
return ret;
}
#undef EXTERN
#ifdef __cplusplus
}
#endif
#endif /* __ASSEMBLY__ */
#endif /* __ARCH_ARM64_INCLUDE_IRQ_H */

View file

@ -0,0 +1,82 @@
/****************************************************************************
* arch/arm64/include/limits.h
*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership. The
* ASF licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the
* License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
****************************************************************************/
#ifndef __ARCH_ARM64_INCLUDE_LIMITS_H
#define __ARCH_ARM64_INCLUDE_LIMITS_H
/****************************************************************************
* Included Files
****************************************************************************/
/****************************************************************************
* Pre-processor Prototypes
****************************************************************************/
#define CHAR_BIT 8
#define SCHAR_MIN (-SCHAR_MAX - 1)
#define SCHAR_MAX 127
#define UCHAR_MAX 255
/* These could be different on machines where char is unsigned */
#ifdef __CHAR_UNSIGNED__
# define CHAR_MIN 0
# define CHAR_MAX UCHAR_MAX
#else
# define CHAR_MIN SCHAR_MIN
# define CHAR_MAX SCHAR_MAX
#endif
#define SHRT_MIN (-SHRT_MAX - 1)
#define SHRT_MAX 32767
#define USHRT_MAX 65535U
#define INT_MIN (-INT_MAX - 1)
#define INT_MAX 2147483647
#define UINT_MAX 4294967295U
/* For 64-bit arm64 with LP64, long is identical to long long */
#define LONG_MIN LLONG_MIN
#define LONG_MAX LLONG_MAX
#define ULONG_MAX ULLONG_MAX
#define LLONG_MIN (-LLONG_MAX - 1)
#define LLONG_MAX 9223372036854775807LL
#define ULLONG_MAX 18446744073709551615ULL
/* A pointer is 8 bytes */
#define PTR_MIN (-PTR_MAX - 1)
#define PTR_MAX LLONG_MAX
#define UPTR_MAX ULLONG_MAX
#if !defined(__WCHAR_TYPE__)
# define WCHAR_MIN INT_MIN
# define WCHAR_MAX INT_MAX
#elif defined(__WCHAR_UNSIGNED__)
# define WCHAR_MIN 0
# define WCHAR_MAX __WCHAR_MAX__
#else
# define WCHAR_MIN (-__WCHAR_MAX__ - 1)
# define WCHAR_MAX __WCHAR_MAX__
#endif
#endif /* __ARCH_ARM64_INCLUDE_LIMITS_H */

View file

@ -0,0 +1,51 @@
/****************************************************************************
* arch/arm64/include/qemu/chip.h
*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership. The
* ASF licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the
* License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
****************************************************************************/
#ifndef __ARCH_ARM64_INCLUDE_QEMU_CHIP_H
#define __ARCH_ARM64_INCLUDE_QEMU_CHIP_H
/****************************************************************************
* Included Files
****************************************************************************/
#include <nuttx/config.h>
/* Number of bytes in @p x kibibytes/mebibytes/gibibytes */
#define KB(x) ((x) << 10)
#define MB(x) (KB(x) << 10)
#define GB(x) (MB(UINT64_C(x)) << 10)
#if defined(CONFIG_ARCH_CHIP_QEMU_A53)
#define CONFIG_GICD_BASE 0x8000000
#define CONFIG_GICR_BASE 0x80a0000
#define CONFIG_RAMBANK1_ADDR 0x40000000
#define CONFIG_RAMBANK1_SIZE MB(128)
#define CONFIG_DEVICEIO_BASEADDR 0x7000000
#define CONFIG_DEVICEIO_SIZE MB(512)
#define CONFIG_LOAD_BASE 0x40280000
#endif
#endif /* __ARCH_ARM64_INCLUDE_QEMU_CHIP_H */

View file

@ -0,0 +1,34 @@
/****************************************************************************
* arch/arm64/include/qemu/irq.h
*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership. The
* ASF licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the
* License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
****************************************************************************/
/* This file should never be included directly but, rather,
* only indirectly through nuttx/irq.h
*/
#ifndef __ARCH_ARM64_INCLUDE_QEMU_IRQ_H
#define __ARCH_ARM64_INCLUDE_QEMU_IRQ_H
/****************************************************************************
* Pre-processor Definitions
****************************************************************************/
#define NR_IRQS 220 /* Total number of interrupts */
#endif /* __ARCH_ARM64_INCLUDE_QEMU_IRQ_H */

View file

@ -0,0 +1,90 @@
/****************************************************************************
* arch/arm64/include/spinlock.h
*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership. The
* ASF licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the
* License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
****************************************************************************/
#ifndef __ARCH_ARM64_INCLUDE_SPINLOCK_H
#define __ARCH_ARM64_INCLUDE_SPINLOCK_H
/****************************************************************************
* Included Files
****************************************************************************/
#ifndef __ASSEMBLY__
# include <stdint.h>
#endif /* __ASSEMBLY__ */
/****************************************************************************
* Pre-processor Prototypes
****************************************************************************/
/* Spinlock states */
#define SP_UNLOCKED 0 /* The Un-locked state */
#define SP_LOCKED 1 /* The Locked state */
/* Memory barriers for use with NuttX spinlock logic
*
* Data Memory Barrier (DMB) acts as a memory barrier. It ensures that all
* explicit memory accesses that appear in program order before the DMB
* instruction are observed before any explicit memory accesses that appear
* in program order after the DMB instruction. It does not affect the
* ordering of any other instructions executing on the processor
*
* dmb st - Data memory barrier. Wait for stores to complete.
*
* Data Synchronization Barrier (DSB) acts as a special kind of memory
* barrier. No instruction in program order after this instruction executes
* until this instruction completes. This instruction completes when: (1) All
* explicit memory accesses before this instruction complete, and (2) all
* Cache, Branch predictor and TLB maintenance operations before this
* instruction complete.
*
* dsb sy - Data synchronization barrier. Assures that the CPU waits until
* all memory accesses are complete
*/
#define SP_DSB(n) __asm__ __volatile__ ("dsb sy" : : : "memory")
#define SP_DMB(n) __asm__ __volatile__ ("dmb st" : : : "memory")
#define SP_WFE() __asm__ __volatile__ ("wfe" : : : "memory")
#define SP_SEV() __asm__ __volatile__ ("sev" : : : "memory")
#ifndef __ASSEMBLY__
/* The Type of a spinlock.
* ARM official document
* ARM® Cortex®-A Series, Version: 1.0, Programmers Guide for ARMv8-A
* ARM DEN0024A (ID050815)
*
* chapter 14.1.4 Synchronization
*
* The A64 instruction set has instructions for implementing
* synchronization functions:
* -- Load Exclusive (LDXR): LDXR W|Xt, [Xn]
* -- Store Exclusive (STXR): STXR Ws, W|Xt, [Xn] where Ws
* indicates whether the store completed successfully.
* 0 = success.
* -- Clear Exclusive access monitor (CLREX) This is used to
* clear the state of the Local Exclusive Monitor.
*/
typedef uint64_t spinlock_t;
#endif /* __ASSEMBLY__ */
#endif /* __ARCH_ARM64_INCLUDE_SPINLOCK_H */

View file

@ -0,0 +1,45 @@
/****************************************************************************
 * arch/arm64/include/stdarg.h
 *
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership. The
 * ASF licenses this file to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance with the
 * License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations
 * under the License.
 *
 ****************************************************************************/

#ifndef __ARCH_ARM64_INCLUDE_STDARG_H
#define __ARCH_ARM64_INCLUDE_STDARG_H

/****************************************************************************
 * Included Files
 ****************************************************************************/

/****************************************************************************
 * Pre-processor Prototypes
 ****************************************************************************/

/* Map the standard variadic-argument macros directly onto the compiler
 * builtins; the builtins emit whatever the target ABI requires, so no
 * architecture-specific open coding is needed here.
 *
 * This should work with any modern gcc (newer than 3.4 or so)
 */

#define va_start(v,l) __builtin_va_start(v,l)
#define va_end(v) __builtin_va_end(v)
#define va_arg(v,l) __builtin_va_arg(v,l)
#define va_copy(d,s) __builtin_va_copy(d,s)

/****************************************************************************
 * Public Types
 ****************************************************************************/

/* va_list is the compiler's native representation of the variadic
 * argument cursor
 */

typedef __builtin_va_list va_list;

#endif /* __ARCH_ARM64_INCLUDE_STDARG_H */

View file

@ -0,0 +1,391 @@
/****************************************************************************
* arch/arm64/include/syscall.h
*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership. The
* ASF licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the
* License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
****************************************************************************/
/* This file should never be included directly but, rather, only indirectly
 * through include/syscall.h or include/sys/syscall.h
 */

#ifndef __ARCH_ARM64_INCLUDE_SYSCALL_H
#define __ARCH_ARM64_INCLUDE_SYSCALL_H

/****************************************************************************
 * Included Files
 ****************************************************************************/

#include <nuttx/config.h>

#ifndef __ASSEMBLY__
# include <stdint.h>
#endif

/****************************************************************************
 * Pre-processor Prototypes
 ****************************************************************************/

/* SYS_syscall is the SVC immediate used for all NuttX system calls;
 * SYS_smhcall is the HLT immediate used for semihosting requests (see
 * smh_call() below)
 */

#define SYS_syscall 0x00
#define SYS_smhcall 0xf000

/* Configuration ************************************************************/

/* This logic uses three system calls {0,1,2} for context switching and one
 * for the syscall return.
 * So a minimum of four syscall values must be reserved.
 * If this is not a flat build (i.e. CONFIG_BUILD_FLAT is undefined, as in
 * the protected and kernel builds), then four more syscall values must be
 * reserved for the user-mode trampolines defined below.
 */

#ifndef CONFIG_BUILD_FLAT
# define CONFIG_SYS_RESERVED 8
#else
# define CONFIG_SYS_RESERVED 4
#endif

/* system calls */

/* SYS call 0:
 *
 * int arm64_saveusercontext(void *saveregs);
 */

#define SYS_save_context (0)

/* SYS call 1:
 *
 * void arm64_fullcontextrestore(void *restoreregs) noreturn_function;
 */

#define SYS_restore_context (1)

/* SYS call 2:
 *
 * void arm64_switchcontext(void **saveregs, void *restoreregs);
 */

#define SYS_switch_context (2)

#ifdef CONFIG_LIB_SYSCALL
/* SYS call 3:
 *
 * void arm_syscall_return(void);
 */

#define SYS_syscall_return (3)
#endif /* CONFIG_LIB_SYSCALL */

#ifndef CONFIG_BUILD_FLAT
/* SYS call 4:
 *
 * void up_task_start(main_t taskentry, int argc, char *argv[])
 * noreturn_function;
 */

#define SYS_task_start (4)

/* SYS call 5:
 *
 * void up_pthread_start((pthread_startroutine_t startup,
 * pthread_startroutine_t entrypt, pthread_addr_t arg)
 * noreturn_function
 */

#define SYS_pthread_start (5)

/* SYS call 6:
 *
 * void signal_handler(_sa_sigaction_t sighand,
 * int signo, siginfo_t *info,
 * void *ucontext);
 */

#define SYS_signal_handler (6)

/* SYS call 7:
 *
 * void signal_handler_return(void);
 */

#define SYS_signal_handler_return (7)
#endif /* !CONFIG_BUILD_FLAT */

/* Indices of the x0..x7 result values within struct arm64_smccc_res */

#define ARM_SMCC_RES_A0 (0)
#define ARM_SMCC_RES_A1 (1)
#define ARM_SMCC_RES_A2 (2)
#define ARM_SMCC_RES_A3 (3)
#define ARM_SMCC_RES_A4 (4)
#define ARM_SMCC_RES_A5 (5)
#define ARM_SMCC_RES_A6 (6)
#define ARM_SMCC_RES_A7 (7)
#ifndef __ASSEMBLY__

/****************************************************************************
 * Public Types
 ****************************************************************************/

/* Result from SMC/HVC call
 * a0-a7 result values from registers 0 to 7
 */

struct arm64_smccc_res
{
  unsigned long a0;
  unsigned long a1;
  unsigned long a2;
  unsigned long a3;
  unsigned long a4;
  unsigned long a5;
  unsigned long a6;
  unsigned long a7;
};

typedef struct arm64_smccc_res arm64_smccc_res_t;

/* Which conduit (if any) carries SMCCC calls to firmware: none, the SMC
 * instruction, or the HVC instruction
 */

enum arm64_smccc_conduit
{
  SMCCC_CONDUIT_NONE,
  SMCCC_CONDUIT_SMC,
  SMCCC_CONDUIT_HVC,
};

/****************************************************************************
 * Inline functions
 ****************************************************************************/
/* SVC with SYS_ call number and no parameters
 *
 * Calling convention used by all sys_callN() below: the SYS_ call number
 * goes in x0 and the result comes back in x0; parameters (if any) go in
 * x1..x6.  The SVC immediate is always SYS_syscall.  "memory" and x30
 * (lr) are declared clobbered across the trap.
 */

static inline uintptr_t sys_call0(unsigned int nbr)
{
  register uint64_t reg0 __asm__("x0") = (uint64_t)(nbr);

  __asm__ __volatile__
  (
    "svc %1"
    : "=r"(reg0)
    : "i"(SYS_syscall), "r"(reg0)
    : "memory", "x30"
  );

  return reg0;
}

/* SVC with SYS_ call number and one parameter (x1) */

static inline uintptr_t sys_call1(unsigned int nbr, uintptr_t parm1)
{
  register uint64_t reg0 __asm__("x0") = (uint64_t)(nbr);
  register uint64_t reg1 __asm__("x1") = (uint64_t)(parm1);

  __asm__ __volatile__
  (
    "svc %1"
    : "=r"(reg0)
    : "i"(SYS_syscall), "r"(reg0), "r"(reg1)
    : "memory", "x30"
  );

  return reg0;
}

/* SVC with SYS_ call number and two parameters (x1, x2) */

static inline uintptr_t sys_call2(unsigned int nbr, uintptr_t parm1,
                                  uintptr_t parm2)
{
  register uint64_t reg0 __asm__("x0") = (uint64_t)(nbr);
  register uint64_t reg2 __asm__("x2") = (uint64_t)(parm2);
  register uint64_t reg1 __asm__("x1") = (uint64_t)(parm1);

  __asm__ __volatile__
  (
    "svc %1"
    : "=r"(reg0)
    : "i"(SYS_syscall), "r"(reg0), "r"(reg1), "r"(reg2)
    : "memory", "x30"
  );

  return reg0;
}

/* SVC with SYS_ call number and three parameters (x1..x3) */

static inline uintptr_t sys_call3(unsigned int nbr, uintptr_t parm1,
                                  uintptr_t parm2, uintptr_t parm3)
{
  register uint64_t reg0 __asm__("x0") = (uint64_t)(nbr);
  register uint64_t reg3 __asm__("x3") = (uint64_t)(parm3);
  register uint64_t reg2 __asm__("x2") = (uint64_t)(parm2);
  register uint64_t reg1 __asm__("x1") = (uint64_t)(parm1);

  __asm__ __volatile__
  (
    "svc %1"
    : "=r"(reg0)
    : "i"(SYS_syscall), "r"(reg0), "r"(reg1), "r"(reg2),
      "r"(reg3)
    : "memory", "x30"
  );

  return reg0;
}
/* SVC with SYS_ call number and four parameters (x1..x4) */

static inline uintptr_t sys_call4(unsigned int nbr, uintptr_t parm1,
                                  uintptr_t parm2, uintptr_t parm3,
                                  uintptr_t parm4)
{
  register uint64_t reg0 __asm__("x0") = (uint64_t)(nbr);
  register uint64_t reg4 __asm__("x4") = (uint64_t)(parm4);
  register uint64_t reg3 __asm__("x3") = (uint64_t)(parm3);
  register uint64_t reg2 __asm__("x2") = (uint64_t)(parm2);
  register uint64_t reg1 __asm__("x1") = (uint64_t)(parm1);

  __asm__ __volatile__
  (
    "svc %1"
    : "=r"(reg0)
    : "i"(SYS_syscall), "r"(reg0), "r"(reg1), "r"(reg2),
      "r"(reg3), "r"(reg4)
    : "memory", "x30"
  );

  return reg0;
}

/* SVC with SYS_ call number and five parameters (x1..x5) */

static inline uintptr_t sys_call5(unsigned int nbr, uintptr_t parm1,
                                  uintptr_t parm2, uintptr_t parm3,
                                  uintptr_t parm4, uintptr_t parm5)
{
  register uint64_t reg0 __asm__("x0") = (uint64_t)(nbr);
  register uint64_t reg5 __asm__("x5") = (uint64_t)(parm5);
  register uint64_t reg4 __asm__("x4") = (uint64_t)(parm4);
  register uint64_t reg3 __asm__("x3") = (uint64_t)(parm3);
  register uint64_t reg2 __asm__("x2") = (uint64_t)(parm2);
  register uint64_t reg1 __asm__("x1") = (uint64_t)(parm1);

  __asm__ __volatile__
  (
    "svc %1"
    : "=r"(reg0)
    : "i"(SYS_syscall), "r"(reg0), "r"(reg1), "r"(reg2),
      "r"(reg3), "r"(reg4), "r"(reg5)
    : "memory", "x30"
  );

  return reg0;
}

/* SVC with SYS_ call number and six parameters (x1..x6) */

static inline uintptr_t sys_call6(unsigned int nbr, uintptr_t parm1,
                                  uintptr_t parm2, uintptr_t parm3,
                                  uintptr_t parm4, uintptr_t parm5,
                                  uintptr_t parm6)
{
  register uint64_t reg0 __asm__("x0") = (uint64_t)(nbr);
  register uint64_t reg6 __asm__("x6") = (uint64_t)(parm6);
  register uint64_t reg5 __asm__("x5") = (uint64_t)(parm5);
  register uint64_t reg4 __asm__("x4") = (uint64_t)(parm4);
  register uint64_t reg3 __asm__("x3") = (uint64_t)(parm3);
  register uint64_t reg2 __asm__("x2") = (uint64_t)(parm2);
  register uint64_t reg1 __asm__("x1") = (uint64_t)(parm1);

  __asm__ __volatile__
  (
    "svc %1"
    : "=r"(reg0)
    : "i"(SYS_syscall), "r"(reg0), "r"(reg1), "r"(reg2),
      "r"(reg3), "r"(reg4), "r"(reg5), "r"(reg6)
    : "memory", "x30"
  );

  return reg0;
}

/* semihosting(SMH) call with call number and one parameter
 *
 * Unlike the system calls above this uses the HLT instruction with the
 * SYS_smhcall immediate: the request code goes in x0 and the parameter
 * (typically a pointer to a parameter block) in x1.
 */

static inline long smh_call(unsigned int nbr, void *parm)
{
  register uint64_t reg0 __asm__("x0") = (uint64_t)(nbr);
  register uint64_t reg1 __asm__("x1") = (uint64_t)(parm);

  __asm__ __volatile__
  (
    "hlt %1"
    : "=r"(reg0)
    : "i"(SYS_smhcall), "r"(reg0), "r"(reg1)
    : "memory", "x30"
  );

  return reg0;
}
/****************************************************************************
 * Public Data
 ****************************************************************************/

/****************************************************************************
 * Public Function Prototypes
 ****************************************************************************/

/* Open the C-linkage block BEFORE the prototypes.  The original code
 * opened extern "C" after the prototypes (and immediately closed it
 * again), so C++ translation units saw arm64_smccc_hvc/arm64_smccc_smc
 * with C++ linkage and failed to link against the assembly
 * implementations.
 */

#ifdef __cplusplus
#define EXTERN extern "C"
extern "C"
{
#else
#define EXTERN extern
#endif

/* Make HVC calls
 *
 * param a0 function identifier
 * param a1-a7 parameters registers
 * param res results
 */

void arm64_smccc_hvc(unsigned long a0, unsigned long a1,
                     unsigned long a2, unsigned long a3,
                     unsigned long a4, unsigned long a5,
                     unsigned long a6, unsigned long a7,
                     struct arm64_smccc_res *res);

/* Make SMC calls
 *
 * param a0 function identifier
 * param a1-a7 parameters registers
 * param res results
 */

void arm64_smccc_smc(unsigned long a0, unsigned long a1,
                     unsigned long a2, unsigned long a3,
                     unsigned long a4, unsigned long a5,
                     unsigned long a6, unsigned long a7,
                     struct arm64_smccc_res *res);

#undef EXTERN
#ifdef __cplusplus
}
#endif

#endif /* __ASSEMBLY__ */
#endif /* __ARCH_ARM64_INCLUDE_SYSCALL_H */

110
arch/arm64/include/types.h Normal file
View file

@ -0,0 +1,110 @@
/****************************************************************************
* arch/arm64/include/types.h
*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership. The
* ASF licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the
* License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
****************************************************************************/
/* This file should never be included directly but, rather, only indirectly
 * through sys/types.h
 */

#ifndef __ARCH_ARM64_INCLUDE_TYPES_H
#define __ARCH_ARM64_INCLUDE_TYPES_H

/****************************************************************************
 * Included Files
 ****************************************************************************/

#include <nuttx/config.h>

/****************************************************************************
 * Pre-processor Prototypes
 ****************************************************************************/

/****************************************************************************
 * Type Declarations
 ****************************************************************************/

#ifndef __ASSEMBLY__

/* These are the sizes of the standard integer types.  NOTE that these type
 * names have a leading underscore character.  This file will be included
 * (indirectly) by include/stdint.h and typedef'ed to the final name without
 * the underscore character.  This roundabout way of doings things allows
 * the stdint.h to be removed from the include/ directory in the event that
 * the user prefers to use the definitions provided by their toolchain header
 * files
 */

typedef signed char _int8_t;
typedef unsigned char _uint8_t;

typedef signed short _int16_t;
typedef unsigned short _uint16_t;

typedef signed int _int32_t;
typedef unsigned int _uint32_t;

/* NOTE: 'long' is used for the 64-bit types, i.e. the LP64 data model is
 * assumed for AArch64 here
 */

typedef signed long _int64_t;
typedef unsigned long _uint64_t;
#define __INT64_DEFINED

typedef _int64_t _intmax_t;
typedef _uint64_t _uintmax_t;

/* Prefer the compiler's own notion of wchar_t; fall back to 'int' only
 * when __WCHAR_TYPE__ is not predefined
 */

#if defined(__WCHAR_TYPE__)
typedef __WCHAR_TYPE__ _wchar_t;
#else
typedef int _wchar_t;
#endif

#if defined(__SIZE_TYPE__)
/* If __SIZE_TYPE__ is defined we define ssize_t based on size_t.
 * We simply change "unsigned" to "signed" for this single definition
 * to make sure ssize_t and size_t only differ by their signedness.
 */

#define unsigned signed
typedef __SIZE_TYPE__ _ssize_t;
#undef unsigned

typedef __SIZE_TYPE__ _size_t;
#elif defined(CONFIG_ARCH_SIZET_LONG)
typedef signed long _ssize_t;
typedef unsigned long _size_t;
#else
typedef signed int _ssize_t;
typedef unsigned int _size_t;
#endif

/* This is the size of the interrupt state save returned by
 * up_irq_save()
 */

typedef unsigned long irqstate_t;

#endif /* __ASSEMBLY__ */

/****************************************************************************
 * Public Function Prototypes
 ****************************************************************************/

#endif /* __ARCH_ARM64_INCLUDE_TYPES_H */

2
arch/arm64/src/.gitignore vendored Normal file
View file

@ -0,0 +1,2 @@
/board
/chip

205
arch/arm64/src/Makefile Normal file
View file

@ -0,0 +1,205 @@
############################################################################
# arch/arm64/src/Makefile
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership. The
# ASF licenses this file to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
############################################################################
include $(TOPDIR)/Make.defs
include chip/Make.defs

ARCH_SRCDIR = $(TOPDIR)$(DELIM)arch$(DELIM)$(CONFIG_ARCH)$(DELIM)src

# Search the chip/ and common/ directories for private headers, plus the
# top-level sched/ directory for internal OS interfaces

INCLUDES += ${shell $(INCDIR) "$(CC)" $(ARCH_SRCDIR)$(DELIM)chip}
INCLUDES += ${shell $(INCDIR) "$(CC)" $(ARCH_SRCDIR)$(DELIM)common}
INCLUDES += ${shell $(INCDIR) "$(CC)" $(TOPDIR)$(DELIM)sched}

CPPFLAGS += $(INCLUDES)
CFLAGS += $(INCLUDES)
CXXFLAGS += $(INCLUDES)
AFLAGS += $(INCLUDES)

NUTTX = $(call CONVERT_PATH,$(TOPDIR)$(DELIM)nuttx$(EXEEXT))

# Additional rules for system call wrapper

ifeq ($(CONFIG_SCHED_INSTRUMENTATION_SYSCALL),y)
EXTRALINKCMDS += @$(TOPDIR)/syscall/syscall_wraps.ldcmd
endif

# The "head" object

HEAD_OBJ = $(HEAD_ASRC:.S=$(OBJEXT))
STARTUP_OBJS ?= $(HEAD_OBJ)

# Flat build or kernel-mode objects

ASRCS = $(CHIP_ASRCS) $(CMN_ASRCS)
AOBJS = $(ASRCS:.S=$(OBJEXT))

CSRCS = $(CHIP_CSRCS) $(CMN_CSRCS)
COBJS = $(CSRCS:.c=$(OBJEXT))

SRCS = $(ASRCS) $(CSRCS)
OBJS = $(AOBJS) $(COBJS)

# User-mode objects

UASRCS = $(CHIP_UASRCS) $(CMN_UASRCS)
UAOBJS = $(UASRCS:.S=$(OBJEXT))

UCSRCS = $(CHIP_UCSRCS) $(CMN_UCSRCS)
UCOBJS = $(UCSRCS:.c=$(OBJEXT))

USRCS = $(UASRCS) $(UCSRCS)
UOBJS = $(UAOBJS) $(UCOBJS)

KBIN = libkarch$(LIBEXT)
BIN = libarch$(LIBEXT)

LDFLAGS += $(addprefix -T,$(call CONVERT_PATH,$(ARCHSCRIPT))) $(EXTRALINKCMDS)

# Override in Make.defs if linker is not 'ld'
# When the compiler driver performs the link, raw linker options must be
# forwarded with -Xlinker and the compiler flags appended

ifeq ($(LD),$(CC))
LDSTARTGROUP ?= -Wl,--start-group
LDENDGROUP ?= -Wl,--end-group
LDFLAGS := $(addprefix -Xlinker ,$(LDFLAGS))
LDFLAGS += $(CFLAGS)
else
LDSTARTGROUP ?= --start-group
LDENDGROUP ?= --end-group
endif

# 'y' if this board provides its own board/Makefile

BOARDMAKE = $(if $(wildcard board$(DELIM)Makefile),y,)

LIBPATHS += -L $(call CONVERT_PATH,$(TOPDIR)$(DELIM)staging)
ifeq ($(BOARDMAKE),y)
LIBPATHS += -L $(call CONVERT_PATH,$(TOPDIR)$(DELIM)arch$(DELIM)$(CONFIG_ARCH)$(DELIM)src$(DELIM)board)
endif

# Convert the archive list (libfoo.a ...) into linker -lfoo options

LDLIBS = $(patsubst %.a,%,$(patsubst lib%,-l%,$(LINKLIBS)))
ifeq ($(BOARDMAKE),y)
LDLIBS += -lboard
endif

VPATH += chip
VPATH += common
VPATH += $(ARCH_SUBDIR)
all: $(HEAD_OBJ) $(BIN)

.PHONY: board$(DELIM)libboard$(LIBEXT)

# Pattern rules for assembly and C sources

$(AOBJS) $(UAOBJS) $(HEAD_OBJ): %$(OBJEXT): %.S
	$(call ASSEMBLE, $<, $@)

$(COBJS) $(UCOBJS): %$(OBJEXT): %.c
	$(call COMPILE, $<, $@)

# In the flat build, libarch holds all objects; otherwise it holds only the
# user-mode objects and the kernel objects go into libkarch

ifeq ($(CONFIG_BUILD_FLAT),y)
$(BIN): $(OBJS)
	$(call ARCHIVE, $@, $(OBJS))
else
$(BIN): $(UOBJS)
	$(call ARCHIVE, $@, $(UOBJS))
endif

$(KBIN): $(OBJS)
	$(call ARCHIVE, $@, $(OBJS))

board$(DELIM)libboard$(LIBEXT):
	$(Q) $(MAKE) -C board libboard$(LIBEXT) EXTRAFLAGS="$(EXTRAFLAGS)"

# Link with an allsyms table: generate the symbol table from the current
# image with mkallsyms.sh, compile it, and relink it into the image

define LINK_ALLSYMS
	$(Q) $(TOPDIR)/tools/mkallsyms.sh $(NUTTX) $(CROSSDEV) > allsyms.tmp
	$(Q) $(call COMPILE, -x c allsyms.tmp, allsyms$(OBJEXT))
	$(Q) $(LD) --entry=__start $(LDFLAGS) $(LIBPATHS) $(EXTRA_LIBPATHS) \
	-o $(NUTTX) $(HEAD_OBJ) allsyms$(OBJEXT) $(EXTRA_OBJS) \
	$(LDSTARTGROUP) $(LDLIBS) $(EXTRA_LIBS) $(LDENDGROUP)
	$(Q) $(call DELFILE, allsyms.tmp allsyms$(OBJEXT))
endef

nuttx$(EXEEXT): $(HEAD_OBJ) board$(DELIM)libboard$(LIBEXT) $(ARCHSCRIPT)
	$(Q) echo "LD: nuttx"
ifneq ($(CONFIG_ALLSYMS),y)
	$(Q) $(LD) --entry=__start $(LDFLAGS) $(LIBPATHS) $(EXTRA_LIBPATHS) \
	-o $(NUTTX) $(HEAD_OBJ) $(EXTRA_OBJS) \
	$(LDSTARTGROUP) $(LDLIBS) $(EXTRA_LIBS) $(LDENDGROUP)
else
	$(Q) # Link and generate default table
	$(Q) $(if $(wildcard $(shell echo $(NUTTX))),,$(call LINK_ALLSYMS,$^))
	$(Q) # Extract all symbols
	$(Q) $(call LINK_ALLSYMS, $^)
	$(Q) # Extract again since the table offset may changed
	$(Q) $(call LINK_ALLSYMS, $^)
endif
ifneq ($(CONFIG_WINDOWS_NATIVE),y)
	$(Q) $(NM) $(NUTTX) | \
	grep -v '\(compiled\)\|\(\$(OBJEXT)$$\)\|\( [aUw] \)\|\(\.\.ng$$\)\|\(LASH[RL]DI\)' | \
	sort > $(TOPDIR)$(DELIM)System.map
endif
# This is part of the top-level export target
# Note that there may not be a head object if layout is handled
# by the linker configuration.

export_startup: $(STARTUP_OBJS)
ifneq ($(STARTUP_OBJS),)
	$(Q) if [ -d "$(EXPORT_DIR)$(DELIM)startup" ]; then \
	cp -f $(STARTUP_OBJS) "$(EXPORT_DIR)$(DELIM)startup$(DELIM)."; \
	else \
	echo "$(EXPORT_DIR)$(DELIM)startup does not exist"; \
	exit 1; \
	fi
endif

# Dependencies

makedepfile: $(CSRCS:.c=.ddc) $(ASRCS:.S=.dds) $(HEAD_ASRC:.S=.dds)
	$(call CATFILE, Make.dep, $^)
	$(call DELFILE, $^)

.depend: Makefile chip$(DELIM)Make.defs $(SRCS) $(TOPDIR)$(DELIM).config
ifeq ($(BOARDMAKE),y)
	$(Q) $(MAKE) -C board depend
endif
	$(Q) $(MAKE) makedepfile DEPPATH="$(patsubst %,--dep-path %,$(subst :, ,$(VPATH)))"
	$(Q) touch $@

depend: .depend

# Nothing to do for the 'context' phase in this arch

context::

clean:
ifeq ($(BOARDMAKE),y)
	$(Q) $(MAKE) -C board clean
endif
	$(call DELFILE, $(KBIN))
	$(call DELFILE, $(BIN))
ifneq ($(EXTRADELFILE),)
	$(call DELFILE, $(EXTRADELFILE))
endif
	$(call CLEAN)

distclean:: clean
ifeq ($(BOARDMAKE),y)
	$(Q) $(MAKE) -C board distclean
endif
	$(call DELFILE, Make.dep)
	$(call DELFILE, .depend)

-include Make.dep

View file

@ -0,0 +1,139 @@
############################################################################
# arch/arm64/src/Toolchain.defs
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership. The
# ASF licenses this file to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
############################################################################
#
# Supported toolchains
#
# Each toolchain definition should set:
#
# CROSSDEV The GNU toolchain triple (command prefix)
# ARCHCPUFLAGS CPU-specific flags selecting the instruction set
# FPU options, etc.
# ARCHOPTIMIZATION The optimization level that results in
# reliable code generation.
#
# Instruction set and tuning selection

ifeq ($(CONFIG_ARCH_ARMV8A),y)
ARCHCPUFLAGS += -march=armv8-a
endif

ifeq ($(CONFIG_ARCH_CORTEX_A53),y)
ARCHCPUFLAGS += -mtune=cortex-a53
endif

# Optimization level: user-supplied flags, or -Os for full optimization

ifeq ($(CONFIG_DEBUG_CUSTOMOPT),y)
ARCHOPTIMIZATION += $(CONFIG_DEBUG_OPTLEVEL)
else ifeq ($(CONFIG_DEBUG_FULLOPT),y)
ARCHOPTIMIZATION += -Os
endif

ifneq ($(CONFIG_DEBUG_NOOPT),y)
ARCHOPTIMIZATION += -fno-strict-aliasing
endif

# Keep the frame pointer (and disable sibling-call optimization) when
# frame-pointer-based unwinding is configured

ifeq ($(CONFIG_FRAME_POINTER),y)
ARCHOPTIMIZATION += -fno-omit-frame-pointer -fno-optimize-sibling-calls
else
ARCHOPTIMIZATION += -fomit-frame-pointer
endif

ifeq ($(CONFIG_STACK_CANARIES),y)
ARCHOPTIMIZATION += -fstack-protector-all
endif

ifeq ($(CONFIG_ARCH_COVERAGE),y)
ARCHOPTIMIZATION += -fprofile-generate -ftest-coverage
endif

ARCHCFLAGS += -fno-common
ARCHCXXFLAGS += -fno-common -nostdinc++

ARCHCFLAGS += -Wall -Wstrict-prototypes -Wshadow -Wundef -Werror
ARCHCXXFLAGS += -Wall -Wshadow -Wundef

ifneq ($(CONFIG_CXX_EXCEPTION),y)
ARCHCXXFLAGS += -fno-exceptions -fcheck-new
endif

ifneq ($(CONFIG_CXX_RTTI),y)
ARCHCXXFLAGS += -fno-rtti
endif

# Do not pull in the host C runtime or startup files

LDFLAGS += -nostdlib

# Optimization of unused sections

ifeq ($(CONFIG_DEBUG_OPT_UNUSED_SECTIONS),y)
LDFLAGS += --gc-sections
ARCHOPTIMIZATION += -ffunction-sections -fdata-sections
endif

# Debug link map

ifeq ($(CONFIG_DEBUG_LINK_MAP),y)
LDFLAGS += --cref -Map=$(call CONVERT_PATH,$(TOPDIR)$(DELIM)nuttx.map)
endif

ifeq ($(CONFIG_DEBUG_SYMBOLS),y)
ARCHOPTIMIZATION += -g
endif

CROSSDEV ?= aarch64-none-elf-

# Default toolchain

CC = $(CROSSDEV)gcc
CXX = $(CROSSDEV)g++
CPP = $(CROSSDEV)gcc -E -P -x c
STRIP = $(CROSSDEV)strip --strip-unneeded
OBJCOPY = $(CROSSDEV)objcopy
OBJDUMP = $(CROSSDEV)objdump
LD = $(CROSSDEV)ld
AR = $(CROSSDEV)ar rcs
NM = $(CROSSDEV)nm

# Link Time Optimization

ifeq ($(CONFIG_LTO_FULL),y)
ARCHOPTIMIZATION += -flto

# NOTE(review): CONFIG_ARMV7A_TOOLCHAIN is an armv7-a configuration symbol
# and can never be set in an arm64 build, so this branch is dead code here:
# the gcc driver and the gcc-ar/gcc-nm LTO-plugin wrappers are never
# selected, which defeats -flto at link/archive time.  This looks like a
# copy/paste from arch/arm -- confirm and rekey (or drop) the guard.

ifeq ($(CONFIG_ARMV7A_TOOLCHAIN),GNU_EABI)
LD := $(CROSSDEV)gcc
AR := $(CROSSDEV)gcc-ar rcs
NM := $(CROSSDEV)gcc-nm
ARCHOPTIMIZATION += -fuse-linker-plugin
ARCHOPTIMIZATION += -fno-builtin
endif
endif

# Add the builtin library

EXTRA_LIBS += $(wildcard $(shell $(CC) $(ARCHCPUFLAGS) --print-libgcc-file-name))

# Pull in the toolchain-provided libm/libsupc++/libgcov when NuttX's own
# implementations are not configured

ifneq ($(CONFIG_LIBM),y)
EXTRA_LIBS += $(wildcard $(shell $(CC) $(ARCHCPUFLAGS) --print-file-name=libm.a))
endif

ifeq ($(CONFIG_LIBSUPCXX),y)
EXTRA_LIBS += $(wildcard $(shell $(CC) $(ARCHCPUFLAGS) --print-file-name=libsupc++.a))
endif

ifeq ($(CONFIG_ARCH_COVERAGE),y)
EXTRA_LIBS += $(wildcard $(shell $(CC) $(ARCHCPUFLAGS) --print-file-name=libgcov.a))
endif

View file

@ -0,0 +1,97 @@
############################################################################
# arch/arm64/src/common/Make.defs
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership. The
# ASF licenses this file to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
############################################################################
# The vector table is the "head" object, i.e., the one that must forced into
# the link in order to draw in all of the other components

HEAD_ASRC = arm64_head.S

# In the kernel build, crt0 is the user-mode startup object; compile it
# here from common/crt0.c

ifeq ($(CONFIG_BUILD_KERNEL),y)
crt0$(OBJEXT): crt0.c
	$(CC) $(CFLAGS) -c common$(DELIM)crt0.c -o crt0$(OBJEXT)

STARTUP_OBJS = crt0$(OBJEXT)
endif

# Force the start-up logic to be at the beginning of the .text to simplify
# debug.

# Common assembly language files

CMN_ASRCS = arm64_vector_table.S arm64_vectors.S arm64_smccc.S
CMN_ASRCS += arm64_cpu_idle.S arm64_vfork_func.S

ifeq ($(CONFIG_ARCH_HAVE_TESTSET),y)
CMN_ASRCS += arm64_testset.S
endif

# Common C source files ( OS call up_xxx)

CMN_CSRCS = arm64_initialize.c arm64_initialstate.c arm64_boot.c
CMN_CSRCS += arm64_idle.c arm64_copystate.c
CMN_CSRCS += arm64_createstack.c arm64_releasestack.c arm64_stackframe.c arm64_usestack.c
CMN_CSRCS += arm64_task_sched.c arm64_exit.c arm64_vfork.c arm64_reprioritizertr.c
CMN_CSRCS += arm64_releasepending.c arm64_unblocktask.c arm64_blocktask.c
CMN_CSRCS += arm64_assert.c arm64_schedulesigaction.c arm64_backtrace.c
CMN_CSRCS += arm64_sigdeliver.c

# Common C source files ( hardware BSP )

CMN_CSRCS += arm64_mmu.c arm64_arch_timer.c arm64_cache.c
CMN_CSRCS += arm64_doirq.c arm64_gicv3.c arm64_fatal.c
CMN_CSRCS += arm64_syscall.c arm64_cpu_psci.c

# Use common heap allocation for now (may need to be customized later)

CMN_CSRCS += arm64_allocateheap.c

# SMP support: per-CPU idle stacks and secondary CPU start/pause/index

ifeq ($(CONFIG_SMP),y)
CMN_CSRCS += arm64_cpuidlestack.c arm64_cpustart.c arm64_cpuindex.c
CMN_CSRCS += arm64_cpupause.c
endif

# Kernel-build-only user-mode trampolines

ifeq ($(CONFIG_BUILD_KERNEL),y)
CMN_CSRCS += arm64_task_start.c arm64_pthread_start.c arm64_signal_dispatch.c
endif

# Address environment (per-task MMU mapping) support

ifeq ($(CONFIG_ARCH_ADDRENV),y)
CMN_CSRCS += arm64_addrenv.c arm64_addrenv_utils.c arm64_pgalloc.c
ifeq ($(CONFIG_ARCH_STACK_DYNAMIC),y)
CMN_CSRCS += arm64_addrenv_ustack.c
endif
ifeq ($(CONFIG_ARCH_KERNEL_STACK),y)
CMN_CSRCS += arm64_addrenv_kstack.c
endif
ifeq ($(CONFIG_MM_SHM),y)
CMN_CSRCS += arm64_addrenv_shm.c
endif
endif

# Physical/virtual page address translation helpers

ifeq ($(CONFIG_MM_PGALLOC),y)
CMN_CSRCS += arm64_physpgaddr.c
ifeq ($(CONFIG_ARCH_PGPOOL_MAPPING),y)
CMN_CSRCS += arm64_virtpgaddr.c
endif
endif

# FPU context save/restore support

ifeq ($(CONFIG_ARCH_FPU),y)
CMN_CSRCS += arm64_fpu.c
CMN_ASRCS += arm64_fpu_func.S
endif

# Stack coloration (stack usage measurement) support

ifeq ($(CONFIG_STACK_COLORATION),y)
CMN_CSRCS += arm64_checkstack.c
endif

View file

@ -0,0 +1,102 @@
/****************************************************************************
* arch/arm64/src/common/addrenv.h
*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership. The
* ASF licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the
* License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
****************************************************************************/
#ifndef __ARCH_ARM64_SRC_COMMON_ADDRENV_H
#define __ARCH_ARM64_SRC_COMMON_ADDRENV_H

/****************************************************************************
 * Included Files
 ****************************************************************************/

#include <nuttx/config.h>
#include <sys/types.h>
#include <stdint.h>

#include "arm64_internal.h"

#ifdef CONFIG_ARCH_ADDRENV

/****************************************************************************
 * Pre-processor Definitions
 ****************************************************************************/

/* Aligned size of the kernel stack */

#ifdef CONFIG_ARCH_KERNEL_STACK
# define ARCH_KERNEL_STACKSIZE STACK_ALIGN_UP(CONFIG_ARCH_KERNEL_STACKSIZE)
#endif

/****************************************************************************
 * Inline Functions
 ****************************************************************************/

/****************************************************************************
 * Public Data
 ****************************************************************************/

#ifndef __ASSEMBLY__
#ifdef __cplusplus
#define EXTERN extern "C"
extern "C"
{
#else
#define EXTERN extern
#endif

/****************************************************************************
 * Public Function Prototypes
 ****************************************************************************/

/****************************************************************************
 * Name: arm64_addrenv_create_region
 *
 * Description:
 *   Create one memory region.
 *
 * Returned Value:
 *   On success, the number of pages allocated is returned.  Otherwise, a
 *   negated errno value is returned.
 *
 ****************************************************************************/

int arm64_addrenv_create_region(uintptr_t **list, size_t listlen,
                                uintptr_t vaddr, size_t regionsize,
                                uint32_t mmuflags);

/****************************************************************************
 * Name: arm64_addrenv_destroy_region
 *
 * Description:
 *   Destroy one memory region.
 *
 ****************************************************************************/

void arm64_addrenv_destroy_region(uintptr_t **list, size_t listlen,
                                  uintptr_t vaddr, bool keep);

#undef EXTERN
#ifdef __cplusplus
}
#endif
#endif /* __ASSEMBLY__ */
#endif /* CONFIG_ARCH_ADDRENV */
#endif /* __ARCH_ARM64_SRC_COMMON_ADDRENV_H */

View file

@ -0,0 +1,163 @@
/****************************************************************************
* arch/arm64/src/common/arm64_allocateheap.c
*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership. The
* ASF licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the
* License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
****************************************************************************/
/****************************************************************************
* Included Files
****************************************************************************/
#include <nuttx/config.h>
#include <sys/types.h>
#include <stdint.h>
#include <assert.h>
#include <debug.h>
#include <nuttx/arch.h>
#include <nuttx/board.h>
#include <nuttx/userspace.h>
#include "arm64_internal.h"
/****************************************************************************
* Pre-processor Definitions
****************************************************************************/
/* Configuration ************************************************************/
/* Terminology. In the flat build (CONFIG_BUILD_FLAT=y), there is only a
* single heap access with the standard allocations (malloc/free). This
* heap is referred to as the user heap. In the protected build
* (CONFIG_BUILD_PROTECTED=y) where an MPU is used to protect a region of
* otherwise flat memory, there will be two allocators: One that allocates
* protected (kernel) memory and one that allocates unprotected (user)
* memory. These are referred to as the kernel and user heaps,
* respectively.
*
* The ARMv8 has no MPU but does have an MMU. With this MMU, it can support
* the kernel build (CONFIG_BUILD_KERNEL=y). In this configuration, there
* is one kernel heap but multiple user heaps: One per task group. However,
* in this case, we need only be concerned about initializing the single
* kernel heap here.
*/
/****************************************************************************
* Public Functions
****************************************************************************/
/****************************************************************************
* Name: up_allocate_heap/up_allocate_kheap
*
* Description:
* This function will be called to dynamically set aside the heap region.
*
* - For the normal "flat" build, this function returns the size of the
* single heap.
* - For the protected build (CONFIG_BUILD_PROTECTED=y) with both kernel-
* and user-space heaps (CONFIG_MM_KERNEL_HEAP=y), this function
* provides the size of the unprotected, user-space heap.
* - For the kernel build (CONFIG_BUILD_KERNEL=y), this function provides
* the size of the protected, kernel-space heap.
*
* If a protected kernel-space heap is provided, the kernel heap must be
* allocated by an analogous up_allocate_kheap(). A custom version of this
* file is needed if memory protection of the kernel heap is required.
*
* The following memory map is assumed for the flat build:
*
* .data region. Size determined at link time.
* .bss region Size determined at link time.
* IDLE thread stack. Size determined by CONFIG_IDLETHREAD_STACKSIZE.
* Heap. Extends to the end of SRAM.
*
* The following memory map is assumed for the kernel build:
*
* Kernel .data region. Size determined at link time.
* Kernel .bss region Size determined at link time.
* Kernel IDLE thread stack. Size determined by
* CONFIG_IDLETHREAD_STACKSIZE.
* Padding for alignment
* User .data region. Size determined at link time.
* User .bss region Size determined at link time.
* Kernel heap. Size determined by CONFIG_MM_KERNEL_HEAPSIZE.
* User heap. Extends to the end of SRAM.
*
****************************************************************************/
#ifdef CONFIG_BUILD_KERNEL
void up_allocate_kheap(void **heap_start, size_t *heap_size)
#else
void up_allocate_heap(void **heap_start, size_t *heap_size)
#endif
{
#if defined(CONFIG_BUILD_PROTECTED) && defined(CONFIG_MM_KERNEL_HEAP)
  /* Get the unaligned size and position of the user-space heap.
   * This heap begins after the user-space .bss section at an offset
   * of CONFIG_MM_KERNEL_HEAPSIZE (subject to alignment).
   */

  uintptr_t ubase = (uintptr_t)USERSPACE->us_bssend +
                    CONFIG_MM_KERNEL_HEAPSIZE;
  size_t usize = CONFIG_RAM_END - ubase;

  /* Return the user-space heap settings */

  board_autoled_on(LED_HEAPALLOCATE);
  *heap_start = (void *)ubase;
  *heap_size  = usize;
#else
  /* Return the heap settings: the heap begins at the top of the IDLE
   * thread's stack and extends to the end of RAM.
   */

  *heap_start = (void *)g_idle_topstack;
  *heap_size  = CONFIG_RAM_END - (size_t)g_idle_topstack;

  /* %p already prints the 0x prefix; %zx is the correct conversion for
   * size_t (PRIx64 expects uint64_t and mismatches size_t).
   */

  sinfo("heap_start=%p, heap_size=0x%zx\n", *heap_start, *heap_size);
#endif
}
/****************************************************************************
* Name: up_allocate_kheap
*
* Description:
* For the kernel build (CONFIG_BUILD_PROTECTED/KERNEL=y) with both kernel-
* and user-space heaps (CONFIG_MM_KERNEL_HEAP=y), this function allocates
* the kernel-space heap. A custom version of this function is needed if
* memory protection of the kernel heap is required.
*
****************************************************************************/
#if defined(CONFIG_BUILD_PROTECTED) && defined(CONFIG_MM_KERNEL_HEAP)
void up_allocate_kheap(void **heap_start, size_t *heap_size)
{
  /* The kernel heap occupies the CONFIG_MM_KERNEL_HEAPSIZE bytes that lie
   * between the end of the user-space .bss section and the start of the
   * user-space heap (see up_allocate_heap above).
   */

  uintptr_t kbase = (uintptr_t)USERSPACE->us_bssend;
  uintptr_t kend  = kbase + CONFIG_MM_KERNEL_HEAPSIZE;

  /* Return the kernel heap settings (i.e., the part of the heap region
   * that was not dedicated to the user heap).
   */

  *heap_start = (void *)kbase;
  *heap_size  = (size_t)(kend - kbase);
}
#endif

View file

@ -0,0 +1,548 @@
/****************************************************************************
* arch/arm64/src/common/arm64_arch.h
*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership. The
* ASF licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the
* License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
****************************************************************************/
#ifndef ___ARCH_ARM64_SRC_COMMON_ARM64_ARCH_H
#define ___ARCH_ARM64_SRC_COMMON_ARM64_ARCH_H
/****************************************************************************
* Included Files
****************************************************************************/
#include <nuttx/config.h>
/* Unsigned integer with bit position n set (signed in
* assembly language).
*/
#ifndef __ASSEMBLY__
#include <stdint.h>
#endif
#include "barriers.h"
/****************************************************************************
* Pre-processor Definitions
****************************************************************************/
#define BIT(n) ((1UL) << (n))
#define BIT64(n) ((1ULL) << (n))
/* Bit mask with bits 0 through n-1 (inclusive) set,
* or 0 if n is 0.
*/
#define BIT_MASK(n) (BIT(n) - 1)
#define BIT64_MASK(n) (BIT64(n) - 1ULL)
#define DAIFSET_FIQ_BIT BIT(0)
#define DAIFSET_IRQ_BIT BIT(1)
#define DAIFSET_ABT_BIT BIT(2)
#define DAIFSET_DBG_BIT BIT(3)
#define DAIFCLR_FIQ_BIT BIT(0)
#define DAIFCLR_IRQ_BIT BIT(1)
#define DAIFCLR_ABT_BIT BIT(2)
#define DAIFCLR_DBG_BIT BIT(3)
#define DAIF_FIQ_BIT BIT(6)
#define DAIF_IRQ_BIT BIT(7)
#define DAIF_ABT_BIT BIT(8)
#define DAIF_DBG_BIT BIT(9)
#define DAIF_MASK (0xf << 6)
/* Arm® Architecture Registers Armv8, for Armv8-A architecture profile
* ( DDI 0595, ID121321 ), defined:
*
* SCTLR_EL1: System Control Register (EL1)
* SCTLR_EL2: System Control Register (EL2)
* SCTLR_EL3: System Control Register (EL3)
*
*/
#define SCTLR_EL3_RES1 (BIT(29) | BIT(28) | BIT(23) | \
BIT(22) | BIT(18) | BIT(16) | \
BIT(11) | BIT(5) | BIT(4))
#define SCTLR_EL2_RES1 (BIT(29) | BIT(28) | BIT(23) | \
BIT(22) | BIT(18) | BIT(16) | \
BIT(11) | BIT(5) | BIT(4))
#define SCTLR_EL1_RES1 (BIT(29) | BIT(28) | BIT(23) | \
BIT(22) | BIT(20) | BIT(11))
#define SCTLR_M_BIT BIT(0)
#define SCTLR_A_BIT BIT(1)
#define SCTLR_C_BIT BIT(2)
#define SCTLR_SA_BIT BIT(3)
#define SCTLR_I_BIT BIT(12)
/* SPSR M[3:0] define
*
* Arm® Architecture Registers Armv8, for Armv8-A architecture profile
* ( DDI 0595, ID121321 ), defined:
* SPSR_EL1: Saved Program Status Register (EL1)
* SPSR_EL2: Saved Program Status Register (EL2)
* SPSR_EL3: Saved Program Status Register (EL3)
*
* reference to Programmers Guide for ARMv8-A
* (ARM DEN0024A, ID050815 ), 4.1.2 Stack pointer
*
* The T suffix, indicates use of the SP_EL0 stack pointer.
* The H suffix, indicates use of the SP_ELx stack pointer.
*
*/
#define SPSR_DAIF_SHIFT (6)
#define SPSR_DAIF_MASK (0xf << SPSR_DAIF_SHIFT)
#define SPSR_MODE_EL0T (0x0)
#define SPSR_MODE_EL1T (0x4)
#define SPSR_MODE_EL1H (0x5)
#define SPSR_MODE_EL2T (0x8)
#define SPSR_MODE_EL2H (0x9)
#define SPSR_MODE_MASK (0xf)
/* CurrentEL: Current Exception Level */
#define MODE_EL_SHIFT (0x2)
#define MODE_EL_MASK (0x3)
#define MODE_EL3 (0x3)
#define MODE_EL2 (0x2)
#define MODE_EL1 (0x1)
#define MODE_EL0 (0x0)
/* struct arm64_boot_params member offset for assembly code
* struct is defined at arm64_cpustart.c
*/
#define BOOT_PARAM_MPID 0
#define BOOT_PARAM_SP 8
#ifndef __ASSEMBLY__
/****************************************************************************
* Pre-processor Definitions
****************************************************************************/
#define STRINGIFY(x) #x
#ifndef ARRAY_SIZE
# define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
#endif
/* define MAX(a, b)/MIN(a, b)
* The larger/smaller value between a and b.
* Arguments are evaluated twice.
*/
#ifndef MAX
# define MAX(a, b) (((a) > (b)) ? (a) : (b))
#endif
#ifndef MIN
# define MIN(a, b) (((a) < (b)) ? (a) : (b))
#endif
#define GET_EL(mode) (((mode) >> MODE_EL_SHIFT) & MODE_EL_MASK)
/* MPIDR_EL1, Multiprocessor Affinity Register */
#define MPIDR_AFFLVL_MASK (0xff)
#define MPIDR_AFF0_SHIFT (0)
#define MPIDR_AFF1_SHIFT (8)
#define MPIDR_AFF2_SHIFT (16)
#define MPIDR_AFF3_SHIFT (32)
#define MPIDR_AFFLVL(mpidr, aff_level) \
(((mpidr) >> MPIDR_AFF ## aff_level ## _SHIFT) & MPIDR_AFFLVL_MASK)
#define GET_MPIDR() read_sysreg(mpidr_el1)
#define MPIDR_TO_CORE(mpidr) MPIDR_AFFLVL((mpidr), 0)
#define IS_PRIMARY_CORE() (!MPIDR_TO_CORE(GET_MPIDR()))
/* System register interface to GICv3 */
#define ICC_IGRPEN1_EL1 S3_0_C12_C12_7
#define ICC_SGI1R S3_0_C12_C11_5
#define ICC_SRE_EL1 S3_0_C12_C12_5
#define ICC_SRE_EL2 S3_4_C12_C9_5
#define ICC_SRE_EL3 S3_6_C12_C12_5
#define ICC_CTLR_EL1 S3_0_C12_C12_4
#define ICC_CTLR_EL3 S3_6_C12_C12_4
#define ICC_PMR_EL1 S3_0_C4_C6_0
#define ICC_RPR_EL1 S3_0_C12_C11_3
#define ICC_IGRPEN1_EL3 S3_6_C12_C12_7
#define ICC_IGRPEN0_EL1 S3_0_C12_C12_6
#define ICC_HPPIR0_EL1 S3_0_C12_C8_2
#define ICC_HPPIR1_EL1 S3_0_C12_C12_2
#define ICC_IAR0_EL1 S3_0_C12_C8_0
#define ICC_IAR1_EL1 S3_0_C12_C12_0
#define ICC_EOIR0_EL1 S3_0_C12_C8_1
#define ICC_EOIR1_EL1 S3_0_C12_C12_1
#define ICC_SGI0R_EL1 S3_0_C12_C11_7
/* register constants */
#define ICC_SRE_ELX_SRE_BIT BIT(0)
#define ICC_SRE_ELX_DFB_BIT BIT(1)
#define ICC_SRE_ELX_DIB_BIT BIT(2)
#define ICC_SRE_EL3_EN_BIT BIT(3)
/* ICC SGI macros */
#define SGIR_TGT_MASK (0xffff)
#define SGIR_AFF1_SHIFT (16)
#define SGIR_AFF2_SHIFT (32)
#define SGIR_AFF3_SHIFT (48)
#define SGIR_AFF_MASK (0xf)
#define SGIR_INTID_SHIFT (24)
#define SGIR_INTID_MASK (0xf)
#define SGIR_IRM_SHIFT (40)
#define SGIR_IRM_MASK (0x1)
#define SGIR_IRM_TO_AFF (0)
#define SGIR_IRM_TO_ALL (1)
#define GICV3_SGIR_VALUE(_aff3, _aff2, _aff1, _intid, _irm, _tgt) \
((((uint64_t)(_aff3) & SGIR_AFF_MASK) << SGIR_AFF3_SHIFT) | \
(((uint64_t)(_irm) & SGIR_IRM_MASK) << SGIR_IRM_SHIFT) | \
(((uint64_t)(_aff2) & SGIR_AFF_MASK) << SGIR_AFF2_SHIFT) | \
(((_intid) & SGIR_INTID_MASK) << SGIR_INTID_SHIFT) | \
(((_aff1) & SGIR_AFF_MASK) << SGIR_AFF1_SHIFT) | \
((_tgt) & SGIR_TGT_MASK))
/* CPTR_EL2, Architectural Feature Trap Register (EL2) */
#define CPTR_EZ_BIT BIT(8)
#define CPTR_TFP_BIT BIT(10)
#define CPTR_TTA_BIT BIT(20)
#define CPTR_TCPAC_BIT BIT(31)
#define CPTR_EL2_RES1 BIT(13) | BIT(12) | BIT(9) | (0xff)
/* CPACR_EL1, Architectural Feature Access Control Register */
#define CPACR_EL1_FPEN_NOTRAP (0x3 << 20)
/* SCR_EL3, Secure Configuration Register */
#define SCR_NS_BIT BIT(0)
#define SCR_IRQ_BIT BIT(1)
#define SCR_FIQ_BIT BIT(2)
#define SCR_EA_BIT BIT(3)
#define SCR_SMD_BIT BIT(7)
#define SCR_HCE_BIT BIT(8)
#define SCR_RW_BIT BIT(10)
#define SCR_ST_BIT BIT(11)
#define SCR_RES1 (BIT(4) | BIT(5))
/* HCR_EL2, Hypervisor Configuration Register */
#define HCR_FMO_BIT BIT(3)
#define HCR_IMO_BIT BIT(4)
#define HCR_AMO_BIT BIT(5)
#define HCR_RW_BIT BIT(31)
/* CNTHCTL_EL2 bits definitions */
#define CNTHCTL_EL2_EL1PCEN_EN BIT(1)
#define CNTHCTL_EL2_EL1PCTEN_EN BIT(0)
/* CNTV_CVAL, Counter-timer Virtual Timer CompareValue register
* CNTV_CTL, Counter-timer Virtual Timer Control register
*/
#define CNTV_CTL_ENABLE_BIT BIT(0)
#define CNTV_CTL_IMASK_BIT BIT(1)
/* Maximum numbers of translation tables
* This option specifies the maximum numbers of translation tables
* excluding the base translation table. Based on this, translation
* tables are allocated at compile time and used at runtime as needed.
* If the runtime need exceeds preallocated numbers of translation
* tables, it will result in assert. Number of translation tables
* required is decided based on how many discrete memory regions
* (both normal and device memory) are present on given platform and
* how much granularity is required while assigning attributes
* to these memory regions.
*/
#define CONFIG_MAX_XLAT_TABLES 7
/* Virtual address space size
* Allows choosing one of multiple possible virtual address
* space sizes. The level of translation table is determined by
* a combination of page size and virtual address space size.
*
* The choice could be: 32, 36, 42, 48
*/
#define CONFIG_ARM64_VA_BITS 36
/* Physical address space size
* Choose the maximum physical address range that the kernel will support.
*
* The choice could be: 32, 36, 42, 48
*/
#define CONFIG_ARM64_PA_BITS 36
#define L1_CACHE_SHIFT (6)
#define L1_CACHE_BYTES BIT(L1_CACHE_SHIFT)
/****************************************************************************
* Type Declarations
****************************************************************************/
#ifdef CONFIG_ARCH_FPU
/****************************************************************************
* armv8 fpu registers and context
****************************************************************************/
struct fpu_reg
{
__int128 q[32];
uint32_t fpsr;
uint32_t fpcr;
uint64_t fpu_trap;
};
#endif
/****************************************************************************
* Registers and exception context
****************************************************************************/
struct regs_context
{
uint64_t regs[31]; /* x0~x30 */
uint64_t sp_elx;
uint64_t elr;
uint64_t spsr;
uint64_t sp_el0;
uint64_t exe_depth;
uint64_t tpidr_el0;
uint64_t tpidr_el1;
};
/****************************************************************************
* Public Function Prototypes
****************************************************************************/
/****************************************************************************
* Name:
* getreg8/16/32/64
* putreg8/16/32/64
*
* Description:
* We need to use explicit assembler instruction there, because with
* classic "volatile pointer" approach compiler might generate
* instruction with immediate value like
*
* str w4, [x1], #4
*
* Such instructions produce invalid syndrome in HSR register,
* so hypervisor can't emulate MMIO when it traps memory access.
*
****************************************************************************/
/* Read one byte from a memory-mapped register.  The explicit LDRB (see the
 * note above) keeps the access emulatable by a hypervisor; the DMB orders
 * the read before subsequent memory accesses.
 */

static inline uint8_t getreg8(unsigned long addr)
{
uint8_t val;
__asm__ volatile ("ldrb %w0, [%1]" : "=r" (val) : "r" (addr));
ARM64_DMB();
return val;
}

/* Write one byte to a memory-mapped register; the DMB orders prior
 * accesses before the device write.
 */

static inline void putreg8(uint8_t data, unsigned long addr)
{
ARM64_DMB();
__asm__ volatile ("strb %w0, [%1]" : : "r" (data), "r" (addr));
}

/* Read a 16-bit memory-mapped register (LDRH + DMB, as above) */

static inline uint16_t getreg16(unsigned long addr)
{
uint16_t val;
__asm__ volatile ("ldrh %w0, [%1]" : "=r" (val) : "r" (addr));
ARM64_DMB();
return val;
}

/* Write a 16-bit memory-mapped register (DMB + STRH) */

static inline void putreg16(uint16_t data, unsigned long addr)
{
ARM64_DMB();
__asm__ volatile ("strh %w0, [%1]" : : "r" (data), "r" (addr));
}

/* Read a 32-bit memory-mapped register (LDR Wt + DMB) */

static inline uint32_t getreg32(unsigned long addr)
{
uint32_t val;
__asm__ volatile ("ldr %w0, [%1]" : "=r" (val) : "r" (addr));
ARM64_DMB();
return val;
}

/* Write a 32-bit memory-mapped register (DMB + STR Wt) */

static inline void putreg32(uint32_t data, unsigned long addr)
{
ARM64_DMB();
__asm__ volatile ("str %w0, [%1]" : : "r" (data), "r" (addr));
}

/* Read a 64-bit memory-mapped register (LDR Xt + DMB) */

static inline uint64_t getreg64(unsigned long addr)
{
uint64_t val;
__asm__ volatile ("ldr %x0, [%1]" : "=r" (val) : "r" (addr));
ARM64_DMB();
return val;
}

/* Write a 64-bit memory-mapped register (DMB + STR Xt) */

static inline void putreg64(uint64_t data, unsigned long addr)
{
ARM64_DMB();
__asm__ volatile ("str %x0, [%1]" : : "r" (data), "r" (addr));
}

/* Single NOP instruction (e.g. for short busy delays or alignment) */

static inline void arch_nop(void)
{
__asm__ volatile ("nop");
}
/****************************************************************************
* Name:
* read_/write_/zero_ sysreg
*
* Description:
*
* ARMv8 Architecture Registers access method
* All the macros need a memory clobber
*
****************************************************************************/
#define read_sysreg(reg) \
({ \
uint64_t __val; \
__asm__ volatile ("mrs %0, " STRINGIFY(reg) \
: "=r" (__val) :: "memory"); \
__val; \
})
#define read_sysreg_dump(reg) \
({ \
uint64_t __val; \
__asm__ volatile ("mrs %0, " STRINGIFY(reg) \
: "=r" (__val) :: "memory"); \
sinfo("%s, regval=0x%llx\n", \
STRINGIFY(reg), __val); \
__val; \
})
#define write_sysreg(__val, reg) \
({ \
__asm__ volatile ("msr " STRINGIFY(reg) ", %0" \
: : "r" (__val) : "memory"); \
})
#define zero_sysreg(reg) \
({ \
__asm__ volatile ("msr " STRINGIFY(reg) ", xzr" \
::: "memory"); \
})
#define modreg8(v,m,a) putreg8((getreg8(a) & ~(m)) | ((v) & (m)), (a))
#define modreg16(v,m,a) putreg16((getreg16(a) & ~(m)) | ((v) & (m)), (a))
#define modreg32(v,m,a) putreg32((getreg32(a) & ~(m)) | ((v) & (m)), (a))
/****************************************************************************
* Name:
* arch_get_exception_depth
* arch_get_current_tcb
*
* Description:
* tpidrro_el0 is used to record exception depth, it's used for fpu trap
* happened at exception context (like IRQ).
* tpidr_el1 is used to record TCB at present, it's used for fpu and task
* switch propose
*
****************************************************************************/
/* Return the current exception nesting depth, which this port keeps in
 * TPIDRRO_EL0 (see the note above).
 */

static inline int arch_get_exception_depth(void)
{
return read_sysreg(tpidrro_el0);
}

/* Return the current TCB pointer, which this port keeps in TPIDR_EL1
 * for FPU and task-switch handling (see the note above).
 */

static inline uint64_t arch_get_current_tcb(void)
{
return read_sysreg(tpidr_el1);
}
void arch_cpu_idle(void);
/****************************************************************************
* Name: arm64_cpu_disable
*
* Description:
* Called from CPU0 to make sure that all other CPUs are in the disabled
* state. This is a formality because, if the other CPUs are actually
* still running, then we have probably already crashed.
*
* Input Parameters:
* None
*
* Returned Value:
* None
*
****************************************************************************/
#ifdef CONFIG_SMP
void arm64_cpu_disable(void);
#else
# define arm64_cpu_disable()
#endif
/****************************************************************************
* Name: arm64_cpu_enable
*
* Description:
* Called from CPU0 to enable all other CPUs. The enabled CPUs will start
* execution at __cpuN_start and, after very low-level CPU initialization
* has been performed, will branch to arm_cpu_boot()
*
* Input Parameters:
* None
*
* Returned Value:
* None
*
****************************************************************************/
#ifdef CONFIG_SMP
void arm64_cpu_enable(void);
#else
# define arm64_cpu_enable()
#endif
#endif /* __ASSEMBLY__ */
#endif /* ___ARCH_ARM64_SRC_COMMON_ARM64_ARCH_H */

View file

@ -0,0 +1,233 @@
/****************************************************************************
* arch/arm64/src/common/arm64_arch_timer.c
*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership. The
* ASF licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the
* License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
****************************************************************************/
/****************************************************************************
* Included Files
****************************************************************************/
#include <nuttx/config.h>
#include <debug.h>
#include <assert.h>
#include <nuttx/arch.h>
#include <arch/irq.h>
#include <arch/chip/chip.h>
#include <nuttx/spinlock.h>
#include "arm64_arch.h"
#include "arm64_internal.h"
#include "arm64_gic.h"
#include "arm64_arch_timer.h"
/****************************************************************************
* Pre-processor Definitions
****************************************************************************/
#define MIN_DELAY (1000)
/****************************************************************************
* Private Data
****************************************************************************/
static uint64_t last_cycle;
static uint64_t cycle_per_tick;
static uint32_t arch_timer_rate;
/****************************************************************************
* Private Functions
****************************************************************************/
/* Program CNTV_CVAL_EL0, the EL0 virtual timer compare value: the timer
 * condition is met once the virtual counter reaches this value.
 */

static inline void arm64_arch_timer_set_compare(uint64_t value)
{
write_sysreg(value, cntv_cval_el0);
}
/* Enable or disable the EL0 virtual timer via the ENABLE bit of
 * CNTV_CTL_EL0 (read-modify-write).
 */

static inline void arm64_arch_timer_enable(unsigned char enable)
{
  uint64_t ctl = read_sysreg(cntv_ctl_el0);

  ctl = enable ? (ctl | CNTV_CTL_ENABLE_BIT)
               : (ctl & ~CNTV_CTL_ENABLE_BIT);

  write_sysreg(ctl, cntv_ctl_el0);
}
/* Mask (true) or unmask (false) the virtual timer interrupt via the
 * IMASK bit of CNTV_CTL_EL0 (read-modify-write).
 */

static inline void arm64_arch_timer_set_irq_mask(bool mask)
{
  uint64_t ctl = read_sysreg(cntv_ctl_el0);

  ctl = mask ? (ctl | CNTV_CTL_IMASK_BIT)
             : (ctl & ~CNTV_CTL_IMASK_BIT);

  write_sysreg(ctl, cntv_ctl_el0);
}
/* Return the current EL0 virtual counter value (CNTVCT_EL0) */

static inline uint64_t arm64_arch_timer_count(void)
{
return read_sysreg(cntvct_el0);
}

/* Return the system counter frequency in Hz (CNTFRQ_EL0) */

static inline uint32_t arm64_arch_timer_get_cntfrq(void)
{
return read_sysreg(cntfrq_el0);
}
#ifdef CONFIG_SCHED_TICKLESS
/* Tickless-mode compare interrupt handler: advance last_cycle by the
 * number of whole ticks elapsed, then mask the timer interrupt until the
 * tickless scheduler programs the next interval.
 *
 * NOTE(review): g_arch_timer_lock is not declared in this file's visible
 * scope -- confirm it is defined at file scope in the tickless config.
 */

static int arm64_arch_timer_compare_isr(int irq, void *regs, void *arg)
{
  irqstate_t flags;
  uint64_t curr_cycle;
  uint32_t delta_ticks;

  UNUSED(irq);
  UNUSED(regs);
  UNUSED(arg);

  flags = spin_lock_irqsave(&g_arch_timer_lock);

  curr_cycle  = arm64_arch_timer_count();
  delta_ticks = (uint32_t)((curr_cycle - last_cycle) / cycle_per_tick);
  last_cycle += delta_ticks * cycle_per_tick;

  /* Mask further timer interrupts until the next interval is scheduled.
   * (The original called the non-existent arm_arch_timer_set_irq_mask.)
   */

  arm64_arch_timer_set_irq_mask(true);

  spin_unlock_irqrestore(&g_arch_timer_lock, flags);
  nxsched_process_timer();
  return OK;
}
#else
/* Tick-mode compare interrupt handler: re-arm the compare register one
 * tick after the previous tick boundary, with a MIN_DELAY guard so the
 * compare value is never programmed in the past or too close to "now".
 */

static int arm64_arch_timer_compare_isr(int irq, void *regs, void *arg)
{
  uint64_t curr_cycle;
  uint32_t delta_ticks;
  uint64_t next_cycle;

  UNUSED(irq);
  UNUSED(regs);
  UNUSED(arg);

  curr_cycle  = arm64_arch_timer_count();
  delta_ticks = (uint32_t)((curr_cycle - last_cycle) / cycle_per_tick);
  last_cycle += delta_ticks * cycle_per_tick;

  next_cycle = last_cycle + cycle_per_tick;
  if ((uint64_t)(next_cycle - curr_cycle) < MIN_DELAY)
    {
      next_cycle += cycle_per_tick;
    }

  arm64_arch_timer_set_compare(next_cycle);

  /* Unmask so the next compare event raises the interrupt */

  arm64_arch_timer_set_irq_mask(false);
  nxsched_process_timer();
  return OK;
}
#endif
/****************************************************************************
* Public Functions
****************************************************************************/
#ifdef CONFIG_SMP
/* Notes:
 *
 * The original ARMv8-A timer design assigns a private timer to every
 * PE (CPU core); ARM_ARCH_TIMER_IRQ is a PPI, so it must be enabled
 * on every core.
 *
 * NuttX, however, is designed so that only the primary core handles the
 * timer interrupt and calls nxsched_process_timer() in timer tick mode,
 * so we need to enable the timer only for the primary core.
 *
 * i.MX6 uses the GPT, which is an SPI rather than the generic timer, to
 * handle the timer interrupt.
 */
/* Initialize the per-CPU virtual timer in an SMP configuration: program
 * the first compare point one tick from now, enable the timer, enable the
 * PPI at the GIC, and unmask the timer interrupt.
 * NOTE(review): presumably runs on each secondary core during its bring-up
 * (the per-CPU timer is a PPI) -- confirm against the caller.
 */

void arm64_smp_timer_init(void)
{
uint64_t curr_cycle;

/* set the initial status of timer0 of each secondary core */

curr_cycle = arm64_arch_timer_count();
arm64_arch_timer_set_compare(curr_cycle + cycle_per_tick);
arm64_arch_timer_enable(true);
up_enable_irq(ARM_ARCH_TIMER_IRQ);
arm64_arch_timer_set_irq_mask(false);
}
#endif
/* Return a snapshot of the virtual counter (CNTVCT_EL0) in timer cycles */

uint64_t arm64_counter_read(void)
{
  uint64_t cycles = arm64_arch_timer_count();

  return cycles;
}
/****************************************************************************
* Name: up_timer_initialize
*
* Description:
*
****************************************************************************/
void up_timer_initialize(void)
{
uint64_t curr_cycle;
arch_timer_rate = arm64_arch_timer_get_cntfrq();
cycle_per_tick = ((uint64_t)arch_timer_rate / (uint64_t)TICK_PER_SEC);
sinfo("%s: cp15 timer(s) running at %lu.%02luMHz, cycle %ld\n", __func__,
(unsigned long)arch_timer_rate / 1000000,
(unsigned long)(arch_timer_rate / 10000) % 100, cycle_per_tick);
irq_attach(ARM_ARCH_TIMER_IRQ, arm64_arch_timer_compare_isr, 0);
arm64_gic_irq_set_priority(ARM_ARCH_TIMER_IRQ, ARM_ARCH_TIMER_PRIO,
ARM_ARCH_TIMER_FLAGS);
curr_cycle = arm64_arch_timer_count();
arm64_arch_timer_set_compare(curr_cycle + cycle_per_tick);
arm64_arch_timer_enable(true);
up_enable_irq(ARM_ARCH_TIMER_IRQ);
arm64_arch_timer_set_irq_mask(false);
}

View file

@ -0,0 +1,58 @@
/****************************************************************************
* arch/arm64/src/common/arm64_arch_timer.h
*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership. The
* ASF licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the
* License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
****************************************************************************/
#ifndef __ARCH_ARM64_SRC_COMMON_ARM64_ARCH_TIMER_H
#define __ARCH_ARM64_SRC_COMMON_ARM64_ARCH_TIMER_H
/****************************************************************************
* Included Files
****************************************************************************/
#include <nuttx/config.h>
#include <arch/irq.h>
#include <arch/chip/chip.h>
#include "arm64_gic.h"
/****************************************************************************
* Pre-processor Definitions
****************************************************************************/
#define CONFIG_ARM_TIMER_SECURE_IRQ (GIC_PPI_INT_BASE + 13)
#define CONFIG_ARM_TIMER_NON_SECURE_IRQ (GIC_PPI_INT_BASE + 14)
#define CONFIG_ARM_TIMER_VIRTUAL_IRQ (GIC_PPI_INT_BASE + 11)
#define CONFIG_ARM_TIMER_HYP_IRQ (GIC_PPI_INT_BASE + 10)
#define ARM_ARCH_TIMER_IRQ CONFIG_ARM_TIMER_VIRTUAL_IRQ
#define ARM_ARCH_TIMER_PRIO IRQ_DEFAULT_PRIORITY
#define ARM_ARCH_TIMER_FLAGS IRQ_TYPE_LEVEL
/****************************************************************************
* Public Function Prototypes
****************************************************************************/
extern uint64_t arm64_counter_read(void);
extern void arm64_start_timer(void);
#ifdef CONFIG_SMP
void arm64_smp_timer_init(void);
#endif
#endif /* __ARCH_ARM64_SRC_COMMON_ARM64_ARCH_TIMER_H */

View file

@ -0,0 +1,583 @@
/****************************************************************************
* arch/arm64/src/common/arm64_assert.c
*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership. The
* ASF licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the
* License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
****************************************************************************/
/****************************************************************************
* Included Files
****************************************************************************/
#include <nuttx/config.h>
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <nuttx/irq.h>
#include <nuttx/arch.h>
#include <assert.h>
#include <debug.h>
#include <nuttx/board.h>
#include <nuttx/syslog/syslog.h>
#include <nuttx/usb/usbdev_trace.h>
#include "sched/sched.h"
#include "irq/irq.h"
#include "arm64_arch.h"
#include "arm64_internal.h"
#include "chip.h"
#ifdef CONFIG_ARCH_FPU
#include "arm64_fpu.h"
#endif
/****************************************************************************
* Pre-processor Definitions
****************************************************************************/
/* USB trace dumping */
#ifndef CONFIG_USBDEV_TRACE
# undef CONFIG_ARCH_USBDUMP
#endif
#ifndef CONFIG_BOARD_RESET_ON_ASSERT
# define CONFIG_BOARD_RESET_ON_ASSERT 0
#endif
/****************************************************************************
 * Name: arm64_registerdump
 *
 * Description:
 *   Dump the saved exception context -- general-purpose registers x0-x30
 *   plus the saved status registers (SPSR, ELR, stack pointers, thread
 *   pointers and exception depth) -- to the syslog at alert level.
 *
 * Input Parameters:
 *   regs - Pointer to the saved register context to dump.
 *
 ****************************************************************************/

static void arm64_registerdump(struct regs_context * regs)
{
_alert("stack = %p\n", regs);

/* General-purpose registers, two per line */

_alert("x0: 0x%-16"PRIx64" x1: 0x%"PRIx64"\n",
regs->regs[REG_X0], regs->regs[REG_X1]);
_alert("x2: 0x%-16"PRIx64" x3: 0x%"PRIx64"\n",
regs->regs[REG_X2], regs->regs[REG_X3]);
_alert("x4: 0x%-16"PRIx64" x5: 0x%"PRIx64"\n",
regs->regs[REG_X4], regs->regs[REG_X5]);
_alert("x6: 0x%-16"PRIx64" x7: 0x%"PRIx64"\n",
regs->regs[REG_X6], regs->regs[REG_X7]);
_alert("x8: 0x%-16"PRIx64" x9: 0x%"PRIx64"\n",
regs->regs[REG_X8], regs->regs[REG_X9]);
_alert("x10: 0x%-16"PRIx64" x11: 0x%"PRIx64"\n",
regs->regs[REG_X10], regs->regs[REG_X11]);
_alert("x12: 0x%-16"PRIx64" x13: 0x%"PRIx64"\n",
regs->regs[REG_X12], regs->regs[REG_X13]);
_alert("x14: 0x%-16"PRIx64" x15: 0x%"PRIx64"\n",
regs->regs[REG_X14], regs->regs[REG_X15]);
_alert("x16: 0x%-16"PRIx64" x17: 0x%"PRIx64"\n",
regs->regs[REG_X16], regs->regs[REG_X17]);
_alert("x18: 0x%-16"PRIx64" x19: 0x%"PRIx64"\n",
regs->regs[REG_X18], regs->regs[REG_X19]);
_alert("x20: 0x%-16"PRIx64" x21: 0x%"PRIx64"\n",
regs->regs[REG_X20], regs->regs[REG_X21]);
_alert("x22: 0x%-16"PRIx64" x23: 0x%"PRIx64"\n",
regs->regs[REG_X22], regs->regs[REG_X23]);
_alert("x24: 0x%-16"PRIx64" x25: 0x%"PRIx64"\n",
regs->regs[REG_X24], regs->regs[REG_X25]);
_alert("x26: 0x%-16"PRIx64" x27: 0x%"PRIx64"\n",
regs->regs[REG_X26], regs->regs[REG_X27]);
_alert("x28: 0x%-16"PRIx64" x29: 0x%"PRIx64"\n",
regs->regs[REG_X28], regs->regs[REG_X29]);
_alert("x30: 0x%-16"PRIx64"\n", regs->regs[REG_X30]);
_alert("\n");

/* Saved status registers from the exception frame */

_alert("STATUS Registers:\n");
_alert("SPSR: 0x%-16"PRIx64"\n", regs->spsr);
_alert("ELR: 0x%-16"PRIx64"\n", regs->elr);
_alert("SP_EL0: 0x%-16"PRIx64"\n", regs->sp_el0);
_alert("SP_ELX: 0x%-16"PRIx64"\n", regs->sp_elx);
_alert("TPIDR_EL0: 0x%-16"PRIx64"\n", regs->tpidr_el0);
_alert("TPIDR_EL1: 0x%-16"PRIx64"\n", regs->tpidr_el1);
_alert("EXE_DEPTH: 0x%-16"PRIx64"\n", regs->exe_depth);
}
#ifdef CONFIG_ARCH_STACKDUMP
/****************************************************************************
* Private Functions
****************************************************************************/
/****************************************************************************
 * Name: arm64_stackdump
 *
 * Description:
 *   Dump stack memory from sp up to stack_top to the syslog, 64 bytes
 *   (eight 64-bit words) per output line.  Both bounds are rounded down
 *   to 32-byte alignment before the walk.
 *
 * Input Parameters:
 *   sp        - Lowest stack address to dump.
 *   stack_top - Address one past the highest stack address to dump.
 *
 ****************************************************************************/

static void arm64_stackdump(uint64_t sp, uint64_t stack_top)
{
uint64_t stack;

/* Flush any buffered SYSLOG data to avoid overwrite */

syslog_flush();

for (stack = sp & ~0x1f; stack < (stack_top & ~0x1f); stack += 64)
{
uint64_t *ptr = (uint64_t *)stack;
_alert("%08" PRIx64 ": %08" PRIx64 " %08" PRIx64 " %08" PRIx64
" %08" PRIx64 " %08" PRIx64 " %08" PRIx64 " %08" PRIx64
" %08" PRIx64 "\n",
stack, ptr[0], ptr[1], ptr[2], ptr[3],
ptr[4], ptr[5], ptr[6], ptr[7]);
}
}
/****************************************************************************
 * Name: arm64_dump_task
 *
 * Description:
 *   nxsched_foreach() callback: print one summary line for the given task
 *   (PID, priority, optional CPU, stack usage, CPU load and arguments).
 *   The column set must stay in step with the header printed by
 *   arm64_showtasks().
 *
 ****************************************************************************/
static void arm64_dump_task(struct tcb_s *tcb, void *arg)
{
  char args[64] = "";
#ifdef CONFIG_STACK_COLORATION
  uint64_t stack_filled = 0;
  uint64_t stack_used;
#endif
#ifdef CONFIG_SCHED_CPULOAD
  struct cpuload_s cpuload;
  uint64_t fracpart;
  uint64_t intpart;
  uint64_t tmp;
  /* Convert the load ratio to a percentage with one decimal place */
  clock_cpuload(tcb->pid, &cpuload);
  if (cpuload.total > 0)
    {
      tmp = (1000 * cpuload.active) / cpuload.total;
      intpart = tmp / 10;
      fracpart = tmp - 10 * intpart;
    }
  else
    {
      /* No samples accumulated yet: report 0.0% */
      intpart = 0;
      fracpart = 0;
    }
#endif
#ifdef CONFIG_STACK_COLORATION
  stack_used = up_check_tcbstack(tcb);
  if (tcb->adj_stack_size > 0 && stack_used > 0)
    {
      /* Use fixed-point math with one decimal place */
      stack_filled = 10 * 100 * stack_used / tcb->adj_stack_size;
    }
#endif
#ifndef CONFIG_DISABLE_PTHREAD
  if ((tcb->flags & TCB_FLAG_TTYPE_MASK) == TCB_FLAG_TTYPE_PTHREAD)
    {
      /* pthreads carry a single start argument pointer, not an argv */
      struct pthread_tcb_s *ptcb = (struct pthread_tcb_s *)tcb;
      snprintf(args, sizeof(args), "%p ", ptcb->arg);
    }
  else
#endif
    {
      /* Tasks: join argv[1..] into the args buffer (argv[0] is the name) */
      char **argv = tcb->group->tg_info->argv + 1;
      size_t npos = 0;
      while (*argv != NULL && npos < sizeof(args))
        {
          npos += snprintf(args + npos, sizeof(args) - npos, " %s", *argv++);
        }
    }
  /* Dump interesting properties of this task */
  _alert(" %4d %4d"
#ifdef CONFIG_SMP
         " %4d"
#endif
#ifdef CONFIG_STACK_COLORATION
         " %7lu"
#endif
         " %7lu"
#ifdef CONFIG_STACK_COLORATION
         " %3" PRId64 ".%1" PRId64 "%%%c"
#endif
#ifdef CONFIG_SCHED_CPULOAD
         " %3" PRId64 ".%01" PRId64 "%%"
#endif
#if CONFIG_TASK_NAME_SIZE > 0
         " %s %s\n",
#else
         " %s\n",
#endif
         tcb->pid, tcb->sched_priority,
#ifdef CONFIG_SMP
         tcb->cpu,
#endif
#ifdef CONFIG_STACK_COLORATION
         (unsigned long)up_check_tcbstack(tcb),
#endif
         (unsigned long)tcb->adj_stack_size
#ifdef CONFIG_STACK_COLORATION
         , stack_filled / 10, stack_filled % 10,
         /* Flag stacks that are 80% or more consumed */
         (stack_filled >= 10 * 80 ? '!' : ' ')
#endif
#ifdef CONFIG_SCHED_CPULOAD
         , intpart, fracpart
#endif
#if CONFIG_TASK_NAME_SIZE > 0
         , tcb->name
#else
         , "<noname>"
#endif
         , args
        );
}
/****************************************************************************
 * Name: arm64_dump_backtrace
 *
 * Description:
 *   nxsched_foreach() callback: dump the backtrace of one task.
 *
 ****************************************************************************/
#ifdef CONFIG_SCHED_BACKTRACE
static void arm64_dump_backtrace(struct tcb_s *tcb, void *arg)
{
  /* Show back trace */
  sched_dumpstack(tcb->pid);
}
#endif
/****************************************************************************
 * Name: arm64_showtasks
 *
 * Description:
 *   Print a header row, an optional row for the interrupt stack, and then
 *   one summary line (via arm64_dump_task) for every task in the crash
 *   environment.
 *
 ****************************************************************************/
static void arm64_showtasks(void)
{
#if CONFIG_ARCH_INTERRUPTSTACK > 15
# ifdef CONFIG_STACK_COLORATION
  uint64_t stack_used = up_check_intstack();
  uint64_t stack_filled = 0;
  if ((CONFIG_ARCH_INTERRUPTSTACK & ~15) > 0 && stack_used > 0)
    {
      /* Use fixed-point math with one decimal place */
      stack_filled = 10 * 100 *
                     stack_used / (CONFIG_ARCH_INTERRUPTSTACK & ~15);
    }
# endif
#endif
  /* Dump interesting properties of each task in the crash environment */
  _alert(" PID PRI"
#ifdef CONFIG_SMP
         " CPU"
#endif
#ifdef CONFIG_STACK_COLORATION
         " USED"
#endif
         " STACK"
#ifdef CONFIG_STACK_COLORATION
         " FILLED "
#endif
#ifdef CONFIG_SCHED_CPULOAD
         " CPU"
#endif
         " COMMAND\n");
  /* This guard must match the declarations above (> 15, the 16-byte
   * aligned interrupt stack): with the previous "> 7" guard, values of
   * CONFIG_ARCH_INTERRUPTSTACK in 8..15 referenced stack_used/stack_filled
   * without declaring them when CONFIG_STACK_COLORATION is enabled.
   */
#if CONFIG_ARCH_INTERRUPTSTACK > 15
  _alert(" ---- ----"
# ifdef CONFIG_SMP
         " ----"
# endif
# ifdef CONFIG_STACK_COLORATION
         " %7lu"
# endif
         " %7lu"
# ifdef CONFIG_STACK_COLORATION
         " %3" PRId64 ".%1" PRId64 "%%%c"
# endif
# ifdef CONFIG_SCHED_CPULOAD
         " ----"
# endif
# if CONFIG_TASK_NAME_SIZE > 0
         " irq"
# endif
         "\n"
# ifdef CONFIG_STACK_COLORATION
         , (unsigned long)stack_used
# endif
         , (unsigned long)(CONFIG_ARCH_INTERRUPTSTACK & ~15)
# ifdef CONFIG_STACK_COLORATION
         , stack_filled / 10, stack_filled % 10,
         (stack_filled >= 10 * 80 ? '!' : ' ')
# endif
        );
#endif
  nxsched_foreach(arm64_dump_task, NULL);
#ifdef CONFIG_SCHED_BACKTRACE
  nxsched_foreach(arm64_dump_backtrace, NULL);
#endif
}
/****************************************************************************
* Name: assert_tracecallback
****************************************************************************/
#ifdef CONFIG_ARCH_USBDUMP
static int usbtrace_syslog(const char *fmt, ...)
{
  va_list args;

  /* Forward the formatted trace output to vsyslog() at emergency level */

  va_start(args, fmt);
  vsyslog(LOG_EMERG, fmt, args);
  va_end(args);

  return OK;
}
static int assert_tracecallback(struct usbtrace_s *trace, void *arg)
{
  /* Render one USB trace record through the syslog-backed printer */

  usbtrace_trprintf(usbtrace_syslog, trace->event, trace->value);

  return 0;
}
#endif
/****************************************************************************
 * Name: arm64_dump_stack
 *
 * Description:
 *   Report one stack region (tag, pointer, base, size) and dump its
 *   contents.  If sp lies outside [base, base + size), an error is
 *   reported instead; the dump then happens only when 'force' is set.
 *
 ****************************************************************************/
static void arm64_dump_stack(const char *tag, uint64_t sp,
                             uint64_t base, uint64_t size, bool force)
{
  uint64_t top = base + size;

  _alert("%s Stack:\n", tag);
  _alert("sp: %08" PRIx64 "\n", sp);
  _alert(" base: %08" PRIx64 "\n", base);
  _alert(" size: %08" PRIx64 "\n", size);

  /* Guard clause: handle an out-of-range stack pointer and return */

  if (sp < base || sp >= top)
    {
      _alert("ERROR: %s Stack pointer is not within the stack\n", tag);

      if (force)
        {
          arm64_stackdump(base, top);
        }

      return;
    }

  arm64_stackdump(sp, top);
}
/****************************************************************************
 * Name: arm64_dumpstate_assert (for assert)
 *
 * Description:
 *   Full crash-state dump: backtrace, register dump, IRQ/user/kernel
 *   stack dumps, per-task summary and (optionally) USB trace data.
 *
 ****************************************************************************/
static void arm64_dumpstate_assert(void)
{
  struct tcb_s *rtcb = (struct tcb_s *)arch_get_current_tcb();
  uint64_t sp = up_getsp();
  /* Show back trace */
#ifdef CONFIG_SCHED_BACKTRACE
  sched_dumpstack(rtcb->pid);
#endif
  /* Update the xcp context */
  if (CURRENT_REGS)
    {
      /* in interrupt: the interrupted context was already saved there */
      rtcb->xcp.regs = (uint64_t *)CURRENT_REGS;
    }
  else
    {
      up_saveusercontext(rtcb->xcp.regs);
    }
  /* Dump the registers */
  arm64_registerdump((struct regs_context *)rtcb->xcp.regs);
  /* Dump the irq stack */
#if CONFIG_ARCH_INTERRUPTSTACK > 7
  arm64_dump_stack("IRQ", sp,
# ifdef CONFIG_SMP
                   (uint64_t)arm64_intstack_alloc(),
# else
                   (uint64_t)&g_interrupt_stack,
# endif
                   (CONFIG_ARCH_INTERRUPTSTACK & ~15),
                   !!CURRENT_REGS);
  if (CURRENT_REGS)
    {
      /* NOTE(review): REG_X13 looks like a carry-over from 32-bit ARM,
       * where R13 is the stack pointer.  On AArch64 the interrupted SP
       * is saved separately (sp_elx in struct regs_context) -- verify
       * this should not be REG_SP_ELX or equivalent.
       */
      sp = CURRENT_REGS[REG_X13];
    }
#endif
  /* Dump the user stack */
  arm64_dump_stack("User", sp,
                   (uint64_t)rtcb->stack_base_ptr,
                   (uint64_t)rtcb->adj_stack_size,
                   /* With a kernel stack, sp may legitimately not lie in
                    * the user stack, so do not force the dump.
                    */
#ifdef CONFIG_ARCH_KERNEL_STACK
                   false
#else
                   true
#endif
                  );
#ifdef CONFIG_ARCH_KERNEL_STACK
  arm64_dump_stack("Kernel", sp,
                   (uint64_t)rtcb->xcp.kstack,
                   CONFIG_ARCH_KERNEL_STACKSIZE,
                   false);
#endif
  /* Dump the state of all tasks (if available) */
  arm64_showtasks();
#ifdef CONFIG_ARCH_USBDUMP
  /* Dump USB trace data */
  usbtrace_enumerate(assert_tracecallback, NULL);
#endif
}
#endif /* CONFIG_ARCH_STACKDUMP */
/****************************************************************************
 * Name: arm64_dump_fatal
 *
 * Description:
 *   Dump the state for a fatal exception: backtrace of the interrupted
 *   task (if enabled) followed by a register dump of the trapped context.
 *
 * Input Parameters:
 *   regs - Exception context saved at the fatal trap.
 *
 ****************************************************************************/
void arm64_dump_fatal(struct regs_context * regs)
{
#ifdef CONFIG_SCHED_BACKTRACE
  /* The saved TPIDR_EL1 holds the interrupted task's TCB pointer */
  struct tcb_s *rtcb = (struct tcb_s *)regs->tpidr_el1;
  /* Show back trace */
  sched_dumpstack(rtcb->pid);
#endif
  /* Dump the registers */
  arm64_registerdump(regs);
}
void up_mdelay(unsigned int milliseconds)
{
  volatile unsigned int msec;
  volatile unsigned int tick;

  /* Calibrated busy-wait: burn CONFIG_BOARD_LOOPSPERMSEC empty loop
   * iterations per requested millisecond.  The volatile counters keep
   * the compiler from optimizing the loops away.
   */

  for (msec = 0; msec < milliseconds; msec++)
    {
      for (tick = 0; tick < CONFIG_BOARD_LOOPSPERMSEC; tick++)
        {
        }
    }
}
/****************************************************************************
 * Name: arm64_assert
 *
 * Description:
 *   Final assertion handling: if the assertion occurred in an interrupt
 *   handler or in the idle task there is nothing to fall back to, so
 *   disable interrupts and spin forever.
 *
 ****************************************************************************/
static void arm64_assert(void)
{
  /* Push out any buffered SYSLOG output before we potentially hang */

  syslog_flush();

  /* In an interrupt handler, or asserting from the idle task? */

  if (CURRENT_REGS || (running_task())->flink == NULL)
    {
      /* Shut off interrupts on this CPU and never return */

      up_irq_save();

      while (1)
        {
#ifdef CONFIG_SMP
          /* Keep (re)trying to stop activity on the other CPUs */

          spin_trylock(&g_cpu_irqlock);
#endif

          up_mdelay(250);
        }
    }
}
/****************************************************************************
 * Public Functions
 ****************************************************************************/
/****************************************************************************
 * Name: up_assert
 *
 * Description:
 *   Assertion entry point: report the failing file/line (and CPU/task if
 *   configured), dump the machine state, run the optional board crash
 *   dump, and finally hang via arm64_assert() when recovery is not
 *   possible.
 *
 * Input Parameters:
 *   filename - Source file of the failed assertion
 *   lineno   - Line number of the failed assertion
 *
 ****************************************************************************/
void up_assert(const char *filename, int lineno)
{
#ifdef CONFIG_ARCH_FPU
  /* Stop lazy FPU context handling while we dump state */
  arm64_fpu_disable();
#endif
  /* Flush any buffered SYSLOG data (prior to the assertion) */
  syslog_flush();
  _alert("Assertion failed "
#ifdef CONFIG_SMP
         "CPU%d "
#endif
         "at file:%s line: %d"
#if CONFIG_TASK_NAME_SIZE > 0
         " task: %s"
#endif
         "\n",
#ifdef CONFIG_SMP
         up_cpu_index(),
#endif
         filename, lineno
#if CONFIG_TASK_NAME_SIZE > 0
         , running_task()->name
#endif
        );
#ifdef CONFIG_ARCH_STACKDUMP
  arm64_dumpstate_assert();
#endif
  /* Flush any buffered SYSLOG data (from the above) */
  syslog_flush();
#ifdef CONFIG_BOARD_CRASHDUMP
  board_crashdump(up_getsp(), running_task(), filename, lineno);
#endif
  arm64_assert();
}

View file

@ -0,0 +1,184 @@
/****************************************************************************
* arch/arm64/src/common/arm64_backtrace.c
*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership. The
* ASF licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the
* License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
****************************************************************************/
/****************************************************************************
* Included Files
****************************************************************************/
#include <nuttx/config.h>
#include <nuttx/arch.h>
#include "sched/sched.h"
#include "arm64_internal.h"
#include "arm64_arch.h"
/****************************************************************************
* Pre-processor Definitions
****************************************************************************/
#if defined(CONFIG_FRAME_POINTER)
/****************************************************************************
 * Name: backtrace
 *
 * Description:
 *   Walk the saved frame-pointer chain, collecting return addresses into
 *   buffer.  In this context layout the return address is stored at *fp
 *   and the previous frame pointer at *(fp - 1).
 *
 * Input Parameters:
 *   base   - Lowest valid stack address (inclusive)
 *   limit  - Highest valid stack address (exclusive)
 *   fp     - Frame pointer to start the walk from
 *   pc     - Optional PC to record as the first entry (may be NULL)
 *   buffer - Output array of return addresses
 *   size   - Capacity of buffer
 *   skip   - In/out count of leading frames to drop from the output
 *
 * Returned Value:
 *   Number of frames visited (including skipped ones and the PC entry).
 *
 ****************************************************************************/
#ifdef CONFIG_MM_KASAN
__attribute__((no_sanitize_address))
#endif
static int backtrace(uintptr_t *base, uintptr_t *limit,
                     uintptr_t *fp, uintptr_t *pc,
                     void **buffer, int size, int *skip)
{
  int i = 0;

  if (pc)
    {
      i++;

      /* Note: (*skip)-- decrements the caller's count.  The previous
       * "*skip--" decremented the POINTER (postfix binds tighter than
       * unary *), reading out-of-bounds ints on later iterations.
       */

      if ((*skip)-- <= 0)
        {
          *buffer++ = pc;
        }
    }

  for (; i < size; fp = (uintptr_t *)*(fp - 1), i++)
    {
      /* Stop when fp leaves [base, limit) or the return slot is NULL.
       * fp >= limit (not >) because dereferencing at limit itself would
       * read one word past the end of the stack region.
       */

      if (fp >= limit || fp < base || *fp == 0)
        {
          break;
        }

      if ((*skip)-- <= 0)
        {
          *buffer++ = (void *)*fp;
        }
    }

  return i;
}
/****************************************************************************
* Public Functions
****************************************************************************/
/****************************************************************************
* Name: up_backtrace
*
* Description:
* up_backtrace() returns a backtrace for the TCB, in the array
* pointed to by buffer. A backtrace is the series of currently active
* function calls for the program. Each item in the array pointed to by
* buffer is of type void *, and is the return address from the
* corresponding stack frame. The size argument specifies the maximum
* number of addresses that can be stored in buffer. If the backtrace is
* larger than size, then the addresses corresponding to the size most
* recent function calls are returned; to obtain the complete backtrace,
* make sure that buffer and size are large enough.
*
* Input Parameters:
* tcb - Address of the task's TCB
* buffer - Return address from the corresponding stack frame
* size - Maximum number of addresses that can be stored in buffer
* skip - number of addresses to be skipped
*
* Returned Value:
* up_backtrace() returns the number of addresses returned in buffer
*
****************************************************************************/
#ifdef CONFIG_MM_KASAN
__attribute__((no_sanitize_address))
#endif
int up_backtrace(struct tcb_s *tcb,
                 void **buffer, int size, int skip)
{
  struct tcb_s *rtcb = (struct tcb_s *)arch_get_current_tcb();
  struct regs_context * p_regs;
#if CONFIG_ARCH_INTERRUPTSTACK > 7
  void *istacklimit;
#endif
  irqstate_t flags;
  int ret;
  /* Nothing to do for an empty or missing output buffer */
  if (size <= 0 || !buffer)
    {
      return 0;
    }
  if (tcb == NULL || tcb == rtcb)
    {
      /* Backtrace of the currently running task */
      if (up_interrupt_context())
        {
          /* First walk the interrupt stack from the current frame... */
#if CONFIG_ARCH_INTERRUPTSTACK > 7
# ifdef CONFIG_SMP
          istacklimit = (void *)arm64_intstack_top();
# else
          istacklimit = g_interrupt_stack + INTSTACK_SIZE;
# endif /* CONFIG_SMP */
          /* NOTE(review): arithmetic on void * relies on the GCC
           * extension (sizeof(void) == 1) -- confirm this is intended.
           */
          ret = backtrace(istacklimit - (CONFIG_ARCH_INTERRUPTSTACK & ~15),
                          istacklimit,
                          (void *)__builtin_frame_address(0),
                          NULL, buffer, size, &skip);
#else
          ret = backtrace(rtcb->stack_base_ptr,
                          rtcb->stack_base_ptr + rtcb->adj_stack_size,
                          (void *)__builtin_frame_address(0),
                          NULL, buffer, size, &skip);
#endif /* CONFIG_ARCH_INTERRUPTSTACK > 7 */
          /* ...then continue on the interrupted task's stack using the
           * context saved at interrupt entry.
           */
          if (ret < size)
            {
              p_regs = (struct regs_context *)CURRENT_REGS;
              ret += backtrace(rtcb->stack_base_ptr,
                               rtcb->stack_base_ptr + rtcb->adj_stack_size,
                               (void *)p_regs->regs[REG_X29],
                               (void *)p_regs->elr,
                               &buffer[ret], size - ret, &skip);
            }
        }
      else
        {
          /* Not in an interrupt: walk from our own current frame */
          ret = backtrace(rtcb->stack_base_ptr,
                          rtcb->stack_base_ptr + rtcb->adj_stack_size,
                          (void *)__builtin_frame_address(0),
                          NULL, buffer, size, &skip);
        }
    }
  else
    {
      /* Backtrace of another task.
       * NOTE(review): this reads CURRENT_REGS, which is the interrupt
       * context of THIS CPU, not the saved context of 'tcb' -- it looks
       * like tcb->xcp.regs was intended here; verify.
       */
      flags = enter_critical_section();
      p_regs = (struct regs_context *)CURRENT_REGS;
      ret = backtrace(tcb->stack_base_ptr,
                      tcb->stack_base_ptr + tcb->adj_stack_size,
                      (void *)p_regs->regs[REG_X29],
                      (void *)p_regs->elr,
                      buffer, size, &skip);
      leave_critical_section(flags);
    }
  return ret;
}
#endif /* CONFIG_FRAME_POINTER */

View file

@ -0,0 +1,162 @@
/****************************************************************************
* arch/arm64/src/common/arm64_blocktask.c
*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership. The
* ASF licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the
* License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
****************************************************************************/
/****************************************************************************
* Included Files
****************************************************************************/
#include <nuttx/config.h>
#include <stdbool.h>
#include <sched.h>
#include <assert.h>
#include <debug.h>
#include <nuttx/arch.h>
#include <nuttx/sched.h>
#include "sched/sched.h"
#include "group/group.h"
#include "arm64_internal.h"
/****************************************************************************
* Public Functions
****************************************************************************/
/****************************************************************************
* Name: up_block_task
*
* Description:
* The currently executing task at the head of the ready to run list must
* be stopped. Save its context and move it to the inactive list
* specified by task_state.
*
* Input Parameters:
* tcb: Refers to a task in the ready-to-run list (normally the task at
* the head of the list). It must be stopped, its context saved and
* moved into one of the waiting task lists. If it was the task at the
* head of the ready-to-run list, then a context switch to the new
* ready to run task must be performed.
* task_state: Specifies which waiting task list should hold the blocked
* task TCB.
*
****************************************************************************/
void up_block_task(struct tcb_s *tcb, tstate_t task_state)
{
  struct tcb_s *rtcb;
  bool switch_needed;
  rtcb = this_task();
  /* Verify that the context switch can be performed */
  DEBUGASSERT((tcb->task_state >= FIRST_READY_TO_RUN_STATE) &&
              (tcb->task_state <= LAST_READY_TO_RUN_STATE));
  /* Remove the tcb task from the ready-to-run list. If we are blocking the
   * task at the head of the task list (the most likely case), then a
   * context switch to the next ready-to-run task is needed. In this case,
   * it should also be true that rtcb == tcb.
   */
  switch_needed = nxsched_remove_readytorun(tcb);
  /* Add the task to the specified blocked task list */
  nxsched_add_blocked(tcb, (tstate_t)task_state);
  /* If there are any pending tasks, then add them to the ready-to-run
   * task list now
   */
  if (g_pendingtasks.head)
    {
      switch_needed |= nxsched_merge_pending();
    }
  /* Now, perform the context switch if one is needed */
  if (switch_needed)
    {
      /* Update scheduler parameters */
      nxsched_suspend_scheduler(rtcb);
      /* Are we in an interrupt handler? */
      if (CURRENT_REGS)
        {
          /* Yes, then we have to do things differently.
           * Just copy the CURRENT_REGS into the OLD rtcb.
           */
          arm64_savestate(rtcb->xcp.regs);
          /* Restore the exception context of the rtcb at the (new) head
           * of the ready-to-run task list.
           */
          rtcb = this_task();
          /* Reset scheduler parameters */
          nxsched_resume_scheduler(rtcb);
          /* Then switch contexts. Any necessary address environment
           * changes will be made when the interrupt returns.
           */
          arm64_restorestate(rtcb->xcp.regs);
        }
      /* No, then we will need to perform the user context switch */
      else
        {
          struct tcb_s *nexttcb = this_task();
#ifdef CONFIG_ARCH_ADDRENV
          /* Make sure that the address environment for the previously
           * running task is closed down gracefully (data caches dump,
           * MMU flushed) and set up the address environment for the new
           * thread at the head of the ready-to-run list.
           */
          group_addrenv(nexttcb);
#endif
          /* Reset scheduler parameters */
          nxsched_resume_scheduler(nexttcb);
          /* Switch context to the context of the task at the head of the
           * ready to run list.
           */
          arm64_switchcontext(&rtcb->xcp.regs, nexttcb->xcp.regs);
          /* arm64_switchcontext forces a context switch to the task at
           * the head of the ready-to-run list. It does not 'return' in
           * the normal sense. When it does return, it is because the
           * blocked task is again ready to run and has execution
           * priority.
           */
        }
    }
}

View file

@ -0,0 +1,184 @@
/****************************************************************************
* arch/arm64/src/common/arm64_boot.c
*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership. The
* ASF licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the
* License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
****************************************************************************/
/****************************************************************************
* Included Files
****************************************************************************/
#include <nuttx/config.h>
#include <nuttx/arch.h>
#include <nuttx/init.h>
#include "arm64_internal.h"
#include "arm64_arch.h"
extern void *_vector_table[];
/****************************************************************************
* Public Functions
****************************************************************************/
/* EL3 early init: install the vector table, open up the feature traps
 * (CPTR_EL3), configure non-secure AArch64 EL2 entry (SCR_EL3), and set
 * up the GIC system-register interface (ICC_SRE_EL3).
 */
void arm64_boot_el3_init(void)
{
  uint64_t reg;
  /* Setup vector table */
  write_sysreg((uint64_t)_vector_table, vbar_el3);
  ARM64_ISB();
  reg = 0U; /* Mostly RES0 */
  reg &= ~(CPTR_TTA_BIT | /* Do not trap sysreg accesses */
           CPTR_TFP_BIT | /* Do not trap SVE, SIMD and FP */
           CPTR_TCPAC_BIT); /* Do not trap CPTR_EL2 CPACR_EL1 accesses */
  /* CPTR_EL3, Architectural Feature Trap Register (EL3) */
  write_sysreg(reg, cptr_el3);
  reg = 0U; /* Reset */
  reg |= SCR_NS_BIT; /* EL2 / EL3 non-secure */
  reg |= (SCR_RES1 | /* RES1 */
          SCR_RW_BIT | /* EL2 execution state is AArch64 */
          SCR_ST_BIT | /* Do not trap EL1 accesses to timer */
          SCR_HCE_BIT | /* Do not trap HVC */
          SCR_SMD_BIT); /* Do not trap SMC */
  write_sysreg(reg, scr_el3);
  reg = read_sysreg(ICC_SRE_EL3);
  reg |= (ICC_SRE_ELX_DFB_BIT | /* Disable FIQ bypass */
          ICC_SRE_ELX_DIB_BIT | /* Disable IRQ bypass */
          ICC_SRE_ELX_SRE_BIT | /* System register interface is used */
          ICC_SRE_EL3_EN_BIT); /* Enables lower Exception level access to
                                * ICC_SRE_EL1 */
  write_sysreg(reg, ICC_SRE_EL3);
  ARM64_ISB();
}
/* Prepare the EL3 exception return: a subsequent ERET will drop to EL2
 * (EL2t, SP_EL0, DAIF masked) at switch_addr.
 */
void arm64_boot_el3_get_next_el(uint64_t switch_addr)
{
  uint64_t spsr;
  /* ERET target address */
  write_sysreg(switch_addr, elr_el3);
  /* Mask the DAIF */
  spsr = SPSR_DAIF_MASK;
  spsr |= SPSR_MODE_EL2T;
  write_sysreg(spsr, spsr_el3);
}
/* EL2 early init: enable i-cache and SP alignment checking, select
 * AArch64 for EL1, open the feature traps, and give EL1 access to the
 * generic timers with a zero virtual offset.
 */
void arm64_boot_el2_init(void)
{
  uint64_t reg;
  reg = read_sysreg(sctlr_el2);
  reg |= (SCTLR_EL2_RES1 | /* RES1 */
          SCTLR_I_BIT | /* Enable i-cache */
          SCTLR_SA_BIT); /* Enable SP alignment check */
  write_sysreg(reg, sctlr_el2);
  reg = read_sysreg(hcr_el2);
  reg |= HCR_RW_BIT; /* EL1 Execution state is AArch64 */
  write_sysreg(reg, hcr_el2);
  reg = 0U; /* RES0 */
  reg |= CPTR_EL2_RES1; /* RES1 */
  reg &= ~(CPTR_TFP_BIT | /* Do not trap SVE, SIMD and FP */
           CPTR_TCPAC_BIT); /* Do not trap CPACR_EL1 accesses */
  write_sysreg(reg, cptr_el2);
  /* Enable EL1 access to timers */
  reg = read_sysreg(cnthctl_el2);
  reg |= (CNTHCTL_EL2_EL1PCEN_EN | CNTHCTL_EL2_EL1PCTEN_EN);
  write_sysreg(reg, cnthctl_el2);
  zero_sysreg(cntvoff_el2); /* Set 64-bit virtual timer offset to 0 */
#ifdef CONFIG_ARCH_ARMV8R
  zero_sysreg(cnthps_ctl_el2);
#else
  zero_sysreg(cnthp_ctl_el2);
#endif
  /* Enable this if/when we use the hypervisor timer.
   * write_cnthp_cval_el2(~(uint64_t)0);
   */
  ARM64_ISB();
}
/* EL1 early init: install the vector table, allow NEON/SIMD/FP use
 * (CPACR_EL1), enable i-cache and SP alignment checking, and park the
 * virtual timer compare value.
 */
void arm64_boot_el1_init(void)
{
  uint64_t reg;
  /* Setup vector table */
  write_sysreg((uint64_t)_vector_table, vbar_el1);
  ARM64_ISB();
  reg = 0U; /* RES0 */
  reg |= CPACR_EL1_FPEN_NOTRAP; /* Do not trap NEON/SIMD/FP initially */
  /* TODO: CONFIG_FLOAT_*_FORBIDDEN */
  write_sysreg(reg, cpacr_el1);
  reg = read_sysreg(sctlr_el1);
  reg |= (SCTLR_EL1_RES1 | /* RES1 */
          SCTLR_I_BIT | /* Enable i-cache */
          SCTLR_SA_BIT); /* Enable SP alignment check */
  write_sysreg(reg, sctlr_el1);
  /* Park the virtual timer: max compare value so it does not fire */
  write_sysreg((~(uint64_t)0), cntv_cval_el0);
  /* Enable these if/when we use the corresponding timers.
   * write_cntp_cval_el0(~(uint64_t)0);
   * write_cntps_cval_el1(~(uint64_t)0);
   */
  ARM64_ISB();
}
/* A minimal memset() replacement for early boot use: the libc version
 * depends on the MMU already being active.
 */

static void boot_early_memset(void *dst, int c, size_t n)
{
  uint8_t *out = dst;
  uint8_t *end = out + n;

  while (out < end)
    {
      *out++ = c;
    }
}
/* First C code on the primary core: clear .bss, run the chip-specific
 * boot hook, then hand control to the NuttX initialization (nx_start()
 * does not return).
 */
void arm64_boot_primary_c_routine(void)
{
  boot_early_memset(_START_BSS, 0, _END_BSS - _START_BSS);
  arm64_chip_boot();
  nx_start();
}

View file

@ -0,0 +1,458 @@
/****************************************************************************
* arch/arm64/src/common/arm64_cache.c
*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership. The
* ASF licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the
* License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
****************************************************************************/
/****************************************************************************
* Included Files
****************************************************************************/
#include <nuttx/config.h>
#include <nuttx/cache.h>
#include <nuttx/irq.h>
#include <nuttx/arch.h>
#include <arch/irq.h>
#include <arch/chip/chip.h>
#include <nuttx/spinlock.h>
#include "arm64_arch.h"
#include "arm64_internal.h"
#include "arm64_mmu.h"
/****************************************************************************
* Pre-processor Macros
****************************************************************************/
/* Common operations for the caches
*
* WB means write-back and intends to transfer dirty cache lines to memory in
* a copy-back cache policy. May be a no-op in write-back cache policy.
*
* INVD means invalidate and will mark cache lines as not valid. A future
* access to the associated address is guaranteed to generate a memory fetch.
*
* armv8 data cache instruction:
*
* DC CIVAC (WB+INVD):
* Data or unified Cache line Clean and Invalidate by VA to PoC
* Clean and Invalidate data cache by address to Point of Coherency.
*
* DC CVAC (WB):
* Data or unified Cache line Clean by VA to PoC
* Clean data cache by address to Point of Coherency.
*
* DC IVAC (INVD):
* Data or unified Cache line Invalidate by VA to PoC
* Invalidate data cache by address to Point of Coherency
*/
#define CACHE_OP_WB BIT(0)
#define CACHE_OP_INVD BIT(1)
#define CACHE_OP_WB_INVD (CACHE_OP_WB | CACHE_OP_INVD)
#define LINE_MASK(line) ((line) - 1)
#define LINE_ALIGN_DOWN(a, line) ((a) & ~LINE_MASK(line))
#define LINE_ALIGN_UP(a, line) \
(((a) + LINE_MASK(line)) & ~LINE_MASK(line))
#define dc_ops(op, val) \
({ \
__asm__ volatile ("dc " op ", %0" : : "r" (val) : "memory"); \
})
/* IC IALLU, Instruction Cache Invalidate All to PoU
 * Purpose
 * Invalidate all instruction caches of the PE executing
 * the instruction to the Point of Unification.
 * (The original comment here described IC IALLUIS; the descriptions for
 * __ic_iallu and __ic_ialluis were swapped.)
 */
static inline void __ic_iallu(void)
{
  __asm__ volatile ("ic iallu" : : : "memory");
}
/* IC IALLUIS, Instruction Cache Invalidate All to PoU, Inner Shareable
 * Purpose
 * Invalidate all instruction caches in the Inner Shareable domain of
 * the PE executing the instruction to the Point of Unification.
 * (The original comment here described IC IALLU; the descriptions for
 * __ic_iallu and __ic_ialluis were swapped.)
 */
static inline void __ic_ialluis(void)
{
  __asm__ volatile ("ic ialluis" : : : "memory");
}
size_t dcache_line_size;
/****************************************************************************
 * Private Functions
 ****************************************************************************/
/* operation for data cache by virtual address to PoC
 *
 * Applies the requested op (CACHE_OP_WB / _INVD / _WB_INVD) line-by-line
 * over [start_addr, end_addr).  The start is aligned DOWN to the line
 * size, so an INVD op also invalidates any bytes sharing the first line.
 *
 * NOTE(review): assumes dcache_line_size has been initialized nonzero
 * elsewhere; a zero value would make this loop never advance -- confirm.
 */
static inline int arm64_dcache_range(uintptr_t start_addr,
                                     uintptr_t end_addr, int op)
{
  /* Align address to line size */
  start_addr = LINE_ALIGN_DOWN(start_addr, dcache_line_size);
  while (start_addr < end_addr)
    {
      switch (op)
        {
          case CACHE_OP_WB:
            {
              dc_ops("cvac", start_addr);
              break;
            }
          case CACHE_OP_INVD:
            {
              dc_ops("ivac", start_addr);
              break;
            }
          case CACHE_OP_WB_INVD:
            {
              dc_ops("civac", start_addr);
              break;
            }
          default:
            {
              DEBUGASSERT(0);
            }
        }
      start_addr += dcache_line_size;
    }
  /* Ensure completion of the maintenance operations */
  ARM64_DSB();
  return 0;
}
/* operation for all data cache
 *
 * Walks every data/unified cache level reported by CLIDR_EL1 and applies
 * the requested op (CACHE_OP_WB / _INVD / _WB_INVD) to every set/way via
 * DC CSW / ISW / CISW.
 */
static inline int arm64_dcache_all(int op)
{
  uint32_t clidr_el1;
  uint32_t csselr_el1;
  uint32_t ccsidr_el1;
  uint8_t loc;
  uint8_t ctype;
  uint8_t cache_level;
  uint8_t line_size;
  uint8_t way_pos;
  uint32_t max_ways;
  uint32_t max_sets;
  uint32_t dc_val;
  uint32_t set;
  uint32_t way;
  /* Data barrier before start */
  ARM64_DSB();
  clidr_el1 = read_sysreg(clidr_el1);
  /* Level of Coherence: number of levels that must be maintained */
  loc = (clidr_el1 >> CLIDR_EL1_LOC_SHIFT) & CLIDR_EL1_LOC_MASK;
  if (!loc)
    {
      return 0;
    }
  for (cache_level = 0; cache_level < loc; cache_level++)
    {
      ctype =
        (clidr_el1 >>
         CLIDR_EL1_CTYPE_SHIFT(cache_level)) & CLIDR_EL1_CTYPE_MASK;
      /* No data cache, continue (ctype < 2: none or instruction-only) */
      if (ctype < 2)
        {
          continue;
        }
      /* select cache level */
      csselr_el1 = cache_level << 1;
      write_sysreg(csselr_el1, csselr_el1);
      ARM64_ISB();
      /* Read this level's geometry from CCSIDR_EL1 */
      ccsidr_el1 = read_sysreg(ccsidr_el1);
      line_size =
        (ccsidr_el1 >> CCSIDR_EL1_LN_SZ_SHIFT & CCSIDR_EL1_LN_SZ_MASK) + 4;
      max_ways =
        (ccsidr_el1 >> CCSIDR_EL1_WAYS_SHIFT) & CCSIDR_EL1_WAYS_MASK;
      max_sets =
        (ccsidr_el1 >> CCSIDR_EL1_SETS_SHIFT) & CCSIDR_EL1_SETS_MASK;
      /* 32-log2(ways), bit position of way in DC operand */
      way_pos = __builtin_clz(max_ways);
      for (set = 0; set <= max_sets; set++)
        {
          for (way = 0; way <= max_ways; way++)
            {
              /* way number, aligned to pos in DC operand */
              dc_val = way << way_pos;
              /* cache level, aligned to pos in DC operand */
              dc_val |= csselr_el1;
              /* set number, aligned to pos in DC operand */
              dc_val |= set << line_size;
              switch (op)
                {
                  case CACHE_OP_WB:
                    {
                      dc_ops("csw", dc_val);
                      break;
                    }
                  case CACHE_OP_INVD:
                    {
                      dc_ops("isw", dc_val);
                      break;
                    }
                  case CACHE_OP_WB_INVD:
                    {
                      dc_ops("cisw", dc_val);
                      break;
                    }
                  default:
                    {
                      DEBUGASSERT(0);
                    }
                }
            }
        }
    }
  /* Restore csselr_el1 to level 0 */
  write_sysreg(0, csselr_el1);
  __ic_iallu();
  ARM64_DSB();
  ARM64_ISB();
  return 0;
}
/****************************************************************************
* Public Functions
****************************************************************************/
/****************************************************************************
* Name: up_invalidate_dcache
*
* Description:
* Invalidate the data cache within the specified region; we will be
* performing a DMA operation in this region and we want to purge old data
* in the cache.
*
* Input Parameters:
* start - virtual start address of region
* end - virtual end address of region + 1
*
* Returned Value:
* None
*
* Assumptions:
* This operation is not atomic. This function assumes that the caller
* has exclusive access to the address range so that no harm is done if
* the operation is pre-empted.
*
****************************************************************************/
void up_invalidate_dcache(uintptr_t start, uintptr_t end)
{
  /* Invalidate by VA to PoC, line by line (DC IVAC) */
  arm64_dcache_range(start, end, CACHE_OP_INVD);
}
/****************************************************************************
 * Name: up_invalidate_dcache_all
 *
 * Description:
 *   Invalidate the entire contents of D cache.
 *
 *   NOTE(review): the original comment claimed L1/L2 operations are made
 *   atomic by disabling interrupts, but this implementation does not
 *   disable interrupts -- confirm whether that guarantee is required.
 *
 * Input Parameters:
 *   None
 *
 * Returned Value:
 *   None
 *
 ****************************************************************************/
void up_invalidate_dcache_all(void)
{
  /* Set/way invalidate of every data cache level (DC ISW) */
  arm64_dcache_all(CACHE_OP_INVD);
}
/****************************************************************************
* Name: up_invalidate_icache_all
*
* Description:
* Invalidate all instruction caches to PoU, also flushes branch target
* cache
*
* Input Parameters:
* None
*
* Returned Value:
* None
*
****************************************************************************/
void up_invalidate_icache_all(void)
{
  /* IC IALLUIS: all i-caches in the Inner Shareable domain, to PoU */
  __ic_ialluis();
}
/****************************************************************************
* Name: up_clean_dcache
*
* Description:
* Clean the data cache within the specified region by flushing the
* contents of the data cache to memory.
*
* Input Parameters:
* start - virtual start address of region
* end - virtual end address of region + 1
*
* Returned Value:
* None
*
* Assumptions:
* This operation is not atomic. This function assumes that the caller
* has exclusive access to the address range so that no harm is done if
* the operation is pre-empted.
*
****************************************************************************/
void up_clean_dcache(uintptr_t start, uintptr_t end)
{
  /* Write back (clean) dirty D-cache lines covering [start, end).
   * For a region no larger than a single cache line, cleaning the
   * whole cache is used instead of the by-range walk.
   */

  if ((end - start) <= dcache_line_size)
    {
      arm64_dcache_all(CACHE_OP_WB);
    }
  else
    {
      arm64_dcache_range(start, end, CACHE_OP_WB);
    }
}
/****************************************************************************
* Name: up_clean_dcache_all
*
* Description:
* Clean the entire data cache within the specified region by flushing the
* contents of the data cache to memory.
*
* NOTE: This operation is un-necessary if the DCACHE is configured in
* write-through mode.
*
* Input Parameters:
* None
*
* Returned Value:
* None
*
* Assumptions:
* This operation is not atomic. This function assumes that the caller
* has exclusive access to the address range so that no harm is done if
* the operation is pre-empted.
*
****************************************************************************/
void up_clean_dcache_all(void)
{
  /* Write back all dirty lines in the data cache; lines remain valid */

  arm64_dcache_all(CACHE_OP_WB);
}
/****************************************************************************
* Name: up_flush_dcache
*
* Description:
* Flush the data cache within the specified region by cleaning and
* invalidating the D cache.
*
* Input Parameters:
* start - virtual start address of region
* end - virtual end address of region + 1
*
* Returned Value:
* None
*
* Assumptions:
* This operation is not atomic. This function assumes that the caller
* has exclusive access to the address range so that no harm is done if
* the operation is pre-empted.
*
****************************************************************************/
void up_flush_dcache(uintptr_t start, uintptr_t end)
{
  /* Clean + invalidate D-cache lines covering [start, end).  Tiny
   * regions (at most one cache line) fall back to a whole-cache flush.
   */

  if ((end - start) <= dcache_line_size)
    {
      arm64_dcache_all(CACHE_OP_WB_INVD);
    }
  else
    {
      arm64_dcache_range(start, end, CACHE_OP_WB_INVD);
    }
}
/****************************************************************************
* Name: up_flush_dcache_all
*
* Description:
* Flush the entire data cache by cleaning and invalidating the D cache.
*
* NOTE: If DCACHE write-through is configured, then this operation is the
* same as up_invalidate_cache_all().
*
* Input Parameters:
* None
*
* Returned Value:
* None
*
* Assumptions:
* This operation is not atomic. This function assumes that the caller
* has exclusive access to the address range so that no harm is done if
* the operation is pre-empted.
*
****************************************************************************/
void up_flush_dcache_all(void)
{
  /* Clean and invalidate the entire data cache */

  arm64_dcache_all(CACHE_OP_WB_INVD);
}

View file

@ -0,0 +1,239 @@
/****************************************************************************
* arch/arm64/src/common/arm64_checkstack.c
*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership. The
* ASF licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the
* License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
****************************************************************************/
/****************************************************************************
* Included Files
****************************************************************************/
#include <nuttx/config.h>
#include <sys/types.h>
#include <stdint.h>
#include <sched.h>
#include <assert.h>
#include <debug.h>
#include <nuttx/arch.h>
#include <nuttx/board.h>
#include "sched/sched.h"
#include "arm64_internal.h"
#ifdef CONFIG_STACK_COLORATION
/****************************************************************************
* Pre-processor Macros
****************************************************************************/
/****************************************************************************
* Private Function Prototypes
****************************************************************************/
static size_t do_stackcheck(void *stackbase, size_t nbytes);
/****************************************************************************
* Private Functions
****************************************************************************/
/****************************************************************************
* Name: do_stackcheck
*
* Description:
* Determine (approximately) how much stack has been used by searching the
* stack memory for a high water mark. That is, the deepest level of the
* stack that clobbered some recognizable marker in the stack memory.
*
* Input Parameters:
* alloc - Allocation base address of the stack
* size - The size of the stack in bytes
*
* Returned Value:
* The estimated amount of stack space used.
*
****************************************************************************/
static size_t do_stackcheck(void *stackbase, size_t nbytes)
{
  uintptr_t start;
  uintptr_t end;
  uint32_t *ptr;
  size_t mark;

  if (nbytes == 0)
    {
      return 0;
    }

  /* Take extra care that we do not check outside the stack boundaries */

  start = STACK_ALIGN_UP((uintptr_t)stackbase);
  end   = STACK_ALIGN_DOWN((uintptr_t)stackbase + nbytes);

  /* Get the adjusted size based on the top and bottom of the stack */

  nbytes = end - start;

  /* The ARM uses a push-down stack:  the stack grows toward lower
   * addresses in memory.  We need to start at the lowest address in the
   * stack memory allocation and search to higher addresses.  The first
   * word we encounter that does not have the magic value is the high
   * water mark.
   *
   * FIX: arm64_stack_color() writes the coloration value as 32-bit
   * words, so the scan must also use 32-bit accesses.  The previous
   * 64-bit scan both failed to match STACK_COLOR and, combined with the
   * 32-bit word count (nbytes >> 2), read past the end of the stack
   * allocation.  The count is also now tested BEFORE dereferencing so a
   * fully colored stack does not read one word beyond 'end'.
   */

  for (ptr = (uint32_t *)start, mark = (nbytes >> 2);
       mark > 0 && *ptr == STACK_COLOR;
       ptr++, mark--);

  /* If the stack is completely used, then this might mean that the stack
   * overflowed from above (meaning that the stack is too small), or may
   * have been overwritten from below meaning that some other stack or
   * data structure overflowed.
   *
   * If you see returned values saying that the entire stack is being
   * used then enable the following logic to see it there are unused
   * areas in the middle of the stack.
   */

#if 0
  if (mark + 16 > (nbytes >> 2))
    {
      int i;
      int j;

      ptr = (uint32_t *)start;
      for (i = 0; i < nbytes; i += 4 * 64)
        {
          for (j = 0; j < 64; j++)
            {
              int ch;

              if (*ptr++ == STACK_COLOR)
                {
                  ch = '.';
                }
              else
                {
                  ch = 'X';
                }

              up_putc(ch);
            }

          up_putc('\n');
        }
    }
#endif

  /* Return our guess about how much stack space was used:  'mark' is
   * the number of 32-bit words at or above the high water mark.
   */

  return mark << 2;
}
/****************************************************************************
* Public Functions
****************************************************************************/
/****************************************************************************
* Name: arm64_stack_color
*
* Description:
* Write a well know value into the stack
*
****************************************************************************/
void arm64_stack_color(void *stackbase, size_t nbytes)
{
  uint32_t *ptr;
  uintptr_t start;
  uintptr_t end;
  size_t nwords;

  /* Clip the region to aligned boundaries so that nothing outside the
   * stack allocation is written.  A zero size means "color the running
   * stack", i.e. from the stack base down to the current stack pointer.
   */

  start = STACK_ALIGN_UP((uintptr_t)stackbase);

  if (nbytes != 0)
    {
      end = STACK_ALIGN_DOWN((uintptr_t)stackbase + nbytes);
    }
  else
    {
      end = up_getsp();
    }

  /* Fill every 32-bit word of the adjusted region with STACK_COLOR */

  nwords = (end - start) >> 2;

  for (ptr = (uint32_t *)start; nwords > 0; nwords--)
    {
      *ptr++ = STACK_COLOR;
    }
}
/****************************************************************************
* Name: up_check_stack and friends
*
* Description:
* Determine (approximately) how much stack has been used by searching the
* stack memory for a high water mark. That is, the deepest level of the
* stack that clobbered some recognizable marker in the stack memory.
*
* Input Parameters:
* None
*
* Returned Value:
* The estimated amount of stack space used.
*
****************************************************************************/
size_t up_check_tcbstack(struct tcb_s *tcb)
{
  /* Measure the high water mark of the given thread's stack */

  return do_stackcheck(tcb->stack_base_ptr, tcb->adj_stack_size);
}
ssize_t up_check_tcbstack_remain(struct tcb_s *tcb)
{
  /* Unused stack = adjusted stack size minus the measured usage */

  return tcb->adj_stack_size - up_check_tcbstack(tcb);
}
size_t up_check_stack(void)
{
  /* Stack usage of the currently running task */

  return up_check_tcbstack(this_task());
}
ssize_t up_check_stack_remain(void)
{
  /* Remaining (uncolored) stack of the currently running task */

  return up_check_tcbstack_remain(this_task());
}
#if CONFIG_ARCH_INTERRUPTSTACK > 7
size_t up_check_intstack(void)
{
#ifdef CONFIG_SMP
  /* SMP: each CPU has its own per-CPU interrupt stack allocation */

  return do_stackcheck((void *)arm64_intstack_alloc(),
                       STACK_ALIGN_DOWN(CONFIG_ARCH_INTERRUPTSTACK));
#else
  /* Single CPU: the interrupt stack is the static g_interrupt_stack */

  return do_stackcheck((void *)&g_interrupt_stack,
                       STACK_ALIGN_DOWN(CONFIG_ARCH_INTERRUPTSTACK));
#endif
}
size_t up_check_intstack_remain(void)
{
  /* Unused interrupt stack = aligned size minus measured usage */

  return STACK_ALIGN_DOWN(CONFIG_ARCH_INTERRUPTSTACK) - up_check_intstack();
}
#endif
#endif /* CONFIG_STACK_COLORATION */

View file

@ -0,0 +1,121 @@
/****************************************************************************
* arch/arm64/src/common/arm64_copystate.c
*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership. The
* ASF licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the
* License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
****************************************************************************/
/****************************************************************************
* Included Files
****************************************************************************/
#include <nuttx/config.h>
#include <stdint.h>
#include <stdbool.h>
#include <sched.h>
#include <debug.h>
#include <assert.h>
#include <nuttx/arch.h>
#include <nuttx/sched.h>
#include <arch/syscall.h>
#include <arch/irq.h>
#include "arm64_internal.h"
#ifdef CONFIG_ARCH_FPU
#include "arm64_fpu.h"
#endif
/****************************************************************************
* Pre-processor Definitions
****************************************************************************/
/****************************************************************************
* Private Data
****************************************************************************/
/****************************************************************************
* Private Functions
****************************************************************************/
/****************************************************************************
* Public Functions
****************************************************************************/
#ifdef CONFIG_ARCH_FPU
int arch_save_fpucontext(void *saveregs)
{
  irqstate_t flags;
  uint64_t *p_save;

  /* Take a snapshot of the thread context right now */

  flags = enter_critical_section();

  /* NOTE(review): arithmetic on a void pointer is a GCC extension that
   * advances by BYTES, so XCPTCONTEXT_GP_SIZE is used here as a byte
   * offset.  arm64_syscall_save_context() adds the same macro to a
   * uint64_t pointer (an ELEMENT offset).  The two uses disagree in
   * units -- confirm what XCPTCONTEXT_GP_SIZE measures.
   */

  p_save = saveregs + XCPTCONTEXT_GP_SIZE;
  arm64_fpu_save((struct fpu_reg *)p_save);

  /* Ensure the FPU register stores have completed before proceeding */

  ARM64_DSB();
  leave_critical_section(flags);
  return 0;
}
#endif
int arm64_syscall_save_context(uint64_t * regs)
{
  struct regs_context *f_regs;
  uint64_t *p_save;
  int i;

#ifdef CONFIG_ARCH_FPU
  uint64_t *p_fpu;
  struct tcb_s *rtcb;
  struct tcb_s *rtcb_cur =
    (struct tcb_s *)arch_get_current_tcb();
#endif

  /* Save the interrupted context into a caller-supplied area.
   *
   * Syscall register convention (from the usage below):
   *   X1 = TCB of the thread whose context is saved
   *   X2 = destination save area
   */

  DEBUGASSERT(regs);
  f_regs = (struct regs_context *)regs;
  DEBUGASSERT(f_regs->regs[REG_X1] != 0 && f_regs->regs[REG_X2] != 0);

  /* Copy the general-purpose register context first */

  p_save = (uint64_t *)f_regs->regs[REG_X2];
  for (i = 0; i < XCPTCONTEXT_GP_REGS; i++)
    {
      p_save[i] = regs[i];
    }

#ifdef CONFIG_ARCH_FPU
  rtcb = (struct tcb_s *)f_regs->regs[REG_X1];

  /* NOTE(review): this advances a uint64_t pointer by
   * XCPTCONTEXT_GP_SIZE ELEMENTS, while arch_save_fpucontext() adds the
   * same macro to a void pointer (BYTES) -- and the call below then
   * applies that byte offset again on top of this one, so the two
   * branches write the FPU context at different offsets.  Verify the
   * intended save-area layout.
   */

  p_save += XCPTCONTEXT_GP_SIZE;
  if (rtcb_cur == rtcb)
    {
      /* Saving our own context: snapshot the live FPU registers */

      arch_save_fpucontext(p_save);
    }
  else
    {
      /* Saving another thread: copy its stored FPU context */

      p_fpu = (uint64_t *)rtcb->xcp.fpu_regs;
      for (i = 0; i < XCPTCONTEXT_FPU_REGS; i++)
        {
          p_save[i] = p_fpu[i];
        }
    }
#endif

  return OK;
}

View file

@ -0,0 +1,70 @@
/****************************************************************************
* arch/arm64/src/common/arm64_cpu_idle.S
*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership. The
* ASF licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the
* License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
****************************************************************************/
#include <nuttx/config.h>
#include "arm64_arch.h"
#include "arm64_macro.inc"
/****************************************************************************
* Public Symbols
****************************************************************************/
.file "arm64_cpu_idle.S"
/****************************************************************************
* Assembly Macros
****************************************************************************/
/****************************************************************************
* Private Functions
****************************************************************************/
/****************************************************************************
* Public Functions
****************************************************************************/
GTEXT(arch_cpu_idle)
SECTION_FUNC(text, arch_cpu_idle)
#ifdef CONFIG_TRACING_IDLE
    /* Preserve the link register across the trace hook call */
    stp xzr, x30, [sp, #-16]!
    bl sys_trace_idle
    ldp xzr, x30, [sp], #16
#endif
    /* Drain outstanding memory accesses, wait for an interrupt, then
     * unmask IRQs so the pending interrupt is taken after return.
     */
    dsb sy
    wfi
    msr daifclr, #(DAIFCLR_IRQ_BIT)
    ret
GTEXT(arch_cpu_atomic_idle)
SECTION_FUNC(text, arch_cpu_atomic_idle)
#ifdef CONFIG_TRACING_IDLE
    /* Preserve the link register across the trace hook call */
    stp xzr, x30, [sp, #-16]!
    bl sys_trace_idle
    ldp xzr, x30, [sp], #16
#endif
    /* Mask IRQs and wait for an event */
    msr daifset, #(DAIFSET_IRQ_BIT)
    isb
    wfe
    /* Conditionally unmask IRQs based on the IRQ mask bit in x0.
     * NOTE(review): x0 is presumably the caller's saved DAIF state; the
     * DAIF I bit is 1 when IRQs are MASKED, yet the unmask is performed
     * when the bit is set -- confirm the intended polarity against the
     * callers of arch_cpu_atomic_idle().
     */
    tst x0, #(DAIF_IRQ_BIT)
    beq _irq_disabled
    msr daifclr, #(DAIFCLR_IRQ_BIT)
_irq_disabled:
    ret

View file

@ -0,0 +1,188 @@
/****************************************************************************
* arch/arm64/src/common/arm64_cpu_psci.c
*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership. The
* ASF licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the
* License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
****************************************************************************/
/****************************************************************************
* Included Files
****************************************************************************/
#include <nuttx/config.h>

#include <assert.h>
#include <debug.h>
#include <errno.h>
#include <string.h>

#include <nuttx/arch.h>
#include <nuttx/spinlock.h>
#include <arch/irq.h>
#include <arch/chip/chip.h>

#include "arch/syscall.h"
#include "arm64_arch.h"
#include "arm64_internal.h"
#include "arm64_cpu_psci.h"
static struct psci_interface psci_data;
/****************************************************************************
* Private Functions
****************************************************************************/
static int psci_to_dev_err(int ret)
{
  int err;

  /* Translate a PSCI firmware return code into a negated errno value.
   * Any code without an explicit mapping collapses to -EINVAL.
   */

  switch (ret)
    {
      case PSCI_RET_SUCCESS:
        err = 0;
        break;

      case PSCI_RET_NOT_SUPPORTED:
        err = -ENOTSUP;
        break;

      case PSCI_RET_INVALID_PARAMS:
      case PSCI_RET_INVALID_ADDRESS:
        err = -EINVAL;
        break;

      case PSCI_RET_DENIED:
        err = -EPERM;
        break;

      default:
        err = -EINVAL;
        break;
    }

  return err;
}
static unsigned long __invoke_psci_fn_hvc(unsigned long function_id,
                                          unsigned long arg0,
                                          unsigned long arg1,
                                          unsigned long arg2)
{
  struct arm64_smccc_res res;

  /* Issue the PSCI call through the HVC conduit (traps to EL2) */

  arm64_smccc_hvc(function_id, arg0, arg1, arg2, 0, 0, 0, 0, &res);

  /* Register a0 carries the PSCI return code */

  return res.a0;
}
static unsigned long __invoke_psci_fn_smc(unsigned long function_id,
                                          unsigned long arg0,
                                          unsigned long arg1,
                                          unsigned long arg2)
{
  struct arm64_smccc_res res;

  /* Issue the PSCI call through the SMC conduit (traps to EL3) */

  arm64_smccc_smc(function_id, arg0, arg1, arg2, 0, 0, 0, 0, &res);

  /* Register a0 carries the PSCI return code */

  return res.a0;
}
static uint32_t psci_get_version(void)
{
  /* Query the firmware for its PSCI version via the selected conduit */

  return psci_data.invoke_psci_fn(PSCI_0_2_FN_PSCI_VERSION, 0, 0, 0);
}
static int set_conduit_method(const char *method)
{
  /* Select how PSCI calls reach the firmware:  "hvc" traps to a
   * hypervisor at EL2, "smc" traps to secure firmware at EL3.
   */

  if (strcmp(method, "hvc") == 0)
    {
      psci_data.conduit        = SMCCC_CONDUIT_HVC;
      psci_data.invoke_psci_fn = __invoke_psci_fn_hvc;
      return 0;
    }

  if (strcmp(method, "smc") == 0)
    {
      psci_data.conduit        = SMCCC_CONDUIT_SMC;
      psci_data.invoke_psci_fn = __invoke_psci_fn_smc;
      return 0;
    }

  serr("Invalid conduit method");
  return -EINVAL;
}
static int psci_detect(void)
{
  uint32_t ver = psci_get_version();

  sinfo("Detected PSCI v%d.%d\n",
        PSCI_VERSION_MAJOR(ver), PSCI_VERSION_MINOR(ver));

  /* Versions older than PSCI v0.2 lack the function IDs used here */

  if (PSCI_VERSION_MAJOR(ver) > 0 || PSCI_VERSION_MINOR(ver) >= 2)
    {
      psci_data.version = ver;
      return 0;
    }

  serr("PSCI unsupported version");
  return -ENOTSUP;
}
/****************************************************************************
* Public Functions
****************************************************************************/
uint32_t psci_version(void)
{
  /* Return the PSCI version cached by psci_detect() */

  return psci_data.version;
}
int pcsi_cpu_off(void)
{
  int ret;

  /* Power down the calling CPU via PSCI CPU_OFF.
   *
   * NOTE(review): the name is spelled "pcsi" (transposed from "psci");
   * it is part of the public interface declared in arm64_cpu_psci.h,
   * so it is not renamed here.
   */

  if (psci_data.conduit == SMCCC_CONDUIT_NONE)
    {
      /* arm64_psci_init() has not (successfully) selected a conduit */

      return -EINVAL;
    }

  ret = psci_data.invoke_psci_fn(PSCI_0_2_FN_CPU_OFF, 0, 0, 0);

  return psci_to_dev_err(ret);
}
int pcsi_cpu_on(unsigned long cpuid, uintptr_t entry_point)
{
  int ret;

  /* Power on the CPU identified by 'cpuid' and start it at
   * 'entry_point', using the 64-bit CPU_ON call (context argument 0).
   *
   * NOTE(review): "pcsi" spelling kept for interface compatibility;
   * see pcsi_cpu_off().
   */

  if (psci_data.conduit == SMCCC_CONDUIT_NONE)
    {
      /* arm64_psci_init() has not (successfully) selected a conduit */

      return -EINVAL;
    }

  ret = psci_data.invoke_psci_fn(PSCI_FN_NATIVE(0_2, CPU_ON),
                                 cpuid, (unsigned long)entry_point, 0);

  return psci_to_dev_err(ret);
}
int arm64_psci_init(const char * method)
{
  /* No conduit is available until one has been successfully selected */

  psci_data.conduit = SMCCC_CONDUIT_NONE;

  if (set_conduit_method(method) != 0)
    {
      return -ENOTSUP;
    }

  /* Probe the firmware and cache the PSCI version */

  return psci_detect();
}

View file

@ -0,0 +1,104 @@
/****************************************************************************
* arch/arm64/src/common/arm64_cpu_psci.h
*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership. The
* ASF licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the
* License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
****************************************************************************/
#ifndef __ARCH_ARM64_SRC_COMMON_ARM64_CPU_PSCI_H
#define __ARCH_ARM64_SRC_COMMON_ARM64_CPU_PSCI_H
/****************************************************************************
* Included Files
****************************************************************************/
#include <nuttx/config.h>
#include <arch/irq.h>
#include <arch/chip/chip.h>
#include <arch/syscall.h>
/****************************************************************************
* Pre-processor Definitions
****************************************************************************/
#define PSCI_FN_NATIVE(version, name) PSCI_##version##_FN64_##name
/* PSCI v0.2 interface */
#define PSCI_0_2_FN_BASE 0x84000000
#define PSCI_0_2_FN(n) (PSCI_0_2_FN_BASE + (n))
#define PSCI_0_2_64BIT 0x40000000
#define PSCI_0_2_FN64_BASE \
(PSCI_0_2_FN_BASE + PSCI_0_2_64BIT)
#define PSCI_0_2_FN64(n) (PSCI_0_2_FN64_BASE + (n))
#define PSCI_0_2_FN_PSCI_VERSION PSCI_0_2_FN(0)
#define PSCI_0_2_FN_CPU_SUSPEND PSCI_0_2_FN(1)
#define PSCI_0_2_FN_CPU_OFF PSCI_0_2_FN(2)
#define PSCI_0_2_FN_CPU_ON PSCI_0_2_FN(3)
#define PSCI_0_2_FN_AFFINITY_INFO PSCI_0_2_FN(4)
#define PSCI_0_2_FN_MIGRATE PSCI_0_2_FN(5)
#define PSCI_0_2_FN_MIGRATE_INFO_TYPE PSCI_0_2_FN(6)
#define PSCI_0_2_FN_MIGRATE_INFO_UP_CPU PSCI_0_2_FN(7)
#define PSCI_0_2_FN_SYSTEM_OFF PSCI_0_2_FN(8)
#define PSCI_0_2_FN_SYSTEM_RESET PSCI_0_2_FN(9)
#define PSCI_0_2_FN64_CPU_SUSPEND PSCI_0_2_FN64(1)
#define PSCI_0_2_FN64_CPU_ON PSCI_0_2_FN64(3)
#define PSCI_0_2_FN64_AFFINITY_INFO PSCI_0_2_FN64(4)
#define PSCI_0_2_FN64_MIGRATE PSCI_0_2_FN64(5)
#define PSCI_0_2_FN64_MIGRATE_INFO_UP_CPU PSCI_0_2_FN64(7)
/* PSCI return values (inclusive of all PSCI versions) */
#define PSCI_RET_SUCCESS 0
#define PSCI_RET_NOT_SUPPORTED -1
#define PSCI_RET_INVALID_PARAMS -2
#define PSCI_RET_DENIED -3
#define PSCI_RET_ALREADY_ON -4
#define PSCI_RET_ON_PENDING -5
#define PSCI_RET_INTERNAL_FAILURE -6
#define PSCI_RET_NOT_PRESENT -7
#define PSCI_RET_DISABLED -8
#define PSCI_RET_INVALID_ADDRESS -9
/* PSCI version decoding (independent of PSCI version) */
#define PSCI_VERSION_MAJOR_SHIFT 16
#define PSCI_VERSION_MINOR_MASK \
((1U << PSCI_VERSION_MAJOR_SHIFT) - 1)
#define PSCI_VERSION_MAJOR_MASK ~PSCI_VERSION_MINOR_MASK
#define PSCI_VERSION_MAJOR(ver) \
(((ver) & PSCI_VERSION_MAJOR_MASK) >> PSCI_VERSION_MAJOR_SHIFT)
#define PSCI_VERSION_MINOR(ver) \
((ver) & PSCI_VERSION_MINOR_MASK)
/* Signature of the low-level PSCI invocation trampoline (HVC or SMC):
 * (function_id, arg0, arg1, arg2) -> PSCI return code in a0.
 */

typedef unsigned long (*psci_fn)(unsigned long, unsigned long, unsigned long,
                                 unsigned long);

/* Run-time state of the PSCI interface */

struct psci_interface
{
  enum arm64_smccc_conduit conduit;   /* How calls reach the firmware */
  psci_fn invoke_psci_fn;             /* HVC or SMC trampoline */
  uint32_t version;                   /* PSCI version reported by firmware */
};
/****************************************************************************
* Public Function Prototypes
****************************************************************************/
uint32_t psci_version(void);
int pcsi_cpu_off(void);
int pcsi_cpu_on(unsigned long cpuid, uintptr_t entry_point);
#endif /* __ARCH_ARM64_SRC_COMMON_ARM64_CPU_PSCI_H */

View file

@ -0,0 +1,112 @@
/****************************************************************************
* arch/arm64/src/common/arm64_cpuidlestack.c
*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership. The
* ASF licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the
* License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
****************************************************************************/
/****************************************************************************
* Included Files
****************************************************************************/
#include <nuttx/config.h>
#include <sys/types.h>
#include <assert.h>
#include <nuttx/arch.h>
#include <nuttx/sched.h>
#include "arm64_smp.h"
#include "arm64_internal.h"
/****************************************************************************
* Pre-processor Definitions
****************************************************************************/
/* Stack alignment macros */
#define STACK_ISALIGNED(a) ((uintptr_t)(a) & ~STACK_ALIGN_MASK)
/****************************************************************************
* Private Data
****************************************************************************/
/****************************************************************************
* Public Functions
****************************************************************************/
/****************************************************************************
* Name: up_cpu_idlestack
*
* Description:
* Allocate a stack for the CPU[n] IDLE task (n > 0) if appropriate and
* setup up stack-related information in the IDLE task's TCB. This
* function is always called before up_cpu_start(). This function is
* only called for the CPU's initial IDLE task; up_create_task is used for
* all normal tasks, pthreads, and kernel threads for all CPUs.
*
* The initial IDLE task is a special case because the CPUs can be started
* in different ways in different environments:
*
* 1. The CPU may already have been started and waiting in a low power
* state for up_cpu_start(). In this case, the IDLE thread's stack
* has already been allocated and is already in use. Here
* up_cpu_idlestack() only has to provide information about the
* already allocated stack.
*
* 2. The CPU may be disabled but started when up_cpu_start() is called.
* In this case, a new stack will need to be created for the IDLE
* thread and this function is then equivalent to:
*
* return up_create_stack(tcb, stack_size, TCB_FLAG_TTYPE_KERNEL);
*
* The following TCB fields must be initialized by this function:
*
* - adj_stack_size: Stack size after adjustment for hardware, processor,
* etc. This value is retained only for debug purposes.
* - stack_alloc_ptr: Pointer to allocated stack
* - stack_base_ptr: Adjusted stack base pointer after the TLS Data and
* Arguments has been removed from the stack allocation.
*
* Input Parameters:
* - cpu: CPU index that indicates which CPU the IDLE task is
* being created for.
* - tcb: The TCB of new CPU IDLE task
* - stack_size: The requested stack size for the IDLE task. At least
* this much must be allocated. This should be
* CONFIG_SMP_STACK_SIZE.
*
****************************************************************************/
int up_cpu_idlestack(int cpu, struct tcb_s *tcb, size_t stack_size)
{
#if CONFIG_SMP_NCPUS > 1
  uintptr_t stack_alloc;

  /* Only secondary CPUs (cpu > 0) use the pre-allocated idle stacks,
   * and the requested size must fit the static allocation.
   *
   * NOTE: stack_size is only sanity-checked; the fixed SMP_STACK_SIZE
   * allocation from g_cpu_idlestackalloc is always used.
   */

  DEBUGASSERT(cpu > 0 && cpu < CONFIG_SMP_NCPUS && tcb != NULL &&
              stack_size <= SMP_STACK_SIZE);

  /* Get the top of the stack */

  stack_alloc = (uintptr_t)g_cpu_idlestackalloc[cpu];
  DEBUGASSERT(stack_alloc != 0 && STACK_ISALIGNED(stack_alloc));

  tcb->adj_stack_size = SMP_STACK_SIZE;
  tcb->stack_alloc_ptr = (void *)stack_alloc;
  tcb->stack_base_ptr = tcb->stack_alloc_ptr;
#endif
  return OK;
}

View file

@ -0,0 +1,68 @@
/****************************************************************************
* arch/arm64/src/common/arm64_cpuindex.c
*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership. The
* ASF licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the
* License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
****************************************************************************/
/****************************************************************************
* Included Files
****************************************************************************/
#include <nuttx/config.h>
#include <stdint.h>
#include <nuttx/arch.h>
#include "arm64_arch.h"
#ifdef CONFIG_SMP
/****************************************************************************
* Public Functions
****************************************************************************/
/****************************************************************************
* Name: up_cpu_index
*
* Description:
* Return an index in the range of 0 through (CONFIG_SMP_NCPUS-1) that
* corresponds to the currently executing CPU.
*
* If TLS is enabled, then the RTOS can get this information from the TLS
* info structure. Otherwise, the MCU-specific logic must provide some
* mechanism to provide the CPU index.
*
* Input Parameters:
* None
*
* Returned Value:
* An integer index in the range of 0 through (CONFIG_SMP_NCPUS-1) that
* corresponds to the currently executing CPU.
*
****************************************************************************/
int up_cpu_index(void)
{
  /* Read the Multiprocessor Affinity Register (MPIDR)
   * And return the CPU ID field
   */

  return MPIDR_TO_CORE(GET_MPIDR());
}
#endif /* CONFIG_SMP */

View file

@ -0,0 +1,335 @@
/****************************************************************************
* arch/arm64/src/common/arm64_cpupause.c
*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership. The
* ASF licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the
* License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
****************************************************************************/
/****************************************************************************
* Included Files
****************************************************************************/
#include <nuttx/config.h>
#include <stdint.h>
#include <assert.h>
#include <debug.h>
#include <nuttx/arch.h>
#include <nuttx/sched.h>
#include <nuttx/spinlock.h>
#include <nuttx/sched_note.h>
#include "arm64_internal.h"
#include "arm64_gic.h"
#include "sched/sched.h"
/****************************************************************************
* Private Data
****************************************************************************/
/* These spinlocks are used in the SMP configuration in order to implement
* up_cpu_pause(). The protocol for CPUn to pause CPUm is as follows
*
* 1. The up_cpu_pause() implementation on CPUn locks both g_cpu_wait[m]
* and g_cpu_paused[m]. CPUn then waits spinning on g_cpu_paused[m].
* 2. CPUm receives the interrupt it (1) unlocks g_cpu_paused[m] and
* (2) locks g_cpu_wait[m]. The first unblocks CPUn and the second
* blocks CPUm in the interrupt handler.
*
* When CPUm resumes, CPUn unlocks g_cpu_wait[m] and the interrupt handler
* on CPUm continues. CPUm must, of course, also then unlock g_cpu_wait[m]
* so that it will be ready for the next pause operation.
*/
static volatile spinlock_t g_cpu_wait[CONFIG_SMP_NCPUS];
static volatile spinlock_t g_cpu_paused[CONFIG_SMP_NCPUS];
/****************************************************************************
* Public Functions
****************************************************************************/
/****************************************************************************
 * Name: up_cpu_pausereq
 *
 * Description:
 *   Return true if a pause request is pending for this CPU.
 *
 * Input Parameters:
 *   cpu - The index of the CPU to be queried
 *
 * Returned Value:
 *   true  = a pause request is pending.
 *   false = no pause request is pending.
 *
 ****************************************************************************/

bool up_cpu_pausereq(int cpu)
{
  /* g_cpu_paused[cpu] is locked by the requesting CPU in up_cpu_pause();
   * a locked state therefore means that a pause request is outstanding.
   */

  return spin_islocked(&g_cpu_paused[cpu]);
}
/****************************************************************************
 * Name: up_cpu_paused
 *
 * Description:
 *   Handle a pause request from another CPU.  Normally, this logic is
 *   executed from interrupt handling logic within the architecture-
 *   specific interrupt handling.  However, it is sometimes necessary to
 *   perform the pending pause operation in other contexts where the
 *   interrupt cannot be taken in order to avoid deadlocks.
 *
 *   This function performs the following operations:
 *
 *   1. It saves the current task state at the head of the current assigned
 *      task list.
 *   2. It waits on a spinlock, then
 *   3. Returns from interrupt, restoring the state of the new task at the
 *      head of the ready to run list.
 *
 * Input Parameters:
 *   cpu - The index of the CPU to be paused
 *
 * Returned Value:
 *   On success, OK is returned.  Otherwise, a negated errno value
 *   indicating the nature of the failure is returned.
 *
 ****************************************************************************/

int up_cpu_paused(int cpu)
{
  struct tcb_s *tcb = this_task();

  /* Update scheduler parameters */

  nxsched_suspend_scheduler(tcb);

#ifdef CONFIG_SCHED_INSTRUMENTATION
  /* Notify that we are paused */

  sched_note_cpu_paused(tcb);
#endif

  /* Save the current context at CURRENT_REGS into the TCB at the head
   * of the assigned task list for this CPU.
   */

  arm64_savestate(tcb->xcp.regs);

  /* Release the g_cpu_paused spinlock to synchronize with the
   * requesting CPU (which is spinning on it in up_cpu_pause()).
   */

  spin_unlock(&g_cpu_paused[cpu]);

  /* Wait for the spinlock to be released.  The requesting CPU will release
   * the spinlock when the CPU is resumed.
   */

  spin_lock(&g_cpu_wait[cpu]);

  /* This CPU has been resumed.  Restore the exception context of the TCB
   * at the (new) head of the assigned task list.
   */

  tcb = this_task();

#ifdef CONFIG_SCHED_INSTRUMENTATION
  /* Notify that we have resumed */

  sched_note_cpu_resumed(tcb);
#endif

  /* Reset scheduler parameters */

  nxsched_resume_scheduler(tcb);

  /* Then switch contexts.  Any necessary address environment changes
   * will be made when the interrupt returns.
   */

  arm64_restorestate(tcb->xcp.regs);

  /* Unlock g_cpu_wait so this CPU is ready for the next pause operation
   * (see the protocol description at the top of this file).
   */

  spin_unlock(&g_cpu_wait[cpu]);
  return OK;
}
/****************************************************************************
 * Name: arm64_pause_handler
 *
 * Description:
 *   This is the handler for SGI2.  It performs the following operations:
 *
 *   1. It saves the current task state at the head of the current assigned
 *      task list.
 *   2. It waits on a spinlock, then
 *   3. Returns from interrupt, restoring the state of the new task at the
 *      head of the ready to run list.
 *
 * Input Parameters:
 *   Standard interrupt handling (irq number, saved context, callback arg)
 *
 * Returned Value:
 *   Zero on success; a negated errno value on failure.
 *
 ****************************************************************************/

int arm64_pause_handler(int irq, void *context, void *arg)
{
  int cpu = this_cpu();

  /* Check for false alarms.  Such false alarms could occur as a
   * consequence of some deadlock breaking logic that might have already
   * serviced the SGI2 interrupt by calling up_cpu_paused().  If the pause
   * event has already been processed then g_cpu_paused[cpu] will not be
   * locked.
   */

  if (up_cpu_pausereq(cpu))
    {
      /* NOTE: The following enter_critical_section() will call
       * up_cpu_paused() to process a pause request to break a deadlock
       * because the caller held a critical section.  Once up_cpu_paused()
       * finished, the caller will proceed and release the g_cpu_irqlock.
       * Then this CPU will acquire g_cpu_irqlock in the function.
       */

      irqstate_t flags = enter_critical_section();

      /* NOTE: the pause request should not exist here */

      DEBUGVERIFY(!up_cpu_pausereq(cpu));

      leave_critical_section(flags);
    }

  return OK;
}
/****************************************************************************
 * Name: up_cpu_pause
 *
 * Description:
 *   Save the state of the current task at the head of the
 *   g_assignedtasks[cpu] task list and then pause task execution on the
 *   CPU.
 *
 *   This function is called by the OS when the logic executing on one CPU
 *   needs to modify the state of the g_assignedtasks[cpu] list for another
 *   CPU.
 *
 * Input Parameters:
 *   cpu - The index of the CPU to be stopped
 *
 * Returned Value:
 *   Zero on success; a negated errno value on failure.
 *
 ****************************************************************************/

int up_cpu_pause(int cpu)
{
  int ret;
  uint64_t mpidr = GET_MPIDR();

  DEBUGASSERT(cpu >= 0 && cpu < CONFIG_SMP_NCPUS && cpu != this_cpu());

#ifdef CONFIG_SCHED_INSTRUMENTATION
  /* Notify of the pause event */

  sched_note_cpu_pause(this_task(), cpu);
#endif

  /* Take both spinlocks.  The g_cpu_wait spinlock will prevent the SGI2
   * handler from returning until up_cpu_resume() is called; g_cpu_paused
   * is a handshake that will prevent this function from returning until
   * the CPU is actually paused.
   * Note that we might spin before getting g_cpu_wait, this just means
   * that the other CPU still hasn't finished responding to the previous
   * resume request.
   */

  DEBUGASSERT(!spin_islocked(&g_cpu_paused[cpu]));

  spin_lock(&g_cpu_wait[cpu]);
  spin_lock(&g_cpu_paused[cpu]);

  /* Execute SGI2 */

  ret = arm64_gic_raise_sgi(SGI_CPU_PAUSE, mpidr, (1 << cpu));
  if (ret < 0)
    {
      /* What happened?  Unlock the g_cpu_wait spinlock */

      spin_unlock(&g_cpu_wait[cpu]);
    }
  else
    {
      /* Wait for the other CPU to unlock g_cpu_paused meaning that
       * it is fully paused and ready for up_cpu_resume();
       */

      spin_lock(&g_cpu_paused[cpu]);
    }

  spin_unlock(&g_cpu_paused[cpu]);

  /* On successful return g_cpu_wait will be locked, the other CPU will be
   * spinning on g_cpu_wait and will not continue until up_cpu_resume() is
   * called.  g_cpu_paused will be unlocked in any case.
   */

  return ret;
}
/****************************************************************************
 * Name: up_cpu_resume
 *
 * Description:
 *   Restart the cpu after it was paused via up_cpu_pause(), restoring the
 *   state of the task at the head of the g_assignedtasks[cpu] list, and
 *   resume normal tasking.
 *
 *   This function is called after up_cpu_pause in order resume operation
 *   of the CPU after modifying its g_assignedtasks[cpu] list.
 *
 * Input Parameters:
 *   cpu - The index of the CPU being re-started.
 *
 * Returned Value:
 *   Zero on success; a negated errno value on failure.
 *
 ****************************************************************************/

int up_cpu_resume(int cpu)
{
  DEBUGASSERT(cpu >= 0 && cpu < CONFIG_SMP_NCPUS && cpu != this_cpu());

#ifdef CONFIG_SCHED_INSTRUMENTATION
  /* Notify of the resume event */

  sched_note_cpu_resume(this_task(), cpu);
#endif

  /* Release the spinlock.  Releasing the spinlock will cause the SGI2
   * handler on 'cpu' to continue and return from interrupt to the newly
   * established thread.
   */

  DEBUGASSERT(spin_islocked(&g_cpu_wait[cpu]) &&
              !spin_islocked(&g_cpu_paused[cpu]));

  spin_unlock(&g_cpu_wait[cpu]);
  return OK;
}

View file

@ -0,0 +1,266 @@
/****************************************************************************
* arch/arm64/src/common/arm64_cpustart.c
*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership. The
* ASF licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the
* License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
****************************************************************************/
/****************************************************************************
* Included Files
****************************************************************************/
#include <nuttx/config.h>
#include <stdint.h>
#include <assert.h>
#include <debug.h>
#include <nuttx/arch.h>
#include <nuttx/sched.h>
#include <nuttx/sched_note.h>
#include <sched/sched.h>
#include <nuttx/cache.h>
#include <arch/spinlock.h>
#include <nuttx/init.h>
#include "init/init.h"
#include "arm64_arch.h"
#include "arm64_internal.h"
#include "arm64_gic.h"
#include "arm64_arch_timer.h"
#include "arm64_smp.h"
#include "arm64_cpu_psci.h"
#include "arm64_mmu.h"
/****************************************************************************
* Public data
****************************************************************************/
/* Prototype of the C entry function handed to a freshly started core */

typedef void (*arm64_cpustart_t)(void *data);

/* Boot parameter block consumed by a secondary core on its way up.  It is
 * filled in by the boot CPU in arm64_start_cpu() and flushed to memory
 * (up_flush_dcache) so the incoming core can read it; hence the cache
 * line alignment.
 */

struct arm64_boot_params
{
  uint64_t mpid;          /* Target core id; written last as the
                           * synchronization point (-1 = none pending) */
  char *boot_sp;          /* Stack for the new core's boot/idle path */
  arm64_cpustart_t func;  /* Entry function; cleared by the secondary
                           * core to announce it has started */
  void *arg;              /* Argument passed to func */
  int cpu_num;            /* Logical CPU index of the new core */
};

volatile struct arm64_boot_params aligned_data(L1_CACHE_BYTES)
cpu_boot_params =
{
  .mpid = -1,
  .boot_sp = (char *)g_cpu_idlestackalloc[0],
};

/* Top-of-stack pointers for the per-CPU interrupt stacks; entry [n] is
 * filled in by arm64_start_cpu() just before CPU n is brought up.
 */

volatile uint64_t *g_cpu_int_stacktop[CONFIG_SMP_NCPUS] =
{
  (uint64_t *)(g_interrupt_stacks[0] + INTSTACK_SIZE),
};

/****************************************************************************
 * Private data
 ****************************************************************************/

/* Boot handshake: set by the secondary core in arm64_smp_init_top() and
 * polled by up_cpu_start() on the boot CPU.
 */

static volatile long cpu_ready_flag;
/****************************************************************************
* Private Functions
****************************************************************************/
/* Crude busy-wait used while polling for a secondary core to come up.
 * The volatile counter keeps the compiler from eliding the loop.
 */

static inline void local_delay(void)
{
  volatile int remaining = 1000;

  while (remaining > 0)
    {
      remaining--;
    }
}
/****************************************************************************
 * Name: arm64_smp_init_top
 *
 * Description:
 *   Final C-level bring-up for a secondary core.  Signals boot completion
 *   to up_cpu_start() via cpu_ready_flag, enables interrupts, and enters
 *   the IDLE loop for this CPU.
 *
 * Input Parameters:
 *   arg - Unused (passed through cpu_boot_params.arg)
 *
 ****************************************************************************/

static void arm64_smp_init_top(void *arg)
{
  struct tcb_s *tcb = this_task();

  /* Tell the boot CPU, spinning in up_cpu_start(), that we are alive */

  cpu_ready_flag = 1;

#ifndef CONFIG_SUPPRESS_INTERRUPTS
  /* And finally, enable interrupts */

  up_irq_enable();
#endif

#ifdef CONFIG_SCHED_INSTRUMENTATION
  /* Notify that this CPU has started */

  sched_note_cpu_started(tcb);
#endif

  /* Reset scheduler parameters */

  nxsched_resume_scheduler(tcb);

  /* core n, idle n: publish this CPU's idle TCB in the thread-ID
   * registers.  NOTE(review): presumably consumed by this_task()/TLS
   * lookups -- confirm against arm64_internal.h.
   */

  write_sysreg(0, tpidrro_el0);
  write_sysreg(tcb, tpidr_el1);
  write_sysreg(tcb, tpidr_el0);

  nx_idle_trampoline();
}
/****************************************************************************
 * Name: arm64_start_cpu
 *
 * Description:
 *   Kick off one secondary core: publish its boot parameters (stack,
 *   entry function, logical CPU index) in cpu_boot_params, flush them to
 *   memory so the incoming core can read them, then ask the PSCI firmware
 *   to power the core on at __start.  Waits until the secondary core
 *   clears cpu_boot_params.func to announce itself.
 *
 * Input Parameters:
 *   cpu_num  - Logical index of the CPU to start (also used as the MPID)
 *   stack    - Base of the boot/idle stack for the new CPU
 *   stack_sz - Size of that stack (currently unused)
 *   fn       - C entry point the secondary core will call
 *
 ****************************************************************************/

static void arm64_start_cpu(int cpu_num, char *stack, int stack_sz,
                            arm64_cpustart_t fn)
{
  uint64_t cpu_mpid = cpu_num;
  uintptr_t flush_start;
  uintptr_t flush_end;

#ifdef CONFIG_SCHED_INSTRUMENTATION
  /* Notify of the start event.  FIX: the original referenced an
   * undeclared identifier 'cpu'; the parameter is 'cpu_num'.
   */

  sched_note_cpu_start(this_task(), cpu_num);
#endif

  cpu_boot_params.boot_sp = stack;
  cpu_boot_params.func    = fn;
  cpu_boot_params.arg     = 0;
  cpu_boot_params.cpu_num = cpu_num;

  g_cpu_int_stacktop[cpu_num] =
    (uint64_t *)(g_interrupt_stacks[cpu_num] + INTSTACK_SIZE);

  ARM64_DSB();

  /* Store mpid last as this is our synchronization point */

  cpu_boot_params.mpid = cpu_num;

  /* Flush the whole parameter block so the new core, whose caches are
   * not yet enabled, sees a coherent view of it.
   */

  flush_start = (uintptr_t)&cpu_boot_params;
  flush_end   = flush_start + sizeof(cpu_boot_params);
  up_flush_dcache(flush_start, flush_end);

  if (pcsi_cpu_on(cpu_mpid, (uint64_t)&__start))
    {
      /* FIX: log the failure at error level (was sinfo) */

      serr("Failed to boot secondary CPU core %d (MPID:%#llx)\n", cpu_num,
           cpu_mpid);
      return;
    }

  /* Wait for the secondary core to come up; it clears .func in
   * arm64_boot_secondary_c_routine() to announce its presence.
   */

  while (cpu_boot_params.func)
    {
      SP_WFE();
    }

  sinfo("Secondary CPU core %d (MPID:%#llx) is up\n", cpu_num, cpu_mpid);
}
/****************************************************************************
* Public Functions
****************************************************************************/
/****************************************************************************
 * Name: up_cpu_start
 *
 * Description:
 *   In an SMP configuration, only one CPU is initially active (CPU 0).
 *   System initialization occurs on that single thread. At the completion
 *   of the initialization of the OS, just before beginning normal
 *   multitasking, the additional CPUs would be started by calling this
 *   function.
 *
 *   Each CPU is provided the entry point to its IDLE task when started.  A
 *   TCB for each CPU's IDLE task has been initialized and placed in the
 *   CPU's g_assignedtasks[cpu] list.  No stack has been allocated or
 *   initialized.
 *
 *   The OS initialization logic calls this function repeatedly until each
 *   CPU has been started, 1 through (CONFIG_SMP_NCPUS-1).
 *
 * Input Parameters:
 *   cpu - The index of the CPU being started.  This will be a numeric
 *         value in the range of one to (CONFIG_SMP_NCPUS-1).
 *         (CPU 0 is already active)
 *
 * Returned Value:
 *   Zero on success; a negated errno value on failure.
 *
 ****************************************************************************/

int up_cpu_start(int cpu)
{
  DEBUGASSERT(cpu >= 0 && cpu < CONFIG_SMP_NCPUS && cpu != this_cpu());

#ifdef CONFIG_SCHED_INSTRUMENTATION
  /* Notify of the start event */

  sched_note_cpu_start(this_task(), cpu);
#endif

  /* Reset the handshake flag; arm64_smp_init_top() running on the new
   * CPU will set it once the core is alive.
   */

  cpu_ready_flag = 0;

  arm64_start_cpu(cpu, (char *)g_cpu_idlestackalloc[cpu], SMP_STACK_SIZE,
                  arm64_smp_init_top);

  /* Wait for this CPU to complete its boot sequence */

  while (!cpu_ready_flag)
    {
      local_delay();
    }

  return 0;
}
/****************************************************************************
 * Name: arm64_boot_secondary_c_routine
 *
 * Description:
 *   The C entry of secondary cores.  Reached from the assembly startup
 *   path after the core was powered on via PSCI.  Performs the per-core
 *   MMU and GIC setup, then jumps into the entry function published in
 *   cpu_boot_params by arm64_start_cpu().
 *
 ****************************************************************************/

void arm64_boot_secondary_c_routine(void)
{
  arm64_cpustart_t func;
  void *arg;

  /* Per-core MMU and interrupt controller initialization */

  arm64_mmu_init(false);
  arm64_gic_secondary_init();
  up_enable_irq(SGI_CPU_PAUSE);

  /* Latch the entry point and argument before releasing the block */

  func = cpu_boot_params.func;
  arg = cpu_boot_params.arg;

  ARM64_DSB();

  /* Secondary core clears .func to announce its presence.
   * Primary core is polling for this.  We no longer own
   * cpu_boot_params afterwards.
   */

  cpu_boot_params.func = NULL;

  ARM64_DSB();
  SP_SEV();

  func(arg);
}
/****************************************************************************
 * Name: arm64_smp_sgi_init
 *
 * Description:
 *   Attach and enable the inter-processor pause interrupt
 *   (SGI_CPU_PAUSE) used by up_cpu_pause()/up_cpu_resume().  Called from
 *   up_irqinitialize() when CONFIG_SMP is enabled.
 *
 * Returned Value:
 *   Always zero (OK).
 *
 ****************************************************************************/

int arm64_smp_sgi_init(void)
{
  irq_attach(SGI_CPU_PAUSE, arm64_pause_handler, 0);
  arm64_gic_irq_set_priority(SGI_CPU_PAUSE, IRQ_DEFAULT_PRIORITY, 0);
  up_enable_irq(SGI_CPU_PAUSE);

  return 0;
}

View file

@ -0,0 +1,205 @@
/****************************************************************************
* arch/arm64/src/common/arm64_createstack.c
*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership. The
* ASF licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the
* License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
****************************************************************************/
/****************************************************************************
* Included Files
****************************************************************************/
#include <nuttx/config.h>
#include <sys/types.h>
#include <stdint.h>
#include <sched.h>
#include <debug.h>
#include <nuttx/arch.h>
#include <nuttx/kmalloc.h>
#include <nuttx/tls.h>
#include <nuttx/board.h>
#include <arch/irq.h>
#include "arm64_internal.h"
#include "arm64_fatal.h"
/****************************************************************************
* Pre-processor Macros
****************************************************************************/
/****************************************************************************
* Private Types
****************************************************************************/
/****************************************************************************
* Private Function Prototypes
****************************************************************************/
/****************************************************************************
* Public Functions
****************************************************************************/
/****************************************************************************
 * Name: up_create_stack
 *
 * Description:
 *   Allocate a stack for a new thread and setup up stack-related
 *   information in the TCB.
 *
 *   The following TCB fields must be initialized by this function:
 *
 *   - adj_stack_size: Stack size after adjustment for hardware, processor,
 *     etc.  This value is retained only for debug purposes.
 *   - stack_alloc_ptr: Pointer to allocated stack
 *   - stack_base_ptr: Adjusted stack base pointer after the TLS Data and
 *     Arguments has been removed from the stack allocation.
 *
 * Input Parameters:
 *   - tcb: The TCB of new task
 *   - stack_size:  The requested stack size.  At least this much
 *     must be allocated.
 *   - ttype:  The thread type.  This may be one of following (defined in
 *     include/nuttx/sched.h):
 *
 *       TCB_FLAG_TTYPE_TASK     Normal user task
 *       TCB_FLAG_TTYPE_PTHREAD  User pthread
 *       TCB_FLAG_TTYPE_KERNEL   Kernel thread
 *
 *     This thread type is normally available in the flags field of the
 *     TCB, however, there are certain contexts where the TCB may not be
 *     fully initialized when up_create_stack is called.
 *
 *     If either CONFIG_BUILD_PROTECTED or CONFIG_BUILD_KERNEL are defined,
 *     then this thread type may affect how the stack is allocated.  For
 *     example, kernel thread stacks should be allocated from protected
 *     kernel memory.  Stacks for user tasks and threads must come from
 *     memory that is accessible to user code.
 *
 * Returned Value:
 *   OK if a stack is allocated and recorded in the TCB; ERROR otherwise.
 *
 ****************************************************************************/

int up_create_stack(struct tcb_s *tcb, size_t stack_size, uint8_t ttype)
{
  /* Round the requested size up to the target stack alignment */

  stack_size = STACK_ALIGN_UP(stack_size);

#ifdef CONFIG_TLS_ALIGNED
  /* The allocated stack size must not exceed the maximum possible for the
   * TLS feature.
   */

  DEBUGASSERT(stack_size <= TLS_MAXSTACK);
  if (stack_size > TLS_MAXSTACK)
    {
      stack_size = TLS_MAXSTACK;
    }
#endif

  /* Is there already a stack allocated of a different size? */

  if (tcb->stack_alloc_ptr && tcb->adj_stack_size != stack_size)
    {
      /* Yes.. Release the old stack */

      up_release_stack(tcb, ttype);
    }

  /* Do we need to allocate a new stack? */

  if (!tcb->stack_alloc_ptr)
    {
      /* Allocate the stack.  If DEBUG is enabled (but not stack debug),
       * then create a zeroed stack to make stack dumps easier to trace.
       * If TLS is enabled, then we must allocate aligned stacks.
       */

#ifdef CONFIG_TLS_ALIGNED
#ifdef CONFIG_MM_KERNEL_HEAP
      /* Use the kernel allocator if this is a kernel thread */

      if (ttype == TCB_FLAG_TTYPE_KERNEL)
        {
          tcb->stack_alloc_ptr = kmm_memalign(TLS_STACK_ALIGN, stack_size);
        }
      else
#endif
        {
          /* Use the user-space allocator if this is a task or pthread */

          tcb->stack_alloc_ptr = kumm_memalign(TLS_STACK_ALIGN, stack_size);
        }
#else /* CONFIG_TLS_ALIGNED */
#ifdef CONFIG_MM_KERNEL_HEAP
      /* Use the kernel allocator if this is a kernel thread */

      if (ttype == TCB_FLAG_TTYPE_KERNEL)
        {
          tcb->stack_alloc_ptr =
            kmm_memalign(STACK_ALIGNMENT, stack_size);
        }
      else
#endif
        {
          /* Use the user-space allocator if this is a task or pthread */

          tcb->stack_alloc_ptr =
            kumm_memalign(STACK_ALIGNMENT, stack_size);
        }
#endif /* CONFIG_TLS_ALIGNED */

#ifdef CONFIG_DEBUG_FEATURES
      /* Was the allocation successful? */

      if (!tcb->stack_alloc_ptr)
        {
          /* FIX: use %zu -- stack_size is size_t; %ld is the wrong width
           * on ILP32 and the wrong signedness on LP64.
           */

          serr("ERROR: Failed to allocate stack, size %zu\n", stack_size);
        }
#endif
    }

  /* Did we successfully allocate a stack? */

  if (tcb->stack_alloc_ptr)
    {
      /* The ARM uses a "full descending" stack:
       * the stack grows toward lower addresses in memory.
       * The stack pointer register points to the last pushed item in
       * the stack.
       * Items on the stack are referenced as positive word offsets from
       * sp.
       */

      /* Since both stack_alloc_ptr and stack_size are in
       * CONFIG_STACK_ALIGNMENT, and the stack ptr is decremented before
       * the first write, we can directly save our variables to struct
       * tcb_s.
       */

      tcb->adj_stack_size = stack_size;
      tcb->stack_base_ptr = tcb->stack_alloc_ptr;

#ifdef CONFIG_STACK_COLORATION
      /* If stack debug is enabled, then fill the stack with a
       * recognizable value that we can use later to test for high
       * water marks.
       */

      arm64_stack_color(tcb->stack_base_ptr, tcb->adj_stack_size);
#endif /* CONFIG_STACK_COLORATION */

      tcb->flags |= TCB_FLAG_FREE_STACK;
      return OK;
    }

  return ERROR;
}

View file

@ -0,0 +1,156 @@
/****************************************************************************
* arch/arm64/src/common/arm64_doirq.c
*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership. The
* ASF licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the
* License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
****************************************************************************/
/****************************************************************************
* Included Files
****************************************************************************/
#include <nuttx/config.h>
#include <stdint.h>
#include <assert.h>
#include <sched.h>
#include <debug.h>
#include <nuttx/irq.h>
#include <nuttx/arch.h>
#include <nuttx/board.h>
#include "task/task.h"
#include "sched/sched.h"
#include "group/group.h"
#include "irq/irq.h"
#include "arm64_arch.h"
#include "arm64_internal.h"
#include "arm64_gic.h"
#include "arm64_fatal.h"
/****************************************************************************
* Public data
****************************************************************************/
/* g_current_regs[] holds a reference to the current interrupt level
 * register storage structure.  It is non-NULL only during interrupt
 * processing.  Access to g_current_regs[] must be through the macro
 * CURRENT_REGS for portability.
 */
/* For the case of configurations with multiple CPUs, then there must be one
* such value for each processor that can receive an interrupt.
*/
volatile uint64_t *g_current_regs[CONFIG_SMP_NCPUS];
/****************************************************************************
* Public Functions
****************************************************************************/
/****************************************************************************
 * Name: arm64_doirq
 *
 * Description:
 *   Receives the decoded GIC interrupt information and dispatches control
 *   to the attached interrupt handler.
 *
 * Input Parameters:
 *   irq  - The decoded IRQ number
 *   regs - The register context saved on interrupt entry
 *
 * Returned Value:
 *   The register context to restore on interrupt exit.  This differs
 *   from the 'regs' argument when a context switch occurred during
 *   dispatch.
 *
 ****************************************************************************/

uint64_t *arm64_doirq(int irq, uint64_t * regs)
{
  /* Nested interrupts are not supported */

  DEBUGASSERT(CURRENT_REGS == NULL);

  /* Current regs non-zero indicates that we are processing an interrupt;
   * CURRENT_REGS is also used to manage interrupt level context switches.
   */

  CURRENT_REGS = regs;

  /* Deliver the IRQ */

  irq_dispatch(irq, regs);

  /* Check for a context switch.  If a context switch occurred, then
   * CURRENT_REGS will have a different value than it did on entry.  If an
   * interrupt level context switch has occurred, then establish the
   * correct address environment before returning from the interrupt.
   */

  if (regs != CURRENT_REGS)
    {
      /* Need to do a context switch */

#ifdef CONFIG_ARCH_ADDRENV
      /* Make sure that the address environment for the previously
       * running task is closed down gracefully (data caches dump,
       * MMU flushed) and set up the address environment for the new
       * thread at the head of the ready-to-run list.
       */

      group_addrenv(NULL);
#endif
    }

  /* Set CURRENT_REGS to NULL to indicate that we are no longer in an
   * interrupt handler.  Return the (possibly switched) context.
   */

  regs = (uint64_t *)CURRENT_REGS;
  CURRENT_REGS = NULL;

  return regs;
}
/****************************************************************************
 * Name: up_irqinitialize
 *
 * Description:
 *   This function is called by up_initialize() during the bring-up of the
 *   system.  It is the responsibility of this function to put the
 *   interrupt subsystem into the working and ready state.
 *
 ****************************************************************************/

void up_irqinitialize(void)
{
  /* The following operations need to be atomic, but since this function
   * is called early in the initialization sequence, we expect to have
   * exclusive access to the GIC.
   */

  /* Initialize the Generic Interrupt Controller (GIC) for CPU0 */

  arm64_gic_initialize();   /* Initialization common to all CPUs */

  /* CURRENT_REGS is non-NULL only while processing an interrupt */

  CURRENT_REGS = NULL;

#ifdef CONFIG_SMP
  /* Attach and enable the SGI used for inter-CPU pause requests */

  arm64_smp_sgi_init();
#endif

#ifndef CONFIG_SUPPRESS_INTERRUPTS
  /* And finally, enable interrupts */

  up_irq_enable();
#endif
}

View file

@ -0,0 +1,158 @@
/****************************************************************************
* arch/arm64/src/common/arm64_exit.c
*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership. The
* ASF licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the
* License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
****************************************************************************/
/****************************************************************************
* Included Files
****************************************************************************/
#include <nuttx/config.h>
#include <sched.h>
#include <debug.h>
#include <nuttx/arch.h>
#include <nuttx/irq.h>
#ifdef CONFIG_DUMP_ON_EXIT
# include <nuttx/fs/fs.h>
#endif
#include "task/task.h"
#include "sched/sched.h"
#include "group/group.h"
#include "irq/irq.h"
#include "arm64_internal.h"
#ifdef CONFIG_ARCH_FPU
#include "arm64_fpu.h"
#endif
/****************************************************************************
* Pre-processor Definitions
****************************************************************************/
#ifndef CONFIG_DEBUG_SCHED_INFO
# undef CONFIG_DUMP_ON_EXIT
#endif
/****************************************************************************
* Private Functions
****************************************************************************/
/****************************************************************************
 * Name: _up_dumponexit
 *
 * Description:
 *   Dump the state of all tasks whenever on task exits.  This is debug
 *   instrumentation that was added to check file-related reference
 *   counting but could be useful again sometime in the future.
 *
 * Input Parameters:
 *   tcb - The TCB of the task being visited
 *   arg - Unused callback argument from nxsched_foreach()
 *
 ****************************************************************************/

#ifdef CONFIG_DUMP_ON_EXIT
static void _up_dumponexit(struct tcb_s *tcb, void *arg)
{
  struct filelist *filelist;
  int i;
  int j;

  sinfo(" TCB=%p name=%s pid=%d\n", tcb, tcb->name, tcb->pid);
  sinfo(" priority=%d state=%d\n", tcb->sched_priority, tcb->task_state);

  /* FIX: tg_filelist is embedded in the group structure, so its address
   * must be taken; the original assigned the struct itself to a pointer.
   */

  filelist = &tcb->group->tg_filelist;

  /* Walk every file-descriptor block and report open inodes */

  for (i = 0; i < filelist->fl_rows; i++)
    {
      for (j = 0; j < CONFIG_NFILE_DESCRIPTORS_PER_BLOCK; j++)
        {
          struct inode *inode = filelist->fl_files[i][j].f_inode;
          if (inode)
            {
              sinfo(" fd=%d refcount=%d\n",
                    i * CONFIG_NFILE_DESCRIPTORS_PER_BLOCK + j,
                    inode->i_crefs);
            }
        }
    }
}
#endif
/****************************************************************************
* Public Functions
****************************************************************************/
/****************************************************************************
 * Name: up_exit
 *
 * Description:
 *   This function causes the currently executing task to cease
 *   to exist.  This is a special case of task_delete() where the task to
 *   be deleted is the currently executing task.  It is more complex
 *   because a context switch must be performed to the next ready to run
 *   task.
 *
 * Input Parameters:
 *   status - The exit status (unused here)
 *
 ****************************************************************************/

void up_exit(int status)
{
  struct tcb_s *tcb = this_task();

  UNUSED(status);

  /* Make sure that we are in a critical section with local interrupts.
   * The IRQ state will be restored when the next task is started.
   */

  enter_critical_section();

#ifdef CONFIG_DUMP_ON_EXIT
  sinfo("Other tasks:\n");
  nxsched_foreach(_up_dumponexit, NULL);
#endif

  /* Destroy the task at the head of the ready to run list. */

#ifdef CONFIG_ARCH_FPU
  /* Release the exiting task's FPU context first */

  arm64_destory_fpu(tcb);
#endif

  nxtask_exit();

  /* Now, perform the context switch to the new ready-to-run task at the
   * head of the list.
   */

  tcb = this_task();

  /* Adjusts time slice for SCHED_RR & SCHED_SPORADIC cases
   * NOTE: the API also adjusts the global IRQ control for SMP
   */

  nxsched_resume_scheduler(tcb);

#ifdef CONFIG_ARCH_ADDRENV
  /* Make sure that the address environment for the previously running
   * task is closed down gracefully (data caches dump, MMU flushed) and
   * set up the address environment for the new thread at the head of
   * the ready-to-run list.
   */

  group_addrenv(tcb);
#endif

  /* Then switch contexts */

  arm64_fullcontextrestore(tcb->xcp.regs);
}

View file

@ -0,0 +1,365 @@
/****************************************************************************
* arch/arm64/src/common/arm64_fatal.c
*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership. The
* ASF licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the
* License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
****************************************************************************/
/****************************************************************************
* Included Files
****************************************************************************/
#include <nuttx/config.h>
#include <sys/types.h>
#include <stdint.h>
#include <arch/irq.h>
#include <debug.h>
#include <assert.h>
#include <sched.h>
#include <nuttx/arch.h>
#include <nuttx/kmalloc.h>
#include <nuttx/tls.h>
#include <nuttx/board.h>
#include <arch/chip/chip.h>
#include <nuttx/syslog/syslog.h>
#include "sched/sched.h"
#include "irq/irq.h"
#include "arm64_arch.h"
#include "arm64_internal.h"
#include "arm64_fatal.h"
#include "arm64_mmu.h"
#include "arm64_fatal.h"
#include "arm64_arch_timer.h"
#ifdef CONFIG_ARCH_FPU
#include "arm64_fpu.h"
#endif
/****************************************************************************
* Private Functions
****************************************************************************/
/****************************************************************************
* Name: print_ec_cause
****************************************************************************/
/****************************************************************************
 * Name: print_ec_cause
 *
 * Description:
 *   Decode the Exception Class field (ESR_ELn[31:26]) of an Exception
 *   Syndrome Register value and print a human readable description of the
 *   exception cause.  Encodings not listed in the table (reserved or
 *   unused classes) print nothing, matching the former switch default.
 *
 * Input Parameters:
 *   esr - Raw ESR_ELn value captured at the fatal exception.
 *
 ****************************************************************************/

static void print_ec_cause(uint64_t esr)
{
  /* EC encodings per the Arm Architecture Reference Manual (ESR_ELx.EC).
   * A lookup table replaces the former ~190-line switch and avoids the
   * non-standard GCC binary-literal extension (0b...).
   */

  static const struct
  {
    uint8_t ec;          /* ESR_ELx.EC encoding */
    const char *cause;   /* Human readable description */
  }
  g_ec_cause[] =
  {
    { 0x00, "Unknown reason" },
    { 0x01, "Trapped WFI or WFE instruction execution" },
    { 0x03, "Trapped MCR or MRC access with (coproc==0b1111) that "
            "is not reported using EC 0b000000" },
    { 0x04, "Trapped MCRR or MRRC access with (coproc==0b1111) "
            "that is not reported using EC 0b000000" },
    { 0x05, "Trapped MCR or MRC access with (coproc==0b1110)" },
    { 0x06, "Trapped LDC or STC access" },
    { 0x07, "Trapped access to SVE, Advanced SIMD, or "
            "floating-point functionality" },
    { 0x0c, "Trapped MRRC access with (coproc==0b1110)" },
    { 0x0d, "Branch Target Exception" },
    { 0x0e, "Illegal Execution state" },
    { 0x11, "SVC instruction execution in AArch32 state" },
    { 0x18, "Trapped MSR, MRS or System instruction execution in "
            "AArch64 state, that is not reported using EC "
            "0b000000, 0b000001 or 0b000111" },
    { 0x19, "Trapped access to SVE functionality" },
    { 0x20, "Instruction Abort from a lower Exception level, that "
            "might be using AArch32 or AArch64" },
    { 0x21, "Instruction Abort taken without a change "
            "in Exception level." },
    { 0x22, "PC alignment fault exception." },
    { 0x24, "Data Abort from a lower Exception level, that might "
            "be using AArch32 or AArch64" },
    { 0x25, "Data Abort taken without a change in Exception level" },
    { 0x26, "SP alignment fault exception" },
    { 0x28, "Trapped floating-point exception "
            "taken from AArch32 state" },
    { 0x2c, "Trapped floating-point exception "
            "taken from AArch64 state." },
    { 0x2f, "SError interrupt" },
    { 0x30, "Breakpoint exception from a lower Exception level, "
            "that might be using AArch32 or AArch64" },
    { 0x31, "Breakpoint exception taken without a change in "
            "Exception level" },
    { 0x32, "Software Step exception from a lower Exception level, "
            "that might be using AArch32 or AArch64" },
    { 0x33, "Software Step exception taken without a change in "
            "Exception level" },
    { 0x34, "Watchpoint exception from a lower Exception level, "
            "that might be using AArch32 or AArch64" },
    { 0x35, "Watchpoint exception taken without a change in "
            "Exception level." },
    { 0x38, "BKPT instruction execution in AArch32 state" },
    { 0x3c, "BRK instruction execution in AArch64 state." },
  };

  uint32_t ec = (uint32_t)esr >> 26;
  size_t i;

  for (i = 0; i < sizeof(g_ec_cause) / sizeof(g_ec_cause[0]); i++)
    {
      if (g_ec_cause[i].ec == ec)
        {
          sinfo("%s\n", g_ec_cause[i].cause);
          break;
        }
    }
}
/****************************************************************************
* Public Functions
****************************************************************************/
/****************************************************************************
* Name: arm64_fatal_error
*
* Description:
*
****************************************************************************/
/****************************************************************************
 * Name: arm64_fatal_error
 *
 * Description:
 *   Report an unrecoverable error: print the reason code and the current
 *   CPU/task, then (unless the reason is a spurious IRQ) read and dump the
 *   syndrome registers (ESR/FAR/ELR) for the current exception level and
 *   decode the exception class.  This function never returns: it spins
 *   forever so the diagnostic output remains visible.
 *
 * Input Parameters:
 *   reason - One of the K_ERR_* codes from arm64_fatal.h.
 *   reg    - Saved register context at the fault, or NULL if unavailable.
 *
 ****************************************************************************/

void arm64_fatal_error(unsigned int reason, struct regs_context * reg)
{
  uint64_t el, esr, elr, far;
  int cpu = up_cpu_index();

  sinfo("reason = %d\n", reason);
  sinfo("arm64_fatal_error: CPU%d task: %s\n", cpu, running_task()->name);

  /* A spurious IRQ has no meaningful syndrome information to dump */

  if (reason != K_ERR_SPURIOUS_IRQ)
    {
      /* CurrentEL[3:2] selects which banked syndrome registers to read */

      __asm__ volatile ("mrs %0, CurrentEL" : "=r" (el));

      switch (GET_EL(el))
        {
          case MODE_EL1:
            {
              sinfo("CurrentEL: MODE_EL1\n");
              __asm__ volatile ("mrs %0, esr_el1" : "=r" (esr));
              __asm__ volatile ("mrs %0, far_el1" : "=r" (far));
              __asm__ volatile ("mrs %0, elr_el1" : "=r" (elr));
              break;
            }

          case MODE_EL2:
            {
              sinfo("CurrentEL: MODE_EL2\n");
              __asm__ volatile ("mrs %0, esr_el2" : "=r" (esr));
              __asm__ volatile ("mrs %0, far_el2" : "=r" (far));
              __asm__ volatile ("mrs %0, elr_el2" : "=r" (elr));
              break;
            }

          case MODE_EL3:
            {
              sinfo("CurrentEL: MODE_EL3\n");
              __asm__ volatile ("mrs %0, esr_el3" : "=r" (esr));
              __asm__ volatile ("mrs %0, far_el3" : "=r" (far));
              __asm__ volatile ("mrs %0, elr_el3" : "=r" (elr));
              break;
            }

          default:
            {
              sinfo("CurrentEL: unknown\n");

              /* Just to keep the compiler happy */

              esr = elr = far = 0;
              break;
            }
        }

      /* EL0 has no accessible syndrome registers; skip the dump there */

      if (GET_EL(el) != MODE_EL0)
        {
          sinfo("ESR_ELn: 0x%"PRIx64"\n", esr);
          sinfo("FAR_ELn: 0x%"PRIx64"\n", far);
          sinfo("ELR_ELn: 0x%"PRIx64"\n", elr);

          print_ec_cause(esr);
        }
    }

  if (reg != NULL)
    {
      arm64_dump_fatal(reg);
    }

  /* Halt here forever; the delay keeps the busy-loop cheap */

  for (; ; )
    {
      up_mdelay(1000);
    }
}

View file

@ -0,0 +1,83 @@
/****************************************************************************
* arch/arm64/src/common/arm64_fatal.h
*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership. The
* ASF licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the
* License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
****************************************************************************/
#ifndef __ARCH_ARM64_SRC_COMMON_ARM64_FATAL_H
#define __ARCH_ARM64_SRC_COMMON_ARM64_FATAL_H
/**
* @defgroup fatal_apis Fatal error APIs
* @ingroup kernel_apis
* @{
*/
/* Fatal error reason codes passed to arm64_fatal_error() */

#define K_ERR_CPU_EXCEPTION (0)   /* Synchronous CPU exception */
#define K_ERR_CPU_MODE32 (1)      /* Unexpected AArch32 state */
#define K_ERR_SPURIOUS_IRQ (2)    /* IRQ with no registered handler */

#ifndef __ASSEMBLY__

/****************************************************************************
 * Included Files
 ****************************************************************************/

#include <nuttx/config.h>
#include <stdint.h>
#include <stdbool.h>
#include <debug.h>
#include <assert.h>

/****************************************************************************
 * Pre-processor Definitions
 ****************************************************************************/

/* NOTE(review): this redefines the GCC/Clang builtin of the same name so
 * that "unreachable" code panics instead of being undefined behavior.
 * The double-underscore name is reserved for the implementation; verify
 * this does not conflict with compiler headers that call the real builtin.
 */

#define __builtin_unreachable() \
  do \
    { \
      sinfo("Unreachable code\n"); \
      PANIC(); \
    } while (true)

/****************************************************************************
 * Public Data
 ****************************************************************************/

/****************************************************************************
 * Public Function Prototypes
 ****************************************************************************/

/****************************************************************************
 * Name: arm64_fatal_error
 *
 * Description:
 *   fatal error handle for arm64
 * Input Parameters:
 *   reason: error reason
 *   reg: exception stack reg context
 *
 * Returned Value:
 *
 ****************************************************************************/

void arm64_fatal_error(unsigned int reason, struct regs_context * reg);

/* Dump the saved register context of the faulting task */

void arm64_dump_fatal(struct regs_context * reg);
#endif /* __ASSEMBLY__ */
#endif /* __ARCH_ARM64_SRC_COMMON_ARM64_FATAL_H */

View file

@ -0,0 +1,242 @@
/***************************************************************************
* arch/arm64/src/common/arm64_fpu.c
*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership. The
* ASF licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the
* License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
***************************************************************************/
/***************************************************************************
* Included Files
***************************************************************************/
#include <nuttx/config.h>
#include <inttypes.h>
#include <stdint.h>
#include <string.h>
#include <assert.h>
#include <errno.h>
#include <debug.h>
#include <nuttx/sched.h>
#include <nuttx/arch.h>
#include <arch/irq.h>
#include "sched/sched.h"
#include "arm64_arch.h"
#include "arm64_vfork.h"
#include "arm64_internal.h"
#include "arm64_fatal.h"
#include "arm64_fpu.h"
/***************************************************************************
* Private Data
***************************************************************************/
static struct fpu_reg g_idle_thread_fpu[CONFIG_SMP_NCPUS];
static struct arm64_cpu_fpu_context g_cpu_fpu_ctx[CONFIG_SMP_NCPUS];
/***************************************************************************
* Private Functions
***************************************************************************/
/* enable FPU access trap */
static void arm64_fpu_access_trap_enable(void)
{
  /* Clear the CPACR_EL1.FPEN "no trap" bits so that the next FP/SIMD
   * instruction at EL1/EL0 raises an access exception.
   */

  uint64_t regval = read_sysreg(cpacr_el1);

  regval &= ~CPACR_EL1_FPEN_NOTRAP;
  write_sysreg(regval, cpacr_el1);

  ARM64_ISB();
}
/* disable FPU access trap */
static void arm64_fpu_access_trap_disable(void)
{
  /* Set the CPACR_EL1.FPEN "no trap" bits so FP/SIMD instructions
   * execute without trapping.
   */

  uint64_t regval = read_sysreg(cpacr_el1);

  regval |= CPACR_EL1_FPEN_NOTRAP;
  write_sysreg(regval, cpacr_el1);

  ARM64_ISB();
}
/***************************************************************************
* Public Functions
***************************************************************************/
/***************************************************************************
 * Name: arm64_init_fpu
 *
 * Description:
 *   Initialize the FPU save area attached to a task.  Idle threads
 *   (pid < CONFIG_SMP_NCPUS) additionally own the per-CPU FPU context
 *   and are given the statically allocated per-CPU save area.
 *
 ***************************************************************************/

void arm64_init_fpu(struct tcb_s *tcb)
{
  struct fpu_reg *regs;
  int cpu = this_cpu();

  if (tcb->pid < CONFIG_SMP_NCPUS)
    {
      /* Idle thread: reset this CPU's FPU bookkeeping and point the
       * task at the static per-CPU save area.
       */

      memset(&g_cpu_fpu_ctx[cpu], 0, sizeof(struct arm64_cpu_fpu_context));
      g_cpu_fpu_ctx[cpu].idle_thread = tcb;

      tcb->xcp.fpu_regs = (uint64_t *)&g_idle_thread_fpu[cpu];
    }

  regs = (struct fpu_reg *)tcb->xcp.fpu_regs;
  memset(regs, 0, sizeof(struct fpu_reg));
  regs->fpu_trap = 0;
}
/***************************************************************************
 * Name: arm64_destory_fpu
 *
 * Description:
 *   Release FPU ownership when a task is destroyed so that no stale
 *   context is ever saved on its behalf.
 *
 ***************************************************************************/

void arm64_destory_fpu(struct tcb_s * tcb)
{
  int cpu = this_cpu();

  /* Drop ownership only if the dying task currently owns this CPU's FPU */

  if (g_cpu_fpu_ctx[cpu].fpu_owner == tcb)
    {
      g_cpu_fpu_ctx[cpu].fpu_owner = NULL;
    }
}
/***************************************************************************
* Name: arm64_fpu_enter_exception
*
* Description:
* called at every time get into a exception
*
***************************************************************************/
void arm64_fpu_enter_exception(void)
{
  /* Intentionally empty: no FPU bookkeeping is needed on exception entry
   * in the current lazy-switching scheme.
   */
}

void arm64_fpu_exit_exception(void)
{
  /* Intentionally empty: nothing to restore on exception exit */
}
/***************************************************************************
 * Name: arm64_fpu_trap
 *
 * Description:
 *   Handle an FP/SIMD access trap (lazy FPU context switch).  The previous
 *   FPU owner's registers are saved, the trapping context's registers are
 *   restored, and the trapping task becomes the new FPU owner.  If the
 *   trap occurred inside nested exception context, ownership is handed to
 *   the idle thread instead of the interrupted task.
 *
 * Input Parameters:
 *   regs - Exception register context (currently unused).
 *
 ***************************************************************************/

void arm64_fpu_trap(struct regs_context * regs)
{
  struct tcb_s * owner;
  struct fpu_reg *fpu_reg;

  UNUSED(regs);

  /* disable fpu trap access */

  arm64_fpu_access_trap_disable();

  /* save current fpu owner's context */

  owner = g_cpu_fpu_ctx[this_cpu()].fpu_owner;

  if (owner != NULL)
    {
      arm64_fpu_save((struct fpu_reg *)owner->xcp.fpu_regs);

      /* Ensure the save is complete before ownership changes */

      ARM64_DSB();
      g_cpu_fpu_ctx[this_cpu()].save_count++;
      g_cpu_fpu_ctx[this_cpu()].fpu_owner = NULL;
    }

  if (arch_get_exception_depth() > 1)
    {
      /* if get_exception_depth > 1
       * it means FPU access exception occurred in exception context
       * switch FPU owner to idle thread
       */

      owner = g_cpu_fpu_ctx[this_cpu()].idle_thread;
    }
  else
    {
      owner = (struct tcb_s *)arch_get_current_tcb();
    }

  /* restore our context */

  arm64_fpu_restore((struct fpu_reg *)owner->xcp.fpu_regs);
  g_cpu_fpu_ctx[this_cpu()].restore_count++;

  /* become new owner */

  g_cpu_fpu_ctx[this_cpu()].fpu_owner = owner;

  /* NOTE(review): fpu_trap is cleared here even though a trap has just
   * occurred for this task -- verify the intended interaction with the
   * fpu_trap test in arm64_fpu_context_restore().
   */

  fpu_reg = (struct fpu_reg *)owner->xcp.fpu_regs;
  fpu_reg->fpu_trap = 0;
}
/***************************************************************************
 * Name: arm64_fpu_context_restore
 *
 * Description:
 *   Configure the FP/SIMD access trap for the task being switched in.
 *   If the incoming task has previously taken an FPU trap and still owns
 *   this CPU's FPU register bank, FP access is left enabled; in every
 *   other case the trap is armed so the task's first FP instruction will
 *   fault and trigger a lazy restore in arm64_fpu_trap().
 *
 *   The former implementation unconditionally called
 *   arm64_fpu_access_trap_enable() on entry and then immediately adjusted
 *   the trap again, causing a redundant enable/disable toggle (two extra
 *   system-register writes plus ISBs) on every switch to the FPU owner.
 *   The final trap state is identical without the initial call.
 *
 ***************************************************************************/

void arm64_fpu_context_restore(void)
{
  struct tcb_s *new_tcb = (struct tcb_s *)arch_get_current_tcb();
  struct fpu_reg *fpu_reg = (struct fpu_reg *)new_tcb->xcp.fpu_regs;

  if (fpu_reg->fpu_trap == 0)
    {
      /* FPU trap hasn't happened at this task: arm the trap so its
       * first FP/SIMD instruction faults and restores lazily.
       */

      arm64_fpu_access_trap_enable();
    }
  else if (new_tcb == g_cpu_fpu_ctx[this_cpu()].fpu_owner)
    {
      /* The task's FPU state is still live in the registers: allow
       * direct FP access without trapping.
       */

      arm64_fpu_access_trap_disable();
    }
  else
    {
      /* Another task has used the FPU since: re-arm the trap */

      arm64_fpu_access_trap_enable();
    }

  g_cpu_fpu_ctx[this_cpu()].switch_count++;
}
/* Arm the FP/SIMD access trap with interrupts masked */

void arm64_fpu_enable(void)
{
  irqstate_t state;

  state = up_irq_save();
  arm64_fpu_access_trap_enable();
  up_irq_restore(state);
}
/* Disarm the FP/SIMD access trap with interrupts masked */

void arm64_fpu_disable(void)
{
  irqstate_t state;

  state = up_irq_save();
  arm64_fpu_access_trap_disable();
  up_irq_restore(state);
}
/***************************************************************************
* Name: up_fpucmp
*
* Description:
* compare FPU areas from thread context
*
***************************************************************************/
/***************************************************************************
 * Name: up_fpucmp
 *
 * Description:
 *   Compare the FPU register areas embedded in two saved thread contexts.
 *   The FPU registers follow the general-purpose registers, so the
 *   comparison starts XCPTCONTEXT_GP_SIZE bytes into each save area.
 *
 *   Fix: the original performed arithmetic directly on `const void *`,
 *   which is a GCC extension and not valid ISO C; the offsets are now
 *   applied through `const char *`.
 *
 * Input Parameters:
 *   saveregs1 - Address of the first saved context
 *   saveregs2 - Address of the second saved context
 *
 * Returned Value:
 *   True if the two FPU register areas are identical.
 *
 ***************************************************************************/

bool up_fpucmp(const void *saveregs1, const void *saveregs2)
{
  const uint64_t *regs1 =
    (const uint64_t *)((const char *)saveregs1 + XCPTCONTEXT_GP_SIZE);
  const uint64_t *regs2 =
    (const uint64_t *)((const char *)saveregs2 + XCPTCONTEXT_GP_SIZE);

  /* Each FPU register slot is 8 bytes wide */

  return memcmp(regs1, regs2, 8 * XCPTCONTEXT_FPU_REGS) == 0;
}

View file

@ -0,0 +1,74 @@
/****************************************************************************
* arch/arm64/src/common/arm64_fpu.h
*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership. The
* ASF licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the
* License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
****************************************************************************/
#ifndef __ARCH_ARM64_SRC_COMMON_ARM64_FPU_H
#define __ARCH_ARM64_SRC_COMMON_ARM64_FPU_H
#ifndef __ASSEMBLY__
/****************************************************************************
* Included Files
****************************************************************************/
#include <nuttx/config.h>
#include <stdint.h>
#include <debug.h>
#include <assert.h>
/****************************************************************************
* Type Declarations
****************************************************************************/
/* Per-CPU bookkeeping for lazy FPU context switching */

struct arm64_cpu_fpu_context
{
  /* owner of current CPU's FPU */

  struct tcb_s *fpu_owner;    /* Task whose FP/SIMD state is live in regs */
  struct tcb_s *idle_thread;  /* This CPU's idle task (fallback owner) */

  /* for statistic propose */

  int save_count;             /* Lazy context saves performed */
  int restore_count;          /* Lazy context restores performed */
  int switch_count;           /* Context switches processed */
  int exe_depth_count;        /* NOTE(review): not updated in the visible
                               * code; presumably counts traps taken from
                               * nested exception context -- verify. */
};
/****************************************************************************
* Public Data
****************************************************************************/
/****************************************************************************
* Public Function Prototypes
****************************************************************************/
/* Attach and zero the per-task FPU save area; idle tasks additionally own
 * the per-CPU context.
 */

void arm64_init_fpu(struct tcb_s *tcb);

/* Release FPU ownership when a task is destroyed */

void arm64_destory_fpu(struct tcb_s *tcb);

/* IRQ-safe wrappers that disarm/arm the CPACR_EL1 FP/SIMD access trap */

void arm64_fpu_disable(void);
void arm64_fpu_enable(void);

/* Assembly helpers: bulk save/restore of q0-q31 plus fpsr/fpcr */

void arm64_fpu_save(struct fpu_reg *saved_fp_context);
void arm64_fpu_restore(struct fpu_reg *saved_fp_context);
#endif /* __ASSEMBLY__ */
#endif /* __ARCH_ARM64_SRC_COMMON_ARM64_FPU_H */

View file

@ -0,0 +1,101 @@
/****************************************************************************
* arch/arm64/src/common/arm64_fpu_func.S
*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership. The
* ASF licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the
* License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
****************************************************************************/
/****************************************************************************
* Included Files
****************************************************************************/
#include <nuttx/config.h>
#include "arm64_macro.inc"
#include "arch/irq.h"
#include "arm64_fatal.h"
/****************************************************************************
* Public Symbols
****************************************************************************/
.file "arm64_fpu_func.S"
/****************************************************************************
* Assembly Macros
****************************************************************************/
/****************************************************************************
* Private Functions
****************************************************************************/
/****************************************************************************
* Public Functions
****************************************************************************/
/****************************************************************************
 * Name: arm64_fpu_save
 *
 * Description:
 *   Store the full FP/SIMD state -- the 32 128-bit registers q0-q31
 *   followed by the 32-bit FPSR and FPCR -- into the struct fpu_reg
 *   pointed to by x0.  Clobbers x1 and x2 (caller-saved).
 *
 ****************************************************************************/

GTEXT(arm64_fpu_save)
SECTION_FUNC(text, arm64_fpu_save)
stp q0, q1, [x0, #(16 * FPU_REG_Q0)]
stp q2, q3, [x0, #(16 * FPU_REG_Q2)]
stp q4, q5, [x0, #(16 * FPU_REG_Q4)]
stp q6, q7, [x0, #(16 * FPU_REG_Q6)]
stp q8, q9, [x0, #(16 * FPU_REG_Q8)]
stp q10, q11, [x0, #(16 * FPU_REG_Q10)]
stp q12, q13, [x0, #(16 * FPU_REG_Q12)]
stp q14, q15, [x0, #(16 * FPU_REG_Q14)]
stp q16, q17, [x0, #(16 * FPU_REG_Q16)]
stp q18, q19, [x0, #(16 * FPU_REG_Q18)]
stp q20, q21, [x0, #(16 * FPU_REG_Q20)]
stp q22, q23, [x0, #(16 * FPU_REG_Q22)]
stp q24, q25, [x0, #(16 * FPU_REG_Q24)]
stp q26, q27, [x0, #(16 * FPU_REG_Q26)]
stp q28, q29, [x0, #(16 * FPU_REG_Q28)]
stp q30, q31, [x0, #(16 * FPU_REG_Q30)]

/* FPSR/FPCR are 32-bit; store them as words after the 32 q-registers */

mrs x1, fpsr
mrs x2, fpcr
str w1, [x0, #(16 * 32 + 0)]
str w2, [x0, #(16 * 32 + 4)]
ret
/****************************************************************************
 * Name: arm64_fpu_restore
 *
 * Description:
 *   Reload the full FP/SIMD state (q0-q31, FPSR, FPCR) from the struct
 *   fpu_reg pointed to by x0 -- the mirror of arm64_fpu_save.
 *   Clobbers x1 and x2 (caller-saved).
 *
 ****************************************************************************/

GTEXT(arm64_fpu_restore)
SECTION_FUNC(text, arm64_fpu_restore)
ldp q0, q1, [x0, #(16 * FPU_REG_Q0)]
ldp q2, q3, [x0, #(16 * FPU_REG_Q2)]
ldp q4, q5, [x0, #(16 * FPU_REG_Q4)]
ldp q6, q7, [x0, #(16 * FPU_REG_Q6)]
ldp q8, q9, [x0, #(16 * FPU_REG_Q8)]
ldp q10, q11, [x0, #(16 * FPU_REG_Q10)]
ldp q12, q13, [x0, #(16 * FPU_REG_Q12)]
ldp q14, q15, [x0, #(16 * FPU_REG_Q14)]
ldp q16, q17, [x0, #(16 * FPU_REG_Q16)]
ldp q18, q19, [x0, #(16 * FPU_REG_Q18)]
ldp q20, q21, [x0, #(16 * FPU_REG_Q20)]
ldp q22, q23, [x0, #(16 * FPU_REG_Q22)]
ldp q24, q25, [x0, #(16 * FPU_REG_Q24)]
ldp q26, q27, [x0, #(16 * FPU_REG_Q26)]
ldp q28, q29, [x0, #(16 * FPU_REG_Q28)]
ldp q30, q31, [x0, #(16 * FPU_REG_Q30)]

/* ldr w-form zero-extends, so the upper 32 bits written by msr are zero */

ldr w1, [x0, #(16 * 32 + 0)]
ldr w2, [x0, #(16 * 32 + 4)]
msr fpsr, x1
msr fpcr, x2
ret

View file

@ -0,0 +1,319 @@
/****************************************************************************
* arch/arm64/src/common/arm64_gic.h
*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership. The
* ASF licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the
* License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
*
****************************************************************************/
#ifndef __ARCH_ARM64_SRC_COMMON_ARM64_GIC_H
#define __ARCH_ARM64_SRC_COMMON_ARM64_GIC_H
/****************************************************************************
* Included Files
****************************************************************************/
#include <nuttx/config.h>
#include <arch/irq.h>
#include <arch/chip/chip.h>
/****************************************************************************
* Pre-processor Definitions
****************************************************************************/
/* GIC Distributor register Interface Base Addresses
* Arm® Generic Interrupt Controller Architecture Specification
* GIC architecture version 3 and version 4
*/
#define GIC_DIST_BASE CONFIG_GICD_BASE
#define GICD_CTLR (GIC_DIST_BASE + 0x0)
#define GICD_TYPER (GIC_DIST_BASE + 0x4)
#define GICD_IIDR (GIC_DIST_BASE + 0x8)
#define GICD_STATUSR (GIC_DIST_BASE + 0x10)
#define GICD_SETSPI_NSR (GIC_DIST_BASE + 0x40)
#define GICD_CLRSPI_NSR (GIC_DIST_BASE + 0x48)
#define GICD_SETSPI_SR (GIC_DIST_BASE + 0x50)
#define GICD_CLRSPI_SR (GIC_DIST_BASE + 0x58)
#define GICD_IGROUPRn (GIC_DIST_BASE + 0x80)
#define GICD_ISENABLERn (GIC_DIST_BASE + 0x100)
#define GICD_ICENABLERn (GIC_DIST_BASE + 0x180)
#define GICD_ISPENDRn (GIC_DIST_BASE + 0x200)
#define GICD_ICPENDRn (GIC_DIST_BASE + 0x280)
#define GICD_ISACTIVERn (GIC_DIST_BASE + 0x300)
#define GICD_ICACTIVERn (GIC_DIST_BASE + 0x380)
#define GICD_IPRIORITYRn (GIC_DIST_BASE + 0x400)
#define GICD_ITARGETSRn (GIC_DIST_BASE + 0x800)
#define GICD_ICFGRn (GIC_DIST_BASE + 0xc00)
#define GICD_SGIR (GIC_DIST_BASE + 0xf00)
#define GICD_IDREGS (GIC_DIST_BASE + 0xFFD0)
#define GICD_PIDR2 (GIC_DIST_BASE + 0xFFE8)
/* Offsets from GICD base or GICR(n) SGI_base */
#define GIC_DIST_IGROUPR 0x0080
#define GIC_DIST_ISENABLER 0x0100
#define GIC_DIST_ICENABLER 0x0180
#define GIC_DIST_ISPENDR 0x0200
#define GIC_DIST_ICPENDR 0x0280
#define GIC_DIST_ISACTIVER 0x0300
#define GIC_DIST_ICACTIVER 0x0380
#define GIC_DIST_IPRIORITYR 0x0400
#define GIC_DIST_ITARGETSR 0x0800
#define GIC_DIST_ICFGR 0x0c00
#define GIC_DIST_IGROUPMODR 0x0d00
#define GIC_DIST_SGIR 0x0f00
/* GICD GICR common access macros */
#define IGROUPR(base, n) (base + GIC_DIST_IGROUPR + (n) * 4)
#define ISENABLER(base, n) (base + GIC_DIST_ISENABLER + (n) * 4)
#define ICENABLER(base, n) (base + GIC_DIST_ICENABLER + (n) * 4)
#define ISPENDR(base, n) (base + GIC_DIST_ISPENDR + (n) * 4)
#define ICPENDR(base, n) (base + GIC_DIST_ICPENDR + (n) * 4)
#define IPRIORITYR(base, n) (base + GIC_DIST_IPRIORITYR + n)
#define ITARGETSR(base, n) (base + GIC_DIST_ITARGETSR + (n) * 4)
#define ICFGR(base, n) (base + GIC_DIST_ICFGR + (n) * 4)
#define IGROUPMODR(base, n) (base + GIC_DIST_IGROUPMODR + (n) * 4)
/* GICD_PIDR2 : Peripheral ID2 Register
* bit assignments
* [31:8] - IMPLEMENTATION DEFINED
* [7:4] ArchRev 0x1. GICv1.
* 0x2. GICv2.
* 0x3. GICv3.
* 0x4. GICv4.
* [3:0] - IMPLEMENTATION DEFINED.
*/
#define GICD_PIDR2_ARCH_MASK 0xf0
#define GICD_PIDR2_ARCH_GICV3 0x30
#define GICD_PIDR2_ARCH_GICV4 0x40
/* GICD_TYPER : Interrupt Controller Type Register
* Arm® Generic Interrupt Controller Architecture Specification
* GIC architecture version 3 and version 4
*/
#define GICD_TYPER_RSS BIT(26)
#define GICD_TYPER_LPIS BIT(17)
#define GICD_TYPER_MBIS BIT(16)
#define GICD_TYPER_ESPI BIT(8)
#define GICD_TYPER_ID_BITS(typer) ((((typer) >> 19) & 0x1f) + 1)
#define GICD_TYPER_NUM_LPIS(typer) ((((typer) >> 11) & 0x1f) + 1)
#define GICD_TYPER_SPIS(typer) ((((typer) & 0x1f) + 1) * 32)
#define GICD_TYPER_ESPIS(typer) \
(((typer) & GICD_TYPER_ESPI) ? GICD_TYPER_SPIS((typer) >> 27) : 0)
/* Common Helper Constants */
#define GIC_SGI_INT_BASE 0
#define GIC_PPI_INT_BASE 16
#define GIC_IS_SGI(intid) (((intid) >= GIC_SGI_INT_BASE) && \
((intid) < GIC_PPI_INT_BASE))
#define GIC_SPI_INT_BASE 32
#define GIC_NUM_INTR_PER_REG 32
#define GIC_NUM_CFG_PER_REG 16
#define GIC_NUM_PRI_PER_REG 4
/* GIC idle priority : value '0xff' will allow all interrupts */
#define GIC_IDLE_PRIO 0xff
/* Priority levels 0:255 */
#define GIC_PRI_MASK 0xff
/* '0xa0'is used to initialize each interrtupt default priority.
* This is an arbitrary value in current context.
* Any value '0x80' to '0xff' will work for both NS and S state.
* The values of individual interrupt and default has to be chosen
* carefully if PMR and BPR based nesting and preemption has to be done.
*/
#define GIC_INT_DEF_PRI_X4 0xa0a0a0a0
/* Register bit definitions */
/* GICD_CTLR Interrupt group definitions */
#define GICD_CTLR_ENABLE_G0 0
#define GICD_CTLR_ENABLE_G1NS 1
#define GICD_CTLR_ENABLE_G1S 2
#define GICD_CTRL_ARE_S 4
#define GICD_CTRL_ARE_NS 5
#define GICD_CTRL_NS 6
#define GICD_CGRL_E1NWF 7
/* GICD_CTLR Register write progress bit */
#define GICD_CTLR_RWP 31
/* GICR_CTLR */
#define GICR_CTLR_ENABLE_LPIS BIT(0)
#define GICR_CTLR_RWP 3
/* GICD_TYPER.ITLinesNumber 0:4 */
#define GICD_TYPER_ITLINESNUM_MASK 0x1f
/* GICR Re-Distributor registers, offsets from RD_base(n) */
#define GICR_CTLR 0x0000
#define GICR_IIDR 0x0004
#define GICR_TYPER 0x0008
#define GICR_STATUSR 0x0010
#define GICR_WAKER 0x0014
#define GICR_SETLPIR 0x0040
#define GICR_CLRLPIR 0x0048
#define GICR_PROPBASER 0x0070
#define GICR_PENDBASER 0x0078
#define GICR_INVLPIR 0x00A0
#define GICR_INVALLR 0x00B0
#define GICR_SYNCR 0x00C0
#define GICR_MOVLPIR 0x0100
#define GICR_MOVALLR 0x0110
#define GICR_IDREGS 0xFFD0
#define GICR_PIDR2 0xFFE8
/* GICR_PIDR2 : Peripheral ID2 Register
* bit assignments are the same as those for GICD_PIDR2)
* [31:8] - IMPLEMENTATION DEFINED
* [7:4] ArchRev 0x1. GICv1.
* 0x2. GICv2.
* 0x3. GICv3.
* 0x4. GICv4.
* [3:0] - IMPLEMENTATION DEFINED.
*/
#define GICR_PIDR2_ARCH_MASK 0xf0
#define GICR_PIDR2_ARCH_GICV3 0x30
#define GICR_PIDR2_ARCH_GICV4 0x40
/* GICR_TYPER : Redistributor Type Register
* Arm® Generic Interrupt Controller Architecture Specification
* GIC architecture version 3 and version 4
* chapter 9.11.35 for detail descriptions
*/
#define GICR_TYPER_PLPIS BIT(0)
#define GICR_TYPER_VLPIS BIT(1)
#define GICR_TYPER_DIRECTLPIS BIT(3)
#define GICR_TYPER_LAST BIT(4)
/* GICR_WAKER */
#define GICR_WAKER_PS 1
#define GICR_WAKER_CA 2
/* SGI base is at 64K offset from Redistributor */
#define GICR_SGI_BASE_OFF 0x10000
/* GICD_ICFGR: interrupt trigger-mode configuration (2 bits per interrupt) */

#define GICD_ICFGR_MASK             BIT_MASK(2)
#define GICD_ICFGR_TYPE             BIT(1)

/* BIT(0) reserved for IRQ_ZERO_LATENCY.  These flags were previously
 * defined twice in this header; the duplicate pair has been removed.
 */

#define IRQ_TYPE_LEVEL              BIT(1)
#define IRQ_TYPE_EDGE               BIT(2)

/* SPI interrupt ID range.  GIC_SPI_INT_BASE is already defined with the
 * other helper constants above; the duplicate definition was removed.
 */

#define GIC_SPI_MAX_INTID           1019
#define GIC_IS_SPI(intid)           (((intid) >= GIC_SPI_INT_BASE) && \
                                     ((intid) <= GIC_SPI_MAX_INTID))

/* GICD_IROUTER: SPI affinity routing registers (one 64-bit reg per SPI) */

#define GIC_DIST_IROUTER            0x6000
#define IROUTER(base, n)            (base + GIC_DIST_IROUTER + (n) * 8)

#define IRQ_DEFAULT_PRIORITY        0xa0
#define GIC_IRQ_SGI0 0
#define GIC_IRQ_SGI1 1
#define GIC_IRQ_SGI2 2
#define GIC_IRQ_SGI3 3
#define GIC_IRQ_SGI4 4
#define GIC_IRQ_SGI5 5
#define GIC_IRQ_SGI6 6
#define GIC_IRQ_SGI7 7
#define GIC_IRQ_SGI8 8
#define GIC_IRQ_SGI9 9
#define GIC_IRQ_SGI10 10
#define GIC_IRQ_SGI11 11
#define GIC_IRQ_SGI12 12
#define GIC_IRQ_SGI13 13
#define GIC_IRQ_SGI14 14
#define GIC_IRQ_SGI15 15
/****************************************************************************
* Public Function Prototypes
****************************************************************************/
bool arm64_gic_irq_is_enabled(unsigned int intid);
int arm64_gic_initialize(void);
void arm64_gic_irq_set_priority(unsigned int intid, unsigned int prio,
uint32_t flags);
/****************************************************************************
* Name: arm64_decodeirq
*
* Description:
* This function is called from the IRQ vector handler in arm_vectors.S.
* At this point, the interrupt has been taken and the registers have
* been saved on the stack. This function simply needs to determine the
* the irq number of the interrupt and then to call arm_doirq to dispatch
* the interrupt.
*
* Input Parameters:
* regs - A pointer to the register save area on the stack.
*
****************************************************************************/
uint64_t * arm64_decodeirq(uint64_t *regs);
int arm64_gic_raise_sgi(unsigned int sgi_id, uint64_t target_aff,
uint16_t target_list);
#ifdef CONFIG_SMP
#define SGI_CPU_PAUSE GIC_IRQ_SGI0
/****************************************************************************
* Name: arm64_pause_handler
*
* Description:
* This is the handler for SGI2. It performs the following operations:
*
* 1. It saves the current task state at the head of the current assigned
* task list.
* 2. It waits on a spinlock, then
* 3. Returns from interrupt, restoring the state of the new task at the
* head of the ready to run list.
*
* Input Parameters:
* Standard interrupt handling
*
* Returned Value:
* Zero on success; a negated errno value on failure.
*
****************************************************************************/
int arm64_pause_handler(int irq, void *context, void *arg);
void arm64_gic_secondary_init(void);
int arm64_smp_sgi_init(void);
#endif
#endif /* __ARCH_ARM64_SRC_COMMON_ARM64_GIC_H */

View file

@ -0,0 +1,613 @@
/***************************************************************************
* arch/arm64/src/common/arm64_gicv3.c
*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership. The
* ASF licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the
* License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
***************************************************************************/
/***************************************************************************
* Included Files
***************************************************************************/
#include <nuttx/config.h>
#include <debug.h>
#include <assert.h>
#include <nuttx/arch.h>
#include <arch/irq.h>
#include <arch/chip/chip.h>
#include <sched/sched.h>
#include "arm64_arch.h"
#include "arm64_internal.h"
#include "arm64_gic.h"
#include "arm64_fatal.h"
/***************************************************************************
* Pre-processor Definitions
***************************************************************************/
#define GICR_TYPER_NR_PPIS(r) \
({ \
unsigned int __ppinum = ((r) >> 27) & 0x1f; \
unsigned int __nr_ppis = 16; \
if (__ppinum == 1 || __ppinum == 2) \
{ __nr_ppis += __ppinum * 32; } \
__nr_ppis; \
})
/* selects redistributor SGI_base for current core for PPI and SGI
* selects distributor base for SPI
* The macro translates to distributor base for GICv2 and GICv1
*/
#define GET_DIST_BASE(intid) ((intid < GIC_SPI_INT_BASE) ? \
(gic_get_rdist() + GICR_SGI_BASE_OFF) \
: GIC_DIST_BASE)
#define IGROUPR_VAL 0xFFFFFFFFU
/* Redistributor base addresses for each core */
unsigned long gic_rdists[CONFIG_SMP_NCPUS];
/***************************************************************************
* Private Functions
***************************************************************************/
/* Return the redistributor base address recorded for the current CPU */

static inline unsigned long gic_get_rdist(void)
{
  return gic_rdists[this_cpu()];
}
/* Read GICD_CTLR, spinning until the register-write-pending bit clears,
 * and return the settled value.
 */

static inline uint32_t read_gicd_wait_rwp(void)
{
  uint32_t ctlr;

  do
    {
      ctlr = getreg32(GICD_CTLR);
    }
  while ((ctlr & BIT(GICD_CTLR_RWP)) != 0);

  return ctlr;
}
/* Wait for register write pending
 * TODO: add timed wait
 */

static int gic_wait_rwp(uint32_t intid)
{
  unsigned long addr;
  uint32_t pending;

  if (intid < GIC_SPI_INT_BASE)
    {
      /* SGI/PPI: poll this core's redistributor control register */

      addr    = (gic_get_rdist() + GICR_CTLR);
      pending = BIT(GICR_CTLR_RWP);
    }
  else
    {
      /* SPI: poll the distributor control register */

      addr    = GICD_CTLR;
      pending = BIT(GICD_CTLR_RWP);
    }

  while ((getreg32(addr) & pending) != 0)
    {
    }

  return 0;
}
/* Configure the priority and trigger type of an interrupt.
 *
 * intid - interrupt ID; SGI/PPI program the redistributor SGI frame,
 *         SPI the distributor (via GET_DIST_BASE)
 * prio  - priority byte, masked with GIC_PRI_MASK
 * flags - IRQ_TYPE_EDGE selects edge trigger, otherwise level
 */

void arm64_gic_irq_set_priority(unsigned int intid, unsigned int prio,
                                uint32_t flags)
{
  uint32_t mask = BIT(intid & (GIC_NUM_INTR_PER_REG - 1));
  uint32_t idx  = intid / GIC_NUM_INTR_PER_REG;
  uint32_t shift;
  uint32_t val;
  unsigned long base = GET_DIST_BASE(intid);

  /* Disable the interrupt */

  putreg32(mask, ICENABLER(base, idx));
  gic_wait_rwp(intid);

  /* PRIORITYR registers provide byte access */

  putreg8(prio & GIC_PRI_MASK, IPRIORITYR(base, intid));

  /* Interrupt type config.  SGIs have no configurable trigger, so only
   * PPI/SPI are programmed here (2 configuration bits per interrupt).
   */

  if (!GIC_IS_SGI(intid))
    {
      idx   = intid / GIC_NUM_CFG_PER_REG;
      shift = (intid & (GIC_NUM_CFG_PER_REG - 1)) * 2;
      val   = getreg32(ICFGR(base, idx));
      val  &= ~(GICD_ICFGR_MASK << shift);
      if (flags & IRQ_TYPE_EDGE)
        {
          val |= (GICD_ICFGR_TYPE << shift);
        }

      putreg32(val, ICFGR(base, idx));
    }
}
/* Enable (unmask) an interrupt by setting its ISENABLER bit */

void arm64_gic_irq_enable(unsigned int intid)
{
  uint32_t mask = BIT(intid & (GIC_NUM_INTR_PER_REG - 1));
  uint32_t idx  = intid / GIC_NUM_INTR_PER_REG;

  putreg32(mask, ISENABLER(GET_DIST_BASE(intid), idx));

  /* Affinity routing is enabled for Non-secure state (GICD_CTLR.ARE_NS
   * is set to '1' when GIC distributor is initialized) ,so need to set
   * SPI's affinity, now set it to be the PE on which it is enabled.
   */

  if (GIC_IS_SPI(intid))
    {
      putreg64(MPIDR_TO_CORE(GET_MPIDR()),
               IROUTER(GET_DIST_BASE(intid), intid));
    }
}
/* Disable (mask) an interrupt and wait for the write to take effect */

void arm64_gic_irq_disable(unsigned int intid)
{
  uint32_t mask = BIT(intid & (GIC_NUM_INTR_PER_REG - 1));
  uint32_t idx  = intid / GIC_NUM_INTR_PER_REG;

  putreg32(mask, ICENABLER(GET_DIST_BASE(intid), idx));

  /* poll to ensure write is complete */

  gic_wait_rwp(intid);
}
/* Query whether an interrupt is currently enabled.
 *
 * Returns true when the interrupt's ISENABLER bit reads as set.
 */

bool arm64_gic_irq_is_enabled(unsigned int intid)
{
  uint32_t bit    = BIT(intid & (GIC_NUM_INTR_PER_REG - 1));
  uint32_t regidx = intid / GIC_NUM_INTR_PER_REG;

  return (getreg32(ISENABLER(GET_DIST_BASE(intid), regidx)) & bit) != 0;
}
/* Acknowledge the highest-priority pending Group 1 interrupt and return
 * its INTID (the spurious ID 1023 when nothing is pending).
 */

unsigned int arm64_gic_get_active(void)
{
  /* (Pending -> Active / AP) or (AP -> AP) */

  return read_sysreg(ICC_IAR1_EL1);
}
/* Signal end-of-interrupt for intid on the Group 1 EOI register */

void arm64_gic_eoi(unsigned int intid)
{
  /* Interrupt request deassertion from peripheral to GIC happens
   * by clearing interrupt condition by a write to the peripheral
   * register. It is desired that the write transfer is complete
   * before the core tries to change GIC state from 'AP/Active' to
   * a new state on seeing 'EOI write'.
   * Since ICC interface writes are not ordered against Device
   * memory writes, a barrier is required to ensure the ordering.
   * The dsb will also ensure *completion* of previous writes with
   * DEVICE nGnRnE attribute.
   */

  ARM64_DSB();

  /* (AP -> Pending) Or (Active -> Inactive) or (AP to AP) nested case */

  write_sysreg(intid, ICC_EOIR1_EL1);
}
/* Generate a Software Generated Interrupt (SGI).
 *
 * sgi_id      - SGI number (must satisfy GIC_IS_SGI)
 * target_aff  - affinity value identifying the target cluster
 * target_list - bit mask of target PEs within that cluster
 *
 * Returns 0 on success.
 */

int arm64_gic_raise_sgi(unsigned int sgi_id, uint64_t target_aff,
                        uint16_t target_list)
{
  uint32_t aff3;
  uint32_t aff2;
  uint32_t aff1;
  uint64_t sgi_val;

  /* Use DEBUGASSERT for consistency with the rest of this file (see
   * arm64_decodeirq); plain assert() would stay active in release
   * builds, unlike the project convention.
   */

  DEBUGASSERT(GIC_IS_SGI(sgi_id));

  /* Extract affinity fields from target */

  aff1 = MPIDR_AFFLVL(target_aff, 1);
  aff2 = MPIDR_AFFLVL(target_aff, 2);
  aff3 = MPIDR_AFFLVL(target_aff, 3);

  sgi_val = GICV3_SGIR_VALUE(aff3, aff2, aff1, sgi_id, SGIR_IRM_TO_AFF,
                             target_list);

  /* Ensure prior memory writes are complete before raising the SGI */

  ARM64_DSB();
  write_sysreg(sgi_val, ICC_SGI1R);
  ARM64_ISB();

  return 0;
}
/* Wake up GIC redistributor.
 * clear ProcessorSleep and wait till ChildAsleep is cleared.
 * ProcessSleep to be cleared only when ChildAsleep is set
 * Check if redistributor is not powered already.
 */

static void gicv3_rdist_enable(unsigned long rdist)
{
  uint32_t temp;

  /* Already awake if ChildrenAsleep (CA) reads as clear */

  if (!(getreg32(rdist + GICR_WAKER) & BIT(GICR_WAKER_CA)))
    {
      return;
    }

  /* Clear the ProcessorSleep (PS) bit to request wake-up */

  temp = getreg32(rdist + GICR_WAKER);
  temp = temp & ~(BIT(GICR_WAKER_PS));
  putreg32(temp, rdist + GICR_WAKER);

  /* Wait until the redistributor reports awake (CA clears) */

  while (getreg32(rdist + GICR_WAKER) & BIT(GICR_WAKER_CA))
    {
    }
}
/* Initialize the cpu interface. This should be called by each core. */

static void gicv3_cpuif_init(void)
{
  uint32_t icc_sre;
  uint32_t intid;
  unsigned long base = gic_get_rdist() + GICR_SGI_BASE_OFF;

  /* Disable all sgi ppi */

  putreg32(BIT64_MASK(GIC_NUM_INTR_PER_REG), ICENABLER(base, 0));

  /* Any sgi/ppi intid ie. 0-31 will select GICR_CTRL */

  gic_wait_rwp(0);

  /* Clear pending */

  putreg32(BIT64_MASK(GIC_NUM_INTR_PER_REG), ICPENDR(base, 0));

  /* Configure all SGIs/PPIs as Group 1 (G1S or G1NS depending on
   * whether the kernel runs in EL1S or EL1NS; wording inherited from
   * the Zephyr port this is based on).
   * All interrupts will be delivered as irq
   */

  putreg32(IGROUPR_VAL, IGROUPR(base, 0));
  putreg32(BIT64_MASK(GIC_NUM_INTR_PER_REG), IGROUPMODR(base, 0));

  /* Configure default priorities for SGI 0:15 and PPI 0:15. */

  for (intid = 0; intid < GIC_SPI_INT_BASE;
       intid += GIC_NUM_PRI_PER_REG)
    {
      putreg32(GIC_INT_DEF_PRI_X4, IPRIORITYR(base, intid));
    }

  /* Configure PPIs as level triggered */

  putreg32(0, ICFGR(base, 1));

  /* Check if system interface can be enabled.
   * 'icc_sre_el3' needs to be configured at 'EL3'
   * to allow access to 'icc_sre_el1' at 'EL1'
   * eg: z_arch_el3_plat_init can be used by platform.
   */

  icc_sre = read_sysreg(ICC_SRE_EL1);

  if (!(icc_sre & ICC_SRE_ELX_SRE_BIT))
    {
      /* Request system-register access; read back to confirm it stuck */

      icc_sre =
        (icc_sre | ICC_SRE_ELX_SRE_BIT | ICC_SRE_ELX_DIB_BIT |
         ICC_SRE_ELX_DFB_BIT);
      write_sysreg(icc_sre, ICC_SRE_EL1);
      icc_sre = read_sysreg(ICC_SRE_EL1);
      assert(icc_sre & ICC_SRE_ELX_SRE_BIT);
    }

  /* Unmask all priorities at the CPU interface */

  write_sysreg(GIC_IDLE_PRIO, ICC_PMR_EL1);

  /* Allow group1 interrupts */

  write_sysreg(1, ICC_IGRPEN1_EL1);
}
/* One-time initialization of the GIC distributor: size the interrupt
 * space from GICD_TYPER, put every SPI into a known disabled/Group-1/
 * default-priority/level-triggered state, then enable the distributor
 * with affinity routing.
 */

static void gicv3_dist_init(void)
{
  unsigned int num_ints;
  unsigned int intid;
  unsigned int idx;
  unsigned long base = GIC_DIST_BASE;

  /* ITLinesNumber field: (N + 1) * 32 interrupt IDs supported */

  num_ints  = getreg32(GICD_TYPER);
  num_ints &= GICD_TYPER_ITLINESNUM_MASK;
  num_ints  = (num_ints + 1) << 5;

  /* Disable the distributor */

  putreg32(0, GICD_CTLR);
  gic_wait_rwp(GIC_SPI_INT_BASE);

#if 0
  /* Before configuration, we need to check whether
   * the GIC single security state mode is supported.
   * Make sure GICD_CTRL_NS is 1.
   */

  sys_set_bit(GICD_CTLR, GICD_CTRL_NS);
  __ASSERT(sys_test_bit(GICD_CTLR,
           GICD_CTRL_NS),
           "Current GIC does not support single security state");
#endif

  /* Default configuration of all SPIs */

  for (intid = GIC_SPI_INT_BASE; intid < num_ints;
       intid += GIC_NUM_INTR_PER_REG)
    {
      idx = intid / GIC_NUM_INTR_PER_REG;

      /* Disable interrupt */

      putreg32(BIT64_MASK(GIC_NUM_INTR_PER_REG),
               ICENABLER(base, idx));

      /* Clear pending */

      putreg32(BIT64_MASK(GIC_NUM_INTR_PER_REG),
               ICPENDR(base, idx));

      /* Group 1, no group modifier */

      putreg32(IGROUPR_VAL, IGROUPR(base, idx));
      putreg32(BIT64_MASK(GIC_NUM_INTR_PER_REG),
               IGROUPMODR(base, idx));
    }

  /* wait for rwp on GICD */

  gic_wait_rwp(GIC_SPI_INT_BASE);

  /* Configure default priorities for all SPIs. */

  for (intid = GIC_SPI_INT_BASE; intid < num_ints;
       intid += GIC_NUM_PRI_PER_REG)
    {
      putreg32(GIC_INT_DEF_PRI_X4, IPRIORITYR(base, intid));
    }

  /* Configure all SPIs as active low, level triggered by default */

  for (intid = GIC_SPI_INT_BASE; intid < num_ints;
       intid += GIC_NUM_CFG_PER_REG)
    {
      idx = intid / GIC_NUM_CFG_PER_REG;
      putreg32(0, ICFGR(base, idx));
    }

  /* Enable distributor with ARE */

  putreg32(BIT(GICD_CTRL_ARE_NS) | BIT(GICD_CTLR_ENABLE_G1NS),
           GICD_CTLR);

#if 0
  /* TODO: ARMv8-R support
   *
   * For GIC single security state(ARMv8-R), the config means
   * the GIC is under single security state which has
   * only two groups:
   * group 0 and group 1.
   * Then set GICD_CTLR_ARE and GICD_CTLR_ENABLE_G1 to enable Group 1
   * interrupt.
   * Since the GICD_CTLR_ARE and GICD_CTRL_ARE_S share BIT(4), and
   * similarly the GICD_CTLR_ENABLE_G1 and GICD_CTLR_ENABLE_G1NS share
   * BIT(1), we can reuse them.
   */

  putreg32(BIT(GICD_CTRL_ARE_S) | BIT(GICD_CTLR_ENABLE_G1NS),
           GICD_CTLR);
#endif
}
/* OS interface: enable the IRQ line 'irq' (thin wrapper over the GIC) */

void up_enable_irq(int irq)
{
  arm64_gic_irq_enable(irq);
}
/* OS interface: disable the IRQ line 'irq' (thin wrapper over the GIC) */

void up_disable_irq(int irq)
{
  arm64_gic_irq_disable(irq);
}
/***************************************************************************
 * Name: arm64_decodeirq
 *
 * Description:
 *   This function is called from the IRQ vector handler in arm64_vectors.S.
 *   At this point, the interrupt has been taken and the registers have
 *   been saved on the stack. This function simply needs to determine the
 *   irq number of the interrupt and then to call arm64_doirq to dispatch
 *   the interrupt.
 *
 * Input Parameters:
 *   regs - A pointer to the register save area on the stack.
 ***************************************************************************/

uint64_t * arm64_decodeirq(uint64_t * regs)
{
  int irq;

  /* Read the interrupt acknowledge register and get the interrupt ID */

  irq = arm64_gic_get_active();

  /* Ignore spurious IRQs. ICC_IAR1_EL1 will report 1023 if there is no
   * pending interrupt.
   */

  DEBUGASSERT(irq < NR_IRQS || irq == 1023);
  if (irq < NR_IRQS)
    {
      /* Dispatch the interrupt */

      regs = arm64_doirq(irq, regs);
    }

  /* Write to the end-of-interrupt register -- but never for the special
   * INTIDs (1020-1023).  The GICv3 architecture does not permit writing
   * a special INTID to ICC_EOIR1_EL1 (the previous code EOIed the
   * spurious ID 1023 unconditionally).
   */

  if (irq < 1020)
    {
      arm64_gic_eoi(irq);
    }

  return regs;
}
/* Probe and log the distributor implementation.
 *
 * Returns 0 when GICD_PIDR2 reports a GICv3 or GICv4 distributor,
 * -ENODEV otherwise.  Also logs SPI counts and optional features.
 */

static int gic_validate_dist_version(void)
{
  uint32_t typer;
  bool has_rss;
  uint32_t reg = getreg32(GICD_PIDR2) & GICD_PIDR2_ARCH_MASK;
  int spis;
  int espis;

  if (reg == GICD_PIDR2_ARCH_GICV3)
    {
      sinfo("GICv3 version detect\n");
    }
  else if (reg == GICD_PIDR2_ARCH_GICV4)
    {
      sinfo("GICv4 version detect\n");
    }
  else
    {
      sinfo("No GIC version detect\n");
      return -ENODEV;
    }

  /* Find out how many interrupts are supported. */

  typer = getreg32(GICD_TYPER);
  spis  = MIN(GICD_TYPER_SPIS(typer), 1020U) - 32;
  espis = GICD_TYPER_ESPIS(typer);

  sinfo("GICD_TYPER = 0x%x\n", typer);
  sinfo("%d SPIs implemented\n", spis);
  sinfo("%d Extended SPIs implemented\n", espis);

  has_rss = !!(typer & GICD_TYPER_RSS);
  sinfo("Distributor has %sRange Selector support\n", has_rss ? "" : "no ");

  if (typer & GICD_TYPER_MBIS)
    {
      sinfo("MBIs is present, But No support\n");
    }

  return 0;
}
/* Probe and log this CPU's redistributor implementation.
 *
 * Returns 0 when GICR_PIDR2 at the recorded redistributor base reports
 * a GICv3 or GICv4 redistributor, -ENODEV otherwise.  Also logs PPI
 * count and the VLPI/direct-LPI feature bits from GICR_TYPER.
 */

static int gic_validate_redist_version(void)
{
  uint64_t typer;
  unsigned int ppi_nr;
  bool has_vlpis = true;
  bool has_direct_lpi = true;
  uint32_t reg;
  unsigned long redist_base = gic_get_rdist();

  ppi_nr = (~0U);
  reg = getreg32(redist_base +
                 GICR_PIDR2) & GICR_PIDR2_ARCH_MASK;
  if (reg != GICR_PIDR2_ARCH_GICV3 &&
      reg != GICR_PIDR2_ARCH_GICV4)
    {
      sinfo("No redistributor present 0x%lx\n", redist_base);
      return -ENODEV;
    }

  typer = getreg64(redist_base + GICR_TYPER);
  has_vlpis &= !!(typer & GICR_TYPER_VLPIS);
  has_direct_lpi &= !!(typer & GICR_TYPER_DIRECTLPIS);
  ppi_nr = MIN(GICR_TYPER_NR_PPIS(typer), ppi_nr);

  if (ppi_nr == (~0U))
    {
      ppi_nr = 0;
    }

  sinfo("GICD_TYPER = 0x%"PRIx64"\n", typer);

  /* Fix: ppi_nr is unsigned int, so the conversion must be %u (was %d,
   * a format/argument type mismatch).
   */

  sinfo("%u PPIs implemented\n", ppi_nr);
  sinfo("%sVLPI support, %sdirect LPI support\n", !has_vlpis ? "no " : "",
        !has_direct_lpi ? "no " : "");
  return 0;
}
/* Per-CPU GIC initialization: record this CPU's redistributor base,
 * validate it, wake the redistributor and initialize the CPU interface.
 */

static void arm64_gic_init(void)
{
  uint8_t cpu;
  int err;

  cpu = this_cpu();

  /* NOTE(review): assumes contiguous redistributor frames of 0x20000
   * bytes (RD_base + SGI_base, 64KiB each) indexed by the core number
   * derived from MPIDR -- confirm against the target's GIC memory map.
   */

  gic_rdists[cpu] = CONFIG_GICR_BASE +
                    MPIDR_TO_CORE(GET_MPIDR()) * 0x20000;

  err = gic_validate_redist_version();
  if (err)
    {
      sinfo("no redistributor detected, giving up ret=%d\n", err);
      return;
    }

  gicv3_rdist_enable(gic_get_rdist());
  gicv3_cpuif_init();
}
/* System-wide GIC initialization for the boot CPU: validate the
 * distributor, initialize it, then do this CPU's per-core init.
 *
 * Returns 0 on success, a negated errno on probe failure.
 */

int arm64_gic_initialize(void)
{
  int err;

  err = gic_validate_dist_version();
  if (err)
    {
      sinfo("no distributor detected, giving up ret=%d\n", err);
      return err;
    }

  gicv3_dist_init();
  arm64_gic_init();

  return 0;
}
#ifdef CONFIG_SMP
/* Per-CPU GIC setup entry point for secondary (application) cores;
 * the distributor is already initialized by the boot CPU.
 */

void arm64_gic_secondary_init(void)
{
  arm64_gic_init();
}
#endif

View file

@ -0,0 +1,306 @@
/****************************************************************************
* arch/arm64/src/common/arm64_head.S
*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership. The
* ASF licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the
* License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* DESCRIPTION
* Bring-up code for ARMv8.
* Based on head.S(arm64 porting) at Xen Hypervisor Project
* Based on reset.S(aarch64 porting) at Zephyr RTOS Project
*
****************************************************************************/
#include <nuttx/config.h>
#include "arm64_arch.h"
#include "arm64_macro.inc"
/****************************************************************************
* Public Symbols
****************************************************************************/
.file "arm64_head.S"
/****************************************************************************
 * Assembly Macros
 ****************************************************************************/

/* macro define from xen head, for efi head define */

#define PAGE_SHIFT            12
#define __HEAD_FLAG_PAGE_SIZE ((PAGE_SHIFT - 10) / 2)
#define __HEAD_FLAG_PHYS_BASE 1
#define __HEAD_FLAGS          ((__HEAD_FLAG_PAGE_SIZE << 1) | \
                               (__HEAD_FLAG_PHYS_BASE << 3))

#ifdef CONFIG_DEBUG_FEATURES
/* Emit a NUL-terminated string into the .rodata.str section under
 * 'label' so PRINT below can reference it.
 */

#define RODATA_STR(label, msg) \
.pushsection .rodata.str, "aMS", %progbits, 1 ; \
label: .asciz msg; \
.popsection

/* Macro to print a string to the UART, if there is one.
 * Clobbers x0 - x3, x30 is lr for return
 */

#define PRINT(sym, _s) \
    mov x3, x30 ; \
    ldr x1, =boot_string_##sym ; \
    bl boot_stage_puts; \
    mov x30, x3 ; \
    RODATA_STR(boot_string_##sym, _s)
#else
#define PRINT(sym, s)
#endif /* CONFIG_DEBUG_FEATURES */
/****************************************************************************
* Private Functions
****************************************************************************/
/****************************************************************************
* Public Functions
****************************************************************************/
/* Kernel startup entry point.
 * ---------------------------
 *
 * The requirements are:
 *   MMU = off, D-cache = off, I-cache = on or off,
 *   x0 = physical address to the FDT blob.
 *        it will be used when NuttX support device tree in the future
 *
 * This must be the very first address in the loaded image.
 * It should be loaded at any 4K-aligned address.
 */

.globl __start;
__start:
    /* DO NOT MODIFY. Image header expected by Linux boot-loaders.
     *
     * This add instruction has no meaningful effect except that
     * its opcode forms the magic "MZ" signature of a PE/COFF file
     * that is required for UEFI applications.
     *
     * Some bootloader (such imx8 uboot) checking the magic "MZ" to see
     * if the image is a valid Linux image. but modifying the bootLoader is
     * unnecessary unless we need to do a customize secure boot.
     * so just put the ''MZ" in the header to make bootloader happiness
     */

    add     x13, x18, #0x16      /* the magic "MZ" signature */
    b       real_start           /* branch to kernel start */
    .quad   0x480000             /* Image load offset from start of RAM */
    .quad   _e_initstack - __start /* Effective size of kernel image, little-endian */
    .quad   __HEAD_FLAGS         /* Informative flags, little-endian */
    .quad   0                    /* reserved */
    .quad   0                    /* reserved */
    .quad   0                    /* reserved */
    .ascii  "ARM\x64"            /* Magic number, "ARM\x64" */
    .long   0                    /* reserved */

real_start:
    /* Disable all exceptions and interrupts */

    msr     DAIFSet, 0xf

#ifdef CONFIG_SMP
    ldr     x0, =cpu_boot_params
    get_cpu_id x1

    /* If the cores start up at the same time, we should atomically load and
     * store the mpid into arm64_cpu_boot_params.
     */

    ldaxr   x2, [x0, #BOOT_PARAM_MPID]
    cmp     x2, #-1
    bne     1f

    /* try to store x1 (mpid)
     * NOTE(review): the store-release targets [x0] (offset 0) while the
     * load above used [x0, #BOOT_PARAM_MPID]; this is only correct if
     * BOOT_PARAM_MPID == 0 -- confirm against the boot-params layout.
     */

    stlxr   w3, x1, [x0]

    /* If succeed, go to primary_core */

    cbz     w3, primary_core

    /* loop until our turn comes */

1:  dmb     ld
    ldr     x2, [x0, #BOOT_PARAM_MPID]
    cmp     x1, x2
    bne     1b

    /* we can now load our stack pointer value and move on */

    ldr     x24, [x0, #BOOT_PARAM_SP]
    add     x24, x24, #(CONFIG_IDLETHREAD_STACKSIZE)
    ldr     x25, =arm64_boot_secondary_c_routine
    bl      __reset_prep_c
    b       cpu_boot

primary_core:
    ldr     x24, [x0, #BOOT_PARAM_SP]
    add     x24, x24, #(CONFIG_IDLETHREAD_STACKSIZE)
#else
    /* load stack and entry point */

    ldr     x24, =(g_idle_stack + CONFIG_IDLETHREAD_STACKSIZE)
#endif
    ldr     x25, =arm64_boot_primary_c_routine

    /* Prepare for calling C code */

    bl      __reset_prep_c

#ifdef CONFIG_DEBUG_FEATURES
    /* Initialize the UART for early print.
     * Should only be called on the boot CPU
     */

    bl      up_earlyserialinit
#endif

cpu_boot:
    PRINT(cpu_boot, "- Ready to Boot CPU\r\n")

switch_el:
    switch_el x0, 3f, 2f, 1f

3:
    PRINT(switch_el3, "- Boot from EL3\r\n")

    /* EL3 init */

    bl      arm64_boot_el3_init

    /* Get next EL */

    adr     x0, switch_el
    bl      arm64_boot_el3_get_next_el
    eret

2:
    PRINT(switch_el2, "- Boot from EL2\r\n")

    /* EL2 init */

    bl      arm64_boot_el2_init

    /* Move to EL1 with all exceptions masked */

    mov_imm x0, (SPSR_DAIF_MASK | SPSR_MODE_EL1T)
    msr     spsr_el2, x0
    adr     x0, 1f
    msr     elr_el2, x0
    eret

1:
    PRINT(switch_el1, "- Boot from EL1\r\n")

    /* EL1 init */

    bl      arm64_boot_el1_init

    /* set SP_ELx and Enable SError interrupts */

    msr     SPSel, #1
    msr     DAIFClr, #(DAIFCLR_ABT_BIT)
    isb

jump_to_c_entry:
    PRINT(jump_to_c_entry, "- Boot to C runtime for OS Initialize\r\n")
    ret     x25

/* Fail-stop */

fail:
    PRINT(fail, "- Boot failed -\r\n")
1:  wfe
    b       1b
/* Set the minimum necessary to safely call C code */

__reset_prep_c:
    /* return address: x23 (lr is clobbered by the EL dispatch below) */

    mov     x23, lr
    switch_el x0, 3f, 2f, 1f
3:
    /* Reinitialize SCTLR from scratch in EL3 */

    ldr     w0, =(SCTLR_EL3_RES1 | SCTLR_I_BIT | SCTLR_SA_BIT)
    msr     sctlr_el3, x0

    /* Set SP_EL1 */

    msr     sp_el1, x24
    b       out
2:
    /* Disable alignment fault checking */

    mrs     x0, sctlr_el2
    bic     x0, x0, SCTLR_A_BIT
    msr     sctlr_el2, x0

    /* Set SP_EL1 */

    msr     sp_el1, x24
    b       out
1:
    /* Disable alignment fault checking */

    mrs     x0, sctlr_el1
    bic     x0, x0, SCTLR_A_BIT
    msr     sctlr_el1, x0

    /* Set SP_EL1. We cannot use sp_el1 at EL1 */

    msr     SPSel, #1
    mov     sp, x24
out:
    isb

    /* Select SP_EL0 and Initialize stack */

    msr     SPSel, #0
    mov     sp, x24
    ret     x23
#ifdef CONFIG_DEBUG_FEATURES
/* Print early debug messages through the low-level UART.
 *
 * x1: Nul-terminated string to print (the PRINT macro loads the string
 *     address into x1; the original comment incorrectly said x0).
 * Clobbers x0-x1
 */

boot_stage_puts:
    ldrb    w0, [x1], #1           /* Load next char */
    cmp     w0, 0
    beq     1f                     /* Exit on nul */
    stp     xzr, x30, [sp, #-16]!  /* preserve lr across the call */
    bl      up_lowputc             /* up_lowputc takes the char in w0 */
    ldp     xzr, x30, [sp], #16
    b       boot_stage_puts
1:
    ret

    /* Fix: the .type directive previously annotated the nonexistent
     * symbol 'boot_low_puts'; it must name 'boot_stage_puts'.
     */

    .type boot_stage_puts, %function;
#endif /* CONFIG_DEBUG_FEATURES */

View file

@ -0,0 +1,62 @@
/****************************************************************************
* arch/arm64/src/common/arm64_idle.c
*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership. The
* ASF licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the
* License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
****************************************************************************/
/****************************************************************************
* Included Files
****************************************************************************/
#include <nuttx/config.h>
#include <nuttx/arch.h>
#include "arm64_internal.h"
#include "arm64_arch.h"
/****************************************************************************
* Public Functions
****************************************************************************/
/****************************************************************************
 * Name: up_idle
 *
 * Description:
 *   up_idle() is the logic that will be executed when there is no other
 *   ready-to-run task.  This is processor idle time and will continue until
 *   some interrupt occurs to cause a context switch from the idle task.
 *
 *   Processing in this state may be processor-specific. e.g., this is where
 *   power management operations might be performed.
 *
 ****************************************************************************/

void up_idle(void)
{
#if defined(CONFIG_SUPPRESS_INTERRUPTS) || defined(CONFIG_SUPPRESS_TIMER_INTS)
  /* If the system is idle and there are no timer interrupts, then process
   * "fake" timer interrupts. Hopefully, something will wake up.
   */

  nxsched_process_timer();
#else
  /* Sleep until an interrupt occurs to save power */

  arch_cpu_idle();
#endif
}

View file

@ -0,0 +1,178 @@
/****************************************************************************
* arch/arm64/src/common/arm64_initialize.c
*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership. The
* ASF licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the
* License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
****************************************************************************/
/****************************************************************************
* Included Files
****************************************************************************/
#include <nuttx/config.h>
#include <debug.h>
#include <arch/limits.h>
#include <nuttx/arch.h>
#include <nuttx/board.h>
#include <nuttx/drivers/drivers.h>
#include <nuttx/fs/loop.h>
#include <nuttx/net/loopback.h>
#include <nuttx/net/tun.h>
#include <nuttx/net/telnet.h>
#include <nuttx/power/pm.h>
#include <arch/chip/chip.h>
#include "arm64_arch.h"
#include "arm64_internal.h"
#include "chip.h"
/****************************************************************************
 * Public data
 ****************************************************************************/

#ifdef CONFIG_SMP
/* Per-CPU idle-thread stacks and interrupt stacks */

INIT_STACK_ARRAY_DEFINE(g_cpu_idlestackalloc, CONFIG_SMP_NCPUS,
                        SMP_STACK_SIZE);
INIT_STACK_ARRAY_DEFINE(g_interrupt_stacks, CONFIG_SMP_NCPUS,
                        INTSTACK_SIZE);
#else
/* idle thread stack for primary core */

INIT_STACK_DEFINE(g_idle_stack, CONFIG_IDLETHREAD_STACKSIZE);
INIT_STACK_DEFINE(g_interrupt_stack, INTSTACK_SIZE);
#endif
/****************************************************************************
 * Public Functions
 ****************************************************************************/

/****************************************************************************
 * Name: arm64_intstack_alloc
 *
 * Description:
 *   Return a pointer to the "alloc" the correct interrupt stack allocation
 *   for the current CPU.
 *
 ****************************************************************************/

#ifdef CONFIG_SMP
uintptr_t arm64_intstack_alloc(void)
{
  return (uintptr_t)(g_interrupt_stacks[up_cpu_index()]);
}
/****************************************************************************
 * Name: arm64_intstack_top
 *
 * Description:
 *   Return a pointer to the top the correct interrupt stack allocation
 *   for the current CPU.
 *
 ****************************************************************************/

uintptr_t arm64_intstack_top(void)
{
  return (uintptr_t)(g_interrupt_stacks[up_cpu_index()] + INTSTACK_SIZE);
}
#endif
/****************************************************************************
 * Name: up_color_intstack
 *
 * Description:
 *   Set the interrupt stack to a value so that later we can determine how
 *   much stack space was used by interrupt handling logic
 *
 ****************************************************************************/

#if defined(CONFIG_STACK_COLORATION) && CONFIG_ARCH_INTERRUPTSTACK > 3
static void up_color_intstack(void)
{
#ifdef CONFIG_SMP
  void *ptr = (void *)g_interrupt_stacks[up_cpu_index()];
#else
  void *ptr = (void *)g_interrupt_stack;
#endif
  arm64_stack_color(ptr, INTSTACK_SIZE);
}
#else
/* Coloration disabled or no interrupt stack: compile to nothing */

#  define up_color_intstack()
#endif
/****************************************************************************
 * Name: up_initialize
 *
 * Description:
 *   up_initialize will be called once during OS initialization after the
 *   basic OS services have been initialized.  The architecture specific
 *   details of initializing the OS will be handled here.  Such things as
 *   setting up interrupt service routines, starting the clock, and
 *   registering device drivers are some of the things that are different
 *   for each processor and hardware platform.
 *
 *   up_initialize is called after the OS initialized but before the user
 *   initialization logic has been started and before the libraries have
 *   been initialized.  OS services and driver services are available.
 *
 ****************************************************************************/

void up_initialize(void)
{
  /* Initialize global variables */

  up_color_intstack();

  /* Add any extra memory fragments to the memory manager */

  arm64_addregion();

#ifdef CONFIG_PM
  /* Initialize the power management subsystem.  This MCU-specific function
   * must be called *very* early in the initialization sequence *before* any
   * other device drivers are initialized (since they may attempt to register
   * with the power management subsystem).
   */

  arm64_pminitialize();
#endif

#ifdef CONFIG_ARCH_DMA
  arm64_dma_initialize();
#endif

  /* Initialize the serial device driver */

#ifdef USE_SERIALDRIVER
  arm64_serialinit();
#endif

#ifndef CONFIG_NETDEV_LATEINIT
  /* Initialize the network */

  arm64_netinitialize();
#endif

#if defined(CONFIG_USBDEV) || defined(CONFIG_USBHOST)
  /* Initialize USB -- device and/or host */

  arm64_usbinitialize();
#endif
}

View file

@ -0,0 +1,151 @@
/****************************************************************************
* arch/arm64/src/common/arm64_initialstate.c
*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership. The
* ASF licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the
* License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
****************************************************************************/
/****************************************************************************
* Included Files
****************************************************************************/
#include <nuttx/config.h>
#include <debug.h>
#include <arch/limits.h>
#include <nuttx/arch.h>
#include <nuttx/board.h>
#include <nuttx/drivers/drivers.h>
#include <nuttx/fs/loop.h>
#include <nuttx/net/loopback.h>
#include <nuttx/net/tun.h>
#include <nuttx/net/telnet.h>
#include <nuttx/note/note_driver.h>
#include <nuttx/syslog/syslog_console.h>
#include <nuttx/serial/pty.h>
#include <nuttx/crypto/crypto.h>
#include <nuttx/power/pm.h>
#include <arch/chip/chip.h>
#include "arm64_arch.h"
#include "arm64_internal.h"
#include "chip.h"
#include "arm64_fatal.h"
#ifdef CONFIG_ARCH_FPU
#include "arm64_fpu.h"
#endif
/****************************************************************************
 * Public Functions
 ****************************************************************************/

/* Build the initial exception-return context for a new task at the top
 * of its stack so that the first context switch "returns" into
 * tcb->start at EL1 with a clean register set.
 */

void arm64_new_task(struct tcb_s * tcb)
{
  char *stack_ptr = tcb->stack_base_ptr + tcb->adj_stack_size;
  struct regs_context * pinitctx;

#ifdef CONFIG_ARCH_FPU
  struct fpu_reg * pfpuctx;

  /* Reserve an FPU context frame below the stack top */

  pfpuctx = STACK_PTR_TO_FRAME(struct fpu_reg, stack_ptr);
  tcb->xcp.fpu_regs = (uint64_t *)pfpuctx;

  /* set fpu context */

  arm64_init_fpu(tcb);
  stack_ptr = (char *)pfpuctx;
#endif

  pinitctx = STACK_PTR_TO_FRAME(struct regs_context, stack_ptr);
  memset(pinitctx, 0, sizeof(struct regs_context));
  pinitctx->elr = (uint64_t)tcb->start;

  /* Keep using SP_EL1 */

  pinitctx->spsr = SPSR_MODE_EL1H;

#ifdef CONFIG_SUPPRESS_INTERRUPTS
  /* Start the task with IRQ and FIQ masked */

  pinitctx->spsr |= (DAIF_IRQ_BIT | DAIF_FIQ_BIT);
#endif /* CONFIG_SUPPRESS_INTERRUPTS */

  /* Initial stack pointers sit just below the saved context frame */

  pinitctx->sp_elx = (uint64_t)pinitctx;
  pinitctx->sp_el0 = (uint64_t)pinitctx;
  pinitctx->exe_depth = 0;

  /* Both TPIDR registers carry the TCB pointer for fast this-task lookup */

  pinitctx->tpidr_el0 = (uint64_t)tcb;
  pinitctx->tpidr_el1 = (uint64_t)tcb;

  tcb->xcp.regs = (uint64_t *)pinitctx;
}
/****************************************************************************
 * Name: up_initial_state
 *
 * Description:
 *   A new thread is being started and a new TCB has been created.  This
 *   function is called to initialize the processor specific portions of
 *   the new TCB.
 *
 *   This function must setup the initial architecture registers and/or
 *   stack so that execution will begin at tcb->start on the next context
 *   switch.
 *
 ****************************************************************************/

void up_initial_state(struct tcb_s *tcb)
{
  struct xcptcontext *xcp = &tcb->xcp;

  memset(xcp, 0, sizeof(struct xcptcontext));

  if (tcb->pid == IDLE_PROCESS_ID)
    {
      /* Initialize the idle thread stack */

#ifdef CONFIG_SMP
      /* NOTE(review): index 0 is used unconditionally -- presumably the
       * idle TCB initialized here always belongs to CPU0; confirm for
       * secondary-core idle threads.
       */

      tcb->stack_alloc_ptr = (void *)(g_cpu_idlestackalloc[0]);
#else
      tcb->stack_alloc_ptr = (void *)(g_idle_stack);
#endif
      tcb->stack_base_ptr  = tcb->stack_alloc_ptr;
      tcb->adj_stack_size  = CONFIG_IDLETHREAD_STACKSIZE;

#ifdef CONFIG_ARCH_FPU
      /* set fpu context */

      arm64_init_fpu(tcb);
#endif
      /* set initialize idle thread tcb and exception depth
       * core 0, idle0
       */

      write_sysreg(0, tpidrro_el0);
      write_sysreg(tcb, tpidr_el1);
      write_sysreg(tcb, tpidr_el0);

#ifdef CONFIG_STACK_COLORATION
      /* If stack debug is enabled, then fill the stack with a
       * recognizable value that we can use later to test for high
       * water marks.
       *
       * NOTE(review): size 0 is passed -- presumably arm64_stack_color
       * treats 0 as "color up to the current stack pointer"; confirm.
       */

      arm64_stack_color(tcb->stack_alloc_ptr, 0);
#endif /* CONFIG_STACK_COLORATION */

      return;
    }

  arm64_new_task(tcb);
}

View file

@ -0,0 +1,364 @@
/****************************************************************************
* arch/arm64/src/common/arm64_internal.h
*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership. The
* ASF licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the
* License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
****************************************************************************/
#ifndef __ARCH_ARM64_SRC_COMMON_ARM64_INTERNAL_H
#define __ARCH_ARM64_SRC_COMMON_ARM64_INTERNAL_H
/****************************************************************************
* Included Files
****************************************************************************/
#include <nuttx/config.h>
#ifndef __ASSEMBLY__
# include <nuttx/compiler.h>
# include <nuttx/arch.h>
# include <sys/types.h>
# include <stdint.h>
#endif
#include "arm64_arch.h"
/****************************************************************************
* Pre-processor Definitions
****************************************************************************/
/* Determine which (if any) console driver to use. If a console is enabled
 * and no other console device is specified, then a serial console is
 * assumed.
 */

#ifndef CONFIG_DEV_CONSOLE

/* No console device at all: neither the serial console driver nor the
 * early serial initialization is needed.
 */

#  undef USE_SERIALDRIVER
#  undef USE_EARLYSERIALINIT
#else
#  if defined(CONFIG_LWL_CONSOLE)
#    undef USE_SERIALDRIVER
#    undef USE_EARLYSERIALINIT
#  elif defined(CONFIG_CONSOLE_SYSLOG)
#    undef USE_SERIALDRIVER
#    undef USE_EARLYSERIALINIT
#  else
#    define USE_SERIALDRIVER 1
#    define USE_EARLYSERIALINIT 1
#  endif
#endif

/* If some other device is used as the console, then the serial driver may
 * still be needed.  Let's assume that if the upper half serial driver is
 * built, then the lower half will also be needed.  There is no need for
 * the early serial initialization in this case.
 */

#if !defined(USE_SERIALDRIVER) && defined(CONFIG_STANDARD_SERIAL)
#  define USE_SERIALDRIVER 1
#endif

/* Check if an interrupt stack size is configured */

#ifndef CONFIG_ARCH_INTERRUPTSTACK
#  define CONFIG_ARCH_INTERRUPTSTACK 0
#endif

/* If the floating point unit is present and enabled, then save the
 * floating point registers as well as normal ARM registers.
 */

/* Save/restore the interrupted context pointer to/from CURRENT_REGS */

#define arm64_savestate(regs) (regs = (uint64_t *)CURRENT_REGS)
#define arm64_restorestate(regs) (CURRENT_REGS = regs)

/* This is the value used to mark the stack for subsequent stack monitoring
 * logic.
 */

#define STACK_COLOR 0xdeaddead
#define HEAP_COLOR 'h'
/****************************************************************************
* Public Types
****************************************************************************/
#ifndef __ASSEMBLY__
typedef void (*up_vector_t)(void);
#endif
/****************************************************************************
* Public Data
****************************************************************************/
#ifndef __ASSEMBLY__
#ifdef __cplusplus
#define EXTERN extern "C"
extern "C"
{
#else
#define EXTERN extern
#endif
/* On AArch64 the stack pointer must be 128-bit (16-byte) aligned */
#define STACK_ALIGNMENT 16
/* Stack alignment macros */
#define STACK_ALIGN_MASK (STACK_ALIGNMENT - 1)
#define STACK_ALIGN_DOWN(a) ((a) & ~STACK_ALIGN_MASK)
#define STACK_ALIGN_UP(a) (((a) + STACK_ALIGN_MASK) & ~STACK_ALIGN_MASK)
#define INIT_STACK_DEFINE(sym, size) \
char locate_data(".initstack") \
aligned_data(STACK_ALIGNMENT) sym[size]
#define INIT_STACK_ARRAY_DEFINE(sym, n, size) \
char locate_data(".initstack") \
aligned_data(STACK_ALIGNMENT) sym[n][size]
#define INIT_STACK_DEFINE_EXTERN(sym, size) \
EXTERN char sym[size]
#define INIT_STACK_ARRAY_DEFINE_EXTERN(sym, n, size) \
EXTERN char sym[n][size]
#define STACK_PTR_TO_FRAME(type, ptr) \
(type *)((uintptr_t)(ptr) - sizeof(type))
#define INTSTACK_SIZE (CONFIG_ARCH_INTERRUPTSTACK & ~STACK_ALIGN_MASK)
#ifdef CONFIG_SMP
/* The size of interrupt and idle stack. This is the configured
* value aligned the 8-bytes as required by the ARM EABI.
*/
#define SMP_STACK_SIZE STACK_ALIGN_UP(CONFIG_IDLETHREAD_STACKSIZE)
INIT_STACK_ARRAY_DEFINE_EXTERN(g_cpu_idlestackalloc, CONFIG_SMP_NCPUS,
SMP_STACK_SIZE);
INIT_STACK_ARRAY_DEFINE_EXTERN(g_interrupt_stacks, CONFIG_SMP_NCPUS,
INTSTACK_SIZE);
uintptr_t arm64_intstack_alloc(void);
uintptr_t arm64_intstack_top(void);
#else
/* idle thread stack for primary core */
INIT_STACK_DEFINE_EXTERN(g_idle_stack, CONFIG_IDLETHREAD_STACKSIZE);
INIT_STACK_DEFINE_EXTERN(g_interrupt_stack, INTSTACK_SIZE);
#endif
/* This is the beginning of heap as provided from arm64_head.S.
* This is the first address in DRAM after the loaded
* program + bss + idle stack. The end of the heap is
* CONFIG_RAM_END
*/
/* Address of the saved user stack pointer */
#if CONFIG_ARCH_INTERRUPTSTACK > 3
EXTERN uint64_t g_intstackalloc; /* Allocated stack base */
EXTERN uint64_t g_intstacktop; /* Initial top of interrupt stack */
#else
# error CONFIG_ARCH_INTERRUPTSTACK must be defined (4096 at least) at arm64
#endif
/* These 'addresses' of these values are setup by the linker script. They
* are not actual uint64_t storage locations! They are only used
* meaningfully in the following way:
*
 * - The linker script defines, for example, the symbol _sdata.
* - The declaration extern uint64_t _sdata; makes C happy. C will believe
* that the value _sdata is the address of a uint64_t variable _data
* (it is not!).
* - We can recover the linker value then by simply taking the address of
* of _data. like: uint64_t *pdata = &_sdata;
*
* Memory layout for Nuttx at arm64 for FLAT Build
*
* +-------------------------+ <- RAM BASE
* | |
* |<<<<<<<<<<<<<<<<<<<<<<<<<| <- LOAD_BASE
* | text(code) section |
* | vector table |
* +-------------------------+-- page align(4096)
* | rodata section |
* +-------------------------+-- page align(4096)
* | data/bss section |
* +-------------------------+-- page align(4096)
* | initstack section |
* +-------------------------+-- page align(4096)
* | heap alloc area |
* | |
* | |
* | |
* +-------------------------+
*
* Note:
* 1. initstack is for idle_thread and interrupt stack,
* it has dedicated stack for IRQ handler in arm64
* 2. Every section with different memory attribute,
* please check mmu_nxrt_regions at arm64_mmu.c
*
* please check dramboot.ld at specified platform for more detail
*/
EXTERN char _stext[]; /* Start of .text */
EXTERN char _etext[]; /* End of .text */
EXTERN char _sztext[]; /* Size of .text */
EXTERN char _srodata[]; /* Start of .rodata */
EXTERN char _erodata[]; /* End+1 of .rodata */
EXTERN char _szrodata[]; /* Size of .rodata */
EXTERN const char _eronly[]; /* End+1 of read only section (.text + .rodata) */
EXTERN char _sdata[]; /* Start of .data */
EXTERN char _edata[]; /* End+1 of .data */
EXTERN char _sbss[]; /* Start of .bss */
EXTERN char _ebss[]; /* End+1 of .bss */
EXTERN char _szdata[]; /* Size of data(.data + .bss) */
EXTERN char _e_initstack[]; /* End+1 of .initstack */
EXTERN char g_idle_topstack[]; /* End+1 of heap */
# define _START_TEXT _stext
# define _END_TEXT _etext
# define _START_BSS _sbss
# define _END_BSS _ebss
# define _DATA_INIT _eronly
# define _START_DATA _sdata
# define _END_DATA _edata
/****************************************************************************
* Inline Functions
****************************************************************************/
/****************************************************************************
* Public Function Prototypes
****************************************************************************/
void arm64_new_task(struct tcb_s *tak_new);
/* Low level initialization provided by chip logic */
void arm64_chip_boot(void);
int arm64_psci_init(const char *method);
void __start(void);
void arm64_secondary_start(void);
/* Context switching */
void arm64_fullcontextrestore(uint64_t *restoreregs) noreturn_function;
void arm64_switchcontext(uint64_t **saveregs, uint64_t *restoreregs);
void arm64_context_snapshot(void *savereg);
/* Signal handling **********************************************************/
void arm64_sigdeliver(void);
/* Power management *********************************************************/
#ifdef CONFIG_PM
void arm64_pminitialize(void);
#else
# define arm64_pminitialize()
#endif
/* Interrupt handling */
/* Exception handling logic unique to the Cortex-A and Cortex-R families
* (but should be back-ported to the ARM7 and ARM9 families).
*/
/* Interrupt acknowledge and dispatch */
uint64_t *arm64_doirq(int irq, uint64_t *regs);
/* Paging support */
#ifdef CONFIG_PAGING
void arm64_pginitialize(void);
#else /* CONFIG_PAGING */
# define arm64_pginitialize()
#endif /* CONFIG_PAGING */
uint64_t * arm64_syscall_switch(uint64_t *regs);
int arm64_syscall(uint64_t *regs);
#ifdef USE_SERIALDRIVER
void arm64_serialinit(void);
#endif
#ifdef USE_EARLYSERIALINIT
void arm64_earlyserialinit(void);
#endif
/* DMA */
#ifdef CONFIG_ARCH_DMA
void weak_function arm64_dma_initialize(void);
#endif
/* Memory management */
#if CONFIG_MM_REGIONS > 1
void arm64_addregion(void);
#else
# define arm64_addregion()
#endif
/* Networking */
/* Defined in board/xyz_network.c for board-specific Ethernet
* implementations, or chip/xyx_ethernet.c for chip-specific Ethernet
* implementations, or common/arm_etherstub.c for a corner case where the
* network is enabled yet there is no Ethernet driver to be initialized.
*
* Use of common/arm_etherstub.c is deprecated. The preferred mechanism is
* to use CONFIG_NETDEV_LATEINIT=y to suppress the call to
* arm_netinitialize() in up_initialize(). Then this stub would not be
* needed.
*/
#if defined(CONFIG_NET) && !defined(CONFIG_NETDEV_LATEINIT)
void arm64_netinitialize(void);
#else
# define arm64_netinitialize()
#endif
/* USB */
#ifdef CONFIG_USBDEV
void arm64_usbinitialize(void);
void arm64_usbuninitialize(void);
#else
# define arm64_usbinitialize()
# define arm64_usbuninitialize()
#endif
/* Debug */
#ifdef CONFIG_STACK_COLORATION
void arm64_stack_color(void *stackbase, size_t nbytes);
#endif
#undef EXTERN
#ifdef __cplusplus
}
#endif
#endif /* __ASSEMBLY__ */
#endif /* __ARCH_ARM64_SRC_COMMON_ARM64_INTERNAL_H */

View file

@ -0,0 +1,82 @@
/****************************************************************************
* arch/arm64/src/common/macro.inc
*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership. The
* ASF licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the
* License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
****************************************************************************
* DESCRIPTION
* macro define for arm64 assembler
*
***************************************************************************/
#ifndef __ARCH_ARM64_SRC_COMMON_ARM64_MACRO_INC
#define __ARCH_ARM64_SRC_COMMON_ARM64_MACRO_INC
/*
* Get CPU id
*/
.macro get_cpu_id xreg0
mrs \xreg0, mpidr_el1
    /* FIXME: Aff3 is not taken into consideration */
ubfx \xreg0, \xreg0, #0, #24
.endm
.macro switch_el, xreg, el3_label, el2_label, el1_label
mrs \xreg, CurrentEL
cmp \xreg, 0xc
beq \el3_label
cmp \xreg, 0x8
beq \el2_label
cmp \xreg, 0x4
beq \el1_label
.endm
/*
* macro to support mov of immediate constant to 64 bit register
* It will generate instruction sequence of 'mov'/ 'movz' and one
* to three 'movk' depending on the immediate value.
*/
.macro mov_imm, xreg, imm
.if ((\imm) == 0)
mov \xreg, \imm
.else
.if (((\imm) >> 31) == 0 || ((\imm) >> 31) == 0x1ffffffff)
movz \xreg, (\imm >> 16) & 0xffff, lsl 16
.else
.if (((\imm) >> 47) == 0 || ((\imm) >> 47) == 0x1ffff)
movz \xreg, (\imm >> 32) & 0xffff, lsl 32
.else
movz \xreg, (\imm >> 48) & 0xffff, lsl 48
movk \xreg, (\imm >> 32) & 0xffff, lsl 32
.endif
movk \xreg, (\imm >> 16) & 0xffff, lsl 16
.endif
movk \xreg, (\imm) & 0xffff, lsl 0
.endif
.endm
#define GTEXT(sym) .global sym; .type sym, %function
#define PERFOPT_ALIGN .balign 4
#define SECTION_FUNC(sect, sym) \
.section .sect.sym, "ax"; \
PERFOPT_ALIGN; sym :
#define SECTION_SUBSEC_FUNC(sect, subsec, sym) \
.section .sect.subsec, "ax"; PERFOPT_ALIGN; sym :
#endif /* __ARCH_ARM64_SRC_COMMON_ARM64_MACRO_INC */

View file

@ -0,0 +1,622 @@
/***************************************************************************
* arch/arm64/src/common/arm64_mmu.c
*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership. The
* ASF licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the
* License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
***************************************************************************/
/***************************************************************************
* Included Files
***************************************************************************/
#include <nuttx/config.h>
#include <stdint.h>
#include <debug.h>
#include <assert.h>
#include <nuttx/arch.h>
#include <arch/irq.h>
#include <arch/chip/chip.h>
#include "arm64_arch.h"
#include "arm64_internal.h"
#include "arm64_fatal.h"
#include "arm64_mmu.h"
/***************************************************************************
* Pre-processor Definitions
***************************************************************************/
/* MMU debug option
* #define CONFIG_MMU_ASSERT 1
* #define CONFIG_MMU_DEBUG 1
* #define CONFIG_MMU_DUMP_PTE 1
*/
#ifdef CONFIG_MMU_DEBUG
#define L0_SPACE ""
#define L1_SPACE " "
#define L2_SPACE " "
#define L3_SPACE " "
#define XLAT_TABLE_LEVEL_SPACE(level) \
(((level) == 0) ? L0_SPACE : \
((level) == 1) ? L1_SPACE : \
((level) == 2) ? L2_SPACE : L3_SPACE)
#endif
#ifdef CONFIG_MMU_ASSERT
#define __MMU_ASSERT(__cond, fmt, ...) \
do \
{ \
if (!(__cond)) \
{ \
trace_printf(fmt, ## __VA_ARGS__); \
PANIC(); \
} \
} \
while (false)
#else
#define __MMU_ASSERT(test, fmt, ...)
#endif
/* We support only 4kB translation granule */
#define PAGE_SIZE_SHIFT 12U
#define PAGE_SIZE (1U << PAGE_SIZE_SHIFT)
#define XLAT_TABLE_SIZE_SHIFT PAGE_SIZE_SHIFT /* Size of one
* complete table */
#define XLAT_TABLE_SIZE (1U << XLAT_TABLE_SIZE_SHIFT)
#define XLAT_TABLE_ENTRY_SIZE_SHIFT 3U /* Each table entry is 8 bytes */
#define XLAT_TABLE_LEVEL_MAX 3U
#define XLAT_TABLE_ENTRIES_SHIFT \
(XLAT_TABLE_SIZE_SHIFT - XLAT_TABLE_ENTRY_SIZE_SHIFT)
#define XLAT_TABLE_ENTRIES (1U << XLAT_TABLE_ENTRIES_SHIFT)
/* Address size covered by each entry at given translation table level */
#define L3_XLAT_VA_SIZE_SHIFT PAGE_SIZE_SHIFT
#define L2_XLAT_VA_SIZE_SHIFT \
(L3_XLAT_VA_SIZE_SHIFT + XLAT_TABLE_ENTRIES_SHIFT)
#define L1_XLAT_VA_SIZE_SHIFT \
(L2_XLAT_VA_SIZE_SHIFT + XLAT_TABLE_ENTRIES_SHIFT)
#define L0_XLAT_VA_SIZE_SHIFT \
(L1_XLAT_VA_SIZE_SHIFT + XLAT_TABLE_ENTRIES_SHIFT)
#define LEVEL_TO_VA_SIZE_SHIFT(level) \
(PAGE_SIZE_SHIFT + (XLAT_TABLE_ENTRIES_SHIFT * \
(XLAT_TABLE_LEVEL_MAX - (level))))
/* Virtual Address Index within given translation table level */
#define XLAT_TABLE_VA_IDX(va_addr, level) \
((va_addr >> LEVEL_TO_VA_SIZE_SHIFT(level)) & (XLAT_TABLE_ENTRIES - 1))
/* Calculate the initial translation table level from CONFIG_ARM64_VA_BITS
* For a 4 KB page size,
* (va_bits <= 21) - base level 3
* (22 <= va_bits <= 30) - base level 2
* (31 <= va_bits <= 39) - base level 1
* (40 <= va_bits <= 48) - base level 0
*/
#define GET_XLAT_TABLE_BASE_LEVEL(va_bits) \
((va_bits > L0_XLAT_VA_SIZE_SHIFT) \
? 0U \
: (va_bits > L1_XLAT_VA_SIZE_SHIFT) \
? 1U \
: (va_bits > L2_XLAT_VA_SIZE_SHIFT) \
? 2U : 3U)
#define XLAT_TABLE_BASE_LEVEL GET_XLAT_TABLE_BASE_LEVEL(CONFIG_ARM64_VA_BITS)
#define GET_NUM_BASE_LEVEL_ENTRIES(va_bits) \
(1U << (va_bits - LEVEL_TO_VA_SIZE_SHIFT(XLAT_TABLE_BASE_LEVEL)))
#define NUM_BASE_LEVEL_ENTRIES GET_NUM_BASE_LEVEL_ENTRIES( \
CONFIG_ARM64_VA_BITS)
static uint64_t base_xlat_table[NUM_BASE_LEVEL_ENTRIES] aligned_data(
NUM_BASE_LEVEL_ENTRIES * sizeof(uint64_t));
static uint64_t xlat_tables[CONFIG_MAX_XLAT_TABLES][XLAT_TABLE_ENTRIES]
aligned_data(XLAT_TABLE_ENTRIES * sizeof(uint64_t));
#if (CONFIG_ARM64_PA_BITS == 48)
#define TCR_PS_BITS TCR_PS_BITS_256TB
#elif (CONFIG_ARM64_PA_BITS == 44)
#define TCR_PS_BITS TCR_PS_BITS_16TB
#elif (CONFIG_ARM64_PA_BITS == 42)
#define TCR_PS_BITS TCR_PS_BITS_4TB
#elif (CONFIG_ARM64_PA_BITS == 40)
#define TCR_PS_BITS TCR_PS_BITS_1TB
#elif (CONFIG_ARM64_PA_BITS == 36)
#define TCR_PS_BITS TCR_PS_BITS_64GB
#else
#define TCR_PS_BITS TCR_PS_BITS_4GB
#endif
/***************************************************************************
* Private Data
***************************************************************************/
/* NuttX RTOS execution regions with appropriate attributes.
 * These flat-map (VA == PA) the kernel image sections using the
 * linker-provided start/size symbols from arm64_internal.h.
 */

static const struct arm_mmu_region mmu_nxrt_regions[] =
{
  /* Mark text segment cacheable, read only and executable */

  MMU_REGION_FLAT_ENTRY("nx_code",
                        (uint64_t)_stext,
                        (uint64_t)_sztext,
                        MT_CODE | MT_SECURE),

  /* Mark rodata segment cacheable, read only and execute-never */

  MMU_REGION_FLAT_ENTRY("nx_rodata",
                        (uint64_t)_srodata,
                        (uint64_t)_szrodata,
                        MT_RODATA | MT_SECURE),

  /* Mark rest of the NuttX execution regions (data, bss, noinit, etc.)
   * cacheable, read-write.
   * Note: read-write regions are marked execute-never internally
   * (see set_pte_block_desc: MT_RW implies PXN).
   */

  MMU_REGION_FLAT_ENTRY("nx_data",
                        (uint64_t)_sdata,
                        (uint64_t)_szdata,
                        MT_NORMAL | MT_RW | MT_SECURE),
};
/***************************************************************************
* Private Functions
***************************************************************************/
/* Build the Translation Control Register (TCR) value for the given
 * exception level.  Only the EL1 and EL3 register layouts are handled.
 */

static uint64_t get_tcr(int el)
{
  const uint64_t ps_bits = TCR_PS_BITS;
  uint64_t regval;

  if (el == 1)
    {
      /* Physical address size goes into TCR_EL1.IPS.  Also disable
       * translation table walks for addresses translated through
       * TTBR1_EL1 (TCR_EL1.EPD1), since only TTBR0 is used here.
       */

      regval = (ps_bits << TCR_EL1_IPS_SHIFT) | TCR_EPD1_DISABLE;
    }
  else
    {
      /* EL3 layout: physical address size in TCR_EL3.PS */

      regval = ps_bits << TCR_EL3_PS_SHIFT;
    }

  /* Virtual address span handled through TTBR0 */

  regval |= TCR_T0SZ(CONFIG_ARM64_VA_BITS);

  /* 4KB translation granule; table walks are inner shareable and
   * inner/outer write-back write-allocate cacheable.
   */

  regval |= TCR_TG0_4K | TCR_SHARED_INNER | TCR_ORGN_WBWA | TCR_IRGN_WBWA;

  return regval;
}
/* Extract the descriptor-type field (bits[1:0]) from a translation
 * table entry: invalid, block, or table/page descriptor.
 */

static int pte_desc_type(uint64_t *pte)
{
  uint64_t entry = *pte;

  return (int)(entry & PTE_DESC_TYPE_MASK);
}
/* Walk the translation tables and return a pointer to the entry that
 * maps 'addr' at the requested table 'level'.  Returns NULL if the
 * walk cannot descend that far (an intermediate entry is not a table
 * descriptor).
 */

static uint64_t *calculate_pte_index(uint64_t addr, int level)
{
  uint64_t *table = (uint64_t *)base_xlat_table;
  unsigned int lvl;

  for (lvl = XLAT_TABLE_BASE_LEVEL; lvl <= XLAT_TABLE_LEVEL_MAX; lvl++)
    {
      uint64_t *entry = table + XLAT_TABLE_VA_IDX(addr, lvl);

      if ((int)lvl == level)
        {
          /* Reached the level the caller asked for */

          return entry;
        }

      if (pte_desc_type(entry) != PTE_TABLE_DESC)
        {
          /* Cannot traverse through a non-table descriptor */

          return NULL;
        }

      /* The next-level table address lives in bits[47:12] */

      table = (uint64_t *)(*entry & 0x0000fffffffff000);
    }

  return NULL;
}
/* Install a table descriptor at *pte pointing to the next-level
 * translation table 'table'.
 */

static void set_pte_table_desc(uint64_t *pte, uint64_t *table,
                               unsigned int level)
{
#ifdef CONFIG_MMU_DEBUG
  sinfo("%s", XLAT_TABLE_LEVEL_SPACE(level));
  sinfo("%p: [Table] %p\n", pte, table);
#endif

  /* Combine the table address with the TABLE descriptor type bits */

  *pte = (uint64_t)table | PTE_TABLE_DESC;
}
/* Encode a block (level < 3) or page (level == 3) descriptor for the
 * physical address 'addr_pa' with the MT_* attributes in 'attrs', and
 * store it at *pte.  Shareability, access permissions and XN bits are
 * derived from the memory type and access flags.
 */

static void set_pte_block_desc(uint64_t *pte, uint64_t addr_pa,
                               unsigned int attrs, unsigned int level)
{
  uint64_t desc = addr_pa;
  unsigned int mem_type;

  /* Level 3 entries use the PAGE descriptor encoding; all upper
   * levels use the BLOCK encoding.
   */

  desc |= (level == 3) ? PTE_PAGE_DESC : PTE_BLOCK_DESC;

  /* NS bit for security memory access from secure state */

  desc |= (attrs & MT_NS) ? PTE_BLOCK_DESC_NS : 0;

  /* AP bits for Data access permission */

  desc |= (attrs & MT_RW) ? PTE_BLOCK_DESC_AP_RW : PTE_BLOCK_DESC_AP_RO;

  /* The access flag: set up-front so no Access-Flag fault is taken on
   * first use of the mapping.
   */

  desc |= PTE_BLOCK_DESC_AF;

  /* Memory attribute index field (selects the MAIR_ELx entry) */

  mem_type = MT_TYPE(attrs);
  desc |= PTE_BLOCK_DESC_MEMTYPE(mem_type);

  switch (mem_type)
    {
      case MT_DEVICE_NGNRNE:
      case MT_DEVICE_NGNRE:
      case MT_DEVICE_GRE:
        {
          /* Access to Device memory and non-cacheable memory are coherent
           * for all observers in the system and are treated as
           * Outer shareable, so, for these 2 types of memory,
           * it is not strictly needed to set shareability field
           */

          desc |= PTE_BLOCK_DESC_OUTER_SHARE;

          /* Map device memory as execute-never (both privileged and
           * unprivileged).
           */

          desc |= PTE_BLOCK_DESC_PXN;
          desc |= PTE_BLOCK_DESC_UXN;
          break;
        }

      case MT_NORMAL_NC:
      case MT_NORMAL:
        {
          /* Make Normal RW memory execute-never (W^X policy); also
           * honor an explicit MT_EXECUTE_NEVER request.
           */

          if ((attrs & MT_RW) || (attrs & MT_EXECUTE_NEVER))
            {
              desc |= PTE_BLOCK_DESC_PXN;
            }

          /* Cacheable Normal memory is inner shareable; non-cacheable
           * Normal memory is outer shareable.
           */

          if (mem_type == MT_NORMAL)
            {
              desc |= PTE_BLOCK_DESC_INNER_SHARE;
            }
          else
            {
              desc |= PTE_BLOCK_DESC_OUTER_SHARE;
            }
        }

      /* NOTE(review): no default case — attrs are expected to carry one
       * of the MT_* types listed above.
       */
    }

#if defined(CONFIG_MMU_DEBUG) && defined(CONFIG_MMU_DUMP_PTE)
  sinfo("%s ", XLAT_TABLE_LEVEL_SPACE(level));
  sinfo("%p: ", pte);
  sinfo("%s ",
        (mem_type ==
        MT_NORMAL) ? "MEM" :((mem_type == MT_NORMAL_NC) ? "NC" : "DEV"));
  sinfo("%s ", (attrs & MT_RW) ? "-RW" : "-RO");
  sinfo("%s ", (attrs & MT_NS) ? "-NS" : "-S");
  sinfo("%s ", (attrs & MT_EXECUTE_NEVER) ? "-XN" : "-EXEC");
  sinfo("\n");
#endif

  *pte = desc;
}
/* Hand out the next unused table from the statically pre-allocated
 * xlat_tables[] pool.  Panics (via __MMU_ASSERT) once all
 * CONFIG_MAX_XLAT_TABLES tables have been consumed.
 */

static uint64_t *new_prealloc_table(void)
{
  static unsigned int next_table;

  __MMU_ASSERT(next_table < CONFIG_MAX_XLAT_TABLES,
               "Enough xlat tables not allocated");

  return xlat_tables[next_table++];
}
/* Split a block descriptor at 'level' into a next-level table whose
 * entries cover the same physical range with the same attributes.
 */

static void split_pte_block_desc(uint64_t *pte, int level)
{
  uint64_t old_block_desc = *pte;
  uint64_t *new_table;
  unsigned int i = 0;

  /* get address size shift bits for next level */

  int levelshift = LEVEL_TO_VA_SIZE_SHIFT(level + 1);

#ifdef CONFIG_MMU_DEBUG
  sinfo("Splitting existing PTE %p(L%d)\n", pte, level);
#endif

  new_table = new_prealloc_table();

  for (i = 0; i < XLAT_TABLE_ENTRIES; i++)
    {
      /* Widen 'i' to 64 bits before shifting: for a level-0 or level-1
       * split 'levelshift' can reach 30 and beyond, so shifting the
       * 32-bit 'i' (up to 511) directly would overflow and corrupt the
       * output-address field of the new entries.
       */

      new_table[i] = old_block_desc | ((uint64_t)i << levelshift);

      if (level == 2)
        {
          /* The entries below a level-2 block are page descriptors */

          new_table[i] |= PTE_PAGE_DESC;
        }
    }

  /* Overwrite existing PTE set the new table into effect */

  set_pte_table_desc(pte, new_table, level);
}
/* Create/Populate translation table(s) for given region.
 *
 * Greedy mapping: for each remaining chunk of the region, use the
 * largest block/page size that fits both the remaining size and the
 * current virtual-address alignment; otherwise descend one table
 * level (allocating or splitting entries as needed) and retry.
 */

static void init_xlat_tables(const struct arm_mmu_region *region)
{
  uint64_t *pte;
  uint64_t virt = region->base_va;
  uint64_t phys = region->base_pa;
  uint64_t size = region->size;
  uint64_t attrs = region->attrs;
  uint64_t level_size;
  uint64_t *new_table;
  unsigned int level = XLAT_TABLE_BASE_LEVEL;

#ifdef CONFIG_MMU_DEBUG
  sinfo("mmap: virt %llx phys %llx size %llx\n", virt, phys, size);
#endif

  /* check minimum alignment requirement for given mmap region */

  __MMU_ASSERT(((virt & (PAGE_SIZE - 1)) == 0) &&
               ((size & (PAGE_SIZE - 1)) == 0),
               "address/size are not page aligned\n");

  while (size)
    {
      __MMU_ASSERT(level <= XLAT_TABLE_LEVEL_MAX,
                   "max translation table level exceeded\n");

      /* Locate PTE for given virtual address and page table level */

      pte = calculate_pte_index(virt, level);
      __MMU_ASSERT(pte != NULL, "pte not found\n");

      /* Address span covered by one entry at this level */

      level_size = 1ULL << LEVEL_TO_VA_SIZE_SHIFT(level);

      if (size >= level_size && !(virt & (level_size - 1)))
        {
          /* Given range fits into level size,
           * create block/page descriptor
           */

          set_pte_block_desc(pte, phys, attrs, level);
          virt += level_size;
          phys += level_size;
          size -= level_size;

          /* Range is mapped, start again for next range from the base
           * level (the next chunk may fit a larger block).
           */

          level = XLAT_TABLE_BASE_LEVEL;
        }
      else if (pte_desc_type(pte) == PTE_INVALID_DESC)
        {
          /* Range doesn't fit, create subtable and descend */

          new_table = new_prealloc_table();
          set_pte_table_desc(pte, new_table, level);
          level++;
        }
      else if (pte_desc_type(pte) == PTE_BLOCK_DESC)
        {
          /* An existing block mapping is in the way: split it into a
           * finer-grained table, then descend.
           */

          split_pte_block_desc(pte, level);
          level++;
        }
      else if (pte_desc_type(pte) == PTE_TABLE_DESC)
        {
          /* A table already exists here; just descend into it */

          level++;
        }
    }
}
/* Build the kernel translation tables from the SoC-provided region
 * list (mmu_config) plus the fixed NuttX execution regions.
 */

static void setup_page_tables(void)
{
  const struct arm_mmu_region *region;
  uint64_t max_va = 0;
  uint64_t max_pa = 0;
  unsigned int i;

  /* First pass: verify every SoC region fits within the configured
   * virtual and physical address spaces.
   */

  for (i = 0; i < mmu_config.num_regions; i++)
    {
      region = &mmu_config.mmu_regions[i];
      max_va = MAX(max_va, region->base_va + region->size);
      max_pa = MAX(max_pa, region->base_pa + region->size);
    }

  __MMU_ASSERT(max_va <= (1ULL << CONFIG_ARM64_VA_BITS),
               "Maximum VA not supported\n");
  __MMU_ASSERT(max_pa <= (1ULL << CONFIG_ARM64_PA_BITS),
               "Maximum PA not supported\n");

  /* Second pass: map the user provided platform regions, skipping
   * empty placeholder entries.
   */

  for (i = 0; i < mmu_config.num_regions; i++)
    {
      region = &mmu_config.mmu_regions[i];
      if (region->size || region->attrs)
        {
          init_xlat_tables(region);
        }
    }

  /* Finally map the NuttX execution regions (text/rodata/data) */

  for (i = 0; i < ARRAY_SIZE(mmu_nxrt_regions); i++)
    {
      region = &mmu_nxrt_regions[i];
      if (region->size || region->attrs)
        {
          init_xlat_tables(region);
        }
    }
}
/* Program MAIR/TCR/TTBR0 at EL1, then enable the MMU and data cache.
 * The register writes must be visible (ISB) before SCTLR_EL1.M is set.
 * 'flags' is currently unused.
 */

static void enable_mmu_el1(unsigned int flags)
{
  uint64_t value;

  UNUSED(flags);

  /* Set MAIR, TCR and TTBR registers */

  write_sysreg(MEMORY_ATTRIBUTES, mair_el1);
  write_sysreg(get_tcr(1), tcr_el1);
  write_sysreg(((uint64_t)base_xlat_table), ttbr0_el1);

  /* Ensure these changes are seen before MMU is enabled */

  ARM64_ISB();

  /* Enable the MMU and data cache (SCTLR_EL1.M and SCTLR_EL1.C) */

  value = read_sysreg(sctlr_el1);
  write_sysreg((value | SCTLR_M_BIT | SCTLR_C_BIT), sctlr_el1);

  /* Ensure the MMU enable takes effect immediately */

  ARM64_ISB();

#ifdef CONFIG_MMU_DEBUG
  sinfo("MMU enabled with dcache\n");
#endif
}
/***************************************************************************
* Public Functions
***************************************************************************/
/* Map one additional memory region after MMU initialization.  Both the
 * virtual base address and the size must be page aligned; -EINVAL is
 * returned otherwise, 0 on success.
 */

int arm_mmu_set_memregion(const struct arm_mmu_region *region)
{
  uint64_t virt = region->base_va;
  uint64_t size = region->size;

  /* Reject regions that do not meet the page-alignment requirement */

  if ((virt & (PAGE_SIZE - 1)) != 0 ||
      (size & (PAGE_SIZE - 1)) != 0)
    {
      sinfo("address/size are not page aligned\n");
      return -EINVAL;
    }

  init_xlat_tables(region);
  return 0;
}
/* MMU default configuration
 *
 * This function provides the default configuration mechanism for the Memory
 * Management Unit (MMU).  It must run at EL1 with the MMU still off;
 * only the primary core builds the (shared) page tables, every core
 * then enables its own MMU and records the data cache line size.
 */

int arm64_mmu_init(bool is_primary_core)
{
  uint64_t val;
  unsigned flags = 0;
  uint64_t ctr_el0;
  uint32_t dminline;

  /* Current MMU code supports only EL1 */

  __asm__ volatile ("mrs %0, CurrentEL" : "=r" (val));
  __MMU_ASSERT(GET_EL(val) == MODE_EL1,
               "Exception level not EL1, MMU not enabled!\n");

  /* Ensure that MMU is already not enabled */

  __asm__ volatile ("mrs %0, sctlr_el1" : "=r" (val));
  __MMU_ASSERT((val & SCTLR_M_BIT) == 0, "MMU is already enabled\n");

#ifdef CONFIG_MMU_DEBUG
  sinfo("xlat tables:\n");
  sinfo("base table(L%d): %p, %d entries\n", XLAT_TABLE_BASE_LEVEL,
        (uint64_t *)base_xlat_table, NUM_BASE_LEVEL_ENTRIES);

  for (int idx = 0; idx < CONFIG_MAX_XLAT_TABLES; idx++)
    {
      sinfo("%d: %p\n", idx, (uint64_t *)(xlat_tables + idx));
    }
#endif

  /* Only the boot core populates the shared translation tables;
   * secondary cores reuse them.
   */

  if (is_primary_core)
    {
      setup_page_tables();
    }

  /* currently only EL1 is supported */

  enable_mmu_el1(flags);

  /* get cache line size: CTR_EL0.DminLine is log2 of the number of
   * 4-byte words in the smallest data cache line.
   */

  ctr_el0 = read_sysreg(CTR_EL0);
  dminline = (ctr_el0 >> CTR_EL0_DMINLINE_SHIFT) & CTR_EL0_DMINLINE_MASK;
  dcache_line_size = 4 << dminline;

  return 0;
}

View file

@ -0,0 +1,254 @@
/****************************************************************************
* arch/arm64/src/common/arm64_mmu.h
*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership. The
* ASF licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the
* License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
*
****************************************************************************/
#ifndef __ARCH_ARM64_SRC_COMMON_ARM64_MMU_H
#define __ARCH_ARM64_SRC_COMMON_ARM64_MMU_H
/****************************************************************************
* Included Files
****************************************************************************/
#include <nuttx/config.h>
#include "arm64_arch.h"
/****************************************************************************
* Pre-processor Definitions
****************************************************************************/
/* Following Memory types supported through MAIR encodings can be passed
* by user through "attrs"(attributes) field of specified memory region.
* As MAIR supports such 8 encodings, we will reserve attrs[2:0];
 * so that we can provide encodings up to 7 if needed in future.
*
* See Arm® Architecture Reference Manual, ARM DDI 0487E.a, B2.7.2
*/
#define MT_TYPE_MASK 0x7U
#define MT_TYPE(attr) ((attr) & MT_TYPE_MASK)
#define MT_DEVICE_NGNRNE 0U
#define MT_DEVICE_NGNRE 1U
#define MT_DEVICE_GRE 2U
#define MT_NORMAL_NC 3U
#define MT_NORMAL 4U
#define MEMORY_ATTRIBUTES ((0x00 << (MT_DEVICE_NGNRNE * 8)) | \
(0x04 << (MT_DEVICE_NGNRE * 8)) | \
(0x0c << (MT_DEVICE_GRE * 8)) | \
(0x44 << (MT_NORMAL_NC * 8)) | \
(0xffUL << (MT_NORMAL * 8)))
/* More flags from the user's perspective are supported using remaining bits
* of "attrs" field, i.e. attrs[31:3], underlying code will take care
* of setting PTE fields correctly.
*
* current usage of attrs[31:3] is:
* attrs[3] : Access Permissions
* attrs[4] : Memory access from secure/ns state
* attrs[5] : Execute Permissions
*
* See Arm® Architecture Reference Manual, ARM DDI 0487E.a
*/
#define MT_PERM_SHIFT 3U
#define MT_SEC_SHIFT 4U
#define MT_EXECUTE_SHIFT 5U
#define MT_RO (0U << MT_PERM_SHIFT)
#define MT_RW (1U << MT_PERM_SHIFT)
#define MT_SECURE (0U << MT_SEC_SHIFT)
#define MT_NS (1U << MT_SEC_SHIFT)
#define MT_EXECUTE (0U << MT_EXECUTE_SHIFT)
#define MT_EXECUTE_NEVER (1U << MT_EXECUTE_SHIFT)
/* Some compound attributes for most common usages */
#define MT_CODE (MT_NORMAL | MT_RO | MT_EXECUTE)
#define MT_RODATA (MT_NORMAL | MT_RO | MT_EXECUTE_NEVER)
/* PTE descriptor can be Block descriptor or Table descriptor
* or Page descriptor.
*/
#define PTE_DESC_TYPE_MASK 3U
#define PTE_BLOCK_DESC 1U
#define PTE_TABLE_DESC 3U
#define PTE_PAGE_DESC 3U
#define PTE_INVALID_DESC 0U
/* Block and Page descriptor attributes fields */
#define PTE_BLOCK_DESC_MEMTYPE(x) ((x) << 2)
#define PTE_BLOCK_DESC_NS (1ULL << 5)
#define PTE_BLOCK_DESC_AP_RO (1ULL << 7)
#define PTE_BLOCK_DESC_AP_RW (0ULL << 7)
#define PTE_BLOCK_DESC_NON_SHARE (0ULL << 8)
#define PTE_BLOCK_DESC_OUTER_SHARE (2ULL << 8)
#define PTE_BLOCK_DESC_INNER_SHARE (3ULL << 8)
#define PTE_BLOCK_DESC_AF (1ULL << 10)
#define PTE_BLOCK_DESC_NG (1ULL << 11)
#define PTE_BLOCK_DESC_PXN (1ULL << 53)
#define PTE_BLOCK_DESC_UXN (1ULL << 54)
/* TCR definitions.
*
* See Arm® Architecture Reference Manual, ARM DDI 0487E.a D13.2.112~114
*
*/
#define TCR_EL1_IPS_SHIFT 32U
#define TCR_EL2_PS_SHIFT 16U
#define TCR_EL3_PS_SHIFT 16U
#define TCR_T0SZ_SHIFT 0U
#define TCR_T0SZ(x) ((64 - (x)) << TCR_T0SZ_SHIFT)
#define TCR_IRGN_NC (0ULL << 8)
#define TCR_IRGN_WBWA (1ULL << 8)
#define TCR_IRGN_WT (2ULL << 8)
#define TCR_IRGN_WBNWA (3ULL << 8)
#define TCR_IRGN_MASK (3ULL << 8)
#define TCR_ORGN_NC (0ULL << 10)
#define TCR_ORGN_WBWA (1ULL << 10)
#define TCR_ORGN_WT (2ULL << 10)
#define TCR_ORGN_WBNWA (3ULL << 10)
#define TCR_ORGN_MASK (3ULL << 10)
#define TCR_SHARED_NON (0ULL << 12)
#define TCR_SHARED_OUTER (2ULL << 12)
#define TCR_SHARED_INNER (3ULL << 12)
#define TCR_TG0_4K (0ULL << 14)
#define TCR_TG0_64K (1ULL << 14)
#define TCR_TG0_16K (2ULL << 14)
#define TCR_EPD1_DISABLE (1ULL << 23)
#define TCR_PS_BITS_4GB 0x0ULL
#define TCR_PS_BITS_64GB 0x1ULL
#define TCR_PS_BITS_1TB 0x2ULL
#define TCR_PS_BITS_4TB 0x3ULL
#define TCR_PS_BITS_16TB 0x4ULL
#define TCR_PS_BITS_256TB 0x5ULL
#define CTR_EL0_DMINLINE_SHIFT 16
#define CTR_EL0_DMINLINE_MASK BIT_MASK(4)
#define CTR_EL0_CWG_SHIFT 24
#define CTR_EL0_CWG_MASK BIT_MASK(4)
/* clidr_el1 */
#define CLIDR_EL1_LOC_SHIFT 24
#define CLIDR_EL1_LOC_MASK BIT_MASK(3)
#define CLIDR_EL1_CTYPE_SHIFT(level) ((level) * 3)
#define CLIDR_EL1_CTYPE_MASK BIT_MASK(3)
/* ccsidr_el1 */
#define CCSIDR_EL1_LN_SZ_SHIFT 0
#define CCSIDR_EL1_LN_SZ_MASK BIT_MASK(3)
#define CCSIDR_EL1_WAYS_SHIFT 3
#define CCSIDR_EL1_WAYS_MASK BIT_MASK(10)
#define CCSIDR_EL1_SETS_SHIFT 13
#define CCSIDR_EL1_SETS_MASK BIT_MASK(15)
/* Convenience macros to represent the ARMv8-A-specific
* configuration for memory access permission and
* cache-ability attribution.
*/
#define MMU_REGION_ENTRY(_name, _base_pa, _base_va, _size, _attrs) \
{ \
.name = (_name), \
.base_pa = (_base_pa), \
.base_va = (_base_va), \
.size = (_size), \
.attrs = (_attrs), \
}
#define MMU_REGION_FLAT_ENTRY(name, adr, sz, attrs) \
MMU_REGION_ENTRY(name, adr, adr, sz, attrs)
#ifndef __ASSEMBLY__
/****************************************************************************
* Public Types
****************************************************************************/
/* Region definition data structure.  Describes one fixed MMU mapping that
 * is programmed into the translation tables during initialization.
 */

struct arm_mmu_region
{
  /* Region Base Physical Address */

  uint64_t base_pa;

  /* Region Base Virtual Address */

  uint64_t base_va;

  /* Region size */

  uint64_t size;

  /* Region Name */

  const char *name;

  /* Region Attributes -- access-permission / cacheability flags
   * (presumably built from the MMU attribute macros in this header;
   * confirm at the sites that populate mmu_config)
   */

  unsigned int attrs;
};
/* MMU configuration data structure.  Supplied per-SoC (see mmu_config
 * below) and consumed by arm64_mmu_init().
 */

struct arm_mmu_config
{
  /* Number of entries in the mmu_regions array */

  uint32_t num_regions;

  /* Regions to be mapped during initialization */

  const struct arm_mmu_region *mmu_regions;
};
/* Translation-table state: holds the root (base) translation table used
 * for address translation.
 */

struct arm_mmu_ptables
{
  /* Pointer to the base (level-0/level-1) translation table */

  uint64_t *base_xlat_table;
};
/* Reference to the MMU configuration.
*
* This struct is defined and populated for each SoC,
* and holds the build-time configuration information for the fixed MMU
* regions enabled during kernel initialization.
*/
extern const struct arm_mmu_config mmu_config;
extern size_t dcache_line_size;
/****************************************************************************
* Public Function Prototypes
****************************************************************************/
int arm64_mmu_init(bool is_primary_core);
#endif /* __ASSEMBLY__ */
#endif /* __ARCH_ARM64_SRC_COMMON_ARM64_MMU_H */

View file

@ -0,0 +1,125 @@
/****************************************************************************
* arch/arm64/src/common/arm64_releasepending.c
*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership. The
* ASF licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the
* License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
****************************************************************************/
/****************************************************************************
* Included Files
****************************************************************************/
#include <nuttx/config.h>
#include <sched.h>
#include <debug.h>
#include <nuttx/arch.h>
#include <nuttx/sched.h>
#include "sched/sched.h"
#include "group/group.h"
#include "arm64_internal.h"
/****************************************************************************
* Public Functions
****************************************************************************/
/****************************************************************************
 * Name: up_release_pending
 *
 * Description:
 *   Release and ready-to-run tasks that have collected in the pending task
 *   list.  This can call a context switch if a new task is placed at the
 *   head of the ready to run list.
 *
 *   May run either from an interrupt handler (CURRENT_REGS non-NULL) or
 *   from normal thread context.
 *
 ****************************************************************************/

void up_release_pending(void)
{
  struct tcb_s *rtcb = this_task(); /* Task running before the merge */

  /* Merge the g_pendingtasks list into the ready-to-run task list */

  if (nxsched_merge_pending())
    {
      /* The currently active task has changed!  We will need to
       * switch contexts.
       */

      /* Update scheduler parameters */

      nxsched_suspend_scheduler(rtcb);

      /* Are we operating in interrupt context? */

      if (CURRENT_REGS)
        {
          /* Yes, then we have to do things differently.
           * Just copy the CURRENT_REGS into the OLD rtcb.
           */

          arm64_savestate(rtcb->xcp.regs);

          /* Restore the exception context of the rtcb at the (new) head
           * of the ready-to-run task list.
           */

          rtcb = this_task();

          /* Update scheduler parameters */

          nxsched_resume_scheduler(rtcb);

          /* Then switch contexts.  Any necessary address environment
           * changes will be made when the interrupt returns.
           * NOTE(review): arm64_restorestate presumably just redirects
           * CURRENT_REGS so the interrupt-exit path performs the actual
           * register restore -- confirm in arm64_internal.h.
           */

          arm64_restorestate(rtcb->xcp.regs);
        }

      /* No, then we will need to perform the user context switch */

      else
        {
          struct tcb_s *nexttcb = this_task();

#ifdef CONFIG_ARCH_ADDRENV
          /* Make sure that the address environment for the previously
           * running task is closed down gracefully (data caches dump,
           * MMU flushed) and set up the address environment for the new
           * thread at the head of the ready-to-run list.
           */

          group_addrenv(nexttcb);
#endif
          /* Update scheduler parameters */

          nxsched_resume_scheduler(nexttcb);

          /* Switch context to the context of the task at the head of the
           * ready to run list.
           */

          arm64_switchcontext(&rtcb->xcp.regs, nexttcb->xcp.regs);

          /* arm64_switchcontext forces a context switch to the task at the
           * head of the ready-to-run list.  It does not 'return' in the
           * normal sense.  When it does return, it is because the blocked
           * task is again ready to run and has execution priority.
           */
        }
    }
}

View file

@ -0,0 +1,114 @@
/****************************************************************************
* arch/arm64/src/common/arm64_releasestack.c
*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership. The
* ASF licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the
* License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
****************************************************************************/
/****************************************************************************
* Included Files
****************************************************************************/
#include <nuttx/config.h>
#include <sys/types.h>
#include <stdint.h>
#include <sched.h>
#include <debug.h>
#include <nuttx/arch.h>
#include <nuttx/kmalloc.h>
#include <nuttx/tls.h>
#include <nuttx/board.h>
#include <arch/irq.h>
#include "arm64_internal.h"
#include "arm64_fatal.h"
/****************************************************************************
* Pre-processor Macros
****************************************************************************/
/****************************************************************************
* Private Types
****************************************************************************/
/****************************************************************************
* Private Function Prototypes
****************************************************************************/
/****************************************************************************
* Public Functions
****************************************************************************/
/****************************************************************************
 * Name: up_release_stack
 *
 * Description:
 *   A task has been stopped.  Free all stack related resources retained in
 *   the defunct TCB.
 *
 * Input Parameters:
 *   - dtcb:  The TCB containing information about the stack to be released
 *   - ttype: The thread type.  This may be one of following (defined in
 *            include/nuttx/sched.h):
 *
 *            TCB_FLAG_TTYPE_TASK     Normal user task
 *            TCB_FLAG_TTYPE_PTHREAD  User pthread
 *            TCB_FLAG_TTYPE_KERNEL   Kernel thread
 *
 *            This thread type is normally available in the flags field of
 *            the TCB, however, there are certain error recovery contexts
 *            where the TCB may not be fully initialized when
 *            up_release_stack is called.
 *
 *            If either CONFIG_BUILD_PROTECTED or CONFIG_BUILD_KERNEL are
 *            defined, then this thread type may affect how the stack is
 *            freed.  For example, kernel thread stacks may have been
 *            allocated from protected kernel memory.  Stacks for user
 *            tasks and threads must have come from memory that is
 *            accessible to user code.
 *
 * Returned Value:
 *   None
 *
 ****************************************************************************/

void up_release_stack(struct tcb_s *dtcb, uint8_t ttype)
{
  /* Release the allocation only if this TCB actually owns a stack and
   * is permitted to free it.
   */

  if (dtcb->stack_alloc_ptr != NULL &&
      (dtcb->flags & TCB_FLAG_FREE_STACK) != 0)
    {
#ifdef CONFIG_MM_KERNEL_HEAP
      /* Kernel thread stacks come from the (protected) kernel heap */

      if (ttype == TCB_FLAG_TTYPE_KERNEL)
        {
          kmm_free(dtcb->stack_alloc_ptr);
        }
      else
#endif
        {
          /* Task and pthread stacks come from the user-space heap */

          kumm_free(dtcb->stack_alloc_ptr);
        }
    }

  /* Scrub all stack references from the TCB, whether or not the memory
   * was freed above.
   */

  dtcb->stack_alloc_ptr = NULL;
  dtcb->stack_base_ptr  = NULL;
  dtcb->adj_stack_size  = 0;
  dtcb->flags          &= ~TCB_FLAG_FREE_STACK;
}

View file

@ -0,0 +1,182 @@
/****************************************************************************
* arch/arm64/src/common/arm64_reprioritizertr.c
*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership. The
* ASF licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the
* License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
****************************************************************************/
/****************************************************************************
* Included Files
****************************************************************************/
#include <nuttx/config.h>
#include <stdint.h>
#include <stdbool.h>
#include <sched.h>
#include <assert.h>
#include <debug.h>
#include <nuttx/arch.h>
#include <nuttx/sched.h>
#include "sched/sched.h"
#include "group/group.h"
#include "arm64_internal.h"
/****************************************************************************
* Public Functions
****************************************************************************/
/****************************************************************************
 * Name: up_reprioritize_rtr
 *
 * Description:
 *   Called when the priority of a running or
 *   ready-to-run task changes and the reprioritization will
 *   cause a context switch.  Two cases:
 *
 *   1) The priority of the currently running task drops and the next
 *      task in the ready to run list has priority.
 *   2) An idle, ready to run task's priority has been raised above the
 *      priority of the current, running task and it now has the priority.
 *
 * Input Parameters:
 *   tcb: The TCB of the task that has been reprioritized
 *   priority: The new task priority
 *
 ****************************************************************************/

void up_reprioritize_rtr(struct tcb_s *tcb, uint8_t priority)
{
  /* Verify that the caller is sane: the task must be in a ready-to-run
   * state and the new priority inside the configured range.
   */

  if (tcb->task_state < FIRST_READY_TO_RUN_STATE ||
      tcb->task_state > LAST_READY_TO_RUN_STATE
#if SCHED_PRIORITY_MIN > 0
      || priority < SCHED_PRIORITY_MIN
#endif
#if SCHED_PRIORITY_MAX < UINT8_MAX
      || priority > SCHED_PRIORITY_MAX
#endif
      )
    {
      DEBUGPANIC();
    }
  else
    {
      struct tcb_s *rtcb = this_task();
      bool switch_needed;

      /* Remove the tcb task from the ready-to-run list.
       * nxsched_remove_readytorun will return true if we just
       * removed the head of the ready to run list.
       */

      switch_needed = nxsched_remove_readytorun(tcb);

      /* Setup up the new task priority */

      tcb->sched_priority = (uint8_t)priority;

      /* Put the task back into the ready-to-run list at its new priority.
       * nxsched_add_readytorun will return true if the task became the
       * new head of that list.  We will need to perform a context
       * switch only if the EXCLUSIVE or of the two calls is non-zero
       * (i.e., one and only one the calls changes the head of the
       * ready-to-run list).
       */

      switch_needed ^= nxsched_add_readytorun(tcb);

      /* Now, perform the context switch if one is needed */

      if (switch_needed)
        {
          /* If we are going to do a context switch, then now is the right
           * time to add any pending tasks back into the ready-to-run list.
           */

          if (g_pendingtasks.head)
            {
              nxsched_merge_pending();
            }

          /* Update scheduler parameters */

          nxsched_suspend_scheduler(rtcb);

          /* Are we in an interrupt handler? */

          if (CURRENT_REGS)
            {
              /* Yes, then we have to do things differently.
               * Just copy the CURRENT_REGS into the OLD rtcb.
               */

              arm64_savestate(rtcb->xcp.regs);

              /* Restore the exception context of the rtcb at the (new) head
               * of the ready-to-run task list.
               */

              rtcb = this_task();

              /* Update scheduler parameters */

              nxsched_resume_scheduler(rtcb);

              /* Then switch contexts.  Any necessary address environment
               * changes will be made when the interrupt returns.
               */

              arm64_restorestate(rtcb->xcp.regs);
            }

          /* No, then we will need to perform the user context switch */

          else
            {
              struct tcb_s *nexttcb = this_task();

#ifdef CONFIG_ARCH_ADDRENV
              /* Make sure that the address environment for the previously
               * running task is closed down gracefully (data caches dump,
               * MMU flushed) and set up the address environment for the new
               * thread at the head of the ready-to-run list.
               */

              group_addrenv(nexttcb);
#endif
              /* Update scheduler parameters */

              nxsched_resume_scheduler(nexttcb);

              /* Switch context to the context of the task at the head of the
               * ready to run list.
               */

              arm64_switchcontext(&rtcb->xcp.regs, nexttcb->xcp.regs);

              /* arm64_switchcontext forces a context switch to the task at
               * the head of the ready-to-run list.  It does not 'return' in
               * the normal sense.  When it does return, it is because the
               * blocked task is again ready to run and has execution
               * priority.
               */
            }
        }
    }
}

View file

@ -0,0 +1,361 @@
/****************************************************************************
* arch/arm64/src/common/arm64_schedulesigaction.c
*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership. The
* ASF licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the
* License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
****************************************************************************/
/****************************************************************************
* Included Files
****************************************************************************/
#include <nuttx/config.h>
#include <stdint.h>
#include <sched.h>
#include <assert.h>
#include <debug.h>
#include <nuttx/irq.h>
#include <nuttx/arch.h>
#include "sched/sched.h"
#include "arm64_internal.h"
#include "arm64_arch.h"
#include "irq/irq.h"
#include "arm64_fatal.h"
#ifdef CONFIG_ARCH_FPU
#include "arm64_fpu.h"
#endif
/****************************************************************************
* Public Functions
****************************************************************************/
/****************************************************************************
 * Name: arm64_init_signal_process
 *
 * Description:
 *   Build a fresh exception context on the thread's stack that will enter
 *   arm64_sigdeliver() in EL1 with IRQ/FIQ masked.  The new frame is
 *   carved below the thread's current context (and below a new FPU save
 *   area when CONFIG_ARCH_FPU is set); tcb->xcp.regs is redirected to it
 *   so that the next resumption of this thread runs the trampoline.
 *
 * Input Parameters:
 *   tcb - The TCB of the task that will perform signal delivery
 *
 ****************************************************************************/

void arm64_init_signal_process(struct tcb_s *tcb)
{
  struct regs_context *pctx = (struct regs_context *)tcb->xcp.regs;
  struct regs_context *psigctx;

  /* Start carving frames below the interrupted context's ELx stack
   * pointer.
   */

  char *stack_ptr = (char *)pctx->sp_elx;

#ifdef CONFIG_ARCH_FPU
  struct fpu_reg *pfpuctx;

  /* Reserve an FPU save area for the trampoline first */

  pfpuctx = STACK_PTR_TO_FRAME(struct fpu_reg, stack_ptr);
  tcb->xcp.fpu_regs = (uint64_t *)pfpuctx;

  /* set fpu context */

  arm64_init_fpu(tcb);
  stack_ptr = (char *)pfpuctx;
#endif

  /* Carve and zero the trampoline's exception frame */

  psigctx = STACK_PTR_TO_FRAME(struct regs_context, stack_ptr);
  memset(psigctx, 0, sizeof(struct regs_context));

  /* Execution resumes at the signal trampoline entry point */

  psigctx->elr = (uint64_t)arm64_sigdeliver;

  /* Keep using SP_EL1, with IRQ and FIQ masked until the trampoline
   * re-enables interrupts.
   */

  psigctx->spsr = SPSR_MODE_EL1H | DAIF_FIQ_BIT | DAIF_IRQ_BIT;
  psigctx->sp_elx = (uint64_t)psigctx;

  /* NOTE(review): sp_el0 is also pointed at the kernel frame --
   * presumably unused while executing in EL1h mode; confirm.
   */

  psigctx->sp_el0 = (uint64_t)psigctx;
  psigctx->exe_depth = 1;

  /* The tpidr registers carry the current TCB pointer */

  psigctx->tpidr_el0 = (uint64_t)tcb;
  psigctx->tpidr_el1 = (uint64_t)tcb;

  /* Redirect the thread to the trampoline context */

  tcb->xcp.regs = (uint64_t *)psigctx;
}
/****************************************************************************
* Name: up_schedule_sigaction
*
* Description:
* This function is called by the OS when one or more
* signal handling actions have been queued for execution.
* The architecture specific code must configure things so
* that the 'sigdeliver' callback is executed on the thread
* specified by 'tcb' as soon as possible.
*
* This function may be called from interrupt handling logic.
*
* This operation should not cause the task to be unblocked
* nor should it cause any immediate execution of sigdeliver.
* Typically, a few cases need to be considered:
*
* (1) This function may be called from an interrupt handler
* During interrupt processing, all xcptcontext structures
* should be valid for all tasks. That structure should
* be modified to invoke sigdeliver() either on return
* from (this) interrupt or on some subsequent context
* switch to the recipient task.
* (2) If not in an interrupt handler and the tcb is NOT
* the currently executing task, then again just modify
* the saved xcptcontext structure for the recipient
* task so it will invoke sigdeliver when that task is
* later resumed.
* (3) If not in an interrupt handler and the tcb IS the
* currently executing task -- just call the signal
* handler now.
*
* Assumptions:
* Called from critical section
*
****************************************************************************/
#ifndef CONFIG_SMP
void up_schedule_sigaction(struct tcb_s *tcb, sig_deliver_t sigdeliver)
{
  /* Refuse to handle nested signal actions */

  if (!tcb->xcp.sigdeliver)
    {
      /* First, handle some special cases when the signal is being delivered
       * to the task that is currently executing on this CPU.
       */

      if (tcb == this_task())
        {
          /* CASE 1: We are not in an interrupt handler and a task is
           * signaling itself for some reason.
           */

          if (!CURRENT_REGS)
            {
              /* In this case just deliver the signal now.
               * REVISIT: Signal handler will run in a critical section!
               */

              sigdeliver(tcb);
            }

          /* CASE 2: We are in an interrupt handler AND the interrupted
           * task is the same as the one that must receive the signal, then
           * we will have to modify the return state as well as the state
           * in the TCB.
           *
           * Hmmm... there looks like a latent bug here: The following logic
           * would fail in the strange case where we are in an interrupt
           * handler, the thread is signaling itself, but a context switch
           * to another task has occurred so that CURRENT_REGS does not
           * refer to the thread of this_task()!
           */

          else
            {
              /* Save the interrupted context; it will be restored by the
               * signal trampoline after the signals have been delivered.
               */

              tcb->xcp.sigdeliver = sigdeliver;

              /* Create the signal-delivery context.
               * arm64_init_signal_process() redirects tcb->xcp.regs to a
               * new frame that enters arm64_sigdeliver().
               */

              tcb->xcp.saved_reg = tcb->xcp.regs;
#ifdef CONFIG_ARCH_FPU
              tcb->xcp.saved_fpu_regs = tcb->xcp.fpu_regs;
#endif
              arm64_init_signal_process(tcb);

              /* Trigger the switch to the signal-delivery context on
               * return from the interrupt.
               */

              CURRENT_REGS = tcb->xcp.regs;
            }
        }

      /* Otherwise, we are (1) signaling a task is not running from an
       * interrupt handler or (2) we are not in an interrupt handler and the
       * running task is signaling some other non-running task.
       */

      else
        {
          /* Save the target thread's context; it will be restored by the
           * signal trampoline after the signals have been delivered.
           */

          tcb->xcp.sigdeliver = sigdeliver;
#ifdef CONFIG_ARCH_FPU
          tcb->xcp.saved_fpu_regs = tcb->xcp.fpu_regs;
#endif
          /* create signal process context */

          tcb->xcp.saved_reg = tcb->xcp.regs;
          arm64_init_signal_process(tcb);
        }
    }
}
#endif /* !CONFIG_SMP */
#ifdef CONFIG_SMP
/****************************************************************************
 * Name: up_schedule_sigaction (SMP variant)
 *
 * Description:
 *   Configure the thread 'tcb' so that it invokes the 'sigdeliver'
 *   trampoline the next time it executes.  If the target thread is
 *   running on another CPU, that CPU is paused while its saved context
 *   is redirected.  Called from within a critical section.
 *
 *   Fixes relative to the original code:
 *   - Every path now saves the FPU context pointer into
 *     xcp.saved_fpu_regs.  Two paths previously wrote
 *     xcp.sig_save_fpu_regs, which arm64_sigdeliver() never reads back
 *     (it restores from xcp.saved_fpu_regs), so the FPU context would
 *     not have been restored after signal delivery.
 *   - Removed an inconsistent (void *) cast on one sigdeliver
 *     assignment so all paths assign the handler identically.
 *
 * Input Parameters:
 *   tcb        - The TCB of the thread that will receive the signal
 *   sigdeliver - The signal delivery trampoline entry point
 *
 ****************************************************************************/

void up_schedule_sigaction(struct tcb_s *tcb, sig_deliver_t sigdeliver)
{
  int cpu;
  int me;

  sinfo("tcb=%p sigdeliver=%p\n", tcb, sigdeliver);

  /* Refuse to handle nested signal actions */

  if (!tcb->xcp.sigdeliver)
    {
      /* First, handle some special cases when the signal is being delivered
       * to a task that is currently executing on any CPU.
       */

      sinfo("rtcb=%p CURRENT_REGS=%p\n", this_task(), CURRENT_REGS);

      if (tcb->task_state == TSTATE_TASK_RUNNING)
        {
          me  = this_cpu();
          cpu = tcb->cpu;

          /* CASE 1: We are not in an interrupt handler and a task is
           * signaling itself for some reason.
           */

          if (cpu == me && !CURRENT_REGS)
            {
              /* In this case just deliver the signal now.
               * REVISIT: Signal handler will run in a critical section!
               */

              sigdeliver(tcb);
            }

          /* CASE 2: The task that needs to receive the signal is running.
           * This could happen if the task is running on another CPU OR if
           * we are in an interrupt handler and the task is running on this
           * CPU.  In the former case, we will have to PAUSE the other CPU
           * first.  But in either case, we will have to modify the return
           * state as well as the state in the TCB.
           */

          else
            {
              /* If we are signaling a task running on the other CPU, we
               * have to PAUSE the other CPU.
               */

              if (cpu != me)
                {
                  /* Pause the CPU */

                  up_cpu_pause(cpu);

                  /* Wait while the pause request is pending */

                  while (up_cpu_pausereq(cpu))
                    {
                    }

                  /* Now tcb on the other CPU can be accessed safely.
                   *
                   * Copy tcb->xcp.regs to tcb->xcp.saved_reg.  These will
                   * be restored by the signal trampoline after the signal
                   * has been delivered.
                   */

                  tcb->xcp.sigdeliver = sigdeliver;
#ifdef CONFIG_ARCH_FPU
                  tcb->xcp.saved_fpu_regs = tcb->xcp.fpu_regs;
#endif
                  /* Create the signal-delivery context */

                  tcb->xcp.saved_reg = tcb->xcp.regs;
                  arm64_init_signal_process(tcb);
                }
              else
                {
                  /* tcb is running on the same CPU.  Save the interrupted
                   * context; it will be restored by the signal trampoline
                   * after the signal has been delivered.
                   */

                  tcb->xcp.sigdeliver = sigdeliver;

                  /* Create the signal-delivery context */

                  tcb->xcp.saved_reg = tcb->xcp.regs;
#ifdef CONFIG_ARCH_FPU
                  tcb->xcp.saved_fpu_regs = tcb->xcp.fpu_regs;
#endif
                  arm64_init_signal_process(tcb);

                  /* Trigger the switch to the signal-delivery context on
                   * return from the interrupt.
                   */

                  CURRENT_REGS = tcb->xcp.regs;
                }

              /* Increment the IRQ lock count so that when the task is
               * restarted, it will hold the IRQ spinlock.
               */

              DEBUGASSERT(tcb->irqcount < INT16_MAX);
              tcb->irqcount++;

              /* NOTE: If the task runs on another CPU(cpu), adjusting
               * global IRQ controls will be done in the pause handler
               * on the CPU(cpu) by taking a critical section.
               * If the task is scheduled on this CPU(me), do nothing
               * because this CPU already took a critical section.
               */

              /* RESUME the other CPU if it was PAUSED */

              if (cpu != me)
                {
                  up_cpu_resume(cpu);
                }
            }
        }

      /* Otherwise, we are (1) signaling a task is not running from an
       * interrupt handler or (2) we are not in an interrupt handler and the
       * running task is signaling some other non-running task.
       */

      else
        {
          /* Save the target thread's context; it will be restored by the
           * signal trampoline after the signals have been delivered.
           */

          tcb->xcp.sigdeliver = sigdeliver;
#ifdef CONFIG_ARCH_FPU
          tcb->xcp.saved_fpu_regs = tcb->xcp.fpu_regs;
#endif
          tcb->xcp.saved_reg = tcb->xcp.regs;

          /* Create the signal-delivery context */

          arm64_init_signal_process(tcb);

          /* Increment the IRQ lock count so that when the task is
           * restarted, it will hold the IRQ spinlock.
           */

          DEBUGASSERT(tcb->irqcount < INT16_MAX);
          tcb->irqcount++;
        }
    }
}
#endif /* CONFIG_SMP */

View file

@ -0,0 +1,159 @@
/****************************************************************************
* arch/arm64/src/common/arm64_sigdeliver.c
*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership. The
* ASF licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the
* License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
****************************************************************************/
/****************************************************************************
* Included Files
****************************************************************************/
#include <nuttx/config.h>
#include <stdint.h>
#include <sched.h>
#include <assert.h>
#include <debug.h>
#include <nuttx/irq.h>
#include <nuttx/arch.h>
#include "sched/sched.h"
#include "arm64_internal.h"
#include "arm64_arch.h"
#include "irq/irq.h"
#include "arm64_fatal.h"
#ifdef CONFIG_ARCH_FPU
#include "arm64_fpu.h"
#endif
/****************************************************************************
* Public Functions
****************************************************************************/
/****************************************************************************
 * Name: arm64_sigdeliver
 *
 * Description:
 *   This is the a signal handling trampoline.  When a signal action was
 *   posted.  The task context was mucked with and forced to branch to this
 *   location with interrupts disabled.
 *
 *   After all queued signal actions have run, the thread's original
 *   context (saved in xcp.saved_reg by up_schedule_sigaction) is
 *   restored and never returns here.
 *
 ****************************************************************************/

void arm64_sigdeliver(void)
{
  struct tcb_s *rtcb = this_task();

#ifdef CONFIG_SMP
  /* In the SMP case, we must terminate the critical section while the signal
   * handler executes, but we also need to restore the irqcount when the
   * we resume the main thread of the task.
   */

  irqstate_t flags;
  int16_t saved_irqcount;
  struct regs_context *pctx =
    (struct regs_context *)rtcb->xcp.saved_reg;

  /* Recover the interrupt-mask state of the interrupted context from its
   * saved SPSR so the critical section is released with the right flags.
   */

  flags = (pctx->spsr & SPSR_DAIF_MASK);
#endif

  sinfo("rtcb=%p sigdeliver=%p sigpendactionq.head=%p\n",
        rtcb, rtcb->xcp.sigdeliver, rtcb->sigpendactionq.head);
  DEBUGASSERT(rtcb->xcp.sigdeliver != NULL);

#ifdef CONFIG_SMP
  /* In the SMP case, up_schedule_sigaction() will have incremented
   * 'irqcount' in order to force us into a critical section.  Save the
   * pre-incremented irqcount.
   */

  saved_irqcount = rtcb->irqcount - 1;
  DEBUGASSERT(saved_irqcount >= 0);

  /* Now we need call leave_critical_section() repeatedly to get the irqcount
   * to zero, freeing all global spinlocks that enforce the critical section.
   */

  do
    {
      leave_critical_section(flags);
    }
  while (rtcb->irqcount > 0);
#endif /* CONFIG_SMP */

#ifndef CONFIG_SUPPRESS_INTERRUPTS
  /* Then make sure that interrupts are enabled.  Signal handlers must always
   * run with interrupts enabled.
   */

  up_irq_enable();
#endif

  /* Deliver the signal */

  ((sig_deliver_t)rtcb->xcp.sigdeliver)(rtcb);

  /* Output any debug messages BEFORE restoring errno (because they may
   * alter errno), then disable interrupts again and restore the original
   * errno that is needed by the user logic (it is probably EINTR).
   *
   * I would prefer that all interrupts are disabled when
   * arm64_fullcontextrestore() is called, but that may not be necessary.
   */

  sinfo("Resuming\n");

#ifdef CONFIG_SMP
  /* Restore the saved 'irqcount' and recover the critical section
   * spinlocks.
   */

  DEBUGASSERT(rtcb->irqcount == 0);
  while (rtcb->irqcount < saved_irqcount)
    {
      enter_critical_section();
    }
#endif

#ifndef CONFIG_SUPPRESS_INTERRUPTS
  /* Disable interrupts again.  The returned flags are deliberately
   * discarded: the interrupted thread's interrupt state is reinstated by
   * the full context restore below.
   */

  up_irq_save();
#endif

  /* Modify the saved return state with the actual saved values in the
   * TCB.  This depends on the fact that nested signal handling is
   * not supported.  Therefore, these values will persist throughout the
   * signal handling action.
   *
   * Keeping this data in the TCB resolves a security problem in protected
   * and kernel mode: The regs[] array is visible on the user stack and
   * could be modified by a hostile program.
   */

  rtcb->xcp.sigdeliver = NULL; /* Allows next handler to be scheduled */
  rtcb->xcp.regs = rtcb->xcp.saved_reg;
#ifdef CONFIG_ARCH_FPU
  /* NOTE(review): "destory" [sic] matches the name declared in
   * arm64_fpu.h; it presumably tears down the trampoline's FPU
   * context -- confirm there.
   */

  arm64_destory_fpu(rtcb);
  rtcb->xcp.fpu_regs = rtcb->xcp.saved_fpu_regs;
#endif

  /* Then restore the correct state for this thread of execution. */

  arm64_fullcontextrestore(rtcb->xcp.regs);
}

View file

@ -0,0 +1,74 @@
/****************************************************************************
* arch/arm64/src/common/arm64_smccc.S
*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership. The
* ASF licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the
* License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
****************************************************************************/
/****************************************************************************
* Included Files
****************************************************************************/
#include <nuttx/config.h>
#include "arch/syscall.h"
#include "arm64_macro.inc"
/****************************************************************************
* Public Symbols
****************************************************************************/
.file "arm64_smccc.S"
/****************************************************************************
* Assembly Macros
****************************************************************************/
/****************************************************************************
* Private Functions
****************************************************************************/
/****************************************************************************
* Public Functions
****************************************************************************/
/* The SMC instruction is used to generate a synchronous exception that is
 * handled by Secure Monitor code running in EL3.
 *
 * x0-x7 carry the SMC function identifier and arguments; the address of
 * the result structure is passed on the stack at [sp].
 *
 * Fix: the result pointer is loaded into x9 (a scratch register that
 * carries no SMCCC result value) instead of x4.  Loading it into x4, as
 * before, clobbered the x4 value returned by the SMC call before it
 * could be stored into the result structure, so res->a4 received the
 * pointer itself.
 */

GTEXT(arm64_smccc_smc)
SECTION_FUNC(text, arm64_smccc_smc)
    smc   #0
    ldr   x9, [sp]
    stp   x0, x1, [x9, #8 * ARM_SMCC_RES_A0]
    stp   x2, x3, [x9, #8 * ARM_SMCC_RES_A2]
    stp   x4, x5, [x9, #8 * ARM_SMCC_RES_A4]
    stp   x6, x7, [x9, #8 * ARM_SMCC_RES_A6]
    ret
/* The HVC instruction is used to generate a synchronous exception that is
 * handled by a hypervisor running in EL2.
 *
 * x0-x7 carry the HVC function identifier and arguments; the address of
 * the result structure is passed on the stack at [sp].
 *
 * Fix: the result pointer is loaded into x9 instead of x4 so that the x4
 * value returned by the HVC call is stored intact into res->a4 (loading
 * the pointer into x4 clobbered that return value).
 */

GTEXT(arm64_smccc_hvc)
SECTION_FUNC(text, arm64_smccc_hvc)
    hvc   #0
    ldr   x9, [sp]
    stp   x0, x1, [x9, #8 * ARM_SMCC_RES_A0]
    stp   x2, x3, [x9, #8 * ARM_SMCC_RES_A2]
    stp   x4, x5, [x9, #8 * ARM_SMCC_RES_A4]
    stp   x6, x7, [x9, #8 * ARM_SMCC_RES_A6]
    ret

View file

@ -0,0 +1,83 @@
/****************************************************************************
* arch/arm64/src/common/arm64_smp.h
*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership. The
* ASF licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the
* License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
****************************************************************************/
#ifndef __ARCH_ARM64_SRC_COMMON_ARM64_SMP_H
#define __ARCH_ARM64_SRC_COMMON_ARM64_SMP_H

/****************************************************************************
 * Included Files
 ****************************************************************************/

#include <nuttx/config.h>
#include "arm64_internal.h"

#ifdef CONFIG_SMP

/****************************************************************************
 * Pre-processor Definitions
 ****************************************************************************/

/****************************************************************************
 * Public Data
 ****************************************************************************/

#ifndef __ASSEMBLY__

/****************************************************************************
 * Public Function Prototypes
 ****************************************************************************/

/****************************************************************************
 * Name: arm64_cpu_boot
 *
 * Description:
 *   Continues the C-level initialization started by the assembly language
 *   __cpu[n]_start function.  At a minimum, this function needs to
 *   initialize interrupt handling and, perhaps, wait on WFI for
 *   arm_cpu_start() to issue an SGI.
 *
 *   This function must be provided by each ARMv8-A platform and implement
 *   platform-specific initialization logic.
 *
 * Input Parameters:
 *   cpu - The CPU index.  This is the same value that would be obtained by
 *     calling up_cpu_index();
 *
 * Returned Value:
 *   Does not return.
 *
 ****************************************************************************/

void arm64_cpu_boot(int cpu);

/****************************************************************************
 * Name: arm64_enable_smp
 *
 * Description:
 *   Perform the architecture-specific setup required to bring the given
 *   CPU into SMP operation.  NOTE(review): description was left empty in
 *   the original -- confirm exact semantics against the implementation in
 *   arm64_smp.c before relying on this summary.
 *
 * Input Parameters:
 *   cpu - The index of the CPU being enabled.
 *
 * Returned Value:
 *   None documented.
 *
 ****************************************************************************/

void arm64_enable_smp(int cpu);

#endif /* __ASSEMBLY__ */
#endif /* CONFIG_SMP */
#endif /* __ARCH_ARM64_SRC_COMMON_ARM64_SMP_H */

View file

@ -0,0 +1,100 @@
/****************************************************************************
* arch/arm64/src/common/arm64_stackframe.c
*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership. The
* ASF licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the
* License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
****************************************************************************/
/****************************************************************************
* Included Files
****************************************************************************/
#include <nuttx/config.h>
#include <sys/types.h>
#include <stdint.h>
#include <sched.h>
#include <debug.h>
#include <nuttx/arch.h>
#include <nuttx/kmalloc.h>
#include <nuttx/tls.h>
#include <nuttx/board.h>
#include <arch/irq.h>
#include "arm64_internal.h"
#include "arm64_fatal.h"
/****************************************************************************
* Public Functions
****************************************************************************/
/****************************************************************************
* Name: up_stack_frame
*
* Description:
* Allocate a stack frame in the TCB's stack to hold thread-specific data.
* This function may be called anytime after up_create_stack() or
* up_use_stack() have been called but before the task has been started.
*
* Thread data may be kept in the stack (instead of in the TCB) if it is
* accessed by the user code directly. This includes such things as
* argv[]. The stack memory is guaranteed to be in the same protection
* domain as the thread.
*
* The following TCB fields will be re-initialized:
*
* - adj_stack_size: Stack size after removal of the stack frame from
* the stack
* - stack_base_ptr: Adjusted stack base pointer after the TLS Data and
* Arguments has been removed from the stack allocation.
*
* Input Parameters:
* - tcb: The TCB of new task
* - frame_size: The size of the stack frame to allocate.
*
* Returned Value:
* - A pointer to bottom of the allocated stack frame. NULL will be
* returned on any failures. The alignment of the returned value is
* the same as the alignment of the stack itself.
*
****************************************************************************/
void *up_stack_frame(struct tcb_s *tcb, size_t frame_size)
{
  size_t aligned_size = STACK_ALIGN_UP(frame_size);
  void *frame;

  /* Bail out unless a stack has been allocated and is large enough to
   * carve the requested frame out of it.
   */

  if (tcb->stack_alloc_ptr == NULL || tcb->adj_stack_size <= aligned_size)
    {
      return NULL;
    }

  /* The frame is taken from the current base of the stack; zero it so the
   * thread starts from a clean region.
   */

  frame = tcb->stack_base_ptr;
  memset(frame, 0, aligned_size);

  /* Record the adjusted stack geometry in the TCB: the base moves up past
   * the frame and the usable size shrinks accordingly.
   */

  tcb->stack_base_ptr = (uint8_t *)frame + aligned_size;
  tcb->adj_stack_size -= aligned_size;

  /* Return the start of the carved-out region */

  return frame;
}

View file

@ -0,0 +1,540 @@
/****************************************************************************
* arch/arm64/src/common/arm64_syscall.c
*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership. The
* ASF licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the
* License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
****************************************************************************/
/****************************************************************************
* Included Files
****************************************************************************/
#include <nuttx/config.h>
#include <inttypes.h>
#include <stdint.h>
#include <string.h>
#include <assert.h>
#include <debug.h>
#include <syscall.h>
#include <nuttx/arch.h>
#include <nuttx/sched.h>
#include <nuttx/addrenv.h>
#include "arch/irq.h"
#include "signal/signal.h"
#include "addrenv.h"
#include "arm64_internal.h"
#include "arm64_fatal.h"
/****************************************************************************
* Private Functions
****************************************************************************/
/****************************************************************************
* Name: dispatch_syscall
*
* Description:
* Call the stub function corresponding to the system call. NOTE the non-
* standard parameter passing:
*
* x0 = SYS_ call number
* x1 = parm0
* x2 = parm1
* x3 = parm2
* x4 = parm3
* x5 = parm4
* x6 = parm5
*
* The values of X4-X5 may be preserved in the proxy called by the user
* code if they are used (but otherwise will not be).
*
* WARNING: There are hard-coded values in this logic!
*
****************************************************************************/
#ifdef CONFIG_LIB_SYSCALL

/* Call the stub function indexed by the SYS call number in r0, then issue
 * a SYS_syscall_return trap to return to the original context.
 *
 * NOTE(review): the inline assembly below is AArch32 code (r0-r6, ip, lr,
 * blx, svc with 4-byte stack slots) carried over from arch/arm.  None of
 * these register names or instructions exist on AArch64, so this cannot
 * assemble for an arm64 target -- confirm whether CONFIG_LIB_SYSCALL is
 * buildable on this port and rewrite using x-registers / AAPCS64 before
 * enabling it.
 */

static void dispatch_syscall(void) naked_function;
static void dispatch_syscall(void)
{
  __asm__ __volatile__
  (
    " sub sp, sp, #16\n"           /* Create a stack frame to hold 3 parms + lr */
    " str r4, [sp, #0]\n"          /* Move parameter 4 (if any) into position */
    " str r5, [sp, #4]\n"          /* Move parameter 5 (if any) into position */
    " str r6, [sp, #8]\n"          /* Move parameter 6 (if any) into position */
    " str lr, [sp, #12]\n"         /* Save lr in the stack frame */
    " ldr ip, =g_stublookup\n"     /* R12=The base of the stub lookup table */
    " ldr ip, [ip, r0, lsl #2]\n"  /* R12=The address of the stub for this SYSCALL */
    " blx ip\n"                    /* Call the stub (modifies lr) */
    " ldr lr, [sp, #12]\n"         /* Restore lr */
    " add sp, sp, #16\n"           /* Destroy the stack frame */
    " mov r2, r0\n"                /* R2=Save return value in R2 */
    " mov r0, %0\n"                /* R0=SYS_syscall_return */
    " svc %1\n"::"i"(SYS_syscall_return),
    "i"(SYS_syscall)               /* Return from the SYSCALL */
  );
}
#endif
/****************************************************************************
* Private Functions
****************************************************************************/
static void arm64_dump_syscall(const char *tag, uint64_t cmd,
                               const struct regs_context * f_regs)
{
  /* Trace helper: log the SYSCALL number and the eight argument registers
   * (x0..x7) of the trapped context at svcinfo level.
   */

  const uint64_t *r = f_regs->regs;

  svcinfo("SYSCALL %s: regs: %p cmd: %" PRId64 "\n", tag, f_regs, cmd);
  svcinfo("x0: 0x%-16lx x1: 0x%lx\n", r[REG_X0], r[REG_X1]);
  svcinfo("x2: 0x%-16lx x3: 0x%lx\n", r[REG_X2], r[REG_X3]);
  svcinfo("x4: 0x%-16lx x5: 0x%lx\n", r[REG_X4], r[REG_X5]);
  svcinfo("x6: 0x%-16lx x7: 0x%lx\n", r[REG_X6], r[REG_X7]);
}
/****************************************************************************
* Public Functions
****************************************************************************/
/****************************************************************************
* Name: arm64_syscall_switch
*
* Description:
* task switch syscall
*
****************************************************************************/
/* Handle the two context-switch SYS calls (SYS_restore_context and
 * SYS_switch_context) issued from arm64_fullcontextrestore() and
 * arm64_switchcontext().
 *
 * Input Parameters:
 *   regs - The trapped register context (a struct regs_context in memory).
 *
 * Returned Value:
 *   The register context to restore on exception return, or NULL on a bad
 *   SYS call number.
 *
 * Fixes vs. original: the default case stored a dead value into ret_regs,
 * returned the integer literal 0 as a pointer, and was followed by an
 * unreachable break.  It now falls through the common return path with
 * ret_regs = NULL; observable behavior is unchanged.
 */

uint64_t *arm64_syscall_switch(uint64_t * regs)
{
  uint64_t cmd;
  struct regs_context *f_regs;
  uint64_t *ret_regs;

  /* Nested interrupts are not supported */

  DEBUGASSERT(regs);

  f_regs = (struct regs_context *)regs;

  /* The SYSCALL command is in x0 on entry.  Parameters follow in x1..x7 */

  cmd = f_regs->regs[REG_X0];

  arm64_dump_syscall(__func__, cmd, f_regs);

  switch (cmd)
    {
      /* x0 = SYS_restore_context: Restore task context
       *
       * void arm64_fullcontextrestore(uint64_t *restoreregs)
       *      noreturn_function;
       *
       * At this point, the following values are saved in context:
       *
       *   x0 = SYS_restore_context
       *   x1 = restoreregs (xcp->regs, callee saved register save area)
       */

      case SYS_restore_context:
        {
          /* Replace 'regs' with the pointer to the register set saved in
           * x1.  On return from the system call, that register set will
           * determine the restored context.
           */

          ret_regs = (uint64_t *)f_regs->regs[REG_X1];
          f_regs->regs[REG_X1] = 0; /* set the saveregs = 0 */

          DEBUGASSERT(ret_regs);
        }
        break;

      /* x0 = SYS_switch_context: This is a switch context command:
       *
       * void arm64_switchcontext(uint64_t *saveregs,
       *                          uint64_t *restoreregs);
       *
       * At this point, the following values are saved in context:
       *
       *   x0 = SYS_switch_context
       *   x1 = saveregs    (xcp->regs, callee saved register save area)
       *   x2 = restoreregs (xcp->regs, callee saved register save area)
       *
       * In this case, we do both: We save the context registers to the
       * save register area referenced by the saved contents of x1 and
       * then set regs to the save register area referenced by the saved
       * contents of x2.
       */

      case SYS_switch_context:
        {
          DEBUGASSERT(f_regs->regs[REG_X1] != 0 &&
                      f_regs->regs[REG_X2] != 0);

          /* Publish the current context pointer through *saveregs, then
           * select the restore context for the exception return.
           */

          *(uint64_t **)f_regs->regs[REG_X1] = regs;
          ret_regs = (uint64_t *)f_regs->regs[REG_X2];
        }
        break;

      default:
        {
          svcerr("ERROR: Bad SYS call: 0x%" PRIx64 "\n", cmd);
          ret_regs = NULL;
        }
        break;
    }

  return ret_regs;
}
/****************************************************************************
* Name: arm64_syscall
*
* Description:
* SVC interrupts will vector here with insn=the SVC instruction and
* xcp=the interrupt context
*
* The handler may get the SVC number be de-referencing the return
* address saved in the xcp and decoding the SVC instruction
*
****************************************************************************/
int arm64_syscall(uint64_t *regs)
{
  uint64_t cmd;
  struct regs_context *f_regs;

  /* Nested interrupts are not supported */

  DEBUGASSERT(regs);

  f_regs = (struct regs_context *)regs;

  /* The SYSCALL command is in x0 on entry.  Parameters follow in x1..x7 */

  cmd = f_regs->regs[REG_X0];

  arm64_dump_syscall(__func__, cmd, f_regs);

  /* NOTE(review): every configuration-gated case below still uses AArch32
   * register names (REG_PC, REG_CPSR, REG_R0..REG_R4, REG_SP), AArch32
   * PSR_MODE_* constants, an undeclared local 'cpsr', and uint32_t casts
   * that would truncate 64-bit pointers.  This appears to be carried over
   * from arch/arm and cannot compile for AArch64 with those options
   * enabled (CONFIG_LIB_SYSCALL, CONFIG_BUILD_KERNEL, protected pthread
   * start) -- confirm and port before enabling them on this architecture.
   */

  switch (cmd)
    {
      /* R0=SYS_syscall_return: This a SYSCALL return command:
       *
       *   void arm_syscall_return(void);
       *
       * At this point, the following values are saved in context:
       *
       *   R0 = SYS_syscall_return
       *
       * We need to restore the saved return address and return in
       * unprivileged thread mode.
       */

#ifdef CONFIG_LIB_SYSCALL
      case SYS_syscall_return:
        {
          struct tcb_s *rtcb = nxsched_self();
          int index = (int)rtcb->xcp.nsyscalls - 1;

          /* Make sure that there is a saved SYSCALL return address. */

          DEBUGASSERT(index >= 0);

          /* Setup to return to the saved SYSCALL return address in
           * the original mode.
           */

          regs[REG_PC] = rtcb->xcp.syscall[index].sysreturn;
#ifdef CONFIG_BUILD_KERNEL
          regs[REG_CPSR] = rtcb->xcp.syscall[index].cpsr;
#endif
          /* The return value must be in R0-R1.  dispatch_syscall()
           * temporarily moved the value for R0 into R2.
           */

          regs[REG_R0] = regs[REG_R2];

#ifdef CONFIG_ARCH_KERNEL_STACK
          /* If this is the outermost SYSCALL and if there is a saved user
           * stack pointer, then restore the user stack pointer on this
           * final return to user code.
           */

          if (index == 0 && rtcb->xcp.ustkptr != NULL)
            {
              regs[REG_SP] = (uint32_t)rtcb->xcp.ustkptr;
              rtcb->xcp.ustkptr = NULL;
            }
#endif

          /* Save the new SYSCALL nesting level */

          rtcb->xcp.nsyscalls = index;

          /* Handle any signal actions that were deferred while processing
           * the system call.
           */

          rtcb->flags &= ~TCB_FLAG_SYSCALL;
          (void)nxsig_unmask_pendingsignal();
        }
        break;
#endif

      /* R0=SYS_task_start: This a user task start
       *
       *   void up_task_start(main_t taskentry, int argc, char *argv[])
       *     noreturn_function;
       *
       * At this point, the following values are saved in context:
       *
       *   R0 = SYS_task_start
       *   R1 = taskentry
       *   R2 = argc
       *   R3 = argv
       */

#ifdef CONFIG_BUILD_KERNEL
      case SYS_task_start:
        {
          /* Set up to return to the user-space _start function in
           * unprivileged mode.  We need:
           *
           *   R0   = argc
           *   R1   = argv
           *   PC   = taskentry
           *   CSPR = user mode
           */

          regs[REG_PC]   = regs[REG_R1];
          regs[REG_R0]   = regs[REG_R2];
          regs[REG_R1]   = regs[REG_R3];

          /* NOTE(review): 'cpsr' is never declared in this function */

          cpsr           = regs[REG_CPSR] & ~PSR_MODE_MASK;
          regs[REG_CPSR] = cpsr | PSR_MODE_USR;
        }
        break;
#endif

      /* R0=SYS_pthread_start: This a user pthread start
       *
       *   void up_pthread_start(pthread_startroutine_t entrypt,
       *                         pthread_addr_t arg) noreturn_function;
       *
       * At this point, the following values are saved in context:
       *
       *   R0 = SYS_pthread_start
       *   R1 = entrypt
       *   R2 = arg
       */

#if !defined(CONFIG_BUILD_FLAT) && !defined(CONFIG_DISABLE_PTHREAD)
      case SYS_pthread_start:
        {
          /* Set up to enter the user-space pthread start-up function in
           * unprivileged mode.  We need:
           *
           *   R0   = entrypt
           *   R1   = arg
           *   PC   = startup
           *   CSPR = user mode
           *
           * NOTE(review): R3 is read below but the documented ABI above
           * only passes entrypt/arg in R1/R2 -- verify against the arm
           * port this was copied from.
           */

          regs[REG_PC]   = regs[REG_R1];
          regs[REG_R0]   = regs[REG_R2];
          regs[REG_R1]   = regs[REG_R3];
          cpsr           = regs[REG_CPSR] & ~PSR_MODE_MASK;
          regs[REG_CPSR] = cpsr | PSR_MODE_USR;
        }
        break;
#endif

#ifdef CONFIG_BUILD_KERNEL
      /* R0=SYS_signal_handler: This a user signal handler callback
       *
       *   void signal_handler(_sa_sigaction_t sighand, int signo,
       *                       siginfo_t *info, void *ucontext);
       *
       * At this point, the following values are saved in context:
       *
       *   R0 = SYS_signal_handler
       *   R1 = sighand
       *   R2 = signo
       *   R3 = info
       *   ucontext (on the stack)
       */

      case SYS_signal_handler:
        {
          struct tcb_s *rtcb = nxsched_self();

          /* Remember the caller's return address */

          DEBUGASSERT(rtcb->xcp.sigreturn == 0);
          rtcb->xcp.sigreturn = regs[REG_PC];

          /* Set up to return to the user-space trampoline function in
           * unprivileged mode.
           */

          regs[REG_PC]   = (uint32_t)ARCH_DATA_RESERVE->ar_sigtramp;
          cpsr           = regs[REG_CPSR] & ~PSR_MODE_MASK;
          regs[REG_CPSR] = cpsr | PSR_MODE_USR;

          /* Change the parameter ordering to match the expectation of
           * struct userpace_s signal_handler.
           */

          regs[REG_R0]   = regs[REG_R1]; /* sighand */
          regs[REG_R1]   = regs[REG_R2]; /* signal */
          regs[REG_R2]   = regs[REG_R3]; /* info */
          regs[REG_R3]   = regs[REG_R4]; /* ucontext */

#ifdef CONFIG_ARCH_KERNEL_STACK
          /* If we are signalling a user process, then we must be operating
           * on the kernel stack now.  We need to switch back to the user
           * stack before dispatching the signal handler to the user code.
           * The existence of an allocated kernel stack is sufficient
           * information to make this decision.
           */

          if (rtcb->xcp.kstack != NULL)
            {
              DEBUGASSERT(rtcb->xcp.kstkptr == NULL &&
                          rtcb->xcp.ustkptr != NULL);

              rtcb->xcp.kstkptr = (uint32_t *)regs[REG_SP];
              regs[REG_SP]      = (uint32_t)rtcb->xcp.ustkptr;
            }
#endif
        }
        break;
#endif

#ifdef CONFIG_BUILD_KERNEL
      /* R0=SYS_signal_handler_return: This a user signal handler callback
       *
       *   void signal_handler_return(void);
       *
       * At this point, the following values are saved in context:
       *
       *   R0 = SYS_signal_handler_return
       */

      case SYS_signal_handler_return:
        {
          struct tcb_s *rtcb = nxsched_self();

          /* Set up to return to the kernel-mode signal dispatching
           * logic.
           */

          DEBUGASSERT(rtcb->xcp.sigreturn != 0);

          regs[REG_PC]        = rtcb->xcp.sigreturn;
          cpsr                = regs[REG_CPSR] & ~PSR_MODE_MASK;
          regs[REG_CPSR]      = cpsr | PSR_MODE_SVC;
          rtcb->xcp.sigreturn = 0;

#ifdef CONFIG_ARCH_KERNEL_STACK
          /* We must enter here be using the user stack.  We need to switch
           * to back to the kernel user stack before returning to the
           * kernel mode signal trampoline.
           */

          if (rtcb->xcp.kstack != NULL)
            {
              DEBUGASSERT(rtcb->xcp.kstkptr != NULL &&
                          (uint32_t)rtcb->xcp.ustkptr == regs[REG_SP]);

              regs[REG_SP]      = (uint32_t)rtcb->xcp.kstkptr;
              rtcb->xcp.kstkptr = NULL;
            }
#endif
        }
        break;
#endif

      /* This is not an architecture-specific system call.  If NuttX is
       * built as a standalone kernel with a system call interface, then
       * all of the additional system calls must be handled as in the
       * default case.
       */

      default:
        {
#ifdef CONFIG_LIB_SYSCALL
          struct tcb_s *rtcb = nxsched_self();
          int index = rtcb->xcp.nsyscalls;

          /* Verify that the SYS call number is within range */

          DEBUGASSERT(cmd >= CONFIG_SYS_RESERVED && cmd < SYS_maxsyscall);

          /* Make sure that there is a no saved SYSCALL return address.
           * We cannot yet handle nested system calls.
           */

          DEBUGASSERT(index < CONFIG_SYS_NNEST);

          /* Setup to return to dispatch_syscall in privileged mode. */

          rtcb->xcp.syscall[index].sysreturn = regs[REG_PC];
#ifdef CONFIG_BUILD_KERNEL
          rtcb->xcp.syscall[index].cpsr      = regs[REG_CPSR];
#endif

          regs[REG_PC] = (uint32_t)dispatch_syscall;
#ifdef CONFIG_BUILD_KERNEL
          cpsr           = regs[REG_CPSR] & ~PSR_MODE_MASK;
          regs[REG_CPSR] = cpsr | PSR_MODE_SVC;
#endif
          /* Offset R0 to account for the reserved values */

          regs[REG_R0] -= CONFIG_SYS_RESERVED;

          /* Indicate that we are in a syscall handler. */

          rtcb->flags |= TCB_FLAG_SYSCALL;

#ifdef CONFIG_ARCH_KERNEL_STACK
          /* If this is the first SYSCALL and if there is an allocated
           * kernel stack, then switch to the kernel stack.
           */

          if (index == 0 && rtcb->xcp.kstack != NULL)
            {
              rtcb->xcp.ustkptr = (uint32_t *)regs[REG_SP];
              regs[REG_SP]      = (uint32_t)rtcb->xcp.kstack +
                                  ARCH_KERNEL_STACKSIZE;
            }
#endif

          /* Save the new SYSCALL nesting level */

          rtcb->xcp.nsyscalls = index + 1;
#else
          svcerr("ERROR: Bad SYS call: 0x%" PRIx64 "\n", cmd);
#endif
        }
        break;
    }

  return 0;
}

View file

@ -0,0 +1,119 @@
/****************************************************************************
* arch/arm64/src/common/arm64_task_sched.c
*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership. The
* ASF licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the
* License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
****************************************************************************/
/****************************************************************************
* Included Files
****************************************************************************/
#include <nuttx/config.h>
#include <stdbool.h>
#include <sched.h>
#include <debug.h>
#include <assert.h>
#include <nuttx/arch.h>
#include <nuttx/sched.h>
#include <arch/syscall.h>
#include "sched/sched.h"
#include "group/group.h"
#include "arm64_internal.h"
#include "arm64_fatal.h"
#ifdef CONFIG_ARCH_FPU
#include "arm64_fpu.h"
#endif
/****************************************************************************
* Public Functions
****************************************************************************/
/****************************************************************************
* Name: up_saveusercontext
*
* Description:
* Save the current thread context. Full prototype is:
*
* int up_saveusercontext(void *saveregs);
*
* Returned Value:
* 0: Normal return
* 1: Context switch return
*
****************************************************************************/
#ifdef CONFIG_BUILD_FLAT
/* Flat build: the caller already runs with kernel privilege, so the
 * context can be captured directly with interrupts masked.
 */

int up_saveusercontext(void *saveregs)
{
  irqstate_t flags;

  /* Take a snapshot of the thread context right now */

  flags = enter_critical_section();
  arm64_context_snapshot(saveregs);
  leave_critical_section(flags);
  return 0;
}
#else
/* Protected/kernel build: trap into the kernel so the context is captured
 * at the exception boundary via the SYS_save_context system call.
 */

int up_saveusercontext(void *saveregs)
{
  return sys_call1(SYS_save_context, (uintptr_t)saveregs);
}
#endif
/****************************************************************************
* Name: arm64_fullcontextrestore
*
* Description:
* Restore the current thread context. Full prototype is:
*
* void arm64_fullcontextrestore(uint64_t *restoreregs) noreturn_function;
*
* Returned Value:
* None
*
****************************************************************************/
void arm64_fullcontextrestore(uint64_t *restoreregs)
{
  /* Trap into the kernel; arm64_syscall_switch() handles
   * SYS_restore_context and resumes execution in the restored context,
   * so this call never returns.
   */

  sys_call1(SYS_restore_context, (uintptr_t)restoreregs);
  __builtin_unreachable();
}
/****************************************************************************
* Name: arm64_switchcontext
*
* Description:
* Save the current thread context and restore the specified context.
*
* Returned Value:
* None
*
****************************************************************************/
void arm64_switchcontext(uint64_t **saveregs, uint64_t *restoreregs)
{
  /* Trap into the kernel; arm64_syscall_switch() handles
   * SYS_switch_context by storing the current context pointer through
   * *saveregs and resuming the context in restoreregs.  This "returns"
   * only when the saved context is later switched back in.
   */

  sys_call2(SYS_switch_context, (uintptr_t)saveregs, (uintptr_t)restoreregs);
}

View file

@ -0,0 +1,94 @@
/****************************************************************************
* arch/arm64/src/common/arm64_testset.S
*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership. The
* ASF licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the
* License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
****************************************************************************/
/****************************************************************************
* Included Files
****************************************************************************/
#include <nuttx/config.h>
#include <arch/spinlock.h>
#include "arch/syscall.h"
#include "arm64_macro.inc"
/****************************************************************************
* Pre-processor Definitions
****************************************************************************/
/****************************************************************************
* Public Symbols
****************************************************************************/
.file "arm64_testset.S"
/****************************************************************************
* Public Functions
****************************************************************************/
/****************************************************************************
* Name: up_testset
*
* Description:
* Perform an atomic test and set operation on the provided spinlock.
*
* This function must be provided via the architecture-specific logic.
*
* Input Parameters:
* lock - A reference to the spinlock object.
*
* Returned Value:
* The spinlock is always locked upon return. The previous value of the
* spinlock variable is returned, either SP_LOCKED if the spinlock was
* previously locked (meaning that the test-and-set operation failed to
* obtain the lock) or SP_UNLOCKED if the spinlock was previously unlocked
* (meaning that we successfully obtained the lock).
*
****************************************************************************/
GTEXT(up_testset)
SECTION_FUNC(text, up_testset)
up_testset:                  /* NOTE(review): redundant label -- the
                              * SECTION_FUNC macro already defines the
                              * up_testset symbol. */
    mov x1, #SP_LOCKED

    /* Test if the spinlock is locked or not.
     *
     * NOTE(review): the lock is accessed as a 64-bit word (x-register
     * ldaxr/stxr) -- confirm spinlock_t is 8 bytes on this port.  Also,
     * the early exit at 2f leaves the exclusive monitor open (no clrex);
     * benign on most implementations but worth confirming.
     */

1:
    ldaxr x2, [x0]           /* Exclusive load-acquire of the lock value */
    cmp x2, x1               /* Already locked? */
    beq 2f                   /* If already locked, return SP_LOCKED */

    /* Not locked ... attempt to lock it */

    stxr w2, x1, [x0]        /* Attempt to set the locked state */
    cbnz w2, 1b              /* w2 will be 1 if stxr failed; retry */

    /* Lock acquired -- return SP_UNLOCKED */

    mov x0, #SP_UNLOCKED
    ret

    /* Lock not acquired -- return SP_LOCKED */

2:
    mov x0, #SP_LOCKED
    ret

View file

@ -0,0 +1,144 @@
/****************************************************************************
* arch/arm64/src/common/arm64_unblocktask.c
*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership. The
* ASF licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the
* License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
****************************************************************************/
/****************************************************************************
* Included Files
****************************************************************************/
#include <nuttx/config.h>
#include <sched.h>
#include <assert.h>
#include <debug.h>
#include <nuttx/arch.h>
#include <nuttx/sched.h>
#include "sched/sched.h"
#include "group/group.h"
#include "clock/clock.h"
#include "arm64_internal.h"
/****************************************************************************
* Public Functions
****************************************************************************/
/****************************************************************************
* Name: up_unblock_task
*
* Description:
* A task is currently in an inactive task list
* but has been prepped to execute. Move the TCB to the
* ready-to-run list, restore its context, and start execution.
*
* Input Parameters:
* tcb: Refers to the tcb to be unblocked. This tcb is
* in one of the waiting tasks lists. It must be moved to
* the ready-to-run list and, if it is the highest priority
* ready to run task, executed.
*
****************************************************************************/
void up_unblock_task(struct tcb_s *tcb)
{
  struct tcb_s *rtcb = this_task();

  /* Verify that the context switch can be performed */

  DEBUGASSERT((tcb->task_state >= FIRST_BLOCKED_STATE) &&
              (tcb->task_state <= LAST_BLOCKED_STATE));

  /* Remove the task from the blocked task list */

  nxsched_remove_blocked(tcb);

  /* Add the task in the correct location in the prioritized
   * ready-to-run task list
   */

  if (nxsched_add_readytorun(tcb))
    {
      /* The currently active task has changed!  We need to do
       * a context switch to the new task.
       */

      /* Update scheduler parameters */

      nxsched_suspend_scheduler(rtcb);

      /* Are we in an interrupt handler? */

      if (CURRENT_REGS)
        {
          /* Yes, then we have to do things differently.
           * Just copy the CURRENT_REGS into the OLD rtcb.
           */

          arm64_savestate(rtcb->xcp.regs);

          /* Restore the exception context of the rtcb at the (new) head
           * of the ready-to-run task list.
           */

          rtcb = this_task();

          /* Update scheduler parameters */

          nxsched_resume_scheduler(rtcb);

          /* Then switch contexts.  Any necessary address environment
           * changes will be made when the interrupt returns.
           */

          arm64_restorestate(rtcb->xcp.regs);
        }

      /* No, then we will need to perform the user context switch */

      else
        {
          struct tcb_s *nexttcb = this_task();

#ifdef CONFIG_ARCH_ADDRENV
          /* Make sure that the address environment for the previously
           * running task is closed down gracefully (data caches dump,
           * MMU flushed) and set up the address environment for the new
           * thread at the head of the ready-to-run list.
           */

          group_addrenv(nexttcb);
#endif
          /* Update scheduler parameters */

          nxsched_resume_scheduler(nexttcb);

          /* Switch context to the context of the task at the head of the
           * ready to run list.
           */

          arm64_switchcontext(&rtcb->xcp.regs, nexttcb->xcp.regs);

          /* arm64_switchcontext forces a context switch to the task at
           * the head of the ready-to-run list.  It does not 'return' in
           * the normal sense.  When it does return, it is because the
           * blocked task is again ready to run and has execution
           * priority.
           */
        }
    }
}

View file

@ -0,0 +1,130 @@
/****************************************************************************
* arch/arm64/src/common/arm64_usestack.c
*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership. The
* ASF licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the
* License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
****************************************************************************/
/****************************************************************************
* Included Files
****************************************************************************/
#include <nuttx/config.h>
#include <sys/types.h>
#include <stdint.h>
#include <sched.h>
#include <debug.h>
#include <nuttx/arch.h>
#include <nuttx/kmalloc.h>
#include <nuttx/tls.h>
#include <nuttx/board.h>
#include <arch/irq.h>
#include "arm64_internal.h"
#include "arm64_fatal.h"
/****************************************************************************
* Pre-processor Macros
****************************************************************************/
/****************************************************************************
* Private Types
****************************************************************************/
/****************************************************************************
* Private Function Prototypes
****************************************************************************/
/****************************************************************************
* Public Functions
****************************************************************************/
/****************************************************************************
* Name: up_use_stack
*
* Description:
* Setup up stack-related information in the TCB using pre-allocated stack
* memory. This function is called only from nxtask_init() when a task or
* kernel thread is started (never for pthreads).
*
* The following TCB fields must be initialized:
*
* - adj_stack_size: Stack size after adjustment for hardware,
* processor, etc. This value is retained only for debug
* purposes.
* - stack_alloc_ptr: Pointer to allocated stack
* - stack_base_ptr: Adjusted stack base pointer after the TLS Data and
* Arguments has been removed from the stack allocation.
*
* Input Parameters:
* - tcb: The TCB of new task
* - stack_size: The allocated stack size.
*
 * NOTE: Unlike up_create_stack() and up_release_stack(), this function
* does not require the task type (ttype) parameter. The TCB flags will
* always be set to provide the task type to up_use_stack() if it needs
* that information.
*
****************************************************************************/
int up_use_stack(struct tcb_s *tcb, void *stack, size_t stack_size)
{
  uintptr_t stack_top;

#ifdef CONFIG_TLS_ALIGNED
  /* The caller-supplied stack must honor the TLS alignment contract */

  DEBUGASSERT(((uintptr_t)stack & TLS_STACK_MASK) == 0);
#endif

  /* Drop any stack that is already attached to this TCB before adopting
   * the caller-provided one.
   */

  if (tcb->stack_alloc_ptr != NULL)
    {
      up_release_stack(tcb, tcb->flags & TCB_FLAG_TTYPE_MASK);
    }

  /* AArch64 uses a full-descending stack: it grows toward lower
   * addresses and SP points at the last pushed item, so items are
   * addressed as positive offsets from SP.
   *
   * All sizes and pointers are kept CONFIG_STACK_ALIGNMENT-aligned;
   * since SP is decremented before the first write, the values can be
   * recorded directly in struct tcb_s.
   */

  /* Adopt the provided allocation and compute the usable, aligned size */

  tcb->stack_alloc_ptr = stack;
  tcb->stack_base_ptr  = stack;

  stack_top           = STACK_ALIGN_DOWN((uintptr_t)stack + stack_size);
  tcb->adj_stack_size = stack_top - (uintptr_t)stack;

#ifdef CONFIG_STACK_COLORATION
  /* Pre-fill the stack with a recognizable pattern so that peak usage
   * (the "high water mark") can be measured later.
   */

  arm64_stack_color(tcb->stack_base_ptr, tcb->adj_stack_size);
#endif /* CONFIG_STACK_COLORATION */

  return OK;
}

View file

@ -0,0 +1,278 @@
/****************************************************************************
* arch/arm64/src/common/arm64_vector_table.S
*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership. The
* ASF licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the
* License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
****************************************************************************/
/****************************************************************************
* Included Files
****************************************************************************/
#include <nuttx/config.h>
#include "arm64_macro.inc"
#include "arch/irq.h"
#include "arm64_fatal.h"
/****************************************************************************
* Public Symbols
****************************************************************************/
.file "arm64_vector_table.S"
/****************************************************************************
* Assembly Macros
****************************************************************************/
/* Save Corruptible Registers and exception context
* on the task stack
* note: allocate stackframe with XCPTCONTEXT_GP_REGS
* which is ARM64_ESF_REGS + ARM64_CS_REGS
* but only save ARM64_ESF_REGS
*/
/* arm64_enter_exception: common exception prologue.
 *
 * Allocates an XCPTCONTEXT_GP_REGS-sized frame on the current stack and
 * saves the general-purpose registers, SP, ELR_EL1 and SPSR_EL1 into it;
 * then bumps the per-CPU exception-depth counter kept in TPIDRRO_EL0.
 *
 * \xreg0, \xreg1: scratch registers the caller allows this macro to
 * clobber (their original values are saved to the frame first).
 */

.macro arm64_enter_exception xreg0, xreg1
    /* Reserve the full GP context frame (ESF + callee-saved region);
     * only the ESF portion is filled in here.
     */

    sub    sp, sp, #8 * XCPTCONTEXT_GP_REGS

    stp    x0, x1, [sp, #8 * REG_X0]
    stp    x2, x3, [sp, #8 * REG_X2]
    stp    x4, x5, [sp, #8 * REG_X4]
    stp    x6, x7, [sp, #8 * REG_X6]
    stp    x8, x9, [sp, #8 * REG_X8]
    stp    x10, x11, [sp, #8 * REG_X10]
    stp    x12, x13, [sp, #8 * REG_X12]
    stp    x14, x15, [sp, #8 * REG_X14]
    stp    x16, x17, [sp, #8 * REG_X16]
    stp    x18, x19, [sp, #8 * REG_X18]
    stp    x20, x21, [sp, #8 * REG_X20]
    stp    x22, x23, [sp, #8 * REG_X22]
    stp    x24, x25, [sp, #8 * REG_X24]
    stp    x26, x27, [sp, #8 * REG_X26]
    stp    x28, x29, [sp, #8 * REG_X28]

    /* Save the current task's SP_ELx (post-sub value) and x30 (LR) */

    mov    \xreg0, sp
    stp    x30, \xreg0, [sp, #8 * REG_X30]

    /* Save the exception return address and saved program status */

    mrs    \xreg0, elr_el1
    mrs    \xreg1, spsr_el1
    stp    \xreg0, \xreg1, [sp, #8 * REG_ELR]

    /* Increment the exception depth kept in TPIDRRO_EL0 */

    mrs    \xreg0, tpidrro_el0
    mov    \xreg1, #1
    add    \xreg0, \xreg0, \xreg1
    msr    tpidrro_el0, \xreg0

#ifdef CONFIG_ARCH_FPU
    /* Safe to 'bl' here: x30 was already saved to the frame above */

    bl     arm64_fpu_enter_exception
#endif
.endm
/****************************************************************************
* Public Functions
****************************************************************************/
/* Four types of exceptions:
* - synchronous: aborts from MMU, SP/CP alignment checking, unallocated
* instructions, SVCs/SMCs/HVCs, ...)
* - IRQ: group 1 (normal) interrupts
* - FIQ: group 0 or secure interrupts
* - SError: fatal system errors
*
* Four different contexts:
* - from same exception level, when using the SP_EL0 stack pointer
* - from same exception level, when using the SP_ELx stack pointer
* - from lower exception level, when this is AArch64
* - from lower exception level, when this is AArch32
*
* +------------------+------------------+-------------------------+
* | Address | Exception type | Description |
* +------------------+------------------+-------------------------+
* | VBAR_ELn + 0x000 | Synchronous | Current EL with SP0 |
* | + 0x080 | IRQ / vIRQ | |
* | + 0x100 | FIQ / vFIQ | |
* | + 0x180 | SError / vSError | |
* +------------------+------------------+-------------------------+
* | + 0x200 | Synchronous | Current EL with SPx |
* | + 0x280 | IRQ / vIRQ | |
* | + 0x300 | FIQ / vFIQ | |
* | + 0x380 | SError / vSError | |
* +------------------+------------------+-------------------------+
* | + 0x400 | Synchronous | Lower EL using AArch64 |
* | + 0x480 | IRQ / vIRQ | |
* | + 0x500 | FIQ / vFIQ | |
* | + 0x580 | SError / vSError | |
* +------------------+------------------+-------------------------+
 * | + 0x600          | Synchronous      | Lower EL using AArch32  |
* | + 0x680 | IRQ / vIRQ | |
* | + 0x700 | FIQ / vFIQ | |
* | + 0x780 | SError / vSError | |
* +------------------+------------------+-------------------------+
*/
GTEXT(_vector_table)

/* The whole table must be 2K aligned (VBAR_EL1 requirement).  Each of the
 * 16 entries is 0x80 bytes (.align 7), so the prologue macro plus the
 * branch must fit within 32 instructions per entry.
 */

.align 11
SECTION_SUBSEC_FUNC(exc_vector_table,_vector_table_section,_vector_table)

/* Current EL with SP0 / Synchronous */

.align 7
    arm64_enter_exception x0, x1
    b      arm64_sync_exc

/* Current EL with SP0 / IRQ */

.align 7
    arm64_enter_exception x0, x1
    b      arm64_irq_handler

/* Current EL with SP0 / FIQ: not expected, treated as spurious */

.align 7
    arm64_enter_exception x0, x1
    b      arm64_irq_spurious

/* Current EL with SP0 / SError */

.align 7
    arm64_enter_exception x0, x1
    b      arm64_serror_handler

/* Current EL with SPx / Synchronous */

.align 7
    arm64_enter_exception x0, x1
    b      arm64_sync_exc

/* Current EL with SPx / IRQ */

.align 7
    arm64_enter_exception x0, x1
    b      arm64_irq_handler

/* Current EL with SPx / FIQ: not expected, treated as spurious */

.align 7
    arm64_enter_exception x0, x1
    b      arm64_irq_spurious

/* Current EL with SPx / SError */

.align 7
    arm64_enter_exception x0, x1
    b      arm64_serror_handler

/* Lower EL using AArch64 / Synchronous */

.align 7
    arm64_enter_exception x0, x1
    b      arm64_sync_exc

/* Lower EL using AArch64 / IRQ */

.align 7
    arm64_enter_exception x0, x1
    b      arm64_irq_handler

/* Lower EL using AArch64 / FIQ: not expected, treated as spurious */

.align 7
    arm64_enter_exception x0, x1
    b      arm64_irq_spurious

/* Lower EL using AArch64 / SError */

.align 7
    arm64_enter_exception x0, x1
    b      arm64_serror_handler

/* Lower EL using AArch32 / Synchronous: AArch32 is unsupported, fatal */

.align 7
    arm64_enter_exception x0, x1
    b      arm64_mode32_error

/* Lower EL using AArch32 / IRQ: AArch32 is unsupported, fatal */

.align 7
    arm64_enter_exception x0, x1
    b      arm64_mode32_error

/* Lower EL using AArch32 / FIQ: AArch32 is unsupported, fatal */

.align 7
    arm64_enter_exception x0, x1
    b      arm64_mode32_error

/* Lower EL using AArch32 / SError: AArch32 is unsupported, fatal */

.align 7
    arm64_enter_exception x0, x1
    b      arm64_mode32_error
/* Restore Corruptible Registers and exception context
* from the task stack.
*/
/* arm64_exit_exception: common exception epilogue.
 *
 * Restores ELR/SPSR, decrements the exception depth, reloads the
 * general-purpose registers from the frame built by
 * arm64_enter_exception, releases the frame and returns with eret.
 */

GTEXT(arm64_exit_exception)
SECTION_FUNC(text, arm64_exit_exception)
#ifdef CONFIG_ARCH_FPU
    bl     arm64_fpu_exit_exception

/* Entry point for paths that have already handled the FPU state and
 * must skip arm64_fpu_exit_exception (e.g. after an FPU trap).
 */

GTEXT(arm64_exit_exc_fpu_done)
arm64_exit_exc_fpu_done:
#endif
    /* Restore SPSR and ELR at EL1 */

    ldp    x0, x1, [sp, #8 * REG_ELR]
    msr    elr_el1, x0
    msr    spsr_el1, x1

    /* Decrement the exception depth kept in TPIDRRO_EL0 */

    mrs    x0, tpidrro_el0
    mov    x1, #1
    sub    x0, x0, x1
    msr    tpidrro_el0, x0

    /* Reload x30; the saved SP value loaded into x0 here is deliberately
     * discarded (x0 is overwritten by the very next load) -- SP is
     * reinstated by the frame-release 'add' below instead.
     */

    ldp    x30, x0, [sp, #8 * REG_X30]

    ldp    x0, x1, [sp, #8 * REG_X0]
    ldp    x2, x3, [sp, #8 * REG_X2]
    ldp    x4, x5, [sp, #8 * REG_X4]
    ldp    x6, x7, [sp, #8 * REG_X6]
    ldp    x8, x9, [sp, #8 * REG_X8]
    ldp    x10, x11, [sp, #8 * REG_X10]
    ldp    x12, x13, [sp, #8 * REG_X12]
    ldp    x14, x15, [sp, #8 * REG_X14]
    ldp    x16, x17, [sp, #8 * REG_X16]
    ldp    x18, x19, [sp, #8 * REG_X18]
    ldp    x20, x21, [sp, #8 * REG_X20]
    ldp    x22, x23, [sp, #8 * REG_X22]
    ldp    x24, x25, [sp, #8 * REG_X24]
    ldp    x26, x27, [sp, #8 * REG_X26]
    ldp    x28, x29, [sp, #8 * REG_X28]

    /* Release the context frame and return from the exception */

    add    sp, sp, #8 * XCPTCONTEXT_GP_REGS
    eret

View file

@ -0,0 +1,438 @@
/****************************************************************************
* arch/arm64/src/common/arm64_vectors.S
*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership. The
* ASF licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the
* License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
****************************************************************************/
/****************************************************************************
* Included Files
****************************************************************************/
#include <nuttx/config.h>
#include "arch/syscall.h"
#include "arm64_macro.inc"
#include "arch/irq.h"
#include "arm64_fatal.h"
#include "arm64_internal.h"
/****************************************************************************
* Public Symbols
****************************************************************************/
.file "arm64_vectors.S"
/****************************************************************************
* Assembly Macros
****************************************************************************/
/* arm64_exception_context_save: save the per-thread system registers
 * (SP_EL0, exception depth, TPIDR_EL0/EL1) into the context frame.
 *
 * \xreg0, \xreg1: scratch registers this macro may clobber
 * \xfp:           base address of the context frame to store into
 *
 * Note: the formal arguments are comma-separated for consistency with
 * the other macros in this port (GAS accepts either separator, and the
 * existing space-separated invocations remain valid).
 */

.macro arm64_exception_context_save xreg0, xreg1, xfp
    /* Save the current task's SP_EL0 and exception depth */

    mrs    \xreg0, sp_el0
    mrs    \xreg1, tpidrro_el0
    stp    \xreg0, \xreg1, [\xfp, #8 * REG_SP_EL0]

    /* Save TPIDR_EL0/TPIDR_EL1, which hold the current TCB pointer */

    mrs    \xreg0, tpidr_el0
    mrs    \xreg1, tpidr_el1
    stp    \xreg0, \xreg1, [\xfp, #8 * REG_TPIDR_EL0]
.endm
/****************************************************************************
* Public Functions
****************************************************************************/
/****************************************************************************
* Function: arm64_context_snapshot
*
* Description:
* Take a snapshot of the thread GP registers context
* x0 --- pointer to struct regs_context
*
****************************************************************************/
GTEXT(arm64_context_snapshot)
SECTION_FUNC(text, arm64_context_snapshot)
    /* Preserve the context pointer across the scratch usage below
     * (16-byte push keeps SP alignment).
     */

    str    x0, [sp, #-16]!

    /* Note: the REG_X0 slot receives x0 itself, i.e. the pointer to the
     * regs_context being filled, not the caller's original x0.
     */

    stp    x0, x1, [x0, #8 * REG_X0]
    stp    x2, x3, [x0, #8 * REG_X2]
    stp    x4, x5, [x0, #8 * REG_X4]
    stp    x6, x7, [x0, #8 * REG_X6]
    stp    x8, x9, [x0, #8 * REG_X8]
    stp    x10, x11, [x0, #8 * REG_X10]
    stp    x12, x13, [x0, #8 * REG_X12]
    stp    x14, x15, [x0, #8 * REG_X14]
    stp    x16, x17, [x0, #8 * REG_X16]
    stp    x18, x19, [x0, #8 * REG_X18]
    stp    x20, x21, [x0, #8 * REG_X20]
    stp    x22, x23, [x0, #8 * REG_X22]
    stp    x24, x25, [x0, #8 * REG_X24]
    stp    x26, x27, [x0, #8 * REG_X26]
    stp    x28, x29, [x0, #8 * REG_X28]

    /* Save the current task's SP_ELx and x30; x4/x5 are reusable as
     * scratch from here on since their values are already in the frame.
     * NOTE(review): SP recorded here includes the 16-byte push above --
     * confirm consumers expect that.
     */

    mov    x4, sp
    stp    x30, x4, [x0, #8 * REG_X30]

    /* ELR and SPSR */

    mrs    x4, elr_el1
    mrs    x5, spsr_el1
    stp    x4, x5, [x0, #8 * REG_ELR]

    /* System registers: SP_EL0, exception depth, TPIDR_EL0/EL1 */

    arm64_exception_context_save x4 x5 x0

    /* Restore the (unchanged) context pointer and return */

    ldr    x0, [sp], #16
    ret
/****************************************************************************
* Function: arm64_context_switch
*
* Description:
* Routine to handle context switch
*
* arm64_context_switch( x0, x1)
* x0: restore thread stack context
* x1: save thread stack context
* note:
* x1 = 0, only restore x0
*
****************************************************************************/
GTEXT(arm64_context_switch)
SECTION_FUNC(text, arm64_context_switch)
    /* If x1 (save context) is NULL, skip saving and only restore x0 */

    cmp    x1, #0x0
    beq    restore_new

    /* Save the current SP_ELx into the outgoing thread's context */

    mov    x4, sp
    str    x4, [x1, #8 * REG_SP_ELX]

    /* Save the current task's SP_EL0 and exception depth */

    mrs    x4, sp_el0
    mrs    x5, tpidrro_el0
    stp    x4, x5, [x1, #8 * REG_SP_EL0]

    /* Save TPIDR_EL0/TPIDR_EL1, which hold the current TCB pointer */

    mrs    x4, tpidr_el0
    mrs    x5, tpidr_el1
    stp    x4, x5, [x1, #8 * REG_TPIDR_EL0]

restore_new:
    /* Restore SP_EL0 and the incoming thread's exception depth */

    ldp    x4, x5, [x0, #8 * REG_SP_EL0]
    msr    tpidrro_el0, x5
    msr    sp_el0, x4

    /* Restore TPIDR_EL0/TPIDR_EL1 (incoming thread's TCB pointer) */

    ldp    x4, x5, [x0, #8 * REG_TPIDR_EL0]
    msr    tpidr_el0, x4
    msr    tpidr_el1, x5

    /* Retrieve the new thread's SP_ELx and switch stacks */

    ldr    x4, [x0, #8 * REG_SP_ELX]
    mov    sp, x4

#ifdef CONFIG_SCHED_INSTRUMENTATION_SWITCH
    /* x30 must survive the call; pair with xzr to keep SP 16-aligned */

    stp    xzr, x30, [sp, #-16]!
    bl     arm64_trace_context_switch
    ldp    xzr, x30, [sp], #16
#endif

#ifdef CONFIG_ARCH_FPU
    stp    xzr, x30, [sp, #-16]!
    bl     arm64_fpu_context_restore
    ldp    xzr, x30, [sp], #16
#endif

    /* Return to arm64_sync_exc() or arm64_irq_handler() */

    ret
/****************************************************************************
* Function: arm64_sync_exc
*
* Description:
* handle synchronous exception for AArch64
*
****************************************************************************/
GTEXT(arm64_sync_exc)
SECTION_FUNC(text, arm64_sync_exc)
    /* Extract the EC (exception class) field, ESR_EL1[31:26], to decide
     * which synchronous exception must be handled.
     */

    mrs    x0, esr_el1
    lsr    x1, x0, #26

#ifdef CONFIG_ARCH_FPU
    /* EC 0x07: trapped access to SIMD or floating-point registers */

    cmp    x1, #0x07
    bne    1f
    mov    x0, sp
    bl     arm64_fpu_trap

    /* The FPU trap is handled; skip arm64_fpu_exit_exception on exit */

    b      arm64_exit_exc_fpu_done
1:
#endif
    /* EC 0x15: SVC instruction executed in AArch64 (system call) */

    cmp    x1, #0x15

    /* Anything other than an SVC is a fatal synchronous exception */

    bne    exc_handle

    /* x0 = syscall_cmd
     * if (x0 <= SYS_switch_context)
     *   {
     *     call context_switch -- it is a context-switch syscall
     *   }
     *
     * #define SYS_save_context    (0)
     * #define SYS_restore_context (1)
     * #define SYS_switch_context  (2)
     */

    ldr    x0, [sp, #8 * REG_X0]
    cmp    x0, #SYS_save_context
    beq    save_context
    cmp    x0, #SYS_switch_context
    beq    context_switch
    cmp    x0, #SYS_restore_context
    beq    context_switch

    /* Normal syscall; the thread context will not switch.
     *
     * Call the SVC handler with interrupts disabled:
     * void arm64_syscall(uint64_t *regs)
     * in:
     *   regs = pointer to the reg_context frame on this stack
     *   regs[REG_X0]: syscall cmd
     *   regs[REG_X1] ~ regs[REG_X6]: syscall parameters
     * out:
     *   x0: return value from arm64_syscall
     */

    mov    x0, sp                /* x0 = reg frame */
    bl     arm64_syscall         /* Call the handler */

    /* Store the return value into the frame's REG_X0 slot */

    str    x0, [sp, #8 * REG_X0]

    /* Return from the exception */

    b      arm64_exit_exception

context_switch:
    /* Call arm64_syscall_switch() for context switch:
     *
     * uint64_t *arm64_syscall_switch(uint64_t *regs)
     * out:
     *   x0: context to restore
     *   regs[REG_X1]: where to store the saved context;
     *                 if 0, only restore x0
     */

    mov    x0, sp
    bl     arm64_syscall_switch

    /* Fetch the save-context pointer; NULL means restore-only */

    ldr    x1, [sp, #8 * REG_X1]
    cmp    x1, #0x0
    beq    do_switch
    ldr    x1, [x1]

do_switch:
#ifdef CONFIG_SMP
    /* Complete any pending TLB or cache maintenance on this CPU in case
     * the thread migrates to a different CPU.  This full barrier is also
     * required by the membarrier system call.
     */

    dsb    ish
#endif

    bl     arm64_context_switch

#ifdef CONFIG_ARCH_FPU
    /* FPU state was handled in the switch; use the fpu-done exit path */

    b      arm64_exit_exc_fpu_done
#else
    b      arm64_exit_exception
#endif

save_context:
    arm64_exception_context_save x0 x1 sp
    mov    x0, sp
    bl     arm64_syscall_save_context

    /* Store the return value into the ESF's REG_X0 slot */

    str    x0, [sp, #8 * REG_X0]

    /* Return from the exception */

    b      arm64_exit_exception

exc_handle:
    arm64_exception_context_save x0 x1 sp
    mov    x0, #K_ERR_CPU_EXCEPTION
    mov    x1, sp

    /* void arm64_fatal_error(unsigned int reason, const uint64_t *regs)
     * x0 = reason
     * x1 = exception stack frame
     */

    bl     arm64_fatal_error

    /* Reached only if the error was recoverable */

    b      arm64_exit_exception
/****************************************************************************
* Name: arm64_irq_handler
*
* Description:
* Interrupt exception handler
*
****************************************************************************/
GTEXT(arm64_irq_handler)
SECTION_FUNC(text, arm64_irq_handler)
    /* Locate the interrupt stack top for this CPU */

#ifdef CONFIG_SMP
    get_cpu_id x1
    ldr    x0, =(g_cpu_int_stacktop)
    lsl    x1, x1, #3            /* index the per-CPU 8-byte pointer table */
    ldr    x0, [x0, x1]
#else
    ldr    x0, =(g_interrupt_stack + CONFIG_ARCH_INTERRUPTSTACK)
#endif

    /* Save the task's SP and switch to the interrupt stack; the 16-byte
     * push stores the 8-byte SP while keeping SP 16-aligned.
     */

    mov    x1, sp
    mov    sp, x0
    str    x1, [sp, #-16]!

    mov    x0, x1                /* x0 = reg frame (on the task stack) */

    /* Call arm64_decodeirq() on the interrupt stack with interrupts
     * disabled.
     */

    bl     arm64_decodeirq

    /* Upon return, x0 holds the pointer to the context to restore.  It
     * differs from the value passed in when a context switch is
     * required.
     */

    ldr    x1, [sp], #16

    /* Back to the task's stack */

    mov    sp, x1

    /* No switch needed if the returned context is the interrupted one */

    cmp    x0, x1
    beq    irq_exit

irq_context_switch:
#ifdef CONFIG_SMP
    /* Complete any pending TLB or cache maintenance on this CPU in case
     * the thread migrates to a different CPU.  This full barrier is also
     * required by the membarrier system call.
     */

    dsb    ish
#endif

    /* Switch thread:
     * - x0: context to restore (returned by arm64_decodeirq)
     * - x1: context to save (captured before calling arm64_decodeirq)
     */

    bl     arm64_context_switch

#ifdef CONFIG_ARCH_FPU
    /* FPU state was handled in the switch; use the fpu-done exit path */

    b      arm64_exit_exc_fpu_done
#endif

irq_exit:
    b      arm64_exit_exception
/* TODO: if the arm64_fatal_error return success, maybe need context switch */
/* SError (system error) exception handler.
 * TODO: if arm64_fatal_error returns success, a context switch may be
 * needed.
 */

GTEXT(arm64_serror_handler)
SECTION_FUNC(text, arm64_serror_handler)
    arm64_exception_context_save x0 x1 sp

    mov    x0, #K_ERR_CPU_EXCEPTION
    mov    x1, sp
    bl     arm64_fatal_error

    /* Reached only if the error was recoverable */

    b      arm64_exit_exception
/* Handler for exceptions taken from a lower EL running AArch32, which
 * this port does not support: report a fatal error.
 */

GTEXT(arm64_mode32_error)
SECTION_FUNC(text, arm64_mode32_error)
    arm64_exception_context_save x0 x1 sp

    mov    x1, sp
    mov    x0, #K_ERR_CPU_MODE32
    bl     arm64_fatal_error

    /* Reached only if the error was recoverable */

    b      arm64_exit_exception
/* Handler for FIQ vectors, which are not used by this port: report a
 * spurious-interrupt fatal error.
 */

GTEXT(arm64_irq_spurious)
SECTION_FUNC(text, arm64_irq_spurious)
    arm64_exception_context_save x0 x1 sp

    mov    x1, sp
    mov    x0, #K_ERR_SPURIOUS_IRQ
    bl     arm64_fatal_error

    /* Reached only if the error was recoverable */

    b      arm64_exit_exception

View file

@ -0,0 +1,248 @@
/****************************************************************************
* arch/arm64/src/common/arm64_vfork.c
*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership. The
* ASF licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the
* License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
****************************************************************************/
/****************************************************************************
* Included Files
****************************************************************************/
#include <nuttx/config.h>
#include <inttypes.h>
#include <stdint.h>
#include <string.h>
#include <assert.h>
#include <errno.h>
#include <debug.h>
#include <nuttx/sched.h>
#include <nuttx/arch.h>
#include <arch/irq.h>
#include "sched/sched.h"
#include "arm64_arch.h"
#include "arm64_vfork.h"
#include "arm64_internal.h"
#include "arm64_fatal.h"
#ifdef CONFIG_ARCH_FPU
#include "arm64_fpu.h"
#endif
/****************************************************************************
* Pre-processor Definitions
****************************************************************************/
/****************************************************************************
* Public Functions
****************************************************************************/
#ifdef CONFIG_ARCH_FPU

/****************************************************************************
 * Name: arm64_vfork_fpureg_save
 *
 * Description:
 *   Capture the calling thread's FPU register state into the vfork
 *   context.  Called from the vfork() entry path before up_vfork() so
 *   the child can inherit the parent's floating-point state.
 *
 * Input Parameters:
 *   context - vfork context whose 'fpu' member receives the snapshot
 *
 ****************************************************************************/

void arm64_vfork_fpureg_save(struct vfork_s *context)
{
  irqstate_t flags;

  /* Order matters: disable lazy-FPU trapping first so the save below
   * accesses the live registers, then block interrupts so the snapshot
   * is atomic.
   */

  arm64_fpu_disable();

  /* Take a snapshot of the thread's FPU register context right now */

  flags = enter_critical_section();

  arm64_fpu_save(&context->fpu);

  /* Ensure the stores have completed before re-enabling anything */

  ARM64_DSB();
  leave_critical_section(flags);

  arm64_fpu_enable();
}
#endif
/****************************************************************************
* Name: vfork
*
* Description:
* The vfork() function has the same effect as fork(), except that the
* behavior is undefined if the process created by vfork() either modifies
* any data other than a variable of type pid_t used to store the return
* value from vfork(), or returns from the function in which vfork() was
* called, or calls any other function before successfully calling _exit()
* or one of the exec family of functions.
*
* The overall sequence is:
*
* 1) User code calls vfork(). vfork() collects context information and
 * transfers control to up_vfork().
* 2) up_vfork() and calls nxtask_setup_vfork().
* 3) nxtask_setup_vfork() allocates and configures the child task's TCB.
* This consists of:
* - Allocation of the child task's TCB.
* - Initialization of file descriptors and streams
* - Configuration of environment variables
* - Allocate and initialize the stack
* - Setup the input parameters for the task.
* - Initialization of the TCB (including call to up_initial_state())
* 4) up_vfork() provides any additional operating context. up_vfork must:
* - Initialize special values in any CPU registers that were not
* already configured by up_initial_state()
* 5) up_vfork() then calls nxtask_start_vfork()
* 6) nxtask_start_vfork() then executes the child thread.
*
* nxtask_abort_vfork() may be called if an error occurs between steps 3 and
* 6.
*
* Input Parameters:
* context - Caller context information saved by vfork()
*
* Returned Value:
* Upon successful completion, vfork() returns 0 to the child process and
* returns the process ID of the child process to the parent process.
* Otherwise, -1 is returned to the parent, no child process is created,
* and errno is set to indicate the error.
*
****************************************************************************/
pid_t up_vfork(const struct vfork_s *context)
{
  struct tcb_s *parent = this_task();
  struct task_tcb_s *child;
  uint64_t newsp;                 /* Child SP mirroring parent's usage */
  uint64_t newfp;                 /* Child frame pointer */
  uint64_t newtop;                /* Top of the child's stack */
  uint64_t stacktop;              /* Top of the parent's stack */
  uint64_t stackutil;             /* Bytes of parent stack in use */
  char *stack_ptr;
  struct regs_context *pvforkctx; /* Initial register frame for child */
#ifdef CONFIG_ARCH_FPU
  struct fpu_reg *pfpuctx;
#endif

  /* Allocate and initialize a TCB for the child task.
   *
   * NOTE(review): masking bit 0 of LR (& ~1) looks like an AArch32
   * Thumb-mode carry-over; AArch64 has no Thumb interworking bit --
   * confirm whether the mask is still needed.
   */

  child = nxtask_setup_vfork((start_t)(context->lr & ~1));
  if (!child)
    {
      serr("ERROR: nxtask_setup_vfork failed\n");
      return (pid_t)ERROR;
    }

  /* How much of the parent's stack was utilized?  The ARM uses
   * a push-down stack so that the current stack pointer should
   * be lower than the initial, adjusted stack pointer.  The
   * stack usage should be the difference between those two.
   */

  stacktop = (uint64_t)parent->stack_base_ptr +
             parent->adj_stack_size;
  DEBUGASSERT(stacktop > context->sp);
  stackutil = stacktop - context->sp;

  /* Make some feeble effort to preserve the stack contents.  This is
   * feeble because the stack surely contains invalid pointers and other
   * content that will not work in the child context.  However, if the
   * user follows all of the caveats of vfork() usage, even this feeble
   * effort is overkill.
   */

  newtop = (uint64_t)child->cmn.stack_base_ptr +
           child->cmn.adj_stack_size;
  newsp = newtop - stackutil;
  memcpy((void *)newsp, (const void *)context->sp, stackutil);

  /* Was there a frame pointer in place before?  If it pointed into the
   * parent's stack, relocate it to the same offset in the child's stack;
   * otherwise copy it through unchanged.
   */

  if (context->fp >= context->sp && context->fp < stacktop)
    {
      uint64_t frameutil = stacktop - context->fp;
      newfp = newtop - frameutil;
    }
  else
    {
      newfp = context->fp;
    }

  /* Update the stack pointer, frame pointer, and volatile registers.  When
   * the child TCB was initialized, all of the values were set to zero.
   * up_initial_state() altered a few values, but the return value in R0
   * should be cleared to zero, providing the indication to the newly started
   * child thread.
   */

  /* Build the vfork register frame at the top of the copied stack */

  stack_ptr = (char *)newsp;

#ifdef CONFIG_ARCH_FPU
  /* The FPU snapshot sits below the GP frame on the child's stack */

  pfpuctx = STACK_PTR_TO_FRAME(struct fpu_reg, stack_ptr);
  child->cmn.xcp.fpu_regs = (uint64_t *)pfpuctx;

  /* Copy the parent's FPU registers (captured by
   * arm64_vfork_fpureg_save) into the child's frame.
   */

  memcpy(pfpuctx, &context->fpu, sizeof(struct fpu_reg));
  stack_ptr = (char *)pfpuctx;
#endif

  pvforkctx = STACK_PTR_TO_FRAME(struct regs_context, stack_ptr);

  /* X0 = 0 tells the child it is the child (vfork return convention) */

  pvforkctx->regs[REG_X0] = 0;

  /* Propagate the callee-saved/indirect registers captured by vfork() */

  pvforkctx->regs[REG_X8] = context->regs[VFORK_REG_X8];
  pvforkctx->regs[REG_X9] = context->regs[VFORK_REG_X9];
  pvforkctx->regs[REG_X10] = context->regs[VFORK_REG_X10];
  pvforkctx->regs[REG_X11] = context->regs[VFORK_REG_X11];
  pvforkctx->regs[REG_X12] = context->regs[VFORK_REG_X12];
  pvforkctx->regs[REG_X13] = context->regs[VFORK_REG_X13];
  pvforkctx->regs[REG_X14] = context->regs[VFORK_REG_X14];
  pvforkctx->regs[REG_X15] = context->regs[VFORK_REG_X15];
  pvforkctx->regs[REG_X16] = context->regs[VFORK_REG_X16];
  pvforkctx->regs[REG_X17] = context->regs[VFORK_REG_X17];
  pvforkctx->regs[REG_X18] = context->regs[VFORK_REG_X18];
  pvforkctx->regs[REG_X19] = context->regs[VFORK_REG_X19];
  pvforkctx->regs[REG_X20] = context->regs[VFORK_REG_X20];
  pvforkctx->regs[REG_X21] = context->regs[VFORK_REG_X21];
  pvforkctx->regs[REG_X22] = context->regs[VFORK_REG_X22];
  pvforkctx->regs[REG_X23] = context->regs[VFORK_REG_X23];
  pvforkctx->regs[REG_X24] = context->regs[VFORK_REG_X24];
  pvforkctx->regs[REG_X25] = context->regs[VFORK_REG_X25];
  pvforkctx->regs[REG_X26] = context->regs[VFORK_REG_X26];
  pvforkctx->regs[REG_X27] = context->regs[VFORK_REG_X27];
  pvforkctx->regs[REG_X28] = context->regs[VFORK_REG_X28];
  pvforkctx->regs[REG_X29] = newfp;

  /* Start the child at EL1h; optionally with interrupts masked */

  pvforkctx->spsr = SPSR_MODE_EL1H;
#ifdef CONFIG_SUPPRESS_INTERRUPTS
  pvforkctx->spsr |= (DAIF_IRQ_BIT | DAIF_FIQ_BIT);
#endif /* CONFIG_SUPPRESS_INTERRUPTS */

  /* Resume execution at the caller of vfork() with a fresh frame */

  pvforkctx->elr = (uint64_t)context->lr;
  pvforkctx->exe_depth = 0;
  pvforkctx->sp_elx = (uint64_t)pvforkctx;
  pvforkctx->sp_el0 = (uint64_t)pvforkctx;
  pvforkctx->tpidr_el0 = (uint64_t)(&child->cmn);
  pvforkctx->tpidr_el1 = (uint64_t)(&child->cmn);
  child->cmn.xcp.regs = (uint64_t *)pvforkctx;

  /* And, finally, start the child task.  On a failure, nxtask_start_vfork()
   * will discard the TCB by calling nxtask_abort_vfork().
   */

  return nxtask_start_vfork(child);
}

View file

@ -0,0 +1,89 @@
/****************************************************************************
* arch/arm64/src/common/arm64_vfork.h
*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership. The
* ASF licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the
* License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
****************************************************************************/
/* Include guard renamed from the arm32-style __ARCH_ARM_SRC_COMMON_
 * ARM_VFORK_H to match this file's actual path (arch/arm64/...), avoiding
 * any collision with the arm port's vfork header guard.
 */

#ifndef __ARCH_ARM64_SRC_COMMON_ARM64_VFORK_H
#define __ARCH_ARM64_SRC_COMMON_ARM64_VFORK_H

/****************************************************************************
 * Included Files
 ****************************************************************************/

#include <nuttx/config.h>
#include <arch/irq.h>

/****************************************************************************
 * Pre-processor Definitions
 ****************************************************************************/

/* Indices into the vfork_s 'regs' save area (x0-x28), followed by the
 * separately-stored FP, LR and SP.
 */

#define VFORK_REG_X0   (0)
#define VFORK_REG_X1   (1)
#define VFORK_REG_X2   (2)
#define VFORK_REG_X3   (3)
#define VFORK_REG_X4   (4)
#define VFORK_REG_X5   (5)
#define VFORK_REG_X6   (6)
#define VFORK_REG_X7   (7)
#define VFORK_REG_X8   (8)
#define VFORK_REG_X9   (9)
#define VFORK_REG_X10  (10)
#define VFORK_REG_X11  (11)
#define VFORK_REG_X12  (12)
#define VFORK_REG_X13  (13)
#define VFORK_REG_X14  (14)
#define VFORK_REG_X15  (15)
#define VFORK_REG_X16  (16)
#define VFORK_REG_X17  (17)
#define VFORK_REG_X18  (18)
#define VFORK_REG_X19  (19)
#define VFORK_REG_X20  (20)
#define VFORK_REG_X21  (21)
#define VFORK_REG_X22  (22)
#define VFORK_REG_X23  (23)
#define VFORK_REG_X24  (24)
#define VFORK_REG_X25  (25)
#define VFORK_REG_X26  (26)
#define VFORK_REG_X27  (27)
#define VFORK_REG_X28  (28)
#define VFORK_REG_FP   (29)  /* Frame pointer (x29) */
#define VFORK_REG_LR   (30)  /* Return address (x30) */
#define VFORK_REG_SP   (31)  /* Stack pointer */

/* Total save-area size in 64-bit words (used by the assembly stub) */

#ifdef CONFIG_ARCH_FPU
#define VFORK_REGS_SIZE  (32 + XCPTCONTEXT_FPU_REGS)
#else
#define VFORK_REGS_SIZE  (32)
#endif

#ifndef __ASSEMBLY__

/* Caller context captured by vfork() and consumed by up_vfork().  The
 * layout must match the VFORK_REG_* offsets above.
 */

struct vfork_s
{
  uint64_t regs[29];          /* x0-x28 */

  uint64_t fp;                /* Frame pointer (x29) */
  uint64_t lr;                /* Return address (x30) */
  uint64_t sp;                /* Stack pointer */
#ifdef CONFIG_ARCH_FPU
  struct fpu_reg fpu;         /* FPU snapshot (see arm64_vfork_fpureg_save) */
#endif
};
#endif /* __ASSEMBLY__ */

#endif /* __ARCH_ARM64_SRC_COMMON_ARM64_VFORK_H */

View file

@ -0,0 +1,131 @@
/****************************************************************************
* arch/arm64/src/common/arm64_vfork_func.S
*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership. The
* ASF licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the
* License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
****************************************************************************/
/****************************************************************************
* Included Files
****************************************************************************/
#include <nuttx/config.h>
#include "arch/syscall.h"
#include "arm64_macro.inc"
#include "arm64_vfork.h"
/****************************************************************************
* Public Symbols
****************************************************************************/
.file "arm64_vfork_func.S"
/****************************************************************************
* Public Functions
****************************************************************************/
/****************************************************************************
* Name: vfork
*
* Description:
* The vfork() function has the same effect as fork(), except that the
* behavior is undefined if the process created by vfork() either modifies
* any data other than a variable of type pid_t used to store the return
* value from vfork(), or returns from the function in which vfork() was
* called, or calls any other function before successfully calling _exit()
* or one of the exec family of functions.
*
* This thin layer implements vfork by simply calling up_vfork() with the
* vfork() context as an argument. The overall sequence is:
*
 *   1) User code calls vfork().  vfork() collects context information and
 *      transfers control to up_vfork().
 *   2) up_vfork() then calls nxtask_setup_vfork().
* 3) nxtask_setup_vfork() allocates and configures the child task's TCB.
* This consists of:
* - Allocation of the child task's TCB.
* - Initialization of file descriptors and streams
* - Configuration of environment variables
* - Allocate and initialize the stack
* - Setup the input parameters for the task.
* - Initialization of the TCB (including call to up_initial_state())
* 4) up_vfork() provides any additional operating context. up_vfork must:
* - Initialize special values in any CPU registers that were not
* already configured by up_initial_state()
* 5) up_vfork() then calls nxtask_start_vfork()
* 6) nxtask_start_vfork() then executes the child thread.
*
* Input Parameters:
* None
*
* Returned Value:
* Upon successful completion, vfork() returns 0 to the child process and
* returns the process ID of the child process to the parent process.
* Otherwise, -1 is returned to the parent, no child process is created,
* and errno is set to indicate the error.
*
****************************************************************************/
GTEXT(vfork)
SECTION_FUNC(text, vfork)
    /* Create a stack frame.
     * NOTE(review): x29/x30 are stored at [sp] *before* sp is adjusted,
     * so the pair lands just above the vfork_s structure -- confirm this
     * region cannot be clobbered (e.g. by an interrupt sharing the stack)
     * before the sub instruction executes.
     */

    mov    x0, sp                       /* Save the value of the stack on entry */
    stp    x29, x30, [sp]               /* Stash FP and LR above the new frame */
    sub    sp, sp, #8 * VFORK_REGS_SIZE /* Allocate struct vfork_s on the stack */

    /* Save all of the integer registers X0-X28 in pairs; offsets are the
     * VFORK_REG_* indices (arm64_vfork.h) scaled by 8.
     */

    stp    x0, x1, [sp, #8 * VFORK_REG_X0]
    stp    x2, x3, [sp, #8 * VFORK_REG_X2]
    stp    x4, x5, [sp, #8 * VFORK_REG_X4]
    stp    x6, x7, [sp, #8 * VFORK_REG_X6]
    stp    x8, x9, [sp, #8 * VFORK_REG_X8]
    stp    x10, x11, [sp, #8 * VFORK_REG_X10]
    stp    x12, x13, [sp, #8 * VFORK_REG_X12]
    stp    x14, x15, [sp, #8 * VFORK_REG_X14]
    stp    x16, x17, [sp, #8 * VFORK_REG_X16]
    stp    x18, x19, [sp, #8 * VFORK_REG_X18]
    stp    x20, x21, [sp, #8 * VFORK_REG_X20]
    stp    x22, x23, [sp, #8 * VFORK_REG_X22]
    stp    x24, x25, [sp, #8 * VFORK_REG_X24]
    stp    x26, x27, [sp, #8 * VFORK_REG_X26]
    stp    x28, x29, [sp, #8 * VFORK_REG_X28]

    /* Save LR (x30) and the original stack pointer (still held in x0)
     * into the adjacent VFORK_REG_LR/VFORK_REG_SP slots.
     */

    stp    x30, x0, [sp, #8 * VFORK_REG_LR]

    /* Floating point registers: delegate to arm64_vfork_fpureg_save,
     * preserving the context pointer and LR across the call.
     */

#ifdef CONFIG_ARCH_FPU
    mov    x0, sp
    stp    x0, x30, [sp, #-16]!
    bl     arm64_vfork_fpureg_save
    ldp    x0, x30, [sp], #16
#endif

    /* Then, call up_vfork(), passing it a pointer to the stack structure */

    mov    x0, sp
    bl     up_vfork

    /* Release the stack data and return the value returned by up_vfork */

    add    sp, sp, #8 * VFORK_REGS_SIZE
    ldp    x29, x30, [sp]
    ret

View file

@ -0,0 +1,67 @@
/****************************************************************************
* arch/arm64/src/common/barriers.h
*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership. The
* ASF licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the
* License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
****************************************************************************/
#ifndef __ARCH_ARM64_SRC_COMMON_BARRIERS_H
#define __ARCH_ARM64_SRC_COMMON_BARRIERS_H

/****************************************************************************
 * Included Files
 ****************************************************************************/

#ifndef __ASSEMBLY__

/****************************************************************************
 * Pre-processor Definitions
 ****************************************************************************/

/* Data Synchronization Barrier, full system.  See Arm Architecture
 * Reference Manual, ARM DDI 0487E.a C6.2.81.
 */

static inline void arm64_dsb(void)
{
  __asm__ volatile ("dsb sy" : : : "memory");
}

/* Data Memory Barrier, full system.  See Arm Architecture Reference
 * Manual, ARM DDI 0487E.a C6.2.79.
 */

static inline void arm64_dmb(void)
{
  __asm__ volatile ("dmb sy" : : : "memory");
}

/* Instruction Synchronization Barrier.  See Arm Architecture Reference
 * Manual, ARM DDI 0487E.a C6.2.96.
 */

static inline void arm64_isb(void)
{
  __asm__ volatile ("isb" : : : "memory");
}

/* Convenience macros used by C code elsewhere in arch/arm64 */

#define ARM64_DSB() arm64_dsb()
#define ARM64_ISB() arm64_isb()
#define ARM64_DMB() arm64_dmb()

#endif /* __ASSEMBLY__ */
#endif /* __ARCH_ARM64_SRC_COMMON_BARRIERS_H */

View file

@ -0,0 +1,138 @@
/****************************************************************************
* arch/arm64/src/common/crt0.c
*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership. The
* ASF licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the
* License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
****************************************************************************/
/****************************************************************************
* Included Files
****************************************************************************/
#include <nuttx/config.h>
#include <sys/types.h>
#include <stdlib.h>
#include <nuttx/addrenv.h>
#include <arch/syscall.h>
#ifdef CONFIG_BUILD_KERNEL
/****************************************************************************
* Public Function Prototypes
****************************************************************************/
int main(int argc, char *argv[]);
/****************************************************************************
* Private Functions
****************************************************************************/
/****************************************************************************
 * Name: sig_trampoline
 *
 * Description:
 *   This function is the user-space, signal handler trampoline function.
 *   It is called from up_signal_dispatch() in user-mode.
 *
 *   Per the AArch64 procedure call standard:
 *   X0-X7   - volatile argument/scratch registers, need not be preserved
 *   X19-X28 - callee-saved registers, must be preserved
 *   FP (X29), LR (X30) and SP must be preserved
 *
 * Input Parameters:
 *   X0 = sighand
 *     The address of the user-space signal handling function
 *   X1-X3 = signo, info, and ucontext
 *     Standard arguments to be passed to the signal handling function.
 *
 * Returned Value:
 *   None.  This function does not return in the normal sense.  It returns
 *   via the SYS_signal_handler_return (see syscall.h)
 *
 ****************************************************************************/

static void sig_trampoline(void) naked_function;
static void sig_trampoline(void)
{
  /* The original code here was AArch32 assembly (push/blx/ip), which is
   * invalid for arm64; this is the AArch64 equivalent.
   */

  __asm__ __volatile__
  (
    " stp x29, x30, [sp, #-16]!\n" /* Save FP and LR on the stack */
    " mov x9, x0\n"                /* X9=sighand (caller-saved scratch) */
    " mov x0, x1\n"                /* X0=signo */
    " mov x1, x2\n"                /* X1=info */
    " mov x2, x3\n"                /* X2=ucontext */
    " blr x9\n"                    /* Call the signal handler */
    " ldp x29, x30, [sp], #16\n"   /* Restore FP and LR */
    " mov x0, %0\n"                /* SYS_signal_handler_return */
    " svc %1\n"                    /* Return from the SYSCALL */
    ::"i"(SYS_signal_handler_return),
      "i"(SYS_syscall)
  );
}
/****************************************************************************
* Public Functions
****************************************************************************/
/****************************************************************************
* Name: _start
*
* Description:
* This function is the low level entry point into the main thread of
* execution of a task. It receives initial control when the task is
* started and calls main entry point of the newly started task.
*
* Input Parameters:
* argc - The number of parameters being passed.
* argv - The parameters being passed. These lie in kernel-space memory
* and will have to be reallocated in user-space memory.
*
* Returned Value:
* This function should not return. It should call the user-mode start-up
* main() function. If that function returns, this function will call
* exit.
*
****************************************************************************/
void _start(int argc, char *argv[])
{
  /* Publish the user-space signal trampoline in the reserved area at the
   * beginning of the .bss/.data region so the RTOS can dispatch signals
   * back into this address environment.
   */

  ARCH_DATA_RESERVE->ar_sigtramp = (addrenv_sigtramp_t)sig_trampoline;

  /* Call C++ constructors */

  /* Setup so that C++ destructors called on task exit */

  /* REVISIT: Missing logic */

  /* Run the application's main() entry point and terminate the task with
   * whatever status it returns.
   */

  exit(main(argc, argv));
}
#endif /* CONFIG_BUILD_KERNEL */

View file

@ -0,0 +1,30 @@
#
# For a description of the syntax of this configuration file,
# see the file kconfig-language.txt in the NuttX tools repository.
#
if ARCH_CHIP_QEMU

menu "Qemu Virt Chip Selection"

choice
	prompt "Qemu Core Configuration"
	default ARCH_CHIP_QEMU_A53

# Cortex-A53 core model of the QEMU "virt" machine (multi-core capable)
config ARCH_CHIP_QEMU_A53
	bool "Qemu virtual Processor (cortex-a53)"
	select ARCH_HAVE_MULTICPU
	select ARMV8A_HAVE_GICv3

endchoice # Qemu Chip Selection
endmenu # "Qemu Chip Selection"

menu "Qemu Peripheral Selection"

# PL011 UART of the QEMU virt machine, exposed as UART1
config QEMU_UART_PL011
	bool "UART"
	default n
	select UART1_SERIALDRIVER

endmenu # Qemu Peripheral Selection
endif # ARCH_CHIP_QEMU

View file

@ -0,0 +1,26 @@
############################################################################
# arch/arm64/src/qemu/Make.defs
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership. The
# ASF licenses this file to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
############################################################################
# Pull in the architecture-common arm64 build definitions first
include common/Make.defs

# qemu-specific C source files
CHIP_CSRCS = qemu_boot.c qemu_serial.c

# qemu-specific assembly source files
CHIP_ASRCS = qemu_lowputc.S

View file

@ -0,0 +1,42 @@
/****************************************************************************
* arch/arm64/src/qemu/chip.h
*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership. The
* ASF licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the
* License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
****************************************************************************/
#ifndef __ARCH_ARM64_SRC_QEMU_CHIP_H
#define __ARCH_ARM64_SRC_QEMU_CHIP_H

/****************************************************************************
 * Included Files
 ****************************************************************************/

#include <nuttx/config.h>

#ifndef __ASSEMBLY__
#  include <nuttx/arch.h>
#endif

/****************************************************************************
 * Pre-processor Definitions
 ****************************************************************************/

/* NOTE(review): intentionally empty -- this appears to be a placeholder so
 * the common build can include a chip header for the QEMU virt target.
 */

/****************************************************************************
 * Macro Definitions
 ****************************************************************************/

#endif /* __ARCH_ARM64_SRC_QEMU_CHIP_H */

View file

@ -0,0 +1,112 @@
/****************************************************************************
* arch/arm64/src/qemu/qemu_boot.c
*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership. The
* ASF licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the
* License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
****************************************************************************/
/****************************************************************************
* Included Files
****************************************************************************/
#include <nuttx/config.h>
#include <stdint.h>
#include <assert.h>
#include <debug.h>
#include <nuttx/cache.h>
#ifdef CONFIG_PAGING
# include <nuttx/page.h>
#endif
#include <arch/chip/chip.h>
#ifdef CONFIG_SMP
#include "arm64_smp.h"
#endif
#include "arm64_arch.h"
#include "arm64_internal.h"
#include "arm64_mmu.h"
#include "qemu_boot.h"
#include "qemu_serial.h"
/****************************************************************************
* Private Data
****************************************************************************/
/* Flat (VA == PA) mappings for the QEMU virt machine: a 512MB
 * device-memory window (strongly-ordered, nGnRnE) and a 512MB
 * normal-memory window for DRAM, both secure and read-write.
 */

static const struct arm_mmu_region mmu_regions[] =
{
  MMU_REGION_FLAT_ENTRY("DEVICE_REGION",
                        CONFIG_DEVICEIO_BASEADDR, MB(512),
                        MT_DEVICE_NGNRNE | MT_RW | MT_SECURE),

  MMU_REGION_FLAT_ENTRY("DRAM0_S0",
                        CONFIG_RAMBANK1_ADDR, MB(512),
                        MT_NORMAL | MT_RW | MT_SECURE),
};

/* Table handed to arm64_mmu_init() describing the regions above */

const struct arm_mmu_config mmu_config =
{
  .num_regions = ARRAY_SIZE(mmu_regions),
  .mmu_regions = mmu_regions,
};
/****************************************************************************
* Public Functions
****************************************************************************/
/****************************************************************************
* Name: arm64_chip_boot
*
* Description:
* Complete boot operations started in arm64_head.S
*
****************************************************************************/
void arm64_chip_boot(void)
{
  /* MAP IO and DRAM (see mmu_regions above), enable MMU.
   * NOTE(review): the meaning of the boolean argument is defined by
   * arm64_mmu_init() -- confirm it selects the primary-core path.
   */

  arm64_mmu_init(true);

#ifdef CONFIG_SMP
  /* Initialize the PSCI interface using the "smc" conduit; used for
   * multi-core bring-up.
   */

  arm64_psci_init("smc");
#endif

  /* Perform board-specific device initialization. This would include
   * configuration of board specific resources such as GPIOs, LEDs, etc.
   */

  qemu_board_initialize();

#ifdef USE_EARLYSERIALINIT
  /* Perform early serial initialization if we are going to use the serial
   * driver.
   */

  qemu_earlyserialinit();
#endif
}
#if defined(CONFIG_NET) && !defined(CONFIG_NETDEV_LATEINIT)
/* Stub network initialization hook called by the common startup logic */

void arm64_netinitialize(void)
{
  /* TODO: QEMU network support is not implemented yet */
}
#endif

View file

@ -0,0 +1,80 @@
/****************************************************************************
* arch/arm64/src/qemu/qemu_boot.h
*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership. The
* ASF licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the
* License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
****************************************************************************/
#ifndef __ARCH_ARM64_SRC_QEMU_QEMU_BOOT_H
#define __ARCH_ARM64_SRC_QEMU_QEMU_BOOT_H
/****************************************************************************
* Included Files
****************************************************************************/
#include <nuttx/config.h>
#include <nuttx/compiler.h>
#include <sys/types.h>
#include <stdint.h>
#include <stdbool.h>
#include <arch/chip/chip.h>
#include "arm64_internal.h"
#include "arm64_arch.h"
/****************************************************************************
* Public Function Prototypes
****************************************************************************/
#ifndef __ASSEMBLY__
#undef EXTERN
#if defined(__cplusplus)
#define EXTERN extern "C"
extern "C"
{
#else
#define EXTERN extern
#endif
/****************************************************************************
* Name: qemu_board_initialize
*
 * Description:
 *   All qemu architectures must provide the following entry point.  This
 *   entry point is called in the initialization phase -- after the MMU
 *   has been enabled and all memory has been configured and mapped, but
 *   before any devices have been initialized.
*
* Input Parameters:
* None
*
* Returned Value:
* None
*
****************************************************************************/
void qemu_board_initialize(void);
void up_lowputc(char c);
void up_low_flush(void);
#undef EXTERN
#if defined(__cplusplus)
}
#endif
#endif /* __ASSEMBLY__ */
#endif /* __ARCH_ARM64_SRC_QEMU_QEMU_BOOT_H */

View file

@ -0,0 +1,101 @@
/****************************************************************************
* arch/arm64/src/qemu/qemu_lowputc.S
*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership. The
* ASF licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the
* License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
****************************************************************************
*
* DESCRIPTION
* Wrapper for early printk
*
***************************************************************************/
#include <nuttx/config.h>
#include "arm64_macro.inc"
/****************************************************************************
* Public Symbols
****************************************************************************/
.file "qemu_lowputc.S"
/****************************************************************************
* Assembly Macros
****************************************************************************/
/* 32-bit register definition for qemu pl011 uart */
#define UART1_BASE_ADDRESS 0x9000000
#define EARLY_UART_PL011_BAUD_RATE 115200
/****************************************************************************
* Private Functions
****************************************************************************/
/****************************************************************************
* Public Functions
****************************************************************************/
/* PL011 UART initialization
* xb: register which contains the UART base address
* c: scratch register number
*/
GTEXT(up_earlyserialinit)
SECTION_FUNC(text, up_earlyserialinit)
    /* NOTE(review): the divisor math assumes a 7372800 Hz UART reference
     * clock -- confirm against the QEMU virt machine model.
     */

    ldr   x15, =UART1_BASE_ADDRESS
    mov   x0, #(7372800 / EARLY_UART_PL011_BAUD_RATE % 16)
    strh  w0, [x15, #0x28]       /* -> UARTFBRD (Baud divisor fraction) */
    mov   x0, #(7372800 / EARLY_UART_PL011_BAUD_RATE / 16)
    strh  w0, [x15, #0x24]       /* -> UARTIBRD (Baud divisor integer) */
    mov   x0, #0x60              /* 8n1, FIFOs disabled */
    str   w0, [x15, #0x2C]       /* -> UARTLCR_H (Line control) */
    ldr   x0, =0x00000301        /* RXE | TXE | UARTEN */
    str   w0, [x15, #0x30]       /* -> UARTCR (Control Register) */
    ret
/* PL011 UART: wait for the UART to be ready to transmit
 * xb: register which contains the UART base address
 * wt: scratch register used to read the flag register
 */
.macro early_uart_ready xb, wt
1:
ldrh \wt, [\xb, #0x18] /* <- UARTFR (Flag register) */
tst \wt, #0x8 /* Check BUSY bit */
b.ne 1b /* Wait for the UART to be ready */
.endm
/* PL011 UART transmit character
* xb: register which contains the UART base address
* wt: register which contains the character to transmit
*/
.macro early_uart_transmit xb, wt
strb \wt, [\xb] /* -> UARTDR (Data Register) */
.endm
/* Print a character on the UART - this function is called by C
* x0: character to print
*/
GTEXT(up_lowputc)
SECTION_FUNC(text, up_lowputc)
    ldr   x15, =UART1_BASE_ADDRESS       /* x15 = PL011 register base */
    early_uart_ready    x15, w2          /* Spin until BUSY is clear */
    early_uart_transmit x15, w0          /* Write character (w0) to UARTDR */
    ret

View file

@ -0,0 +1,878 @@
/***************************************************************************
* arch/arm64/src/qemu/qemu_serial.c
*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership. The
* ASF licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the
* License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
***************************************************************************/
/***************************************************************************
* Included Files
***************************************************************************/
#include <nuttx/config.h>
#include <sys/types.h>
#include <stdint.h>
#include <stdbool.h>
#include <unistd.h>
#include <string.h>
#include <assert.h>
#include <errno.h>
#include <debug.h>
#ifdef CONFIG_SERIAL_TERMIOS
# include <termios.h>
#endif
#include <nuttx/irq.h>
#include <nuttx/arch.h>
#include <nuttx/spinlock.h>
#include <nuttx/init.h>
#include <nuttx/fs/ioctl.h>
#include <nuttx/semaphore.h>
#include <nuttx/serial/serial.h>
#include "arm64_arch.h"
#include "arm64_internal.h"
#include "qemu_serial.h"
#include "arm64_arch_timer.h"
#include "qemu_boot.h"
#include "arm64_gic.h"
#ifdef USE_SERIALDRIVER
/***************************************************************************
* Pre-processor Definitions
***************************************************************************/
/* Which UART with be tty0/console and which tty1-4? The console will
* always be ttyS0. If there is no console then will use the lowest
* numbered UART.
*/
/* First pick the console and ttys0. This could be any of UART1-5 */
#if defined(CONFIG_UART1_SERIAL_CONSOLE)
# define CONSOLE_DEV g_uart1port /* UART1 is console */
# define TTYS0_DEV g_uart1port /* UART1 is ttyS0 */
# define UART1_ASSIGNED 1
#endif
#define PL011_BIT_MASK(x, y) (((2 << (x)) - 1) << (y))
/* PL011 Uart Flags Register */
#define PL011_FR_CTS BIT(0) /* clear to send - inverted */
#define PL011_FR_DSR BIT(1) /* data set ready - inverted
*/
#define PL011_FR_DCD BIT(2) /* data carrier detect -
* inverted */
#define PL011_FR_BUSY BIT(3) /* busy transmitting data */
#define PL011_FR_RXFE BIT(4) /* receive FIFO empty */
#define PL011_FR_TXFF BIT(5) /* transmit FIFO full */
#define PL011_FR_RXFF BIT(6) /* receive FIFO full */
#define PL011_FR_TXFE BIT(7) /* transmit FIFO empty */
#define PL011_FR_RI BIT(8) /* ring indicator - inverted */
/* PL011 Integer baud rate register */
#define PL011_IBRD_BAUD_DIVINT_MASK 0xff /* 16 bits of divider */
/* PL011 Fractional baud rate register */
#define PL011_FBRD_BAUD_DIVFRAC 0x3f
#define PL011_FBRD_WIDTH 6u
/* PL011 Receive status register / error clear register */
#define PL011_RSR_ECR_FE BIT(0) /* framing error */
#define PL011_RSR_ECR_PE BIT(1) /* parity error */
#define PL011_RSR_ECR_BE BIT(2) /* break error */
#define PL011_RSR_ECR_OE BIT(3) /* overrun error */
#define PL011_RSR_ERROR_MASK (PL011_RSR_ECR_FE | PL011_RSR_ECR_PE | \
PL011_RSR_ECR_BE | PL011_RSR_ECR_OE)
/* PL011 Line Control Register */
#define PL011_LCRH_BRK BIT(0) /* send break */
#define PL011_LCRH_PEN BIT(1) /* enable parity */
#define PL011_LCRH_EPS BIT(2) /* select even parity */
#define PL011_LCRH_STP2 BIT(3) /* select two stop bits */
#define PL011_LCRH_FEN BIT(4) /* enable FIFOs */
#define PL011_LCRH_WLEN_SHIFT 5 /* word length */
#define PL011_LCRH_WLEN_WIDTH 2
#define PL011_LCRH_SPS BIT(7) /* stick parity bit */
#define PL011_LCRH_WLEN_SIZE(x) ((x) - 5)
#define PL011_LCRH_FORMAT_MASK (PL011_LCRH_PEN | PL011_LCRH_EPS | \
PL011_LCRH_SPS | \
PL011_BIT_MASK(PL011_LCRH_WLEN_WIDTH, \
PL011_LCRH_WLEN_SHIFT))
#define PL011_LCRH_PARTIY_EVEN (PL011_LCRH_PEN | PL011_LCRH_EPS)
#define PL011_LCRH_PARITY_ODD (PL011_LCRH_PEN)
#define PL011_LCRH_PARITY_NONE (0)
/* PL011 Control Register */
#define PL011_CR_UARTEN BIT(0) /* enable uart operations */
#define PL011_CR_SIREN BIT(1) /* enable IrDA SIR */
#define PL011_CR_SIRLP BIT(2) /* IrDA SIR low power mode */
#define PL011_CR_LBE BIT(7) /* loop back enable */
#define PL011_CR_TXE BIT(8) /* transmit enable */
#define PL011_CR_RXE BIT(9) /* receive enable */
#define PL011_CR_DTR BIT(10) /* data transmit ready */
#define PL011_CR_RTS BIT(11) /* request to send */
#define PL011_CR_Out1 BIT(12)
#define PL011_CR_Out2 BIT(13)
#define PL011_CR_RTSEn BIT(14) /* RTS hw flow control enable
*/
#define PL011_CR_CTSEn BIT(15) /* CTS hw flow control enable
*/
/* PL011 Interrupt Fifo Level Select Register */
#define PL011_IFLS_TXIFLSEL_SHIFT 0 /* bits 2:0 */
#define PL011_IFLS_TXIFLSEL_WIDTH 3
#define PL011_IFLS_RXIFLSEL_SHIFT 3 /* bits 5:3 */
#define PL011_IFLS_RXIFLSEL_WIDTH 3
/* PL011 Interrupt Mask Set/Clear Register */
#define PL011_IMSC_RIMIM BIT(0) /* RTR modem interrupt mask */
#define PL011_IMSC_CTSMIM BIT(1) /* CTS modem interrupt mask */
#define PL011_IMSC_DCDMIM BIT(2) /* DCD modem interrupt mask */
#define PL011_IMSC_DSRMIM BIT(3) /* DSR modem interrupt mask */
#define PL011_IMSC_RXIM BIT(4) /* receive interrupt mask */
#define PL011_IMSC_TXIM BIT(5) /* transmit interrupt mask */
#define PL011_IMSC_RTIM BIT(6) /* receive timeout interrupt
* mask */
#define PL011_IMSC_FEIM BIT(7) /* framing error interrupt
* mask */
#define PL011_IMSC_PEIM BIT(8) /* parity error interrupt mask
*/
#define PL011_IMSC_BEIM BIT(9) /* break error interrupt mask
*/
#define PL011_IMSC_OEIM BIT(10) /* overrun error interrupt
* mask */
#define PL011_IMSC_ERROR_MASK (PL011_IMSC_FEIM | \
PL011_IMSC_PEIM | PL011_IMSC_BEIM | \
PL011_IMSC_OEIM)
#define PL011_IMSC_MASK_ALL (PL011_IMSC_OEIM | PL011_IMSC_BEIM | \
PL011_IMSC_PEIM | PL011_IMSC_FEIM | \
PL011_IMSC_RIMIM | \
PL011_IMSC_CTSMIM | \
PL011_IMSC_DCDMIM | \
PL011_IMSC_DSRMIM | \
PL011_IMSC_RXIM | PL011_IMSC_TXIM | \
PL011_IMSC_RTIM)
/***************************************************************************
* Private Types
***************************************************************************/
/* UART PL011 register map structure.  The field layout mirrors the
 * register offsets of the PL011 (dr at 0x000, fr at 0x018, ibrd at
 * 0x024, fbrd at 0x028, lcr_h at 0x02c, cr at 0x030, ...).
 */

struct pl011_regs
{
  uint32_t dr;            /* data register */
  union
  {
    uint32_t rsr;         /* receive status register (read) */
    uint32_t ecr;         /* error clear register (write) */
  };
  uint32_t reserved_0[4];
  uint32_t fr;            /* flags register */
  uint32_t reserved_1;
  uint32_t ilpr;          /* IrDA low-power counter */
  uint32_t ibrd;          /* integer baud-rate divisor */
  uint32_t fbrd;          /* fractional baud-rate divisor */
  uint32_t lcr_h;         /* line control */
  uint32_t cr;            /* control register */
  uint32_t ifls;          /* interrupt FIFO level select */
  uint32_t imsc;          /* interrupt mask set/clear */
  uint32_t ris;           /* raw interrupt status */
  uint32_t mis;           /* masked interrupt status */
  uint32_t icr;           /* interrupt clear */
  uint32_t dmacr;         /* DMA control */
};

/* Static hardware description for one PL011 instance */

struct pl011_config
{
  volatile struct pl011_regs *uart;  /* Memory-mapped register base */
  uint32_t sys_clk_freq;             /* UART reference clock (presumably
                                      * Hz -- confirm against callers) */
};

/* Device data structure */

struct pl011_data
{
  uint32_t baud_rate;     /* Configured baud rate */
  bool sbsa;              /* True: SBSA mode -- some register accesses are
                           * skipped (see pl011_irq_rx_ready) */
};

/* Per-port state combining data, configuration and IRQ wiring */

struct pl011_uart_port_s
{
  struct pl011_data data;        /* Modifiable device state */
  struct pl011_config config;    /* Hardware configuration */
  unsigned int irq_num;          /* IRQ number for this UART */
  bool is_console;               /* True: this UART is the console */
};
/***************************************************************************
* Private Functions
***************************************************************************/
/* Enable UART operation by setting the UARTEN bit in the control reg */

static void pl011_enable(const struct pl011_uart_port_s *sport)
{
  sport->config.uart->cr |= PL011_CR_UARTEN;
}
/* Disable UART operation by clearing the UARTEN bit in the control reg */

static void pl011_disable(const struct pl011_uart_port_s *sport)
{
  sport->config.uart->cr &= ~PL011_CR_UARTEN;
}
/* Enable the TX/RX FIFOs by setting the FEN bit in line control */

static void pl011_enable_fifo(const struct pl011_uart_port_s *sport)
{
  sport->config.uart->lcr_h |= PL011_LCRH_FEN;
}
/* Disable the TX/RX FIFOs by clearing the FEN bit in line control */

static void pl011_disable_fifo(const struct pl011_uart_port_s *sport)
{
  sport->config.uart->lcr_h &= ~PL011_LCRH_FEN;
}
/* Program the integer/fractional baud-rate divisors for the given clock
 * and baud rate.  Returns 0 on success or -EINVAL when the computed
 * divisor falls outside the PL011's valid 16.6 fixed-point range.
 */

static int pl011_set_baudrate(const struct pl011_uart_port_s *sport,
                              uint32_t clk, uint32_t baudrate)
{
  const struct pl011_config *config = &sport->config;

  /* Avoiding float calculations, bauddiv is left shifted by 6 */

  uint64_t bauddiv =
      (((uint64_t)clk) << PL011_FBRD_WIDTH) / (baudrate * 16U);

  /* Valid bauddiv value
   * uart_clk (min) >= 16 x baud_rate (max)
   * uart_clk (max) <= 16 x 65535 x baud_rate (min)
   */

  if ((bauddiv < (1U << PL011_FBRD_WIDTH)) ||
      (bauddiv > (65535U << PL011_FBRD_WIDTH)))
    {
      return -EINVAL;
    }

  /* High bits go to the integer divisor, low 6 bits to the fraction */

  config->uart->ibrd = bauddiv >> PL011_FBRD_WIDTH;
  config->uart->fbrd = bauddiv & ((1U << PL011_FBRD_WIDTH) - 1U);

  ARM64_DMB();

  /* In order to internally update the contents of ibrd or fbrd, a
   * lcr_h write must always be performed at the end
   * ARM DDI 0183F, Pg 3-13
   */

  config->uart->lcr_h = config->uart->lcr_h;
  return 0;
}
static void pl011_irq_tx_enable(const struct pl011_uart_port_s *sport)
{
  /* Unmask the transmit interrupt */

  sport->config.uart->imsc |= PL011_IMSC_TXIM;
}
static void pl011_irq_tx_disable(const struct pl011_uart_port_s *sport)
{
  /* Mask the transmit interrupt */

  sport->config.uart->imsc &= ~PL011_IMSC_TXIM;
}
static void pl011_irq_rx_enable(const struct pl011_uart_port_s *sport)
{
  /* Unmask both the receive and the receive-timeout interrupts */

  sport->config.uart->imsc |= PL011_IMSC_RXIM | PL011_IMSC_RTIM;
}
static void pl011_irq_rx_disable(const struct pl011_uart_port_s *sport)
{
  /* Mask both the receive and the receive-timeout interrupts */

  sport->config.uart->imsc &= ~(PL011_IMSC_RXIM | PL011_IMSC_RTIM);
}
static int pl011_irq_tx_complete(const struct pl011_uart_port_s *sport)
{
const struct pl011_config *config = &sport->config;
/* check for TX FIFO empty */
return config->uart->fr & PL011_FR_TXFE;
}
static int pl011_irq_rx_ready(const struct pl011_uart_port_s *sport)
{
const struct pl011_config *config = &sport->config;
const struct pl011_data *data = &sport->data;
if (!data->sbsa && !(config->uart->cr & PL011_CR_RXE))
{
return false;
}
return (config->uart->imsc & PL011_IMSC_RXIM) &&
(!(config->uart->fr & PL011_FR_RXFE));
}
/***************************************************************************
* Name: qemu_pl011_txready
*
* Description:
 *   Return true if the transmit fifo is not full
*
***************************************************************************/
static bool qemu_pl011_txready(struct uart_dev_s *dev)
{
struct pl011_uart_port_s *sport = (struct pl011_uart_port_s *)dev->priv;
const struct pl011_config *config = &sport->config;
struct pl011_data *data = &sport->data;
if (!data->sbsa && !(config->uart->cr & PL011_CR_TXE))
{
return false;
}
return (config->uart->imsc & PL011_IMSC_TXIM) &&
pl011_irq_tx_complete(sport);
}
/***************************************************************************
* Name: qemu_pl011_txempty
*
* Description:
* Return true if the transmit fifo is empty
*
***************************************************************************/
static bool qemu_pl011_txempty(struct uart_dev_s *dev)
{
  /* The transmitter is empty exactly when the TX FIFO empty flag is set */

  return pl011_irq_tx_complete((struct pl011_uart_port_s *)dev->priv);
}
/***************************************************************************
* Name: qemu_pl011_send
*
* Description:
* This method will send one byte on the UART
*
***************************************************************************/
static void qemu_pl011_send(struct uart_dev_s *dev, int ch)
{
  struct pl011_uart_port_s *sport = (struct pl011_uart_port_s *)dev->priv;

  /* Write the character into the data register for transmission */

  sport->config.uart->dr = ch;
}
/***************************************************************************
* Name: qemu_pl011_rxavailable
*
* Description:
* Return true if the receive fifo is not empty
*
***************************************************************************/
static bool qemu_pl011_rxavailable(struct uart_dev_s *dev)
{
struct pl011_uart_port_s *sport = (struct pl011_uart_port_s *)dev->priv;
const struct pl011_config *config = &sport->config;
struct pl011_data *data = &sport->data;
if (!data->sbsa &&
(!(config->uart->cr & PL011_CR_UARTEN) ||
!(config->uart->cr & PL011_CR_RXE)))
{
return false;
}
return (config->uart->fr & PL011_FR_RXFE) == 0U;
}
/***************************************************************************
* Name: qemu_pl011_rxint
*
* Description:
* Call to enable or disable RX interrupts
*
***************************************************************************/
static void qemu_pl011_rxint(struct uart_dev_s *dev, bool enable)
{
  struct pl011_uart_port_s *sport = (struct pl011_uart_port_s *)dev->priv;

  /* Mask or unmask the RX and RX-timeout interrupts as requested */

  if (!enable)
    {
      pl011_irq_rx_disable(sport);
    }
  else
    {
      pl011_irq_rx_enable(sport);
    }
}
/***************************************************************************
* Name: qemu_pl011_txint
*
* Description:
* Call to enable or disable TX interrupts
*
***************************************************************************/
static void qemu_pl011_txint(struct uart_dev_s *dev, bool enable)
{
  struct pl011_uart_port_s *sport = (struct pl011_uart_port_s *)dev->priv;

  /* Mask or unmask the TX interrupt as requested */

  if (!enable)
    {
      pl011_irq_tx_disable(sport);
    }
  else
    {
      pl011_irq_tx_enable(sport);
    }
}
/***************************************************************************
* Name: qemu_pl011_receive
*
* Description:
* Called (usually) from the interrupt level to receive one
* character from the UART. Error bits associated with the
* receipt are provided in the return 'status'.
*
***************************************************************************/
static int qemu_pl011_receive(struct uart_dev_s *dev, unsigned int *status)
{
  struct pl011_uart_port_s *sport = (struct pl011_uart_port_s *)dev->priv;
  unsigned int rx = sport->config.uart->dr;

  /* No receive-error reporting is implemented; status is always zero */

  *status = 0;
  return rx;
}
/***************************************************************************
* Name: qemu_pl011_ioctl
*
* Description:
* All ioctl calls will be routed through this method
* for current qemu configure,
*
***************************************************************************/
static int qemu_pl011_ioctl(struct file *filep, int cmd, unsigned long arg)
{
  UNUSED(filep);
  UNUSED(arg);

  /* No ioctl commands are implemented for the current qemu configuration.
   * TIOCSBRK/TIOCCBRK are listed for BSD compatibility but fall through
   * to the unsupported case.
   */

  switch (cmd)
    {
      case TIOCSBRK: /* BSD compatibility: Turn break on, unconditionally */
      case TIOCCBRK: /* BSD compatibility: Turn break off, unconditionally */
      default:
        break;
    }

  return -ENOTTY;
}
/***************************************************************************
* Name: qemu_pl011_irq_handler (and front-ends)
*
* Description:
 *   This is the common UART interrupt handler.  It should call
* uart_transmitchars or uart_receivechar to perform the appropriate data
* transfers.
*
***************************************************************************/
static int qemu_pl011_irq_handler(int irq, void *context, void *arg)
{
  struct uart_dev_s *udev = (struct uart_dev_s *)arg;
  struct pl011_uart_port_s *sport;

  UNUSED(irq);
  UNUSED(context);
  DEBUGASSERT(udev != NULL && udev->priv != NULL);
  sport = (struct pl011_uart_port_s *)udev->priv;

  /* Service pending receive data first, then refill the transmitter */

  if (pl011_irq_rx_ready(sport))
    {
      uart_recvchars(udev);
    }

  if (qemu_pl011_txready(udev))
    {
      uart_xmitchars(udev);
    }

  return OK;
}
/***************************************************************************
* Name: qemu_pl011_detach
*
* Description:
* Detach UART interrupts. This method is called when the serial port is
* closed normally just before the shutdown method is called. The
* exception is the serial console which is never shutdown.
*
***************************************************************************/
static void qemu_pl011_detach(struct uart_dev_s *dev)
{
  struct pl011_uart_port_s *sport = (struct pl011_uart_port_s *)dev->priv;

  /* Mask the interrupt at the controller, then remove the handler */

  up_disable_irq(sport->irq_num);
  irq_detach(sport->irq_num);
}
/***************************************************************************
* Name: qemu_pl011_attach
*
* Description:
* Configure the UART to operation in interrupt driven mode.
* This method is called when the serial port is opened.
* Normally, this is just after the setup() method is called,
* however, the serial console may operate in
* a non-interrupt driven mode during the boot phase.
*
* RX and TX interrupts are not enabled when by the attach method
* (unless the hardware supports multiple levels of interrupt
* enabling). The RX and TX interrupts are not enabled until
* the txint() and rxint() methods are called.
*
***************************************************************************/
static int qemu_pl011_attach(struct uart_dev_s *dev)
{
  struct pl011_uart_port_s *sport;
  struct pl011_data *data;
  int ret;

  sport = (struct pl011_uart_port_s *)dev->priv;
  data  = &sport->data;

  ret = irq_attach(sport->irq_num, qemu_pl011_irq_handler, dev);
  if (ret == OK)
    {
      /* Configure the interrupt priority and unmask it only after the
       * handler is in place; the original code did this unconditionally,
       * even when irq_attach() had failed.
       */

      arm64_gic_irq_set_priority(sport->irq_num, IRQ_TYPE_LEVEL, 0);
      up_enable_irq(sport->irq_num);

      /* In SBSA mode the UART is assumed to be already enabled by
       * firmware; otherwise enable it here.
       */

      if (!data->sbsa)
        {
          pl011_enable(sport);
        }
    }
  else
    {
      sinfo("error ret=%d\n", ret);
    }

  return ret;
}
/***************************************************************************
* Name: qemu_pl011_shutdown
*
* Description:
* Disable the UART. This method is called when the serial
* port is closed
*
***************************************************************************/
static void qemu_pl011_shutdown(struct uart_dev_s *dev)
{
  UNUSED(dev);

  /* Nothing is disabled here; log that this path was reached */

  sinfo("%s: call unexpected\n", __func__);
}
/***************************************************************************
 * Name: qemu_pl011_setup
 *
 * Description:
 *   Configure the UART: baud rate, character format, FIFOs, and interrupt
 *   masks.  Called when the serial port is opened (and once early for the
 *   console).  Interrupts are disabled around the register sequence.
 *
 * Returned Value:
 *   Zero on success; a negated errno value (from pl011_set_baudrate) on
 *   failure.
 *
 ***************************************************************************/

static int qemu_pl011_setup(struct uart_dev_s *dev)
{
  struct pl011_uart_port_s *sport = (struct pl011_uart_port_s *)dev->priv;
  const struct pl011_config *config = &sport->config;
  struct pl011_data *data = &sport->data;
  int ret;
  uint32_t lcrh;
  irqstate_t i_flags;

  i_flags = up_irq_save();

  /* If working in SBSA mode, we assume that UART is already configured,
   * or does not require configuration at all (if UART is emulated by
   * virtualization software).
   */

  if (!data->sbsa)
    {
      /* disable the uart */

      pl011_disable(sport);
      pl011_disable_fifo(sport);

      /* Set baud rate */

      ret = pl011_set_baudrate(sport, config->sys_clk_freq,
                               data->baud_rate);
      if (ret != 0)
        {
          up_irq_restore(i_flags);
          return ret;
        }

      /* Setting the default character format: 8 data bits, no parity.
       * BIT(0)/BIT(7) are BRK (send break) and SPS (stick parity) in
       * LCR_H per the PL011 TRM (ARM DDI 0183) -- both cleared here.
       */

      lcrh = config->uart->lcr_h & ~(PL011_LCRH_FORMAT_MASK);
      lcrh &= ~(BIT(0) | BIT(7));
      lcrh |= PL011_LCRH_WLEN_SIZE(8) << PL011_LCRH_WLEN_SHIFT;
      config->uart->lcr_h = lcrh;

      /* Enabling the FIFOs */

      pl011_enable_fifo(sport);
    }

  /* initialize all IRQs as masked, and clear any pending interrupts */

  config->uart->imsc = 0U;
  config->uart->icr = PL011_IMSC_MASK_ALL;

  if (!data->sbsa)
    {
      /* Disable DMA, then enable RX/TX.  BIT(14)/BIT(15) are RTSEn/CTSEn
       * (hardware flow control) and BIT(1) is SIREN in CR per the PL011
       * TRM -- all disabled here.
       */

      config->uart->dmacr = 0U;
      ARM64_ISB();
      config->uart->cr &= ~(BIT(14) | BIT(15) | BIT(1));
      config->uart->cr |= PL011_CR_RXE | PL011_CR_TXE;
      ARM64_ISB();
    }

  up_irq_restore(i_flags);
  return 0;
}
/***************************************************************************
* Private Data
***************************************************************************/
/* Serial driver UART operations */
static const struct uart_ops_s g_uart_ops =
{
  .setup          = qemu_pl011_setup,       /* Configure hardware */
  .shutdown       = qemu_pl011_shutdown,    /* Disable (not expected) */
  .attach         = qemu_pl011_attach,      /* Attach and enable IRQ */
  .detach         = qemu_pl011_detach,      /* Disable and detach IRQ */
  .ioctl          = qemu_pl011_ioctl,       /* No commands supported */
  .receive        = qemu_pl011_receive,     /* Read one character */
  .rxint          = qemu_pl011_rxint,       /* Enable/disable RX IRQ */
  .rxavailable    = qemu_pl011_rxavailable, /* RX FIFO not empty? */
#ifdef CONFIG_SERIAL_IFLOWCONTROL
  .rxflowcontrol  = NULL,                   /* No input flow control */
#endif
  .send           = qemu_pl011_send,        /* Write one character */
  .txint          = qemu_pl011_txint,       /* Enable/disable TX IRQ */
  .txready        = qemu_pl011_txready,     /* TX FIFO can accept data? */
  .txempty        = qemu_pl011_txempty,     /* TX FIFO fully drained? */
};
/* This describes the state of the uart1 port. */
static struct pl011_uart_port_s g_uart1priv =
{
  .data =
    {
      .baud_rate = CONFIG_UART1_BAUD,
      .sbsa      = false,           /* Full PL011, not the SBSA subset */
    },

  .config =
    {
      .uart = (volatile struct pl011_regs *)CONFIG_QEMU_UART_BASE,
      .sys_clk_freq = 24000000,     /* 24 MHz reference clock -- assumed
                                     * for the qemu virt machine; TODO
                                     * confirm against the machine model */
    },

  .irq_num    = CONFIG_QEMU_UART_IRQ,
  .is_console = 1,                  /* This port serves as the console */
};
/* I/O buffers */
#ifdef CONFIG_QEMU_UART_PL011

/* RX/TX ring buffers handed to the upper-half serial driver */

static char g_uart1rxbuffer[CONFIG_UART1_RXBUFSIZE];
static char g_uart1txbuffer[CONFIG_UART1_TXBUFSIZE];

/* uart_dev_s instance binding the buffers, the operations table, and the
 * PL011 private state together.
 */

static struct uart_dev_s g_uart1port =
{
  .recv =
    {
      .size   = CONFIG_UART1_RXBUFSIZE,
      .buffer = g_uart1rxbuffer,
    },

  .xmit =
    {
      .size   = CONFIG_UART1_TXBUFSIZE,
      .buffer = g_uart1txbuffer,
    },

  .ops  = &g_uart_ops,
  .priv = &g_uart1priv,
};
#endif
/***************************************************************************
* Public Functions
***************************************************************************/
/***************************************************************************
* Name: qemu_earlyserialinit
*
* Description:
* Performs the low level UART initialization early in
* debug so that the serial console will be available
 *   during bootup. This must be called before arm64_serialinit.
*
***************************************************************************/
void qemu_earlyserialinit(void)
{
  /* NOTE: This function assumes that low level hardware configuration
   * -- including all clocking and pin configuration -- was performed
   * earlier in the boot sequence.
   */

  /* Enable the console UART.  The other UARTs will be initialized if and
   * when they are first opened.
   */

#ifdef CONSOLE_DEV
  CONSOLE_DEV.isconsole = true;
  qemu_pl011_setup(&CONSOLE_DEV);
#endif
}
/* Used to assure mutually exclusive access up_putc() */
/* static sem_t g_putc_lock = SEM_INITIALIZER(1); */
/***************************************************************************
* Name: up_putc
*
* Description:
* Provide priority, low-level access to support OS debug
* writes
*
***************************************************************************/
int up_putc(int ch)
{
  /* Check for LF: expand to CR-LF for terminal output */

  if (ch == '\n')
    {
      /* Add CR before the LF */

      up_lowputc('\r');
    }

  up_lowputc((uint8_t)ch);
  return ch;
}
/***************************************************************************
* Name: arm64_serialinit
*
* Description:
* Register serial console and serial ports. This assumes
 *   that qemu_earlyserialinit was called previously.
*
***************************************************************************/
void arm64_serialinit(void)
{
  int ret;

  /* Register the console device.  NOTE(review): failures are logged at
   * info level only and boot continues without a console.
   */

  ret = uart_register("/dev/console", &CONSOLE_DEV);
  if (ret < 0)
    {
      sinfo("error at register dev/console, ret =%d\n", ret);
    }

  /* Register the first serial port -- presumably TTYS0_DEV aliases the
   * same UART as CONSOLE_DEV here; verify against the device macros.
   */

  ret = uart_register("/dev/ttyS0", &TTYS0_DEV);
  if (ret < 0)
    {
      sinfo("error at register dev/ttyS0, ret =%d\n", ret);
    }
}
#else /* USE_SERIALDRIVER */
/***************************************************************************
* Public Functions
***************************************************************************/
int up_putc(int ch)
{
  /* Check for LF: expand to CR-LF for terminal output (polled build,
   * no serial driver).
   */

  if (ch == '\n')
    {
      /* Add CR before the LF */

      up_lowputc('\r');
    }

  up_lowputc((uint8_t)ch);
  return ch;
}
#endif /* USE_SERIALDRIVER */

View file

@ -0,0 +1,75 @@
/****************************************************************************
* arch/arm64/src/qemu/qemu_serial.h
*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership. The
* ASF licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the
* License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
****************************************************************************/
#ifndef __ARCH_ARM64_SRC_QEMU_QEMU_SERIAL_H
#define __ARCH_ARM64_SRC_QEMU_QEMU_SERIAL_H
/****************************************************************************
* Included Files
****************************************************************************/
#include <nuttx/config.h>
#include "arm64_internal.h"
#include "arm64_gic.h"
/****************************************************************************
* Pre-processor Definitions
****************************************************************************/
/****************************************************************************
* Public Types
****************************************************************************/
/****************************************************************************
* Inline Functions
****************************************************************************/
#ifndef __ASSEMBLY__
/****************************************************************************
* Public Data
****************************************************************************/
#ifdef CONFIG_ARCH_CHIP_QEMU_A53

/* PL011 UART of the qemu "virt" machine: MMIO base address and its SPI
 * interrupt number (GIC SPI base + 1).
 */

#define CONFIG_QEMU_UART_BASE 0x9000000
#define CONFIG_QEMU_UART_IRQ  (GIC_SPI_INT_BASE + 1)
#endif
/****************************************************************************
* Public Function Prototypes
****************************************************************************/
/****************************************************************************
* Name: qemu_earlyserialinit
*
* Description:
* Performs the low level UART initialization early in debug so that the
* serial console will be available during bootup. This must be called
 *   before arm64_serialinit.
*
****************************************************************************/
#ifdef USE_EARLYSERIALINIT
void qemu_earlyserialinit(void);
#endif
#endif /* __ASSEMBLY__ */
#endif /* __ARCH_ARM64_SRC_QEMU_QEMU_SERIAL_H */

View file

@ -1606,6 +1606,15 @@ config ARCH_BOARD_SABRE_6QUAD
This options selects support for NuttX on the NXP/Freescale Sabre
board featuring the iMX 6Quad CPU.
config ARCH_BOARD_QEMU_A53
bool "Qemu A53 board"
depends on ARCH_CHIP_QEMU_A53
select ARCH_HAVE_IRQBUTTONS
---help---
		This option selects support for NuttX on the QEMU A53
		board featuring the qemu Cortex-A53 CPU.
config ARCH_BOARD_SAMA5D2_XULT
bool "Atmel SAMA5D2 Xplained Ultra development board"
depends on ARCH_CHIP_ATSAMA5D27
@ -2648,6 +2657,7 @@ config ARCH_BOARD
default "s32k146evb" if ARCH_BOARD_S32K146EVB
default "s32k148evb" if ARCH_BOARD_S32K148EVB
default "sabre-6quad" if ARCH_BOARD_SABRE_6QUAD
default "qemu-a53" if ARCH_BOARD_QEMU_A53
default "sama5d2-xult" if ARCH_BOARD_SAMA5D2_XULT
default "giant-board" if ARCH_BOARD_GIANT_BOARD
default "sama5d3x-ek" if ARCH_BOARD_SAMA5D3X_EK
@ -2820,6 +2830,9 @@ endif
if ARCH_BOARD_SABRE_6QUAD
source "boards/arm/imx6/sabre-6quad/Kconfig"
endif
if ARCH_BOARD_QEMU_A53
source "boards/arm64/qemu/qemu-a53/Kconfig"
endif
if ARCH_BOARD_IMXRT1020_EVK
source "boards/arm/imxrt/imxrt1020-evk/Kconfig"
endif

View file

@ -0,0 +1,7 @@
#
# For a description of the syntax of this configuration file,
# see the file kconfig-language.txt in the NuttX tools repository.
#
if ARCH_BOARD_QEMU_A53
endif

View file

@ -0,0 +1,312 @@
README.txt
==========
This board configuration will use QEMU to emulate a generic Cortex-A53
hardware platform and provides support for these devices:
- GICv3 interrupt controller
- ARM Generic Timer
- PL011 UART controller
Contents
========
- Getting Started
- Status
- Platform Features
- Debugging with QEMU
- FPU Support and Performance
- SMP Support
- References
Getting Started
===============
1. Compile Toolchain
1.1 Host environment
GNU/Linux: Ubuntu 18.04 or greater
1.2 Download and Install
$ wget https://developer.arm.com/-/media/Files/downloads/gnu/11.2-2022.02/binrel/gcc-arm-11.2-2022.02-x86_64-aarch64-none-elf.tar.xz
$ xz -d gcc-arm-11.2-2022.02-x86_64-aarch64-none-elf.tar.xz
$ tar xf gcc-arm-11.2-2022.02-x86_64-aarch64-none-elf.tar
Put gcc-arm-11.2-2022.02-x86_64-aarch64-none-elf/bin/ to your host PATH environment variable, like:
$ export PATH=$PATH:/opt/software/arm/linaro-toolchain/gcc-arm-11.2-2022.02-x86_64-aarch64-none-elf/bin
check the toolchain:
$ aarch64-none-elf-gcc -v
2. Install QEMU
In Ubuntu 18.04(or greater), install qemu:
$ sudo apt-get install qemu-system-arm qemu-efi-aarch64 qemu-utils
And make sure install is properly:
$ qemu-system-aarch64 --help
3. Configuring and running
3.1 Single Core
Configuring NuttX and compile:
$ ./tools/configure.sh -l qemu-a53:nsh
$ make
Running with qemu
$ qemu-system-aarch64 -cpu cortex-a53 -nographic \
-machine virt,virtualization=on,gic-version=3 \
-net none -chardev stdio,id=con,mux=on -serial chardev:con \
-mon chardev=con,mode=readline -kernel ./nuttx
3.2 SMP
Configuring NuttX and compile:
$ ./tools/configure.sh -l qemu-a53:nsh_smp
$ make
  Running with qemu
$ qemu-system-aarch64 -cpu cortex-a53 -smp 4 -nographic \
-machine virt,virtualization=on,gic-version=3 \
-net none -chardev stdio,id=con,mux=on -serial chardev:con \
-mon chardev=con,mode=readline -kernel ./nuttx
Note:
1. Make sure the aarch64-none-elf toolchain install PATH has been added to environment variable
  2. To quit QEMU, press Ctrl-A, then X
  3. The NuttX default core count is 4. Setting CONFIG_SMP_NCPUS > 4 together with the
     matching qemu -smp option will boot more cores. For qemu, the core limit is 32.
Status
======
2022-07-01:
  1. It is very strange that the signal test of ostest PASSES on a physical Ubuntu PC
     but not on Ubuntu running inside VMWare. On the physical Ubuntu PC, I have run ostest
     at least 10 times and never seen the crash, but it crashes almost every time
     ostest is run on the virtual Ubuntu in VMWare.
I check the fail point. It's seem at signal routine to access another CPU's task context reg
will get a NULL pointer, but I watch the task context with GDB, everything is OK.
So maybe this is a SMP cache synchronize issue? But I have done cache synchronize
operation at thread switch and how to explain why the crash not happening at
Physical Ubuntu PC?
So maybe this is a qemu issue at VMWare. I am planning to run
the arm64 to real hardware platform like IMX8 and will check the issue again
2022-06-12:
1. SMP is support at QEMU. Add psci interface, armv8 cache operation(data cache)
and smccc support. The system can run into nsh shell, SMP test is PASSED, but
ostest crash at signal testing
2022-05-22:
Arm64 support version for NuttX is Ready, These Features supported:
  1. Cortex-A53 single core support: With the support of GICv3,
     the ARM Generic Timer and the PL011 UART, the system can run into the nsh shell.
     Running ostest seems to PASS.
  2. qemu-a53 board configuration support: the qemu-a53 board can be configured
     and compiled, and run with qemu-system-aarch64
     on Ubuntu 18.04.
  3. FPU support for armv8-a: FPU context switching in NEON/floating-point
     TRAP is supported. FPU register saving at vfork and an independent
     FPU context for signal routines were considered, but more testing
     needs to be done.
Platform Features
=================
The following hardware features are supported:
+--------------+------------+----------------------+
| Interface | Controller | Driver/Component |
+==============+============+======================+
| GIC | on-chip | interrupt controller |
+--------------+------------+----------------------+
| PL011 UART | on-chip | serial port |
+--------------+------------+----------------------+
| ARM TIMER | on-chip | system clock |
+--------------+------------+----------------------+
The kernel currently does not support other hardware features on this
qemu platform.
Debugging with QEMU
===================
The nuttx ELF image can be debugged with QEMU.
1. To debug the nuttx (ELF) with symbols, make sure the following change have
applied to defconfig.
+CONFIG_DEBUG_SYMBOLS=y
2. Run QEMU(at shell terminal 1)
Single Core
$ qemu-system-aarch64 -cpu cortex-a53 -nographic -machine virt,virtualization=on,gic-version=3 \
-net none -chardev stdio,id=con,mux=on -serial chardev:con -mon chardev=con,mode=readline \
-kernel ./nuttx -S -s
SMP
$ qemu-system-aarch64 -cpu cortex-a53 -smp 4 -nographic -machine virt,virtualization=on,gic-version=3 \
-net none -chardev stdio,id=con,mux=on -serial chardev:con -mon chardev=con,mode=readline \
-kernel ./nuttx -S -s
3. Run gdb with TUI, connect to QEMU, load nuttx and continue (at shell terminal 2)
$ aarch64-none-elf-gdb -tui --eval-command='target remote localhost:1234' nuttx
(gdb) set debug aarch64
(gdb) c
Continuing.
^C
Program received signal SIGINT, Interrupt.
arch_cpu_idle () at common/arm64_cpu_idle.S:37
(gdb)
(gdb) where
#0 arch_cpu_idle () at common/arm64_cpu_idle.S:37
#1 0x00000000402823ec in nx_start () at init/nx_start.c:742
#2 0x0000000040280148 in arm64_boot_primary_c_routine () at common/arm64_boot.c:184
#3 0x00000000402a5bf8 in switch_el () at common/arm64_head.S:201
(gdb)
SMP Case
Thread 1 received signal SIGINT, Interrupt.
arch_cpu_idle () at common/arm64_cpu_idle.S:37
(gdb) info threads
Id Target Id Frame
* 1 Thread 1 (CPU#0 [halted ]) arch_cpu_idle () at common/arm64_cpu_idle.S:37
2 Thread 2 (CPU#1 [halted ]) arch_cpu_idle () at common/arm64_cpu_idle.S:37
3 Thread 3 (CPU#2 [halted ]) arch_cpu_idle () at common/arm64_cpu_idle.S:37
4 Thread 4 (CPU#3 [halted ]) arch_cpu_idle () at common/arm64_cpu_idle.S:37
(gdb)
Note:
1. it will make your debugging more easier in source level if you setting
CONFIG_DEBUG_FULLOPT=n. but there is a risk of stack overflow when the
option is disabled. Just enlarging your stack size will avoid the
issue (eg. enlarging CONFIG_DEFAULT_TASK_STACKSIZE)
2. TODO: ARMv8-A Supporting for tools/nuttx-gdbinit
FPU Support and Performance
===========================
I was using FPU trap to handle FPU context switch. For threads accessing
the FPU (FPU instructions or registers), a trap will happen at this thread,
the FPU context will be saved/restore for the thread at the trap handler.
It will improve performance for thread switch since it's not to save/restore
the FPU context (almost 512 bytes) at the thread switch anymore. But some issue
need to be considered:
1. Floating point argument passing issue
In many cases, the FPU trap is triggered by va_start() that copies
the content of FP registers used for floating point argument passing
into the va_list object in case there were actual float arguments from
the caller. But In practice this is almost never the case.
Seeing the save_count/restore_count at the g_cpu_fpu_ctx, which will
  be increased when saving/restoring the FPU context. After running ostest,
we can see the count with GDB:
(gdb) p g_cpu_fpu_ctx
$1 = {{fpu_owner = 0x0, idle_thread = 0x402b3110 <g_idletcb>,
save_count = 1293, restore_count = 2226, switch_count = 4713,
exe_depth_count = 0}}
(gdb)
adding -mgeneral-regs-only option will make compiler not use the FPU
register, we can use the following patch to syslog:
diff --git a/libs/libc/syslog/Make.defs b/libs/libc/syslog/Make.defs
index c58fb45512..acac6febaa
--- a/libs/libc/syslog/Make.defs
+++ b/libs/libc/syslog/Make.defs
@@ -26,3 +26,4 @@ CSRCS += lib_syslog.c lib_setlogmask.c
DEPPATH += --dep-path syslog
VPATH += :syslog
+syslog/lib_syslog.c_CFLAGS += -mgeneral-regs-only
With the option to make NuttX and booting. After runing ostest, see
the count with GDB again:
(gdb) p g_cpu_fpu_ctx
$1 = {{fpu_owner = 0x0, idle_thread = 0x402b3110 <g_idletcb>, save_count = 141,
restore_count = 170, switch_count = 4715, exe_depth_count = 0}}
(gdb)
it's only 141/170 for saving/restoring FPU context, which is 1293/2226 before
add this compile option. Almost all of FPU accessing switch is argument passing
at the syslog.
I cannot commit the patch for NuttX mainline because it's very special case
since ostest is using syslog for lots of information printing. but this is
a clue for FPU performance analysis. va_list object is using for many C code to
handle argument passing, but if it's not passing floating point argument indeed.
Add the option to your code maybe increase FPU performance
2. FPU trap at IRQ handler
it's probably need to handle FPU trap at IRQ routine. Exception_depth is
handling for this case, it will inc/dec at enter/leave exception. If the
exception_depth > 1, that means an exception occurring when another exception
is executing, the present implement is to switch FPU context to idle thread,
it will handle most case for calling printf-like rountine at IRQ routine.
  But in fact, this case makes interrupt processing time uncertain, since
  the cost of trap exception handling is itself uncertain. It would be best to add
"-mgeneral-regs-only" option to compile the IRQ code avoiding accessing FP
register.
  If it is necessary for an exception routine to use the FPU, calling functions to
  save/restore the FPU context directly may be a solution. The Linux kernel introduces
  the kernel_neon_begin/kernel_neon_end functions for this case. Similar functions will
  be added to NuttX if this issue needs to be handled.
SMP Support
===========
1. Booting
Primary core call sequence
arm64_start
->arm64_boot_primary_c_routine
->arm64_chip_boot
->set init TBBR and Enable MMU
->nx_start
->OS component initialize
->Initialize GIC: GICD and Primary core GICR
->nx_smp_start
for every CPU core
->up_cpu_start
->arm64_start_cpu(call PCSI to boot CPU)
->waiting for every core to boot
->nx_bringup
Secondary Core call sequence
arm64_start
->arm64_boot_secondary_c_routine
->Enable MMU
->Initialize GIC: Secondary core GICR
->Notify Primary core booting is Ready
->nx_idle_trampoline
2. interrupt
SGI
SGI_CPU_PAUSE: for core pause request, for every core
PPI
ARM_ARCH_TIMER_IRQ: timer interrupt, handle by primary Core
SPI
CONFIG_QEMU_UART_IRQ: serial driver interrupt, handle by primary Core
3. Timer
The origin design for ARMv8-A timer is assigned private timer to
every PE(CPU core), the ARM_ARCH_TIMER_IRQ is a PPI so it's
should be enabled at every core.
But for NuttX, it's design only for primary core to handle timer
interrupt and call nxsched_process_timer at timer tick mode.
So we need only enable timer for primary core
IMX6 use GPT which is a SPI rather than generic timer to handle
timer interrupt
References
===========
1. (ID050815) ARM® Cortex®-A Series - Programmers Guide for ARMv8-A
2. (ID020222) Arm® Architecture Reference Manual - for A profile architecture
3. (ARM062-948681440-3280) Armv8-A Instruction Set Architecture
4. AArch64 Exception and Interrupt Handling
5. AArch64 Programmer's Guides Generic Timer
6. Arm Generic Interrupt Controller v3 and v4 Overview
7. Arm® Generic Interrupt Controller Architecture Specification GIC architecture version 3 and version 4
8. (DEN0022D.b) Arm Power State Coordination Interface Platform Design Document

View file

@ -0,0 +1,65 @@
#
# This file is autogenerated: PLEASE DO NOT EDIT IT.
#
# You can use "make menuconfig" to make any modifications to the installed .config file.
# You can then do "make savedefconfig" to generate a new defconfig file that includes your
# modifications.
#
CONFIG_ARCH="arm64"
CONFIG_ARCH_ARM64=y
CONFIG_ARCH_BOARD="qemu-a53"
CONFIG_ARCH_BOARD_QEMU_A53=y
CONFIG_ARCH_CHIP="qemu"
CONFIG_ARCH_CHIP_QEMU=y
CONFIG_ARCH_CHIP_QEMU_A53=y
CONFIG_ARCH_INTERRUPTSTACK=4096
CONFIG_BUILTIN=y
CONFIG_DEBUG_ASSERTIONS=y
CONFIG_DEBUG_ERROR=y
CONFIG_DEBUG_FEATURES=y
CONFIG_DEBUG_FULLOPT=y
CONFIG_DEBUG_INFO=y
CONFIG_DEBUG_SCHED=y
CONFIG_DEBUG_SCHED_ERROR=y
CONFIG_DEBUG_SCHED_INFO=y
CONFIG_DEBUG_SCHED_WARN=y
CONFIG_DEBUG_SYMBOLS=y
CONFIG_DEBUG_WARN=y
CONFIG_DEFAULT_TASK_STACKSIZE=8192
CONFIG_DEV_ZERO=y
CONFIG_EXAMPLES_HELLO=y
CONFIG_EXPERIMENTAL=y
CONFIG_FS_PROCFS=y
CONFIG_FS_ROMFS=y
CONFIG_HAVE_CXX=y
CONFIG_HAVE_CXXINITIALIZE=y
CONFIG_IDLETHREAD_STACKSIZE=8192
CONFIG_INIT_ENTRYPOINT="nsh_main"
CONFIG_INTELHEX_BINARY=y
CONFIG_NSH_ARCHINIT=y
CONFIG_NSH_BUILTIN_APPS=y
CONFIG_NSH_FILEIOSIZE=512
CONFIG_NSH_READLINE=y
CONFIG_NSH_ROMFSETC=y
CONFIG_PREALLOC_TIMERS=4
CONFIG_PTHREAD_STACK_MIN=8192
CONFIG_QEMU_UART_PL011=y
CONFIG_RAMLOG=y
CONFIG_RAM_SIZE=134217728
CONFIG_RAM_START=0x40000000
CONFIG_RAW_BINARY=y
CONFIG_READLINE_CMD_HISTORY=y
CONFIG_RR_INTERVAL=200
CONFIG_SCHED_HPWORK=y
CONFIG_SCHED_HPWORKPRIORITY=192
CONFIG_SPINLOCK=y
CONFIG_STACK_COLORATION=y
CONFIG_START_MONTH=3
CONFIG_START_YEAR=2022
CONFIG_SYMTAB_ORDEREDBYNAME=y
CONFIG_SYSTEM_NSH=y
CONFIG_SYSTEM_SYSTEM=y
CONFIG_TESTING_GETPRIME=y
CONFIG_TESTING_OSTEST=y
CONFIG_UART1_SERIAL_CONSOLE=y
CONFIG_USEC_PER_TICK=1000

View file

@ -0,0 +1,66 @@
#
# This file is autogenerated: PLEASE DO NOT EDIT IT.
#
# You can use "make menuconfig" to make any modifications to the installed .config file.
# You can then do "make savedefconfig" to generate a new defconfig file that includes your
# modifications.
#
# CONFIG_ARCH_FPU is not set
CONFIG_ARCH="arm64"
CONFIG_ARCH_ARM64=y
CONFIG_ARCH_BOARD="qemu-a53"
CONFIG_ARCH_BOARD_QEMU_A53=y
CONFIG_ARCH_CHIP="qemu"
CONFIG_ARCH_CHIP_QEMU=y
CONFIG_ARCH_CHIP_QEMU_A53=y
CONFIG_ARCH_INTERRUPTSTACK=8192
CONFIG_BUILTIN=y
CONFIG_DEBUG_ASSERTIONS=y
CONFIG_DEBUG_ERROR=y
CONFIG_DEBUG_FEATURES=y
CONFIG_DEBUG_FULLOPT=y
CONFIG_DEBUG_INFO=y
CONFIG_DEBUG_SCHED=y
CONFIG_DEBUG_SCHED_ERROR=y
CONFIG_DEBUG_SCHED_INFO=y
CONFIG_DEBUG_SCHED_WARN=y
CONFIG_DEBUG_SYMBOLS=y
CONFIG_DEBUG_WARN=y
CONFIG_DEFAULT_TASK_STACKSIZE=16384
CONFIG_DEV_ZERO=y
CONFIG_EXAMPLES_HELLO=y
CONFIG_EXPERIMENTAL=y
CONFIG_FS_PROCFS=y
CONFIG_FS_ROMFS=y
CONFIG_IDLETHREAD_STACKSIZE=16384
CONFIG_INIT_ENTRYPOINT="nsh_main"
CONFIG_INTELHEX_BINARY=y
CONFIG_NSH_ARCHINIT=y
CONFIG_NSH_BUILTIN_APPS=y
CONFIG_NSH_FILEIOSIZE=512
CONFIG_NSH_READLINE=y
CONFIG_NSH_ROMFSETC=y
CONFIG_PREALLOC_TIMERS=4
CONFIG_PTHREAD_STACK_MIN=16384
CONFIG_QEMU_UART_PL011=y
CONFIG_RAMLOG=y
CONFIG_RAM_SIZE=134217728
CONFIG_RAM_START=0x40000000
CONFIG_RAW_BINARY=y
CONFIG_READLINE_CMD_HISTORY=y
CONFIG_RR_INTERVAL=200
CONFIG_SCHED_HPWORK=y
CONFIG_SCHED_HPWORKPRIORITY=192
CONFIG_SMP=y
CONFIG_STACK_COLORATION=y
CONFIG_START_MONTH=3
CONFIG_START_YEAR=2022
CONFIG_SYMTAB_ORDEREDBYNAME=y
CONFIG_SYSTEM_NSH=y
CONFIG_SYSTEM_SYSTEM=y
CONFIG_SYSTEM_TASKSET=y
CONFIG_TESTING_GETPRIME=y
CONFIG_TESTING_OSTEST=y
CONFIG_TESTING_SMP=y
CONFIG_UART1_SERIAL_CONSOLE=y
CONFIG_USEC_PER_TICK=1000

View file

@ -0,0 +1,59 @@
/****************************************************************************
* boards/arm64/qemu/qemu-a53/include/board_memorymap.h
*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership. The
* ASF licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the
* License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
****************************************************************************/
#ifndef __BOARDS_ARM64_QEMU_QEMU_A53_INCLUDE_BOARD_MEMORYMAP_H
#define __BOARDS_ARM64_QEMU_QEMU_A53_INCLUDE_BOARD_MEMORYMAP_H

/****************************************************************************
 * Included Files
 ****************************************************************************/

#include <nuttx/config.h>

/****************************************************************************
 * Pre-processor Definitions
 ****************************************************************************/

/* No board-specific memory-map definitions are required yet; this header
 * is a placeholder so that qemu-a53 board code has a conventional place
 * to add memory-map symbols later.
 */

/****************************************************************************
 * Public Data
 ****************************************************************************/

#ifndef __ASSEMBLY__
#undef EXTERN
#if defined(__cplusplus)
#define EXTERN extern "C"
extern "C"
{
#else
#define EXTERN extern
#endif

/****************************************************************************
 * Public Function Prototypes
 ****************************************************************************/

#undef EXTERN
#if defined(__cplusplus)
}
#endif

#endif /* __ASSEMBLY__ */
#endif /* __BOARDS_ARM64_QEMU_QEMU_A53_INCLUDE_BOARD_MEMORYMAP_H */

View file

@ -0,0 +1,48 @@
############################################################################
# boards/arm64/qemu/qemu-a53/scripts/Make.defs
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership. The
# ASF licenses this file to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
############################################################################
include $(TOPDIR)/.config
include $(TOPDIR)/tools/Config.mk
include $(TOPDIR)/arch/arm64/src/Toolchain.defs

# Linker script: boots from DRAM at the u-boot load address (see dramboot.ld)

LDSCRIPT = dramboot.ld

ARCHSCRIPT += $(BOARD_DIR)$(DELIM)scripts$(DELIM)$(LDSCRIPT)

# Compiler/assembler flags assembled from the arch-level Toolchain.defs
# variables; EXTRAFLAGS lets the top-level build inject extra options.

CFLAGS := $(ARCHCFLAGS) $(ARCHOPTIMIZATION) $(ARCHCPUFLAGS) $(ARCHINCLUDES) $(ARCHDEFINES) $(EXTRAFLAGS) -pipe
CPICFLAGS = $(ARCHPICFLAGS) $(CFLAGS)
CXXFLAGS := $(ARCHCXXFLAGS) $(ARCHOPTIMIZATION) $(ARCHCPUFLAGS) $(ARCHXXINCLUDES) $(ARCHDEFINES) $(EXTRAFLAGS) -pipe
CXXPICFLAGS = $(ARCHPICFLAGS) $(CXXFLAGS)
CPPFLAGS := $(ARCHINCLUDES) $(ARCHDEFINES) $(EXTRAFLAGS)
AFLAGS := $(CFLAGS) -D__ASSEMBLY__

# NXFLAT module definitions

NXFLATLDFLAGS1 = -r -d -warn-common
NXFLATLDFLAGS2 = $(NXFLATLDFLAGS1) -T$(TOPDIR)$(DELIM)binfmt$(DELIM)libnxflat$(DELIM)gnu-nxflat-pcrel.ld -no-check-sections
LDNXFLATFLAGS = -e main -s 2048

# ELF module definitions
# NOTE(review): -mlong-calls is an arm32 GCC option; verify the aarch64
# toolchain accepts it (it may be a leftover from an arm32 Make.defs).

CELFFLAGS = $(CFLAGS) -mlong-calls # --target1-abs
CXXELFFLAGS = $(CXXFLAGS) -mlong-calls # --target1-abs

LDELFFLAGS = -r -e main
LDELFFLAGS += -T $(call CONVERT_PATH,$(TOPDIR)/binfmt/libelf/gnu-elf.ld)

View file

@ -0,0 +1,126 @@
/****************************************************************************
* boards/arm64/qemu/qemu-a53/scripts/dramboot.ld
*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership. The
* ASF licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the
* License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
****************************************************************************/
OUTPUT_ARCH(aarch64)

/* Entry symbol provided by the arm64 startup code */

ENTRY(__start)

/* All sections are placed into a single loadable PT_LOAD segment */

PHDRS
{
  text PT_LOAD ;
}

SECTIONS
{
  . = 0x40280000;          /* uboot load address */

  _start = .;

  .text : {
    _stext = .;            /* Text section */
    *(.text)
    *(.text.cold)
    *(.text.unlikely)
    *(.fixup)
    *(.gnu.warning)
  } :text = 0x9090         /* fill pattern for gaps.  NOTE(review): 0x9090
                            * is the x86 NOP pair; looks copied from an x86
                            * script -- confirm it is intended for aarch64 */

  . = ALIGN(4096);

  /* C++/compiler static constructor table, bounded by _sinit/_einit */

  .init_section : {
    _sinit = ABSOLUTE(.);
    *(.init_array .init_array.*)
    _einit = ABSOLUTE(.);
  }

  . = ALIGN(4096);

  /* Exception vector tables; KEEP() prevents garbage collection */

  .vector : {
    _vector_start = .;
    KEEP(*(.exc_vector_table))
    KEEP(*(".exc_vector_table.*"))
    KEEP(*(.vectors))
    _vector_end = .;
  } :text

  . = ALIGN(4096);
  _etext = .;              /* End_1 of .text */
  _sztext = _etext - _stext;

  . = ALIGN(4096);

  .rodata : {
    _srodata = .;          /* Read-only data */
    *(.rodata)
    *(.rodata.*)
    *(.data.rel.ro)
    *(.data.rel.ro.*)
  } :text

  . = ALIGN(4096);
  _erodata = .;            /* End of read-only data */
  _szrodata = _erodata - _srodata;
  _eronly = .;             /* End of read-only data */

  . = ALIGN(4096);

  .data : {                /* Data */
    _sdata = .;
    *(.data.page_aligned)
    *(.data)
    . = ALIGN(8);
    *(.data.rel)
    *(.data.rel.*)
    CONSTRUCTORS
  } :text

  _edata = .;              /* End+1 of .data */

  .bss : {                 /* BSS */
    _sbss = .;
    *(.bss)
    . = ALIGN(1 << 3);     /* 8-byte alignment for the end of .bss */
  } :text

  . = ALIGN(4096);
  _ebss = .;
  _szbss = _ebss - _sbss;

  /* Boot-time stack area.  g_idle_topstack is presumably consumed by the
   * arm64 startup code as the initial idle-thread stack top -- confirm
   * against arch/arm64 boot sources.
   */

  .initstack : {           /* INIT STACK */
    _s_initstack = .;
    *(.initstack)
    . = ALIGN(16);
  } :text

  . = ALIGN(4096);
  _e_initstack = . ;
  g_idle_topstack = . ;

  _szdata = _e_initstack - _sdata;

  /* Sections to be discarded */

  /DISCARD/ : {
    *(.exit.text)
    *(.exit.data)
    *(.exitcall.exit)
    *(.eh_frame)
  }

  /* Stabs debugging sections. */

  .stab 0 : { *(.stab) }
  .stabstr 0 : { *(.stabstr) }
  .stab.excl 0 : { *(.stab.excl) }
  .stab.exclstr 0 : { *(.stab.exclstr) }
  .stab.index 0 : { *(.stab.index) }
  .stab.indexstr 0 : { *(.stab.indexstr) }
  .comment 0 : { *(.comment) }
}

View file

@ -0,0 +1,29 @@
############################################################################
# boards/arm64/qemu/qemu-a53/src/Makefile
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership. The
# ASF licenses this file to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
############################################################################
include $(TOPDIR)/Make.defs

# Board-specific C sources

CSRCS = qemu_boardinit.c qemu_bringup.c

# qemu_appinit.c provides board_app_initialize() for the boardctl() interface

ifeq ($(CONFIG_BOARDCTL),y)
CSRCS += qemu_appinit.c
endif

include $(TOPDIR)/boards/Board.mk

View file

@ -0,0 +1,59 @@
/****************************************************************************
* boards/arm64/qemu/qemu-a53/src/qemu-a53.h
*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership. The
* ASF licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the
* License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
****************************************************************************/
#ifndef __BOARDS_ARM64_QEMU_QEMU_A53_SRC_QEMU_A53_H
#define __BOARDS_ARM64_QEMU_QEMU_A53_SRC_QEMU_A53_H

/****************************************************************************
 * Included Files
 ****************************************************************************/

#include <nuttx/config.h>
#include <stdint.h>

/****************************************************************************
 * Public Types
 ****************************************************************************/

/****************************************************************************
 * Public Data
 ****************************************************************************/

#ifndef __ASSEMBLY__

/****************************************************************************
 * Public Function Prototypes
 ****************************************************************************/

/****************************************************************************
 * Name: qemu_bringup
 *
 * Description:
 *   Bring up board features.  Called from board_app_initialize() when
 *   CONFIG_BOARDCTL is set, or from board_late_initialize() when
 *   CONFIG_BOARD_LATE_INITIALIZE is set.
 *
 * Returned Value:
 *   OK is always returned; failures (e.g. mounting procfs) are only
 *   logged.
 *
 ****************************************************************************/

#if defined(CONFIG_BOARDCTL) || defined(CONFIG_BOARD_LATE_INITIALIZE)
int qemu_bringup(void);
#endif

#endif /* __ASSEMBLY__ */
#endif /* __BOARDS_ARM64_QEMU_QEMU_A53_SRC_QEMU_A53_H */

View file

@ -0,0 +1,76 @@
/****************************************************************************
* boards/arm64/qemu/qemu-a53/src/qemu_appinit.c
*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership. The
* ASF licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the
* License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
****************************************************************************/
/****************************************************************************
* Included Files
****************************************************************************/
#include <nuttx/config.h>
#include <sys/types.h>
#include <nuttx/board.h>
#include "qemu-a53.h"
#ifdef CONFIG_BOARDCTL
/****************************************************************************
* Public Functions
****************************************************************************/
/****************************************************************************
* Name: board_app_initialize
*
* Description:
* Perform application specific initialization. This function is never
* called directly from application code, but only indirectly via the
* (non-standard) boardctl() interface using the command BOARDIOC_INIT.
*
* Input Parameters:
* arg - The boardctl() argument is passed to the board_app_initialize()
* implementation without modification. The argument has no
* meaning to NuttX; the meaning of the argument is a contract
* between the board-specific initialization logic and the
* matching application logic. The value could be such things as a
* mode enumeration value, a set of DIP switch switch settings, a
* pointer to configuration data read from a file or serial FLASH,
* or whatever you would like to do with it. Every implementation
* should accept zero/NULL as a default configuration.
*
* Returned Value:
* Zero (OK) is returned on success; a negated errno value is returned on
* any failure to indicate the nature of the failure.
*
****************************************************************************/
int board_app_initialize(uintptr_t arg)
{
  UNUSED(arg);

#ifdef CONFIG_BOARD_LATE_INITIALIZE
  /* Board initialization is deferred to board_late_initialize() */

  return OK;
#else
  /* Perform board initialization now */

  return qemu_bringup();
#endif
}
#endif /* CONFIG_BOARDCTL */

View file

@ -0,0 +1,113 @@
/****************************************************************************
* boards/arm64/qemu/qemu-a53/src/qemu_boardinit.c
*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership. The
* ASF licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the
* License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
****************************************************************************/
/****************************************************************************
* Included Files
****************************************************************************/
#include <nuttx/config.h>
#include <stdint.h>
#include <nuttx/board.h>
#include "qemu-a53.h"
/****************************************************************************
* Pre-processor Definitions
****************************************************************************/
/****************************************************************************
* Private Functions
****************************************************************************/
/****************************************************************************
* Public Functions
****************************************************************************/
/****************************************************************************
* Name: qemu_memory_initialize
*
* Description:
* All qemu architectures must provide the following entry point. This
* entry point is called early in the initialization before memory has
* been configured. This board-specific function is responsible for
* configuring any on-board memories.
*
* Logic in qemu_memory_initialize must be careful to avoid using any
* global variables because those will be uninitialized at the time this
* function is called.
*
* Input Parameters:
* None
*
* Returned Value:
* None
*
****************************************************************************/
void qemu_memory_initialize(void)
{
  /* Nothing to do: SDRAM has already been set up by the bootloader in
   * every supported configuration.
   */
}
/****************************************************************************
* Name: qemu_board_initialize
*
* Description:
* All qemu architectures must provide the following entry point. This
* entry point is called in the initialization phase -- after
* qemu_memory_initialize and after all memory has been configured and
* mapped but before any devices have been initialized.
*
* Input Parameters:
* None
*
* Returned Value:
* None
*
****************************************************************************/
void qemu_board_initialize(void)
{
#ifdef CONFIG_ARCH_LEDS
  /* LED support was selected: on-board LED configuration goes here. */

#endif
}
/****************************************************************************
* Name: board_late_initialize
*
* Description:
* If CONFIG_BOARD_LATE_INITIALIZE is selected, then an additional
* initialization call will be performed in the boot-up sequence to a
* function called board_late_initialize(). board_late_initialize() will be
* called immediately after up_intitialize() is called and just before the
* initial application is started. This additional initialization phase
* may be used, for example, to initialize board-specific device drivers.
*
****************************************************************************/
#ifdef CONFIG_BOARD_LATE_INITIALIZE
void board_late_initialize(void)
{
  /* Complete the board bring-up late in the boot sequence */

  qemu_bringup();
}
#endif /* CONFIG_BOARD_LATE_INITIALIZE */

View file

@ -0,0 +1,62 @@
/****************************************************************************
* boards/arm64/qemu/qemu-a53/src/qemu_bringup.c
*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership. The
* ASF licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the
* License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
****************************************************************************/
/****************************************************************************
* Included Files
****************************************************************************/
#include <nuttx/config.h>
#include <sys/types.h>
#include <syslog.h>
#include <nuttx/fs/fs.h>
#include "qemu-a53.h"
/****************************************************************************
* Public Functions
****************************************************************************/
/****************************************************************************
 * Name: qemu_bringup
 *
 * Description:
 *   Bring up board features.  Called either from board_app_initialize()
 *   (via the boardctl() BOARDIOC_INIT command) or from
 *   board_late_initialize().
 *
 * Returned Value:
 *   OK is always returned; a failure to mount procfs is only logged.
 *
 ****************************************************************************/

int qemu_bringup(void)
{
  int ret;

#ifdef CONFIG_FS_PROCFS
  /* Mount the procfs file system at /proc */

  ret = nx_mount(NULL, "/proc", "procfs", 0, NULL);
  if (ret < 0)
    {
      syslog(LOG_ERR, "ERROR: Failed to mount procfs at /proc: %d\n", ret);
    }
#endif

  UNUSED(ret);
  return OK;
}

View file

@ -36,3 +36,9 @@
# x86_64
/x86_64/intel64/qemu-intel64/configs/nsh
# The gcc 11.2 toolchain for macOS may fail when compiling
# floating-point code, so disable the cibuild check for macOS.
# It will be re-enabled when a new toolchain is released.
# /arm64/qemu/qemu-a53/configs/nsh_smp

View file

@ -20,3 +20,5 @@
# Sparc-gaisler-elf toolchain doesn't provide macOS binaries
/sparc
-xx3823:nsh
/arm64