
arch/armv7-r: add armv7-r smp support

Signed-off-by: zhangyuan21 <zhangyuan21@xiaomi.com>
zhangyuan21 2023-03-02 22:54:29 +08:00 committed by Xiang Xiao
parent bcc8801480
commit eb0e05be0d
15 changed files with 2044 additions and 21 deletions


@@ -56,3 +56,9 @@ ifeq ($(CONFIG_ARCH_FPU),y)
CMN_CSRCS += arm_fpucmp.c
CMN_ASRCS += arm_fpuconfig.S
endif
ifeq ($(CONFIG_SMP),y)
CMN_ASRCS += arm_cpuhead.S
CMN_CSRCS += arm_cpuindex.c arm_cpustart.c arm_cpupause.c
CMN_CSRCS += arm_cpuidlestack.c arm_scu.c
endif


@@ -0,0 +1,494 @@
/****************************************************************************
* arch/arm/src/armv7-r/arm_cpuhead.S
*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership. The
* ASF licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the
* License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
****************************************************************************/
/****************************************************************************
* Included Files
****************************************************************************/
#include <nuttx/config.h>
#include <arch/irq.h>
#include "arm.h"
#include "cp15.h"
#include "sctlr.h"
#include "smp.h"
#include "chip.h"
#include "arm_internal.h"
#ifdef CONFIG_SMP
.file "arm_cpuhead.S"
/****************************************************************************
* Configuration
****************************************************************************/
/* There are three operational memory configurations:
*
* 1. We execute in place in FLASH (CONFIG_BOOT_RUNFROMFLASH=y). In this case
* the boot logic must:
*
* - Configure SDRAM (if present),
* - Initialize the .data section in RAM, and
* - Clear .bss section
*
* 2. We boot in FLASH but copy ourselves to SDRAM for better performance.
* (CONFIG_BOOT_RUNFROMFLASH=n && CONFIG_BOOT_COPYTORAM=y). In this case
* the boot logic must:
*
* - Configure SDRAM (if present),
- Copy ourselves to DRAM, and
* - Clear .bss section (data should be fully initialized)
*
* In this case, we assume that the logic within this file executes from FLASH.
*
* 3. There is a bootloader that copies us to SDRAM (but probably not to the beginning)
* (CONFIG_BOOT_RUNFROMFLASH=n && CONFIG_BOOT_COPYTORAM=n). In this case SDRAM
* was initialized by the boot loader, and this boot logic must:
*
* - Clear .bss section (data should be fully initialized)
*/
/* Beginning (BOTTOM/BASE) and End+1 (TOP) of the IDLE stack.
*
* The IDLE stack is the stack that is used during initialization and,
* eventually, becomes the stack of the IDLE task when initialization
* is complete.
*
* REVISIT: There are issues here in some configurations. The stack
* pointer is initialized very early in the boot sequence. But in some
* architectures the memory supporting the stack may not yet be
* initialized (SDRAM, for example, would not be ready yet). In that
* case, ideally the IDLE stack should be in some other memory that does
* not require initialization (such as internal SRAM)
*/
/****************************************************************************
* .text
****************************************************************************/
.text
.syntax unified
.arm
/****************************************************************************
* Name: __cpu[n]_start
*
* Description:
* Boot functions for each CPU (other than CPU0). These functions set up
* the ARM operating mode, the initial stack, and configure co-processor
* registers. At the end of the boot, arm_cpu_boot() is called.
*
* These functions are provided by the common ARMv7-R logic.
*
* Input Parameters:
* None
*
* Returned Value:
* Do not return.
*
****************************************************************************/
#if CONFIG_SMP_NCPUS > 1
.global __cpu1_start
.type __cpu1_start, #function
__cpu1_start:
/* Make sure that we are in SYS mode with IRQs and FIQs disabled */
cpsid if, #PSR_MODE_SYS
/* Set up the stack pointer and the CPU index */
ldr sp, .Lcpu1_stackpointer
sub sp, sp, #XCPTCONTEXT_SIZE
mov r5, #1
/* Then branch to the common startup logic (PC-relative) */
b .Lcpu_start
.Lcpu1_stackpointer:
.long .Lcpu1_stacktop
.size __cpu1_start, .-__cpu1_start
#if CONFIG_SMP_NCPUS > 2
.global __cpu2_start
.type __cpu2_start, #function
__cpu2_start:
/* Make sure that we are in SYS mode with IRQs and FIQs disabled */
cpsid if, #PSR_MODE_SYS
/* Set up the stack pointer and the CPU index */
ldr sp, .Lcpu2_stackpointer
sub sp, sp, #XCPTCONTEXT_SIZE
mov r5, #2
/* Then branch to the common startup logic (PC-relative) */
b .Lcpu_start
.Lcpu2_stackpointer:
.long .Lcpu2_stacktop
.size __cpu2_start, .-__cpu2_start
#if CONFIG_SMP_NCPUS > 3
.global __cpu3_start
.type __cpu3_start, #function
__cpu3_start:
/* Make sure that we are in SYS mode with IRQs and FIQs disabled */
cpsid if, #PSR_MODE_SYS
/* Set up the stack pointer and the CPU index */
ldr sp, .Lcpu3_stackpointer
sub sp, sp, #XCPTCONTEXT_SIZE
mov r5, #3
/* Then branch to the common startup logic (PC-relative) */
b .Lcpu_start
.Lcpu3_stackpointer:
.long .Lcpu3_stacktop
.size __cpu3_start, .-__cpu3_start
#if CONFIG_SMP_NCPUS > 4
# error This logic needs to be extended for CONFIG_SMP_NCPUS > 4
#endif /* CONFIG_SMP_NCPUS > 4 */
#endif /* CONFIG_SMP_NCPUS > 3 */
#endif /* CONFIG_SMP_NCPUS > 2 */
#endif /* CONFIG_SMP_NCPUS > 1 */
/****************************************************************************
* Name: .Lcpu_start
*
* Description:
* Common CPUn startup logic (n > 0)
*
* On input:
* SP = Set to top of CPU IDLE stack
* R5 = CPU number
*
****************************************************************************/
.type .Lcpu_start, #function
.Lcpu_start:
/* The MPU and caches should be disabled */
mrc CP15_SCTLR(r0)
bic r0, r0, #(SCTLR_M | SCTLR_C)
bic r0, r0, #(SCTLR_I)
mcr CP15_SCTLR(r0)
/* Invalidate caches and TLBs.
*
* NOTE: "The ARMv7 Virtual Memory System Architecture (VMSA) does not
* support a CP15 operation to invalidate the entire data cache. ...
* In normal usage the only time the entire data cache has to be
* invalidated is on reset."
*
* The instruction cache is virtually indexed and physically tagged but
* the data cache is physically indexed and physically tagged. So it
* should not be an issue if the system comes up with a dirty Dcache;
* the ICache, however, must be invalidated.
*/
mov r0, #0
mcr CP15_BPIALL(r0) /* Invalidate entire branch prediction array */
mcr CP15_ICIALLU(r0) /* Invalidate I-cache */
mcr CP15_DCIALLU(r0) /* Invalidate D-cache */
isb
/* Set lr = Resume at .Lcpu_vstart with the MPU enabled */
ldr lr, .LCcpu_vstart /* Abs. address */
/* Configure the system control register (see sctlr.h) */
mrc CP15_SCTLR(r0) /* Get control register */
/* Clear bits to reset values. This is only necessary in situations where, for
* example, we get here via a bootloader and the control register is in some
* unknown state.
*
* SCTLR_M Bit 0: MPU enable bit
* SCTLR_A Bit 1: Strict alignment disabled
* SCTLR_C Bit 2: DCache disabled
* SCTLR_CCP15BEN Bit 5: CP15 barrier enable
* SCTLR_B Bit 7: Should be zero on ARMv7R
*
* SCTLR_SW Bit 10: SWP/SWPB not enabled
* SCTLR_I Bit 12: ICache disabled
* SCTLR_V Bit 13: Assume low vectors
* SCTLR_RR Bit 14: Round-robin replacement strategy.
*
* SCTLR_BR Bit 17: Background Region bit
* SCTLR_DZ Bit 19: Divide by Zero fault enable bit
* SCTLR_FI Bit 21: Fast interrupts configuration enable bit
* SCTLR_U Bit 22: Unaligned access model (always one)
*
* SCTLR_VE Bit 24: Interrupt Vectors Enable bit
* SCTLR_EE Bit 25: 0=Little endian.
* SCTLR_NMFI Bit 27: Non-maskable FIQ (NMFI) support
* SCTLR_TE Bit 30: All exceptions handled in ARM state.
*/
bic r0, r0, #(SCTLR_A | SCTLR_C | SCTLR_CCP15BEN | SCTLR_B)
bic r0, r0, #(SCTLR_SW | SCTLR_I | SCTLR_V | SCTLR_RR)
bic r0, r0, #(SCTLR_BR | SCTLR_DZ | SCTLR_FI)
bic r0, r0, #(SCTLR_VE | SCTLR_EE | SCTLR_NMFI | SCTLR_TE)
/* Set bits to enable the MPU
*
* SCTLR_M Bit 0: Enable the MPU
*/
orr r0, r0, #(SCTLR_M)
/* Set configured bits */
#ifdef CONFIG_ARMV7R_ALIGNMENT_TRAP
/* Alignment abort enable
*
* SCTLR_A Bit 1: Strict alignment enabled
*/
orr r0, r0, #(SCTLR_A)
#endif
#ifdef CONFIG_ARMV7R_SCTLR_CCP15BEN
/* Enable memory barriers
*
* SCTLR_CCP15BEN Bit 5: CP15 barrier enable
*/
orr r0, r0, #(SCTLR_CCP15BEN)
#endif
#ifndef CONFIG_ARCH_LOWVECTORS
/* Position vectors to 0xffff0000 if so configured.
*
* SCTLR_V Bit 13: High vectors
*/
orr r0, r0, #(SCTLR_V)
#endif
#ifdef CONFIG_ARMV7R_CACHE_ROUND_ROBIN
/* Round Robin cache replacement
*
* SCTLR_RR Bit 14: Round-robin replacement strategy.
*/
orr r0, r0, #(SCTLR_RR)
#endif
#ifdef CONFIG_ARMV7R_BACKGROUND_REGION
/* Allow PL1 access to back region when MPU is enabled
*
* SCTLR_BR Bit 17: Background Region bit
*/
orr r0, r0, #(SCTLR_BR)
#endif
#ifdef CONFIG_ARMV7R_DIV0_FAULT
/* Enable divide by zero faults
*
* SCTLR_DZ Bit 19: Divide by Zero fault enable bit
*/
orr r0, r0, #(SCTLR_DZ)
#endif
#ifdef CONFIG_ARMV7R_FAST_INTERRUPT
/* Fast interrupts configuration enable bit
*
* SCTLR_FI Bit 21: Fast interrupts configuration enable bit
*/
orr r0, r0, #(SCTLR_FI)
#endif
#ifdef CONFIG_ARMV7R_IMPL_VECTORS
/* Implementation defined interrupt vectors
*
* SCTLR_VE Bit 24: Interrupt Vectors Enable bit
*/
orr r0, r0, #(SCTLR_VE)
#endif
#ifdef CONFIG_ENDIAN_BIG
/* Big endian mode
*
* SCTLR_EE Bit 25: 1=Big endian.
*/
orr r0, r0, #(SCTLR_EE)
#endif
#ifdef CONFIG_ARMV7R_NONMASKABLE_FIQ
/* Non-maskable FIQ support
*
* SCTLR_NMFI Bit 27: Non-maskable FIQ (NMFI) support
*/
orr r0, r0, #(SCTLR_NMFI)
#endif
/* Then write the configured control register */
mcr CP15_SCTLR(r0) /* Write control reg */
isb
.rept 12 /* Some CPUs want lots of NOPs here */
nop
.endr
/* And "jump" to .Lcpu_vstart in the newly mapped virtual address space */
mov pc, lr
/****************************************************************************
* PC_Relative Data
****************************************************************************/
/* The start address of the second phase boot logic */
.type .LCcpu_vstart, %object
.LCcpu_vstart:
.long .Lcpu_vstart
.size .LCcpu_vstart, . -.LCcpu_vstart
.size .Lcpu_start, .-.Lcpu_start
/****************************************************************************
* Name: .Lcpu_vstart
*
* Description:
* Continue initialization after the MPU has been enabled.
*
* The following is executed after the MPU has been enabled. This uses
* absolute addresses; this is not position independent.
*
* On input:
* SP = Set to top of CPU IDLE stack
* R5 = CPU number
*
****************************************************************************/
.align 8
.globl arm_cpu_boot
.type .Lcpu_vstart, %function
.Lcpu_vstart:
#ifdef CONFIG_STACK_COLORATION
/* Write a known value to the IDLE thread stack to support stack
* monitoring logic
*/
adr r3, .Lstkinit
mov r0, sp /* R0 = end of IDLE stack */
ldmia r3, {r1, r2} /* R1 = Size of stack; R2 = coloration */
1: /* Top of the loop */
sub r1, r1, #1 /* R1 = Number of words remaining */
cmp r1, #0 /* Check (nwords == 0) */
str r2, [r0, #-4]! /* Save stack color word, decrement stack address */
bne 1b /* Bottom of the loop */
#endif
/* Branch to continue C level CPU initialization */
mov fp, #0 /* Clear framepointer */
mov lr, #0 /* LR = return address (none) */
mov r0, r5 /* Input parameter = CPU index */
b arm_cpu_boot /* Branch to C level CPU initialization */
.size .Lcpu_vstart, .-.Lcpu_vstart
/***************************************************************************
* Text-section constants
***************************************************************************/
/* Text-section constants: */
#ifdef CONFIG_STACK_COLORATION
.type .Lstkinit, %object
.Lstkinit:
.long SMP_STACK_WORDS - (XCPTCONTEXT_SIZE / 4)
.long STACK_COLOR /* Stack coloration word */
.size .Lstkinit, . -.Lstkinit
#endif
/***************************************************************************
* .noinit section data
***************************************************************************/
.section .noinit, "aw"
#if CONFIG_SMP_NCPUS > 1
.align 8
.globl g_cpu1_idlestack
.type g_cpu1_idlestack, object
g_cpu1_idlestack:
.space SMP_STACK_SIZE
.Lcpu1_stacktop:
.size g_cpu1_idlestack, .Lcpu1_stacktop-g_cpu1_idlestack
#if CONFIG_SMP_NCPUS > 2
.align 8
.globl g_cpu2_idlestack
.type g_cpu2_idlestack, object
g_cpu2_idlestack:
.space SMP_STACK_SIZE
.Lcpu2_stacktop:
.size g_cpu2_idlestack, .Lcpu2_stacktop-g_cpu2_idlestack
#if CONFIG_SMP_NCPUS > 3
.align 8
.globl g_cpu3_idlestack
.type g_cpu3_idlestack, object
g_cpu3_idlestack:
.space SMP_STACK_SIZE
.Lcpu3_stacktop:
.size g_cpu3_idlestack, .Lcpu3_stacktop-g_cpu3_idlestack
#if CONFIG_SMP_NCPUS > 4
# error This logic needs to be extended for CONFIG_SMP_NCPUS > 4
#endif /* CONFIG_SMP_NCPUS > 4 */
#endif /* CONFIG_SMP_NCPUS > 3 */
#endif /* CONFIG_SMP_NCPUS > 2 */
#endif /* CONFIG_SMP_NCPUS > 1 */
#endif /* CONFIG_SMP */
.end


@@ -0,0 +1,132 @@
/****************************************************************************
* arch/arm/src/armv7-r/arm_cpuidlestack.c
*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership. The
* ASF licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the
* License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
****************************************************************************/
/****************************************************************************
* Included Files
****************************************************************************/
#include <nuttx/config.h>
#include <sys/types.h>
#include <assert.h>
#include <nuttx/arch.h>
#include <nuttx/sched.h>
#include "smp.h"
#include "arm_internal.h"
#ifdef CONFIG_SMP
/****************************************************************************
* Pre-processor Definitions
****************************************************************************/
/* Stack alignment macros */
#define STACK_ISALIGNED(a) ((uintptr_t)(a) & ~SMP_STACK_MASK)
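
For context: SMP_STACK_SIZE, SMP_STACK_WORDS, and SMP_STACK_MASK come from smp.h, which is not shown in this diff. A minimal sketch of what this file assumes those definitions look like (8-byte stack alignment; the exact values here are an assumption, not part of the commit):

#define SMP_STACK_MASK  7                                       /* Assumed: 8-byte aligned stacks */
#define SMP_STACK_SIZE  ((CONFIG_IDLETHREAD_STACKSIZE + 7) & ~7)
#define SMP_STACK_WORDS (SMP_STACK_SIZE >> 2)                   /* Size in 32-bit words */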
/****************************************************************************
* Private Data
****************************************************************************/
#if CONFIG_SMP_NCPUS > 1
static const uint32_t *g_cpu_stackalloc[CONFIG_SMP_NCPUS] =
{
0
, g_cpu1_idlestack
#if CONFIG_SMP_NCPUS > 2
, g_cpu2_idlestack
#if CONFIG_SMP_NCPUS > 3
, g_cpu3_idlestack
#endif /* CONFIG_SMP_NCPUS > 3 */
#endif /* CONFIG_SMP_NCPUS > 2 */
};
#endif
/****************************************************************************
* Public Functions
****************************************************************************/
/****************************************************************************
* Name: up_cpu_idlestack
*
* Description:
* Allocate a stack for the CPU[n] IDLE task (n > 0) if appropriate and
* set up stack-related information in the IDLE task's TCB. This
* function is always called before up_cpu_start(). This function is
* only called for the CPU's initial IDLE task; up_create_stack is used for
* all normal tasks, pthreads, and kernel threads for all CPUs.
*
* The initial IDLE task is a special case because the CPUs can be started
* in different ways in different environments:
*
* 1. The CPU may already have been started and waiting in a low power
* state for up_cpu_start(). In this case, the IDLE thread's stack
* has already been allocated and is already in use. Here
* up_cpu_idlestack() only has to provide information about the
* already allocated stack.
*
* 2. The CPU may be disabled but started when up_cpu_start() is called.
* In this case, a new stack will need to be created for the IDLE
* thread and this function is then equivalent to:
*
* return up_create_stack(tcb, stack_size, TCB_FLAG_TTYPE_KERNEL);
*
* The following TCB fields must be initialized by this function:
*
* - adj_stack_size: Stack size after adjustment for hardware, processor,
* etc. This value is retained only for debug purposes.
* - stack_alloc_ptr: Pointer to allocated stack
* - stack_base_ptr: Adjusted stack base pointer after the TLS Data and
* Arguments have been removed from the stack allocation.
*
* Input Parameters:
* - cpu: CPU index that indicates which CPU the IDLE task is
* being created for.
* - tcb: The TCB of new CPU IDLE task
* - stack_size: The requested stack size for the IDLE task. At least
* this much must be allocated. This should be
* CONFIG_SMP_STACK_SIZE.
*
****************************************************************************/
int up_cpu_idlestack(int cpu, struct tcb_s *tcb, size_t stack_size)
{
#if CONFIG_SMP_NCPUS > 1
uintptr_t stack_alloc;
DEBUGASSERT(cpu > 0 && cpu < CONFIG_SMP_NCPUS && tcb != NULL &&
stack_size <= SMP_STACK_SIZE);
/* Get the top of the stack */
stack_alloc = (uintptr_t)g_cpu_stackalloc[cpu];
DEBUGASSERT(stack_alloc != 0 && STACK_ISALIGNED(stack_alloc));
tcb->adj_stack_size = SMP_STACK_SIZE;
tcb->stack_alloc_ptr = (void *)stack_alloc;
tcb->stack_base_ptr = tcb->stack_alloc_ptr;
#endif
return OK;
}
#endif /* CONFIG_SMP */


@@ -0,0 +1,71 @@
/****************************************************************************
* arch/arm/src/armv7-r/arm_cpuindex.c
*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership. The
* ASF licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the
* License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
****************************************************************************/
/****************************************************************************
* Included Files
****************************************************************************/
#include <nuttx/config.h>
#include <stdint.h>
#include <nuttx/arch.h>
#include "cp15.h"
#include "sctlr.h"
#ifdef CONFIG_SMP
/****************************************************************************
* Public Functions
****************************************************************************/
/****************************************************************************
* Name: up_cpu_index
*
* Description:
* Return an index in the range of 0 through (CONFIG_SMP_NCPUS-1) that
* corresponds to the currently executing CPU.
*
* If TLS is enabled, then the RTOS can get this information from the TLS
* info structure. Otherwise, the MCU-specific logic must provide some
* mechanism to provide the CPU index.
*
* Input Parameters:
* None
*
* Returned Value:
* An integer index in the range of 0 through (CONFIG_SMP_NCPUS-1) that
* corresponds to the currently executing CPU.
*
****************************************************************************/
int up_cpu_index(void)
{
/* Read the Multiprocessor Affinity Register (MPIDR) */
uint32_t mpidr = cp15_rdmpidr();
/* And return the CPU ID field */
return (mpidr & MPIDR_CPUID_MASK) >> MPIDR_CPUID_SHIFT;
}
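
cp15_rdmpidr() is declared in cp15.h and is not part of this diff. Given the MPIDR encoding documented in sctlr.h below (CRn=c0, opc1=0, CRm=c0, opc2=5), a plausible sketch of such a reader is:

static inline uint32_t cp15_rdmpidr(void)
{
  uint32_t mpidr;

  /* MRC p15, 0, <Rt>, c0, c0, 5: read the Multiprocessor Affinity Register */

  __asm__ __volatile__ ("mrc p15, 0, %0, c0, c0, 5" : "=r" (mpidr));
  return mpidr;
}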
#endif /* CONFIG_SMP */


@@ -0,0 +1,326 @@
/****************************************************************************
* arch/arm/src/armv7-r/arm_cpupause.c
*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership. The
* ASF licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the
* License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
****************************************************************************/
/****************************************************************************
* Included Files
****************************************************************************/
#include <nuttx/config.h>
#include <stdint.h>
#include <assert.h>
#include <nuttx/arch.h>
#include <nuttx/sched.h>
#include <nuttx/spinlock.h>
#include <nuttx/sched_note.h>
#include "arm_internal.h"
#include "gic.h"
#include "sched/sched.h"
#ifdef CONFIG_SMP
/****************************************************************************
* Private Data
****************************************************************************/
/* These spinlocks are used in the SMP configuration in order to implement
* up_cpu_pause(). The protocol for CPUn to pause CPUm is as follows
*
* 1. The up_cpu_pause() implementation on CPUn locks both g_cpu_wait[m]
* and g_cpu_paused[m]. CPUn then waits spinning on g_cpu_paused[m].
* 2. When CPUm receives the interrupt, it (1) unlocks g_cpu_paused[m] and
* (2) locks g_cpu_wait[m]. The first unblocks CPUn and the second
* blocks CPUm in the interrupt handler.
*
* When CPUm resumes, CPUn unlocks g_cpu_wait[m] and the interrupt handler
* on CPUm continues. CPUm must, of course, also then unlock g_cpu_wait[m]
* so that it will be ready for the next pause operation.
*/
static volatile spinlock_t g_cpu_wait[CONFIG_SMP_NCPUS];
static volatile spinlock_t g_cpu_paused[CONFIG_SMP_NCPUS];
/****************************************************************************
* Public Functions
****************************************************************************/
/****************************************************************************
* Name: up_cpu_pausereq
*
* Description:
* Return true if a pause request is pending for this CPU.
*
* Input Parameters:
* cpu - The index of the CPU to be queried
*
* Returned Value:
* true = a pause request is pending.
* false = no pause request is pending.
*
****************************************************************************/
bool up_cpu_pausereq(int cpu)
{
return spin_islocked(&g_cpu_paused[cpu]);
}
/****************************************************************************
* Name: up_cpu_paused
*
* Description:
* Handle a pause request from another CPU. Normally, this logic is
* executed from interrupt handling logic within the architecture-specific
* implementation. However, it is sometimes necessary to perform the pending
* pause operation in other contexts where the interrupt cannot be taken
* in order to avoid deadlocks.
*
* This function performs the following operations:
*
* 1. It saves the current task state at the head of the current assigned
* task list.
* 2. It waits on a spinlock, then
* 3. Returns from interrupt, restoring the state of the new task at the
* head of the ready to run list.
*
* Input Parameters:
* cpu - The index of the CPU to be paused
*
* Returned Value:
* On success, OK is returned. Otherwise, a negated errno value indicating
* the nature of the failure is returned.
*
****************************************************************************/
int up_cpu_paused(int cpu)
{
struct tcb_s *tcb = this_task();
/* Update scheduler parameters */
nxsched_suspend_scheduler(tcb);
#ifdef CONFIG_SCHED_INSTRUMENTATION
/* Notify that we are paused */
sched_note_cpu_paused(tcb);
#endif
/* Save the current context at CURRENT_REGS into the TCB at the head
* of the assigned task list for this CPU.
*/
arm_savestate(tcb->xcp.regs);
/* Release the g_cpu_paused spinlock to synchronize with the
* requesting CPU.
*/
spin_unlock(&g_cpu_paused[cpu]);
/* Wait for the spinlock to be released. The requesting CPU will release
* the spinlock when the CPU is resumed.
*/
spin_lock(&g_cpu_wait[cpu]);
/* This CPU has been resumed. Restore the exception context of the TCB at
* the (new) head of the assigned task list.
*/
tcb = this_task();
#ifdef CONFIG_SCHED_INSTRUMENTATION
/* Notify that we have resumed */
sched_note_cpu_resumed(tcb);
#endif
/* Reset scheduler parameters */
nxsched_resume_scheduler(tcb);
/* Then switch contexts. Any necessary address environment changes
* will be made when the interrupt returns.
*/
arm_restorestate(tcb->xcp.regs);
spin_unlock(&g_cpu_wait[cpu]);
return OK;
}
/****************************************************************************
* Name: arm_pause_handler
*
* Description:
* This is the handler for SGI2. It performs the following operations:
*
* 1. It saves the current task state at the head of the current assigned
* task list.
* 2. It waits on a spinlock, then
* 3. Returns from interrupt, restoring the state of the new task at the
* head of the ready to run list.
*
* Input Parameters:
* Standard interrupt handling
*
* Returned Value:
* Zero on success; a negated errno value on failure.
*
****************************************************************************/
int arm_pause_handler(int irq, void *context, void *arg)
{
int cpu = this_cpu();
/* Check for false alarms. Such false alarms could occur as a consequence
* of some deadlock-breaking logic that might have already serviced the
* SGI2 interrupt by calling up_cpu_paused(). If the pause event has already
* been processed then g_cpu_paused[cpu] will not be locked.
*/
if (up_cpu_pausereq(cpu))
{
/* NOTE: The following enter_critical_section() will call
* up_cpu_paused() to process a pause request to break a deadlock
* because the caller held a critical section. Once up_cpu_paused()
* has finished, the caller will proceed and release the g_cpu_irqlock.
* Then this CPU will acquire g_cpu_irqlock in the function.
*/
irqstate_t flags = enter_critical_section();
/* NOTE: the pause request should not exist here */
DEBUGVERIFY(!up_cpu_pausereq(cpu));
leave_critical_section(flags);
}
return OK;
}
/****************************************************************************
* Name: up_cpu_pause
*
* Description:
* Save the state of the current task at the head of the
* g_assignedtasks[cpu] task list and then pause task execution on the
* CPU.
*
* This function is called by the OS when the logic executing on one CPU
* needs to modify the state of the g_assignedtasks[cpu] list for another
* CPU.
*
* Input Parameters:
* cpu - The index of the CPU to be stopped
*
* Returned Value:
* Zero on success; a negated errno value on failure.
*
****************************************************************************/
int up_cpu_pause(int cpu)
{
DEBUGASSERT(cpu >= 0 && cpu < CONFIG_SMP_NCPUS && cpu != this_cpu());
#ifdef CONFIG_SCHED_INSTRUMENTATION
/* Notify of the pause event */
sched_note_cpu_pause(this_task(), cpu);
#endif
/* Take both spinlocks. The g_cpu_wait spinlock will prevent the SGI2
* handler from returning until up_cpu_resume() is called; g_cpu_paused
* is a handshake that will prevent this function from returning until
* the CPU is actually paused.
* Note that we might spin before getting g_cpu_wait; this just means that
* the other CPU still hasn't finished responding to the previous resume
* request.
*/
DEBUGASSERT(!spin_islocked(&g_cpu_paused[cpu]));
spin_lock(&g_cpu_wait[cpu]);
spin_lock(&g_cpu_paused[cpu]);
/* Execute SGI2 */
arm_cpu_sgi(GIC_IRQ_SGI2, (1 << cpu));
/* Wait for the other CPU to unlock g_cpu_paused meaning that
* it is fully paused and ready for up_cpu_resume();
*/
spin_lock(&g_cpu_paused[cpu]);
spin_unlock(&g_cpu_paused[cpu]);
/* On successful return g_cpu_wait will be locked, the other CPU will be
* spinning on g_cpu_wait and will not continue until up_cpu_resume() is
* called. g_cpu_paused will be unlocked in any case.
*/
return OK;
}
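
Taken together with up_cpu_resume() below, the intended calling pattern is roughly the following (a hedged sketch; the real call sites live in the scheduler, not in this diff):

up_cpu_pause(cpu);   /* CPUm saves its context and parks in the SGI2 handler */

/* ... it is now safe to modify g_assignedtasks[cpu] ... */

up_cpu_resume(cpu);  /* CPUm restores its context and returns from the interrupt */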
/****************************************************************************
* Name: up_cpu_resume
*
* Description:
* Restart the cpu after it was paused via up_cpu_pause(), restoring the
* state of the task at the head of the g_assignedtasks[cpu] list, and
* resume normal tasking.
*
* This function is called after up_cpu_pause() in order to resume operation of
* the CPU after modifying its g_assignedtasks[cpu] list.
*
* Input Parameters:
* cpu - The index of the CPU being re-started.
*
* Returned Value:
* Zero on success; a negated errno value on failure.
*
****************************************************************************/
int up_cpu_resume(int cpu)
{
DEBUGASSERT(cpu >= 0 && cpu < CONFIG_SMP_NCPUS && cpu != this_cpu());
#ifdef CONFIG_SCHED_INSTRUMENTATION
/* Notify of the resume event */
sched_note_cpu_resume(this_task(), cpu);
#endif
/* Release the spinlock. Releasing the spinlock will cause the SGI2
* handler on 'cpu' to continue and return from interrupt to the newly
* established thread.
*/
DEBUGASSERT(spin_islocked(&g_cpu_wait[cpu]) &&
!spin_islocked(&g_cpu_paused[cpu]));
spin_unlock(&g_cpu_wait[cpu]);
return OK;
}
#endif /* CONFIG_SMP */


@@ -0,0 +1,136 @@
/****************************************************************************
* arch/arm/src/armv7-r/arm_cpustart.c
*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership. The
* ASF licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the
* License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
****************************************************************************/
/****************************************************************************
* Included Files
****************************************************************************/
#include <nuttx/config.h>
#include <stdint.h>
#include <assert.h>
#include <debug.h>
#include <nuttx/arch.h>
#include <nuttx/sched.h>
#include <nuttx/sched_note.h>
#include "arm_internal.h"
#include "cp15_cacheops.h"
#include "gic.h"
#include "sched/sched.h"
#ifdef CONFIG_SMP
/****************************************************************************
* Private Functions
****************************************************************************/
/****************************************************************************
* Public Functions
****************************************************************************/
/****************************************************************************
* Name: arm_start_handler
*
* Description:
* This is the handler for SGI1. This handler simply returns from the
* interrupt, restoring the state of the new task at the head of the ready
* to run list.
*
* Input Parameters:
* Standard interrupt handling
*
* Returned Value:
* Zero on success; a negated errno value on failure.
*
****************************************************************************/
int arm_start_handler(int irq, void *context, void *arg)
{
struct tcb_s *tcb = this_task();
sinfo("CPU%d Started\n", this_cpu());
#ifdef CONFIG_SCHED_INSTRUMENTATION
/* Notify that this CPU has started */
sched_note_cpu_started(tcb);
#endif
/* Reset scheduler parameters */
nxsched_resume_scheduler(tcb);
/* Then switch contexts. This instantiates the exception context of the
* tcb at the head of the assigned task list. In this case, this should
* be the CPU's IDLE task.
*/
arm_restorestate(tcb->xcp.regs);
return OK;
}
/****************************************************************************
* Name: up_cpu_start
*
* Description:
* In an SMP configuration, only one CPU is initially active (CPU 0).
* System initialization occurs on that single thread. At the completion of
* the initialization of the OS, just before beginning normal multitasking,
* the additional CPUs would be started by calling this function.
*
* Each CPU is provided the entry point to its IDLE task when started. A
* TCB for each CPU's IDLE task has been initialized and placed in the
* CPU's g_assignedtasks[cpu] list. No stack has been allocated or
* initialized.
*
* The OS initialization logic calls this function repeatedly until each
* CPU has been started, 1 through (CONFIG_SMP_NCPUS-1).
*
* Input Parameters:
* cpu - The index of the CPU being started. This will be a numeric
* value in the range of one to (CONFIG_SMP_NCPUS-1).
* (CPU 0 is already active)
*
* Returned Value:
* Zero on success; a negated errno value on failure.
*
****************************************************************************/
int up_cpu_start(int cpu)
{
sinfo("Starting CPU%d\n", cpu);
DEBUGASSERT(cpu >= 0 && cpu < CONFIG_SMP_NCPUS && cpu != this_cpu());
#ifdef CONFIG_SCHED_INSTRUMENTATION
/* Notify of the start event */
sched_note_cpu_start(this_task(), cpu);
#endif
/* Execute SGI1 */
arm_cpu_sgi(GIC_IRQ_SGI1, (1 << cpu));
return OK;
}
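
As described above, the OS bringup logic calls up_cpu_start() once per secondary CPU. A hedged sketch of that caller (the real loop lives in the OS initialization code, not in this diff):

int cpu;
int ret;

for (cpu = 1; cpu < CONFIG_SMP_NCPUS; cpu++)
  {
    ret = up_cpu_start(cpu);      /* Kick CPUn via SGI1 */
    if (ret < 0)
      {
        serr("ERROR: Failed to start CPU%d: %d\n", cpu, ret);
      }
  }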
#endif /* CONFIG_SMP */


@@ -76,7 +76,11 @@
*/
#ifndef IDLE_STACK_BASE
# define IDLE_STACK_BASE _ebss
# ifdef CONFIG_SMP
# define IDLE_STACK_BASE _enoinit
# else
# define IDLE_STACK_BASE _ebss
# endif
#endif
#define IDLE_STACK_TOP IDLE_STACK_BASE+CONFIG_IDLETHREAD_STACKSIZE
@@ -124,6 +128,32 @@
.type __start, #function
__start:
#if defined(CONFIG_SMP) && CONFIG_SMP_NCPUS > 1
/* Get the CPU index; CPU0 continues to boot while the others wait for an event from CPU0 */
mrc CP15_MPIDR(r0)
and r0, r0, #0x3
cmp r0, #0
beq __cpu0_start
wfe
cmp r0, #1
beq __cpu1_start
# if CONFIG_SMP_NCPUS > 2
cmp r0, #2
beq __cpu2_start
# endif
# if CONFIG_SMP_NCPUS > 3
cmp r0, #3
beq __cpu3_start
# endif
# if CONFIG_SMP_NCPUS > 4
cmp r0, #4
beq __cpu4_start
# endif
__cpu0_start:
#endif
/* Make sure that we are in SYS mode with IRQs and FIQs disabled */
cpsid if, #PSR_MODE_SYS
@@ -184,10 +214,19 @@ __start:
/* Clear all configurable bits */
bic r0, r0, #(SCTLR_M | SCTLR_A | SCTLR_C | SCTLR_CCP15BEN | SCTLR_B)
bic r0, r0, #(SCTLR_SW | SCTLR_I | SCTLR_V | SCTLR_RR)
bic r0, r0, #(SCTLR_A | SCTLR_C | SCTLR_CCP15BEN | SCTLR_B)
bic r0, r0, #(SCTLR_SW | SCTLR_I | SCTLR_V | SCTLR_RR)
bic r0, r0, #(SCTLR_BR | SCTLR_DZ | SCTLR_FI)
bic r0, r0, #(SCTLR_VE | SCTLR_EE | SCTLR_NMFI | SCTLR_TE)
bic r0, r0, #(SCTLR_VE | SCTLR_EE | SCTLR_NMFI | SCTLR_TE)
#ifndef CONFIG_SMP
/* Set bits to enable the MPU
*
* SCTLR_M Bit 0: Enable the MPU
*/
orr r0, r0, #(SCTLR_M)
#endif
/* Set configured bits */
@@ -200,7 +239,7 @@ __start:
orr r0, r0, #(SCTLR_A)
#endif
#ifndef CONFIG_ARMV7R_DCACHE_DISABLE
#if !defined(CONFIG_ARMV7R_DCACHE_DISABLE) && !defined(CONFIG_SMP)
/* Dcache enable
*
* SCTLR_C Bit 2: DCache enable
@@ -218,7 +257,7 @@ __start:
orr r0, r0, #(SCTLR_CCP15BEN)
#endif
#ifndef CONFIG_ARMV7R_ICACHE_DISABLE
#if !defined(CONFIG_ARMV7R_ICACHE_DISABLE) && !defined(CONFIG_SMP)
/* Icache enable
*
* SCTLR_I Bit 12: ICache enable


@@ -75,6 +75,7 @@
*
****************************************************************************/
#ifndef CONFIG_SMP
void up_schedule_sigaction(struct tcb_s *tcb, sig_deliver_t sigdeliver)
{
sinfo("tcb=%p sigdeliver=%p\n", tcb, sigdeliver);
@@ -208,3 +209,222 @@ void up_schedule_sigaction(struct tcb_s *tcb, sig_deliver_t sigdeliver)
}
}
}
#else
void up_schedule_sigaction(struct tcb_s *tcb, sig_deliver_t sigdeliver)
{
int cpu;
int me;
sinfo("tcb=%p sigdeliver=%p\n", tcb, sigdeliver);
/* Refuse to handle nested signal actions */
if (!tcb->xcp.sigdeliver)
{
/* First, handle some special cases when the signal is being delivered
* to a task that is currently executing on any CPU.
*/
sinfo("rtcb=%p CURRENT_REGS=%p\n", this_task(), CURRENT_REGS);
if (tcb->task_state == TSTATE_TASK_RUNNING)
{
me = this_cpu();
cpu = tcb->cpu;
/* CASE 1: We are not in an interrupt handler and a task is
* signaling itself for some reason.
*/
if (cpu == me && !CURRENT_REGS)
{
/* In this case just deliver the signal now.
* REVISIT: Signal handler will run in a critical section!
*/
sigdeliver(tcb);
}
/* CASE 2: The task that needs to receive the signal is running.
* This could happen if the task is running on another CPU OR if
* we are in an interrupt handler and the task is running on this
* CPU. In the former case, we will have to PAUSE the other CPU
* first. But in either case, we will have to modify the return
* state as well as the state in the TCB.
*/
else
{
/* If we are signaling a task running on another CPU, we have
* to PAUSE the other CPU.
*/
if (cpu != me)
{
/* Pause the CPU */
up_cpu_pause(cpu);
/* Wait while the pause request is pending */
while (up_cpu_pausereq(cpu))
{
}
/* Now tcb on the other CPU can be accessed safely */
/* Copy tcb->xcp.regs to tcb->xcp.saved_regs. These will be
* restored by the signal trampoline after the signal has
* been delivered.
*/
tcb->xcp.sigdeliver = sigdeliver;
/* Save the current register context location */
tcb->xcp.saved_regs = tcb->xcp.regs;
/* Duplicate the register context. These will be
* restored by the signal trampoline after the signal has
* been delivered.
*/
tcb->xcp.regs = (void *)
((uint32_t)tcb->xcp.regs -
(uint32_t)XCPTCONTEXT_SIZE);
memcpy(tcb->xcp.regs, tcb->xcp.saved_regs,
XCPTCONTEXT_SIZE);
tcb->xcp.regs[REG_SP] = (uint32_t)tcb->xcp.regs +
(uint32_t)XCPTCONTEXT_SIZE;
/* Then set up to vector to the trampoline with interrupts
* disabled
*/
tcb->xcp.regs[REG_PC] = (uint32_t)arm_sigdeliver;
tcb->xcp.regs[REG_CPSR] = (PSR_MODE_SYS | PSR_I_BIT |
PSR_F_BIT);
#ifdef CONFIG_ARM_THUMB
tcb->xcp.regs[REG_CPSR] |= PSR_T_BIT;
#endif
}
else
{
/* tcb is running on the same CPU */
/* Save the return PC and CPSR (and perhaps also the LR). These will
* be restored by the signal trampoline after the signal
* has been delivered.
*/
tcb->xcp.sigdeliver = (void *)sigdeliver;
/* And make sure that the saved context in the TCB is the
* same as the interrupt return context.
*/
arm_savestate(tcb->xcp.saved_regs);
/* Duplicate the register context. These will be
* restored by the signal trampoline after the signal has
* been delivered.
*/
CURRENT_REGS = (void *)
((uint32_t)CURRENT_REGS -
(uint32_t)XCPTCONTEXT_SIZE);
memcpy((uint32_t *)CURRENT_REGS, tcb->xcp.saved_regs,
XCPTCONTEXT_SIZE);
CURRENT_REGS[REG_SP] = (uint32_t)CURRENT_REGS +
(uint32_t)XCPTCONTEXT_SIZE;
/* Then set up to vector to the trampoline with interrupts
* disabled. The kernel-space trampoline must run in
* privileged thread mode.
*/
CURRENT_REGS[REG_PC] = (uint32_t)arm_sigdeliver;
CURRENT_REGS[REG_CPSR] = (PSR_MODE_SYS | PSR_I_BIT |
PSR_F_BIT);
#ifdef CONFIG_ARM_THUMB
CURRENT_REGS[REG_CPSR] |= PSR_T_BIT;
#endif
}
/* Increment the IRQ lock count so that when the task is
* restarted, it will hold the IRQ spinlock.
*/
DEBUGASSERT(tcb->irqcount < INT16_MAX);
tcb->irqcount++;
/* NOTE: If the task runs on another CPU (cpu), adjusting
* global IRQ controls will be done in the pause handler
* on that CPU (cpu) by taking a critical section.
* If the task is scheduled on this CPU (me), do nothing
* because this CPU has already taken a critical section.
*/
/* RESUME the other CPU if it was PAUSED */
if (cpu != me)
{
up_cpu_resume(cpu);
}
}
}
/* Otherwise, we are (1) signaling a task that is not running from an
* interrupt handler, or (2) we are not in an interrupt handler and the
* running task is signaling some other non-running task.
*/
else
{
/* Save the return lr and cpsr and one scratch register. These
* will be restored by the signal trampoline after the signals
* have been delivered.
*/
tcb->xcp.sigdeliver = sigdeliver;
/* Save the current register context location */
tcb->xcp.saved_regs = tcb->xcp.regs;
/* Duplicate the register context. These will be
* restored by the signal trampoline after the signal has been
* delivered.
*/
tcb->xcp.regs = (void *)
((uint32_t)tcb->xcp.regs -
(uint32_t)XCPTCONTEXT_SIZE);
memcpy(tcb->xcp.regs, tcb->xcp.saved_regs, XCPTCONTEXT_SIZE);
tcb->xcp.regs[REG_SP] = (uint32_t)tcb->xcp.regs +
(uint32_t)XCPTCONTEXT_SIZE;
/* Increment the IRQ lock count so that when the task is restarted,
* it will hold the IRQ spinlock.
*/
DEBUGASSERT(tcb->irqcount < INT16_MAX);
tcb->irqcount++;
/* Then set up to vector to the trampoline with interrupts
* disabled
*/
tcb->xcp.regs[REG_PC] = (uint32_t)arm_sigdeliver;
tcb->xcp.regs[REG_CPSR] = (PSR_MODE_SYS | PSR_I_BIT | PSR_F_BIT);
#ifdef CONFIG_ARM_THUMB
tcb->xcp.regs[REG_CPSR] |= PSR_T_BIT;
#endif
}
}
}
#endif


@@ -0,0 +1,123 @@
/****************************************************************************
* arch/arm/src/armv7-r/arm_scu.c
*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership. The
* ASF licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the
* License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
****************************************************************************/
/****************************************************************************
* Included Files
****************************************************************************/
#include <nuttx/config.h>
#include <stdint.h>
#include "arm_internal.h"
#include "cp15_cacheops.h"
#include "barriers.h"
#include "sctlr.h"
#include "scu.h"
#include "cp15.h"
/****************************************************************************
* Public Functions
****************************************************************************/
/****************************************************************************
* Name: arm_enable_smp
*
* Description:
* Enable the SCU and make certain that current CPU is participating in
* the SMP cache coherency.
*
* Assumption:
* Called early in the CPU start-up. No special critical sections are
* needed if only CPU-private registers are modified.
*
****************************************************************************/
void arm_enable_smp(int cpu)
{
uint32_t regval;
/* Handle actions unique to CPU0 which comes up first */
if (cpu == 0)
{
/* Invalidate the SCU duplicate tags for all processors */
putreg32((SCU_INVALIDATE_ALL_WAYS << SCU_INVALIDATE_CPU0_SHIFT)
| (SCU_INVALIDATE_ALL_WAYS << SCU_INVALIDATE_CPU1_SHIFT)
#if CONFIG_SMP_NCPUS > 2
| (SCU_INVALIDATE_ALL_WAYS << SCU_INVALIDATE_CPU2_SHIFT)
| (SCU_INVALIDATE_ALL_WAYS << SCU_INVALIDATE_CPU3_SHIFT)
#endif
, SCU_INVALIDATE);
/* Invalidate CPUn L1 data cache so that it will be reloaded from
* coherent L2.
*/
cp15_invalidate_dcache_all();
ARM_DSB();
/* Invalidate the L2C-310 -- Missing logic. */
/* Enable the SCU */
regval = getreg32(SCU_CTRL);
regval |= SCU_CTRL_ENABLE;
putreg32(regval, SCU_CTRL);
/* Initialization done; kick the other CPUs waiting in __start */
ARM_SEV();
}
/* Actions for other CPUs */
else
{
/* Invalidate CPUn L1 data cache so that it will be reloaded from
* coherent L2.
*/
cp15_dcache_op_level(0, CP15_CACHE_INVALIDATE);
ARM_DSB();
/* Wait for the SCU to be enabled by the primary processor -- should
* not be necessary.
*/
}
/* Enable the data cache, set the SMP mode with ACTLR.SMP=1.
*
* SMP - Signals whether the processor is taking part in coherency
* or not.
*
* FW - Cache and TLB maintenance broadcast.
*/
regval = CP15_GET(ACTLR);
regval |= ACTLR_SMP;
regval |= ACTLR_FW;
CP15_SET(ACTLR, regval);
regval = CP15_GET(SCTLR);
regval |= SCTLR_C | SCTLR_I | SCTLR_M;
CP15_SET(SCTLR, regval);
}
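
One plausible call site for arm_enable_smp() (an assumption; the caller is not part of this diff) is the per-CPU boot path, e.g. from arm_cpu_boot():

void arm_cpu_boot(int cpu)  /* Simplified, hypothetical sketch */
{
  /* Join SMP coherency (and, on CPU0, enable the SCU) before the caches
   * are enabled and exercised.
   */

  arm_enable_smp(cpu);

  /* ... continue with interrupt controller setup, then enter the IDLE loop ... */
}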


@@ -56,12 +56,41 @@ void arm_sigdeliver(void)
struct tcb_s *rtcb = this_task();
uint32_t *regs = rtcb->xcp.saved_regs;
#ifdef CONFIG_SMP
/* In the SMP case, we must terminate the critical section while the signal
* handler executes, but we also need to restore the irqcount when
* we resume the main thread of the task.
*/
int16_t saved_irqcount;
#endif
board_autoled_on(LED_SIGNAL);
sinfo("rtcb=%p sigdeliver=%p sigpendactionq.head=%p\n",
rtcb, rtcb->xcp.sigdeliver, rtcb->sigpendactionq.head);
DEBUGASSERT(rtcb->xcp.sigdeliver != NULL);
#ifdef CONFIG_SMP
/* In the SMP case, up_schedule_sigaction() will have incremented
* 'irqcount' in order to force us into a critical section. Save the
* pre-incremented irqcount.
*/
saved_irqcount = rtcb->irqcount - 1;
DEBUGASSERT(saved_irqcount >= 0);
/* Now we need to call leave_critical_section() repeatedly to get the irqcount
* to zero, freeing all global spinlocks that enforce the critical section.
*/
do
{
leave_critical_section(regs[REG_CPSR]);
}
while (rtcb->irqcount > 0);
#endif /* CONFIG_SMP */
#ifndef CONFIG_SUPPRESS_INTERRUPTS
/* Then make sure that interrupts are enabled. Signal handlers must always
* run with interrupts enabled.
@@ -80,7 +109,22 @@ void arm_sigdeliver(void)
*/
sinfo("Resuming\n");
#ifdef CONFIG_SMP
/* Restore the saved 'irqcount' and recover the critical section
* spinlocks.
*/
DEBUGASSERT(rtcb->irqcount == 0);
while (rtcb->irqcount < saved_irqcount)
{
enter_critical_section();
}
#endif
#ifndef CONFIG_SUPPRESS_INTERRUPTS
up_irq_save();
#endif
/* Modify the saved return state with the actual saved values in the
* TCB. This depends on the fact that nested signal handling is


@@ -42,6 +42,36 @@
* Assembly Macros
****************************************************************************/
/****************************************************************************
* Name: setirqstack
*
* Description:
* Set the current stack pointer to the "top" of the IRQ interrupt stack for
* the single-CPU case. Must be provided by MCU-specific logic in chip.h for
* the SMP case.
*
****************************************************************************/
#if !defined(CONFIG_SMP) && CONFIG_ARCH_INTERRUPTSTACK > 7
.macro setirqstack, tmp1, tmp2
ldr sp, .Lirqstacktop /* SP = IRQ stack top */
.endm
#endif
/****************************************************************************
* Name: setfiqstack
*
* Description:
* Set the current stack pointer to the "top" of the FIQ interrupt stack for
* the single-CPU case. Must be provided by MCU-specific logic in chip.h for
* the SMP case.
*
****************************************************************************/
#if !defined(CONFIG_SMP) && CONFIG_ARCH_INTERRUPTSTACK > 7
.macro setfiqstack, tmp1, tmp2
ldr sp, .Lfiqstacktop /* SP = FIQ stack top */
.endm
#endif
/****************************************************************************
* Name: savefpu
*
@@ -169,7 +199,7 @@ arm_vectorirq:
#if CONFIG_ARCH_INTERRUPTSTACK > 7
/* Call arm_decodeirq() on the interrupt stack */
ldr sp, .Lirqstacktop /* SP = interrupt stack top */
setirqstack r1, r3 /* SP = interrupt stack top */
#else
/* Call arm_decodeirq() on the user stack */
@@ -216,7 +246,7 @@ arm_vectorirq:
rfeia r14
#if CONFIG_ARCH_INTERRUPTSTACK > 7
#if !defined(CONFIG_SMP) && CONFIG_ARCH_INTERRUPTSTACK > 7
.Lirqstacktop:
.word g_intstacktop
#endif
@@ -281,7 +311,7 @@ arm_vectorsvc:
#if CONFIG_ARCH_INTERRUPTSTACK > 7
/* Call arm_syscall() on the interrupt stack */
ldr sp, .Lirqstacktop /* SP = interrupt stack top */
setirqstack r1, r3 /* SP = interrupt stack top */
#else
/* Call arm_syscall() on the user stack */
@@ -641,7 +671,7 @@ arm_vectorfiq:
#if CONFIG_ARCH_INTERRUPTSTACK > 7
/* Call arm_decodefiq() on the interrupt stack */
ldr sp, .Lfiqstacktop /* SP = interrupt stack top */
setfiqstack r1, r3 /* SP = interrupt stack top */
#endif
bic sp, sp, #7 /* Force 8-byte alignment */
@@ -676,7 +706,7 @@ arm_vectorfiq:
rfeia r14
#if CONFIG_ARCH_INTERRUPTSTACK > 7
#if !defined(CONFIG_SMP) && CONFIG_ARCH_INTERRUPTSTACK > 7
.Lfiqstacktop:
.word g_fiqstacktop
#endif
@@ -690,7 +720,7 @@ arm_vectorfiq:
* Name: g_intstackalloc/g_intstacktop
****************************************************************************/
#if CONFIG_ARCH_INTERRUPTSTACK > 7
#if !defined(CONFIG_SMP) && CONFIG_ARCH_INTERRUPTSTACK > 7
.bss
.balign 8
@@ -722,5 +752,5 @@ g_fiqstacktop:
.size g_fiqstackalloc, (CONFIG_ARCH_INTERRUPTSTACK & ~7)
#endif
#endif /* CONFIG_ARCH_INTERRUPTSTACK > 7 */
#endif /* !CONFIG_SMP && CONFIG_ARCH_INTERRUPTSTACK > 7 */
.end


@@ -34,11 +34,13 @@
#define arm_isb(n) __asm__ __volatile__ ("isb " #n : : : "memory")
#define arm_dsb(n) __asm__ __volatile__ ("dsb " #n : : : "memory")
#define arm_dmb(n) __asm__ __volatile__ ("dmb " #n : : : "memory")
#define arm_nop(n) __asm__ __volatile__ ("nop\n")
#define arm_nop() __asm__ __volatile__ ("nop\n")
#define arm_sev() __asm__ __volatile__ ("sev\n")
#define ARM_DSB() arm_dsb(15)
#define ARM_ISB() arm_isb(15)
#define ARM_DMB() arm_dmb(15)
#define ARM_NOP() arm_nop(15)
#define ARM_NOP() arm_nop()
#define ARM_SEV() arm_sev()
#endif /* __ARCH_ARM_SRC_ARMV7_R_BARRIERS_H */


@@ -61,9 +61,19 @@
* TODO: To be provided
*/
/* Multiprocessor Affinity Register (MPIDR): CRn=c0, opc1=0, CRm=c0, opc2=5
* TODO: To be provided
*/
/* Multiprocessor Affinity Register (MPIDR): CRn=c0, opc1=0, CRm=c0, opc2=5 */
#define MPIDR_CPUID_SHIFT (0) /* Bits 0-1: CPU ID */
#define MPIDR_CPUID_MASK (3 << MPIDR_CPUID_SHIFT)
# define MPIDR_CPUID_CPU0 (0 << MPIDR_CPUID_SHIFT)
# define MPIDR_CPUID_CPU1 (1 << MPIDR_CPUID_SHIFT)
# define MPIDR_CPUID_CPU2 (2 << MPIDR_CPUID_SHIFT)
# define MPIDR_CPUID_CPU3 (3 << MPIDR_CPUID_SHIFT)
/* Bits 2-7: Reserved */
#define MPIDR_CLUSTID_SHIFT (8) /* Bits 8-11: Cluster ID value */
#define MPIDR_CLUSTID_MASK (15 << MPIDR_CLUSTID_SHIFT)
/* Bits 12-29: Reserved */
#define MPIDR_U (1 << 30) /* Bit 30: Multiprocessing Extensions. */
/* Revision ID Register (REVIDR): CRn=c0, opc1=0, CRm=c0, opc2=6
* TODO: To be provided
@@ -160,9 +170,19 @@
/* Bits 28-29: Reserved */
#define SCTLR_TE (1 << 30) /* Bit 30: Thumb exception enable */
/* Auxiliary Control Register (ACTLR): CRn=c1, opc1=0, CRm=c0, opc2=1
* Implementation defined
*/
/* Auxiliary Control Register (ACTLR): CRn=c1, opc1=0, CRm=c0, opc2=1 */
#define ACTLR_FW (1 << 0) /* Bit 0: Enable Cache/TLB maintenance broadcast */
/* Bits 1-2: Reserved */
#define ACTLR_MRP (1 << 3) /* Bit 3: Enable MRP */
/* Bits 4-5: Reserved */
#define ACTLR_SMP (1 << 6) /* Bit 6: Processor taking part in coherency */
/* Bit 7: Reserved */
#define ACTLR_ALLOC_1WAY (1 << 8) /* Bit 8: Allocation in 1-way cache only */
#define ACTLR_DTCM_ECC (1 << 9) /* Bit 9: ECC on caches and DTCM */
#define ACTLR_ITCM_ECC (1 << 10) /* Bit 10: ECC on caches and ITCM */
#define ACTLR_ITCM_QOS (1 << 11) /* Bit 11: Enable QoS */
/* Bits 12-31: Reserved */
/* Coprocessor Access Control Register (CPACR):
* CRn=c1, opc1=0, CRm=c0, opc2=2

arch/arm/src/armv7-r/scu.h (new file, 252 lines)

@@ -0,0 +1,252 @@
/****************************************************************************
* arch/arm/src/armv7-r/scu.h
*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership. The
* ASF licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the
* License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
****************************************************************************/
/* Reference:
* Cortex-R8 MPCore, Revision: r0p3, Technical Reference Manual.
*/
#ifndef __ARCH_ARM_SRC_ARMV7_R_SCU_H
#define __ARCH_ARM_SRC_ARMV7_R_SCU_H
/****************************************************************************
* Included Files
****************************************************************************/
#include "mpcore.h" /* For MPCORE_SCU_VBASE */
/****************************************************************************
* Pre-processor Definitions
****************************************************************************/
/* Register offsets *********************************************************/
#define SCU_CTRL_OFFSET 0x0000 /* SCU Control Register (Implementation defined) */
#define SCU_CONFIG_OFFSET 0x0004 /* SCU Configuration Register (Implementation defined) */
#define SCU_PWRSTATUS_OFFSET 0x0008 /* SCU CPU Power Status Register */
#define SCU_INVALIDATE_OFFSET 0x000c /* SCU Invalidate All Registers in Secure State */
#define SCU_FILTERSTART_OFFSET 0x0040 /* Filtering Start Address Register Defined by FILTERSTART input */
#define SCU_FILTEREND_OFFSET 0x0044 /* Filtering End Address Register Defined by FILTEREND input */
#define SCU_PFILTERSTART_OFFSET 0x0048 /* Peripherals Filtering Start Address Register */
#define SCU_PFILTEREND_OFFSET 0x004c /* Peripherals Filtering End Address Register */
#define SCU_SAC_OFFSET 0x0050 /* SCU Access Control (SAC) Register */
#define SCU_ERRBANKFST_OFFSET 0x0060 /* SCU Error Bank First Entry Register */
#define SCU_ERRBANKSND_OFFSET 0x0064 /* SCU Error Bank Second Entry Register */
#define SCU_DEBUGRAM_OFFSET 0x0070 /* SCU Debug Tag RAM Operation Register */
#define SCU_DEBUGRAMDATA_OFFSET 0x0074 /* SCU Debug Tag RAM Data Value Register */
#define SCU_DEBUGRAMECC_OFFSET 0x0078 /* SCU Debug Tag RAM ECC Chunk Register */
#define SCU_ECCERR_OFFSET 0x007c /* ECC Fatal Error Register */
#define SCU_FPPFILTERSTART_OFFSET(n) (0x0080 + (n)*8) /* FPP Filtering Start Address Register for core n */
#define SCU_FPPFILTEREND_OFFSET(n) (0x0084 + (n)*8) /* FPP Filtering End Address Register for core n */
/* Register addresses *******************************************************/
#define SCU_CTRL (MPCORE_SCU_VBASE+SCU_CTRL_OFFSET)
#define SCU_CONFIG (MPCORE_SCU_VBASE+SCU_CONFIG_OFFSET)
#define SCU_PWRSTATUS (MPCORE_SCU_VBASE+SCU_PWRSTATUS_OFFSET)
#define SCU_INVALIDATE (MPCORE_SCU_VBASE+SCU_INVALIDATE_OFFSET)
#define SCU_FILTERSTART (MPCORE_SCU_VBASE+SCU_FILTERSTART_OFFSET)
#define SCU_FILTEREND (MPCORE_SCU_VBASE+SCU_FILTEREND_OFFSET)
#define SCU_PFILTERSTART (MPCORE_SCU_VBASE+SCU_PFILTERSTART_OFFSET)
#define SCU_PFILTEREND (MPCORE_SCU_VBASE+SCU_PFILTEREND_OFFSET)
#define SCU_SAC (MPCORE_SCU_VBASE+SCU_SAC_OFFSET)
#define SCU_ERRBANKFST (MPCORE_SCU_VBASE+SCU_ERRBANKFST_OFFSET)
#define SCU_ERRBANKSND (MPCORE_SCU_VBASE+SCU_ERRBANKSND_OFFSET)
#define SCU_DEBUGRAM (MPCORE_SCU_VBASE+SCU_DEBUGRAM_OFFSET)
#define SCU_DEBUGRAMDATA (MPCORE_SCU_VBASE+SCU_DEBUGRAMDATA_OFFSET)
#define SCU_DEBUGRAMECC (MPCORE_SCU_VBASE+SCU_DEBUGRAMECC_OFFSET)
#define SCU_ECCERR (MPCORE_SCU_VBASE+SCU_ECCERR_OFFSET)
#define SCU_FPPFILTERSTART(n) (MPCORE_SCU_VBASE+SCU_FPPFILTERSTART_OFFSET(n))
#define SCU_FPPFILTEREND(n) (MPCORE_SCU_VBASE+SCU_FPPFILTEREND_OFFSET(n))
/* Register bit-field definitions *******************************************/
/* SCU Control Register (Implementation defined) */
#define SCU_CTRL_ENABLE (1 << 0) /* SCU enable */
#define SCU_CTRL_ADDRFILTER (1 << 1) /* Address filtering enable */
#define SCU_CTRL_RAMPARITY (1 << 2) /* SCU RAMs ECC enable */
#define SCU_CTRL_LINFILL (1 << 3) /* SCU speculative linefill enable */
#define SCU_CTRL_STANDBY (1 << 5) /* SCU standby enable */
#define SCU_CTRL_ICSTANDBY (1 << 6) /* IC standby enable */
#define SCU_CTRL_ECCCHKEN_M0 (1 << 12) /* ECC check enable on M0 */
#define SCU_CTRL_ECCCHKEN_M1 (1 << 13) /* ECC check enable on M1 */
#define SCU_CTRL_ECCCHKEN_MP (1 << 14) /* ECC check enable on MP */
#define SCU_CTRL_ECCCHKEN_ACP (1 << 15) /* ECC check enable on ACP */
#define SCU_CTRL_ECCCHKEN_FPP(n) (1 << ((n)+16)) /* ECC check enable on FPP for core n */
#define SCU_CTRL_ECCCHKEN_TCM (1 << 20) /* ECC check enable on AXI TCM */
/* SCU Configuration Register (Implementation defined) */
#define SCU_CONFIG_NCPUS_SHIFT 0 /* Number of CPUs present */
#define SCU_CONFIG_NCPUS_MASK (3 << SCU_CONFIG_NCPUS_SHIFT)
# define SCU_CONFIG_NCPUS(r) ((((uint32_t)(r) & SCU_CONFIG_NCPUS_MASK) >> SCU_CONFIG_NCPUS_SHIFT) + 1)
#define SCU_CONFIG_SMPCPUS_SHIFT 4 /* Processors that are in SMP or AMP mode */
#define SCU_CONFIG_SMPCPUS_MASK (15 << SCU_CONFIG_SMPCPUS_SHIFT)
# define SCU_CONFIG_CPU_SMP(n) (1 << ((n)+4))
# define SCU_CONFIG_CPU0_SMP (1 << 4)
# define SCU_CONFIG_CPU1_SMP (1 << 5)
# define SCU_CONFIG_CPU2_SMP (1 << 6)
# define SCU_CONFIG_CPU3_SMP (1 << 7)
#define SCU_CONFIG_CACHE_0KB 0
#define SCU_CONFIG_CACHE_4KB 1
#define SCU_CONFIG_CACHE_8KB 2
#define SCU_CONFIG_CACHE_16KB 3
#define SCU_CONFIG_CACHE_32KB 4
#define SCU_CONFIG_CACHE_64KB 5
#define SCU_CONFIG_CPU0_CACHE_SHIFT 8 /* CPU 0 cache size */
#define SCU_CONFIG_CPU0_CACHE_MASK (15 << SCU_CONFIG_CPU0_CACHE_SHIFT)
#define SCU_CONFIG_CPU1_CACHE_SHIFT 12 /* CPU 1 cache size */
#define SCU_CONFIG_CPU1_CACHE_MASK (15 << SCU_CONFIG_CPU1_CACHE_SHIFT)
#define SCU_CONFIG_CPU2_CACHE_SHIFT 16 /* CPU 2 cache size */
#define SCU_CONFIG_CPU2_CACHE_MASK (15 << SCU_CONFIG_CPU2_CACHE_SHIFT)
#define SCU_CONFIG_CPU3_CACHE_SHIFT 20 /* CPU 3 cache size */
#define SCU_CONFIG_CPU3_CACHE_MASK (15 << SCU_CONFIG_CPU3_CACHE_SHIFT)
#define SCU_CONFIG_AXI_PORT1_SHIFT 31
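/* Example (illustrative, not part of the original commit): the number of
 * CPUs implemented can be read back from the configuration register,
 * assuming the getreg32() accessor from arm_internal.h:
 *
 *   unsigned int ncpus = SCU_CONFIG_NCPUS(getreg32(SCU_CONFIG));
 */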
/* SCU CPU Power Status Register */
#define SCU_PWRSTATUS_NORMAL 0
#define SCU_PWRSTATUS_DORMANT 2
#define SCU_PWRSTATUS_PWROFF 3
#define SCU_PWRSTATUS_CPU0_SHIFT 0 /* CPU0 power status */
#define SCU_PWRSTATUS_CPU0_MASK (3 << SCU_PWRSTATUS_CPU0_SHIFT)
#define SCU_PWRSTATUS_CPU1_SHIFT 8 /* CPU1 power status */
#define SCU_PWRSTATUS_CPU1_MASK (3 << SCU_PWRSTATUS_CPU1_SHIFT)
#define SCU_PWRSTATUS_CPU2_SHIFT 16 /* CPU2 power status */
#define SCU_PWRSTATUS_CPU2_MASK (3 << SCU_PWRSTATUS_CPU2_SHIFT)
#define SCU_PWRSTATUS_CPU3_SHIFT 24 /* CPU3 power status */
#define SCU_PWRSTATUS_CPU3_MASK (3 << SCU_PWRSTATUS_CPU3_SHIFT)
/* SCU Invalidate All Registers in Secure State */
#define SCU_INVALIDATE_ALL_WAYS 15
#define SCU_INVALIDATE_CPU0_SHIFT 0 /* Ways that must be invalidated for CPU0 */
#define SCU_INVALIDATE_CPU0_MASK (15 << SCU_INVALIDATE_CPU0_SHIFT)
#define SCU_INVALIDATE_CPU1_SHIFT 4 /* Ways that must be invalidated for CPU1 */
#define SCU_INVALIDATE_CPU1_MASK (15 << SCU_INVALIDATE_CPU1_SHIFT)
#define SCU_INVALIDATE_CPU2_SHIFT 8 /* Ways that must be invalidated for CPU2 */
#define SCU_INVALIDATE_CPU2_MASK (15 << SCU_INVALIDATE_CPU2_SHIFT)
#define SCU_INVALIDATE_CPU3_SHIFT 12 /* Ways that must be invalidated for CPU3 */
#define SCU_INVALIDATE_CPU3_MASK (15 << SCU_INVALIDATE_CPU3_SHIFT)
/* Filtering Start Address Register Defined by FILTERSTART input */
#define SCU_FILTERSTART_SHIFT 20 /* Filtering start address */
#define SCU_FILTERSTART_MASK (0xfff << SCU_FILTERSTART_SHIFT)
/* Filtering End Address Register Defined by FILTEREND input */
#define SCU_FILTEREND_SHIFT 20 /* Filtering end address */
#define SCU_FILTEREND_MASK (0xfff << SCU_FILTEREND_SHIFT)
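/* The filter addresses are 1MB-granular: only bits [31:20] are
 * programmable.  For example (illustrative), to start filtering at
 * 0x40000000 the register would hold (0x400 << SCU_FILTERSTART_SHIFT),
 * i.e. 0x40000000 with the low 20 bits forced to zero.
 */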
/* LLP Filtering Start Address Register */
#define SCU_LLPFILTERSTART_SHIFT 20
#define SCU_LLPFILTERSTART_MASK (0xfff << SCU_LLPFILTERSTART_SHIFT)
/* LLP Filtering End Address Register */
#define SCU_LLPFILTEREND_SHIFT 20
#define SCU_LLPFILTEREND_MASK (0xfff << SCU_LLPFILTEREND_SHIFT)
/* SCU Access Control (SAC) Register */
#define SCU_SAC_CPU(n) (1 << (n)) /* CPUn may access components */
/* SCU Error Bank First Entry Register */
#define SCU_ERRBANKFST_STATUS_SHIFT 0
#define SCU_ERRBANKFST_STATUS_MASK (3 << SCU_ERRBANKFST_STATUS_SHIFT)
#define SCU_ERRBANKFST_WAYS_SHIFT(n) (16 + (n)*4)
#define SCU_ERRBANKFST_WAYS_MASK(n) (0xf << SCU_ERRBANKFST_WAYS_SHIFT(n))
/* SCU Error Bank Second Entry Register */
#define SCU_ERRBANKSND_STATUS_SHIFT 0
#define SCU_ERRBANKSND_STATUS_MASK (3 << SCU_ERRBANKSND_STATUS_SHIFT)
#define SCU_ERRBANKSND_INDEX_SHIFT 5
#define SCU_ERRBANKSND_INDEX_MASK (0x1ff << SCU_ERRBANKSND_INDEX_SHIFT)
#define SCU_ERRBANKSND_WAYS_SHIFT 16
#define SCU_ERRBANKSND_WAYS_MASK (0xffff << SCU_ERRBANKSND_WAYS_SHIFT)
/* SCU Debug Tag RAM Operation Register */
#define SCU_DEBUGRAM_READ 0
#define SCU_DEBUGRAM_WRITE 1
#define SCU_DEBUGRAM_INDEX_SHIFT 5
#define SCU_DEBUGRAM_INDEX_MASK (0x1ff << SCU_DEBUGRAM_INDEX_SHIFT)
#define SCU_DEBUGRAM_CORE_SHIFT 24
#define SCU_DEBUGRAM_CORE_MASK (3 << SCU_DEBUGRAM_CORE_SHIFT)
#define SCU_DEBUGRAM_WAY_SHIFT 30
#define SCU_DEBUGRAM_WAY_MASK (3 << SCU_DEBUGRAM_WAY_SHIFT)
/* SCU Debug Tag RAM Data Value Register */
#define SCU_DEBUGRAMDATA_VALUE_SHIFT 17
#define SCU_DEBUGRAMDATA_VALUE_MASK (0x1f << SCU_DEBUGRAMDATA_VALUE_SHIFT)
#define SCU_DEBUGRAMDATA_VALID (1 << 22)
/* SCU Debug Tag RAM ECC Chunk Register */
#define SCU_DEBUGRAMECC_CHUNK_SHIFT 0
#define SCU_DEBUGRAMECC_CHUNK_MASK (0x3f << SCU_DEBUGRAMECC_CHUNK_SHIFT)
/* ECC Fatal Error Register */
#define SCU_ECCERR_CORE_DETECTED(n) (n) /* Core that detected the fatal ECC error */
#define SCU_ECCERR_DETECTED (1 << 8) /* Fatal ECC error detected */
/* FPP Filtering Start Address Registers 0-3 */
#define SCU_FPPFILTERSTART_SHIFT 20
#define SCU_FPPFILTERSTART_MASK (0xfff << SCU_FPPFILTERSTART_SHIFT)
/* FPP Filtering End Address Registers 0-3 */
#define SCU_FPPFILTEREND_SHIFT 20
#define SCU_FPPFILTEREND_MASK (0xfff << SCU_FPPFILTEREND_SHIFT)
/****************************************************************************
* Public Functions Prototypes
****************************************************************************/
/****************************************************************************
* Name: arm_enable_smp
*
* Description:
 * Enable the SCU and make certain that the current CPU is participating in
* the SMP cache coherency.
*
****************************************************************************/
void arm_enable_smp(int cpu);
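/* Example (illustrative sketch only; the real logic lives in arm_scu.c):
 * a minimal implementation would invalidate this CPU's duplicate tag ways,
 * enable the SCU once from CPU0, and then join the coherency domain by
 * setting ACTLR.SMP.  The per-CPU way shift (cpu << 2) follows the
 * SCU_INVALIDATE_CPUn_SHIFT definitions above:
 *
 *   void arm_enable_smp(int cpu)
 *   {
 *     uint32_t regval;
 *
 *     putreg32(SCU_INVALIDATE_ALL_WAYS << (cpu << 2), SCU_INVALIDATE);
 *
 *     if (cpu == 0)
 *       {
 *         regval  = getreg32(SCU_CTRL);
 *         regval |= SCU_CTRL_ENABLE;
 *         putreg32(regval, SCU_CTRL);
 *       }
 *
 *     ... then set the ACTLR.SMP bit via CP15 to join the domain ...
 *   }
 */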
#endif /* __ARCH_ARM_SRC_ARMV7_R_SCU_H */

128
arch/arm/src/armv7-r/smp.h Normal file
View file

@@ -0,0 +1,128 @@
/****************************************************************************
* arch/arm/src/armv7-r/smp.h
*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership. The
* ASF licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the
* License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
****************************************************************************/
#ifndef __ARCH_ARM_SRC_ARMV7_R_SMP_H
#define __ARCH_ARM_SRC_ARMV7_R_SMP_H
/****************************************************************************
* Included Files
****************************************************************************/
#include <nuttx/config.h>
#include <stdint.h>
#ifdef CONFIG_SMP
/****************************************************************************
* Pre-processor Definitions
****************************************************************************/
/* ARM requires at least a 4-byte stack alignment. For use with EABI and
* floating point, the stack must be aligned to 8-byte addresses. We will
 * always use the EABI stack alignment.
*/
#define SMP_STACK_ALIGNMENT 8
#define SMP_STACK_MASK 7
#define SMP_STACK_SIZE ((CONFIG_IDLETHREAD_STACKSIZE + 7) & ~7)
#define SMP_STACK_WORDS (SMP_STACK_SIZE >> 2)
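/* For example, with CONFIG_IDLETHREAD_STACKSIZE=2050, SMP_STACK_SIZE
 * rounds up to (2050 + 7) & ~7 = 2056 bytes and SMP_STACK_WORDS is
 * 2056 >> 2 = 514 32-bit words.
 */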
/****************************************************************************
* Public Data
****************************************************************************/
#ifndef __ASSEMBLY__
#if CONFIG_SMP_NCPUS > 1
extern uint32_t g_cpu1_idlestack[SMP_STACK_WORDS];
#if CONFIG_SMP_NCPUS > 2
extern uint32_t g_cpu2_idlestack[SMP_STACK_WORDS];
#if CONFIG_SMP_NCPUS > 3
extern uint32_t g_cpu3_idlestack[SMP_STACK_WORDS];
#if CONFIG_SMP_NCPUS > 4
# error This logic needs to be extended for CONFIG_SMP_NCPUS > 4
#endif /* CONFIG_SMP_NCPUS > 4 */
#endif /* CONFIG_SMP_NCPUS > 3 */
#endif /* CONFIG_SMP_NCPUS > 2 */
#endif /* CONFIG_SMP_NCPUS > 1 */
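/* These arrays are typically gathered into a per-CPU lookup table by the
 * idle-stack allocation logic (illustrative sketch of what
 * arm_cpuidlestack.c might do; CPU0 uses the boot-time IDLE stack):
 *
 *   static uint32_t *const g_cpu_stackalloc[CONFIG_SMP_NCPUS] =
 *   {
 *     0, g_cpu1_idlestack, g_cpu2_idlestack, g_cpu3_idlestack
 *   };
 */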
/****************************************************************************
* Public Function Prototypes
****************************************************************************/
/****************************************************************************
* Name: __cpu[n]_start
*
* Description:
 * Boot functions for each CPU (other than CPU0). These functions set up
 * the ARM operating mode and the initial stack, and configure co-processor
 * registers. At the end of the boot sequence, arm_cpu_boot() is called.
*
* These functions are provided by the common ARMv7-R logic.
*
* Input Parameters:
* None
*
* Returned Value:
 * These functions do not return.
*
****************************************************************************/
#if CONFIG_SMP_NCPUS > 1
void __cpu1_start(void);
#endif
#if CONFIG_SMP_NCPUS > 2
void __cpu2_start(void);
#endif
#if CONFIG_SMP_NCPUS > 3
void __cpu3_start(void);
#endif
#if CONFIG_SMP_NCPUS > 4
# error This logic needs to be extended for CONFIG_SMP_NCPUS > 4
#endif
/****************************************************************************
* Name: arm_cpu_boot
*
* Description:
* Continues the C-level initialization started by the assembly language
* __cpu[n]_start function. At a minimum, this function needs to
 * initialize interrupt handling and, perhaps, wait in WFI until
 * arm_cpu_start() issues an SGI.
*
 * This function must be provided by each ARMv7-R MCU and implements
* MCU-specific initialization logic.
*
* Input Parameters:
* cpu - The CPU index. This is the same value that would be obtained by
 * calling up_cpu_index().
*
* Returned Value:
* Does not return.
*
****************************************************************************/
void arm_cpu_boot(int cpu);
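/* Illustrative sketch (MCU-specific code, not part of this header),
 * assuming a GIC-based interrupt controller and the standard NuttX SMP
 * entry points:
 *
 *   void arm_cpu_boot(int cpu)
 *   {
 *     arm_enable_smp(cpu);     <- join the SMP coherency domain
 *     arm_gic_initialize();    <- per-CPU interrupt controller setup
 *     up_irq_enable();         <- enable interrupts on this CPU
 *     nx_idle_trampoline();    <- enter this CPU's IDLE loop; never returns
 *   }
 */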
#endif /* __ASSEMBLY__ */
#endif /* CONFIG_SMP */
#endif /* __ARCH_ARM_SRC_ARMV7_R_SMP_H */