arch/intel64: add support for AP cores boot

Intel64 can now boot application processor (AP) cores, which is needed
for SMP.

IMPORTANT: CONFIG_BOARD_LOOPSPERMSEC must be properly configured,
otherwise the AP boot sequence can fail due to too-short delays during
AP startup.

Signed-off-by: p-szafonimateusz <p-szafonimateusz@xiaomi.com>
parent 961ade88fe
commit 8d4681a190
14 changed files with 900 additions and 58 deletions
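The CONFIG_BOARD_LOOPSPERMSEC warning matters because the AP startup sequence below relies on busy-wait delays (up_mdelay()/up_udelay()), which on many NuttX ports are calibrated loops rather than timer-based waits. A minimal C sketch of that kind of calibrated delay, shown only as an illustration and not as the actual x86_64 implementation:

#include <nuttx/config.h>

/* Illustrative sketch: a busy-wait millisecond delay calibrated by
 * CONFIG_BOARD_LOOPSPERMSEC.  If the constant is too small, the INIT/SIPI
 * delays in x86_64_ap_startup() become shorter than intended and the AP
 * may not yet be ready when its ready flag is checked.
 */

static void example_mdelay(unsigned int milliseconds)
{
  volatile unsigned int i;
  volatile unsigned int j;

  for (i = 0; i < milliseconds; i++)
    {
      for (j = 0; j < CONFIG_BOARD_LOOPSPERMSEC; j++)
        {
        }
    }
}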
@@ -133,6 +133,7 @@ config ARCH_X86_64
	select ARCH_HAVE_TCBINFO
	select ARCH_HAVE_FPU
	select ARCH_HAVE_DPFPU
	select ARCH_HAVE_MULTICPU
	select ARCH_HAVE_TESTSET
	select ARCH_HAVE_INTERRUPTSTACK
	select ARCH_HAVE_CUSTOMOPT
@@ -345,6 +345,10 @@
#define HPET0_IRQ IRQ2
#define HPET1_IRQ IRQ8

/* Use IRQ15 for SMP */

#define SMP_IPI_IRQ IRQ15

/* Common register save structure created by up_saveusercontext() and by
 * ISR/IRQ interrupt processing.
 */
@@ -37,6 +37,12 @@
 * Pre-processor Definitions
 ****************************************************************************/

#define IDLE_STACK_SIZE CONFIG_IDLETHREAD_STACKSIZE

#if CONFIG_IDLETHREAD_STACKSIZE % 16 != 0
# error CONFIG_IDLETHREAD_STACKSIZE must be aligned to 16
#endif
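The 16-byte requirement comes from the x86-64 ABI: the stack must be 16-byte aligned at call boundaries, and SSE register spills fault on a misaligned stack. A C11 restatement of the preprocessor check above, included only as an illustration:

#include <assert.h>
#include <nuttx/config.h>

/* Illustrative C11 form of the check above: every IDLE stack slice must be
 * a multiple of 16 bytes so each CPU's initial RSP stays 16-byte aligned.
 */

static_assert((CONFIG_IDLETHREAD_STACKSIZE % 16) == 0,
              "CONFIG_IDLETHREAD_STACKSIZE must be aligned to 16");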
/****************************************************************************
 * Private Data
 ****************************************************************************/
@@ -49,8 +55,28 @@
 * Public Functions
 ****************************************************************************/

const uintptr_t g_idle_topstack = (uintptr_t)_ebss +
                                  CONFIG_IDLETHREAD_STACKSIZE;
static const uintptr_t g_idle_stackalloc = (uintptr_t)_ebss +
                                  CONFIG_IDLETHREAD_STACKSIZE * CONFIG_SMP_NCPUS;

const uintptr_t g_idle_topstack[CONFIG_SMP_NCPUS] =
{
  (uintptr_t)g_idle_stackalloc + (1 * IDLE_STACK_SIZE) - 16,
#if CONFIG_SMP_NCPUS > 1
  (uintptr_t)g_idle_stackalloc + (2 * IDLE_STACK_SIZE) - 16,
#endif
#if CONFIG_SMP_NCPUS > 2
  (uintptr_t)g_idle_stackalloc + (3 * IDLE_STACK_SIZE) - 16,
#endif
#if CONFIG_SMP_NCPUS > 3
  (uintptr_t)g_idle_stackalloc + (4 * IDLE_STACK_SIZE) - 16,
#endif
#if CONFIG_SMP_NCPUS > 4
  (uintptr_t)g_idle_stackalloc + (5 * IDLE_STACK_SIZE) - 16,
#endif
#if CONFIG_SMP_NCPUS > 5
# error missing logic
#endif
};
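Reading the table above: the IDLE stacks are laid out back to back, and CPU n's entry is the top of its slice, pulled back 16 bytes so the initial RSP is 16-byte aligned. A small restatement in C, assuming the IDLE_STACK_SIZE macro defined earlier in this file (illustrative only, not code from the commit):

#include <stdint.h>

/* Illustrative restatement of the table above: the top of CPU n's IDLE
 * stack sits (n + 1) * IDLE_STACK_SIZE above the stack allocation base,
 * minus 16 bytes to keep the initial RSP 16-byte aligned.
 */

static uintptr_t example_idle_stacktop(uintptr_t stack_base, int cpu)
{
  return stack_base + ((uintptr_t)(cpu + 1) * IDLE_STACK_SIZE) - 16;
}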
/****************************************************************************
 * Name: up_allocate_heap
@@ -69,11 +95,16 @@ const uintptr_t g_idle_topstack = (uintptr_t)_ebss +

void up_allocate_heap(void **heap_start, size_t *heap_size)
{
  uintptr_t hstart;
  uintptr_t topstack;

  board_autoled_on(LED_HEAPALLOCATE);

  topstack = g_idle_topstack[CONFIG_SMP_NCPUS - 1] + 8;

  /* Calculate the end of .bss section */

  uintptr_t hstart = (g_idle_topstack + PAGE_SIZE - 1) & PAGE_MASK;
  hstart = (topstack + PAGE_SIZE - 1) & PAGE_MASK;
  *heap_start = (void *)hstart;

  /* The size is the rest of the RAM */
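With this hunk the heap no longer starts right after a single IDLE stack; it starts above the top of the last CPU's IDLE stack, rounded up to a page boundary. A minimal sketch of that rounding, assuming 4 KiB pages and PAGE_MASK == ~(PAGE_SIZE - 1) (both assumptions, not taken from this diff; the EX_* names are hypothetical stand-ins):

#include <stdint.h>

/* Illustrative page rounding used by up_allocate_heap() above. */

#define EX_PAGE_SIZE  4096u
#define EX_PAGE_MASK  (~((uintptr_t)EX_PAGE_SIZE - 1))

static uintptr_t example_heap_start(uintptr_t topstack)
{
  /* First page boundary at or above the last CPU's stack top */

  return (topstack + EX_PAGE_SIZE - 1) & EX_PAGE_MASK;
}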
|
@@ -140,7 +140,7 @@ typedef void (*up_vector_t)(void);
 * end of the heap is CONFIG_RAM_END
 */

extern const uintptr_t g_idle_topstack;
extern const uintptr_t g_idle_topstack[];

/* Address of the saved user stack pointer */
@@ -52,6 +52,10 @@ if(CONFIG_ARCH_HAVE_TESTSET)
  list(APPEND SRCS intel64_testset.S)
endif()

if(CONFIG_SMP)
  list(APPEND SRCS intel64_cpuidlestack.c intel64_cpupause.c intel64_cpustart.c)
endif()

if(CONFIG_MULTBOOT2_FB_TERM)
  list(APPEND SRCS intel64_mbfb.c)
endif()
@@ -38,6 +38,12 @@ ifeq ($(CONFIG_ARCH_HAVE_TESTSET), y)
CHIP_ASRCS += intel64_testset.S
endif

ifeq ($(CONFIG_SMP),y)
CHIP_CSRCS += intel64_cpuidlestack.c
CHIP_CSRCS += intel64_cpupause.c
CHIP_CSRCS += intel64_cpustart.c
endif

# Configuration-dependent intel64 files

ifeq ($(CONFIG_MULTBOOT2_FB_TERM),y)
arch/x86_64/src/intel64/intel64_cpuidlestack.c (new file, 97 lines)
@@ -0,0 +1,97 @@
|
|||
/****************************************************************************
|
||||
* arch/x86_64/src/intel64/intel64_cpuidlestack.c
|
||||
*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership. The
|
||||
* ASF licenses this file to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance with the
|
||||
* License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
* License for the specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*
|
||||
****************************************************************************/
|
||||
|
||||
/****************************************************************************
|
||||
* Included Files
|
||||
****************************************************************************/
|
||||
|
||||
#include <nuttx/config.h>
|
||||
|
||||
#include <sys/types.h>
|
||||
|
||||
#include <nuttx/arch.h>
|
||||
#include <nuttx/sched.h>
|
||||
|
||||
#include "x86_64_internal.h"
|
||||
|
||||
/****************************************************************************
|
||||
* Public Functions
|
||||
****************************************************************************/
|
||||
|
||||
/****************************************************************************
|
||||
* Name: up_cpu_idlestack
|
||||
*
|
||||
* Description:
|
||||
* Allocate a stack for the CPU[n] IDLE task (n > 0) if appropriate and
|
||||
* setup up stack-related information in the IDLE task's TCB. This
|
||||
* function is always called before up_cpu_start(). This function is
|
||||
* only called for the CPU's initial IDLE task; up_create_task is used for
|
||||
* all normal tasks, pthreads, and kernel threads for all CPUs.
|
||||
*
|
||||
* The initial IDLE task is a special case because the CPUs can be started
|
||||
* in different ways in different environments:
|
||||
*
|
||||
* 1. The CPU may already have been started and waiting in a low power
|
||||
* state for up_cpu_start(). In this case, the IDLE thread's stack
|
||||
* has already been allocated and is already in use. Here
|
||||
* up_cpu_idlestack() only has to provide information about the
|
||||
* already allocated stack.
|
||||
*
|
||||
* 2. The CPU may be disabled but started when up_cpu_start() is called.
|
||||
* In this case, a new stack will need to be created for the IDLE
|
||||
* thread and this function is then equivalent to:
|
||||
*
|
||||
* return up_create_stack(tcb, stack_size, TCB_FLAG_TTYPE_KERNEL);
|
||||
*
|
||||
* The following TCB fields must be initialized by this function:
|
||||
*
|
||||
* - adj_stack_size: Stack size after adjustment for hardware, processor,
|
||||
* etc. This value is retained only for debug purposes.
|
||||
* - stack_alloc_ptr: Pointer to allocated stack
|
||||
* - stack_base_ptr: Adjusted stack base pointer after the TLS Data and
|
||||
* Arguments has been removed from the stack allocation.
|
||||
*
|
||||
* Input Parameters:
|
||||
* - cpu: CPU index that indicates which CPU the IDLE task is
|
||||
* being created for.
|
||||
* - tcb: The TCB of new CPU IDLE task
|
||||
* - stack_size: The requested stack size for the IDLE task. At least
|
||||
* this much must be allocated. This should be
|
||||
* CONFIG_SMP_STACK_SIZE.
|
||||
*
|
||||
****************************************************************************/
|
||||
|
||||
int up_cpu_idlestack(int cpu, struct tcb_s *tcb, size_t stack_size)
|
||||
{
|
||||
#if CONFIG_SMP_NCPUS > 1
|
||||
uintptr_t stack_alloc;
|
||||
|
||||
DEBUGASSERT(cpu > 0 && cpu < CONFIG_SMP_NCPUS && tcb != NULL &&
|
||||
stack_size <= CONFIG_IDLETHREAD_STACKSIZE);
|
||||
|
||||
/* Get the top of the stack */
|
||||
|
||||
stack_alloc = (uintptr_t)g_idle_topstack[cpu];
|
||||
tcb->adj_stack_size = stack_size - 8;
|
||||
tcb->stack_alloc_ptr = (void *)stack_alloc;
|
||||
tcb->stack_base_ptr = tcb->stack_alloc_ptr;
|
||||
#endif
|
||||
return OK;
|
||||
}
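As context for how this function is meant to be used (the call sites are in the NuttX scheduler, not in this diff), a hedged sketch of the expected bring-up order for a secondary CPU:

#include <nuttx/config.h>
#include <nuttx/arch.h>
#include <nuttx/sched.h>

/* Hypothetical bring-up sequence for CPU 1 that mirrors the description
 * above; the real call sites live in the NuttX scheduler, not here.
 */

static int example_bringup_cpu1(struct tcb_s *idle_tcb)
{
  int ret;

  /* Point the IDLE TCB at the preallocated per-CPU stack */

  ret = up_cpu_idlestack(1, idle_tcb, CONFIG_IDLETHREAD_STACKSIZE);
  if (ret < 0)
    {
      return ret;
    }

  /* Start the AP; it eventually enters x86_64_ap_boot() */

  return up_cpu_start(1);
}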
|
arch/x86_64/src/intel64/intel64_cpupause.c (new file, 410 lines)
@@ -0,0 +1,410 @@
|
|||
/****************************************************************************
|
||||
* arch/x86_64/src/intel64/intel64_cpupause.c
|
||||
*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership. The
|
||||
* ASF licenses this file to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance with the
|
||||
* License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
* License for the specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*
|
||||
****************************************************************************/
|
||||
|
||||
/****************************************************************************
|
||||
* Included Files
|
||||
****************************************************************************/
|
||||
|
||||
#include <nuttx/config.h>
|
||||
|
||||
#include <stdint.h>
|
||||
#include <assert.h>
|
||||
#include <debug.h>
|
||||
#include <string.h>
|
||||
#include <stdio.h>
|
||||
|
||||
#include <arch/irq.h>
|
||||
|
||||
#include <nuttx/arch.h>
|
||||
#include <nuttx/spinlock.h>
|
||||
#include <nuttx/sched_note.h>
|
||||
|
||||
#include "sched/sched.h"
|
||||
|
||||
#include "x86_64_internal.h"
|
||||
|
||||
/****************************************************************************
|
||||
* Public Data
|
||||
****************************************************************************/
|
||||
|
||||
/* These spinlocks are used in the SMP configuration in order to implement
|
||||
* up_cpu_pause(). The protocol for CPUn to pause CPUm is as follows
|
||||
*
|
||||
* 1. The up_cpu_pause() implementation on CPUn locks both g_cpu_wait[m]
|
||||
* and g_cpu_paused[m]. CPUn then waits spinning on g_cpu_paused[m].
|
||||
* 2. CPUm receives the interrupt it (1) unlocks g_cpu_paused[m] and
|
||||
* (2) locks g_cpu_wait[m]. The first unblocks CPUn and the second
|
||||
* blocks CPUm in the interrupt handler.
|
||||
*
|
||||
* When CPUm resumes, CPUn unlocks g_cpu_wait[m] and the interrupt handler
|
||||
* on CPUm continues. CPUm must, of course, also then unlock g_cpu_wait[m]
|
||||
* so that it will be ready for the next pause operation.
|
||||
*/
|
||||
|
||||
volatile spinlock_t g_cpu_wait[CONFIG_SMP_NCPUS];
|
||||
volatile spinlock_t g_cpu_paused[CONFIG_SMP_NCPUS];
|
||||
volatile spinlock_t g_cpu_resumed[CONFIG_SMP_NCPUS];
|
||||
|
||||
/****************************************************************************
|
||||
* Public Functions
|
||||
****************************************************************************/
|
||||
|
||||
/****************************************************************************
|
||||
* Name: up_cpu_pausereq
|
||||
*
|
||||
* Description:
|
||||
* Return true if a pause request is pending for this CPU.
|
||||
*
|
||||
* Input Parameters:
|
||||
* cpu - The index of the CPU to be queried
|
||||
*
|
||||
* Returned Value:
|
||||
* true = a pause request is pending.
|
||||
* false = no pause request is pending.
|
||||
*
|
||||
****************************************************************************/
|
||||
|
||||
bool up_cpu_pausereq(int cpu)
|
||||
{
|
||||
return spin_is_locked(&g_cpu_paused[cpu]);
|
||||
}
|
||||
|
||||
/****************************************************************************
|
||||
* Name: up_cpu_paused_save
|
||||
*
|
||||
* Description:
|
||||
* Handle a pause request from another CPU. Normally, this logic is
|
||||
* executed from interrupt handling logic within the architecture-specific
* implementation. However, it is sometimes necessary to perform the pending
|
||||
* pause operation in other contexts where the interrupt cannot be taken
|
||||
* in order to avoid deadlocks.
|
||||
*
|
||||
* Input Parameters:
|
||||
* None
|
||||
*
|
||||
* Returned Value:
|
||||
* On success, OK is returned. Otherwise, a negated errno value indicating
|
||||
* the nature of the failure is returned.
|
||||
*
|
||||
****************************************************************************/
|
||||
|
||||
int up_cpu_paused_save(void)
|
||||
{
|
||||
struct tcb_s *tcb = this_task();
|
||||
|
||||
/* Update scheduler parameters */
|
||||
|
||||
nxsched_suspend_scheduler(tcb);
|
||||
|
||||
#ifdef CONFIG_SCHED_INSTRUMENTATION
|
||||
/* Notify that we are paused */
|
||||
|
||||
sched_note_cpu_paused(tcb);
|
||||
#endif
|
||||
|
||||
/* Save the current context at CURRENT_REGS into the TCB at the head
|
||||
* of the assigned task list for this CPU.
|
||||
*/
|
||||
|
||||
x86_64_savestate(tcb->xcp.regs);
|
||||
|
||||
return OK;
|
||||
}
|
||||
|
||||
/****************************************************************************
|
||||
* Name: up_cpu_paused
|
||||
*
|
||||
* Description:
|
||||
* Handle a pause request from another CPU. Normally, this logic is
|
||||
* executed from interrupt handling logic within the architecture-specific
* implementation. However, it is sometimes necessary to perform the pending
|
||||
* pause operation in other contexts where the interrupt cannot be taken
|
||||
* in order to avoid deadlocks.
|
||||
*
|
||||
* This function performs the following operations:
|
||||
*
|
||||
* 1. It saves the current task state at the head of the current assigned
|
||||
* task list.
|
||||
* 2. It waits on a spinlock, then
|
||||
* 3. Returns from interrupt, restoring the state of the new task at the
|
||||
* head of the ready to run list.
|
||||
*
|
||||
* Input Parameters:
|
||||
* cpu - The index of the CPU to be paused
|
||||
*
|
||||
* Returned Value:
|
||||
* On success, OK is returned. Otherwise, a negated errno value indicating
|
||||
* the nature of the failure is returned.
|
||||
*
|
||||
****************************************************************************/
|
||||
|
||||
int up_cpu_paused(int cpu)
|
||||
{
|
||||
/* Release the g_cpu_paused spinlock to synchronize with the
|
||||
* requesting CPU.
|
||||
*/
|
||||
|
||||
spin_unlock(&g_cpu_paused[cpu]);
|
||||
|
||||
/* Ensure the CPU has been resumed to avoid causing a deadlock */
|
||||
|
||||
spin_lock(&g_cpu_resumed[cpu]);
|
||||
|
||||
/* Wait for the spinlock to be released. The requesting CPU will release
|
||||
* the spinlock when the CPU is resumed.
|
||||
*/
|
||||
|
||||
spin_lock(&g_cpu_wait[cpu]);
|
||||
|
||||
spin_unlock(&g_cpu_wait[cpu]);
|
||||
spin_unlock(&g_cpu_resumed[cpu]);
|
||||
|
||||
return OK;
|
||||
}
|
||||
|
||||
/****************************************************************************
|
||||
* Name: up_cpu_paused_restore
|
||||
*
|
||||
* Description:
|
||||
* Restore the state of the CPU after it was paused via up_cpu_pause(),
|
||||
* and resume normal tasking.
|
||||
*
|
||||
* Input Parameters:
|
||||
* None
|
||||
*
|
||||
* Returned Value:
|
||||
* On success, OK is returned. Otherwise, a negated errno value indicating
|
||||
* the nature of the failure is returned.
|
||||
*
|
||||
****************************************************************************/
|
||||
|
||||
int up_cpu_paused_restore(void)
|
||||
{
|
||||
struct tcb_s *tcb = this_task();
|
||||
|
||||
#ifdef CONFIG_SCHED_INSTRUMENTATION
|
||||
/* Notify that we have resumed */
|
||||
|
||||
sched_note_cpu_resumed(tcb);
|
||||
#endif
|
||||
|
||||
/* Reset scheduler parameters */
|
||||
|
||||
nxsched_resume_scheduler(tcb);
|
||||
|
||||
/* Then switch contexts. Any necessary address environment changes
|
||||
* will be made when the interrupt returns.
|
||||
*/
|
||||
|
||||
x86_64_restorestate(tcb->xcp.regs);
|
||||
|
||||
return OK;
|
||||
}
|
||||
|
||||
/****************************************************************************
|
||||
* Name: up_pause_handler
|
||||
*
|
||||
* Description:
|
||||
* Inter-CPU interrupt handler
|
||||
*
|
||||
* Input Parameters:
|
||||
* Standard interrupt handler inputs
|
||||
*
|
||||
* Returned Value:
|
||||
* Should always return OK
|
||||
*
|
||||
****************************************************************************/
|
||||
|
||||
int up_pause_handler(int irq, void *c, void *arg)
|
||||
{
|
||||
int cpu = up_cpu_index();
|
||||
|
||||
/* Check for false alarms. Such false alarms could occur as a consequence
* of some deadlock-breaking logic that might have already serviced the
* pause IPI by calling up_cpu_paused().
|
||||
*/
|
||||
|
||||
if (up_cpu_pausereq(cpu))
|
||||
{
|
||||
/* NOTE: The following enter_critical_section() will call
|
||||
* up_cpu_paused() to process a pause request to break a deadlock
|
||||
* because the caller held a critical section. Once up_cpu_paused()
|
||||
* finished, the caller will proceed and release the g_cpu_irqlock.
|
||||
* Then this CPU will acquire g_cpu_irqlock in the function.
|
||||
*/
|
||||
|
||||
irqstate_t flags = enter_critical_section();
|
||||
|
||||
/* NOTE: the pause request should not exist here */
|
||||
|
||||
DEBUGVERIFY(!up_cpu_pausereq(cpu));
|
||||
|
||||
leave_critical_section(flags);
|
||||
}
|
||||
|
||||
return OK;
|
||||
}
|
||||
|
||||
/****************************************************************************
|
||||
* Name: up_cpu_async_pause
|
||||
*
|
||||
* Description:
|
||||
* Pause task execution on the CPU: check whether there are tasks
* delivered to the specified CPU and try to run them.
|
||||
*
|
||||
* Input Parameters:
|
||||
* cpu - The index of the CPU to be paused.
|
||||
*
|
||||
* Returned Value:
|
||||
* Zero on success; a negated errno value on failure.
|
||||
*
|
||||
* Assumptions:
|
||||
* Called from within a critical section;
|
||||
*
|
||||
****************************************************************************/
|
||||
|
||||
inline_function int up_cpu_async_pause(int cpu)
|
||||
{
|
||||
cpu_set_t cpuset;
|
||||
|
||||
CPU_ZERO(&cpuset);
|
||||
CPU_SET(cpu, &cpuset);
|
||||
|
||||
up_trigger_irq(SMP_IPI_IRQ, cpuset);
|
||||
|
||||
return OK;
|
||||
}
|
||||
|
||||
/****************************************************************************
|
||||
* Name: up_cpu_pause
|
||||
*
|
||||
* Description:
|
||||
* Save the state of the current task at the head of the
|
||||
* g_assignedtasks[cpu] task list and then pause task execution on the
|
||||
* CPU.
|
||||
*
|
||||
* This function is called by the OS when the logic executing on one CPU
|
||||
* needs to modify the state of the g_assignedtasks[cpu] list for another
|
||||
* CPU.
|
||||
*
|
||||
* Input Parameters:
|
||||
* cpu - The index of the CPU to be stopped.
|
||||
*
|
||||
* Returned Value:
|
||||
* Zero on success; a negated errno value on failure.
|
||||
*
|
||||
****************************************************************************/
|
||||
|
||||
int up_cpu_pause(int cpu)
|
||||
{
|
||||
sinfo("cpu=%d\n", cpu);
|
||||
|
||||
#ifdef CONFIG_SCHED_INSTRUMENTATION
|
||||
/* Notify of the pause event */
|
||||
|
||||
sched_note_cpu_pause(this_task(), cpu);
|
||||
#endif
|
||||
|
||||
DEBUGASSERT(cpu >= 0 && cpu < CONFIG_SMP_NCPUS && cpu != this_cpu());
|
||||
|
||||
/* Take both spinlocks. The g_cpu_wait spinlock will prevent the pause IPI
* handler from returning until up_cpu_resume() is called; g_cpu_paused
* is a handshake that will prevent this function from returning until
* the CPU is actually paused.
* Note that we might spin before getting g_cpu_wait; this just means that
* the other CPU still hasn't finished responding to the previous resume
* request.
*/
|
||||
|
||||
DEBUGASSERT(!spin_is_locked(&g_cpu_paused[cpu]));
|
||||
|
||||
spin_lock(&g_cpu_wait[cpu]);
|
||||
spin_lock(&g_cpu_paused[cpu]);
|
||||
|
||||
/* Execute Pause IRQ to CPU(cpu) */
|
||||
|
||||
up_cpu_async_pause(cpu);
|
||||
|
||||
/* Wait for the other CPU to unlock g_cpu_paused meaning that
|
||||
* it is fully paused and ready for up_cpu_resume();
|
||||
*/
|
||||
|
||||
spin_lock(&g_cpu_paused[cpu]);
|
||||
|
||||
spin_unlock(&g_cpu_paused[cpu]);
|
||||
|
||||
/* On successful return g_cpu_wait will be locked, the other CPU will be
|
||||
* spinning on g_cpu_wait and will not continue until up_cpu_resume() is
|
||||
* called. g_cpu_paused will be unlocked in any case.
|
||||
*/
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/****************************************************************************
|
||||
* Name: up_cpu_resume
|
||||
*
|
||||
* Description:
|
||||
* Restart the cpu after it was paused via up_cpu_pause(), restoring the
|
||||
* state of the task at the head of the g_assignedtasks[cpu] list, and
|
||||
* resume normal tasking.
|
||||
*
|
||||
* This function is called after up_cpu_pause() in order to resume operation
|
||||
* the CPU after modifying its g_assignedtasks[cpu] list.
|
||||
*
|
||||
* Input Parameters:
|
||||
* cpu - The index of the CPU being re-started.
|
||||
*
|
||||
* Returned Value:
|
||||
* Zero on success; a negated errno value on failure.
|
||||
*
|
||||
****************************************************************************/
|
||||
|
||||
int up_cpu_resume(int cpu)
|
||||
{
|
||||
sinfo("cpu=%d\n", cpu);
|
||||
|
||||
#ifdef CONFIG_SCHED_INSTRUMENTATION
|
||||
/* Notify of the resume event */
|
||||
|
||||
sched_note_cpu_resume(this_task(), cpu);
|
||||
#endif
|
||||
|
||||
DEBUGASSERT(cpu >= 0 && cpu < CONFIG_SMP_NCPUS && cpu != this_cpu());
|
||||
|
||||
/* Release the spinlock. Releasing the spinlock will cause the pause IPI
* handler on 'cpu' to continue and return from interrupt to the newly
|
||||
* established thread.
|
||||
*/
|
||||
|
||||
DEBUGASSERT(spin_is_locked(&g_cpu_wait[cpu]) &&
|
||||
!spin_is_locked(&g_cpu_paused[cpu]));
|
||||
|
||||
spin_unlock(&g_cpu_wait[cpu]);
|
||||
|
||||
/* Ensure the CPU has been resumed to avoid causing a deadlock */
|
||||
|
||||
spin_lock(&g_cpu_resumed[cpu]);
|
||||
|
||||
spin_unlock(&g_cpu_resumed[cpu]);
|
||||
|
||||
return 0;
|
||||
}
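Putting the pieces of this file together, the caller-side pattern looks roughly as follows; this is a hedged sketch of how the scheduler is expected to use the pause/resume pair, not code from this commit:

#include <nuttx/arch.h>

/* Hypothetical caller-side pattern for the pause/resume handshake
 * implemented above; the real callers live in sched/, not here.
 */

static void example_modify_other_cpu(int cpu)
{
  if (up_cpu_pause(cpu) == 0)   /* Target now spins in the pause IPI handler */
    {
      /* ...safely modify g_assignedtasks[cpu] here... */

      up_cpu_resume(cpu);       /* Let the target return from its interrupt */
    }
}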
|
arch/x86_64/src/intel64/intel64_cpustart.c (new file, 219 lines)
@@ -0,0 +1,219 @@
|
|||
/****************************************************************************
|
||||
* arch/x86_64/src/intel64/intel64_cpustart.c
|
||||
*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership. The
|
||||
* ASF licenses this file to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance with the
|
||||
* License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
* License for the specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*
|
||||
****************************************************************************/
|
||||
|
||||
/****************************************************************************
|
||||
* Included Files
|
||||
****************************************************************************/
|
||||
|
||||
#include <nuttx/config.h>
|
||||
|
||||
#include <assert.h>
|
||||
#include <debug.h>
|
||||
|
||||
#include <arch/arch.h>
|
||||
#include <arch/irq.h>
|
||||
#include <nuttx/arch.h>
|
||||
|
||||
#include "init/init.h"
|
||||
|
||||
#include "intel64_lowsetup.h"
|
||||
#include "intel64_cpu.h"
|
||||
|
||||
/****************************************************************************
|
||||
* Private Types
|
||||
****************************************************************************/
|
||||
|
||||
/****************************************************************************
|
||||
* External functions
|
||||
****************************************************************************/
|
||||
|
||||
extern void __ap_entry(void);
|
||||
extern int up_pause_handler(int irq, void *c, void *arg);
|
||||
|
||||
/****************************************************************************
|
||||
* Private Functions
|
||||
****************************************************************************/
|
||||
|
||||
/****************************************************************************
|
||||
* Name: x86_64_ap_startup
|
||||
*
|
||||
* Description:
|
||||
* Start up the given AP core
|
||||
*
|
||||
****************************************************************************/
|
||||
|
||||
static int x86_64_ap_startup(int cpu)
|
||||
{
|
||||
uint64_t dest = 0;
|
||||
uint64_t vect = 0;
|
||||
uint64_t regval = 0;
|
||||
|
||||
sinfo("cpu=%d\n", cpu);
|
||||
|
||||
/* Get destination - must be LOAPIC id */
|
||||
|
||||
dest = MSR_X2APIC_DESTINATION((uint64_t)x86_64_cpu_to_loapic(cpu));
|
||||
|
||||
/* Get the AP trampoline from a fixed address */
|
||||
|
||||
vect = (uint32_t)((uintptr_t)&__ap_entry) >> 12;
|
||||
|
||||
/* Send an INIT IPI to the CPU */
|
||||
|
||||
regval = MSR_X2APIC_ICR_INIT | dest;
|
||||
write_msr(MSR_X2APIC_ICR, regval);
|
||||
|
||||
/* Wait for 10 ms */
|
||||
|
||||
up_mdelay(10);
|
||||
SP_DMB();
|
||||
|
||||
/* Send an STARTUP IPI to the CPU */
|
||||
|
||||
regval = MSR_X2APIC_ICR_STARTUP | dest | vect;
|
||||
write_msr(MSR_X2APIC_ICR, regval);
|
||||
|
||||
/* Wait for AP ready */
|
||||
|
||||
up_udelay(300);
|
||||
SP_DMB();
|
||||
|
||||
/* Check CPU ready flag */
|
||||
|
||||
if (x86_64_cpu_ready_get(cpu) == false)
|
||||
{
|
||||
sinfo("failed to startup cpu=%d\n", cpu);
|
||||
return -EBUSY;
|
||||
}
|
||||
|
||||
return OK;
|
||||
}
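The STARTUP (SIPI) vector computed above is just the physical page number of the trampoline: __ap_entry is 4 KiB aligned, so shifting its address right by 12 yields the 8-bit vector, and the AP begins real-mode execution at vector << 12. A small illustrative check of that encoding (not part of the commit):

#include <assert.h>
#include <stdint.h>

/* Illustrative check of the SIPI vector encoding used above: the AP starts
 * at (vector << 12), so the trampoline must be 4 KiB aligned and below
 * 1 MiB for the 8-bit vector to reach it.
 */

static uint64_t example_sipi_vector(uintptr_t trampoline)
{
  assert((trampoline & 0xfff) == 0);      /* 4 KiB aligned */
  assert(trampoline < 0x100000);          /* reachable in real mode */

  return (uint64_t)(trampoline >> 12);
}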
|
||||
|
||||
/****************************************************************************
|
||||
* Public Functions
|
||||
****************************************************************************/
|
||||
|
||||
/****************************************************************************
|
||||
* Name: x86_64_ap_boot
|
||||
*
|
||||
* Description:
|
||||
* Boot handler for AP core[cpu]
|
||||
*
|
||||
* Input Parameters:
|
||||
* None (the CPU index is obtained internally via x86_64_cpu_count_get())
|
||||
*
|
||||
* Returned Value:
|
||||
* None
|
||||
*
|
||||
****************************************************************************/
|
||||
|
||||
void x86_64_ap_boot(void)
|
||||
{
|
||||
uint8_t cpu = 0;
|
||||
|
||||
/* Do some checking on CPU compatibilities at the top of this function */
|
||||
|
||||
x86_64_check_and_enable_capability();
|
||||
|
||||
/* Reload the GDTR with mapped high memory address */
|
||||
|
||||
setgdt((void *)g_gdt64, (uintptr_t)(&g_gdt64_low_end - &g_gdt64_low) - 1);
|
||||
|
||||
/* Get CPU ID */
|
||||
|
||||
cpu = x86_64_cpu_count_get();
|
||||
|
||||
/* Store CPU private data */
|
||||
|
||||
x86_64_cpu_priv_set(cpu);
|
||||
|
||||
/* Configure interrupts */
|
||||
|
||||
up_irqinitialize();
|
||||
|
||||
#ifdef CONFIG_SCHED_INSTRUMENTATION
|
||||
/* Notify that this CPU has started */
|
||||
|
||||
sched_note_cpu_started(this_task());
|
||||
#endif
|
||||
|
||||
sinfo("cpu=%d\n", cpu);
|
||||
|
||||
/* Connect Pause IRQ to CPU */
|
||||
|
||||
irq_attach(SMP_IPI_IRQ, up_pause_handler, NULL);
|
||||
up_enable_irq(SMP_IPI_IRQ);
|
||||
|
||||
/* CPU ready */
|
||||
|
||||
x86_64_cpu_ready_set(cpu);
|
||||
|
||||
/* Revoke the lower memory if all CPUs are up */
|
||||
|
||||
if (x86_64_cpu_count_get() >= CONFIG_SMP_NCPUS)
|
||||
{
|
||||
__revoke_low_memory();
|
||||
}
|
||||
|
||||
/* Then transfer control to the IDLE task */
|
||||
|
||||
nx_idle_trampoline();
|
||||
}
|
||||
|
||||
/****************************************************************************
|
||||
* Name: up_cpu_start
|
||||
*
|
||||
* Description:
|
||||
* In an SMP configuration, only one CPU is initially active (CPU 0).
|
||||
* System initialization occurs on that single thread. At the completion of
|
||||
* the initialization of the OS, just before beginning normal multitasking,
|
||||
* the additional CPUs would be started by calling this function.
|
||||
*
|
||||
* Each CPU is provided the entry point to its IDLE task when started. A
|
||||
* TCB for each CPU's IDLE task has been initialized and placed in the
|
||||
* CPU's g_assignedtasks[cpu] list. No stack has been allocated or
|
||||
* initialized.
|
||||
*
|
||||
* The OS initialization logic calls this function repeatedly until each
|
||||
* CPU has been started, 1 through (CONFIG_SMP_NCPUS-1).
|
||||
*
|
||||
* Input Parameters:
|
||||
* cpu - The index of the CPU being started. This will be a numeric
|
||||
* value in the range of one to (CONFIG_SMP_NCPUS-1).
|
||||
* (CPU 0 is already active)
|
||||
*
|
||||
* Returned Value:
|
||||
* Zero on success; a negated errno value on failure.
|
||||
*
|
||||
****************************************************************************/
|
||||
|
||||
int up_cpu_start(int cpu)
|
||||
{
|
||||
int ret = OK;
|
||||
|
||||
if (cpu != 0)
|
||||
{
|
||||
/* Startup AP core */
|
||||
|
||||
ret = x86_64_ap_startup(cpu);
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
|
@@ -25,6 +25,7 @@
|
|||
#include <nuttx/config.h>
|
||||
#include <arch/arch.h>
|
||||
#include <arch/multiboot2.h>
|
||||
#include <sys/types.h>
|
||||
|
||||
.file "intel64_head.S"
|
||||
|
||||
|
@@ -33,7 +34,7 @@
|
|||
****************************************************************************/
|
||||
|
||||
/* Memory Map: _sbss is the start of the BSS region (see ld.script) _ebss is
|
||||
* the end of the BSS regsion (see ld.script). The idle task stack starts at
|
||||
* the end of the BSS region (see ld.script). The idle task stack starts at
|
||||
* the end of BSS and is of size CONFIG_IDLETHREAD_STACKSIZE. The IDLE thread
|
||||
* is the thread that the system boots on and, eventually, becomes the idle,
|
||||
* do nothing task that runs only when there is nothing else to run. The
|
||||
|
@@ -45,11 +46,15 @@
|
|||
****************************************************************************/
|
||||
|
||||
.global __pmode_entry /* The 32bit protected mode entry */
|
||||
#ifdef CONFIG_SMP
|
||||
.global __ap_entry /* The 32bit real mode entry for AP */
|
||||
#endif
|
||||
.global __enable_sse_avx
|
||||
.global __enable_pcid
|
||||
.global __revoke_low_memory
|
||||
.global __nxstart /* __nxstart is defined elsewhere */
|
||||
.global nx_start /* nx_start is defined elsewhere */
|
||||
.global x86_64_ap_boot /* x86_64_ap_boot is defined elsewhere */
|
||||
.global g_idle_topstack /* The end of the idle stack, the start of the heap */
|
||||
.global g_mb_info_struct
|
||||
.global g_mb_magic
|
||||
|
@@ -118,7 +123,7 @@ __reset_entry:
|
|||
movl $loader_gdt_ptr, %ebx
|
||||
lgdtl (%ebx)
|
||||
|
||||
/* enable protected mode in CR0 */
|
||||
/* Enable protected mode in CR0 */
|
||||
mov %cr0, %eax
|
||||
or $X86_CR0_PE, %al
|
||||
mov %eax, %cr0
|
||||
|
@@ -134,11 +139,40 @@ loader_gdt:
|
|||
.quad 0x00cf9a000000ffff
|
||||
.quad 0x00cf92000000ffff
|
||||
loader_gdt_ptr:
|
||||
.short loader_gdt_ptr - loader_gdt - 1
|
||||
.short loader_gdt_ptr - loader_gdt - 1
|
||||
.long loader_gdt
|
||||
|
||||
.size __reset_entry, . - __reset_entry
|
||||
|
||||
bsp_done:
|
||||
.long 0
|
||||
|
||||
#ifdef CONFIG_SMP
|
||||
/****************************************************************************
|
||||
* Name: __ap_entry
|
||||
*
|
||||
* Description:
|
||||
* Entry point for AP CPU 32-bit real mode which must be aligned to 4096
|
||||
* bytes. This is simply a copy of __reset_entry.
|
||||
*
|
||||
****************************************************************************/
|
||||
|
||||
.type __ap_entry, @function
|
||||
.align(4096)
|
||||
__ap_entry:
|
||||
/* Load a GDT for protected mode */
|
||||
movl $loader_gdt_ptr, %ebx
|
||||
lgdtl (%ebx)
|
||||
|
||||
/* enable protected mode in CR0 */
|
||||
mov %cr0, %eax
|
||||
or $X86_CR0_PE, %al
|
||||
mov %eax, %cr0
|
||||
|
||||
/* Long jump into protected mode. Hardcode the address. */
|
||||
ljmpl $0x8, $0x100000
|
||||
#endif
|
||||
|
||||
/****************************************************************************
|
||||
* .text
|
||||
****************************************************************************/
|
||||
|
@@ -169,6 +203,11 @@ start32:
|
|||
movl %eax, g_mb_magic
|
||||
#endif
|
||||
|
||||
/* Jump to 64-bit initialization if this is an AP core */
|
||||
mov bsp_done, %eax
|
||||
cmp $0, %eax
|
||||
jne start64_init
|
||||
|
||||
/* initialize rest of the page directory */
|
||||
lea g_pd_low, %edi
|
||||
lea g_pt_low, %esi
|
||||
|
@@ -212,6 +251,7 @@ pd_loop:
|
|||
dec %ecx
|
||||
jnz pd_loop
|
||||
|
||||
start64_init:
|
||||
/* Populate the 1GB after 4GB boundary with Global mapping to kernel code.
|
||||
* This maps the lower 1GB to 4GB~5GB
|
||||
*/
|
||||
|
@@ -260,29 +300,56 @@ pd_loop:
|
|||
ljmpl $(X86_GDT_CODE_SEL), $start64
|
||||
|
||||
.code64
|
||||
|
||||
start64:
|
||||
/* Set Segment Registers for proper iret, etc. operation */
|
||||
mov $(X86_GDT_DATA_SEL), %ax
|
||||
mov %ax, %ss
|
||||
mov %ax, %ds
|
||||
mov %ax, %es
|
||||
mov %ax, %fs
|
||||
mov %ax, %gs
|
||||
/* Set Segment Registers for proper iret, etc. operation */
|
||||
mov $(X86_GDT_DATA_SEL), %ax
|
||||
mov %ax, %ss
|
||||
mov %ax, %ds
|
||||
mov %ax, %es
|
||||
mov %ax, %fs
|
||||
mov %ax, %gs
|
||||
|
||||
/* Properly setup RSP to idle stack */
|
||||
movabs $g_idle_topstack, %rbx
|
||||
mov (%rbx), %rsp
|
||||
/* Start BSP or AP */
|
||||
mov bsp_done, %eax
|
||||
cmp $0, %eax
|
||||
jne ap_start
|
||||
|
||||
/* We use jmp instruction below which doesn't push 1 byte on stack, so we
|
||||
* have to push a dummy value here, otherwise SSE instructions calledd
|
||||
* during initialization will fail.
|
||||
*/
|
||||
pushq $0
|
||||
/* Properly setup RSP to idle stack */
|
||||
movabs $g_idle_topstack, %rbx
|
||||
mov (%rbx), %rsp
|
||||
|
||||
/* Finally, we can start the OS */
|
||||
movabs $__nxstart, %rbx
|
||||
jmp *%rbx
|
||||
.size __pmode_entry, . - __pmode_entry
|
||||
/* Set bsp_done flag */
|
||||
movl $1, bsp_done
|
||||
|
||||
/* We use jmp instruction below which doesn't push 1 byte on stack, so we
|
||||
* have to push a dummy value here, otherwise SSE instructions called
|
||||
* during initialization will fail.
|
||||
*/
|
||||
pushq $0
|
||||
|
||||
/* Finally, we can start the OS */
|
||||
movabs $__nxstart, %rbx
|
||||
jmp *%rbx
|
||||
|
||||
ap_start:
|
||||
#ifdef CONFIG_SMP
|
||||
/* Get CPU ID */
|
||||
movl g_cpu_count, %edi
|
||||
|
||||
/* Setup AP stack */
|
||||
movabs $g_idle_topstack, %rbx
|
||||
movl %edi, %eax
|
||||
imul $8, %eax, %eax
|
||||
add %rax, %rbx
|
||||
mov (%rbx), %rsp
|
||||
|
||||
/* Jump to ap_start routine */
|
||||
movabs $x86_64_ap_boot, %rbx
|
||||
jmp *%rbx
|
||||
#endif
|
||||
|
||||
.size __pmode_entry, . - __pmode_entry
|
||||
|
||||
g_cpu_count:
|
||||
.long 1
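For readers more comfortable with C than with the assembly above, the AP stack selection amounts to indexing g_idle_topstack by the CPU number read from g_cpu_count, with 8-byte table entries; an illustrative equivalent (not code from the commit):

#include <stdint.h>

/* C equivalent of the AP stack selection above (illustrative): each AP
 * loads RSP from g_idle_topstack[cpu], where cpu comes from the shared
 * g_cpu_count counter and each table entry is 8 bytes wide.
 */

extern const uintptr_t g_idle_topstack[];

static uintptr_t example_ap_stack(int cpu)
{
  return g_idle_topstack[cpu];   /* same as *(base + cpu * 8) above */
}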
|
||||
|
@@ -377,23 +444,10 @@ __enable_pcid:
|
|||
|
||||
.section ".loader.data", "ax"
|
||||
|
||||
/* IST for 64 bit long mode will be filled in up_irq */
|
||||
/* TSS (IST) for 64 bit long mode will be filled in up_irq. */
|
||||
.align(16)
|
||||
g_ist64_low:
|
||||
.long 0
|
||||
.quad 0xdeadbeefdeadbee0
|
||||
.quad 0xdeadbeefdeadbee1
|
||||
.quad 0xdeadbeefdeadbee2
|
||||
.quad 0
|
||||
.quad 0
|
||||
.quad 0
|
||||
.quad 0
|
||||
.quad 0
|
||||
.quad 0
|
||||
.quad 0
|
||||
.quad 0
|
||||
.quad 0
|
||||
.word 0
|
||||
.fill X86_TSS_SIZE * CONFIG_SMP_NCPUS, 1, 0
|
||||
|
||||
/* GDT for 64 bit long mode */
|
||||
.align(16)
|
||||
|
@@ -405,8 +459,9 @@ g_gdt64_low:
|
|||
.quad X86_GDT_DATA_ENTRY
|
||||
.quad X86_GDT_CODE64_ENTRY
|
||||
g_gdt64_ist_low:
|
||||
.quad 0x0 /* TSS segment low */
|
||||
.quad 0x0 /* TSS segment high */
|
||||
/* TSS segment low + segment high per CPU */
|
||||
.fill CONFIG_SMP_NCPUS * 16, 1, 0
|
||||
|
||||
g_gdt64_low_end:
|
||||
|
||||
gdt64_ptr:
|
||||
|
|
|
@@ -58,7 +58,7 @@ void up_initial_state(struct tcb_s *tcb)
|
|||
|
||||
if (tcb->pid == IDLE_PROCESS_ID)
|
||||
{
|
||||
char *stack_ptr = (char *)(g_idle_topstack -
|
||||
char *stack_ptr = (char *)(g_idle_topstack[0] -
|
||||
CONFIG_IDLETHREAD_STACKSIZE);
|
||||
#ifdef CONFIG_STACK_COLORATION
|
||||
char *stack_end = (char *)up_getsp();
|
||||
|
|
|
@@ -53,7 +53,7 @@
|
|||
|
||||
struct intel64_irq_priv_s
|
||||
{
|
||||
uint8_t busy;
|
||||
cpu_set_t busy;
|
||||
};
|
||||
|
||||
/****************************************************************************
|
||||
|
@@ -433,27 +433,36 @@ static inline void up_idtinit(void)
|
|||
|
||||
void up_irqinitialize(void)
|
||||
{
|
||||
int cpu = up_cpu_index();
|
||||
|
||||
/* Initialize the TSS */
|
||||
|
||||
x86_64_cpu_tss_init(0);
|
||||
x86_64_cpu_tss_init(cpu);
|
||||
|
||||
/* Initialize the APIC */
|
||||
|
||||
up_apic_init();
|
||||
|
||||
if (cpu == 0)
|
||||
{
|
||||
#ifndef CONFIG_ARCH_INTEL64_DISABLE_INT_INIT
|
||||
/* Disable 8259 PIC */
|
||||
/* Disable 8259 PIC */
|
||||
|
||||
up_deinit_8259();
|
||||
up_deinit_8259();
|
||||
|
||||
/* Initialize the IOAPIC */
|
||||
/* Initialize the IOAPIC */
|
||||
|
||||
up_ioapic_init();
|
||||
up_ioapic_init();
|
||||
#endif
|
||||
|
||||
/* Initialize the IDT */
|
||||
/* Initialize the IDT */
|
||||
|
||||
up_idtinit();
|
||||
up_idtinit();
|
||||
}
|
||||
|
||||
/* Program the IDT - one IDT shared by all cores */
|
||||
|
||||
setidt(&g_idt_entries, sizeof(struct idt_entry_s) * NR_IRQS - 1);
|
||||
|
||||
/* And finally, enable interrupts */
|
||||
|
||||
|
@@ -487,7 +496,9 @@ void up_disable_irq(int irq)
|
|||
g_irq_priv[irq].busy -= 1;
|
||||
}
|
||||
|
||||
if (g_irq_priv[irq].busy == 0)
|
||||
CPU_CLR(up_cpu_index(), &g_irq_priv[irq].busy);
|
||||
|
||||
if (CPU_COUNT(&g_irq_priv[irq].busy) == 0)
|
||||
{
|
||||
/* One time disable */
|
||||
|
||||
|
@@ -517,7 +528,7 @@ void up_enable_irq(int irq)
|
|||
# ifndef CONFIG_IRQCHAIN
|
||||
/* Check if IRQ is free if we don't support IRQ chains */
|
||||
|
||||
if (g_irq_priv[irq].busy)
|
||||
if (CPU_ISSET(up_cpu_index(), &g_irq_priv[irq].busy))
|
||||
{
|
||||
ASSERT(0);
|
||||
}
|
||||
|
@@ -530,7 +541,7 @@ void up_enable_irq(int irq)
|
|||
ASSERT(0);
|
||||
}
|
||||
|
||||
if (g_irq_priv[irq].busy == 0)
|
||||
if (CPU_COUNT(&g_irq_priv[irq].busy) == 0)
|
||||
{
|
||||
/* One time enable */
|
||||
|
||||
|
@@ -540,7 +551,7 @@ void up_enable_irq(int irq)
|
|||
}
|
||||
}
|
||||
|
||||
g_irq_priv[irq].busy += 1;
|
||||
CPU_SET(up_cpu_index(), &g_irq_priv[irq].busy);
|
||||
|
||||
spin_unlock_irqrestore(&g_irq_spin, flags);
|
||||
#endif
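The hunks above change the per-IRQ busy field from a counter to a cpu_set_t, so the interrupt controller is programmed when the first CPU enables an IRQ and torn down only when the last CPU disables it. A hedged sketch of that bookkeeping (the helper names are hypothetical, not from the commit):

#include <sched.h>     /* cpu_set_t, CPU_* macros */
#include <stdbool.h>

/* Illustrative per-IRQ bookkeeping: "busy" is now a CPU bitmask. */

static bool example_first_enable(cpu_set_t *busy, int cpu)
{
  bool first = (CPU_COUNT(busy) == 0);
  CPU_SET(cpu, busy);
  return first;                  /* true -> program the I/O APIC entry */
}

static bool example_last_disable(cpu_set_t *busy, int cpu)
{
  CPU_CLR(cpu, busy);
  return CPU_COUNT(busy) == 0;   /* true -> mask the interrupt */
}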
|
||||
|
|
|
@@ -91,7 +91,11 @@ void intel64_lowsetup(void)
|
|||
|
||||
setgdt((void *)g_gdt64, (uintptr_t)(&g_gdt64_low_end - &g_gdt64_low) - 1);
|
||||
|
||||
/* Revoke the lower memory */
|
||||
#ifndef CONFIG_SMP
|
||||
/* Revoke the lower memory if not SMP, otherwise this is done in
|
||||
* x86_64_ap_boot() after the initialization of the last AP is finished.
|
||||
*/
|
||||
|
||||
__revoke_low_memory();
|
||||
#endif
|
||||
}
|
||||
|
|
|
@@ -130,7 +130,7 @@ void __nxstart(void)
|
|||
{
|
||||
uint64_t *dest = NULL;
|
||||
|
||||
/* This is only for BSP core. */
|
||||
/* This is only for BSP core. AP cores are handled by x86_64_ap_boot() */
|
||||
|
||||
/* Do some checking on CPU compatibilities at the top of this function.
|
||||
* BSS cleanup can be optimized with vector instructions, so we need to
|
||||
|
|