arch/arm: Fix the style issues in assembly files

Remove the unused header files and minimize the differences between the sub-arches.

Signed-off-by: Xiang Xiao <xiaoxiang@xiaomi.com>
This commit is contained in:
Xiang Xiao 2021-04-01 18:12:05 +08:00 committed by Abdelatif Guettouche
parent 7e1ae24c3c
commit 2268c19171
91 changed files with 1769 additions and 1694 deletions

View file

@ -1,4 +1,4 @@
/**************************************************************************
/****************************************************************************
* arch/arm/src/arm/arm_fullcontextrestore.S
*
* Licensed to the Apache Software Foundation (ASF) under one or more
@ -16,59 +16,60 @@
* License for the specific language governing permissions and limitations
* under the License.
*
**************************************************************************/
****************************************************************************/
/**************************************************************************
/****************************************************************************
* Included Files
**************************************************************************/
****************************************************************************/
#include <nuttx/irq.h>
#include "arm_internal.h"
#include <nuttx/config.h>
#include <arch/irq.h>
/**************************************************************************
/****************************************************************************
* Pre-processor Definitions
**************************************************************************/
****************************************************************************/
/**************************************************************************
* Private Types
**************************************************************************/
/****************************************************************************
* Public Symbols
****************************************************************************/
/**************************************************************************
* Private Function Prototypes
**************************************************************************/
.file "arm_fullcontextrestore.S"
/**************************************************************************
* Public Data
**************************************************************************/
/****************************************************************************
* Macros
****************************************************************************/
/**************************************************************************
* Private Data
**************************************************************************/
/**************************************************************************
* Private Functions
**************************************************************************/
/**************************************************************************
/****************************************************************************
* Public Functions
**************************************************************************/
****************************************************************************/
/**************************************************************************
/****************************************************************************
* Name: arm_fullcontextrestore
**************************************************************************/
*
* Description:
* Restore the current thread context. Full prototype is:
*
* void arm_fullcontextrestore(uint32_t *restoreregs) noreturn_function;
*
* Returned Value:
* None
*
****************************************************************************/
.globl arm_fullcontextrestore
.type arm_fullcontextrestore, function
arm_fullcontextrestore:
/* On entry, a1 (r0) holds address of the register save area */
/* On entry, a1 (r0) holds address of the register save area. All other
* registers are available for use.
*/
/* Recover all registers except for r0, r1, R15, and CPSR */
add r1, r0, #(4*REG_R2) /* Offset to REG_R2 storage */
ldmia r1, {r2-r14} /* Recover registers */
/* Create a stack frame to hold the PC */
/* Create a stack frame to hold the some registers */
sub sp, sp, #(3*4) /* Frame for three registers */
ldr r1, [r0, #(4*REG_R0)] /* Fetch the stored r0 value */
@ -89,8 +90,10 @@ arm_fullcontextrestore:
msr cpsr, r1 /* Set the CPSR */
/* Now recover r0 and r1. Then return to the address at the stop of
* the stack, destroying the stack frame
* the stack, destroying the stack frame
*/
ldmia sp!, {r0-r1, r15}
.size arm_fullcontextrestore, . - arm_fullcontextrestore
.size arm_fullcontextrestore, .-arm_fullcontextrestore
.end

View file

@ -27,18 +27,21 @@
#include "arm.h"
#include "chip.h"
#include "arm_internal.h"
#include "arm_arch.h"
#ifdef CONFIG_PAGING
# include <nuttx/page.h>
# include "pg_macros.h"
#endif
/**********************************************************************************
* Configuration
**********************************************************************************/
.file "arm_head.S"
#undef ALIGNMENT_TRAP
/****************************************************************************
* Configuration
****************************************************************************/
/* Hard-coded options */
#undef CPU_ALIGNMENT_TRAP
#undef CPU_DCACHE_WRITETHROUGH
#undef CPU_CACHE_ROUND_ROBIN
#undef CPU_DCACHE_DISABLE
@ -49,13 +52,13 @@
* 1. We execute in place in FLASH (CONFIG_BOOT_RUNFROMFLASH=y). In this case
* the boot logic must:
*
* - Configure SDRAM,
* - Configure SDRAM (if present),
* - Initialize the .data section in RAM, and
* - Clear .bss section
*/
#ifdef CONFIG_BOOT_RUNFROMFLASH
# error "Configuration not implemented"
# error "configuration not implemented"
# define CONFIG_SDRAM 1
/* Check for the identity mapping: For this configuration, this would be
@ -99,11 +102,11 @@
# define CONFIG_IDENTITY_TEXTMAP 1
# endif
/* 3. There is bootloader that copies us to DRAM (but probably not to the beginning)
/* 3. There is bootloader that copies us to SDRAM (but probably not to the beginning)
* (CONFIG_BOOT_RUNFROMFLASH=n && CONFIG_BOOT_COPYTORAM=n). In this case SDRAM
* was initialized by the boot loader, and this boot logic must:
*
* - Clear .bss section
* - Clear .bss section (data should be fully initialized)
*/
#else
@ -207,12 +210,15 @@
* Name: __start
****************************************************************************/
/* We assume the bootloader has already initialized most of the h/w for us
* and that only leaves us having to do some os specific things below.
/* We assume the bootloader has already initialized most of the h/w for
* us and that only leaves us having to do some os specific things
* below.
*/
.text
.global __start
.type __start, #function
__start:
/* Make sure that we are in SVC mode with all IRQs disabled */
@ -240,17 +246,17 @@ __start:
bne .Lpgtableclear
/* Create identity mapping for first MB of the .text section to support
* this startup logic executing out of the physical address space. This
* this start-up logic executing out of the physical address space. This
* identity mapping will be removed by .Lvstart (see below). Of course,
* we would only do this if the physical-virtual mapping is not already
* the identity mapping.
*/
#ifndef CONFIG_IDENTITY_TEXTMAP
mksection r0, r4 /* r0=phys. base section */
mksection r0, r4 /* r0=phys. base section */
ldr r1, .LCmmuflags /* FLGS=MMU_MEMFLAGS */
add r3, r1, r0 /* r3=flags + base */
str r3, [r4, r0, lsr #18] /* identity mapping */
add r3, r1, r0 /* r3=flags + base */
str r3, [r4, r0, lsr #18] /* identity mapping */
#endif
#ifdef CONFIG_PAGING
@ -269,35 +275,35 @@ __start:
*/
adr r0, .Ltxtspan
ldmia r0, {r0, r1, r2, r3, r5}
pg_l1span r0, r1, r2, r3, r5, r6
ldmia r0, {r0, r1, r2, r3, r5}
pg_l1span r0, r1, r2, r3, r5, r6
/* Then populate the L2 table for the locked text region only. */
adr r0, .Ltxtmap
ldmia r0, {r0, r1, r2, r3}
pg_l2map r0, r1, r2, r3, r5
ldmia r0, {r0, r1, r2, r3}
pg_l2map r0, r1, r2, r3, r5
/* Make sure that the page table is itself mapped and read/write-able.
* First, populate the L1 table:
*/
adr r0, .Lptabspan
ldmia r0, {r0, r1, r2, r3, r5}
pg_l1span r0, r1, r2, r3, r5, r6
ldmia r0, {r0, r1, r2, r3, r5}
pg_l1span r0, r1, r2, r3, r5, r6
/* Then populate the L2 table. */
adr r0, .Lptabmap
ldmia r0, {r0, r1, r2, r3}
pg_l2map r0, r1, r2, r3, r5
ldmia r0, {r0, r1, r2, r3}
pg_l2map r0, r1, r2, r3, r5
#else /* CONFIG_PAGING */
#ifdef CONFIG_IDENTITY_TEXTMAP
mksection r0, r4 /* r0=phys. base section */
mksection r0, r4 /* r0=phys. base section */
ldr r1, .LCmmuflags /* FLGS=MMU_MEMFLAGS */
add r3, r1, r0 /* r3=flags + base */
add r3, r1, r0 /* r3=flags + base */
#endif
/* Create a virtual single section mapping for the first MB of the .text
@ -310,8 +316,8 @@ __start:
*/
ldr r2, .LCvpgtable /* r2=virt. page table */
mksection r0, r2 /* r0=virt. base section */
str r3, [r4, r0, lsr #18] /* identity mapping */
mksection r0, r2 /* r0=virt. base section */
str r3, [r4, r0, lsr #18] /* identity mapping */
/* NOTE: No .data/.bss access should be attempted. This temporary mapping
* can only be assumed to cover the initial .text region.
@ -328,12 +334,12 @@ __start:
mov r0, #0
mcr p15, 0, r0, c7, c7 /* Invalidate I,D caches */
mcr p15, 0, r0, c7, c10, 4 /* Drain write buffer */
mcr p15, 0, r0, c7, c10, 4 /* Drain write buffer */
mcr p15, 0, r0, c8, c7 /* Invalidate I,D TLBs */
mcr p15, 0, r4, c2, c0 /* Load page table pointer */
#ifdef CPU_DCACHE_WRITETHROUGH
mov r0, #4 /* Disable write-back on caches explicitly */
mov r0, #4 /* Disable write-back on caches explicitly */
mcr p15, 7, r0, c15, c0, 0
#endif
@ -343,7 +349,7 @@ __start:
ldr lr, .LCvstart /* Abs. virtual address */
mov r0, #0x1f /* Domains 0, 1 = client */
mov r0, #0x1f /* Domains 0, 1 = client */
mcr p15, 0, r0, c3, c0 /* Load domain access register */
mrc p15, 0, r0, c1, c0 /* Get control register */
@ -402,20 +408,20 @@ __start:
#endif
/* CR_A - Alignment abort enable */
#ifdef ALIGNMENT_TRAP
#ifdef CPU_ALIGNMENT_TRAP
orr r0, r0, #(CR_A)
#endif
mcr p15, 0, r0, c1, c0, 0 /* write control reg */
mcr p15, 0, r0, c1, c0, 0 /* write control reg */
/* Get TMP=2 Processor ID register */
mrc p15, 0, r1, c0, c0, 0 /* read id reg */
mov r1,r1 /* Null-avoiding nop */
mov r1,r1 /* Null-avoiding nop */
mrc p15, 0, r1, c0, c0, 0 /* read id reg */
mov r1,r1 /* Null-avoiding nop */
mov r1,r1 /* Null-avoiding nop */
/* And "jump" to .Lvstart */
mov pc, lr
mov pc, lr
/****************************************************************************
* PC_Relative Data
@ -484,6 +490,7 @@ __start:
.align 5
.local .Lvstart
.type .Lvstart, %function
.Lvstart:
/* Remove the temporary mapping (if one was made). The following assumes
@ -495,23 +502,23 @@ __start:
#ifndef CONFIG_IDENTITY_TEXTMAP
ldr r4, .LCvpgtable /* r4=virtual page table */
ldr r1, .LCppgtable /* r1=phys. page table */
mksection r3, r1 /* r2=phys. base addr */
mov r0, #0 /* flags + base = 0 */
str r0, [r4, r3, lsr #18] /* Undo identity mapping */
mksection r3, r1 /* r2=phys. base addr */
mov r0, #0 /* flags + base = 0 */
str r0, [r4, r3, lsr #18] /* Undo identity mapping */
#endif
#if defined(CONFIG_PAGING)
/* Populate the L1 table for the data region */
adr r0, .Ldataspan
ldmia r0, {r0, r1, r2, r3, r4}
pg_l1span r0, r1, r2, r3, r4, r5
ldmia r0, {r0, r1, r2, r3, r4}
pg_l1span r0, r1, r2, r3, r4, r5
/* Populate the L2 table for the data region */
adr r0, .Ldatamap
ldmia r0, {r0, r1, r2, r3}
pg_l2map r0, r1, r2, r3, r4
ldmia r0, {r0, r1, r2, r3}
pg_l2map r0, r1, r2, r3, r4
#elif defined(CONFIG_BOOT_RUNFROMFLASH)
# error "Logic not implemented"
@ -533,7 +540,7 @@ __start:
*/
ldr r1, .LCmmuflags /* FLGS=MMU_MEMFLAGS */
add r3, r3, r1 /* r3=flags + base */
add r3, r3, r1 /* r3=flags + base */
add r0, r4, #(NUTTX_START_VADDR & 0xfff00000) >> 18
str r3, [r0], #4
@ -542,7 +549,7 @@ __start:
* memory region.
*/
.rept RX_NSECTIONS-1
.rept RX_NSECTIONS-1
add r3, r3, #SECTION_SIZE
str r3, [r0], #4
.endr
@ -562,15 +569,15 @@ __start:
/* Zero BSS and set up the stack pointer */
adr r0, .Linitparms
ldmia r0, {r0, r1, sp}
ldmia r0, {r0, r1, sp}
/* Clear the frame pointer and .bss */
mov fp, #0
.Lbssinit:
cmp r0, r1 /* Clear up to _bss_end_ */
strcc fp, [r0],#4
cmp r0, r1 /* Clear up to _bss_end_ */
strcc fp, [r0],#4
bcc .Lbssinit
/* If the .data section is in a separate, uninitialized address space,
@ -584,10 +591,10 @@ __start:
#if defined(CONFIG_BOOT_RUNFROMFLASH) || defined(CONFIG_PAGING)
adr r3, .Ldatainit
ldmia r3, {r0, r1, r2}
ldmia r3, {r0, r1, r2}
1: ldmia r0!, {r3 - r10}
stmia r1!, {r3 - r10}
1: ldmia r0!, {r3 - r10}
stmia r1!, {r3 - r10}
cmp r1, r2
blt 1b
#endif
@ -602,13 +609,13 @@ __start:
*/
adr r3, .Lstkinit
ldmia r3, {r0, r1, r2} /* R0 = start of IDLE stack; R1 = Size of stack; R2 = coloration */
ldmia r3, {r0, r1, r2} /* R0 = start of IDLE stack; R1 = Size of stack; R2 = coloration */
2: /* Top of the loop */
2: /* Top of the loop */
sub r1, r1, #1 /* R1 = Number of words remaining */
cmp r1, #0 /* Check (nwords == 0) */
str r2, [r0], #4 /* Save stack color word, increment stack address */
bne 2b /* Bottom of the loop */
str r2, [r0], #4 /* Save stack color word, increment stack address */
bne 2b /* Bottom of the loop */
#endif
@ -617,12 +624,17 @@ __start:
mov lr, #0 /* LR = return address (none) */
b nx_start /* Branch to nx_start */
/***************************************************************************
* Text-section constants
***************************************************************************/
/* Text-section constants:
*
* _sbss is the start of the BSS region (see ld.script)
* _ebss is the end of the BSS region (see ld.script)
* _sbss is the start of the BSS region (see linker script)
* _ebss is the end of the BSS region (see linker script)
*
* The idle task stack starts at the end of BSS and is of size
* Typical Configuration:
* The idle task stack usually starts at the end of BSS and is of size
* CONFIG_IDLETHREAD_STACKSIZE. The heap continues from there until the
* end of memory. See g_idle_topstack below.
*/
@ -673,10 +685,12 @@ __start:
#endif
.size .Lvstart, .-.Lvstart
/* Data section variables */
/***************************************************************************
* Data section variables
***************************************************************************/
/* This global variable is unsigned long g_idle_topstack and is
* exported from here only because of its coupling to .Linitparms
* exported from here only because of its coupling to .Lstackpointer
* above.
*/
@ -684,7 +698,9 @@ __start:
.align 4
.globl g_idle_topstack
.type g_idle_topstack, object
g_idle_topstack:
.long _ebss+CONFIG_IDLETHREAD_STACKSIZE
.size g_idle_topstack, .-g_idle_topstack
.end

View file

@ -26,7 +26,6 @@
#include "arm.h"
#include "arm_internal.h"
#include "arm_arch.h"
/****************************************************************************
* Macros
@ -36,8 +35,8 @@
#ifdef CONFIG_DEBUG_FEATURES
.macro showprogress, code
mov r0, #\code
bl arm_lowputc
mov r0, #\code
bl arm_lowputc
.endm
#else
.macro showprogress, code
@ -53,7 +52,7 @@
* below.
*/
.text
.global __start
.global __start
.type __start, #function
__start:
@ -62,31 +61,31 @@ __start:
mov r0, #(SVC_MODE | PSR_I_BIT | PSR_F_BIT )
msr cpsr, r0
showprogress 'A'
showprogress 'A'
/* Setup system stack (and get the BSS range) */
adr r0, .Lbssinit
ldmia r0, {r4, r5, sp}
ldmia r0, {r4, r5, sp}
/* Clear system BSS section */
mov r0, #0
1: cmp r4, r5
strcc r0, [r4], #4
strcc r0, [r4], #4
bcc 1b
showprogress 'B'
showprogress 'B'
/* Copy system .data sections to new home in RAM. */
#ifdef CONFIG_BOOT_RUNFROMFLASH
adr r3, .Ldatainit
ldmia r3, {r0, r1, r2}
ldmia r3, {r0, r1, r2}
1: ldmia r0!, {r3 - r10}
stmia r1!, {r3 - r10}
1: ldmia r0!, {r3 - r10}
stmia r1!, {r3 - r10}
cmp r1, r2
blt 1b
@ -119,13 +118,13 @@ __start:
*/
adr r3, .Lstkinit
ldmia r3, {r0, r1, r2} /* R0 = start of IDLE stack; R1 = Size of stack; R2 = coloration */
ldmia r3, {r0, r1, r2} /* R0 = start of IDLE stack; R1 = Size of stack; R2 = coloration */
2: /* Top of the loop */
2: /* Top of the loop */
sub r1, r1, #1 /* R1 = Number of words remaining */
cmp r1, #0 /* Check (nwords == 0) */
str r2, [r0], #4 /* Save stack color word, increment stack address */
bne 2b /* Bottom of the loop */
str r2, [r0], #4 /* Save stack color word, increment stack address */
bne 2b /* Bottom of the loop */
#endif

View file

@ -1,4 +1,4 @@
/**************************************************************************
/****************************************************************************
* arch/arm/src/arm/arm_saveusercontext.S
*
* Licensed to the Apache Software Foundation (ASF) under one or more
@ -16,64 +16,60 @@
* License for the specific language governing permissions and limitations
* under the License.
*
**************************************************************************/
****************************************************************************/
/**************************************************************************
/****************************************************************************
* Included Files
**************************************************************************/
****************************************************************************/
#include <nuttx/irq.h>
#include "arm_internal.h"
#include <nuttx/config.h>
#include <arch/irq.h>
/**************************************************************************
/****************************************************************************
* Pre-processor Definitions
**************************************************************************/
****************************************************************************/
/**************************************************************************
* Private Types
**************************************************************************/
/****************************************************************************
* Public Symbols
****************************************************************************/
/**************************************************************************
* Private Function Prototypes
**************************************************************************/
.file "arm_saveusercontext.S"
/**************************************************************************
* Public Data
**************************************************************************/
/****************************************************************************
* Macros
****************************************************************************/
/**************************************************************************
* Private Data
**************************************************************************/
/**************************************************************************
* Private Functions
**************************************************************************/
/**************************************************************************
/****************************************************************************
* Public Functions
**************************************************************************/
****************************************************************************/
/**************************************************************************
/****************************************************************************
* Name: arm_saveusercontext
**************************************************************************/
*
* Description:
* Save the current thread context. Full prototype is:
*
* int arm_saveusercontext(uint32_t *saveregs);
*
* Returned Value:
* 0: Normal return
* 1: Context switch return
*
****************************************************************************/
.text
.globl arm_saveusercontext
.type arm_saveusercontext, function
arm_saveusercontext:
/* On entry, a1 (r0) holds address of struct xcptcontext.
* Offset to the user region.
/* On entry, a1 (r0) holds address of struct xcptcontext */
/* Make sure that the return value will be non-zero (the value of the
* other volatile registers don't matter -- r1-r3, ip). This function
* is called through the normal C calling conventions and the values of
* these registers cannot be assumed at the point of setjmp return.
*/
/* Make sure that the return value will be non-zero (the
* value of the other volatile registers don't matter --
* r1-r3, ip). This function is called through the
* normal C calling conventions and the values of these
* registers cannot be assumed at the point of setjmp
* return.
*/
mov ip, #1
mov ip, #1
str ip, [r0, #(4*REG_R0)]
/* Save the volatile registers (plus r12 which really
@ -85,19 +81,20 @@ arm_saveusercontext:
/* Save the current cpsr */
mrs r2, cpsr /* R3 = CPSR value */
mrs r2, cpsr /* R2 = CPSR value */
add r1, r0, #(4*REG_CPSR)
str r2, [r1]
/* Finally save the return address as the PC so that we
* return to the exit from this function.
/* Save the return address as the PC so that we return to the exit from
* this function.
*/
add r1, r0, #(4*REG_PC)
add r1, r0, #(4*REG_PC)
str lr, [r1]
/* Return 0 */
mov r0, #0 /* Return value == 0 */
mov pc, lr /* Return */
.size arm_saveusercontext, . - arm_saveusercontext
.size arm_saveusercontext, .-arm_saveusercontext
.end

View file

@ -1,5 +1,5 @@
/************************************************************************************
* arch/arm/src/arm/up_vectoraddrexceptn.S
/****************************************************************************
* arch/arm/src/arm/arm_vectoraddrexceptn.S
*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
@ -16,53 +16,56 @@
* License for the specific language governing permissions and limitations
* under the License.
*
************************************************************************************/
****************************************************************************/
/************************************************************************************
/****************************************************************************
* Included Files
************************************************************************************/
****************************************************************************/
#include <nuttx/config.h>
#include <nuttx/irq.h>
#include "arm_arch.h"
/************************************************************************************
.file "arm_vectoraddrexcptn.S"
/****************************************************************************
* Pre-processor Definitions
************************************************************************************/
****************************************************************************/
/************************************************************************************
* Public Data
************************************************************************************/
/****************************************************************************
* Public Symbols
****************************************************************************/
/************************************************************************************
.globl arm_vectoraddrexcption
/****************************************************************************
* Assembly Macros
************************************************************************************/
****************************************************************************/
/************************************************************************************
/****************************************************************************
* Private Functions
************************************************************************************/
****************************************************************************/
.text
/************************************************************************************
/****************************************************************************
* Public Functions
************************************************************************************/
****************************************************************************/
.text
/************************************************************************************
* Name: up_vectoraddrexcption
/****************************************************************************
* Name: arm_vectoraddrexcption
*
* Description:
* Shouldn't happen. This exception handler is in a separate file from other
* vector handlers because some processors (e.g., lpc2148) do not support the
* the Address Exception vector.
* Shouldn't happen. This exception handler is in a separate file from
* other vector handlers because some processors do not support the
* Address Exception vector.
*
************************************************************************************/
****************************************************************************/
.globl arm_vectoraddrexcptn
.type arm_vectoraddrexcptn, %function
arm_vectoraddrexcptn:
b arm_vectoraddrexcptn
b arm_vectoraddrexcptn
.size arm_vectoraddrexcptn, . - arm_vectoraddrexcptn
.end

View file

@ -1,4 +1,4 @@
/************************************************************************************
/****************************************************************************
* arch/arm/src/arm/arm_vectors.S
*
* Licensed to the Apache Software Foundation (ASF) under one or more
@ -16,73 +16,76 @@
* License for the specific language governing permissions and limitations
* under the License.
*
************************************************************************************/
****************************************************************************/
/************************************************************************************
/****************************************************************************
* Included Files
************************************************************************************/
****************************************************************************/
#include <nuttx/config.h>
#include <nuttx/irq.h>
#include "arm.h"
#include "arm_arch.h"
/************************************************************************************
.file "arm_vectors.S"
/****************************************************************************
* Pre-processor Definitions
************************************************************************************/
****************************************************************************/
/************************************************************************************
* Public Data
************************************************************************************/
/****************************************************************************
* Private Data
****************************************************************************/
.data
g_irqtmp:
.word 0 /* Saved lr */
.word 0 /* Saved spsr */
g_undeftmp:
.word 0 /* Saved lr */
.word 0 /* Saved spsr */
g_aborttmp:
.word 0 /* Saved lr */
.word 0 /* Saved spsr */
/************************************************************************************
/****************************************************************************
* Assembly Macros
************************************************************************************/
****************************************************************************/
/************************************************************************************
/****************************************************************************
* Private Functions
************************************************************************************/
****************************************************************************/
.text
/************************************************************************************
/****************************************************************************
* Public Functions
************************************************************************************/
****************************************************************************/
.text
/************************************************************************************
/****************************************************************************
* Name: arm_vectorirq
*
* Description:
* Interrupt exception. Entered in IRQ mode with spsr = SVC CPSR, lr = SVC PC
*
************************************************************************************/
****************************************************************************/
.globl arm_decodeirq
.globl arm_vectorirq
.type arm_vectorirq, %function
arm_vectorirq:
/* On entry, we are in IRQ mode. We are free to use
* the IRQ mode r13 and r14.
/* On entry, we are in IRQ mode. We are free to use the IRQ mode r13
* and r14.
*/
ldr r13, .Lirqtmp
sub lr, lr, #4
str lr, [r13] @ save lr_IRQ
str lr, [r13] /* Save lr_IRQ */
mrs lr, spsr
str lr, [r13, #4] @ save spsr_IRQ
str lr, [r13, #4] /* Save spsr_IRQ */
/* Then switch back to SVC mode */
@ -105,7 +108,7 @@ arm_vectorirq:
/* Get the values for r15(pc) and CPSR in r3 and r4 */
ldr r0, .Lirqtmp /* Points to temp storage */
ldmia r0, {r3, r4} /* Recover r1=lr_IRQ, r2=spsr_IRQ */
ldmia r0, {r3, r4} /* Recover r3=lr_IRQ, r4=spsr_IRQ */
add r0, sp, #(4*REG_SP) /* Offset to pc, cpsr storage */
stmia r0, {r1-r4}
@ -132,23 +135,27 @@ arm_vectorirq:
.Lirqtmp:
.word g_irqtmp
#if CONFIG_ARCH_INTERRUPTSTACK > 3
.Lirqstackbase:
.word g_intstackbase
#endif
.size arm_vectorirq, . - arm_vectorirq
.align 5
/************************************************************************************
/****************************************************************************
* Function: arm_vectorswi
*
* Description:
* SWI interrupt. We enter the SWI in SVC mode.
*
************************************************************************************/
****************************************************************************/
.globl arm_syscall
.globl arm_vectorswi
.type arm_vectorswi, %function
arm_vectorswi:
/* Create a context structure. First set aside a stack frame
@ -186,7 +193,7 @@ arm_vectorswi:
.align 5
/************************************************************************************
/****************************************************************************
* Name: arm_vectordata
*
* Description:
@ -195,10 +202,12 @@ arm_vectorswi:
* current processor state and gives control to data abort handler. This function
* is entered in ABORT mode with spsr = SVC CPSR, lr = SVC PC
*
************************************************************************************/
****************************************************************************/
.globl arm_dataabort
.globl arm_vectordata
.type arm_vectordata, %function
arm_vectordata:
/* On entry we are free to use the ABORT mode registers
* r13 and r14
@ -231,7 +240,7 @@ arm_vectordata:
/* Get the values for r15(pc) and CPSR in r3 and r4 */
ldr r0, .Ldaborttmp /* Points to temp storage */
ldmia r0, {r3, r4} /* Recover r1=lr_IRQ, r2=spsr_IRQ */
ldmia r0, {r3, r4} /* Recover r3=lr_IRQ, r4=spsr_IRQ */
add r0, sp, #(4*REG_SP) /* Offset to pc, cpsr storage */
stmia r0, {r1-r4}
@ -260,7 +269,7 @@ arm_vectordata:
.align 5
/************************************************************************************
/****************************************************************************
* Name: arm_vectorprefetch
*
* Description:
@ -269,10 +278,12 @@ arm_vectordata:
* handler saves the current processor state and gives control to prefetch abort
* handler. This function is entered in ABT mode with spsr = SVC CPSR, lr = SVC PC.
*
************************************************************************************/
****************************************************************************/
.globl arm_prefetchabort
.globl arm_vectorprefetch
.type arm_vectorprefetch, %function
arm_vectorprefetch:
/* On entry we are free to use the ABORT mode registers
* r13 and r14
@ -305,7 +316,7 @@ arm_vectorprefetch:
/* Get the values for r15(pc) and CPSR in r3 and r4 */
ldr r0, .Lpaborttmp /* Points to temp storage */
ldmia r0, {r3, r4} /* Recover r1=lr_IRQ, r2=spsr_IRQ */
ldmia r0, {r3, r4} /* Recover r3=lr_IRQ, r4=spsr_IRQ */
add r0, sp, #(4*REG_SP) /* Offset to pc, cpsr storage */
stmia r0, {r1-r4}
@ -330,17 +341,19 @@ arm_vectorprefetch:
.align 5
/************************************************************************************
/****************************************************************************
* Name: arm_vectorundefinsn
*
* Description:
* Undefined instruction entry exception. Entered in UND mode, spsr = SVC CPSR,
* lr = SVC PC
*
************************************************************************************/
****************************************************************************/
.globl arm_undefinedinsn
.globl arm_vectorundefinsn
.type arm_vectorundefinsn, %function
arm_vectorundefinsn:
/* On entry we are free to use the UND mode registers
* r13 and r14
@ -372,7 +385,7 @@ arm_vectorundefinsn:
/* Get the values for r15(pc) and CPSR in r3 and r4 */
ldr r0, .Lundeftmp /* Points to temp storage */
ldmia r0, {r3, r4} /* Recover r1=lr_IRQ, r2=spsr_IRQ */
ldmia r0, {r3, r4} /* Recover r3=lr_IRQ, r4=spsr_IRQ */
add r0, sp, #(4*REG_SP) /* Offset to pc, cpsr storage */
stmia r0, {r1-r4}
@ -397,31 +410,34 @@ arm_vectorundefinsn:
.align 5
/************************************************************************************
/****************************************************************************
* Name: arm_vectorfiq
*
* Description:
* Shouldn't happen
*
************************************************************************************/
****************************************************************************/
.globl arm_vectorfiq
.type arm_vectorfiq, %function
arm_vectorfiq:
subs pc, lr, #4
.size arm_vectorfiq, . - arm_vectorfiq
/************************************************************************************
/****************************************************************************
* Name: g_intstackalloc/g_intstackbase
************************************************************************************/
****************************************************************************/
#if CONFIG_ARCH_INTERRUPTSTACK > 3
.bss
.balign 4
.globl g_intstackalloc
.type g_intstackalloc, object
.globl g_intstackbase
.type g_intstackbase, object
g_intstackalloc:
.skip (CONFIG_ARCH_INTERRUPTSTACK & ~3)
g_intstackbase:

View file

@ -24,14 +24,19 @@
#include <nuttx/config.h>
.file "arm_vectortab.S"
/****************************************************************************
* Pre-processor Definitions
****************************************************************************/
/****************************************************************************
* Public Data
* Public Symbols
****************************************************************************/
.globl _vector_start
.globl _vector_end
/****************************************************************************
* Assembly Macros
****************************************************************************/
@ -53,11 +58,11 @@ _vector_start:
ldr pc, .Lswihandler /* 0x08: Software interrupt */
ldr pc, .Lprefetchaborthandler /* 0x0c: Prefetch abort */
ldr pc, .Ldataaborthandler /* 0x10: Data abort */
ldr pc, .Laddrexcptnhandler /* 0x14: Address exception */
ldr pc, .Laddrexcptnhandler /* 0x14: Address exception (reserved) */
ldr pc, .Lirqhandler /* 0x18: IRQ */
ldr pc, .Lfiqhandler /* 0x1c: FIQ */
.globl __start
.globl __start
.globl arm_vectorundefinsn
.globl arm_vectorswi
.globl arm_vectorprefetch
@ -67,7 +72,7 @@ _vector_start:
.globl arm_vectorfiq
.Lresethandler:
.long __start
.long __start
.Lundefinedhandler:
.long arm_vectorundefinsn
.Lswihandler:
@ -85,4 +90,5 @@ _vector_start:
.globl _vector_end
_vector_end:
.size _vector_start, . - _vector_start
.end

View file

@ -1,4 +1,4 @@
/************************************************************************************
/****************************************************************************
* arch/arm/src/arm/vfork.S
*
* Licensed to the Apache Software Foundation (ASF) under one or more
@ -16,49 +16,50 @@
* License for the specific language governing permissions and limitations
* under the License.
*
************************************************************************************/
****************************************************************************/
/************************************************************************************
/****************************************************************************
* Included Files
************************************************************************************/
****************************************************************************/
#include <nuttx/config.h>
#include "arm_vfork.h"
/************************************************************************************
/****************************************************************************
* Pre-processor Definitions
************************************************************************************/
****************************************************************************/
/************************************************************************************
/****************************************************************************
* Public Symbols
************************************************************************************/
****************************************************************************/
.file "vfork.S"
.globl up_vfork
/************************************************************************************
/****************************************************************************
* Public Functions
************************************************************************************/
****************************************************************************/
/************************************************************************************
/****************************************************************************
* Name: vfork
*
* Description:
* The vfork() function has the same effect as fork(), except that the behavior is
* undefined if the process created by vfork() either modifies any data other than
* a variable of type pid_t used to store the return value from vfork(), or returns
* from the function in which vfork() was called, or calls any other function before
* successfully calling _exit() or one of the exec family of functions.
* The vfork() function has the same effect as fork(), except that the
* behavior is undefined if the process created by vfork() either modifies
* any data other than a variable of type pid_t used to store the return
* value from vfork(), or returns from the function in which vfork() was
* called, or calls any other function before successfully calling _exit()
* or one of the exec family of functions.
*
* This thin layer implements vfork by simply calling up_vfork() with the vfork()
* context as an argument. The overall sequence is:
* This thin layer implements vfork by simply calling up_vfork() with the
* vfork() context as an argument. The overall sequence is:
*
* 1) User code calls vfork(). vfork() collects context information and
* transfers control up up_vfork().
* 2) up_vfork()and calls nxtask_setup_vfork().
* 3) nxtask_setup_vfork() allocates and configures the child task's TCB. This
* consists of:
* 3) nxtask_setup_vfork() allocates and configures the child task's TCB.
* This consists of:
* - Allocation of the child task's TCB.
* - Initialization of file descriptors and streams
* - Configuration of environment variables
@ -75,19 +76,19 @@
* None
*
* Returned Value:
* Upon successful completion, vfork() returns 0 to the child process and returns
* the process ID of the child process to the parent process. Otherwise, -1 is
* returned to the parent, no child process is created, and errno is set to
* indicate the error.
* Upon successful completion, vfork() returns 0 to the child process and
* returns the process ID of the child process to the parent process.
* Otherwise, -1 is returned to the parent, no child process is created,
* and errno is set to indicate the error.
*
************************************************************************************/
****************************************************************************/
.globl vfork
.type vfork, function
vfork:
/* Create a stack frame */
mov r0, sp /* Save the value of the stack on entry */
mov r0, sp /* Save the value of the stack on entry */
sub sp, sp, #VFORK_SIZEOF /* Allocate the structure on the stack */
/* CPU registers */

View file

@ -46,8 +46,6 @@
#include <arch/irq.h>
#include "exc_return.h"
#include "chip.h"
/****************************************************************************
* Public Symbols
****************************************************************************/
@ -95,7 +93,7 @@ exception_common:
#ifdef CONFIG_BUILD_PROTECTED
mov r0, r14 /* Copy high register to low register */
lsl r0, #(31 - EXC_RETURN_PROCESS_BITNO) /* Move to bit 31 */
bmi 1f /* Test bit 31 */
bmi 1f /* Test bit 31 */
mrs r1, msp /* R1=The main stack pointer */
b 2f
@ -115,13 +113,13 @@ exception_common:
2:
/* Save SP, PRIMASK, and R4-R7 in the context array */
sub r1, #SW_XCPT_SIZE /* R1=Beginning of context array on the stack */
mov r2, #XCPTCONTEXT_SIZE /* R2=Size of the context array */
sub r1, #SW_XCPT_SIZE /* R1=Beginning of context array on the stack */
mov r2, #XCPTCONTEXT_SIZE /* R2=Size of the context array */
add r2, r1 /* R2=MSP/PSP before the interrupt was taken */
/* (ignoring the xPSR[9] alignment bit) */
/* (ignoring the xPSR[9] alignment bit) */
mrs r3, primask /* R3=Current PRIMASK setting */
mov r0, r1 /* Copy the context array pointer */
stmia r0!, {r2-r7} /* Save the SP, PRIMASK, and R4-R7 in the context array */
stmia r0!, {r2-r7} /* Save the SP, PRIMASK, and R4-R7 in the context array */
/* Save R8-R11 and the EXEC_RETURN value in the context array */
@ -131,9 +129,9 @@ exception_common:
mov r5, r11
#ifdef CONFIG_BUILD_PROTECTED
mov r6, r14
stmia r0!, {r2-r6} /* Save the high registers r8-r11 and r14 */
stmia r0!, {r2-r6} /* Save the high registers r8-r11 and r14 */
#else
stmia r0!, {r2-r5} /* Save the high registers r8-r11 */
stmia r0!, {r2-r5} /* Save the high registers r8-r11 */
#endif
/* Get the exception number in R0=IRQ, R1=register save area on stack */
@ -152,9 +150,9 @@ exception_common:
*/
#if CONFIG_ARCH_INTERRUPTSTACK > 3
ldr r7, =g_intstackbase /* R7=Base of the interrupt stack */
ldr r7, =g_intstackbase /* R7=Base of the interrupt stack */
mov sp, r7 /* Set the new stack point */
push {r1} /* Save the MSP on the interrupt stack */
push {r1} /* Save the MSP on the interrupt stack */
bl arm_doirq /* R0=IRQ, R1=register save area on stack */
pop {r1} /* Recover R1=main stack pointer */
#else
@ -163,13 +161,13 @@ exception_common:
mrs r1, msp /* Recover R1=main stack pointer */
#endif
/* On return from arm_doirq, r0 will hold a pointer to register context
/* On return from arm_doirq, R0 will hold a pointer to register context
* array to use for the interrupt return. If that return value is the same
* as current stack pointer, then things are relatively easy.
*/
cmp r0, r1 /* Context switch? */
beq 3f /* Branch if no context switch */
beq 3f /* Branch if no context switch */
/* We are returning with a pending context switch. This case is different
* because in this case, the register save structure does not lie on the
@ -180,13 +178,13 @@ exception_common:
/* Copy the hardware-saved context to the new stack */
mov r2, #SW_XCPT_SIZE /* R2=Size of software-saved portion of the context array */
add r1, r0, r2 /* R1=Address of HW save area in reg array */
ldr r2, [r0, #(4*REG_SP)] /* R2=Value of SP before the interrupt */
add r1, r0, r2 /* R1=Address of HW save area in reg array */
ldr r2, [r0, #(4*REG_SP)] /* R2=Value of SP before the interrupt */
sub r2, #HW_XCPT_SIZE /* R2=Address of HW save area on the return stack */
ldmia r1!, {r4-r7} /* Fetch four registers from the HW save area */
stmia r2!, {r4-r7} /* Copy four registers to the return stack */
ldmia r1!, {r4-r7} /* Fetch four registers from the HW save area */
stmia r2!, {r4-r7} /* Copy four registers to the return stack */
ldmia r1!, {r4-r7} /* Fetch four registers from the HW save area */
stmia r2!, {r4-r7} /* Copy four registers to the return stack */
ldmia r1!, {r4-r7} /* Fetch four registers from the HW save area */
stmia r2!, {r4-r7} /* Copy four registers to the return stack */
/* Restore the register contents */
@ -199,17 +197,17 @@ exception_common:
/* Recover R8-R11 and EXEC_RETURN (5 registers) */
mov r2, #(4*REG_R8) /* R2=Offset to R8 storage */
mov r2, #(4*REG_R8) /* R2=Offset to R8 storage */
add r0, r1, r2 /* R0=Address of R8 storage */
#ifdef CONFIG_BUILD_PROTECTED
ldmia r0!, {r2-r6} /* Recover R8-R11 and R14 (5 registers)*/
ldmia r0!, {r2-r6} /* Recover R8-R11 and R14 (5 registers)*/
mov r8, r2 /* Move to position in high registers */
mov r9, r3
mov r10, r4
mov r11, r5
mov r14, r6 /* EXEC_RETURN */
#else
ldmia r0!, {r2-r5} /* Recover R8-R11 and R14 (5 registers)*/
ldmia r0!, {r2-r5} /* Recover R8-R11 and R14 (5 registers)*/
mov r8, r2 /* Move to position in high registers */
mov r9, r3
mov r10, r4
@ -220,8 +218,8 @@ exception_common:
* the stack pointer as it was on entry to the exception handler.
*/
ldmia r1!, {r2-r7} /* Recover R4-R7 + 2 temp values */
mov r1, #HW_XCPT_SIZE /* R1=Size of hardware-saved portion of the context array */
ldmia r1!, {r2-r7} /* Recover R4-R7 + 2 temp values */
mov r1, #HW_XCPT_SIZE /* R1=Size of hardware-saved portion of the context array */
sub r1, r2, r1 /* R1=Value of MSP/PSP on exception entry */
/* Restore the stack pointer. The EXC_RETURN value tells us whether the
@ -231,7 +229,7 @@ exception_common:
#ifdef CONFIG_BUILD_PROTECTED
mov r0, r14 /* Copy high register to low register */
lsl r0, #(31 - EXC_RETURN_PROCESS_BITNO) /* Move to bit 31 */
bmi 5f /* Test bit 31 */
bmi 5f /* Test bit 31 */
msr msp, r1 /* R1=The main stack pointer */
b 6f
@ -241,7 +239,7 @@ exception_common:
6:
#else
msr msp, r1 /* R1=The main stack pointer */
ldr r0, =EXC_RETURN_PRIVTHR /* R0=EXC_RETURN to privileged mode */
ldr r0, =EXC_RETURN_PRIVTHR /* R0=EXC_RETURN to privileged mode */
mov r14, r0 /* R14=EXC_RETURN to privileged mode */
#endif
@ -254,7 +252,7 @@ exception_common:
* return to thread mode, and (2) select the correct stack.
*/
bx r14 /* And return */
bx r14 /* And return */
.size exception_common, .-exception_common
@ -269,7 +267,7 @@ exception_common:
#if CONFIG_ARCH_INTERRUPTSTACK > 3
.bss
.global g_intstackalloc
.global g_intstackbase
.global g_intstackbase
.balign 4
g_intstackalloc:
.skip (CONFIG_ARCH_INTERRUPTSTACK & ~3)

View file

@ -69,12 +69,12 @@ arm_fullcontextrestore:
/* Perform the System call with R0=1 and R1=regs */
mov r1, r0 /* R1: regs */
mov r1, r0 /* R1: regs */
mov r0, #SYS_restore_context /* R0: restore context */
svc 0 /* Force synchronous SVCall (or Hard Fault) */
svc 0 /* Force synchronous SVCall (or Hard Fault) */
/* This call should not return */
bx lr /* Unnecessary ... will not return */
bx lr /* Unnecessary ... will not return */
.size arm_fullcontextrestore, .-arm_fullcontextrestore
.end

View file

@ -71,20 +71,20 @@ arm_saveusercontext:
/* Perform the System call with R0=0 and R1=regs */
mov r1, r0 /* R1: regs */
mov r1, r0 /* R1: regs */
mov r0, #SYS_save_context /* R0: save context (also return value) */
svc 0 /* Force synchronous SVCall (or Hard Fault) */
svc 0 /* Force synchronous SVCall (or Hard Fault) */
/* There are two return conditions. On the first return, R0 (the
* return value will be zero. On the second return we need to
* force R0 to be 1.
*/
mov r3, #(4*REG_R0) /* R3=Offset to R0 storage */
add r2, r1, r3 /* R2=Address of R0 storage */
mov r3, #1 /* R3=Return value of one */
str r3, [r2, #0] /* Save return value */
bx lr /* "normal" return with r0=0 or
* context switch with r0=1 */
mov r3, #(4*REG_R0) /* R3=Offset to R0 storage */
add r2, r1, r3 /* R2=Address of R0 storage */
mov r3, #1 /* R3=Return value of one */
str r3, [r2, #0] /* Save return value */
bx lr /* "normal" return with r0=0 or
* context switch with r0=1 */
.size arm_saveusercontext, .-arm_saveusercontext
.end

View file

@ -33,7 +33,7 @@
****************************************************************************/
.cpu cortex-m0
.file "arm_signal_handler.S"
.file "arm_signal_handler.S"
/****************************************************************************
* Private Functions
@ -50,6 +50,10 @@
* This function is the user-space, signal handler trampoline function. It
* is called from up_signal_dispatch() in user-mode.
*
* R0-R3, R11 - volatile registers need not be preserved.
* R4-R10 - static registers must be preserved
* R12-R14 - LR and SP must be preserved
*
* Input Parameters:
* R0 = sighand
* The address user-space signal handling function
@ -72,7 +76,7 @@ up_signal_handler:
/* Save some register */
push {r4, r5 /* Save R4 and R5 on the stack */
push {r4, r5} /* Save R4 and R5 on the stack */
mov r5, lr /* Save LR in R5 */
/* Call the signal handler */
@ -81,7 +85,7 @@ up_signal_handler:
mov r0, r1 /* R0=signo */
mov r1, r2 /* R1=info */
mov r2, r3 /* R2=ucontext */
blx r4 /* Call the signal handler */
blx r4 /* Call the signal handler */
/* Restore the registers */

View file

@ -72,10 +72,10 @@ arm_switchcontext:
mov r2, r1 /* R2: restoreregs */
mov r1, r0 /* R1: saveregs */
mov r0, #SYS_switch_context /* R0: context switch */
svc 0 /* Force synchronous SVCall (or Hard Fault) */
mov r0, #SYS_switch_context /* R0: context switch */
svc 0 /* Force synchronous SVCall (or Hard Fault) */
/* We will get here only after the rerturn from the context switch */
/* We will get here only after the rerturn from the context switch */
bx lr
.size arm_switchcontext, .-arm_switchcontext

View file

@ -46,20 +46,21 @@
* Name: vfork
*
* Description:
* The vfork() function has the same effect as fork(), except that the behavior is
* undefined if the process created by vfork() either modifies any data other than
* a variable of type pid_t used to store the return value from vfork(), or returns
* from the function in which vfork() was called, or calls any other function before
* successfully calling _exit() or one of the exec family of functions.
* The vfork() function has the same effect as fork(), except that the
* behavior is undefined if the process created by vfork() either modifies
* any data other than a variable of type pid_t used to store the return
* value from vfork(), or returns from the function in which vfork() was
* called, or calls any other function before successfully calling _exit()
* or one of the exec family of functions.
*
* This thin layer implements vfork by simply calling up_vfork() with the vfork()
* context as an argument. The overall sequence is:
* This thin layer implements vfork by simply calling up_vfork() with the
* vfork() context as an argument. The overall sequence is:
*
* 1) User code calls vfork(). vfork() collects context information and
* transfers control up up_vfork().
* 2) up_vfork()and calls nxtask_setup_vfork().
* 3) nxtask_setup_vfork() allocates and configures the child task's TCB. This
* consists of:
* 3) nxtask_setup_vfork() allocates and configures the child task's TCB.
* This consists of:
* - Allocation of the child task's TCB.
* - Initialization of file descriptors and streams
* - Configuration of environment variables
@ -76,10 +77,10 @@
* None
*
* Returned Value:
* Upon successful completion, vfork() returns 0 to the child process and returns
* the process ID of the child process to the parent process. Otherwise, -1 is
* returned to the parent, no child process is created, and errno is set to
* indicate the error.
* Upon successful completion, vfork() returns 0 to the child process and
* returns the process ID of the child process to the parent process.
* Otherwise, -1 is returned to the parent, no child process is created,
* and errno is set to indicate the error.
*
****************************************************************************/
@ -91,21 +92,21 @@
vfork:
/* Create a stack frame */
mov r0, sp /* Save the value of the stack on entry */
mov r0, sp /* Save the value of the stack on entry */
sub sp, sp, #VFORK_SIZEOF /* Allocate the structure on the stack */
/* CPU registers */
/* Save the volatile registers */
mov r1, sp
stmia r1!, {r4-r7} /* Save r4-r7 in the structure */
mov r4, r8 /* Copy high registers to low registers */
stmia r1!, {r4-r7} /* Save r4-r7 in the structure */
mov r4, r8 /* Copy high registers to low registers */
mov r5, r9
mov r6, r10
mov r7, fp
stmia r1!, {r4-r7} /* Save r8-r10 and fp in the structure */
mov r5, lr /* Copy lr to a low register */
stmia r1!, {r0,r5} /* Save sp and lr in the structure */
stmia r1!, {r4-r7} /* Save r8-r10 and fp in the structure */
mov r5, lr /* Copy lr to a low register */
stmia r1!, {r0,r5} /* Save sp and lr in the structure */
/* Then, call up_vfork(), passing it a pointer to the stack structure */
@ -115,7 +116,7 @@ vfork:
/* Recover r4-r7 that were destroyed before up_vfork was called */
mov r1, sp
ldmia r1!, {r4-r7}
ldmia r1!, {r4-r7}
/* Release the stack data and return the value returned by up_vfork */

View file

@ -24,14 +24,11 @@
#include <nuttx/config.h>
#include <arch/board/board.h>
#include "arm.h"
#include "cp15.h"
#include "sctlr.h"
#include "mmu.h"
#include "smp.h"
#include "chip.h"
#include "arm_internal.h"
@ -350,7 +347,7 @@ __cpu3_start:
/* Then write the configured control register */
mcr CP15_SCTLR(r0) /* Write control reg */
.rept 12 /* Cortex A8 wants lots of NOPs here */
.rept 12 /* Cortex A8 wants lots of NOPs here */
nop
.endr
@ -408,11 +405,11 @@ __cpu3_start:
mov r0, sp /* R0 = end of IDLE stack */
ldmia r3, {r1, r2} /* R1 = Size of stack; R2 = coloration */
1: /* Top of the loop */
1: /* Top of the loop */
sub r1, r1, #1 /* R1 = Number of words remaining */
cmp r1, #0 /* Check (nwords == 0) */
str r2, [r0, #-4]! /* Save stack color word, increment stack address */
bne 1b /* Bottom of the loop */
str r2, [r0, #-4]! /* Save stack color word, increment stack address */
bne 1b /* Bottom of the loop */
#endif
/* Branch to continue C level CPU initialization */
@ -420,7 +417,7 @@ __cpu3_start:
mov fp, #0 /* Clear framepointer */
mov lr, #0 /* LR = return address (none) */
mov r0, r5 /* Input parameter = CPU index */
b arm_cpu_boot /* Branch to C level CPU initialization */
b arm_cpu_boot /* Branch to C level CPU initialization */
.size .Lcpu_vstart, .-.Lcpu_vstart
/***************************************************************************

View file

@ -24,7 +24,7 @@
#include <nuttx/config.h>
.file "arm_fetchadd.S"
.file "arm_fetchadd.S"
/****************************************************************************
* Public Functions
@ -55,15 +55,15 @@
up_fetchadd32:
1:
ldrex r2, [r0] /* Fetch the value to be incremented */
ldrex r2, [r0] /* Fetch the value to be incremented */
add r2, r2, r1 /* Add the addend */
strex r3, r2, [r0] /* Attempt to save the result */
teq r3, #0 /* r2 will be 1 is strex failed */
bne 1b /* Failed to lock... try again */
strex r3, r2, [r0] /* Attempt to save the result */
teq r3, #0 /* r3 will be 1 if strex failed */
bne 1b /* Failed to lock... try again */
mov r0, r2 /* Return the incremented value */
bx lr /* Successful! */
bx lr /* Successful! */
.size up_fetchadd32, . - up_fetchadd32
/****************************************************************************
@ -89,15 +89,15 @@ up_fetchadd32:
up_fetchsub32:
1:
ldrex r2, [r0] /* Fetch the value to be decremented */
ldrex r2, [r0] /* Fetch the value to be decremented */
sub r2, r2, r1 /* Subtract the subtrahend */
strex r3, r2, [r0] /* Attempt to save the result */
teq r3, #0 /* r2 will be 1 is strex failed */
bne 1b /* Failed to lock... try again */
strex r3, r2, [r0] /* Attempt to save the result */
teq r3, #0 /* r3 will be 1 if strex failed */
bne 1b /* Failed to lock... try again */
mov r0, r2 /* Return the decremented value */
bx lr /* Successful! */
bx lr /* Successful! */
.size up_fetchsub32, . - up_fetchsub32
/****************************************************************************
@ -123,15 +123,15 @@ up_fetchsub32:
up_fetchadd16:
1:
ldrexh r2, [r0] /* Fetch the value to be incremented */
ldrexh r2, [r0] /* Fetch the value to be incremented */
add r2, r2, r1 /* Add the addend */
strexh r3, r2, [r0] /* Attempt to save the result */
teq r3, #0 /* r2 will be 1 is strexh failed */
bne 1b /* Failed to lock... try again */
strexh r3, r2, [r0] /* Attempt to save the result */
teq r3, #0 /* r3 will be 1 if strexh failed */
bne 1b /* Failed to lock... try again */
mov r0, r2 /* Return the incremented value */
bx lr /* Successful! */
bx lr /* Successful! */
.size up_fetchadd16, . - up_fetchadd16
/****************************************************************************
@ -157,17 +157,17 @@ up_fetchadd16:
up_fetchsub16:
1:
ldrexh r2, [r0] /* Fetch the value to be decremented */
ldrexh r2, [r0] /* Fetch the value to be decremented */
sub r2, r2, r1 /* Subtract the subtrahend */
/* Attempt to save the decremented value */
strexh r3, r2, [r0] /* Attempt to save the result */
teq r3, #0 /* r2 will be 1 is strexh failed */
bne 1b /* Failed to lock... try again */
strexh r3, r2, [r0] /* Attempt to save the result */
teq r3, #0 /* r3 will be 1 if strexh failed */
bne 1b /* Failed to lock... try again */
mov r0, r2 /* Return the decremented value */
bx lr /* Successful! */
bx lr /* Successful! */
.size up_fetchsub16, . - up_fetchsub16
/****************************************************************************
@ -193,15 +193,15 @@ up_fetchsub16:
up_fetchadd8:
1:
ldrexb r2, [r0] /* Fetch the value to be incremented */
ldrexb r2, [r0] /* Fetch the value to be incremented */
add r2, r2, r1 /* Add the addend */
strexb r3, r2, [r0] /* Attempt to save the result */
teq r3, #0 /* r2 will be 1 is strexb failed */
bne 1b /* Failed to lock... try again */
strexb r3, r2, [r0] /* Attempt to save the result */
teq r3, #0 /* r3 will be 1 if strexb failed */
bne 1b /* Failed to lock... try again */
mov r0, r2 /* Return the incremented value */
bx lr /* Successful! */
bx lr /* Successful! */
.size up_fetchadd8, . - up_fetchadd8
/****************************************************************************
@ -227,14 +227,14 @@ up_fetchadd8:
up_fetchsub8:
1:
ldrexb r2, [r0] /* Fetch the value to be decremented */
ldrexb r2, [r0] /* Fetch the value to be decremented */
sub r2, r2, r1 /* Subtract the subtrahend */
strexb r3, r2, [r0] /* Attempt to save the result */
teq r3, #0 /* r2 will be 1 is strexb failed */
bne 1b /* Failed to lock... try again */
strexb r3, r2, [r0] /* Attempt to save the result */
teq r3, #0 /* r3 will be 1 if strexb failed */
bne 1b /* Failed to lock... try again */
mov r0, r2 /* Return the decremented value */
bx lr /* Successful! */
bx lr /* Successful! */
.size up_fetchsub8, . - up_fetchsub8
.end

View file

@ -27,7 +27,6 @@
#ifdef CONFIG_ARCH_FPU
.file "arm_fpuconfig.S"
/****************************************************************************
* Pre-processor Definitions
@ -38,6 +37,7 @@
****************************************************************************/
.globl arm_fpuconfig
.file "arm_fpuconfig.S"
/****************************************************************************
* Assembly Macros
@ -54,7 +54,7 @@
****************************************************************************/
/****************************************************************************
* Name: sam_fpuconfig
* Name: arm_fpuconfig
*
* Description:
* Configure the FPU. Enables access to CP10 and CP11
@ -77,9 +77,9 @@ arm_fpuconfig:
/* Set FPEXC.EN (B30) */
fmrx r0, fpexc
fmrx r0, fpexc
orr r0, r0, #0x40000000
fmxr fpexc, r0
fmxr fpexc, r0
bx lr
.size arm_fpuconfig, . - arm_fpuconfig
#endif

View file

@ -23,18 +23,24 @@
****************************************************************************/
#include <nuttx/config.h>
#include <nuttx/irq.h>
#include "arm_internal.h"
#include <arch/irq.h>
#include "svcall.h"
#include "arm.h"
.file "arm_fullcontextrestore.S"
/****************************************************************************
* Pre-processor Definitions
****************************************************************************/
/****************************************************************************
* Public Symbols
****************************************************************************/
.globl arm_fullcontextrestore
.file "arm_fullcontextrestore.S"
/****************************************************************************
* Macros
****************************************************************************/
/****************************************************************************
* Public Functions
@ -44,9 +50,9 @@
* Name: arm_fullcontextrestore
*
* Description:
* Restore the specified task context. Full prototype is:
* Restore the current thread context. Full prototype is:
*
* void arm_fullcontextrestore(uint32_t *restoreregs) noreturn_function;
* void arm_fullcontextrestore(uint32_t *restoreregs) noreturn_function;
*
* Returned Value:
* None
@ -55,7 +61,6 @@
.globl arm_fullcontextrestore
.type arm_fullcontextrestore, function
arm_fullcontextrestore:
/* On entry, a1 (r0) holds address of the register save area. All other
@ -74,14 +79,14 @@ arm_fullcontextrestore:
* s0, s1, ... in increasing address order.
*/
vldmia r1!, {s0-s31} /* Restore the full FP context */
vldmia r1!, {s0-s31} /* Restore the full FP context */
/* Load the floating point control and status register. At the end of the
* vstmia, r1 will point to the FPCSR storage location.
*/
ldr r2, [r1], #4 /* Fetch the floating point control and status register */
vmsr fpscr, r2 /* Restore the FPCSR */
vmsr fpscr, r2 /* Restore the FPCSR */
#endif
#ifdef CONFIG_BUILD_KERNEL
@ -93,13 +98,13 @@ arm_fullcontextrestore:
/* Perform the System call with R0=SYS_context_restore, R1=restoreregs */
mov r1, r0 /* R1: restoreregs */
mov r1, r0 /* R1: restoreregs */
mov r0, #SYS_context_restore /* R0: SYS_context_restore syscall */
svc #0x900001 /* Perform the system call */
svc #0x900001 /* Perform the system call */
/* This call should not return */
bx lr /* Unnecessary ... will not return */
bx lr /* Unnecessary ... will not return */
#else
/* For a flat build, we can do all of this here... Just think of this as
@ -109,22 +114,22 @@ arm_fullcontextrestore:
/* Recover all registers except for r0, r1, r2, R15, and CPSR */
add r1, r0, #(4*REG_R3) /* Offset to REG_R3 storage */
ldmia r1, {r3-r14} /* Recover registers */
ldmia r1, {r3-r14} /* Recover registers */
ldr r2, [r0, #(4*REG_CPSR)] /* Fetch the stored CPSR value */
ldr r2, [r0, #(4*REG_CPSR)] /* Fetch the stored CPSR value */
/* Create a stack frame to hold the some registers */
sub sp, sp, #(4*4) /* Frame for four registers */
ldr r1, [r0, #(4*REG_R0)] /* Fetch the stored r0 value */
str r1, [sp] /* Save it at the top of the stack */
ldr r1, [r0, #(4*REG_R1)] /* Fetch the stored r1 value */
ldr r1, [r0, #(4*REG_R0)] /* Fetch the stored r0 value */
str r1, [sp] /* Save it at the top of the stack */
ldr r1, [r0, #(4*REG_R1)] /* Fetch the stored r1 value */
str r1, [sp, #4] /* Save it in the stack */
ldr r1, [r0, #(4*REG_R2)] /* Fetch the stored r2 value */
ldr r1, [r0, #(4*REG_R2)] /* Fetch the stored r2 value */
str r1, [sp, #8] /* Save it in the stack */
ldr r1, [r0, #(4*REG_PC)] /* Fetch the stored pc value */
ldr r1, [r0, #(4*REG_PC)] /* Fetch the stored pc value */
tst r2, #PSR_T_BIT
orrne r1, r1, #1
orrne r1, r1, #1
str r1, [sp, #12] /* Save it at the bottom of the frame */
/* Now we can restore the CPSR. We wait until we are completely
@ -134,12 +139,12 @@ arm_fullcontextrestore:
* disabled.
*/
msr cpsr, r2 /* Set the CPSR */
msr cpsr, r2 /* Set the CPSR */
/* Now recover r0 r1 r2 and R15 */
pop {r0-r2, pc}
#endif
.size arm_fullcontextrestore, . - arm_fullcontextrestore
.size arm_fullcontextrestore, .-arm_fullcontextrestore
.end

View file

@ -24,8 +24,6 @@
#include <nuttx/config.h>
#include <arch/board/board.h>
#include "arm.h"
#include "cp15.h"
#include "sctlr.h"
@ -54,7 +52,7 @@
* 1. We execute in place in FLASH (CONFIG_BOOT_RUNFROMFLASH=y). In this case
* the boot logic must:
*
* - Configure SDRAM,
* - Configure SDRAM (if present),
* - Initialize the .data section in RAM, and
* - Clear .bss section
*/
@ -73,13 +71,13 @@
# define CONFIG_IDENTITY_TEXTMAP 1
# endif
/* 2. We boot in FLASH but copy ourselves to DRAM from better performance.
/* 2. We boot in FLASH but copy ourselves to SDRAM from better performance.
* (CONFIG_BOOT_RUNFROMFLASH=n && CONFIG_BOOT_COPYTORAM=y). In this case
* the boot logic must:
*
* - Configure SDRAM,
* - Configure SDRAM (if present),
* - Copy ourself to DRAM (after mapping it), and
* - Clear .bss section
* - Clear .bss section (data should be fully initialized)
*
* In this case, we assume that the logic within this file executes from FLASH.
*/
@ -100,11 +98,11 @@
# define CONFIG_IDENTITY_TEXTMAP 1
# endif
/* 3. There is bootloader that copies us to DRAM (but probably not to the beginning)
/* 3. There is bootloader that copies us to SDRAM (but probably not to the beginning)
* (CONFIG_BOOT_RUNFROMFLASH=n && CONFIG_BOOT_COPYTORAM=n). In this case SDRAM
* was initialized by the boot loader, and this boot logic must:
*
* - Clear .bss section
* - Clear .bss section (data should be fully initialized)
*/
#else
@ -128,12 +126,6 @@
* Pre-processor Definitions
****************************************************************************/
/* RX_NSECTIONS determines the number of 1Mb sections to map for the
* Read/eXecute address region. This is based on NUTTX_TEXT_SIZE.
*/
#define WR_NSECTIONS ((NUTTX_RAM_SIZE+0x000fffff) >> 20)
/****************************************************************************
* Assembly Macros
****************************************************************************/
@ -170,6 +162,11 @@
* Name: __start
****************************************************************************/
/* We assume the bootloader has already initialized most of the h/w for
* us and that only leaves us having to do some os specific things
* below.
*/
.text
.global __start
.type __start, #function
@ -219,12 +216,12 @@ __start:
*/
adr r0, .LCptinfo /* Address of page table description */
ldmia r0, {r1, r2, r3} /* Load the page table description */
ldmia r0, {r1, r2, r3} /* Load the page table description */
/* A single page is sufficient to map the page table */
orr r0, r1, r3 /* OR MMU flags into physical address */
str r0, [r5, r2, lsr #18] /* Map using the virtual address as an index */
orr r0, r1, r3 /* OR MMU flags into physical address */
str r0, [r5, r2, lsr #18] /* Map using the virtual address as an index */
#endif
/* Load information needed to map the .text region. After the ldmia, we
@ -238,7 +235,7 @@ __start:
*/
adr r0, .LCtextinfo /* Address of text info */
ldmia r0, {r1, r2, r3, r4} /* Load the text description */
ldmia r0, {r1, r2, r3, r4} /* Load the text description */
#ifndef CONFIG_IDENTITY_TEXTMAP
/* Create identity mapping for first MB of the .text section to support
@ -248,8 +245,8 @@ __start:
* the identity mapping.
*/
orr r0, r1, r3 /* OR MMU flags into physical address */
str r0, [r5, r1, lsr #18] /* Identity mapping */
orr r0, r1, r3 /* OR MMU flags into physical address */
str r0, [r5, r1, lsr #18] /* Identity mapping */
#endif
/* Map the entire .text region. We do this before enabling caches so
@ -269,10 +266,10 @@ __start:
*/
.Lpgtextloop:
orr r0, r1, r3 /* R0: OR MMU flags into physical address */
subs r4, r4, #1 /* R4: Decrement the section count */
orr r0, r1, r3 /* R0: OR MMU flags into physical address */
subs r4, r4, #1 /* R4: Decrement the section count */
str r0, [r2], #4 /* Save page table entry, increment page table address */
add r1, r1, #(1024*1024) /* R1: Increment the physical address */
add r1, r1, #(1024*1024) /* R1: Increment the physical address */
bne .Lpgtextloop /* Loop while R4 is non-zero */
#if defined(CONFIG_BOOT_RUNFROMFLASH) && !defined(CONFIG_BOOT_SDRAM_DATA)
@ -293,7 +290,7 @@ __start:
*/
adr r0, .LCraminfo /* Address of primary RAM info */
ldmia r0, {r1, r2, r3, r4} /* Load the primary RAM description */
ldmia r0, {r1, r2, r3, r4} /* Load the primary RAM description */
add r2, r5, r2, lsr #18 /* R2=Offset page table address */
/* Loop until each page table entry has been written for the primary RAM
@ -301,11 +298,11 @@ __start:
*/
.Lpgramloop:
orr r0, r1, r3 /* R0: OR MMU flags into physical address */
subs r4, r4, #1 /* R4: Decrement the section count */
orr r0, r1, r3 /* R0: OR MMU flags into physical address */
subs r4, r4, #1 /* R4: Decrement the section count */
str r0, [r2], #4 /* Save page table entry, increment page table address */
add r1, r1, #(1024*1024) /* R1: Increment the physical address */
bne .Lpgramloop /* Loop while R4 is non-zero */
add r1, r1, #(1024*1024) /* R1: Increment the physical address */
bne .Lpgramloop /* Loop while R4 is non-zero */
#endif /* CONFIG_BOOT_RUNFROMFLASH && !CONFIG_BOOT_SDRAM_DATA */
#endif /* CONFIG_ARCH_ROMPGTABLE */
@ -492,7 +489,7 @@ __start:
/* Then write the configured control register */
mcr CP15_SCTLR(r0) /* Write control reg */
.rept 12 /* Cortex A8 wants lots of NOPs here */
.rept 12 /* Cortex A8 wants lots of NOPs here */
nop
.endr
@ -508,7 +505,7 @@ __start:
.type .LCppgtable, %object
.LCppgtable:
.long PGTABLE_BASE_PADDR /* Physical start of page table */
.long PGTABLE_BASE_PADDR /* Physical start of page table */
.size .LCppgtable, . -.LCppgtable
#ifdef ARMV7A_PGTABLE_MAPPING
@ -524,7 +521,7 @@ __start:
.LCptinfo:
.long (PGTABLE_BASE_PADDR & 0xfff00000) /* Physical base address */
.long (PGTABLE_BASE_VADDR & 0xfff00000) /* Virtual base address */
.long MMU_MEMFLAGS /* MMU flags for text section in RAM */
.long MMU_MEMFLAGS /* MMU flags for text section in RAM */
.size .LCptinfo, . -.LCptinfo
#endif
@ -552,7 +549,7 @@ __start:
#else
.long MMU_MEMFLAGS /* MMU flags for text section in RAM */
#endif
.long (NUTTX_TEXT_SIZE >> 20) /* Number of 1MB read-execute sections */
.long (NUTTX_TEXT_SIZE >> 20) /* Number of 1MB read-execute sections */
.size .LCtextinfo, . -.LCtextinfo
#ifdef CONFIG_BOOT_RUNFROMFLASH
@ -573,7 +570,7 @@ __start:
.long NUTTX_RAM_PADDR /* Physical base address */
.long NUTTX_RAM_VADDR /* Virtual base address */
.long MMU_MEMFLAGS /* MMU flags for primary RAM section */
.long (NUTTX_RAM_SIZE >> 20) /* Number of 1MB read-execute sections */
.long (NUTTX_RAM_SIZE >> 20) /* Number of 1MB read-execute sections */
.size .LCraminfo, . -.LCraminfo
#endif /* CONFIG_BOOT_RUNFROMFLASH */
@ -609,14 +606,14 @@ __start:
ldr r5, .LCvpgtable /* r5=Virtual page table base address */
ldr r3, .LCptextbase /* r0=Physical base address of .text section */
mov r0, #0 /* flags + base = 0 */
str r3, [r5, r3, lsr #18] /* identity mapping */
mov r0, #0 /* flags + base = 0 */
str r3, [r5, r3, lsr #18] /* identity mapping */
#endif /* !CONFIG_ARCH_ROMPGTABLE && !CONFIG_IDENTITY_TEXTMAP */
/* Set up the stack pointer and clear the frame pointer */
ldr sp, .Lstackpointer
bic sp, sp, #7 /* Get the stack pointer with 8-byte alignment */
bic sp, sp, #7 /* Get the stack pointer with 8-byte alignment */
mov fp, #0
#ifndef CONFIG_BOOT_SDRAM_DATA
@ -641,13 +638,13 @@ __start:
*/
adr r3, .Lstkinit
ldmia r3, {r0, r1, r2} /* R0 = start of IDLE stack; R1 = Size of tack; R2 = coloration */
ldmia r3, {r0, r1, r2} /* R0 = start of IDLE stack; R1 = Size of tack; R2 = coloration */
1: /* Top of the loop */
1: /* Top of the loop */
sub r1, r1, #1 /* R1 = Number of words remaining */
cmp r1, #0 /* Check (nwords == 0) */
str r2, [r0], #4 /* Save stack color word, increment stack address */
bne 1b /* Bottom of the loop */
str r2, [r0], #4 /* Save stack color word, increment stack address */
bne 1b /* Bottom of the loop */
#endif
/* Finally branch to the OS entry point */
@ -669,12 +666,12 @@ arm_data_initialize:
/* Zero BSS */
adr r0, .Linitparms
ldmia r0, {r0, r1}
ldmia r0, {r0, r1}
mov r2, #0
mov r2, #0
1:
cmp r0, r1 /* Clear up to _bss_end_ */
strcc r2, [r0], #4
strcc r2, [r0], #4
bcc 1b
#ifdef CONFIG_BOOT_RUNFROMFLASH
@ -688,7 +685,7 @@ arm_data_initialize:
*/
adr r3, .Ldatainit
ldmia r3, {r0, r1, r2}
ldmia r3, {r0, r1, r2}
2:
ldr r3, [r0], #4
@ -708,9 +705,10 @@ arm_data_initialize:
/* Text-section constants:
*
* _sbss is the start of the BSS region (see ld.script)
* _ebss is the end of the BSS region (see ld.script)
* _sbss is the start of the BSS region (see linker script)
* _ebss is the end of the BSS region (see linker script)
*
* Typical Configuration:
* The idle task stack usually starts at the end of BSS and is of size
* CONFIG_IDLETHREAD_STACKSIZE. The heap continues from there until the
* end of memory. See g_idle_topstack below.
@ -725,7 +723,7 @@ arm_data_initialize:
.type .LCvpgtable, %object
.LCvpgtable:
.long PGTABLE_BASE_VADDR /* Virtual start of page table */
.long PGTABLE_BASE_VADDR /* Virtual start of page table */
.size .LCvpgtable, . -.LCvpgtable
#endif /* CONFIG_ARCH_ROMPGTABLE */
@ -759,7 +757,7 @@ arm_data_initialize:
.type .Lstkinit, %object
.Lstkinit:
#ifdef CONFIG_BOOT_SDRAM_DATA
.long IDLE_STACK_VBASE /* Beginning of the IDLE stack, then words of IDLE stack */
.long IDLE_STACK_VBASE /* Beginning of the IDLE stack, then words of IDLE stack */
#elif defined(CONFIG_SMP)
.long _enoinit
#else
@ -775,7 +773,7 @@ arm_data_initialize:
***************************************************************************/
/* This global variable is unsigned long g_idle_topstack and is
* exported from here only because of its coupling to .Linitparms
* exported from here only because of its coupling to .Lstackpointer
* above.
*/

View file

@ -25,13 +25,10 @@
#include <nuttx/config.h>
#include <nuttx/page.h>
#include <arch/board/board.h>
#include "arm.h"
#include "cp15.h"
#include "sctlr.h"
#include "mmu.h"
#include "chip.h"
#include "arm_internal.h"
@ -61,7 +58,7 @@
* 1. We execute in place in FLASH (CONFIG_BOOT_RUNFROMFLASH=y). In this case
* the boot logic must:
*
* - Configure SDRAM,
* - Configure SDRAM (if present),
* - Initialize the .data section in RAM, and
* - Clear .bss section
*/
@ -80,13 +77,13 @@
# define CONFIG_IDENTITY_TEXTMAP 1
# endif
/* 2. We boot in FLASH but copy ourselves to DRAM from better performance.
/* 2. We boot in FLASH but copy ourselves to SDRAM from better performance.
* (CONFIG_BOOT_RUNFROMFLASH=n && CONFIG_BOOT_COPYTORAM=y). In this case
* the boot logic must:
*
* - Configure SDRAM,
* - Configure SDRAM (if present),
* - Copy ourself to DRAM (after mapping it), and
* - Clear .bss section
* - Clear .bss section (data should be fully initialized)
*
* In this case, we assume that the logic within this file executes from FLASH.
*/
@ -107,11 +104,11 @@
# define CONFIG_IDENTITY_TEXTMAP 1
# endif
/* 3. There is bootloader that copies us to DRAM (but probably not to the beginning)
/* 3. There is bootloader that copies us to SDRAM (but probably not to the beginning)
* (CONFIG_BOOT_RUNFROMFLASH=n && CONFIG_BOOT_COPYTORAM=n). In this case SDRAM
* was initialized by the boot loader, and this boot logic must:
*
* - Clear .bss section
* - Clear .bss section (data should be fully initialized)
*/
#else
@ -152,11 +149,10 @@
* Pre-processor Definitions
****************************************************************************/
/* RX_NSECTIONS determines the number of 1Mb sections to map for the
* Read/eXecute address region. This is based on NUTTX_TEXT_SIZE.
/* WR_NSECTIONS determines the number of 1Mb sections to map for the
* Write/Read/eXecute address region. This is based on NUTTX_TEXT_SIZE.
*/
#define RX_NSECTIONS ((NUTTX_TEXT_SIZE+0x000fffff) >> 20)
#define WR_NSECTIONS ((NUTTX_RAM_SIZE+0x000fffff) >> 20)
/****************************************************************************
@ -195,6 +191,11 @@
* Name: __start
****************************************************************************/
/* We assume the bootloader has already initialized most of the h/w for
* us and that only leaves us having to do some os specific things
* below.
*/
.text
.global __start
.type __start, #function
@ -236,12 +237,12 @@ __start:
*/
adr r0, .LCptinfo /* Address of page table description */
ldmia r0, {r1, r2, r3} /* Load the page table description */
ldmia r0, {r1, r2, r3} /* Load the page table description */
/* A single page is sufficient to map the page table */
orr r0, r1, r3 /* OR MMU flags into physical address */
str r0, [r5, r2, lsr #18] /* Map using the virtual address as an index */
orr r0, r1, r3 /* OR MMU flags into physical address */
str r0, [r5, r2, lsr #18] /* Map using the virtual address as an index */
#endif
#ifndef CONFIG_IDENTITY_TEXTMAP
@ -254,8 +255,8 @@ __start:
ldr r0, .LCptextbase /* r0=phys. base address of .text section */
ldr r1, .LCtextflags /* R1=.text section MMU flags */
orr r3, r1, r0 /* r3=flags + base */
str r3, [r4, r0, lsr #18] /* identity mapping */
orr r3, r1, r0 /* r3=flags + base */
str r3, [r4, r0, lsr #18] /* identity mapping */
#endif
/* Map the read-only .text region in place. This must be done
@ -272,28 +273,28 @@ __start:
*/
adr r0, .Ltxtspan
ldmia r0, {r0, r1, r2, r3, r5}
pg_l1span r0, r1, r2, r3, r5, r6
ldmia r0, {r0, r1, r2, r3, r5}
pg_l1span r0, r1, r2, r3, r5, r6
/* Then populate the L2 table for the locked text region only. */
adr r0, .Ltxtmap
ldmia r0, {r0, r1, r2, r3}
pg_l2map r0, r1, r2, r3, r5
ldmia r0, {r0, r1, r2, r3}
pg_l2map r0, r1, r2, r3, r5
/* Make sure that the page table is itself mapped and and read/write-able.
* First, populate the L1 table:
*/
adr r0, .Lptabspan
ldmia r0, {r0, r1, r2, r3, r5}
pg_l1span r0, r1, r2, r3, r5, r6
ldmia r0, {r0, r1, r2, r3, r5}
pg_l1span r0, r1, r2, r3, r5, r6
/* Then populate the L2 table. */
adr r0, .Lptabmap
ldmia r0, {r0, r1, r2, r3}
pg_l2map r0, r1, r2, r3, r5
ldmia r0, {r0, r1, r2, r3}
pg_l2map r0, r1, r2, r3, r5
/* The following logic will set up the ARMv7-A for normal operation.
*
@ -423,7 +424,7 @@ __start:
/* In SMP configurations, the data cache will not be enabled until later
* after SMP cache coherency has been setup.
*/
*/
#if !defined(CPU_DCACHE_DISABLE) && !defined(CONFIG_SMP)
/* Dcache enable
@ -467,7 +468,7 @@ __start:
/* Then write the configured control register */
mcr CP15_SCTLR(r0) /* Write control reg */
.rept 12 /* Cortex A8 wants lots of NOPs here */
.rept 12 /* Cortex A8 wants lots of NOPs here */
nop
.endr
@ -499,7 +500,7 @@ __start:
.LCptinfo:
.long (PGTABLE_BASE_PADDR & 0xfff00000) /* Physical base address */
.long (PGTABLE_BASE_VADDR & 0xfff00000) /* Virtual base address */
.long MMU_MEMFLAGS /* MMU flags for text section in RAM */
.long MMU_MEMFLAGS /* MMU flags for text section in RAM */
.size .LCptinfo, . -.LCptinfo
#endif
@ -555,7 +556,7 @@ __start:
.Ltxtmap:
.long PG_L2_LOCKED_PADDR /* Physical address in the L2 table */
.long PG_LOCKED_PBASE /* Physical address of locked base memory */
.long CONFIG_PAGING_NLOCKED /* Number of pages in the locked region */
.long CONFIG_PAGING_NLOCKED /* Number of pages in the locked region */
.long MMU_L2_TEXTFLAGS /* L2 MMU flags to use */
.size .Ltxtmap, . -.Ltxtmap
@ -564,7 +565,7 @@ __start:
.long PG_L1_PGTABLE_PADDR /* Physical address in the L1 table */
.long PG_L2_PGTABLE_PBASE /* Physical address of the start of the L2 page table */
.long PG_PGTABLE_NPAGES /* Total mapped page table pages */
.long PG_L2_PGTABLE_NPAGE1 /* The number of text pages in the first page table */
.long PG_L2_PGTABLE_NPAGE1 /* The number of text pages in the first page table */
.long MMU_L1_PGTABFLAGS /* L1 MMU flags to use */
.size .Lptabspan, . -.Lptabspan
@ -599,28 +600,28 @@ __start:
ldr r4, .LCvpgtable /* r4=virtual page table base address */
ldr r3, .LCvtextbase /* r0=virtual base address of .text section */
mov r0, #0 /* flags + base = 0 */
str r3, [r4, r3, lsr #18] /* identity mapping */
mov r0, #0 /* flags + base = 0 */
str r3, [r4, r3, lsr #18] /* identity mapping */
#endif
/* Populate the L1 table for the data region */
adr r0, .Ldataspan
ldmia r0, {r0, r1, r2, r3, r4}
pg_l1span r0, r1, r2, r3, r4, r5
ldmia r0, {r0, r1, r2, r3, r4}
pg_l1span r0, r1, r2, r3, r4, r5
/* Populate the L2 table for the data region */
adr r0, .Ldatamap
ldmia r0, {r0, r1, r2, r3}
pg_l2map r0, r1, r2, r3, r4
ldmia r0, {r0, r1, r2, r3}
pg_l2map r0, r1, r2, r3, r4
#ifdef CONFIG_BOOT_RUNFROMFLASH
/* Get R3 = Value of RAM L1 page table entry */
ldr r3, .LCprambase /* r3=Aligned NuttX RAM address (physical) */
ldr r1, .LCramflags /* R1=.bss/.data section MMU flags */
add r3, r3, r1 /* r3=flags + base */
add r3, r3, r1 /* r3=flags + base */
/* Now setup the page tables for our normal mapped RAM region.
* We round NUTTX_RAM_VADDR down to the nearest megabyte boundary.
@ -633,7 +634,7 @@ __start:
* region.
*/
.rept WR_NSECTIONS-1
.rept WR_NSECTIONS-1
add r3, r3, #SECTION_SIZE
str r3, [r0], #4
.endr
@ -648,7 +649,7 @@ __start:
/* Set up the stack pointer and clear the frame pointer */
ldr sp, .Lstackpointer
bic sp, sp, #7 /* Get the stack pointer with 8-byte alignment */
bic sp, sp, #7 /* Get the stack pointer with 8-byte alignment */
mov fp, #0
#ifndef CONFIG_BOOT_SDRAM_DATA
@ -673,13 +674,13 @@ __start:
*/
adr r3, .Lstkinit
ldmia r3, {r0, r1, r2} /* R0 = start of IDLE stack; R1 = Size of tack; R2 = coloration */
ldmia r3, {r0, r1, r2} /* R0 = start of IDLE stack; R1 = Size of tack; R2 = coloration */
1: /* Top of the loop */
1: /* Top of the loop */
sub r1, r1, #1 /* R1 = Number of words remaining */
cmp r1, #0 /* Check (nwords == 0) */
str r2, [r0], #4 /* Save stack color word, increment stack address */
bne 1b /* Bottom of the loop */
str r2, [r0], #4 /* Save stack color word, increment stack address */
bne 1b /* Bottom of the loop */
#endif
/* Finally branch to the OS entry point */
@ -701,12 +702,12 @@ arm_data_initialize:
/* Zero BSS */
adr r0, .Linitparms
ldmia r0, {r0, r1}
ldmia r0, {r0, r1}
mov r2, #0
mov r2, #0
1:
cmp r0, r1 /* Clear up to _bss_end_ */
strcc r2, [r0],#4
strcc r2, [r0], #4
bcc 1b
#ifdef CONFIG_BOOT_RUNFROMFLASH
@ -720,7 +721,7 @@ arm_data_initialize:
*/
adr r3, .Ldatainit
ldmia r3, {r0, r1, r2}
ldmia r3, {r0, r1, r2}
2:
ldr r3, [r0], #4
@ -740,9 +741,10 @@ arm_data_initialize:
/* Text-section constants:
*
* _sbss is the start of the BSS region (see ld.script)
* _ebss is the end of the BSS region (see ld.script)
* _sbss is the start of the BSS region (see linker script)
* _ebss is the end of the BSS region (see linker script)
*
* Typical Configuration:
* The idle task stack usually starts at the end of BSS and is of size
* CONFIG_IDLETHREAD_STACKSIZE. The heap continues from there until the
* end of memory. See g_idle_topstack below.
@ -794,7 +796,7 @@ arm_data_initialize:
.type .Lstkinit, %object
.Lstkinit:
#ifdef CONFIG_BOOT_SDRAM_DATA
.long IDLE_STACK_VBASE /* Beginning of the IDLE stack, then words of IDLE stack */
.long IDLE_STACK_VBASE /* Beginning of the IDLE stack, then words of IDLE stack */
#else
.long _ebss /* Beginning of the IDLE stack, then words of IDLE stack */
#endif
@ -808,7 +810,7 @@ arm_data_initialize:
***************************************************************************/
/* This global variable is unsigned long g_idle_topstack and is
* exported from here only because of its coupling to .Linitparms
* exported from here only because of its coupling to .Lstackpointer
* above.
*/
@ -825,5 +827,5 @@ g_idle_topstack:
.long _ebss+CONFIG_IDLETHREAD_STACKSIZE
#endif
.size g_idle_topstack, .-g_idle_topstack
#endif
.end
#endif

View file

@ -28,13 +28,12 @@
#ifdef CONFIG_ARCH_FPU
.file "arm_restorefpu.S"
/****************************************************************************
* Public Symbols
****************************************************************************/
.globl arm_restorefpu
.globl arm_restorefpu
.file "arm_restorefpu.S"
/****************************************************************************
* Public Functions
@ -73,14 +72,14 @@ arm_restorefpu:
* s0, s1, ... in increasing address order.
*/
vldmia r1!, {s0-s31} /* Restore the full FP context */
vldmia r1!, {s0-s31} /* Restore the full FP context */
/* Load the floating point control and status register. At the end of the
* vstmia, r1 will point to the FPCSR storage location.
*/
ldr r2, [r1], #4 /* Fetch the floating point control and status register */
vmsr fpscr, r2 /* Restore the FPCSR */
vmsr fpscr, r2 /* Restore the FPCSR */
bx lr
.size arm_restorefpu, .-arm_restorefpu

View file

@ -76,13 +76,13 @@ arm_savefpu:
* s0, s1, ... in increasing address order.
*/
vstmia r1!, {s0-s31} /* Save the full FP context */
vstmia r1!, {s0-s31} /* Save the full FP context */
/* Store the floating point control and status register. At the end of the
* vstmia, r1 will point to the FPCSR storage location.
*/
vmrs r2, fpscr /* Fetch the FPCSR */
vmrs r2, fpscr /* Fetch the FPCSR */
str r2, [r1], #4 /* Save the floating point control and status register */
bx lr

View file

@ -22,30 +22,43 @@
* Included Files
****************************************************************************/
#include <nuttx/irq.h>
#include "arm_internal.h"
#include <nuttx/config.h>
#include <arch/irq.h>
.file "arm_saveusercontext.S"
/****************************************************************************
* Pre-processor Definitions
****************************************************************************/
/****************************************************************************
* Public Symbols
****************************************************************************/
.globl arm_saveusercontext
.file "arm_saveusercontext.S"
/****************************************************************************
* Macros
****************************************************************************/
/****************************************************************************
* Public Functions
****************************************************************************/
.text
/****************************************************************************
* Name: arm_saveusercontext
*
* Description:
* Save the current thread context. Full prototype is:
*
* int arm_saveusercontext(uint32_t *saveregs);
*
* Returned Value:
* 0: Normal return
* 1: Context switch return
*
****************************************************************************/
.globl arm_saveusercontext
.type arm_saveusercontext, function
arm_saveusercontext:
/* On entry, a1 (r0) holds address of struct xcptcontext */
@ -64,15 +77,17 @@ arm_saveusercontext:
*/
add r1, r0, #(4*REG_R4)
stmia r1, {r4-r14}
stmia r1, {r4-r14}
/* Save the current cpsr */
mrs r2, cpsr /* R3 = CPSR value */
mrs r2, cpsr /* R2 = CPSR value */
add r1, r0, #(4*REG_CPSR)
str r2, [r1]
/* Save the return address */
/* Save the return address as the PC so that we return to the exit from
* this function.
*/
ldr r2, =1f
add r1, r0, #(4*REG_PC)
@ -86,20 +101,20 @@ arm_saveusercontext:
*/
#ifdef CONFIG_ARCH_FPU
add r1, r0, #(4*REG_S0) /* R1=Address of FP register storage */
add r1, r0, #(4*REG_S0) /* R1=Address of FP register storage */
/* Store all floating point registers. Registers are stored in numeric order,
* s0, s1, ... in increasing address order.
*/
vstmia r1!, {s0-s31} /* Save the full FP context */
vstmia r1!, {s0-s31} /* Save the full FP context */
/* Store the floating point control and status register. At the end of the
* vstmia, r1 will point to the FPCSR storage location.
*/
vmrs r2, fpscr /* Fetch the FPCSR */
str r2, [r1], #4 /* Save the floating point control and status register */
vmrs r2, fpscr /* Fetch the FPCSR */
str r2, [r1], #4 /* Save the floating point control and status register */
#endif
/* Return 0 now indicating that this return is not a context switch */
@ -114,5 +129,5 @@ arm_saveusercontext:
mov r0, #1 /* Return value == 1 */
mov pc, lr /* Return */
.size arm_saveusercontext, . - arm_saveusercontext
.size arm_saveusercontext, .-arm_saveusercontext
.end

View file

@ -60,7 +60,7 @@
* This function must be provided via the architecture-specific logic.
*
* Input Parameters:
* lock - The address of spinlock object (r0).
* lock - The address of spinlock object.
*
* Returned Value:
* The spinlock is always locked upon return. The value of previous value
@ -69,8 +69,6 @@
* obtain the lock) or SP_UNLOCKED if the spinlock was previously unlocked
* (meaning that we successfully obtained the lock)
*
* Modifies: r1, r2, and lr
*
****************************************************************************/
.globl up_testset
@ -83,19 +81,19 @@ up_testset:
/* Test if the spinlock is locked or not */
1:
ldrexb r2, [r0] /* Test if spinlock is locked or not */
ldrexb r2, [r0] /* Test if spinlock is locked or not */
cmp r2, r1 /* Already locked? */
beq 2f /* If already locked, return SP_LOCKED */
beq 2f /* If already locked, return SP_LOCKED */
/* Not locked ... attempt to lock it */
strexb r2, r1, [r0] /* Attempt to set the locked state */
strexb r2, r1, [r0] /* Attempt to set the locked state */
cmp r2, r1 /* r2 will be 1 is strexb failed */
beq 1b /* Failed to lock... try again */
beq 1b /* Failed to lock... try again */
/* Lock acquired -- return SP_UNLOCKED */
dmb /* Required before accessing protected resource */
dmb /* Required before accessing protected resource */
mov r0, #SP_UNLOCKED
bx lr

View file

@ -24,7 +24,6 @@
#include <nuttx/config.h>
#include <nuttx/irq.h>
#include "arm_arch.h"
.file "arm_vectoraddrexcptn.S"
@ -59,8 +58,8 @@
*
* Description:
* Shouldn't happen. This exception handler is in a separate file from
* other vector handlers because some processors (e.g., Cortex-A5) do not
* support the Address Exception vector.
* other vector handlers because some processors do not support the
* Address Exception vector.
*
****************************************************************************/

View file

@ -31,6 +31,10 @@
.file "arm_vectors.S"
/****************************************************************************
* Pre-processor Definitions
****************************************************************************/
/****************************************************************************
* Private Data
****************************************************************************/
@ -68,7 +72,7 @@ g_nestlevel:
#endif /* CONFIG_ARCH_INTERRUPTSTACK > 7 && CONFIG_ARMV7A_HAVE_GICv2 */
/****************************************************************************
* Macro Definitions
* Assembly Macros
****************************************************************************/
/****************************************************************************
@ -82,7 +86,7 @@ g_nestlevel:
#if !defined(CONFIG_SMP) && CONFIG_ARCH_INTERRUPTSTACK > 7
.macro cpuindex, index
.mov \index, #0
.mov \index, #0
.endm
#endif
@ -145,31 +149,31 @@ arm_vectorirq:
ldr r13, .Lirqtmp
sub lr, lr, #4
str lr, [r13] /* Save lr_IRQ */
str lr, [r13] /* Save lr_IRQ */
mrs lr, spsr
str lr, [r13, #4] /* Save spsr_IRQ */
/* Then switch back to SVC mode */
bic lr, lr, #PSR_MODE_MASK /* Keep F and T bits */
bic lr, lr, #PSR_MODE_MASK /* Keep F and T bits */
#ifdef CONFIG_ARMV7A_DECODEFIQ
orr lr, lr, #(PSR_MODE_SVC | PSR_I_BIT | PSR_F_BIT)
#else
orr lr, lr, #(PSR_MODE_SVC | PSR_I_BIT)
#endif
msr cpsr_c, lr /* Switch to SVC mode */
msr cpsr_c, lr /* Switch to SVC mode */
/* Create a context structure. First set aside a stack frame
* and store r0-r12 into the frame.
*/
sub sp, sp, #XCPTCONTEXT_SIZE
stmia sp, {r0-r12} /* Save the SVC mode regs */
stmia sp, {r0-r12} /* Save the SVC mode regs */
/* Get the values for r15(pc) and CPSR in r3 and r4 */
ldr r0, .Lirqtmp /* Points to temp storage */
ldmia r0, {r3, r4} /* Recover r3=lr_IRQ, r4=spsr_IRQ */
ldmia r0, {r3, r4} /* Recover r3=lr_IRQ, r4=spsr_IRQ */
#ifdef CONFIG_BUILD_KERNEL
/* Did we enter from user mode? If so then we need get the values of
@ -185,9 +189,9 @@ arm_vectorirq:
*/
add r0, sp, #(4*REG_SP) /* Offset to sp/lr storage */
stmia r0, {r13, r14}^ /* Save user mode r13(sp) and r14(lr) */
add r0, sp, #(4*REG_R15) /* Offset to pc/cpsr storage */
stmia r0, {r3, r4} /* Save r15(pc), and the CPSR */
stmia r0, {r13, r14}^ /* Save user mode r13(sp) and r14(lr) */
add r0, sp, #(4*REG_R15) /* Offset to pc/cpsr storage */
stmia r0, {r3, r4} /* Save r15(pc), and the CPSR */
b .Lirqcontinue
.Lirqentersvc:
@ -201,7 +205,7 @@ arm_vectorirq:
/* Save r13(sp), r14(lr), r15(pc), and the CPSR */
add r0, sp, #(4*REG_SP) /* Offset to pc, cpsr storage */
stmia r0, {r1-r4}
stmia r0, {r1-r4}
.Lirqcontinue:
@ -214,13 +218,13 @@ arm_vectorirq:
/* Save r13(sp), r14(lr), r15(pc), and the CPSR */
add r0, sp, #(4*REG_SP) /* Offset to pc, cpsr storage */
stmia r0, {r1-r4}
stmia r0, {r1-r4}
#endif
/* Then call the IRQ handler with interrupts disabled. */
mov fp, #0 /* Init frame pointer */
mov r0, sp /* Get r0=xcp */
mov fp, #0 /* Init frame pointer */
mov r0, sp /* Get r0=xcp */
#if CONFIG_ARCH_INTERRUPTSTACK > 7
#ifdef CONFIG_ARMV7A_HAVE_GICv2
@ -231,26 +235,26 @@ arm_vectorirq:
ldr r5, .Lirqnestlevel /* r5=Points to interrupt nesting level */
#ifdef CONFIG_SMP
cpuindex r1 /* r1=cpu index */
lsls r1, r1, #2 /* r1=array byte offset */
add r5, r5, r1 /* Indexed interrupt nesting level */
cpuindex r1 /* r1=cpu index */
lsls r1, r1, #2 /* r1=array byte offset */
add r5, r5, r1 /* Indexed interrupt nesting level */
#endif
ldr r1, [r5] /* Get r1= nesting level */
add r1, r1, #1 /* Increment nesting level */
str r1, [r5] /* Save r1= nesting level */
ldr r1, [r5] /* Get r1= nesting level */
add r1, r1, #1 /* Increment nesting level */
str r1, [r5] /* Save r1= nesting level */
cmp r1, #1 /* r1>1 if nested */
bgt .Lintnested /* Use current SP if nested */
cmp r1, #1 /* r1>1 if nested */
bgt .Lintnested /* Use current SP if nested */
#endif
/* Call arm_decodeirq() on the interrupt stack */
setirqstack r1, r3 /* SP = IRQ stack top */
setirqstack r1, r3 /* SP = IRQ stack top */
str r0, [sp, #-4]! /* Save the xcp address at SP-4 then update SP */
mov r4, sp /* Save the SP in a preserved register */
bic sp, sp, #7 /* Force 8-byte alignment */
mov r4, sp /* Save the SP in a preserved register */
bic sp, sp, #7 /* Force 8-byte alignment */
bl arm_decodeirq /* Call the handler */
ldr sp, [r4] /* Restore the user stack pointer */
ldr sp, [r4] /* Restore the user stack pointer */
#ifdef CONFIG_ARMV7A_HAVE_GICv2
b .Lintreturn
@ -258,27 +262,27 @@ arm_vectorirq:
/* Call arm_decodeirq() on whatever stack is in place */
.Lintnested:
mov r4, sp /* Save the SP in a preserved register */
bic sp, sp, #7 /* Force 8-byte alignment */
mov r4, sp /* Save the SP in a preserved register */
bic sp, sp, #7 /* Force 8-byte alignment */
bl arm_decodeirq /* Call the handler */
mov sp, r4 /* Restore the possibly unaligned stack pointer */
mov sp, r4 /* Restore the possibly unaligned stack pointer */
/* Decrement the nesting level (r5 should be preserved) */
.Lintreturn:
ldr r1, [r5] /* Get r1= nesting level */
cmp r1, #0 /* A sanity check*/
subgt r1, r1, #1 /* Decrement nesting level */
strgt r1, [r5] /* Save r1= nesting level */
ldr r1, [r5] /* Get r1= nesting level */
cmp r1, #0 /* A sanity check*/
subgt r1, r1, #1 /* Decrement nesting level */
strgt r1, [r5] /* Save r1= nesting level */
#endif
#else
/* Call arm_decodeirq() on the user stack */
mov r4, sp /* Save the SP in a preserved register */
bic sp, sp, #7 /* Force 8-byte alignment */
mov r4, sp /* Save the SP in a preserved register */
bic sp, sp, #7 /* Force 8-byte alignment */
bl arm_decodeirq /* Call the handler */
mov sp, r4 /* Restore the possibly unaligned stack pointer */
mov sp, r4 /* Restore the possibly unaligned stack pointer */
#endif
/* Upon return from arm_decodeirq, r0 holds the pointer to the register
@ -289,8 +293,8 @@ arm_vectorirq:
/* Restore the CPSR, SVC mode registers and return */
ldr r1, [r0, #(4*REG_CPSR)] /* Fetch the return SPSR */
msr spsr, r1 /* Set the return mode SPSR */
ldr r1, [r0, #(4*REG_CPSR)] /* Fetch the return SPSR */
msr spsr, r1 /* Set the return mode SPSR */
#ifdef CONFIG_BUILD_KERNEL
/* Are we leaving in user mode? If so then we need to restore the
@ -305,18 +309,18 @@ arm_vectorirq:
* is not in the register list).
*/
mov r13, r0 /* (SVC) R13=Register storage area */
ldmia r13, {r0-R12} /* Restore common R0-R12 */
add r14, r13, #(4*REG_R13) /* (SVC) R14=address of R13/R14 storage */
ldmia r14, {r13, r14}^ /* Restore user mode R13/R14 */
add r14, r13, #(4*REG_R15) /* (SVC) R14=address of R15 storage */
ldmia r14, {r15}^ /* Return */
mov r13, r0 /* (SVC) R13=Register storage area */
ldmia r13, {r0-R12} /* Restore common R0-R12 */
add r14, r13, #(4*REG_R13) /* (SVC) R14=address of R13/R14 storage */
ldmia r14, {r13, r14}^ /* Restore user mode R13/R14 */
add r14, r13, #(4*REG_R15) /* (SVC) R14=address of R15 storage */
ldmia r14, {r15}^ /* Return */
.Lirqleavesvc:
#endif
/* Life is simple when everything is SVC mode */
ldmia r0, {r0-r15}^ /* Return */
ldmia r0, {r0-r15}^ /* Return */
.Lirqtmp:
.word g_irqtmp
@ -355,19 +359,19 @@ arm_vectorsvc:
*/
sub sp, sp, #XCPTCONTEXT_SIZE
stmia sp, {r0-r12} /* Save the SVC mode regs */
stmia sp, {r0-r12} /* Save the SVC mode regs */
/* Get the values for r15(pc) and CPSR in r3 and r4 */
mov r3, r14 /* Save r14 as the PC as well */
mrs r4, spsr /* Get the saved CPSR */
mov r3, r14 /* Save r14 as the PC as well */
mrs r4, spsr /* Get the saved CPSR */
#ifdef CONFIG_BUILD_KERNEL
/* Did we enter from user mode? If so then we need get the values of
* USER mode r13(sp) and r14(lr).
*/
and r1, r4, #PSR_MODE_MASK /* Interrupted mode */
and r1, r4, #PSR_MODE_MASK /* Interrupted mode */
cmp r1, #PSR_MODE_USR /* User mode? */
bne .Lsvcentersvc /* Branch if not user mode */
@ -376,9 +380,9 @@ arm_vectorsvc:
*/
add r0, sp, #(4*REG_SP) /* Offset to sp/lr storage */
stmia r0, {r13, r14}^ /* Save user mode r13(sp) and r14(lr) */
add r0, sp, #(4*REG_R15) /* Offset to pc/cpsr storage */
stmia r0, {r3, r4} /* Save r15(pc), and the CPSR */
stmia r0, {r13, r14}^ /* Save user mode r13(sp) and r14(lr) */
add r0, sp, #(4*REG_R15) /* Offset to pc/cpsr storage */
stmia r0, {r3, r4} /* Save r15(pc), and the CPSR */
b .Lsvccontinue
.Lsvcentersvc:
@ -392,7 +396,7 @@ arm_vectorsvc:
/* Save r13(sp), r14(lr), r15(pc), and the CPSR */
add r0, sp, #(4*REG_SP) /* Offset to pc, cpsr storage */
stmia r0, {r1-r4}
stmia r0, {r1-r4}
.Lsvccontinue:
@ -405,19 +409,19 @@ arm_vectorsvc:
/* Save r13(sp), r14(lr), r15(pc), and the CPSR */
add r0, sp, #(4*REG_SP) /* Offset to pc, cpsr storage */
stmia r0, {r1-r4}
stmia r0, {r1-r4}
#endif
/* Then call the SVC handler with interrupts disabled.
* void arm_syscall(struct xcptcontext *xcp)
*/
mov fp, #0 /* Init frame pointer */
mov r0, sp /* Get r0=xcp */
mov r4, sp /* Save the SP in a preserved register */
bic sp, sp, #7 /* Force 8-byte alignment */
bl arm_syscall /* Call the handler */
mov sp, r4 /* Restore the possibly unaligned stack pointer */
mov fp, #0 /* Init frame pointer */
mov r0, sp /* Get r0=xcp */
mov r4, sp /* Save the SP in a preserved register */
bic sp, sp, #7 /* Force 8-byte alignment */
bl arm_syscall /* Call the handler */
mov sp, r4 /* Restore the possibly unaligned stack pointer */
/* Upon return from arm_syscall, r0 holds the pointer to the register
* state save area to use to restore the registers. This may or may not
@ -427,15 +431,15 @@ arm_vectorsvc:
/* Restore the CPSR, SVC mode registers and return */
ldr r1, [r0, #(4*REG_CPSR)] /* Fetch the return SPSR */
msr spsr, r1 /* Set the return mode SPSR */
ldr r1, [r0, #(4*REG_CPSR)] /* Fetch the return SPSR */
msr spsr, r1 /* Set the return mode SPSR */
#ifdef CONFIG_BUILD_KERNEL
/* Are we leaving in user mode? If so then we need to restore the
* values of USER mode r13(sp) and r14(lr).
*/
and r2, r1, #PSR_MODE_MASK /* Interrupted mode */
and r2, r1, #PSR_MODE_MASK /* Interrupted mode */
cmp r2, #PSR_MODE_USR /* User mode? */
bne .Lleavesvcsvc /* Branch if not user mode */
@ -443,18 +447,18 @@ arm_vectorsvc:
* is not in the register list).
*/
mov r13, r0 /* (SVC) R13=Register storage area */
ldmia r13, {r0-R12} /* Restore common R0-R12 */
add r14, r13, #(4*REG_R13) /* (SVC) R14=address of R13/R14 storage */
ldmia r14, {r13, r14}^ /* Restore user mode R13/R14 */
add r14, r13, #(4*REG_R15) /* (SVC) R14=address of R15 storage */
ldmia r14, {r15}^ /* Return */
mov r13, r0 /* (SVC) R13=Register storage area */
ldmia r13, {r0-R12} /* Restore common R0-R12 */
add r14, r13, #(4*REG_R13) /* (SVC) R14=address of R13/R14 storage */
ldmia r14, {r13, r14}^ /* Restore user mode R13/R14 */
add r14, r13, #(4*REG_R15) /* (SVC) R14=address of R15 storage */
ldmia r14, {r15}^ /* Return */
.Lleavesvcsvc:
#endif
/* Life is simple when everything is SVC mode */
ldmia r0, {r0-r15}^ /* Return */
ldmia r0, {r0-r15}^ /* Return */
.size arm_vectorsvc, . - arm_vectorsvc
@ -481,35 +485,35 @@ arm_vectordata:
*/
ldr r13, .Ldaborttmp /* Points to temp storage */
sub lr, lr, #8 /* Fixup return */
str lr, [r13] /* Save in temp storage */
mrs lr, spsr /* Get SPSR */
sub lr, lr, #8 /* Fixup return */
str lr, [r13] /* Save in temp storage */
mrs lr, spsr /* Get SPSR */
str lr, [r13, #4] /* Save in temp storage */
/* Then switch back to SVC mode */
bic lr, lr, #PSR_MODE_MASK /* Keep F and T bits */
bic lr, lr, #PSR_MODE_MASK /* Keep F and T bits */
orr lr, lr, #(PSR_MODE_SVC | PSR_I_BIT | PSR_F_BIT)
msr cpsr_c, lr /* Switch to SVC mode */
msr cpsr_c, lr /* Switch to SVC mode */
/* Create a context structure. First set aside a stack frame
* and store r0-r12 into the frame.
*/
sub sp, sp, #XCPTCONTEXT_SIZE
stmia sp, {r0-r12} /* Save the SVC mode regs */
stmia sp, {r0-r12} /* Save the SVC mode regs */
/* Get the values for r15(pc) and CPSR in r3 and r4 */
ldr r0, .Ldaborttmp /* Points to temp storage */
ldmia r0, {r3, r4} /* Recover r3=lr_ABT, r4=spsr_ABT */
ldmia r0, {r3, r4} /* Recover r3=lr_ABT, r4=spsr_ABT */
#ifdef CONFIG_BUILD_KERNEL
/* Did we enter from user mode? If so then we need get the values of
* USER mode r13(sp) and r14(lr).
*/
and r1, r4, #PSR_MODE_MASK /* Interrupted mode */
and r1, r4, #PSR_MODE_MASK /* Interrupted mode */
cmp r1, #PSR_MODE_USR /* User mode? */
bne .Ldabtentersvc /* Branch if not user mode */
@ -518,9 +522,9 @@ arm_vectordata:
*/
add r0, sp, #(4*REG_SP) /* Offset to sp/lr storage */
stmia r0, {r13, r14}^ /* Save user mode r13(sp) and r14(lr) */
add r0, sp, #(4*REG_R15) /* Offset to pc/cpsr storage */
stmia r0, {r3, r4} /* Save r15(pc), and the CPSR */
stmia r0, {r13, r14}^ /* Save user mode r13(sp) and r14(lr) */
add r0, sp, #(4*REG_R15) /* Offset to pc/cpsr storage */
stmia r0, {r3, r4} /* Save r15(pc), and the CPSR */
b .Ldabtcontinue
.Ldabtentersvc:
@ -534,7 +538,7 @@ arm_vectordata:
/* Save r13(sp), r14(lr), r15(pc), and the CPSR */
add r0, sp, #(4*REG_SP) /* Offset to pc, cpsr storage */
stmia r0, {r1-r4}
stmia r0, {r1-r4}
.Ldabtcontinue:
@ -547,21 +551,21 @@ arm_vectordata:
/* Save r13(sp), r14(lr), r15(pc), and the CPSR */
add r0, sp, #(4*REG_SP) /* Offset to pc, cpsr storage */
stmia r0, {r1-r4}
stmia r0, {r1-r4}
#endif
/* Then call the data abort handler with interrupts disabled.
* void arm_dataabort(struct xcptcontext *xcp)
*/
mov fp, #0 /* Init frame pointer */
mov r0, sp /* Get r0=xcp */
mov fp, #0 /* Init frame pointer */
mov r0, sp /* Get r0=xcp */
mrc CP15_DFAR(r1) /* Get R1=DFAR */
mrc CP15_DFSR(r2) /* Get r2=DFSR */
mov r4, sp /* Save the SP in a preserved register */
bic sp, sp, #7 /* Force 8-byte alignment */
mov r4, sp /* Save the SP in a preserved register */
bic sp, sp, #7 /* Force 8-byte alignment */
bl arm_dataabort /* Call the handler */
mov sp, r4 /* Restore the possibly unaligned stack pointer */
mov sp, r4 /* Restore the possibly unaligned stack pointer */
/* Upon return from arm_dataabort, r0 holds the pointer to the register
* state save area to use to restore the registers. This may or may not
@ -571,7 +575,7 @@ arm_vectordata:
/* Restore the CPSR, SVC mode registers and return */
ldr r1, [r0, #(4*REG_CPSR)] /* Fetch the return SPSR */
ldr r1, [r0, #(4*REG_CPSR)] /* Fetch the return SPSR */
msr spsr_cxsf, r1 /* Set the return mode SPSR */
#ifdef CONFIG_BUILD_KERNEL
@ -579,7 +583,7 @@ arm_vectordata:
* values of USER mode r13(sp) and r14(lr).
*/
and r2, r1, #PSR_MODE_MASK /* Interrupted mode */
and r2, r1, #PSR_MODE_MASK /* Interrupted mode */
cmp r2, #PSR_MODE_USR /* User mode? */
bne .Ldabtleavesvc /* Branch if not user mode */
@ -587,18 +591,18 @@ arm_vectordata:
* is not in the register list).
*/
mov r13, r0 /* (SVC) R13=Register storage area */
ldmia r13, {r0-R12} /* Restore common R0-R12 */
add r14, r13, #(4*REG_R13) /* (SVC) R14=address of R13/R14 storage */
ldmia r14, {r13, r14}^ /* Restore user mode R13/R14 */
add r14, r13, #(4*REG_R15) /* (SVC) R14=address of R15 storage */
ldmia r14, {r15}^ /* Return */
mov r13, r0 /* (SVC) R13=Register storage area */
ldmia r13, {r0-R12} /* Restore common R0-R12 */
add r14, r13, #(4*REG_R13) /* (SVC) R14=address of R13/R14 storage */
ldmia r14, {r13, r14}^ /* Restore user mode R13/R14 */
add r14, r13, #(4*REG_R15) /* (SVC) R14=address of R15 storage */
ldmia r14, {r15}^ /* Return */
.Ldabtleavesvc:
#endif
/* Life is simple when everything is SVC mode */
ldmia r0, {r1-r15}^ /* Return */
ldmia r0, {r1-r15}^ /* Return */
.Ldaborttmp:
.word g_aborttmp
@ -627,35 +631,35 @@ arm_vectorprefetch:
*/
ldr r13, .Lpaborttmp /* Points to temp storage */
sub lr, lr, #4 /* Fixup return */
str lr, [r13] /* Save in temp storage */
mrs lr, spsr /* Get SPSR */
sub lr, lr, #4 /* Fixup return */
str lr, [r13] /* Save in temp storage */
mrs lr, spsr /* Get SPSR */
str lr, [r13, #4] /* Save in temp storage */
/* Then switch back to SVC mode */
bic lr, lr, #PSR_MODE_MASK /* Keep F and T bits */
bic lr, lr, #PSR_MODE_MASK /* Keep F and T bits */
orr lr, lr, #(PSR_MODE_SVC | PSR_I_BIT | PSR_F_BIT)
msr cpsr_c, lr /* Switch to SVC mode */
msr cpsr_c, lr /* Switch to SVC mode */
/* Create a context structure. First set aside a stack frame
* and store r0-r12 into the frame.
*/
sub sp, sp, #XCPTCONTEXT_SIZE
stmia sp, {r0-r12} /* Save the SVC mode regs */
stmia sp, {r0-r12} /* Save the SVC mode regs */
/* Get the values for r15(pc) and CPSR in r3 and r4 */
ldr r0, .Lpaborttmp /* Points to temp storage */
ldmia r0, {r3, r4} /* Recover r3=lr_ABT, r4=spsr_ABT */
ldmia r0, {r3, r4} /* Recover r3=lr_ABT, r4=spsr_ABT */
#ifdef CONFIG_BUILD_KERNEL
/* Did we enter from user mode? If so then we need get the values of
* USER mode r13(sp) and r14(lr).
*/
and r1, r4, #PSR_MODE_MASK /* Interrupted mode */
and r1, r4, #PSR_MODE_MASK /* Interrupted mode */
cmp r1, #PSR_MODE_USR /* User mode? */
bne .Lpabtentersvc /* Branch if not user mode */
@ -664,9 +668,9 @@ arm_vectorprefetch:
*/
add r0, sp, #(4*REG_SP) /* Offset to sp/lr storage */
stmia r0, {r13, r14}^ /* Save user mode r13(sp) and r14(lr) */
add r0, sp, #(4*REG_R15) /* Offset to pc/cpsr storage */
stmia r0, {r3, r4} /* Save r15(pc), and the CPSR */
stmia r0, {r13, r14}^ /* Save user mode r13(sp) and r14(lr) */
add r0, sp, #(4*REG_R15) /* Offset to pc/cpsr storage */
stmia r0, {r3, r4} /* Save r15(pc), and the CPSR */
b .Lpabtcontinue
.Lpabtentersvc:
@ -680,7 +684,7 @@ arm_vectorprefetch:
/* Save r13(sp), r14(lr), r15(pc), and the CPSR */
add r0, sp, #(4*REG_SP) /* Offset to pc, cpsr storage */
stmia r0, {r1-r4}
stmia r0, {r1-r4}
.Lpabtcontinue:
@ -693,21 +697,21 @@ arm_vectorprefetch:
/* Save r13(sp), r14(lr), r15(pc), and the CPSR */
add r0, sp, #(4*REG_SP) /* Offset to pc, cpsr storage */
stmia r0, {r1-r4}
stmia r0, {r1-r4}
#endif
/* Then call the prefetch abort handler with interrupts disabled.
* void arm_prefetchabort(struct xcptcontext *xcp)
*/
mov fp, #0 /* Init frame pointer */
mov r0, sp /* Get r0=xcp */
mov fp, #0 /* Init frame pointer */
mov r0, sp /* Get r0=xcp */
mrc CP15_IFAR(r1) /* Get R1=IFAR */
mrc CP15_IFSR(r2) /* Get r2=IFSR */
mov r4, sp /* Save the SP in a preserved register */
bic sp, sp, #7 /* Force 8-byte alignment */
mov r4, sp /* Save the SP in a preserved register */
bic sp, sp, #7 /* Force 8-byte alignment */
bl arm_prefetchabort /* Call the handler */
mov sp, r4 /* Restore the possibly unaligned stack pointer */
mov sp, r4 /* Restore the possibly unaligned stack pointer */
/* Upon return from arm_prefetchabort, r0 holds the pointer to the register
* state save area to use to restore the registers. This may or may not
@ -717,7 +721,7 @@ arm_vectorprefetch:
/* Restore the CPSR, SVC mode registers and return */
ldr r1, [r0, #(4*REG_CPSR)] /* Fetch the return SPSR */
ldr r1, [r0, #(4*REG_CPSR)] /* Fetch the return SPSR */
msr spsr_cxsf, r1 /* Set the return mode SPSR */
#ifdef CONFIG_BUILD_KERNEL
@ -725,7 +729,7 @@ arm_vectorprefetch:
* values of USER mode r13(sp) and r14(lr).
*/
and r2, r1, #PSR_MODE_MASK /* Interrupted mode */
and r2, r1, #PSR_MODE_MASK /* Interrupted mode */
cmp r2, #PSR_MODE_USR /* User mode? */
bne .Lpabtleavesvc /* Branch if not user mode */
@ -733,18 +737,18 @@ arm_vectorprefetch:
* is not in the register list).
*/
mov r13, r0 /* (SVC) R13=Register storage area */
ldmia r13, {r0-R12} /* Restore common R0-R12 */
add r14, r13, #(4*REG_R13) /* (SVC) R14=address of R13/R14 storage */
ldmia r14, {r13, r14}^ /* Restore user mode R13/R14 */
add r14, r13, #(4*REG_R15) /* (SVC) R14=address of R15 storage */
ldmia r14, {r15}^ /* Return */
mov r13, r0 /* (SVC) R13=Register storage area */
ldmia r13, {r0-R12} /* Restore common R0-R12 */
add r14, r13, #(4*REG_R13) /* (SVC) R14=address of R13/R14 storage */
ldmia r14, {r13, r14}^ /* Restore user mode R13/R14 */
add r14, r13, #(4*REG_R15) /* (SVC) R14=address of R15 storage */
ldmia r14, {r15}^ /* Return */
.Lpabtleavesvc:
#endif
/* Life is simple when everything is SVC mode */
ldmia r0, {r0-r15}^ /* Return */
ldmia r0, {r0-r15}^ /* Return */
.Lpaborttmp:
.word g_aborttmp
@ -771,34 +775,34 @@ arm_vectorundefinsn:
*/
ldr r13, .Lundeftmp /* Points to temp storage */
str lr, [r13] /* Save in temp storage */
mrs lr, spsr /* Get SPSR */
str lr, [r13] /* Save in temp storage */
mrs lr, spsr /* Get SPSR */
str lr, [r13, #4] /* Save in temp storage */
/* Then switch back to SVC mode */
bic lr, lr, #PSR_MODE_MASK /* Keep F and T bits */
bic lr, lr, #PSR_MODE_MASK /* Keep F and T bits */
orr lr, lr, #(PSR_MODE_SVC | PSR_I_BIT | PSR_F_BIT)
msr cpsr_c, lr /* Switch to SVC mode */
msr cpsr_c, lr /* Switch to SVC mode */
/* Create a context structure. First set aside a stack frame
* and store r0-r12 into the frame.
*/
sub sp, sp, #XCPTCONTEXT_SIZE
stmia sp, {r0-r12} /* Save the SVC mode regs */
stmia sp, {r0-r12} /* Save the SVC mode regs */
/* Get the values for r15(pc) and CPSR in r3 and r4 */
ldr r0, .Lundeftmp /* Points to temp storage */
ldmia r0, {r3, r4} /* Recover r3=lr_UND, r4=spsr_UND */
ldmia r0, {r3, r4} /* Recover r3=lr_UND, r4=spsr_UND */
#ifdef CONFIG_BUILD_KERNEL
/* Did we enter from user mode? If so then we need get the values of
* USER mode r13(sp) and r14(lr).
*/
and r1, r4, #PSR_MODE_MASK /* Interrupted mode */
and r1, r4, #PSR_MODE_MASK /* Interrupted mode */
cmp r1, #PSR_MODE_USR /* User mode? */
bne .Lundefentersvc /* Branch if not user mode */
@ -807,9 +811,9 @@ arm_vectorundefinsn:
*/
add r0, sp, #(4*REG_SP) /* Offset to sp/lr storage */
stmia r0, {r13, r14}^ /* Save user mode r13(sp) and r14(lr) */
add r0, sp, #(4*REG_R15) /* Offset to pc/cpsr storage */
stmia r0, {r3, r4} /* Save r15(pc), and the CPSR */
stmia r0, {r13, r14}^ /* Save user mode r13(sp) and r14(lr) */
add r0, sp, #(4*REG_R15) /* Offset to pc/cpsr storage */
stmia r0, {r3, r4} /* Save r15(pc), and the CPSR */
b .Lundefcontinue
.Lundefentersvc:
@ -823,7 +827,7 @@ arm_vectorundefinsn:
/* Save r13(sp), r14(lr), r15(pc), and the CPSR */
add r0, sp, #(4*REG_SP) /* Offset to pc, cpsr storage */
stmia r0, {r1-r4}
stmia r0, {r1-r4}
.Lundefcontinue:
@ -836,19 +840,19 @@ arm_vectorundefinsn:
/* Save r13(sp), r14(lr), r15(pc), and the CPSR */
add r0, sp, #(4*REG_SP) /* Offset to pc, cpsr storage */
stmia r0, {r1-r4}
stmia r0, {r1-r4}
#endif
/* Then call the undef insn handler with interrupts disabled.
* void arm_undefinedinsn(struct xcptcontext *xcp)
*/
mov fp, #0 /* Init frame pointer */
mov r0, sp /* Get r0=xcp */
mov r4, sp /* Save the SP in a preserved register */
bic sp, sp, #7 /* Force 8-byte alignment */
mov fp, #0 /* Init frame pointer */
mov r0, sp /* Get r0=xcp */
mov r4, sp /* Save the SP in a preserved register */
bic sp, sp, #7 /* Force 8-byte alignment */
bl arm_undefinedinsn /* Call the handler */
mov sp, r4 /* Restore the possibly unaligned stack pointer */
mov sp, r4 /* Restore the possibly unaligned stack pointer */
/* Upon return from arm_undefinedinsn, r0 holds the pointer to the register
* state save area to use to restore the registers. This may or may not
@ -858,7 +862,7 @@ arm_vectorundefinsn:
/* Restore the CPSR, SVC mode registers and return */
ldr r1, [r0, #(4*REG_CPSR)] /* Fetch the return SPSR */
ldr r1, [r0, #(4*REG_CPSR)] /* Fetch the return SPSR */
msr spsr_cxsf, r1 /* Set the return mode SPSR */
#ifdef CONFIG_BUILD_KERNEL
@ -866,7 +870,7 @@ arm_vectorundefinsn:
* values of USER mode r13(sp) and r14(lr).
*/
and r2, r1, #PSR_MODE_MASK /* Interrupted mode */
and r2, r1, #PSR_MODE_MASK /* Interrupted mode */
cmp r2, #PSR_MODE_USR /* User mode? */
bne .Lundefleavesvc /* Branch if not user mode */
@ -874,18 +878,18 @@ arm_vectorundefinsn:
* is not in the register list).
*/
mov r13, r0 /* (SVC) R13=Register storage area */
ldmia r13, {r0-R12} /* Restore common R0-R12 */
add r14, r13, #(4*REG_R13) /* (SVC) R14=address of R13/R14 storage */
ldmia r14, {r13, r14}^ /* Restore user mode R13/R14 */
add r14, r13, #(4*REG_R15) /* (SVC) R14=address of R15 storage */
ldmia r14, {r15}^ /* Return */
mov r13, r0 /* (SVC) R13=Register storage area */
ldmia r13, {r0-R12} /* Restore common R0-R12 */
add r14, r13, #(4*REG_R13) /* (SVC) R14=address of R13/R14 storage */
ldmia r14, {r13, r14}^ /* Restore user mode R13/R14 */
add r14, r13, #(4*REG_R15) /* (SVC) R14=address of R15 storage */
ldmia r14, {r15}^ /* Return */
.Lundefleavesvc:
#endif
/* Life is simple when everything is SVC mode */
ldmia r0, {r0-r15}^ /* Return */
ldmia r0, {r0-r15}^ /* Return */
.Lundeftmp:
.word g_undeftmp
@ -913,28 +917,28 @@ arm_vectorfiq:
/* On entry we are free to use the FIQ mode registers r8 through r14 */
ldr r13, .Lfiqtmp /* Points to temp storage */
sub lr, lr, #4 /* Fixup return */
str lr, [r13] /* Save in temp storage */
mrs lr, spsr /* Get SPSR_fiq */
sub lr, lr, #4 /* Fixup return */
str lr, [r13] /* Save in temp storage */
mrs lr, spsr /* Get SPSR_fiq */
str lr, [r13, #4] /* Save in temp storage */
/* Then switch back to SVC mode */
bic lr, lr, #PSR_MODE_MASK /* Keep F and T bits */
bic lr, lr, #PSR_MODE_MASK /* Keep F and T bits */
orr lr, lr, #(PSR_MODE_SVC | PSR_I_BIT | PSR_F_BIT)
msr cpsr_c, lr /* Switch to SVC mode */
msr cpsr_c, lr /* Switch to SVC mode */
/* Create a context structure. First set aside a stack frame
* and store r0-r12 into the frame.
*/
sub sp, sp, #XCPTCONTEXT_SIZE
stmia sp, {r0-r12} /* Save the SVC mode regs */
stmia sp, {r0-r12} /* Save the SVC mode regs */
/* Get the values for r15(pc) and CPSR in r3 and r4 */
ldr r0, .Lfiqtmp /* Points to temp storage */
ldmia r0, {r3, r4} /* Recover r3=lr_SVC, r4=spsr_SVC */
ldmia r0, {r3, r4} /* Recover r3=lr_SVC, r4=spsr_SVC */
#ifdef CONFIG_BUILD_KERNEL
/* Did we enter from user mode? If so then we need get the values of
@ -950,9 +954,9 @@ arm_vectorfiq:
*/
add r0, sp, #(4*REG_SP) /* Offset to sp/lr storage */
stmia r0, {r13, r14}^ /* Save user mode r13(sp) and r14(lr) */
add r0, sp, #(4*REG_R15) /* Offset to pc/cpsr storage */
stmia r0, {r3, r4} /* Save r15(pc), and the CPSR */
stmia r0, {r13, r14}^ /* Save user mode r13(sp) and r14(lr) */
add r0, sp, #(4*REG_R15) /* Offset to pc/cpsr storage */
stmia r0, {r3, r4} /* Save r15(pc), and the CPSR */
b .Lfiqcontinue
.Lfiqentersvc:
@ -966,7 +970,7 @@ arm_vectorfiq:
/* Save r13(sp), r14(lr), r15(pc), and the CPSR */
add r0, sp, #(4*REG_SP) /* Offset to pc, cpsr storage */
stmia r0, {r1-r4}
stmia r0, {r1-r4}
.Lfiqcontinue:
@ -979,26 +983,26 @@ arm_vectorfiq:
/* Save r13(sp), r14(lr), r15(pc), and the CPSR */
add r0, sp, #(4*REG_SP) /* Offset to pc, cpsr storage */
stmia r0, {r1-r4}
stmia r0, {r1-r4}
#endif
/* Then call the FIQ handler with interrupts disabled. */
mov fp, #0 /* Init frame pointer */
mov r0, sp /* Get r0=xcp */
mov fp, #0 /* Init frame pointer */
mov r0, sp /* Get r0=xcp */
#if CONFIG_ARCH_INTERRUPTSTACK > 7
setfiqstack r1, r4 /* SP = FIQ stack top */
setfiqstack r1, r4 /* SP = FIQ stack top */
str r0, [sp, #-4]! /* Save the xcp address at SP-4 then update SP */
mov r4, sp /* Save the SP in a preserved register */
bic sp, sp, #7 /* Force 8-byte alignment */
mov r4, sp /* Save the SP in a preserved register */
bic sp, sp, #7 /* Force 8-byte alignment */
bl arm_decodefiq /* Call the handler */
ldr sp, [r4] /* Restore the user stack pointer */
ldr sp, [r4] /* Restore the user stack pointer */
#else
mov r4, sp /* Save the SP in a preserved register */
bic sp, sp, #7 /* Force 8-byte alignment */
mov r4, sp /* Save the SP in a preserved register */
bic sp, sp, #7 /* Force 8-byte alignment */
bl arm_decodefiq /* Call the handler */
mov sp, r4 /* Restore the possibly unaligned stack pointer */
mov sp, r4 /* Restore the possibly unaligned stack pointer */
#endif
/* Upon return from arm_decodefiq, r0 holds the pointer to the register
@ -1009,8 +1013,8 @@ arm_vectorfiq:
/* Restore the CPSR, SVC mode registers and return */
ldr r1, [r0, #(4*REG_CPSR)] /* Fetch the return SPSR */
msr spsr, r1 /* Set the return mode SPSR */
ldr r1, [r0, #(4*REG_CPSR)] /* Fetch the return SPSR */
msr spsr, r1 /* Set the return mode SPSR */
#ifdef CONFIG_BUILD_KERNEL
/* Are we leaving in user mode? If so then we need to restore the
@ -1025,18 +1029,18 @@ arm_vectorfiq:
* is not in the register list).
*/
mov r13, r0 /* (SVC) R13=Register storage area */
ldmia r13, {r0-R12} /* Restore common R0-R12 */
add r14, r13, #(4*REG_R13) /* (SVC) R14=address of R13/R14 storage */
ldmia r14, {r13, r14}^ /* Restore user mode R13/R14 */
add r14, r13, #(4*REG_R15) /* (SVC) R14=address of R15 storage */
ldmia r14, {r15}^ /* Return */
mov r13, r0 /* (SVC) R13=Register storage area */
ldmia r13, {r0-R12} /* Restore common R0-R12 */
add r14, r13, #(4*REG_R13) /* (SVC) R14=address of R13/R14 storage */
ldmia r14, {r13, r14}^ /* Restore user mode R13/R14 */
add r14, r13, #(4*REG_R15) /* (SVC) R14=address of R15 storage */
ldmia r14, {r15}^ /* Return */
.Lfiqleavesvc:
#endif
/* Life is simple when everything is SVC mode */
ldmia r0, {r0-r15}^ /* Return */
ldmia r0, {r0-r15}^ /* Return */
.Lfiqtmp:
.word g_fiqtmp

View file

@ -54,16 +54,16 @@
/* These will be relocated to VECTOR_BASE. */
_vector_start:
ldr pc, .Lresethandler /* 0x00: Reset */
ldr pc, .Lresethandler /* 0x00: Reset */
ldr pc, .Lundefinedhandler /* 0x04: Undefined instruction */
ldr pc, .Lsvchandler /* 0x08: Software interrupt */
ldr pc, .Lsvchandler /* 0x08: Software interrupt */
ldr pc, .Lprefetchaborthandler /* 0x0c: Prefetch abort */
ldr pc, .Ldataaborthandler /* 0x10: Data abort */
ldr pc, .Laddrexcptnhandler /* 0x14: Address exception (reserved) */
ldr pc, .Lirqhandler /* 0x18: IRQ */
ldr pc, .Lfiqhandler /* 0x1c: FIQ */
ldr pc, .Lirqhandler /* 0x18: IRQ */
ldr pc, .Lfiqhandler /* 0x1c: FIQ */
.globl __start
.globl __start
.globl arm_vectorundefinsn
.globl arm_vectorsvc
.globl arm_vectorprefetch

View file

@ -6,10 +6,10 @@
*
* References:
*
* "Cortex-A5 MPCore, Technical Reference Manual", Revision: r0p1,
* Copyright © 2010 ARM. All rights reserved. ARM DDI 0434B (ID101810)
* "ARM® Architecture Reference Manual, ARMv7-A and ARMv7-R edition",
* Copyright © 1996-1998, 2000, 2004-2012 ARM. All rights reserved. ARM
* "Cortex-A5 MPCore, Technical Reference Manual", Revision: r0p1,
* Copyright (c) 2010 ARM. All rights reserved. ARM DDI 0434B (ID101810)
* "ARM Architecture Reference Manual, ARMv7-A and ARMv7-R edition",
* Copyright (c) 1996-1998, 2000, 2004-2012 ARM. All rights reserved. ARM
* DDI 0406C.b (ID072512)
*
* Portions of this file derive from Atmel sample code for the SAMA5D3 Cortex-A5
@ -92,21 +92,21 @@
cp15_clean_dcache:
mrc CP15_CTR(r3) /* Read the Cache Type Register */
lsr r3, r3, #16 /* Isolate the DMinLine field */
mrc CP15_CTR(r3) /* Read the Cache Type Register */
lsr r3, r3, #16 /* Isolate the DMinLine field */
and r3, r3, #0xf
mov r2, #4
mov r2, r2, lsl r3 /* Get the cache line size in bytes */
sub r3, r2, #1 /* R3=Cache line size mask */
bic r0, r0, r3 /* R0=aligned start address */
sub r3, r2, #1 /* R3=Cache line size mask */
bic r0, r0, r3 /* R0=aligned start address */
/* Loop, cleaning each cache line by writing its contents to memory */
1:
mcr CP15_DCCMVAC(r0) /* Clean data cache line to PoC by VA */
add r0, r0, r2 /* R12=Next cache line */
cmp r0, r1 /* Loop until all cache lines have been cleaned */
add r0, r0, r2 /* R12=Next cache line */
cmp r0, r1 /* Loop until all cache lines have been cleaned */
blo 1b
dsb

View file

@ -6,10 +6,10 @@
*
* References:
*
* "Cortex-A5 MPCore, Technical Reference Manual", Revision: r0p1,
* Copyright © 2010 ARM. All rights reserved. ARM DDI 0434B (ID101810)
* "ARM® Architecture Reference Manual, ARMv7-A and ARMv7-R edition",
* Copyright © 1996-1998, 2000, 2004-2012 ARM. All rights reserved. ARM
* "Cortex-A5 MPCore, Technical Reference Manual", Revision: r0p1,
* Copyright (c) 2010 ARM. All rights reserved. ARM DDI 0434B (ID101810)
* "ARM Architecture Reference Manual, ARMv7-A and ARMv7-R edition",
* Copyright (c) 1996-1998, 2000, 2004-2012 ARM. All rights reserved. ARM
* DDI 0406C.b (ID072512)
*
* Portions of this file derive from Atmel sample code for the SAMA5D3 Cortex-A5
@ -93,37 +93,37 @@
.type cp15_coherent_dcache, function
cp15_coherent_dcache:
mrc CP15_CTR(r3) /* Read the Cache Type Register */
lsr r3, r3, #16 /* Isolate the DMinLine field */
mrc CP15_CTR(r3) /* Read the Cache Type Register */
lsr r3, r3, #16 /* Isolate the DMinLine field */
and r3, r3, #0xf
mov r2, #4
mov r2, r2, lsl r3 /* Get the cache line size in bytes */
sub r3, r2, #1 /* R3=Cache line size mask */
bic r12, r0, r3 /* R12=aligned start address */
sub r3, r2, #1 /* R3=Cache line size mask */
bic r12, r0, r3 /* R12=aligned start address */
/* Loop, flushing each D cache line to memory */
1:
mcr CP15_DCCMVAU(r12) /* Clean data or unified cache line by VA to PoU */
add r12, r12, r2 /* R12=Next cache line */
cmp r12, r1 /* Loop until all cache lines have been cleaned */
cmp r12, r1 /* Loop until all cache lines have been cleaned */
blo 1b
dsb
mrc CP15_CTR(r3) /* Read the Cache Type Register */
mrc CP15_CTR(r3) /* Read the Cache Type Register */
and r3, r3, #0xf /* Isolate the IminLine field */
mov r2, #4
mov r2, r2, lsl r3 /* Get the cache line size in bytes */
sub r3, r2, #1 /* R3=Cache line size mask */
bic r12, r0, r3 /* R12=aligned start address */
sub r3, r2, #1 /* R3=Cache line size mask */
bic r12, r0, r3 /* R12=aligned start address */
/* Loop, invalidating each I cache line to memory */
1:
mcr CP15_ICIMVAU(r12) /* Invalidate instruction cache by VA to PoU */
add r12, r12, r2 /* R12=Next cache line */
cmp r12, r1 /* Loop until all cache lines have been invalidated */
cmp r12, r1 /* Loop until all cache lines have been invalidated */
blo 1b
mov r0, #0

View file

@ -6,10 +6,10 @@
*
* References:
*
* "Cortex-A5 MPCore, Technical Reference Manual", Revision: r0p1,
* Copyright © 2010 ARM. All rights reserved. ARM DDI 0434B (ID101810)
* "ARM® Architecture Reference Manual, ARMv7-A and ARMv7-R edition",
* Copyright © 1996-1998, 2000, 2004-2012 ARM. All rights reserved. ARM
* "Cortex-A5 MPCore, Technical Reference Manual", Revision: r0p1,
* Copyright (c) 2010 ARM. All rights reserved. ARM DDI 0434B (ID101810)
* "ARM Architecture Reference Manual, ARMv7-A and ARMv7-R edition",
* Copyright (c) 1996-1998, 2000, 2004-2012 ARM. All rights reserved. ARM
* DDI 0406C.b (ID072512)
*
* Portions of this file derive from Atmel sample code for the SAMA5D3 Cortex-A5
@ -93,20 +93,20 @@
cp15_flush_dcache:
mrc CP15_CTR(r3) /* Read the Cache Type Register */
lsr r3, r3, #16 /* Isolate the DMinLine field */
lsr r3, r3, #16 /* Isolate the DMinLine field */
and r3, r3, #0xf
mov r2, #4
mov r2, r2, lsl r3 /* Get the cache line size in bytes */
sub r3, r2, #1 /* R3=Cache line size mask */
bic r0, r0, r3 /* R0=aligned start address */
sub r3, r2, #1 /* R3=Cache line size mask */
bic r0, r0, r3 /* R0=aligned start address */
/* Loop, cleaning and invaliding each D cache line in the address range */
1:
mcr CP15_DCCIMVAC(r0) /* Clean and invalidate data cache line by VA to PoC */
add r0, r0, r2 /* R12=Next cache line */
cmp r0, r1 /* Loop until all cache lines have been cleaned */
add r0, r0, r2 /* R12=Next cache line */
cmp r0, r1 /* Loop until all cache lines have been cleaned */
blo 1b
dsb

View file

@ -6,10 +6,10 @@
*
* References:
*
* "Cortex-A5 MPCore, Technical Reference Manual", Revision: r0p1,
* Copyright © 2010 ARM. All rights reserved. ARM DDI 0434B (ID101810)
* "ARM® Architecture Reference Manual, ARMv7-A and ARMv7-R edition",
* Copyright © 1996-1998, 2000, 2004-2012 ARM. All rights reserved. ARM
* "Cortex-A5 MPCore, Technical Reference Manual", Revision: r0p1,
* Copyright (c) 2010 ARM. All rights reserved. ARM DDI 0434B (ID101810)
* "ARM Architecture Reference Manual, ARMv7-A and ARMv7-R edition",
* Copyright (c) 1996-1998, 2000, 2004-2012 ARM. All rights reserved. ARM
* DDI 0406C.b (ID072512)
*
* Portions of this file derive from Atmel sample code for the SAMA5D3 Cortex-A5
@ -93,27 +93,27 @@
cp15_invalidate_dcache:
mrc CP15_CTR(r3) /* Read the Cache Type Register */
lsr r3, r3, #16 /* Isolate the DMinLine field */
mrc CP15_CTR(r3) /* Read the Cache Type Register */
lsr r3, r3, #16 /* Isolate the DMinLine field */
and r3, r3, #0xf
mov r2, #4
mov r2, r2, lsl r3 /* Get the cache line size in bytes */
sub r3, r2, #1 /* R3=Cache line size mask */
sub r3, r2, #1 /* R3=Cache line size mask */
tst r0, r3
bic r0, r0, r3 /* R0=aligned start address */
bic r0, r0, r3 /* R0=aligned start address */
mcrne CP15_DCCIMVAC(r0) /* Clean and invalidate data cache line by VA to PoC */
mcrne CP15_DCCIMVAC(r0) /* Clean and invalidate data cache line by VA to PoC */
tst r1, r3
bic r1, r1, r3 /* R0=aligned end address */
mcrne CP15_DCCIMVAC(r1) /* Clean and invalidate data cache line by VA to PoC */
bic r1, r1, r3 /* R0=aligned end address */
mcrne CP15_DCCIMVAC(r1) /* Clean and invalidate data cache line by VA to PoC */
/* Loop, invalidating each D cache line */
1:
mcr CP15_DCIMVAC(r0) /* Invalidate data cache line by VA to PoC */
add r0, r0, r2 /* R12=Next cache line */
cmp r0, r1 /* Loop until all cache lines have been invalidate */
add r0, r0, r2 /* R12=Next cache line */
cmp r0, r1 /* Loop until all cache lines have been invalidate */
blo 1b
dsb

View file

@ -6,10 +6,10 @@
*
* References:
*
* "Cortex-A5 MPCore, Technical Reference Manual", Revision: r0p1,
* Copyright © 2010 ARM. All rights reserved. ARM DDI 0434B (ID101810)
* "ARM® Architecture Reference Manual, ARMv7-A and ARMv7-R edition",
* Copyright © 1996-1998, 2000, 2004-2012 ARM. All rights reserved. ARM
* "Cortex-A5 MPCore, Technical Reference Manual", Revision: r0p1,
* Copyright (c) 2010 ARM. All rights reserved. ARM DDI 0434B (ID101810)
* "ARM Architecture Reference Manual, ARMv7-A and ARMv7-R edition",
* Copyright (c) 1996-1998, 2000, 2004-2012 ARM. All rights reserved. ARM
* DDI 0406C.b (ID072512)
*
* Portions of this file derive from Atmel sample code for the SAMA5D3 Cortex-A5
@ -91,24 +91,24 @@
cp15_invalidate_dcache_all:
mrc CP15_CCSIDR(r0) /* Read the Cache Size Identification Register */
ldr r3, =0xffff /* Isolate the NumSets field (bits 13-27) */
ldr r3, =0xffff /* Isolate the NumSets field (bits 13-27) */
and r0, r3, r0, lsr #13 /* r0=NumSets (number of sets - 1) */
mov r1, #0 /* r1 = way loop counter */
mov r1, #0 /* r1 = way loop counter */
way_loop:
mov r3, #0 /* r3 = set loop counter */
mov r3, #0 /* r3 = set loop counter */
set_loop:
mov r2, r1, lsl #30 /* r2 = way loop counter << 30 */
orr r2, r3, lsl #5 /* r2 = set/way cache operation format */
mcr CP15_DCISW(r2) /* Data Cache Invalidate by Set/Way */
add r3, r3, #1 /* Increment set counter */
cmp r0, r3 /* Last set? */
bne set_loop /* Keep looping if not */
add r3, r3, #1 /* Increment set counter */
cmp r0, r3 /* Last set? */
bne set_loop /* Keep looping if not */
add r1, r1, #1 /* Increment the way counter */
cmp r1, #4 /* Last way? (four ways assumed) */
bne way_loop /* Keep looping if not */
add r1, r1, #1 /* Increment the way counter */
cmp r1, #4 /* Last way? (four ways assumed) */
bne way_loop /* Keep looping if not */
dsb
bx lr

View file

@ -26,8 +26,6 @@
#include "arm_vfork.h"
.file "vfork.S"
/****************************************************************************
* Pre-processor Definitions
****************************************************************************/
@ -36,6 +34,7 @@
* Public Symbols
****************************************************************************/
.file "vfork.S"
.globl up_vfork
/****************************************************************************
@ -89,7 +88,7 @@
vfork:
/* Create a stack frame */
mov r0, sp /* Save the value of the stack on entry */
mov r0, sp /* Save the value of the stack on entry */
sub sp, sp, #VFORK_SIZEOF /* Allocate the structure on the stack */
/* CPU registers */

View file

@ -1,7 +1,8 @@
/****************************************************************************
* arch/arm/src/armv7-m/gnu/arm_exception.S
*
* Copyright (C) 2009-2013, 2015-2016, 2018 Gregory Nutt. All rights reserved.
* Copyright (C) 2009-2013, 2015-2016, 2018 Gregory Nutt.
* All rights reserved.
* Copyright (C) 2012 Michael Smith. All rights reserved.
* Author: Gregory Nutt <gnutt@nuttx.org>
*
@ -49,10 +50,11 @@
/****************************************************************************
* Pre-processor Definitions
****************************************************************************/
/* Configuration ********************************************************************/
/* Configuration ************************************************************/
#ifdef CONFIG_ARCH_HIPRI_INTERRUPT
/* In kernel mode without an interrupt stack, this interrupt handler will set the
/* In protected mode without an interrupt stack, this interrupt handler will set the
* MSP to the stack pointer of the interrupted thread. If the interrupted thread
* was a privileged thread, that will be the MSP otherwise it will be the PSP. If
* the PSP is used, then the value of the MSP will be invalid when the interrupt
@ -68,7 +70,7 @@
*/
# if defined(CONFIG_BUILD_PROTECTED) && CONFIG_ARCH_INTERRUPTSTACK < 8
# error Interrupt stack must be used with high priority interrupts in kernel mode
# error Interrupt stack must be used with high priority interrupts in protected mode
# endif
/* Use the BASEPRI to control interrupts is required if nested, high
@ -135,8 +137,8 @@
*/
.text
.type exception_common, function
.thumb_func
.type exception_common, function
exception_common:
mrs r0, ipsr /* R0=exception number */
@ -145,15 +147,14 @@ exception_common:
/* The EXC_RETURN value tells us whether the context is on the MSP or PSP */
tst r14, #EXC_RETURN_PROCESS_STACK /* nonzero if context on process stack */
beq 1f /* Branch if context already on the MSP */
tst r14, #EXC_RETURN_PROCESS_STACK /* nonzero if context on process stack */
beq 1f /* Branch if context already on the MSP */
mrs r1, psp /* R1=The process stack pointer (PSP) */
mov sp, r1 /* Set the MSP to the PSP */
mov sp, r1 /* Set the MSP to the PSP */
1:
mov r2, sp /* R2=Copy of the main/process stack pointer */
add r2, #HW_XCPT_SIZE /* R2=MSP/PSP before the interrupt was taken */
/* (ignoring the xPSR[9] alignment bit) */
add r2, #HW_XCPT_SIZE /* R2=MSP/PSP before the interrupt was taken */
/* (ignoring the xPSR[9] alignment bit) */
#ifdef CONFIG_ARMV7M_USEBASEPRI
mrs r3, basepri /* R3=Current BASEPRI setting */
#else
@ -173,11 +174,11 @@ exception_common:
* where to put the registers.
*/
vstmdb sp!, {s16-s31} /* Save the non-volatile FP context */
vstmdb sp!, {s16-s31} /* Save the non-volatile FP context */
#endif
stmdb sp!, {r2-r11,r14} /* Save the remaining registers plus the SP/PRIMASK values */
stmdb sp!, {r2-r11,r14} /* Save the remaining registers plus the SP/PRIMASK values */
/* There are two arguments to arm_doirq:
*
@ -197,7 +198,7 @@ exception_common:
* here prohibits nested interrupts without some additional logic!
*/
setintstack r2, r3 /* SP = IRQ stack top */
setintstack r2, r3 /* SP = IRQ stack top */
#else
/* Otherwise, we will re-use the interrupted thread's stack. That may
@ -219,7 +220,7 @@ exception_common:
*/
cmp r0, r1 /* Context switch? */
beq 2f /* Branch if no context switch */
beq 2f /* Branch if no context switch */
/* We are returning with a pending context switch. This case is different
* because in this case, the register save structure does not lie on the
@ -236,32 +237,32 @@ exception_common:
*/
add r1, r0, #SW_XCPT_SIZE /* R1=Address of HW save area in reg array */
ldmia r1!, {r4-r11} /* Fetch eight registers in HW save area */
ldmia r1!, {r4-r11} /* Fetch eight registers in HW save area */
#ifdef CONFIG_ARCH_FPU
vldmia r1!, {s0-s15} /* Fetch sixteen FP registers in HW save area */
ldmia r1, {r2-r3} /* Fetch FPSCR and Reserved in HW save area */
vldmia r1!, {s0-s15} /* Fetch sixteen FP registers in HW save area */
ldmia r1, {r2-r3} /* Fetch FPSCR and Reserved in HW save area */
#endif
ldr r1, [r0, #(4*REG_SP)] /* R1=Value of SP before interrupt */
#ifdef CONFIG_ARCH_FPU
stmdb r1!, {r2-r3} /* Store FPSCR and Reserved on the return stack */
vstmdb r1!, {s0-s15} /* Store sixteen FP registers on the return stack */
stmdb r1!, {r2-r3} /* Store FPSCR and Reserved on the return stack */
vstmdb r1!, {s0-s15} /* Store sixteen FP registers on the return stack */
#endif
stmdb r1!, {r4-r11} /* Store eight registers on the return stack */
ldmia r0!, {r2-r11,r14} /* Recover R4-R11, r14 + 2 temp values */
stmdb r1!, {r4-r11} /* Store eight registers on the return stack */
ldmia r0!, {r2-r11,r14} /* Recover R4-R11, r14 + 2 temp values */
#ifdef CONFIG_ARCH_FPU
vldmia r0, {s16-s31} /* Recover S16-S31 */
vldmia r0, {s16-s31} /* Recover S16-S31 */
#endif
b 3f /* Re-join common logic */
b 3f /* Re-join common logic */
2:
/* We are returning with no context switch. We simply need to "unwind"
* the same stack frame that we created at entry.
*/
ldmia r1!, {r2-r11,r14} /* Recover R4-R11, r14 + 2 temp values */
ldmia r1!, {r2-r11,r14} /* Recover R4-R11, r14 + 2 temp values */
#ifdef CONFIG_ARCH_FPU
vldmia r1!, {s16-s31} /* Recover S16-S31 */
vldmia r1!, {s16-s31} /* Recover S16-S31 */
#endif
3:
@ -275,8 +276,8 @@ exception_common:
*/
mrs r2, control /* R2=Contents of the control register */
tst r14, #EXC_RETURN_PROCESS_STACK /* nonzero if context on process stack */
beq 4f /* Branch if privileged */
tst r14, #EXC_RETURN_PROCESS_STACK /* nonzero if context on process stack */
beq 4f /* Branch if privileged */
orr r2, r2, #1 /* Unprivileged mode */
msr psp, r1 /* R1=The process stack pointer */
@ -287,10 +288,10 @@ exception_common:
5:
msr control, r2 /* Save the updated control register */
#else
tst r14, #EXC_RETURN_PROCESS_STACK /* nonzero if context on process stack */
ite eq /* next two instructions conditional */
msreq msp, r1 /* R1=The main stack pointer */
msrne psp, r1 /* R1=The process stack pointer */
tst r14, #EXC_RETURN_PROCESS_STACK /* nonzero if context on process stack */
ite eq /* Next two instructions conditional */
msreq msp, r1 /* R1=The main stack pointer */
msrne psp, r1 /* R1=The process stack pointer */
#endif
/* Restore the interrupt state */
@ -305,7 +306,7 @@ exception_common:
* return to thread mode, and (2) select the correct stack.
*/
bx r14 /* And return */
bx r14 /* And return */
.size exception_common, .-exception_common

View file

@ -26,7 +26,7 @@
.syntax unified
.thumb
.file "arm_fetchadd.S"
.file "arm_fetchadd.S"
/****************************************************************************
* Public Functions
@ -57,15 +57,15 @@
up_fetchadd32:
1:
ldrex r2, [r0] /* Fetch the value to be incremented */
ldrex r2, [r0] /* Fetch the value to be incremented */
add r2, r2, r1 /* Add the addend */
strex r3, r2, [r0] /* Attempt to save the result */
strex r3, r2, [r0] /* Attempt to save the result */
teq r3, #0 /* r3 will be 1 if strex failed */
bne 1b /* Failed to lock... try again */
bne 1b /* Failed to lock... try again */
mov r0, r2 /* Return the incremented value */
bx lr /* Successful! */
bx lr /* Successful! */
.size up_fetchadd32, . - up_fetchadd32
/****************************************************************************
@ -91,15 +91,15 @@ up_fetchadd32:
up_fetchsub32:
1:
ldrex r2, [r0] /* Fetch the value to be decremented */
ldrex r2, [r0] /* Fetch the value to be decremented */
sub r2, r2, r1 /* Subtract the subtrahend */
strex r3, r2, [r0] /* Attempt to save the result */
strex r3, r2, [r0] /* Attempt to save the result */
teq r3, #0 /* r3 will be 1 if strex failed */
bne 1b /* Failed to lock... try again */
bne 1b /* Failed to lock... try again */
mov r0, r2 /* Return the decremented value */
bx lr /* Successful! */
bx lr /* Successful! */
.size up_fetchsub32, . - up_fetchsub32
/****************************************************************************
@ -125,15 +125,15 @@ up_fetchsub32:
up_fetchadd16:
1:
ldrexh r2, [r0] /* Fetch the value to be incremented */
ldrexh r2, [r0] /* Fetch the value to be incremented */
add r2, r2, r1 /* Add the addend */
strexh r3, r2, [r0] /* Attempt to save the result */
strexh r3, r2, [r0] /* Attempt to save the result */
teq r3, #0 /* r3 will be 1 if strexh failed */
bne 1b /* Failed to lock... try again */
bne 1b /* Failed to lock... try again */
mov r0, r2 /* Return the incremented value */
bx lr /* Successful! */
bx lr /* Successful! */
.size up_fetchadd16, . - up_fetchadd16
/****************************************************************************
@ -159,17 +159,17 @@ up_fetchadd16:
up_fetchsub16:
1:
ldrexh r2, [r0] /* Fetch the value to be decremented */
ldrexh r2, [r0] /* Fetch the value to be decremented */
sub r2, r2, r1 /* Subtract the subtrahend */
/* Attempt to save the decremented value */
strexh r3, r2, [r0] /* Attempt to save the result */
strexh r3, r2, [r0] /* Attempt to save the result */
teq r3, #0 /* r3 will be 1 if strexh failed */
bne 1b /* Failed to lock... try again */
bne 1b /* Failed to lock... try again */
mov r0, r2 /* Return the decremented value */
bx lr /* Successful! */
bx lr /* Successful! */
.size up_fetchsub16, . - up_fetchsub16
/****************************************************************************
@ -195,15 +195,15 @@ up_fetchsub16:
up_fetchadd8:
1:
ldrexb r2, [r0] /* Fetch the value to be incremented */
ldrexb r2, [r0] /* Fetch the value to be incremented */
add r2, r2, r1 /* Add the addend */
strexb r3, r2, [r0] /* Attempt to save the result */
strexb r3, r2, [r0] /* Attempt to save the result */
teq r3, #0 /* r3 will be 1 if strexb failed */
bne 1b /* Failed to lock... try again */
bne 1b /* Failed to lock... try again */
mov r0, r2 /* Return the incremented value */
bx lr /* Successful! */
bx lr /* Successful! */
.size up_fetchadd8, . - up_fetchadd8
/****************************************************************************
@ -229,14 +229,14 @@ up_fetchadd8:
up_fetchsub8:
1:
ldrexb r2, [r0] /* Fetch the value to be decremented */
ldrexb r2, [r0] /* Fetch the value to be decremented */
sub r2, r2, r1 /* Subtract the subtrahend */
strexb r3, r2, [r0] /* Attempt to save the result */
strexb r3, r2, [r0] /* Attempt to save the result */
teq r3, #0 /* r3 will be 1 if strexb failed */
bne 1b /* Failed to lock... try again */
bne 1b /* Failed to lock... try again */
mov r0, r2 /* Return the decremented value */
bx lr /* Successful! */
bx lr /* Successful! */
.size up_fetchsub8, . - up_fetchsub8
.end

View file

@ -84,73 +84,73 @@ arm_savefpu:
* s0, s1, ... in increasing address order.
*/
vstmia r1!, {s0-s31} /* Save the full FP context */
vstmia r1!, {s0-s31} /* Save the full FP context */
/* Store the floating point control and status register. At the end of the
* vstmia, r1 will point to the FPCSR storage location.
*/
vmrs r2, fpscr /* Fetch the FPCSR */
vmrs r2, fpscr /* Fetch the FPCSR */
str r2, [r1], #4 /* Save the floating point control and status register */
#else
/* Store all floating point registers */
#if 1 /* Use store multiple */
fstmias r1!, {s0-s31} /* Save the full FP context */
fstmias r1!, {s0-s31} /* Save the full FP context */
#else
vmov r2, r3, d0 /* r2, r3 = d0 */
vmov r2, r3, d0 /* r2, r3 = d0 */
str r2, [r1], #4 /* Save S0 and S1 values */
str r3, [r1], #4
vmov r2, r3, d1 /* r2, r3 = d1 */
vmov r2, r3, d1 /* r2, r3 = d1 */
str r2, [r1], #4 /* Save S2 and S3 values */
str r3, [r1], #4
vmov r2, r3, d2 /* r2, r3 = d2 */
vmov r2, r3, d2 /* r2, r3 = d2 */
str r2, [r1], #4 /* Save S4 and S5 values */
str r3, [r1], #4
vmov r2, r3, d3 /* r2, r3 = d3 */
vmov r2, r3, d3 /* r2, r3 = d3 */
str r2, [r1], #4 /* Save S6 and S7 values */
str r3, [r1], #4
vmov r2, r3, d4 /* r2, r3 = d4 */
vmov r2, r3, d4 /* r2, r3 = d4 */
str r2, [r1], #4 /* Save S8 and S9 values */
str r3, [r1], #4
vmov r2, r3, d5 /* r2, r3 = d5 */
vmov r2, r3, d5 /* r2, r3 = d5 */
str r2, [r1], #4 /* Save S10 and S11 values */
str r3, [r1], #4
vmov r2, r3, d6 /* r2, r3 = d6 */
vmov r2, r3, d6 /* r2, r3 = d6 */
str r2, [r1], #4 /* Save S12 and S13 values */
str r3, [r1], #4
vmov r2, r3, d7 /* r2, r3 = d7 */
vmov r2, r3, d7 /* r2, r3 = d7 */
str r2, [r1], #4 /* Save S14 and S15 values */
str r3, [r1], #4
vmov r2, r3, d8 /* r2, r3 = d8 */
vmov r2, r3, d8 /* r2, r3 = d8 */
str r2, [r1], #4 /* Save S16 and S17 values */
str r3, [r1], #4
vmov r2, r3, d9 /* r2, r3 = d9 */
vmov r2, r3, d9 /* r2, r3 = d9 */
str r2, [r1], #4 /* Save S18 and S19 values */
str r3, [r1], #4
vmov r2, r3, d10 /* r2, r3 = d10 */
vmov r2, r3, d10 /* r2, r3 = d10 */
str r2, [r1], #4 /* Save S20 and S21 values */
str r3, [r1], #4
vmov r2, r3, d11 /* r2, r3 = d11 */
vmov r2, r3, d11 /* r2, r3 = d11 */
str r2, [r1], #4 /* Save S22 and S23 values */
str r3, [r1], #4
vmov r2, r3, d12 /* r2, r3 = d12 */
vmov r2, r3, d12 /* r2, r3 = d12 */
str r2, [r1], #4 /* Save S24 and S25 values */
str r3, [r1], #4
vmov r2, r3, d13 /* r2, r3 = d13 */
vmov r2, r3, d13 /* r2, r3 = d13 */
str r2, [r1], #4 /* Save S26 and S27 values */
str r3, [r1], #4
vmov r2, r3, d14 /* r2, r3 = d14 */
vmov r2, r3, d14 /* r2, r3 = d14 */
str r2, [r1], #4 /* Save S28 and S29 values */
str r3, [r1], #4
vmov r2, r3, d15 /* r2, r3 = d15 */
vmov r2, r3, d15 /* r2, r3 = d15 */
str r2, [r1], #4 /* Save S30 and S31 values */
str r3, [r1], #4
#endif
/* Store the floating point control and status register */
fmrx r2, fpscr /* Fetch the FPCSR */
fmrx r2, fpscr /* Fetch the FPCSR */
str r2, [r1], #4 /* Save the floating point control and status register */
#endif
bx lr
@ -190,70 +190,70 @@ arm_restorefpu:
* s0, s1, ... in increasing address order.
*/
vldmia r1!, {s0-s31} /* Restore the full FP context */
vldmia r1!, {s0-s31} /* Restore the full FP context */
/* Load the floating point control and status register. At the end of the
* vstmia, r1 will point to the FPCSR storage location.
*/
ldr r2, [r1], #4 /* Fetch the floating point control and status register */
vmsr fpscr, r2 /* Restore the FPCSR */
vmsr fpscr, r2 /* Restore the FPCSR */
#else
/* Load all floating point registers Registers are loaded in numeric order,
* s0, s1, ... in increasing address order.
*/
#if 1 /* Use load multiple */
fldmias r1!, {s0-s31} /* Restore the full FP context */
fldmias r1!, {s0-s31} /* Restore the full FP context */
#else
ldr r2, [r1], #4 /* Fetch S0 and S1 values */
ldr r3, [r1], #4
vmov d0, r2, r3 /* Save as d0 */
vmov d0, r2, r3 /* Save as d0 */
ldr r2, [r1], #4 /* Fetch S2 and S3 values */
ldr r3, [r1], #4
vmov d1, r2, r3 /* Save as d1 */
vmov d1, r2, r3 /* Save as d1 */
ldr r2, [r1], #4 /* Fetch S4 and S5 values */
ldr r3, [r1], #4
vmov d2, r2, r3 /* Save as d2 */
vmov d2, r2, r3 /* Save as d2 */
ldr r2, [r1], #4 /* Fetch S6 and S7 values */
ldr r3, [r1], #4
vmov d3, r2, r3 /* Save as d3 */
vmov d3, r2, r3 /* Save as d3 */
ldr r2, [r1], #4 /* Fetch S8 and S9 values */
ldr r3, [r1], #4
vmov d4, r2, r3 /* Save as d4 */
vmov d4, r2, r3 /* Save as d4 */
ldr r2, [r1], #4 /* Fetch S10 and S11 values */
ldr r3, [r1], #4
vmov d5, r2, r3 /* Save as d5 */
vmov d5, r2, r3 /* Save as d5 */
ldr r2, [r1], #4 /* Fetch S12 and S13 values */
ldr r3, [r1], #4
vmov d6, r2, r3 /* Save as d6 */
vmov d6, r2, r3 /* Save as d6 */
ldr r2, [r1], #4 /* Fetch S14 and S15 values */
ldr r3, [r1], #4
vmov d7, r2, r3 /* Save as d7 */
vmov d7, r2, r3 /* Save as d7 */
ldr r2, [r1], #4 /* Fetch S16 and S17 values */
ldr r3, [r1], #4
vmov d8, r2, r3 /* Save as d8 */
vmov d8, r2, r3 /* Save as d8 */
ldr r2, [r1], #4 /* Fetch S18 and S19 values */
ldr r3, [r1], #4
vmov d9, r2, r3 /* Save as d9 */
vmov d9, r2, r3 /* Save as d9 */
ldr r2, [r1], #4 /* Fetch S20 and S21 values */
ldr r3, [r1], #4
vmov d10, r2, r3 /* Save as d10 */
vmov d10, r2, r3 /* Save as d10 */
ldr r2, [r1], #4 /* Fetch S22 and S23 values */
ldr r3, [r1], #4
vmov d11, r2, r3 /* Save as d11 */
vmov d11, r2, r3 /* Save as d11 */
ldr r2, [r1], #4 /* Fetch S24 and S25 values */
ldr r3, [r1], #4
vmov d12, r2, r3 /* Save as d12 */
vmov d12, r2, r3 /* Save as d12 */
ldr r2, [r1], #4 /* Fetch S26 and S27 values */
ldr r3, [r1], #4
vmov d13, r2, r3 /* Save as d13 */
vmov d13, r2, r3 /* Save as d13 */
ldr r2, [r1], #4 /* Fetch S28 and S29 values */
ldr r3, [r1], #4
vmov d14, r2, r3 /* Save as d14 */
vmov d14, r2, r3 /* Save as d14 */
ldr r2, [r1], #4 /* Fetch S30 and S31 values */
ldr r3, [r1], #4
vmov d15, r2, r3 /* Save as d15 */
vmov d15, r2, r3 /* Save as d15 */
#endif
/* Load the floating point control and status register. r1 points t
@ -261,7 +261,7 @@ arm_restorefpu:
*/
ldr r2, [r1], #4 /* Fetch the floating point control and status register */
fmxr fpscr, r2 /* Restore the FPCSR */
fmxr fpscr, r2 /* Restore the FPCSR */
#endif
bx lr

View file

@ -68,12 +68,12 @@ arm_fullcontextrestore:
/* Perform the System call with R0=1 and R1=regs */
mov r1, r0 /* R1: regs */
mov r1, r0 /* R1: regs */
mov r0, #SYS_restore_context /* R0: restore context */
svc 0 /* Force synchronous SVCall (or Hard Fault) */
svc 0 /* Force synchronous SVCall (or Hard Fault) */
/* This call should not return */
bx lr /* Unnecessary ... will not return */
bx lr /* Unnecessary ... will not return */
.size arm_fullcontextrestore, .-arm_fullcontextrestore
.end

View file

@ -37,7 +37,7 @@
/* Configuration ************************************************************/
#ifdef CONFIG_ARCH_HIPRI_INTERRUPT
/* In kernel mode without an interrupt stack, this interrupt handler will set the MSP to the
/* In protected mode without an interrupt stack, this interrupt handler will set the MSP to the
* stack pointer of the interrupted thread. If the interrupted thread was a privileged
* thread, that will be the MSP otherwise it will be the PSP. If the PSP is used, then the
* value of the MSP will be invalid when the interrupt handler returns because it will be a
@ -51,7 +51,7 @@
*/
# if defined(CONFIG_BUILD_PROTECTED) && CONFIG_ARCH_INTERRUPTSTACK < 8
# error Interrupt stack must be used with high priority interrupts in kernel mode
# error Interrupt stack must be used with high priority interrupts in protected mode
# endif
/* Use the BASEPRI to control interrupts is required if nested, high
@ -115,13 +115,13 @@
*/
.text
.type exception_common, function
.type exception_common, function
exception_common:
/* Get the IRQ number from the IPSR */
mrs r0, ipsr /* R0=exception number */
mrs r0, ipsr /* R0=exception number */
/* Complete the context save */
@ -131,21 +131,21 @@ exception_common:
* EXC_RETURN is 0xfffffffd (unprivileged thread)
*/
tst r14, #EXC_RETURN_PROCESS_STACK /* nonzero if context on process stack */
beq 1f /* Branch if context already on the MSP */
tst r14, #EXC_RETURN_PROCESS_STACK /* Nonzero if context on process stack */
beq 1f /* Branch if context already on the MSP */
mrs r1, psp /* R1=The process stack pointer (PSP) */
mov sp, r1 /* Set the MSP to the PSP */
mov sp, r1 /* Set the MSP to the PSP */
1:
#endif
/* r1 holds the value of the stack pointer AFTER the exception handling logic
/* sp holds the value of the stack pointer AFTER the exception handling logic
* pushed the various registers onto the stack. Get r2 = the value of the
* stack pointer BEFORE the interrupt modified it.
*/
mov r2, sp /* R2=Copy of the main/process stack pointer */
add r2, #HW_XCPT_SIZE /* R2=MSP/PSP before the interrupt was taken */
add r2, #HW_XCPT_SIZE /* R2=MSP/PSP before the interrupt was taken */
#ifdef CONFIG_ARMV7M_USEBASEPRI
mrs r3, basepri /* R3=Current BASEPRI setting */
@ -169,9 +169,9 @@ exception_common:
*/
#ifdef CONFIG_BUILD_PROTECTED
stmdb sp!, {r2-r11,r14} /* Save the remaining registers plus the SP value */
stmdb sp!, {r2-r11,r14} /* Save the remaining registers plus the SP value */
#else
stmdb sp!, {r2-r11} /* Save the remaining registers plus the SP value */
stmdb sp!, {r2-r11} /* Save the remaining registers plus the SP value */
#endif
/* There are two arguments to arm_doirq:
@ -214,7 +214,7 @@ exception_common:
*/
cmp r0, r1 /* Context switch? */
beq 2f /* Branch if no context switch */
beq 2f /* Branch if no context switch */
/* We are returning with a pending context switch.
*
@ -241,16 +241,16 @@ exception_common:
* values to the stack.
*/
add r1, r0, #SW_XCPT_SIZE /* R1=Address of HW save area in reg array */
ldmia r1, {r4-r11} /* Fetch eight registers in HW save area */
ldr r1, [r0, #(4*REG_SP)] /* R1=Value of SP before interrupt */
stmdb r1!, {r4-r11} /* Store eight registers in HW save area */
add r1, r0, #SW_XCPT_SIZE /* R1=Address of HW save area in reg array */
ldmia r1, {r4-r11} /* Fetch eight registers in HW save area */
ldr r1, [r0, #(4*REG_SP)] /* R1=Value of SP before interrupt */
stmdb r1!, {r4-r11} /* Store eight registers in HW save area */
#ifdef CONFIG_BUILD_PROTECTED
ldmia r0, {r2-r11,r14} /* Recover R4-R11, r14 + 2 temp values */
ldmia r0, {r2-r11,r14} /* Recover R4-R11, r14 + 2 temp values */
#else
ldmia r0, {r2-r11} /* Recover R4-R11 + 2 temp values */
ldmia r0, {r2-r11} /* Recover R4-R11 + 2 temp values */
#endif
b 3f /* Re-join common logic */
b 3f /* Re-join common logic */
/* We are returning with no context switch. We simply need to "unwind"
* the same stack frame that we created
@ -261,9 +261,9 @@ exception_common:
2:
#ifdef CONFIG_BUILD_PROTECTED
ldmia r1!, {r2-r11,r14} /* Recover R4-R11, r14 + 2 temp values */
ldmia r1!, {r2-r11,r14} /* Recover R4-R11, r14 + 2 temp values */
#else
ldmia r1!, {r2-r11} /* Recover R4-R11 + 2 temp values */
ldmia r1!, {r2-r11} /* Recover R4-R11 + 2 temp values */
#endif
#ifdef CONFIG_ARCH_FPU
@ -292,8 +292,8 @@ exception_common:
*/
mrs r2, control /* R2=Contents of the control register */
tst r14, #EXC_RETURN_PROCESS_STACK /* nonzero if context on process stack */
beq 4f /* Branch if privileged */
tst r14, #EXC_RETURN_PROCESS_STACK /* nonzero if context on process stack */
beq 4f /* Branch if privileged */
orr r2, r2, #1 /* Unprivileged mode */
msr psp, r1 /* R1=The process stack pointer */
@ -310,7 +310,7 @@ exception_common:
* actually occurs with interrupts still disabled).
*/
ldr r14, =EXC_RETURN_PRIVTHR /* Load the special value */
ldr r14, =EXC_RETURN_PRIVTHR /* Load the special value */
#endif
/* Restore the interrupt state */

View file

@ -70,9 +70,9 @@ arm_saveusercontext:
/* Perform the System call with R0=0 and R1=regs */
mov r1, r0 /* R1: regs */
mov r1, r0 /* R1: regs */
mov r0, #SYS_save_context /* R0: save context (also return value) */
svc 0 /* Force synchronous SVCall (or Hard Fault) */
svc 0 /* Force synchronous SVCall (or Hard Fault) */
/* There are two return conditions. On the first return, R0 (the
* return value will be zero. On the second return we need to
@ -82,7 +82,7 @@ arm_saveusercontext:
add r2, r1, #(4*REG_R0)
mov r3, #1
str r3, [r2, #0]
bx lr /* "normal" return with r0=0 or
* context switch with r0=1 */
bx lr /* "normal" return with r0=0 or
* context switch with r0=1 */
.size arm_saveusercontext, .-arm_saveusercontext
.end

View file

@ -18,11 +18,6 @@
*
****************************************************************************/
/* When this file is assembled, it will require the following GCC options:
*
* -mcpu=cortex-m4 -mfloat-abi=hard -mfpu=fpv4-sp-d16 -meabi=5 -mthumb
*/
/****************************************************************************
* Included Files
****************************************************************************/
@ -37,12 +32,12 @@
* Public Symbols
****************************************************************************/
.globl setjmp
.globl longjmp
.globl setjmp
.globl longjmp
.syntax unified
.thumb
.file "setjmp.S"
.syntax unified
.thumb
.file "setjmp.S"
/****************************************************************************
* Public Functions
@ -69,33 +64,33 @@
*
****************************************************************************/
.thumb_func
.type setjmp, function
.thumb_func
.type setjmp, function
setjmp:
/* Store callee-saved Core registers */
/* Store callee-saved Core registers */
mov ip, sp /* move sp to ip so we can save it */
stmia r0!, {r4-r11, ip, lr}
mov ip, sp /* Move sp to ip so we can save it */
stmia r0!, {r4-r11, ip, lr}
#ifdef CONFIG_ARCH_FPU
vstmia r0!, {s16-s31} /* Save the callee-saved FP registers */
vstmia r0!, {s16-s31} /* Save the callee-saved FP registers */
/* Store the floating point control and status register. At the end of the
* vstmia, r0 will point to the FPCSR storage location.
*/
/* Store the floating point control and status register. At the end of the
* vstmia, r0 will point to the FPCSR storage location.
*/
vmrs r1, fpscr /* Fetch the FPCSR */
str r1, [r0], #4 /* Save the floating point control and status register */
// DSA: don't need to inc r0
vmrs r1, fpscr /* Fetch the FPCSR */
str r1, [r0], #4 /* Save the floating point control and status register */
/* DSA: don't need to inc r0 */
#endif /* CONFIG_ARCH_FPU */
/* we're done, we're out of here */
/* we're done, we're out of here */
mov r0, #0
bx lr
mov r0, #0
bx lr
.size setjmp, .-setjmp
.size setjmp, .-setjmp
/****************************************************************************
* Name: longjmp
@ -119,29 +114,29 @@ setjmp:
*
****************************************************************************/
.thumb_func
.type longjmp, function
.thumb_func
.type longjmp, function
longjmp:
/* Load callee-saved Core registers */
/* Load callee-saved Core registers */
ldmia r0!, {r4-r11, ip, lr}
mov sp, ip /* restore sp */
ldmia r0!, {r4-r11, ip, lr}
mov sp, ip /* Restore sp */
#ifdef CONFIG_ARCH_FPU
/* Load callee-saved floating point registers. */
/* Load callee-saved floating point registers. */
vldmia r0!, {s16-s31} /* Restore FP context */
vldmia r0!, {s16-s31} /* Restore FP context */
/* Load the floating point control and status register. */
/* Load the floating point control and status register. */
ldr r2, [r0], #4 /* Fetch the floating point control and status register */
/* DSA: don't need to inc r0 */
vmsr fpscr, r2 /* Restore the FPCSR */
ldr r2, [r0], #4 /* Fetch the floating point control and status register */
/* DSA: don't need to inc r0 */
vmsr fpscr, r2 /* Restore the FPCSR */
#endif /* CONFIG_ARCH_FPU */
mov r0, r1 /* return val */
bx lr
mov r0, r1 /* return val */
bx lr
.size longjmp, .-longjmp
.end
.size longjmp, .-longjmp
.end

View file

@ -76,7 +76,7 @@ up_signal_handler:
/* Save some register */
push {lr} /* Save LR on the stack */
push {lr} /* Save LR on the stack */
/* Call the signal handler */
@ -84,7 +84,7 @@ up_signal_handler:
mov r0, r1 /* R0=signo */
mov r1, r2 /* R1=info */
mov r2, r3 /* R2=ucontext */
blx ip /* Call the signal handler */
blx ip /* Call the signal handler */
/* Restore the registers */

View file

@ -71,8 +71,8 @@ arm_switchcontext:
mov r2, r1 /* R2: restoreregs */
mov r1, r0 /* R1: saveregs */
mov r0, #SYS_switch_context /* R0: context switch */
svc 0 /* Force synchronous SVCall (or Hard Fault) */
mov r0, #SYS_switch_context /* R0: context switch */
svc 0 /* Force synchronous SVCall (or Hard Fault) */
/* We will get here only after the rerturn from the context switch */

View file

@ -25,7 +25,7 @@
#include <nuttx/config.h>
#include <arch/spinlock.h>
.syntax unified
.syntax unified
.thumb
.file "arm_testset.S"
@ -83,19 +83,19 @@ up_testset:
/* Test if the spinlock is locked or not */
1:
ldrexb r2, [r0] /* Test if spinlock is locked or not */
ldrexb r2, [r0] /* Test if spinlock is locked or not */
cmp r2, r1 /* Already locked? */
beq 2f /* If already locked, return SP_LOCKED */
beq 2f /* If already locked, return SP_LOCKED */
/* Not locked ... attempt to lock it */
strexb r2, r1, [r0] /* Attempt to set the locked state */
strexb r2, r1, [r0] /* Attempt to set the locked state */
cmp r2, r1 /* r2 will be 1 is strexb failed */
beq 1b /* Failed to lock... try again */
beq 1b /* Failed to lock... try again */
/* Lock acquired -- return SP_UNLOCKED */
dmb /* Required before accessing protected resource */
dmb /* Required before accessing protected resource */
mov r0, #SP_UNLOCKED
bx lr

View file

@ -47,20 +47,21 @@
* Name: vfork
*
* Description:
* The vfork() function has the same effect as fork(), except that the behavior is
* undefined if the process created by vfork() either modifies any data other than
* a variable of type pid_t used to store the return value from vfork(), or returns
* from the function in which vfork() was called, or calls any other function before
* successfully calling _exit() or one of the exec family of functions.
* The vfork() function has the same effect as fork(), except that the
* behavior is undefined if the process created by vfork() either modifies
* any data other than a variable of type pid_t used to store the return
* value from vfork(), or returns from the function in which vfork() was
* called, or calls any other function before successfully calling _exit()
* or one of the exec family of functions.
*
* This thin layer implements vfork by simply calling up_vfork() with the vfork()
* context as an argument. The overall sequence is:
* This thin layer implements vfork by simply calling up_vfork() with the
* vfork() context as an argument. The overall sequence is:
*
* 1) User code calls vfork(). vfork() collects context information and
* transfers control up up_vfork().
* 2) up_vfork()and calls nxtask_setup_vfork().
* 3) nxtask_setup_vfork() allocates and configures the child task's TCB. This
* consists of:
* 3) nxtask_setup_vfork() allocates and configures the child task's TCB.
* This consists of:
* - Allocation of the child task's TCB.
* - Initialization of file descriptors and streams
* - Configuration of environment variables
@ -77,10 +78,10 @@
* None
*
* Returned Value:
* Upon successful completion, vfork() returns 0 to the child process and returns
* the process ID of the child process to the parent process. Otherwise, -1 is
* returned to the parent, no child process is created, and errno is set to
* indicate the error.
* Upon successful completion, vfork() returns 0 to the child process and
* returns the process ID of the child process to the parent process.
* Otherwise, -1 is returned to the parent, no child process is created,
* and errno is set to indicate the error.
*
****************************************************************************/
@ -90,7 +91,7 @@
vfork:
/* Create a stack frame */
mov r0, sp /* Save the value of the stack on entry */
mov r0, sp /* Save the value of the stack on entry */
sub sp, sp, #VFORK_SIZEOF /* Allocate the structure on the stack */
/* CPU registers */

View file

@ -63,15 +63,15 @@
up_fetchadd32:
ldrex r2, [r0] /* Fetch the value to be incremented */
ldrex r2, [r0] /* Fetch the value to be incremented */
add r2, r2, r1 /* Add the addend */
strex r3, r2, [r0] /* Attempt to save the result */
strex r3, r2, [r0] /* Attempt to save the result */
teq r3, #0 /* r3 will be 1 if strex failed */
bne up_fetchadd32 /* Failed to lock... try again */
bne up_fetchadd32 /* Failed to lock... try again */
mov r0, r2 /* Return the incremented value */
bx lr /* Successful! */
bx lr /* Successful! */
/****************************************************************************
* Name: up_fetchsub32
@ -92,15 +92,15 @@ up_fetchadd32:
up_fetchsub32:
ldrex r2, [r0] /* Fetch the value to be decremented */
ldrex r2, [r0] /* Fetch the value to be decremented */
sub r2, r2, r1 /* Subtract the subtrahend */
strex r3, r2, [r0] /* Attempt to save the result */
strex r3, r2, [r0] /* Attempt to save the result */
teq r3, #0 /* r3 will be 1 if strex failed */
bne up_fetchsub32 /* Failed to lock... try again */
bne up_fetchsub32 /* Failed to lock... try again */
mov r0, r2 /* Return the decremented value */
bx lr /* Successful! */
bx lr /* Successful! */
/****************************************************************************
* Name: up_fetchadd16
@ -121,15 +121,15 @@ up_fetchsub32:
up_fetchadd16:
ldrexh r2, [r0] /* Fetch the value to be incremented */
ldrexh r2, [r0] /* Fetch the value to be incremented */
add r2, r2, r1 /* Add the addend */
strexh r3, r2, [r0] /* Attempt to save the result */
strexh r3, r2, [r0] /* Attempt to save the result */
teq r3, #0 /* r3 will be 1 if strexh failed */
bne up_fetchadd16 /* Failed to lock... try again */
bne up_fetchadd16 /* Failed to lock... try again */
mov r0, r2 /* Return the incremented value */
bx lr /* Successful! */
bx lr /* Successful! */
/****************************************************************************
* Name: up_fetchsub16
@ -150,17 +150,17 @@ up_fetchadd16:
up_fetchsub16:
ldrexh r2, [r0] /* Fetch the value to be decremented */
ldrexh r2, [r0] /* Fetch the value to be decremented */
sub r2, r2, r1 /* Subtract the subtrahend */
/* Attempt to save the decremented value */
strexh r3, r2, [r0] /* Attempt to save the result */
strexh r3, r2, [r0] /* Attempt to save the result */
teq r3, #0 /* r3 will be 1 if strexh failed */
bne up_fetchsub16 /* Failed to lock... try again */
bne up_fetchsub16 /* Failed to lock... try again */
mov r0, r2 /* Return the decremented value */
bx lr /* Successful! */
bx lr /* Successful! */
/****************************************************************************
* Name: up_fetchadd8
@ -181,15 +181,15 @@ up_fetchsub16:
up_fetchadd8:
ldrexb r2, [r0] /* Fetch the value to be incremented */
ldrexb r2, [r0] /* Fetch the value to be incremented */
add r2, r2, r1 /* Add the addend */
strexb r3, r2, [r0] /* Attempt to save the result */
strexb r3, r2, [r0] /* Attempt to save the result */
teq r3, #0 /* r3 will be 1 if strexb failed */
bne up_fetchadd8 /* Failed to lock... try again */
bne up_fetchadd8 /* Failed to lock... try again */
mov r0, r2 /* Return the incremented value */
bx lr /* Successful! */
bx lr /* Successful! */
/****************************************************************************
* Name: up_fetchsub8
@ -210,14 +210,14 @@ up_fetchadd8:
up_fetchsub8:
ldrexb r2, [r0] /* Fetch the value to be decremented */
ldrexb r2, [r0] /* Fetch the value to be decremented */
sub r2, r2, r1 /* Subtract the subtrahend */
strexb r3, r2, [r0] /* Attempt to save the result */
strexb r3, r2, [r0] /* Attempt to save the result */
teq r3, #0 /* r3 will be 1 if strexb failed */
bne up_fetchsub8 /* Failed to lock... try again */
bne up_fetchsub8 /* Failed to lock... try again */
mov r0, r2 /* Return the decremented value */
bx lr /* Successful! */
bx lr /* Successful! */
END

View file

@ -68,12 +68,12 @@ arm_fullcontextrestore:
/* Perform the System call with R0=1 and R1=regs */
mov r1, r0 /* R1: regs */
mov r1, r0 /* R1: regs */
mov r0, #SYS_restore_context /* R0: restore context */
svc 0 /* Force synchronous SVCall (or Hard Fault) */
svc 0 /* Force synchronous SVCall (or Hard Fault) */
/* This call should not return */
bx lr /* Unnecessary ... will not return */
bx lr /* Unnecessary ... will not return */
END

View file

@ -69,9 +69,9 @@ arm_saveusercontext:
/* Perform the System call with R0=0 and R1=regs */
mov r1, r0 /* R1: regs */
mov r1, r0 /* R1: regs */
mov r0, #SYS_save_context /* R0: save context (also return value) */
svc 0 /* Force synchronous SVCall (or Hard Fault) */
svc 0 /* Force synchronous SVCall (or Hard Fault) */
/* There are two return conditions. On the first return, R0 (the
* return value will be zero. On the second return we need to
@ -81,6 +81,6 @@ arm_saveusercontext:
add r2, r1, #(4*REG_R0)
mov r3, #1
str r3, [r2, #0]
bx lr /* "normal" return with r0=0 or
* context switch with r0=1 */
bx lr /* "normal" return with r0=0 or
* context switch with r0=1 */
END

View file

@ -71,8 +71,8 @@ arm_switchcontext:
mov r2, r1 /* R2: restoreregs */
mov r1, r0 /* R1: saveregs */
mov r0, #SYS_switch_context /* R0: context switch */
svc 0 /* Force synchronous SVCall (or Hard Fault) */
mov r0, #SYS_switch_context /* R0: context switch */
svc 0 /* Force synchronous SVCall (or Hard Fault) */
/* We will get here only after the rerturn from the context switch */

View file

@ -28,12 +28,24 @@
MODULE up_testset
SECTION .text:CODE:NOROOT(2)
/****************************************************************************
* Pre-processor Definitions
****************************************************************************/
/****************************************************************************
* Public Symbols
****************************************************************************/
PUBLIC up_testset
/****************************************************************************
* Assembly Macros
****************************************************************************/
/****************************************************************************
* Private Functions
****************************************************************************/
/****************************************************************************
* Public Functions
****************************************************************************/
@ -67,19 +79,19 @@ up_testset:
/* Test if the spinlock is locked or not */
L1:
ldrexb r2, [r0] /* Test if spinlock is locked or not */
ldrexb r2, [r0] /* Test if spinlock is locked or not */
cmp r2, r1 /* Already locked? */
beq L2 /* If already locked, return SP_LOCKED */
beq L2 /* If already locked, return SP_LOCKED */
/* Not locked ... attempt to lock it */
strexb r2, r1, [r0] /* Attempt to set the locked state */
strexb r2, r1, [r0] /* Attempt to set the locked state */
cmp r2, r1 /* r2 will be 1 is strexb failed */
beq 1b /* Failed to lock... try again */
beq L1 /* Failed to lock... try again */
/* Lock acquired -- return SP_UNLOCKED */
dmb /* Required before accessing protected resource */
dmb /* Required before accessing protected resource */
mov r0, #SP_UNLOCKED
bx lr

View file

@ -48,20 +48,21 @@
* Name: vfork
*
* Description:
* The vfork() function has the same effect as fork(), except that the behavior is
* undefined if the process created by vfork() either modifies any data other than
* a variable of type pid_t used to store the return value from vfork(), or returns
* from the function in which vfork() was called, or calls any other function before
* successfully calling _exit() or one of the exec family of functions.
* The vfork() function has the same effect as fork(), except that the
* behavior is undefined if the process created by vfork() either modifies
* any data other than a variable of type pid_t used to store the return
* value from vfork(), or returns from the function in which vfork() was
* called, or calls any other function before successfully calling _exit()
* or one of the exec family of functions.
*
* This thin layer implements vfork by simply calling up_vfork() with the vfork()
* context as an argument. The overall sequence is:
* This thin layer implements vfork by simply calling up_vfork() with the
* vfork() context as an argument. The overall sequence is:
*
* 1) User code calls vfork(). vfork() collects context information and
* transfers control up up_vfork().
* 2) up_vfork()and calls nxtask_setup_vfork().
* 3) nxtask_setup_vfork() allocates and configures the child task's TCB. This
* consists of:
* 3) nxtask_setup_vfork() allocates and configures the child task's TCB.
* This consists of:
* - Allocation of the child task's TCB.
* - Initialization of file descriptors and streams
* - Configuration of environment variables
@ -78,10 +79,10 @@
* None
*
* Returned Value:
* Upon successful completion, vfork() returns 0 to the child process and returns
* the process ID of the child process to the parent process. Otherwise, -1 is
* returned to the parent, no child process is created, and errno is set to
* indicate the error.
* Upon successful completion, vfork() returns 0 to the child process and
* returns the process ID of the child process to the parent process.
* Otherwise, -1 is returned to the parent, no child process is created,
* and errno is set to indicate the error.
*
****************************************************************************/
@ -90,7 +91,7 @@
vfork:
/* Create a stack frame */
mov r0, sp /* Save the value of the stack on entry */
mov r0, sp /* Save the value of the stack on entry */
sub sp, sp, #VFORK_SIZEOF /* Allocate the structure on the stack */
/* CPU registers */

View file

@ -24,7 +24,7 @@
#include <nuttx/config.h>
.file "arm_fetchadd.S"
.file "arm_fetchadd.S"
/****************************************************************************
* Public Functions
@ -55,15 +55,15 @@
up_fetchadd32:
1:
ldrex r2, [r0] /* Fetch the value to be incremented */
ldrex r2, [r0] /* Fetch the value to be incremented */
add r2, r2, r1 /* Add the addend */
strex r3, r2, [r0] /* Attempt to save the result */
teq r3, #0 /* r2 will be 1 is strex failed */
bne 1b /* Failed to lock... try again */
strex r3, r2, [r0] /* Attempt to save the result */
teq r3, #0 /* r3 will be 1 if strex failed */
bne 1b /* Failed to lock... try again */
mov r0, r2 /* Return the incremented value */
bx lr /* Successful! */
bx lr /* Successful! */
.size up_fetchadd32, . - up_fetchadd32
/****************************************************************************
@ -89,15 +89,15 @@ up_fetchadd32:
up_fetchsub32:
1:
ldrex r2, [r0] /* Fetch the value to be decremented */
ldrex r2, [r0] /* Fetch the value to be decremented */
sub r2, r2, r1 /* Subtract the subtrahend */
strex r3, r2, [r0] /* Attempt to save the result */
teq r3, #0 /* r2 will be 1 is strex failed */
bne 1b /* Failed to lock... try again */
strex r3, r2, [r0] /* Attempt to save the result */
teq r3, #0 /* r3 will be 1 if strex failed */
bne 1b /* Failed to lock... try again */
mov r0, r2 /* Return the decremented value */
bx lr /* Successful! */
bx lr /* Successful! */
.size up_fetchsub32, . - up_fetchsub32
/****************************************************************************
@ -123,15 +123,15 @@ up_fetchsub32:
up_fetchadd16:
1:
ldrexh r2, [r0] /* Fetch the value to be incremented */
ldrexh r2, [r0] /* Fetch the value to be incremented */
add r2, r2, r1 /* Add the addend */
strexh r3, r2, [r0] /* Attempt to save the result */
teq r3, #0 /* r2 will be 1 is strexh failed */
bne 1b /* Failed to lock... try again */
strexh r3, r2, [r0] /* Attempt to save the result */
teq r3, #0 /* r3 will be 1 if strexh failed */
bne 1b /* Failed to lock... try again */
mov r0, r2 /* Return the incremented value */
bx lr /* Successful! */
bx lr /* Successful! */
.size up_fetchadd16, . - up_fetchadd16
/****************************************************************************
@ -157,17 +157,17 @@ up_fetchadd16:
up_fetchsub16:
1:
ldrexh r2, [r0] /* Fetch the value to be decremented */
ldrexh r2, [r0] /* Fetch the value to be decremented */
sub r2, r2, r1 /* Subtract the subtrahend */
/* Attempt to save the decremented value */
strexh r3, r2, [r0] /* Attempt to save the result */
teq r3, #0 /* r2 will be 1 is strexh failed */
bne 1b /* Failed to lock... try again */
strexh r3, r2, [r0] /* Attempt to save the result */
teq r3, #0 /* r3 will be 1 if strexh failed */
bne 1b /* Failed to lock... try again */
mov r0, r2 /* Return the decremented value */
bx lr /* Successful! */
bx lr /* Successful! */
.size up_fetchsub16, . - up_fetchsub16
/****************************************************************************
@ -193,15 +193,15 @@ up_fetchsub16:
up_fetchadd8:
1:
ldrexb r2, [r0] /* Fetch the value to be incremented */
ldrexb r2, [r0] /* Fetch the value to be incremented */
add r2, r2, r1 /* Add the addend */
strexb r3, r2, [r0] /* Attempt to save the result */
teq r3, #0 /* r2 will be 1 is strexb failed */
bne 1b /* Failed to lock... try again */
strexb r3, r2, [r0] /* Attempt to save the result */
teq r3, #0 /* r3 will be 1 if strexb failed */
bne 1b /* Failed to lock... try again */
mov r0, r2 /* Return the incremented value */
bx lr /* Successful! */
bx lr /* Successful! */
.size up_fetchadd8, . - up_fetchadd8
/****************************************************************************
@ -227,14 +227,14 @@ up_fetchadd8:
up_fetchsub8:
1:
ldrexb r2, [r0] /* Fetch the value to be decremented */
ldrexb r2, [r0] /* Fetch the value to be decremented */
sub r2, r2, r1 /* Subtract the subtrahend */
strexb r3, r2, [r0] /* Attempt to save the result */
teq r3, #0 /* r2 will be 1 is strexb failed */
bne 1b /* Failed to lock... try again */
strexb r3, r2, [r0] /* Attempt to save the result */
teq r3, #0 /* r3 will be 1 if strexb failed */
bne 1b /* Failed to lock... try again */
mov r0, r2 /* Return the decremented value */
bx lr /* Successful! */
bx lr /* Successful! */
.size up_fetchsub8, . - up_fetchsub8
.end

View file

@ -25,6 +25,13 @@
#include <nuttx/config.h>
#include "cp15.h"
#ifdef CONFIG_ARCH_FPU
/****************************************************************************
* Pre-processor Definitions
****************************************************************************/
/****************************************************************************
* Public Symbols
****************************************************************************/
@ -36,17 +43,24 @@
#else
.cpu cortex-r4
#endif
.syntax unified
.file "arm_fpuconfig.S"
/****************************************************************************
* Public Functions
* Assembly Macros
****************************************************************************/
/****************************************************************************
* Private Functions
****************************************************************************/
.text
/****************************************************************************
* Name: sam_fpuconfig
* Public Functions
****************************************************************************/
/****************************************************************************
* Name: arm_fpuconfig
*
* Description:
* Configure the FPU. Enables access to CP10 and CP11
@ -69,9 +83,10 @@ arm_fpuconfig:
/* Set FPEXC.EN (B30) */
fmrx r0, fpexc
fmrx r0, fpexc
orr r0, r0, #0x40000000
fmxr fpexc, r0
fmxr fpexc, r0
bx lr
.size arm_fpuconfig, . - arm_fpuconfig
#endif
.end

View file

@ -23,11 +23,9 @@
****************************************************************************/
#include <nuttx/config.h>
#include <nuttx/irq.h>
#include "arm_internal.h"
#include "svcall.h"
#include <arch/irq.h>
.file "arm_fullcontextrestore.S"
#include "svcall.h"
/****************************************************************************
* Pre-processor Definitions
@ -37,28 +35,28 @@
* Public Symbols
****************************************************************************/
.globl arm_fullcontextrestore
#ifdef CONFIG_ARCH_FPU
.cpu cortex-r4f
#else
.cpu cortex-r4
#endif
.syntax unified
.file "arm_fullcontextrestore.S"
/****************************************************************************
* Macros
****************************************************************************/
/****************************************************************************
* Public Functions
****************************************************************************/
.text
/****************************************************************************
* Name: arm_fullcontextrestore
*
* Description:
* Restore the specified task context. Full prototype is:
* Restore the current thread context. Full prototype is:
*
* void arm_fullcontextrestore(uint32_t *restoreregs) noreturn_function;
* void arm_fullcontextrestore(uint32_t *restoreregs) noreturn_function;
*
* Returned Value:
* None
@ -67,7 +65,6 @@
.globl arm_fullcontextrestore
.type arm_fullcontextrestore, function
arm_fullcontextrestore:
/* On entry, a1 (r0) holds address of the register save area. All other
@ -86,14 +83,14 @@ arm_fullcontextrestore:
* s0, s1, ... in increasing address order.
*/
vldmia r1!, {s0-s31} /* Restore the full FP context */
vldmia r1!, {s0-s31} /* Restore the full FP context */
/* Load the floating point control and status register. At the end of the
* vstmia, r1 will point to the FPCSR storage location.
*/
ldr r2, [r1], #4 /* Fetch the floating point control and status register */
vmsr fpscr, r2 /* Restore the FPCSR */
vmsr fpscr, r2 /* Restore the FPCSR */
#endif
#ifdef CONFIG_BUILD_PROTECTED
@ -105,13 +102,13 @@ arm_fullcontextrestore:
/* Perform the System call with R0=SYS_context_restore, R1=restoreregs */
mov r1, r0 /* R1: restoreregs */
mov r1, r0 /* R1: restoreregs */
mov r0, #SYS_context_restore /* R0: SYS_context_restore syscall */
svc #0x900001 /* Perform the system call */
svc #0x900001 /* Perform the system call */
/* This call should not return */
bx lr /* Unnecessary ... will not return */
bx lr /* Unnecessary ... will not return */
#else
/* For a flat build, we can do all of this here... Just think of this as
@ -121,16 +118,16 @@ arm_fullcontextrestore:
/* Recover all registers except for r0, r1, R15, and CPSR */
add r1, r0, #(4*REG_R2) /* Offset to REG_R2 storage */
ldmia r1, {r2-r14} /* Recover registers */
ldmia r1, {r2-r14} /* Recover registers */
/* Create a stack frame to hold the some registers */
sub sp, sp, #(3*4) /* Frame for three registers */
ldr r1, [r0, #(4*REG_R0)] /* Fetch the stored r0 value */
str r1, [sp] /* Save it at the top of the stack */
ldr r1, [r0, #(4*REG_R1)] /* Fetch the stored r1 value */
ldr r1, [r0, #(4*REG_R0)] /* Fetch the stored r0 value */
str r1, [sp] /* Save it at the top of the stack */
ldr r1, [r0, #(4*REG_R1)] /* Fetch the stored r1 value */
str r1, [sp, #4] /* Save it in the stack */
ldr r1, [r0, #(4*REG_PC)] /* Fetch the stored pc value */
ldr r1, [r0, #(4*REG_PC)] /* Fetch the stored pc value */
str r1, [sp, #8] /* Save it at the bottom of the frame */
/* Now we can restore the CPSR. We wait until we are completely
@ -140,12 +137,13 @@ arm_fullcontextrestore:
* disabled.
*/
ldr r1, [r0, #(4*REG_CPSR)] /* Fetch the stored CPSR value */
msr spsr_cxsf, r1 /* Set the SPSR */
ldr r1, [r0, #(4*REG_CPSR)] /* Fetch the stored CPSR value */
msr spsr_cxsf, r1 /* Set the SPSR */
/* Now recover r0-r1, pc and cpsr, destroying the stack frame */
ldmia sp!, {r0-r1, pc}^
#endif
.size arm_fullcontextrestore, . - arm_fullcontextrestore
.size arm_fullcontextrestore, .-arm_fullcontextrestore
.end

View file

@ -28,10 +28,7 @@
#include "cp15.h"
#include "sctlr.h"
#include "arm_internal.h"
#include "arm_arch.h"
#include <arch/board/board.h>
.file "arm_head.S"
/****************************************************************************
* Configuration
@ -64,14 +61,14 @@
* the boot logic must:
*
* - Configure SDRAM (if present),
* - Copy ourself to RAM, and
* - Copy ourself to DRAM, and
* - Clear .bss section (data should be fully initialized)
*
* In this case, we assume that the logic within this file executes from FLASH.
*
* 3. There is bootloader that copies us to SDRAM (CONFIG_BOOT_RUNFROMFLASH=n &&
* CONFIG_BOOT_COPYTORAM=n). In this case SDRAM was initialized by the boot
* loader, and this boot logic must:
* 3. There is bootloader that copies us to SDRAM (but probably not to the beginning)
* (CONFIG_BOOT_RUNFROMFLASH=n && CONFIG_BOOT_COPYTORAM=n). In this case SDRAM
* was initialized by the boot loader, and this boot logic must:
*
* - Clear .bss section (data should be fully initialized)
*/
@ -128,17 +125,9 @@
#else
.cpu cortex-r4
#endif
.syntax unified
.file "arm_head.S"
/***************************************************************************
* .text
***************************************************************************/
.text
/****************************************************************************
* OS Entry Point
* Name: __start
****************************************************************************/
/* We assume the bootloader has already initialized most of the h/w for
@ -146,9 +135,11 @@
* below.
*/
.text
.global __start
.type __start, #function
__start:
__start:
/* Make sure that we are in SVC mode with IRQs and FIQs disabled */
mov r0, #(PSR_MODE_SVC | PSR_I_BIT | PSR_F_BIT)
@ -328,7 +319,7 @@ __start:
/* Then write the configured control register */
mcr CP15_SCTLR(r0) /* Write control reg */
.rept 12 /* Some CPUs want want lots of NOPs here */
.rept 12 /* Some CPUs want want lots of NOPs here */
nop
.endr
@ -398,12 +389,12 @@ arm_data_initialize:
/* Zero BSS */
adr r0, .Linitparms
ldmia r0, {r0, r1}
ldmia r0, {r0, r1}
mov r2, #0
mov r2, #0
1:
cmp r0, r1 /* Clear up to _bss_end_ */
strcc r2, [r0], #4
strcc r2, [r0], #4
bcc 1b
#ifdef CONFIG_BOOT_RUNFROMFLASH
@ -417,7 +408,7 @@ arm_data_initialize:
*/
adr r3, .Ldatainit
ldmia r3, {r0, r1, r2}
ldmia r3, {r0, r1, r2}
2:
ldr r3, [r0], #4
@ -434,7 +425,7 @@ arm_data_initialize:
*/
adr r3, .Lfuncinit
ldmia r3, {r0, r1, r2}
ldmia r3, {r0, r1, r2}
3:
ldr r3, [r0], #4
@ -451,8 +442,8 @@ arm_data_initialize:
*/
adr r3, ..Lramfunc
ldmia r3, {r0, r1}
ldr r3, =up_clean_dcache
ldmia r3, {r0, r1}
ldr r3, =up_clean_dcache
b r3
#else
/* Otherwise return to the caller */
@ -465,15 +456,19 @@ arm_data_initialize:
bx lr
#endif
/* .text Data:
/***************************************************************************
* Text-section constants
***************************************************************************/
/* Text-section constants:
*
* _sbss is the start of the BSS region (see linker script)
* _ebss is the end of the BSS region (see linker script)
*
* Typical Configuration:
* The idle task stack starts at the end of BSS and is of size
* CONFIG_IDLETHREAD_STACKSIZE. The heap continues from there
* until the end of memory. See g_idle_topstack below.
* The idle task stack usually starts at the end of BSS and is of size
* CONFIG_IDLETHREAD_STACKSIZE. The heap continues from there until the
* end of memory. See g_idle_topstack below.
*/
.type .Linitparms, %object
@ -500,7 +495,7 @@ arm_data_initialize:
.size arm_data_initialize, . - arm_data_initialize
/***************************************************************************
* .rodata
* Data section variables
***************************************************************************/
/* This global variable is unsigned long g_idle_topstack and is
@ -509,10 +504,12 @@ arm_data_initialize:
*/
.section .rodata, "a"
.align 4
.type g_idle_topstack, object
g_idle_topstack:
.long IDLE_STACK_TOP
.size g_idle_topstack, .-g_idle_topstack
.align 4
.globl g_idle_topstack
.type g_idle_topstack, object
g_idle_topstack:
.long IDLE_STACK_TOP
.size g_idle_topstack, .-g_idle_topstack
.end

View file

@ -39,7 +39,6 @@
#else
.cpu cortex-r4
#endif
.syntax unified
.file "arm_restorefpu.S"
/****************************************************************************
@ -79,14 +78,14 @@ arm_restorefpu:
* s0, s1, ... in increasing address order.
*/
vldmia r1!, {s0-s31} /* Restore the full FP context */
vldmia r1!, {s0-s31} /* Restore the full FP context */
/* Load the floating point control and status register. At the end of the
* vstmia, r1 will point to the FPCSR storage location.
*/
ldr r2, [r1], #4 /* Fetch the floating point control and status register */
vmsr fpscr, r2 /* Restore the FPCSR */
vmsr fpscr, r2 /* Restore the FPCSR */
bx lr
.size arm_restorefpu, .-arm_restorefpu

View file

@ -76,13 +76,13 @@ arm_savefpu:
* s0, s1, ... in increasing address order.
*/
vstmia r1!, {s0-s31} /* Save the full FP context */
vstmia r1!, {s0-s31} /* Save the full FP context */
/* Store the floating point control and status register. At the end of the
* vstmia, r1 will point to the FPCSR storage location.
*/
vmrs r2, fpscr /* Fetch the FPCSR */
vmrs r2, fpscr /* Fetch the FPCSR */
str r2, [r1], #4 /* Save the floating point control and status register */
bx lr

View file

@ -22,36 +22,48 @@
* Included Files
****************************************************************************/
#include <nuttx/irq.h>
#include "arm_internal.h"
#include <nuttx/config.h>
#include <arch/irq.h>
/****************************************************************************
* Pre-processor Definitions
****************************************************************************/
/****************************************************************************
* Public Symbols
****************************************************************************/
.globl arm_saveusercontext
#ifdef CONFIG_ARCH_FPU
.cpu cortex-r4f
#else
.cpu cortex-r4
#endif
.syntax unified
.file "arm_saveusercontext.S"
/****************************************************************************
* Macros
****************************************************************************/
/****************************************************************************
* Public Functions
****************************************************************************/
.text
/****************************************************************************
* Name: arm_saveusercontext
*
* Description:
* Save the current thread context. Full prototype is:
*
* int arm_saveusercontext(uint32_t *saveregs);
*
* Returned Value:
* 0: Normal return
* 1: Context switch return
*
****************************************************************************/
.globl arm_saveusercontext
.type arm_saveusercontext, function
arm_saveusercontext:
/* On entry, a1 (r0) holds address of struct xcptcontext */
@ -70,11 +82,11 @@ arm_saveusercontext:
*/
add r1, r0, #(4*REG_R4)
stmia r1, {r4-r14}
stmia r1, {r4-r14}
/* Save the current cpsr */
mrs r2, cpsr /* R3 = CPSR value */
mrs r2, cpsr /* R2 = CPSR value */
add r1, r0, #(4*REG_CPSR)
str r2, [r1]
@ -93,25 +105,25 @@ arm_saveusercontext:
*/
#ifdef CONFIG_ARCH_FPU
add r1, r0, #(4*REG_S0) /* R1=Address of FP register storage */
add r1, r0, #(4*REG_S0) /* R1=Address of FP register storage */
/* Store all floating point registers. Registers are stored in numeric order,
* s0, s1, ... in increasing address order.
*/
vstmia r1!, {s0-s31} /* Save the full FP context */
vstmia r1!, {s0-s31} /* Save the full FP context */
/* Store the floating point control and status register. At the end of the
* vstmia, r1 will point to the FPCSR storage location.
*/
vmrs r2, fpscr /* Fetch the FPCSR */
str r2, [r1], #4 /* Save the floating point control and status register */
vmrs r2, fpscr /* Fetch the FPCSR */
str r2, [r1], #4 /* Save the floating point control and status register */
#endif
/* Return 0 now indicating that this return is not a context switch */
mov r0, #0 /* Return value == 0 */
mov pc, lr /* Return */
.size arm_saveusercontext, . - arm_saveusercontext
.size arm_saveusercontext, .-arm_saveusercontext
.end

View file

@ -32,13 +32,12 @@
* File info
****************************************************************************/
.file "arm_signal_handler.S"
#ifdef CONFIG_ARCH_FPU
.cpu cortex-r4f
#else
.cpu cortex-r4
#endif
.syntax unified
.file "arm_signal_handler.S"
/****************************************************************************
* Private Functions
@ -48,8 +47,6 @@
* Public Functions
****************************************************************************/
.text
/****************************************************************************
* Name: up_signal_handler
*
@ -73,13 +70,14 @@
*
****************************************************************************/
.text
.globl up_signal_handler
.type up_signal_handler, function
up_signal_handler:
/* Save some register */
push {lr} /* Save LR on the stack */
push {lr} /* Save LR on the stack */
/* Call the signal handler */
@ -87,7 +85,7 @@ up_signal_handler:
mov r0, r1 /* R0=signo */
mov r1, r2 /* R1=info */
mov r2, r3 /* R2=ucontext */
blx ip /* Call the signal handler */
blx ip /* Call the signal handler */
/* Restore the registers */

View file

@ -81,19 +81,19 @@ up_testset:
/* Test if the spinlock is locked or not */
1:
ldrexb r2, [r0] /* Test if spinlock is locked or not */
ldrexb r2, [r0] /* Test if spinlock is locked or not */
cmp r2, r1 /* Already locked? */
beq 2f /* If already locked, return SP_LOCKED */
beq 2f /* If already locked, return SP_LOCKED */
/* Not locked ... attempt to lock it */
strexb r2, r1, [r0] /* Attempt to set the locked state */
strexb r2, r1, [r0] /* Attempt to set the locked state */
cmp r2, r1 /* r2 will be 1 is strexb failed */
beq 1b /* Failed to lock... try again */
beq 1b /* Failed to lock... try again */
/* Lock acquired -- return SP_UNLOCKED */
dmb /* Required before accessing protected resource */
dmb /* Required before accessing protected resource */
mov r0, #SP_UNLOCKED
bx lr

View file

@ -24,7 +24,6 @@
#include <nuttx/config.h>
#include <nuttx/irq.h>
#include "arm_arch.h"
.file "arm_vectoraddrexcptn.S"

View file

@ -42,12 +42,15 @@
g_irqtmp:
.word 0 /* Saved lr */
.word 0 /* Saved spsr */
g_undeftmp:
.word 0 /* Saved lr */
.word 0 /* Saved spsr */
g_aborttmp:
.word 0 /* Saved lr */
.word 0 /* Saved spsr */
#ifdef CONFIG_ARMV7R_DECODEFIQ
g_fiqtmp:
.word 0 /* Saved lr */
@ -87,31 +90,31 @@ arm_vectorirq:
ldr r13, .Lirqtmp
sub lr, lr, #4
str lr, [r13] /* Save lr_IRQ */
str lr, [r13] /* Save lr_IRQ */
mrs lr, spsr
str lr, [r13, #4] /* Save spsr_IRQ */
/* Then switch back to SVC mode */
bic lr, lr, #PSR_MODE_MASK /* Keep F and T bits */
bic lr, lr, #PSR_MODE_MASK /* Keep F and T bits */
#ifdef CONFIG_ARMV7R_DECODEFIQ
orr lr, lr, #(PSR_MODE_SVC | PSR_I_BIT | PSR_F_BIT)
#else
orr lr, lr, #(PSR_MODE_SVC | PSR_I_BIT)
#endif
msr cpsr_c, lr /* Switch to SVC mode */
msr cpsr_c, lr /* Switch to SVC mode */
/* Create a context structure. First set aside a stack frame
* and store r0-r12 into the frame.
*/
sub sp, sp, #XCPTCONTEXT_SIZE
stmia sp, {r0-r12} /* Save the SVC mode regs */
stmia sp, {r0-r12} /* Save the SVC mode regs */
/* Get the values for r15(pc) and CPSR in r3 and r4 */
ldr r0, .Lirqtmp /* Points to temp storage */
ldmia r0, {r3, r4} /* Recover r3=lr_IRQ, r4=spsr_IRQ */
ldmia r0, {r3, r4} /* Recover r3=lr_IRQ, r4=spsr_IRQ */
#ifdef CONFIG_BUILD_PROTECTED
/* Did we enter from user mode? If so then we need get the values of
@ -127,9 +130,9 @@ arm_vectorirq:
*/
add r0, sp, #(4*REG_SP) /* Offset to sp/lr storage */
stmia r0, {r13, r14}^ /* Save user mode r13(sp) and r14(lr) */
add r0, sp, #(4*REG_R15) /* Offset to pc/cpsr storage */
stmia r0, {r3, r4} /* Save r15(pc), and the CPSR */
stmia r0, {r13, r14}^ /* Save user mode r13(sp) and r14(lr) */
add r0, sp, #(4*REG_R15) /* Offset to pc/cpsr storage */
stmia r0, {r3, r4} /* Save r15(pc), and the CPSR */
b .Lirqcontinue
.Lirqentersvc:
@ -143,7 +146,7 @@ arm_vectorirq:
/* Save r13(sp), r14(lr), r15(pc), and the CPSR */
add r0, sp, #(4*REG_SP) /* Offset to pc, cpsr storage */
stmia r0, {r1-r4}
stmia r0, {r1-r4}
.Lirqcontinue:
@ -156,26 +159,30 @@ arm_vectorirq:
/* Save r13(sp), r14(lr), r15(pc), and the CPSR */
add r0, sp, #(4*REG_SP) /* Offset to pc, cpsr storage */
stmia r0, {r1-r4}
stmia r0, {r1-r4}
#endif
/* Then call the IRQ handler with interrupts disabled. */
mov fp, #0 /* Init frame pointer */
mov r0, sp /* Get r0=xcp */
mov fp, #0 /* Init frame pointer */
mov r0, sp /* Get r0=xcp */
#if CONFIG_ARCH_INTERRUPTSTACK > 7
/* Call arm_decodeirq() on the interrupt stack */
ldr sp, .Lirqstackbase /* SP = interrupt stack base */
str r0, [sp, #-4]! /* Save the xcp address at SP-4 then update SP */
mov r4, sp /* Save the SP in a preserved register */
bic sp, sp, #7 /* Force 8-byte alignment */
mov r4, sp /* Save the SP in a preserved register */
bic sp, sp, #7 /* Force 8-byte alignment */
bl arm_decodeirq /* Call the handler */
ldr sp, [r4] /* Restore the user stack pointer */
ldr sp, [r4] /* Restore the user stack pointer */
#else
mov r4, sp /* Save the SP in a preserved register */
bic sp, sp, #7 /* Force 8-byte alignment */
/* Call arm_decodeirq() on the user stack */
mov r4, sp /* Save the SP in a preserved register */
bic sp, sp, #7 /* Force 8-byte alignment */
bl arm_decodeirq /* Call the handler */
mov sp, r4 /* Restore the possibly unaligned stack pointer */
mov sp, r4 /* Restore the possibly unaligned stack pointer */
#endif
/* Upon return from arm_decodeirq, r0 holds the pointer to the register
@ -186,7 +193,7 @@ arm_vectorirq:
/* Restore the CPSR, SVC mode registers and return */
ldr r1, [r0, #(4*REG_CPSR)] /* Fetch the return SPSR */
ldr r1, [r0, #(4*REG_CPSR)] /* Fetch the return SPSR */
msr spsr_cxsf, r1 /* Set the return mode SPSR */
#ifdef CONFIG_BUILD_PROTECTED
@ -202,25 +209,27 @@ arm_vectorirq:
* is not in the register list).
*/
mov r13, r0 /* (SVC) R13=Register storage area */
ldmia r13, {r0-R12} /* Restore common R0-R12 */
add r14, r13, #(4*REG_R13) /* (SVC) R14=address of R13/R14 storage */
ldmia r14, {r13, r14}^ /* Restore user mode R13/R14 */
add r14, r13, #(4*REG_R15) /* (SVC) R14=address of R15 storage */
ldmia r14, {r15}^ /* Return */
mov r13, r0 /* (SVC) R13=Register storage area */
ldmia r13, {r0-R12} /* Restore common R0-R12 */
add r14, r13, #(4*REG_R13) /* (SVC) R14=address of R13/R14 storage */
ldmia r14, {r13, r14}^ /* Restore user mode R13/R14 */
add r14, r13, #(4*REG_R15) /* (SVC) R14=address of R15 storage */
ldmia r14, {r15}^ /* Return */
.Lirqleavesvc:
#endif
/* Life is simple when everything is SVC mode */
ldmia r0, {r0-r15}^ /* Return */
ldmia r0, {r0-r15}^ /* Return */
.Lirqtmp:
.word g_irqtmp
#if CONFIG_ARCH_INTERRUPTSTACK > 7
.Lirqstackbase:
.word g_intstackbase
#endif
.size arm_vectorirq, . - arm_vectorirq
.align 5
@ -243,19 +252,19 @@ arm_vectorsvc:
*/
sub sp, sp, #XCPTCONTEXT_SIZE
stmia sp, {r0-r12} /* Save the SVC mode regs */
stmia sp, {r0-r12} /* Save the SVC mode regs */
/* Get the values for r15(pc) and CPSR in r3 and r4 */
mov r3, r14 /* Save r14 as the PC as well */
mrs r4, spsr /* Get the saved CPSR */
mov r3, r14 /* Save r14 as the PC as well */
mrs r4, spsr /* Get the saved CPSR */
#ifdef CONFIG_BUILD_PROTECTED
/* Did we enter from user mode? If so then we need get the values of
* USER mode r13(sp) and r14(lr).
*/
and r1, r4, #PSR_MODE_MASK /* Interrupted mode */
and r1, r4, #PSR_MODE_MASK /* Interrupted mode */
cmp r1, #PSR_MODE_USR /* User mode? */
bne .Lsvcentersvc /* Branch if not user mode */
@ -264,9 +273,9 @@ arm_vectorsvc:
*/
add r0, sp, #(4*REG_SP) /* Offset to sp/lr storage */
stmia r0, {r13, r14}^ /* Save user mode r13(sp) and r14(lr) */
add r0, sp, #(4*REG_R15) /* Offset to pc/cpsr storage */
stmia r0, {r3, r4} /* Save r15(pc), and the CPSR */
stmia r0, {r13, r14}^ /* Save user mode r13(sp) and r14(lr) */
add r0, sp, #(4*REG_R15) /* Offset to pc/cpsr storage */
stmia r0, {r3, r4} /* Save r15(pc), and the CPSR */
b .Lsvccontinue
.Lsvcentersvc:
@ -280,7 +289,7 @@ arm_vectorsvc:
/* Save r13(sp), r14(lr), r15(pc), and the CPSR */
add r0, sp, #(4*REG_SP) /* Offset to pc, cpsr storage */
stmia r0, {r1-r4}
stmia r0, {r1-r4}
.Lsvccontinue:
@ -293,19 +302,19 @@ arm_vectorsvc:
/* Save r13(sp), r14(lr), r15(pc), and the CPSR */
add r0, sp, #(4*REG_SP) /* Offset to pc, cpsr storage */
stmia r0, {r1-r4}
stmia r0, {r1-r4}
#endif
/* Then call the SVC handler with interrupts disabled.
* void arm_syscall(struct xcptcontext *xcp)
*/
mov fp, #0 /* Init frame pointer */
mov r0, sp /* Get r0=xcp */
mov r4, sp /* Save the SP in a preserved register */
bic sp, sp, #7 /* Force 8-byte alignment */
bl arm_syscall /* Call the handler */
mov sp, r4 /* Restore the possibly unaligned stack pointer */
mov fp, #0 /* Init frame pointer */
mov r0, sp /* Get r0=xcp */
mov r4, sp /* Save the SP in a preserved register */
bic sp, sp, #7 /* Force 8-byte alignment */
bl arm_syscall /* Call the handler */
mov sp, r4 /* Restore the possibly unaligned stack pointer */
/* Upon return from arm_syscall, r0 holds the pointer to the register
* state save area to use to restore the registers. This may or may not
@ -315,7 +324,7 @@ arm_vectorsvc:
/* Restore the CPSR, SVC mode registers and return */
ldr r1, [r0, #(4*REG_CPSR)] /* Fetch the return SPSR */
ldr r1, [r0, #(4*REG_CPSR)] /* Fetch the return SPSR */
msr spsr_cxsf, r1 /* Set the return mode SPSR */
#ifdef CONFIG_BUILD_PROTECTED
@ -323,7 +332,7 @@ arm_vectorsvc:
* values of USER mode r13(sp) and r14(lr).
*/
and r2, r1, #PSR_MODE_MASK /* Interrupted mode */
and r2, r1, #PSR_MODE_MASK /* Interrupted mode */
cmp r2, #PSR_MODE_USR /* User mode? */
bne .Lleavesvcsvc /* Branch if not user mode */
@ -331,18 +340,18 @@ arm_vectorsvc:
* is not in the register list).
*/
mov r13, r0 /* (SVC) R13=Register storage area */
ldmia r13, {r0-R12} /* Restore common R0-R12 */
add r14, r13, #(4*REG_R13) /* (SVC) R14=address of R13/R14 storage */
ldmia r14, {r13, r14}^ /* Restore user mode R13/R14 */
add r14, r13, #(4*REG_R15) /* (SVC) R14=address of R15 storage */
ldmia r14, {r15}^ /* Return */
mov r13, r0 /* (SVC) R13=Register storage area */
ldmia r13, {r0-R12} /* Restore common R0-R12 */
add r14, r13, #(4*REG_R13) /* (SVC) R14=address of R13/R14 storage */
ldmia r14, {r13, r14}^ /* Restore user mode R13/R14 */
add r14, r13, #(4*REG_R15) /* (SVC) R14=address of R15 storage */
ldmia r14, {r15}^ /* Return */
.Lleavesvcsvc:
#endif
/* Life is simple when everything is SVC mode */
ldmia r0, {r0-r15}^ /* Return */
ldmia r0, {r0-r15}^ /* Return */
.size arm_vectorsvc, . - arm_vectorsvc
@ -369,35 +378,35 @@ arm_vectordata:
*/
ldr r13, .Ldaborttmp /* Points to temp storage */
sub lr, lr, #8 /* Fixup return */
str lr, [r13] /* Save in temp storage */
mrs lr, spsr /* Get SPSR */
sub lr, lr, #8 /* Fixup return */
str lr, [r13] /* Save in temp storage */
mrs lr, spsr /* Get SPSR */
str lr, [r13, #4] /* Save in temp storage */
/* Then switch back to SVC mode */
bic lr, lr, #PSR_MODE_MASK /* Keep F and T bits */
bic lr, lr, #PSR_MODE_MASK /* Keep F and T bits */
orr lr, lr, #(PSR_MODE_SVC | PSR_I_BIT | PSR_F_BIT)
msr cpsr_c, lr /* Switch to SVC mode */
msr cpsr_c, lr /* Switch to SVC mode */
/* Create a context structure. First set aside a stack frame
* and store r0-r12 into the frame.
*/
sub sp, sp, #XCPTCONTEXT_SIZE
stmia sp, {r0-r12} /* Save the SVC mode regs */
stmia sp, {r0-r12} /* Save the SVC mode regs */
/* Get the values for r15(pc) and CPSR in r3 and r4 */
ldr r0, .Ldaborttmp /* Points to temp storage */
ldmia r0, {r3, r4} /* Recover r3=lr_ABT, r4=spsr_ABT */
ldmia r0, {r3, r4} /* Recover r3=lr_ABT, r4=spsr_ABT */
#ifdef CONFIG_BUILD_PROTECTED
/* Did we enter from user mode? If so then we need get the values of
* USER mode r13(sp) and r14(lr).
*/
and r1, r4, #PSR_MODE_MASK /* Interrupted mode */
and r1, r4, #PSR_MODE_MASK /* Interrupted mode */
cmp r1, #PSR_MODE_USR /* User mode? */
bne .Ldabtentersvc /* Branch if not user mode */
@ -406,9 +415,9 @@ arm_vectordata:
*/
add r0, sp, #(4*REG_SP) /* Offset to sp/lr storage */
stmia r0, {r13, r14}^ /* Save user mode r13(sp) and r14(lr) */
add r0, sp, #(4*REG_R15) /* Offset to pc/cpsr storage */
stmia r0, {r3, r4} /* Save r15(pc), and the CPSR */
stmia r0, {r13, r14}^ /* Save user mode r13(sp) and r14(lr) */
add r0, sp, #(4*REG_R15) /* Offset to pc/cpsr storage */
stmia r0, {r3, r4} /* Save r15(pc), and the CPSR */
b .Ldabtcontinue
.Ldabtentersvc:
@ -422,7 +431,7 @@ arm_vectordata:
/* Save r13(sp), r14(lr), r15(pc), and the CPSR */
add r0, sp, #(4*REG_SP) /* Offset to pc, cpsr storage */
stmia r0, {r1-r4}
stmia r0, {r1-r4}
.Ldabtcontinue:
@ -435,21 +444,21 @@ arm_vectordata:
/* Save r13(sp), r14(lr), r15(pc), and the CPSR */
add r0, sp, #(4*REG_SP) /* Offset to pc, cpsr storage */
stmia r0, {r1-r4}
stmia r0, {r1-r4}
#endif
/* Then call the data abort handler with interrupts disabled.
* void arm_dataabort(struct xcptcontext *xcp)
*/
mov fp, #0 /* Init frame pointer */
mov r0, sp /* Get r0=xcp */
mov fp, #0 /* Init frame pointer */
mov r0, sp /* Get r0=xcp */
mrc CP15_DFAR(r1) /* Get R1=DFAR */
mrc CP15_DFSR(r2) /* Get r2=DFSR */
mov r4, sp /* Save the SP in a preserved register */
bic sp, sp, #7 /* Force 8-byte alignment */
mov r4, sp /* Save the SP in a preserved register */
bic sp, sp, #7 /* Force 8-byte alignment */
bl arm_dataabort /* Call the handler */
mov sp, r4 /* Restore the possibly unaligned stack pointer */
mov sp, r4 /* Restore the possibly unaligned stack pointer */
/* Upon return from arm_dataabort, r0 holds the pointer to the register
* state save area to use to restore the registers. This may or may not
@ -459,7 +468,7 @@ arm_vectordata:
/* Restore the CPSR, SVC mode registers and return */
ldr r1, [r0, #(4*REG_CPSR)] /* Fetch the return SPSR */
ldr r1, [r0, #(4*REG_CPSR)] /* Fetch the return SPSR */
msr spsr_cxsf, r1 /* Set the return mode SPSR */
#ifdef CONFIG_BUILD_PROTECTED
@ -467,7 +476,7 @@ arm_vectordata:
* values of USER mode r13(sp) and r14(lr).
*/
and r2, r1, #PSR_MODE_MASK /* Interrupted mode */
and r2, r1, #PSR_MODE_MASK /* Interrupted mode */
cmp r2, #PSR_MODE_USR /* User mode? */
bne .Ldabtleavesvc /* Branch if not user mode */
@ -475,18 +484,18 @@ arm_vectordata:
* is not in the register list).
*/
mov r13, r0 /* (SVC) R13=Register storage area */
ldmia r13, {r0-R12} /* Restore common R0-R12 */
add r14, r13, #(4*REG_R13) /* (SVC) R14=address of R13/R14 storage */
ldmia r14, {r13, r14}^ /* Restore user mode R13/R14 */
add r14, r13, #(4*REG_R15) /* (SVC) R14=address of R15 storage */
ldmia r14, {r15}^ /* Return */
mov r13, r0 /* (SVC) R13=Register storage area */
ldmia r13, {r0-R12} /* Restore common R0-R12 */
add r14, r13, #(4*REG_R13) /* (SVC) R14=address of R13/R14 storage */
ldmia r14, {r13, r14}^ /* Restore user mode R13/R14 */
add r14, r13, #(4*REG_R15) /* (SVC) R14=address of R15 storage */
ldmia r14, {r15}^ /* Return */
.Ldabtleavesvc:
#endif
/* Life is simple when everything is SVC mode */
ldmia r0, {r1-r15}^ /* Return */
ldmia r0, {r1-r15}^ /* Return */
.Ldaborttmp:
.word g_aborttmp
@ -515,35 +524,35 @@ arm_vectorprefetch:
*/
ldr r13, .Lpaborttmp /* Points to temp storage */
sub lr, lr, #4 /* Fixup return */
str lr, [r13] /* Save in temp storage */
mrs lr, spsr /* Get SPSR */
sub lr, lr, #4 /* Fixup return */
str lr, [r13] /* Save in temp storage */
mrs lr, spsr /* Get SPSR */
str lr, [r13, #4] /* Save in temp storage */
/* Then switch back to SVC mode */
bic lr, lr, #PSR_MODE_MASK /* Keep F and T bits */
bic lr, lr, #PSR_MODE_MASK /* Keep F and T bits */
orr lr, lr, #(PSR_MODE_SVC | PSR_I_BIT | PSR_F_BIT)
msr cpsr_c, lr /* Switch to SVC mode */
msr cpsr_c, lr /* Switch to SVC mode */
/* Create a context structure. First set aside a stack frame
* and store r0-r12 into the frame.
*/
sub sp, sp, #XCPTCONTEXT_SIZE
stmia sp, {r0-r12} /* Save the SVC mode regs */
stmia sp, {r0-r12} /* Save the SVC mode regs */
/* Get the values for r15(pc) and CPSR in r3 and r4 */
ldr r0, .Lpaborttmp /* Points to temp storage */
ldmia r0, {r3, r4} /* Recover r3=lr_ABT, r4=spsr_ABT */
ldmia r0, {r3, r4} /* Recover r3=lr_ABT, r4=spsr_ABT */
#ifdef CONFIG_BUILD_PROTECTED
/* Did we enter from user mode? If so then we need get the values of
* USER mode r13(sp) and r14(lr).
*/
and r1, r4, #PSR_MODE_MASK /* Interrupted mode */
and r1, r4, #PSR_MODE_MASK /* Interrupted mode */
cmp r1, #PSR_MODE_USR /* User mode? */
bne .Lpabtentersvc /* Branch if not user mode */
@ -552,9 +561,9 @@ arm_vectorprefetch:
*/
add r0, sp, #(4*REG_SP) /* Offset to sp/lr storage */
stmia r0, {r13, r14}^ /* Save user mode r13(sp) and r14(lr) */
add r0, sp, #(4*REG_R15) /* Offset to pc/cpsr storage */
stmia r0, {r3, r4} /* Save r15(pc), and the CPSR */
stmia r0, {r13, r14}^ /* Save user mode r13(sp) and r14(lr) */
add r0, sp, #(4*REG_R15) /* Offset to pc/cpsr storage */
stmia r0, {r3, r4} /* Save r15(pc), and the CPSR */
b .Lpabtcontinue
.Lpabtentersvc:
@ -568,7 +577,7 @@ arm_vectorprefetch:
/* Save r13(sp), r14(lr), r15(pc), and the CPSR */
add r0, sp, #(4*REG_SP) /* Offset to pc, cpsr storage */
stmia r0, {r1-r4}
stmia r0, {r1-r4}
.Lpabtcontinue:
@ -581,21 +590,21 @@ arm_vectorprefetch:
/* Save r13(sp), r14(lr), r15(pc), and the CPSR */
add r0, sp, #(4*REG_SP) /* Offset to pc, cpsr storage */
stmia r0, {r1-r4}
stmia r0, {r1-r4}
#endif
/* Then call the prefetch abort handler with interrupts disabled.
* void arm_prefetchabort(struct xcptcontext *xcp)
*/
mov fp, #0 /* Init frame pointer */
mov r0, sp /* Get r0=xcp */
mov fp, #0 /* Init frame pointer */
mov r0, sp /* Get r0=xcp */
mrc CP15_IFAR(r1) /* Get R1=IFAR */
mrc CP15_IFSR(r2) /* Get r2=IFSR */
mov r4, sp /* Save the SP in a preserved register */
bic sp, sp, #7 /* Force 8-byte alignment */
mov r4, sp /* Save the SP in a preserved register */
bic sp, sp, #7 /* Force 8-byte alignment */
bl arm_prefetchabort /* Call the handler */
mov sp, r4 /* Restore the possibly unaligned stack pointer */
mov sp, r4 /* Restore the possibly unaligned stack pointer */
/* Upon return from arm_prefetchabort, r0 holds the pointer to the register
* state save area to use to restore the registers. This may or may not
@ -605,7 +614,7 @@ arm_vectorprefetch:
/* Restore the CPSR, SVC mode registers and return */
ldr r1, [r0, #(4*REG_CPSR)] /* Fetch the return SPSR */
ldr r1, [r0, #(4*REG_CPSR)] /* Fetch the return SPSR */
msr spsr_cxsf, r1 /* Set the return mode SPSR */
#ifdef CONFIG_BUILD_PROTECTED
@ -613,7 +622,7 @@ arm_vectorprefetch:
* values of USER mode r13(sp) and r14(lr).
*/
and r2, r1, #PSR_MODE_MASK /* Interrupted mode */
and r2, r1, #PSR_MODE_MASK /* Interrupted mode */
cmp r2, #PSR_MODE_USR /* User mode? */
bne .Lpabtleavesvc /* Branch if not user mode */
@ -621,18 +630,18 @@ arm_vectorprefetch:
* is not in the register list).
*/
mov r13, r0 /* (SVC) R13=Register storage area */
ldmia r13, {r0-R12} /* Restore common R0-R12 */
add r14, r13, #(4*REG_R13) /* (SVC) R14=address of R13/R14 storage */
ldmia r14, {r13, r14}^ /* Restore user mode R13/R14 */
add r14, r13, #(4*REG_R15) /* (SVC) R14=address of R15 storage */
ldmia r14, {r15}^ /* Return */
mov r13, r0 /* (SVC) R13=Register storage area */
ldmia r13, {r0-R12} /* Restore common R0-R12 */
add r14, r13, #(4*REG_R13) /* (SVC) R14=address of R13/R14 storage */
ldmia r14, {r13, r14}^ /* Restore user mode R13/R14 */
add r14, r13, #(4*REG_R15) /* (SVC) R14=address of R15 storage */
ldmia r14, {r15}^ /* Return */
.Lpabtleavesvc:
#endif
/* Life is simple when everything is SVC mode */
ldmia r0, {r0-r15}^ /* Return */
ldmia r0, {r0-r15}^ /* Return */
.Lpaborttmp:
.word g_aborttmp
@ -659,34 +668,34 @@ arm_vectorundefinsn:
*/
ldr r13, .Lundeftmp /* Points to temp storage */
str lr, [r13] /* Save in temp storage */
mrs lr, spsr /* Get SPSR */
str lr, [r13] /* Save in temp storage */
mrs lr, spsr /* Get SPSR */
str lr, [r13, #4] /* Save in temp storage */
/* Then switch back to SVC mode */
bic lr, lr, #PSR_MODE_MASK /* Keep F and T bits */
bic lr, lr, #PSR_MODE_MASK /* Keep F and T bits */
orr lr, lr, #(PSR_MODE_SVC | PSR_I_BIT | PSR_F_BIT)
msr cpsr_c, lr /* Switch to SVC mode */
msr cpsr_c, lr /* Switch to SVC mode */
/* Create a context structure. First set aside a stack frame
* and store r0-r12 into the frame.
*/
sub sp, sp, #XCPTCONTEXT_SIZE
stmia sp, {r0-r12} /* Save the SVC mode regs */
stmia sp, {r0-r12} /* Save the SVC mode regs */
/* Get the values for r15(pc) and CPSR in r3 and r4 */
ldr r0, .Lundeftmp /* Points to temp storage */
ldmia r0, {r3, r4} /* Recover r3=lr_UND, r4=spsr_UND */
ldmia r0, {r3, r4} /* Recover r3=lr_UND, r4=spsr_UND */
#ifdef CONFIG_BUILD_PROTECTED
/* Did we enter from user mode? If so then we need get the values of
* USER mode r13(sp) and r14(lr).
*/
and r1, r4, #PSR_MODE_MASK /* Interrupted mode */
and r1, r4, #PSR_MODE_MASK /* Interrupted mode */
cmp r1, #PSR_MODE_USR /* User mode? */
bne .Lundefentersvc /* Branch if not user mode */
@ -695,9 +704,9 @@ arm_vectorundefinsn:
*/
add r0, sp, #(4*REG_SP) /* Offset to sp/lr storage */
stmia r0, {r13, r14}^ /* Save user mode r13(sp) and r14(lr) */
add r0, sp, #(4*REG_R15) /* Offset to pc/cpsr storage */
stmia r0, {r3, r4} /* Save r15(pc), and the CPSR */
stmia r0, {r13, r14}^ /* Save user mode r13(sp) and r14(lr) */
add r0, sp, #(4*REG_R15) /* Offset to pc/cpsr storage */
stmia r0, {r3, r4} /* Save r15(pc), and the CPSR */
b .Lundefcontinue
.Lundefentersvc:
@ -711,7 +720,7 @@ arm_vectorundefinsn:
/* Save r13(sp), r14(lr), r15(pc), and the CPSR */
add r0, sp, #(4*REG_SP) /* Offset to pc, cpsr storage */
stmia r0, {r1-r4}
stmia r0, {r1-r4}
.Lundefcontinue:
@ -724,19 +733,19 @@ arm_vectorundefinsn:
/* Save r13(sp), r14(lr), r15(pc), and the CPSR */
add r0, sp, #(4*REG_SP) /* Offset to pc, cpsr storage */
stmia r0, {r1-r4}
stmia r0, {r1-r4}
#endif
/* Then call the undef insn handler with interrupts disabled.
* void arm_undefinedinsn(struct xcptcontext *xcp)
*/
mov fp, #0 /* Init frame pointer */
mov r0, sp /* Get r0=xcp */
mov r4, sp /* Save the SP in a preserved register */
bic sp, sp, #7 /* Force 8-byte alignment */
mov fp, #0 /* Init frame pointer */
mov r0, sp /* Get r0=xcp */
mov r4, sp /* Save the SP in a preserved register */
bic sp, sp, #7 /* Force 8-byte alignment */
bl arm_undefinedinsn /* Call the handler */
mov sp, r4 /* Restore the possibly unaligned stack pointer */
mov sp, r4 /* Restore the possibly unaligned stack pointer */
/* Upon return from arm_undefinedinsn, r0 holds the pointer to the register
* state save area to use to restore the registers. This may or may not
@ -746,7 +755,7 @@ arm_vectorundefinsn:
/* Restore the CPSR, SVC mode registers and return */
ldr r1, [r0, #(4*REG_CPSR)] /* Fetch the return SPSR */
ldr r1, [r0, #(4*REG_CPSR)] /* Fetch the return SPSR */
msr spsr_cxsf, r1 /* Set the return mode SPSR */
#ifdef CONFIG_BUILD_PROTECTED
@ -754,7 +763,7 @@ arm_vectorundefinsn:
* values of USER mode r13(sp) and r14(lr).
*/
and r2, r1, #PSR_MODE_MASK /* Interrupted mode */
and r2, r1, #PSR_MODE_MASK /* Interrupted mode */
cmp r2, #PSR_MODE_USR /* User mode? */
bne .Lundefleavesvc /* Branch if not user mode */
@ -762,18 +771,18 @@ arm_vectorundefinsn:
* is not in the register list).
*/
mov r13, r0 /* (SVC) R13=Register storage area */
ldmia r13, {r0-R12} /* Restore common R0-R12 */
add r14, r13, #(4*REG_R13) /* (SVC) R14=address of R13/R14 storage */
ldmia r14, {r13, r14}^ /* Restore user mode R13/R14 */
add r14, r13, #(4*REG_R15) /* (SVC) R14=address of R15 storage */
ldmia r14, {r15}^ /* Return */
mov r13, r0 /* (SVC) R13=Register storage area */
ldmia r13, {r0-R12} /* Restore common R0-R12 */
add r14, r13, #(4*REG_R13) /* (SVC) R14=address of R13/R14 storage */
ldmia r14, {r13, r14}^ /* Restore user mode R13/R14 */
add r14, r13, #(4*REG_R15) /* (SVC) R14=address of R15 storage */
ldmia r14, {r15}^ /* Return */
.Lundefleavesvc:
#endif
/* Life is simple when everything is SVC mode */
ldmia r0, {r0-r15}^ /* Return */
ldmia r0, {r0-r15}^ /* Return */
.Lundeftmp:
.word g_undeftmp
@ -801,28 +810,28 @@ arm_vectorfiq:
/* On entry we are free to use the FIQ mode registers r8 through r14 */
ldr r13, .Lfiqtmp /* Points to temp storage */
sub lr, lr, #4 /* Fixup return */
str lr, [r13] /* Save in temp storage */
mrs lr, spsr /* Get SPSR_fiq */
sub lr, lr, #4 /* Fixup return */
str lr, [r13] /* Save in temp storage */
mrs lr, spsr /* Get SPSR_fiq */
str lr, [r13, #4] /* Save in temp storage */
/* Then switch back to SVC mode */
bic lr, lr, #PSR_MODE_MASK /* Keep F and T bits */
bic lr, lr, #PSR_MODE_MASK /* Keep F and T bits */
orr lr, lr, #(PSR_MODE_SVC | PSR_I_BIT | PSR_F_BIT)
msr cpsr_c, lr /* Switch to SVC mode */
msr cpsr_c, lr /* Switch to SVC mode */
/* Create a context structure. First set aside a stack frame
* and store r0-r12 into the frame.
*/
sub sp, sp, #XCPTCONTEXT_SIZE
stmia sp, {r0-r12} /* Save the SVC mode regs */
stmia sp, {r0-r12} /* Save the SVC mode regs */
/* Get the values for r15(pc) and CPSR in r3 and r4 */
ldr r0, .Lfiqtmp /* Points to temp storage */
ldmia r0, {r3, r4} /* Recover r3=lr_SVC, r4=spsr_SVC */
ldmia r0, {r3, r4} /* Recover r3=lr_SVC, r4=spsr_SVC */
#ifdef CONFIG_BUILD_PROTECTED
/* Did we enter from user mode? If so then we need get the values of
@ -838,9 +847,9 @@ arm_vectorfiq:
*/
add r0, sp, #(4*REG_SP) /* Offset to sp/lr storage */
stmia r0, {r13, r14}^ /* Save user mode r13(sp) and r14(lr) */
add r0, sp, #(4*REG_R15) /* Offset to pc/cpsr storage */
stmia r0, {r3, r4} /* Save r15(pc), and the CPSR */
stmia r0, {r13, r14}^ /* Save user mode r13(sp) and r14(lr) */
add r0, sp, #(4*REG_R15) /* Offset to pc/cpsr storage */
stmia r0, {r3, r4} /* Save r15(pc), and the CPSR */
b .Lfiqcontinue
.Lfiqentersvc:
@ -854,7 +863,7 @@ arm_vectorfiq:
/* Save r13(sp), r14(lr), r15(pc), and the CPSR */
add r0, sp, #(4*REG_SP) /* Offset to pc, cpsr storage */
stmia r0, {r1-r4}
stmia r0, {r1-r4}
.Lfiqcontinue:
@ -867,26 +876,26 @@ arm_vectorfiq:
/* Save r13(sp), r14(lr), r15(pc), and the CPSR */
add r0, sp, #(4*REG_SP) /* Offset to pc, cpsr storage */
stmia r0, {r1-r4}
stmia r0, {r1-r4}
#endif
/* Then call the FIQ handler with interrupts disabled. */
mov fp, #0 /* Init frame pointer */
mov r0, sp /* Get r0=xcp */
mov fp, #0 /* Init frame pointer */
mov r0, sp /* Get r0=xcp */
#if CONFIG_ARCH_INTERRUPTSTACK > 7
ldr sp, .Lfiqstackbase /* SP = interrupt stack base */
str r0, [sp, #-4]! /* Save the xcp address at SP-4 then update SP */
mov r4, sp /* Save the SP in a preserved register */
bic sp, sp, #7 /* Force 8-byte alignment */
mov r4, sp /* Save the SP in a preserved register */
bic sp, sp, #7 /* Force 8-byte alignment */
bl arm_decodefiq /* Call the handler */
ldr sp, [r4] /* Restore the user stack pointer */
ldr sp, [r4] /* Restore the user stack pointer */
#else
mov r4, sp /* Save the SP in a preserved register */
bic sp, sp, #7 /* Force 8-byte alignment */
mov r4, sp /* Save the SP in a preserved register */
bic sp, sp, #7 /* Force 8-byte alignment */
bl arm_decodefiq /* Call the handler */
mov sp, r4 /* Restore the possibly unaligned stack pointer */
mov sp, r4 /* Restore the possibly unaligned stack pointer */
#endif
/* Upon return from arm_decodefiq, r0 holds the pointer to the register
@ -897,7 +906,7 @@ arm_vectorfiq:
/* Restore the CPSR, SVC mode registers and return */
ldr r1, [r0, #(4*REG_CPSR)] /* Fetch the return SPSR */
ldr r1, [r0, #(4*REG_CPSR)] /* Fetch the return SPSR */
msr spsr_cxsf, r1 /* Set the return mode SPSR */
#ifdef CONFIG_BUILD_PROTECTED
@ -913,21 +922,22 @@ arm_vectorfiq:
* is not in the register list).
*/
mov r13, r0 /* (SVC) R13=Register storage area */
ldmia r13, {r0-R12} /* Restore common R0-R12 */
add r14, r13, #(4*REG_R13) /* (SVC) R14=address of R13/R14 storage */
ldmia r14, {r13, r14}^ /* Restore user mode R13/R14 */
add r14, r13, #(4*REG_R15) /* (SVC) R14=address of R15 storage */
ldmia r14, {r15}^ /* Return */
mov r13, r0 /* (SVC) R13=Register storage area */
ldmia r13, {r0-R12} /* Restore common R0-R12 */
add r14, r13, #(4*REG_R13) /* (SVC) R14=address of R13/R14 storage */
ldmia r14, {r13, r14}^ /* Restore user mode R13/R14 */
add r14, r13, #(4*REG_R15) /* (SVC) R14=address of R15 storage */
ldmia r14, {r15}^ /* Return */
.Lfiqleavesvc:
#endif
/* Life is simple when everything is SVC mode */
ldmia r0, {r0-r15}^ /* Return */
ldmia r0, {r0-r15}^ /* Return */
.Lfiqtmp:
.word g_fiqtmp
#if CONFIG_ARCH_INTERRUPTSTACK > 7
.Lfiqstackbase:
.word g_intstackbase

View file

@ -54,16 +54,16 @@
/* These will be relocated to VECTOR_BASE. */
_vector_start:
ldr pc, .Lresethandler /* 0x00: Reset */
ldr pc, .Lresethandler /* 0x00: Reset */
ldr pc, .Lundefinedhandler /* 0x04: Undefined instruction */
ldr pc, .Lsvchandler /* 0x08: Software interrupt */
ldr pc, .Lsvchandler /* 0x08: Software interrupt */
ldr pc, .Lprefetchaborthandler /* 0x0c: Prefetch abort */
ldr pc, .Ldataaborthandler /* 0x10: Data abort */
ldr pc, .Laddrexcptnhandler /* 0x14: Address exception (reserved) */
ldr pc, .Lirqhandler /* 0x18: IRQ */
ldr pc, .Lfiqhandler /* 0x1c: FIQ */
ldr pc, .Lirqhandler /* 0x18: IRQ */
ldr pc, .Lfiqhandler /* 0x1c: FIQ */
.globl __start
.globl __start
.globl arm_vectorundefinsn
.globl arm_vectorsvc
.globl arm_vectorprefetch

View file

@ -6,10 +6,10 @@
*
* References:
*
* "Cortex-A5 MPCore, Technical Reference Manual", Revision: r0p1,
* Copyright © 2010 ARM. All rights reserved. ARM DDI 0434B (ID101810)
* "ARM® Architecture Reference Manual, ARMv7-A and ARMv7-R edition",
* Copyright © 1996-1998, 2000, 2004-2012 ARM. All rights reserved. ARM
* "Cortex-A5 MPCore, Technical Reference Manual", Revision: r0p1,
* Copyright (c) 2010 ARM. All rights reserved. ARM DDI 0434B (ID101810)
* "ARM Architecture Reference Manual, ARMv7-A and ARMv7-R edition",
* Copyright (c) 1996-1998, 2000, 2004-2012 ARM. All rights reserved. ARM
* DDI 0406C.b (ID072512)
*
* Portions of this file derive from Atmel sample code for the SAMA5D3 Cortex-A5
@ -92,21 +92,21 @@
cp15_clean_dcache:
mrc CP15_CTR(r3) /* Read the Cache Type Register */
lsr r3, r3, #16 /* Isolate the DMinLine field */
mrc CP15_CTR(r3) /* Read the Cache Type Register */
lsr r3, r3, #16 /* Isolate the DMinLine field */
and r3, r3, #0xf
mov r2, #4
mov r2, r2, lsl r3 /* Get the cache line size in bytes */
sub r3, r2, #1 /* R3=Cache line size mask */
bic r0, r0, r3 /* R0=aligned start address */
sub r3, r2, #1 /* R3=Cache line size mask */
bic r0, r0, r3 /* R0=aligned start address */
/* Loop, cleaning each cache line by writing its contents to memory */
1:
mcr CP15_DCCMVAC(r0) /* Clean data cache line to PoC by VA */
add r0, r0, r2 /* R12=Next cache line */
cmp r0, r1 /* Loop until all cache lines have been cleaned */
add r0, r0, r2 /* R12=Next cache line */
cmp r0, r1 /* Loop until all cache lines have been cleaned */
blo 1b
dsb

View file

@ -6,10 +6,10 @@
*
* References:
*
* "Cortex-A5 MPCore, Technical Reference Manual", Revision: r0p1,
* Copyright © 2010 ARM. All rights reserved. ARM DDI 0434B (ID101810)
* "ARM® Architecture Reference Manual, ARMv7-A and ARMv7-R edition",
* Copyright © 1996-1998, 2000, 2004-2012 ARM. All rights reserved. ARM
* "Cortex-A5 MPCore, Technical Reference Manual", Revision: r0p1,
* Copyright (c) 2010 ARM. All rights reserved. ARM DDI 0434B (ID101810)
* "ARM Architecture Reference Manual, ARMv7-A and ARMv7-R edition",
* Copyright (c) 1996-1998, 2000, 2004-2012 ARM. All rights reserved. ARM
* DDI 0406C.b (ID072512)
*
* Portions of this file derive from Atmel sample code for the SAMA5D3 Cortex-A5
@ -94,19 +94,19 @@
cp15_coherent_dcache:
mrc CP15_CTR(r3) /* Read the Cache Type Register */
lsr r3, r3, #16 /* Isolate the DMinLine field */
lsr r3, r3, #16 /* Isolate the DMinLine field */
and r3, r3, #0xf
mov r2, #4
mov r2, r2, lsl r3 /* Get the cache line size in bytes */
sub r3, r2, #1 /* R3=Cache line size mask */
bic r12, r0, r3 /* R12=aligned start address */
sub r3, r2, #1 /* R3=Cache line size mask */
bic r12, r0, r3 /* R12=aligned start address */
/* Loop, flushing each D cache line to memory */
1:
mcr CP15_DCCMVAU(r12) /* Clean data or unified cache line by VA to PoU */
add r12, r12, r2 /* R12=Next cache line */
cmp r12, r1 /* Loop until all cache lines have been cleaned */
cmp r12, r1 /* Loop until all cache lines have been cleaned */
blo 1b
dsb
@ -116,14 +116,14 @@ cp15_coherent_dcache:
mov r2, #4
mov r2, r2, lsl r3 /* Get the cache line size in bytes */
sub r3, r2, #1 /* R3=Cache line size mask */
bic r12, r0, r3 /* R12=aligned start address */
sub r3, r2, #1 /* R3=Cache line size mask */
bic r12, r0, r3 /* R12=aligned start address */
/* Loop, invalidating each I cache line to memory */
1:
mcr CP15_ICIMVAU(r12) /* Invalidate instruction cache by VA to PoU */
add r12, r12, r2 /* R12=Next cache line */
cmp r12, r1 /* Loop until all cache lines have been invalidated */
cmp r12, r1 /* Loop until all cache lines have been invalidated */
blo 1b
mov r0, #0

View file

@ -6,10 +6,10 @@
*
* References:
*
* "Cortex-A5 MPCore, Technical Reference Manual", Revision: r0p1,
* Copyright © 2010 ARM. All rights reserved. ARM DDI 0434B (ID101810)
* "ARM® Architecture Reference Manual, ARMv7-A and ARMv7-R edition",
* Copyright © 1996-1998, 2000, 2004-2012 ARM. All rights reserved. ARM
* "Cortex-A5 MPCore, Technical Reference Manual", Revision: r0p1,
* Copyright (c) 2010 ARM. All rights reserved. ARM DDI 0434B (ID101810)
* "ARM Architecture Reference Manual, ARMv7-A and ARMv7-R edition",
* Copyright (c) 1996-1998, 2000, 2004-2012 ARM. All rights reserved. ARM
* DDI 0406C.b (ID072512)
*
* Portions of this file derive from Atmel sample code for the SAMA5D3 Cortex-A5
@ -93,20 +93,20 @@
cp15_flush_dcache:
mrc CP15_CTR(r3) /* Read the Cache Type Register */
lsr r3, r3, #16 /* Isolate the DMinLine field */
lsr r3, r3, #16 /* Isolate the DMinLine field */
and r3, r3, #0xf
mov r2, #4
mov r2, r2, lsl r3 /* Get the cache line size in bytes */
sub r3, r2, #1 /* R3=Cache line size mask */
bic r0, r0, r3 /* R0=aligned start address */
sub r3, r2, #1 /* R3=Cache line size mask */
bic r0, r0, r3 /* R0=aligned start address */
/* Loop, cleaning and invaliding each D cache line in the address range */
1:
mcr CP15_DCCIMVAC(r0) /* Clean and invalidate data cache line by VA to PoC */
add r0, r0, r2 /* R12=Next cache line */
cmp r0, r1 /* Loop until all cache lines have been cleaned */
add r0, r0, r2 /* R12=Next cache line */
cmp r0, r1 /* Loop until all cache lines have been cleaned */
blo 1b
dsb

View file

@ -6,10 +6,10 @@
*
* References:
*
* "Cortex-A5 MPCore, Technical Reference Manual", Revision: r0p1,
* Copyright © 2010 ARM. All rights reserved. ARM DDI 0434B (ID101810)
* "ARM® Architecture Reference Manual, ARMv7-A and ARMv7-R edition",
* Copyright © 1996-1998, 2000, 2004-2012 ARM. All rights reserved. ARM
* "Cortex-A5 MPCore, Technical Reference Manual", Revision: r0p1,
* Copyright (c) 2010 ARM. All rights reserved. ARM DDI 0434B (ID101810)
* "ARM Architecture Reference Manual, ARMv7-A and ARMv7-R edition",
* Copyright (c) 1996-1998, 2000, 2004-2012 ARM. All rights reserved. ARM
* DDI 0406C.b (ID072512)
*
* Portions of this file derive from Atmel sample code for the SAMA5D3 Cortex-A5
@ -93,27 +93,27 @@
cp15_invalidate_dcache:
mrc CP15_CTR(r3) /* Read the Cache Type Register */
lsr r3, r3, #16 /* Isolate the DMinLine field */
mrc CP15_CTR(r3) /* Read the Cache Type Register */
lsr r3, r3, #16 /* Isolate the DMinLine field */
and r3, r3, #0xf
mov r2, #4
mov r2, r2, lsl r3 /* Get the cache line size in bytes */
sub r3, r2, #1 /* R3=Cache line size mask */
sub r3, r2, #1 /* R3=Cache line size mask */
tst r0, r3
bic r0, r0, r3 /* R0=aligned start address */
bic r0, r0, r3 /* R0=aligned start address */
mcrne CP15_DCCIMVAC(r0) /* Clean and invalidate data cache line by VA to PoC */
mcrne CP15_DCCIMVAC(r0) /* Clean and invalidate data cache line by VA to PoC */
tst r1, r3
bic r1, r1, r3 /* R0=aligned end address */
mcrne CP15_DCCIMVAC(r1) /* Clean and invalidate data cache line by VA to PoC */
bic r1, r1, r3 /* R0=aligned end address */
mcrne CP15_DCCIMVAC(r1) /* Clean and invalidate data cache line by VA to PoC */
/* Loop, invalidating each D cache line */
1:
mcr CP15_DCIMVAC(r0) /* Invalidate data cache line by VA to PoC */
add r0, r0, r2 /* R12=Next cache line */
cmp r0, r1 /* Loop until all cache lines have been invalidate */
add r0, r0, r2 /* R12=Next cache line */
cmp r0, r1 /* Loop until all cache lines have been invalidate */
blo 1b
dsb

View file

@ -6,10 +6,10 @@
*
* References:
*
* "Cortex-A5 MPCore, Technical Reference Manual", Revision: r0p1,
* Copyright © 2010 ARM. All rights reserved. ARM DDI 0434B (ID101810)
* "ARM® Architecture Reference Manual, ARMv7-A and ARMv7-R edition",
* Copyright © 1996-1998, 2000, 2004-2012 ARM. All rights reserved. ARM
* "Cortex-A5 MPCore, Technical Reference Manual", Revision: r0p1,
* Copyright (c) 2010 ARM. All rights reserved. ARM DDI 0434B (ID101810)
* "ARM Architecture Reference Manual, ARMv7-A and ARMv7-R edition",
* Copyright (c) 1996-1998, 2000, 2004-2012 ARM. All rights reserved. ARM
* DDI 0406C.b (ID072512)
*
* Portions of this file derive from Atmel sample code for the SAMA5D3 Cortex-A5
@ -91,24 +91,24 @@
cp15_invalidate_dcache_all:
mrc CP15_CCSIDR(r0) /* Read the Cache Size Identification Register */
ldr r3, =0xffff /* Isolate the NumSets field (bits 13-27) */
ldr r3, =0xffff /* Isolate the NumSets field (bits 13-27) */
and r0, r3, r0, lsr #13 /* r0=NumSets (number of sets - 1) */
mov r1, #0 /* r1 = way loop counter */
mov r1, #0 /* r1 = way loop counter */
way_loop:
mov r3, #0 /* r3 = set loop counter */
mov r3, #0 /* r3 = set loop counter */
set_loop:
mov r2, r1, lsl #30 /* r2 = way loop counter << 30 */
orr r2, r3, lsl #5 /* r2 = set/way cache operation format */
mcr CP15_DCISW(r2) /* Data Cache Invalidate by Set/Way */
add r3, r3, #1 /* Increment set counter */
cmp r0, r3 /* Last set? */
bne set_loop /* Keep looping if not */
add r3, r3, #1 /* Increment set counter */
cmp r0, r3 /* Last set? */
bne set_loop /* Keep looping if not */
add r1, r1, #1 /* Increment the way counter */
cmp r1, #4 /* Last way? (four ways assumed) */
bne way_loop /* Keep looping if not */
add r1, r1, #1 /* Increment the way counter */
cmp r1, #4 /* Last way? (four ways assumed) */
bne way_loop /* Keep looping if not */
dsb
bx lr

View file

@ -26,8 +26,6 @@
#include "arm_vfork.h"
.file "vfork.S"
/****************************************************************************
* Pre-processor Definitions
****************************************************************************/
@ -36,6 +34,7 @@
* Public Symbols
****************************************************************************/
.file "vfork.S"
.globl up_vfork
/****************************************************************************
@ -89,7 +88,7 @@
vfork:
/* Create a stack frame */
mov r0, sp /* Save the value of the stack on entry */
mov r0, sp /* Save the value of the stack on entry */
sub sp, sp, #VFORK_SIZEOF /* Allocate the structure on the stack */
/* CPU registers */

View file

@ -1,5 +1,5 @@
/****************************************************************************
* arch/arm/src/armv8-m/gnu/arm_exception.S
* arch/arm/src/armv8-m/arm_exception.S
*
* Copyright (C) 2009-2013, 2015-2016, 2018 Gregory Nutt.
* All rights reserved.
@ -54,7 +54,7 @@
/* Configuration ************************************************************/
#ifdef CONFIG_ARCH_HIPRI_INTERRUPT
/* In kernel mode without an interrupt stack, this interrupt handler will set the
/* In protected mode without an interrupt stack, this interrupt handler will set the
* MSP to the stack pointer of the interrupted thread. If the interrupted thread
* was a privileged thread, that will be the MSP otherwise it will be the PSP. If
* the PSP is used, then the value of the MSP will be invalid when the interrupt
@ -70,7 +70,7 @@
*/
# if defined(CONFIG_BUILD_PROTECTED) && CONFIG_ARCH_INTERRUPTSTACK < 8
# error Interrupt stack must be used with high priority interrupts in kernel mode
# error Interrupt stack must be used with high priority interrupts in protected mode
# endif
/* Use the BASEPRI to control interrupts is required if nested, high
@ -141,8 +141,8 @@
*/
.text
.type exception_common, function
.thumb_func
.type exception_common, function
exception_common:
mrs r0, ipsr /* R0=exception number */
@ -151,24 +151,24 @@ exception_common:
/* The EXC_RETURN value tells us whether the context is on the MSP or PSP */
tst r14, #EXC_RETURN_PROCESS_STACK /* nonzero if context on process stack */
beq 1f /* Branch if context already on the MSP */
tst r14, #EXC_RETURN_PROCESS_STACK /* nonzero if context on process stack */
beq 1f /* Branch if context already on the MSP */
mrs r1, psp /* R1=The process stack pointer (PSP) */
mov sp, r1 /* Set the MSP to the PSP */
1:
mov r2, sp /* R2=Copy of the main/process stack pointer */
add r2, #HW_XCPT_SIZE /* R2=MSP/PSP before the interrupt was taken */
/* (ignoring the xPSR[9] alignment bit) */
add r2, #HW_XCPT_SIZE /* R2=MSP/PSP before the interrupt was taken */
/* (ignoring the xPSR[9] alignment bit) */
#ifdef CONFIG_ARMV8M_STACKCHECK_HARDWARE
mov r3, #0x0
ittee eq
mrseq r1, msplim
msreq msplim, r3
mrsne r1, psplim
msrne psplim, r3
mrseq r1, msplim
msreq msplim, r3
mrsne r1, psplim
msrne psplim, r3
stmdb sp!, {r1}
stmdb sp!, {r1}
#endif
#ifdef CONFIG_ARMV8M_USEBASEPRI
@ -190,11 +190,11 @@ exception_common:
* where to put the registers.
*/
vstmdb sp!, {s16-s31} /* Save the non-volatile FP context */
vstmdb sp!, {s16-s31} /* Save the non-volatile FP context */
#endif
stmdb sp!, {r2-r11,r14} /* Save the remaining registers plus the SP/PRIMASK values */
stmdb sp!, {r2-r11,r14} /* Save the remaining registers plus the SP/PRIMASK values */
/* There are two arguments to arm_doirq:
*
@ -214,7 +214,7 @@ exception_common:
* here prohibits nested interrupts without some additional logic!
*/
setintstack r2, r3
setintstack r2, r3 /* SP = IRQ stack top */
#else
/* Otherwise, we will re-use the interrupted thread's stack. That may
@ -236,7 +236,7 @@ exception_common:
*/
cmp r0, r1 /* Context switch? */
beq 2f /* Branch if no context switch */
beq 2f /* Branch if no context switch */
/* We are returning with a pending context switch. This case is different
* because in this case, the register save structure does not lie on the
@ -253,40 +253,40 @@ exception_common:
*/
add r1, r0, #SW_XCPT_SIZE /* R1=Address of HW save area in reg array */
ldmia r1!, {r4-r11} /* Fetch eight registers in HW save area */
ldmia r1!, {r4-r11} /* Fetch eight registers in HW save area */
#ifdef CONFIG_ARCH_FPU
vldmia r1!, {s0-s15} /* Fetch sixteen FP registers in HW save area */
ldmia r1, {r2-r3} /* Fetch FPSCR and Reserved in HW save area */
vldmia r1!, {s0-s15} /* Fetch sixteen FP registers in HW save area */
ldmia r1, {r2-r3} /* Fetch FPSCR and Reserved in HW save area */
#endif
ldr r1, [r0, #(4*REG_SP)] /* R1=Value of SP before interrupt */
#ifdef CONFIG_ARCH_FPU
stmdb r1!, {r2-r3} /* Store FPSCR and Reserved on the return stack */
vstmdb r1!, {s0-s15} /* Store sixteen FP registers on the return stack */
stmdb r1!, {r2-r3} /* Store FPSCR and Reserved on the return stack */
vstmdb r1!, {s0-s15} /* Store sixteen FP registers on the return stack */
#endif
stmdb r1!, {r4-r11} /* Store eight registers on the return stack */
ldmia r0!, {r2-r11,r14} /* Recover R4-R11, r14 + 2 temp values */
stmdb r1!, {r4-r11} /* Store eight registers on the return stack */
ldmia r0!, {r2-r11,r14} /* Recover R4-R11, r14 + 2 temp values */
#ifdef CONFIG_ARCH_FPU
vldmia r0!, {s16-s31} /* Recover S16-S31 */
vldmia r0!, {s16-s31} /* Recover S16-S31 */
#endif
#ifdef CONFIG_ARMV8M_STACKCHECK_HARDWARE
ldmia r0, {r0} /* Get psplim/msplim*/
ldmia r0, {r0} /* Get psplim/msplim */
#endif
b 3f /* Re-join common logic */
b 3f /* Re-join common logic */
2:
/* We are returning with no context switch. We simply need to "unwind"
* the same stack frame that we created at entry.
*/
ldmia r1!, {r2-r11,r14} /* Recover R4-R11, r14 + 2 temp values */
ldmia r1!, {r2-r11,r14} /* Recover R4-R11, r14 + 2 temp values */
#ifdef CONFIG_ARCH_FPU
vldmia r1!, {s16-s31} /* Recover S16-S31 */
vldmia r1!, {s16-s31} /* Recover S16-S31 */
#endif
#ifdef CONFIG_ARMV8M_STACKCHECK_HARDWARE
ldmia r1!, {r0} /* Get psplim/msplim */
ldmia r1!, {r0} /* Get psplim/msplim */
#endif
3:
@ -300,8 +300,8 @@ exception_common:
*/
mrs r2, control /* R2=Contents of the control register */
tst r14, #EXC_RETURN_PROCESS_STACK /* nonzero if context on process stack */
beq 4f /* Branch if privileged */
tst r14, #EXC_RETURN_PROCESS_STACK /* nonzero if context on process stack */
beq 4f /* Branch if privileged */
orr r2, r2, #1 /* Unprivileged mode */
#ifdef CONFIG_ARMV8M_STACKCHECK_HARDWARE
@ -318,15 +318,15 @@ exception_common:
5:
msr control, r2 /* Save the updated control register */
#else
tst r14, #EXC_RETURN_PROCESS_STACK /* nonzero if context on process stack */
tst r14, #EXC_RETURN_PROCESS_STACK /* nonzero if context on process stack */
#ifdef CONFIG_ARMV8M_STACKCHECK_HARDWARE
ite eq
msreq msplim, r0
msrne psplim, r0
msreq msplim, r0
msrne psplim, r0
#endif
ite eq /* next two instructions conditional */
msreq msp, r1 /* R1=The main stack pointer */
msrne psp, r1 /* R1=The process stack pointer */
ite eq /* Next two instructions conditional */
msreq msp, r1 /* R1=The main stack pointer */
msrne psp, r1 /* R1=The process stack pointer */
#endif
/* Restore the interrupt state */
@ -341,7 +341,7 @@ exception_common:
* return to thread mode, and (2) select the correct stack.
*/
bx r14 /* And return */
bx r14 /* And return */
.size exception_common, .-exception_common

View file

@ -1,5 +1,5 @@
/****************************************************************************
* arch/arm/src/armv8-m/gnu/arm_fetchadd.S
* arch/arm/src/armv8-m/arm_fetchadd.S
*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
@ -26,7 +26,7 @@
.syntax unified
.thumb
.file "arm_fetchadd.S"
.file "arm_fetchadd.S"
/****************************************************************************
* Public Functions
@ -57,15 +57,15 @@
up_fetchadd32:
1:
ldrex r2, [r0] /* Fetch the value to be incremented */
ldrex r2, [r0] /* Fetch the value to be incremented */
add r2, r2, r1 /* Add the addend */
strex r3, r2, [r0] /* Attempt to save the result */
strex r3, r2, [r0] /* Attempt to save the result */
teq r3, #0 /* r3 will be 1 if strex failed */
bne 1b /* Failed to lock... try again */
bne 1b /* Failed to lock... try again */
mov r0, r2 /* Return the incremented value */
bx lr /* Successful! */
bx lr /* Successful! */
.size up_fetchadd32, . - up_fetchadd32
/****************************************************************************
@ -91,15 +91,15 @@ up_fetchadd32:
up_fetchsub32:
1:
ldrex r2, [r0] /* Fetch the value to be decremented */
ldrex r2, [r0] /* Fetch the value to be decremented */
sub r2, r2, r1 /* Subtract the subtrahend */
strex r3, r2, [r0] /* Attempt to save the result */
strex r3, r2, [r0] /* Attempt to save the result */
teq r3, #0 /* r3 will be 1 if strex failed */
bne 1b /* Failed to lock... try again */
bne 1b /* Failed to lock... try again */
mov r0, r2 /* Return the decremented value */
bx lr /* Successful! */
bx lr /* Successful! */
.size up_fetchsub32, . - up_fetchsub32
/****************************************************************************
@ -125,15 +125,15 @@ up_fetchsub32:
up_fetchadd16:
1:
ldrexh r2, [r0] /* Fetch the value to be incremented */
ldrexh r2, [r0] /* Fetch the value to be incremented */
add r2, r2, r1 /* Add the addend */
strexh r3, r2, [r0] /* Attempt to save the result */
strexh r3, r2, [r0] /* Attempt to save the result */
teq r3, #0 /* r3 will be 1 if strexh failed */
bne 1b /* Failed to lock... try again */
bne 1b /* Failed to lock... try again */
mov r0, r2 /* Return the incremented value */
bx lr /* Successful! */
bx lr /* Successful! */
.size up_fetchadd16, . - up_fetchadd16
/****************************************************************************
@ -159,17 +159,17 @@ up_fetchadd16:
up_fetchsub16:
1:
ldrexh r2, [r0] /* Fetch the value to be decremented */
ldrexh r2, [r0] /* Fetch the value to be decremented */
sub r2, r2, r1 /* Subtract the subtrahend */
/* Attempt to save the decremented value */
strexh r3, r2, [r0] /* Attempt to save the result */
strexh r3, r2, [r0] /* Attempt to save the result */
teq r3, #0 /* r3 will be 1 if strexh failed */
bne 1b /* Failed to lock... try again */
bne 1b /* Failed to lock... try again */
mov r0, r2 /* Return the decremented value */
bx lr /* Successful! */
bx lr /* Successful! */
.size up_fetchsub16, . - up_fetchsub16
/****************************************************************************
@ -195,15 +195,15 @@ up_fetchsub16:
up_fetchadd8:
1:
ldrexb r2, [r0] /* Fetch the value to be incremented */
ldrexb r2, [r0] /* Fetch the value to be incremented */
add r2, r2, r1 /* Add the addend */
strexb r3, r2, [r0] /* Attempt to save the result */
strexb r3, r2, [r0] /* Attempt to save the result */
teq r3, #0 /* r3 will be 1 if strexb failed */
bne 1b /* Failed to lock... try again */
bne 1b /* Failed to lock... try again */
mov r0, r2 /* Return the incremented value */
bx lr /* Successful! */
bx lr /* Successful! */
.size up_fetchadd8, . - up_fetchadd8
/****************************************************************************
@ -229,14 +229,14 @@ up_fetchadd8:
up_fetchsub8:
1:
ldrexb r2, [r0] /* Fetch the value to be decremented */
ldrexb r2, [r0] /* Fetch the value to be decremented */
sub r2, r2, r1 /* Subtract the subtrahend */
strexb r3, r2, [r0] /* Attempt to save the result */
strexb r3, r2, [r0] /* Attempt to save the result */
teq r3, #0 /* r3 will be 1 if strexb failed */
bne 1b /* Failed to lock... try again */
bne 1b /* Failed to lock... try again */
mov r0, r2 /* Return the decremented value */
bx lr /* Successful! */
bx lr /* Successful! */
.size up_fetchsub8, . - up_fetchsub8
.end

View file

@ -1,5 +1,5 @@
/****************************************************************************
* arch/arm/src/armv8-m/gnu/arm_fpu.S
* arch/arm/src/armv8-m/arm_fpu.S
*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
@ -84,73 +84,73 @@ arm_savefpu:
* s0, s1, ... in increasing address order.
*/
vstmia r1!, {s0-s31} /* Save the full FP context */
vstmia r1!, {s0-s31} /* Save the full FP context */
/* Store the floating point control and status register. At the end of the
* vstmia, r1 will point to the FPCSR storage location.
*/
vmrs r2, fpscr /* Fetch the FPCSR */
vmrs r2, fpscr /* Fetch the FPCSR */
str r2, [r1], #4 /* Save the floating point control and status register */
#else
/* Store all floating point registers */
#if 1 /* Use store multiple */
fstmias r1!, {s0-s31} /* Save the full FP context */
fstmias r1!, {s0-s31} /* Save the full FP context */
#else
vmov r2, r3, d0 /* r2, r3 = d0 */
vmov r2, r3, d0 /* r2, r3 = d0 */
str r2, [r1], #4 /* Save S0 and S1 values */
str r3, [r1], #4
vmov r2, r3, d1 /* r2, r3 = d1 */
vmov r2, r3, d1 /* r2, r3 = d1 */
str r2, [r1], #4 /* Save S2 and S3 values */
str r3, [r1], #4
vmov r2, r3, d2 /* r2, r3 = d2 */
vmov r2, r3, d2 /* r2, r3 = d2 */
str r2, [r1], #4 /* Save S4 and S5 values */
str r3, [r1], #4
vmov r2, r3, d3 /* r2, r3 = d3 */
vmov r2, r3, d3 /* r2, r3 = d3 */
str r2, [r1], #4 /* Save S6 and S7 values */
str r3, [r1], #4
vmov r2, r3, d4 /* r2, r3 = d4 */
vmov r2, r3, d4 /* r2, r3 = d4 */
str r2, [r1], #4 /* Save S8 and S9 values */
str r3, [r1], #4
vmov r2, r3, d5 /* r2, r3 = d5 */
vmov r2, r3, d5 /* r2, r3 = d5 */
str r2, [r1], #4 /* Save S10 and S11 values */
str r3, [r1], #4
vmov r2, r3, d6 /* r2, r3 = d6 */
vmov r2, r3, d6 /* r2, r3 = d6 */
str r2, [r1], #4 /* Save S12 and S13 values */
str r3, [r1], #4
vmov r2, r3, d7 /* r2, r3 = d7 */
vmov r2, r3, d7 /* r2, r3 = d7 */
str r2, [r1], #4 /* Save S14 and S15 values */
str r3, [r1], #4
vmov r2, r3, d8 /* r2, r3 = d8 */
vmov r2, r3, d8 /* r2, r3 = d8 */
str r2, [r1], #4 /* Save S16 and S17 values */
str r3, [r1], #4
vmov r2, r3, d9 /* r2, r3 = d9 */
vmov r2, r3, d9 /* r2, r3 = d9 */
str r2, [r1], #4 /* Save S18 and S19 values */
str r3, [r1], #4
vmov r2, r3, d10 /* r2, r3 = d10 */
vmov r2, r3, d10 /* r2, r3 = d10 */
str r2, [r1], #4 /* Save S20 and S21 values */
str r3, [r1], #4
vmov r2, r3, d11 /* r2, r3 = d11 */
vmov r2, r3, d11 /* r2, r3 = d11 */
str r2, [r1], #4 /* Save S22 and S23 values */
str r3, [r1], #4
vmov r2, r3, d12 /* r2, r3 = d12 */
vmov r2, r3, d12 /* r2, r3 = d12 */
str r2, [r1], #4 /* Save S24 and S25 values */
str r3, [r1], #4
vmov r2, r3, d13 /* r2, r3 = d13 */
vmov r2, r3, d13 /* r2, r3 = d13 */
str r2, [r1], #4 /* Save S26 and S27 values */
str r3, [r1], #4
vmov r2, r3, d14 /* r2, r3 = d14 */
vmov r2, r3, d14 /* r2, r3 = d14 */
str r2, [r1], #4 /* Save S28 and S29 values */
str r3, [r1], #4
vmov r2, r3, d15 /* r2, r3 = d15 */
vmov r2, r3, d15 /* r2, r3 = d15 */
str r2, [r1], #4 /* Save S30 and S31 values */
str r3, [r1], #4
#endif
/* Store the floating point control and status register */
fmrx r2, fpscr /* Fetch the FPCSR */
fmrx r2, fpscr /* Fetch the FPCSR */
str r2, [r1], #4 /* Save the floating point control and status register */
#endif
bx lr
@ -190,70 +190,70 @@ arm_restorefpu:
* s0, s1, ... in increasing address order.
*/
vldmia r1!, {s0-s31} /* Restore the full FP context */
vldmia r1!, {s0-s31} /* Restore the full FP context */
/* Load the floating point control and status register. At the end of the
* vstmia, r1 will point to the FPCSR storage location.
*/
ldr r2, [r1], #4 /* Fetch the floating point control and status register */
vmsr fpscr, r2 /* Restore the FPCSR */
vmsr fpscr, r2 /* Restore the FPCSR */
#else
/* Load all floating point registers Registers are loaded in numeric order,
* s0, s1, ... in increasing address order.
*/
#if 1 /* Use load multiple */
fldmias r1!, {s0-s31} /* Restore the full FP context */
fldmias r1!, {s0-s31} /* Restore the full FP context */
#else
ldr r2, [r1], #4 /* Fetch S0 and S1 values */
ldr r3, [r1], #4
vmov d0, r2, r3 /* Save as d0 */
vmov d0, r2, r3 /* Save as d0 */
ldr r2, [r1], #4 /* Fetch S2 and S3 values */
ldr r3, [r1], #4
vmov d1, r2, r3 /* Save as d1 */
vmov d1, r2, r3 /* Save as d1 */
ldr r2, [r1], #4 /* Fetch S4 and S5 values */
ldr r3, [r1], #4
vmov d2, r2, r3 /* Save as d2 */
vmov d2, r2, r3 /* Save as d2 */
ldr r2, [r1], #4 /* Fetch S6 and S7 values */
ldr r3, [r1], #4
vmov d3, r2, r3 /* Save as d3 */
vmov d3, r2, r3 /* Save as d3 */
ldr r2, [r1], #4 /* Fetch S8 and S9 values */
ldr r3, [r1], #4
vmov d4, r2, r3 /* Save as d4 */
vmov d4, r2, r3 /* Save as d4 */
ldr r2, [r1], #4 /* Fetch S10 and S11 values */
ldr r3, [r1], #4
vmov d5, r2, r3 /* Save as d5 */
vmov d5, r2, r3 /* Save as d5 */
ldr r2, [r1], #4 /* Fetch S12 and S13 values */
ldr r3, [r1], #4
vmov d6, r2, r3 /* Save as d6 */
vmov d6, r2, r3 /* Save as d6 */
ldr r2, [r1], #4 /* Fetch S14 and S15 values */
ldr r3, [r1], #4
vmov d7, r2, r3 /* Save as d7 */
vmov d7, r2, r3 /* Save as d7 */
ldr r2, [r1], #4 /* Fetch S16 and S17 values */
ldr r3, [r1], #4
vmov d8, r2, r3 /* Save as d8 */
vmov d8, r2, r3 /* Save as d8 */
ldr r2, [r1], #4 /* Fetch S18 and S19 values */
ldr r3, [r1], #4
vmov d9, r2, r3 /* Save as d9 */
vmov d9, r2, r3 /* Save as d9 */
ldr r2, [r1], #4 /* Fetch S20 and S21 values */
ldr r3, [r1], #4
vmov d10, r2, r3 /* Save as d10 */
vmov d10, r2, r3 /* Save as d10 */
ldr r2, [r1], #4 /* Fetch S22 and S23 values */
ldr r3, [r1], #4
vmov d11, r2, r3 /* Save as d11 */
vmov d11, r2, r3 /* Save as d11 */
ldr r2, [r1], #4 /* Fetch S24 and S25 values */
ldr r3, [r1], #4
vmov d12, r2, r3 /* Save as d12 */
vmov d12, r2, r3 /* Save as d12 */
ldr r2, [r1], #4 /* Fetch S26 and S27 values */
ldr r3, [r1], #4
vmov d13, r2, r3 /* Save as d13 */
vmov d13, r2, r3 /* Save as d13 */
ldr r2, [r1], #4 /* Fetch S28 and S29 values */
ldr r3, [r1], #4
vmov d14, r2, r3 /* Save as d14 */
vmov d14, r2, r3 /* Save as d14 */
ldr r2, [r1], #4 /* Fetch S30 and S31 values */
ldr r3, [r1], #4
vmov d15, r2, r3 /* Save as d15 */
vmov d15, r2, r3 /* Save as d15 */
#endif
/* Load the floating point control and status register. r1 points t
@ -261,7 +261,7 @@ arm_restorefpu:
*/
ldr r2, [r1], #4 /* Fetch the floating point control and status register */
fmxr fpscr, r2 /* Restore the FPCSR */
fmxr fpscr, r2 /* Restore the FPCSR */
#endif
bx lr

View file

@ -1,5 +1,5 @@
/****************************************************************************
* arch/arm/src/armv8-m/gnu/arm_fullcontextrestore.S
* arch/arm/src/armv8-m/arm_fullcontextrestore.S
*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
@ -68,12 +68,12 @@ arm_fullcontextrestore:
/* Perform the System call with R0=1 and R1=regs */
mov r1, r0 /* R1: regs */
mov r1, r0 /* R1: regs */
mov r0, #SYS_restore_context /* R0: restore context */
svc 0 /* Force synchronous SVCall (or Hard Fault) */
svc 0 /* Force synchronous SVCall (or Hard Fault) */
/* This call should not return */
bx lr /* Unnecessary ... will not return */
bx lr /* Unnecessary ... will not return */
.size arm_fullcontextrestore, .-arm_fullcontextrestore
.end

View file

@ -1,5 +1,5 @@
/****************************************************************************
* arch/arm/src/armv8-m/gnu/up_lazyexcption.S
* arch/arm/src/armv8-m/up_lazyexcption.S
*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
@ -37,7 +37,7 @@
/* Configuration ************************************************************/
#ifdef CONFIG_ARCH_HIPRI_INTERRUPT
/* In kernel mode without an interrupt stack, this interrupt handler will set the MSP to the
/* In protected mode without an interrupt stack, this interrupt handler will set the MSP to the
* stack pointer of the interrupted thread. If the interrupted thread was a privileged
* thread, that will be the MSP otherwise it will be the PSP. If the PSP is used, then the
* value of the MSP will be invalid when the interrupt handler returns because it will be a
@ -51,7 +51,7 @@
*/
# if defined(CONFIG_BUILD_PROTECTED) && CONFIG_ARCH_INTERRUPTSTACK < 8
# error Interrupt stack must be used with high priority interrupts in kernel mode
# error Interrupt stack must be used with high priority interrupts in protected mode
# endif
/* Use the BASEPRI to control interrupts is required if nested, high
@ -119,17 +119,17 @@
*/
.text
.type exception_common, function
.type exception_common, function
exception_common:
/* Get the IRQ number from the IPSR */
mrs r0, ipsr /* R0=exception number */
mrs r0, ipsr /* R0=exception number */
/* Complete the context save */
tst r14, #EXC_RETURN_PROCESS_STACK /* nonzero if context on process stack */
tst r14, #EXC_RETURN_PROCESS_STACK /* Nonzero if context on process stack */
#ifdef CONFIG_BUILD_PROTECTED
/* The EXC_RETURN value will be 0xfffffff9 (privileged thread) or 0xfffffff1
@ -137,31 +137,31 @@ exception_common:
* EXC_RETURN is 0xfffffffd (unprivileged thread)
*/
beq 1f /* Branch if context already on the MSP */
beq 1f /* Branch if context already on the MSP */
mrs r1, psp /* R1=The process stack pointer (PSP) */
mov sp, r1 /* Set the MSP to the PSP */
1:
#endif
/* r1 holds the value of the stack pointer AFTER the exception handling logic
/* sp holds the value of the stack pointer AFTER the exception handling logic
* pushed the various registers onto the stack. Get r2 = the value of the
* stack pointer BEFORE the interrupt modified it.
*/
mov r2, sp /* R2=Copy of the main/process stack pointer */
add r2, #HW_XCPT_SIZE /* R2=MSP/PSP before the interrupt was taken */
add r2, #HW_XCPT_SIZE /* R2=MSP/PSP before the interrupt was taken */
#ifdef CONFIG_ARMV8M_STACKCHECK_HARDWARE
mov r3, #0x0
ittee eq
mrseq r1, msplim
msreq msplim, r3
mrsne r1, psplim
msrne psplim, r3
mrseq r1, msplim
msreq msplim, r3
mrsne r1, psplim
msrne psplim, r3
stmdb sp!, {r1}
stmdb sp!, {r1}
#endif
#ifdef CONFIG_ARMV8M_USEBASEPRI
@ -186,9 +186,9 @@ exception_common:
*/
#ifdef CONFIG_BUILD_PROTECTED
stmdb sp!, {r2-r11,r14} /* Save the remaining registers plus the SP value */
stmdb sp!, {r2-r11,r14} /* Save the remaining registers plus the SP value */
#else
stmdb sp!, {r2-r11} /* Save the remaining registers plus the SP value */
stmdb sp!, {r2-r11} /* Save the remaining registers plus the SP value */
#endif
/* There are two arguments to arm_doirq:
@ -209,7 +209,7 @@ exception_common:
* here prohibits nested interrupts without some additional logic!
*/
setintstack r2, r3
setintstack r2, r3 /* SP = IRQ stack top */
#else
/* Otherwise, we will re-use the interrupted thread's stack. That may
@ -231,7 +231,7 @@ exception_common:
*/
cmp r0, r1 /* Context switch? */
beq 2f /* Branch if no context switch */
beq 2f /* Branch if no context switch */
/* We are returning with a pending context switch.
*
@ -258,19 +258,19 @@ exception_common:
* values to the stack.
*/
add r1, r0, #SW_XCPT_SIZE /* R1=Address of HW save area in reg array */
ldmia r1, {r4-r11} /* Fetch eight registers in HW save area */
ldr r1, [r0, #(4*REG_SP)] /* R1=Value of SP before interrupt */
stmdb r1!, {r4-r11} /* Store eight registers in HW save area */
add r1, r0, #SW_XCPT_SIZE /* R1=Address of HW save area in reg array */
ldmia r1, {r4-r11} /* Fetch eight registers in HW save area */
ldr r1, [r0, #(4*REG_SP)] /* R1=Value of SP before interrupt */
stmdb r1!, {r4-r11} /* Store eight registers in HW save area */
#ifdef CONFIG_BUILD_PROTECTED
ldmia r0!, {r2-r11,r14} /* Recover R4-R11, r14 + 2 temp values */
ldmia r0!, {r2-r11,r14} /* Recover R4-R11, r14 + 2 temp values */
#else
ldmia r0!, {r2-r11} /* Recover R4-R11 + 2 temp values */
ldmia r0!, {r2-r11} /* Recover R4-R11 + 2 temp values */
#endif
#ifdef CONFIG_ARMV8M_STACKCHECK_HARDWARE
ldmia r0, {r0} /* Get psplim/msplim*/
ldmia r0, {r0} /* Get psplim/msplim*/
#endif
b 3f /* Re-join common logic */
b 3f /* Re-join common logic */
/* We are returning with no context switch. We simply need to "unwind"
* the same stack frame that we created
@ -281,9 +281,9 @@ exception_common:
2:
#ifdef CONFIG_BUILD_PROTECTED
ldmia r1!, {r2-r11,r14} /* Recover R4-R11, r14 + 2 temp values */
ldmia r1!, {r2-r11,r14} /* Recover R4-R11, r14 + 2 temp values */
#else
ldmia r1!, {r2-r11} /* Recover R4-R11 + 2 temp values */
ldmia r1!, {r2-r11} /* Recover R4-R11 + 2 temp values */
#endif
#ifdef CONFIG_ARCH_FPU
@ -295,7 +295,7 @@ exception_common:
#endif
#ifdef CONFIG_ARMV8M_STACKCHECK_HARDWARE
ldmia r1!, {r0} /* Get psplim/msplim */
ldmia r1!, {r0} /* Get psplim/msplim */
#endif
/* Set up to return from the exception
@ -316,8 +316,8 @@ exception_common:
*/
mrs r2, control /* R2=Contents of the control register */
tst r14, #EXC_RETURN_PROCESS_STACK /* nonzero if context on process stack */
beq 4f /* Branch if privileged */
tst r14, #EXC_RETURN_PROCESS_STACK /* nonzero if context on process stack */
beq 4f /* Branch if privileged */
orr r2, r2, #1 /* Unprivileged mode */
#ifdef CONFIG_ARMV8M_STACKCHECK_HARDWARE
@ -343,7 +343,7 @@ exception_common:
* actually occurs with interrupts still disabled).
*/
ldr r14, =EXC_RETURN_PRIVTHR /* Load the special value */
ldr r14, =EXC_RETURN_PRIVTHR /* Load the special value */
#endif
/* Restore the interrupt state */

View file

@ -1,5 +1,5 @@
/****************************************************************************
* arch/arm/src/armv8-m/gnu/arm_saveusercontext.S
* arch/arm/src/armv8-m/arm_saveusercontext.S
*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
@ -70,9 +70,9 @@ arm_saveusercontext:
/* Perform the System call with R0=0 and R1=regs */
mov r1, r0 /* R1: regs */
mov r1, r0 /* R1: regs */
mov r0, #SYS_save_context /* R0: save context (also return value) */
svc 0 /* Force synchronous SVCall (or Hard Fault) */
svc 0 /* Force synchronous SVCall (or Hard Fault) */
/* There are two return conditions. On the first return, R0 (the
* return value will be zero. On the second return we need to
@ -82,7 +82,7 @@ arm_saveusercontext:
add r2, r1, #(4*REG_R0)
mov r3, #1
str r3, [r2, #0]
bx lr /* "normal" return with r0=0 or
* context switch with r0=1 */
bx lr /* "normal" return with r0=0 or
* context switch with r0=1 */
.size arm_saveusercontext, .-arm_saveusercontext
.end

View file

@ -1,5 +1,5 @@
/****************************************************************************
* arch/arm/src/armv8-m/gnu/arm_setjmp.S
* arch/arm/src/armv8-m/arm_setjmp.S
*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
@ -18,11 +18,6 @@
*
****************************************************************************/
/* When this file is assembled, it will require the following GCC options:
*
* -mcpu=cortex-m4 -mfloat-abi=hard -mfpu=fpv4-sp-d16 -meabi=5 -mthumb
*/
/****************************************************************************
* Included Files
****************************************************************************/
@ -37,12 +32,12 @@
* Public Symbols
****************************************************************************/
.globl setjmp
.globl longjmp
.globl setjmp
.globl longjmp
.syntax unified
.thumb
.file "setjmp.S"
.syntax unified
.thumb
.file "setjmp.S"
/****************************************************************************
* Public Functions
@ -69,33 +64,33 @@
*
****************************************************************************/
.thumb_func
.type setjmp, function
.thumb_func
.type setjmp, function
setjmp:
/* Store callee-saved Core registers */
/* Store callee-saved Core registers */
mov ip, sp /* move sp to ip so we can save it */
stmia r0!, {r4-r11, ip, lr}
mov ip, sp /* Move sp to ip so we can save it */
stmia r0!, {r4-r11, ip, lr}
#ifdef CONFIG_ARCH_FPU
vstmia r0!, {s16-s31} /* Save the callee-saved FP registers */
vstmia r0!, {s16-s31} /* Save the callee-saved FP registers */
/* Store the floating point control and status register. At the end of the
* vstmia, r0 will point to the FPCSR storage location.
*/
/* Store the floating point control and status register. At the end of the
* vstmia, r0 will point to the FPCSR storage location.
*/
vmrs r1, fpscr /* Fetch the FPCSR */
str r1, [r0], #4 /* Save the floating point control and status register */
/* DSA: don't need to inc r0 */
vmrs r1, fpscr /* Fetch the FPCSR */
str r1, [r0], #4 /* Save the floating point control and status register */
/* DSA: don't need to inc r0 */
#endif /* CONFIG_ARCH_FPU */
/* we're done, we're out of here */
/* we're done, we're out of here */
mov r0, #0
bx lr
mov r0, #0
bx lr
.size setjmp, .-setjmp
.size setjmp, .-setjmp
/****************************************************************************
* Name: longjmp
@ -119,29 +114,29 @@ setjmp:
*
****************************************************************************/
.thumb_func
.type longjmp, function
.thumb_func
.type longjmp, function
longjmp:
/* Load callee-saved Core registers */
/* Load callee-saved Core registers */
ldmia r0!, {r4-r11, ip, lr}
mov sp, ip /* restore sp */
ldmia r0!, {r4-r11, ip, lr}
mov sp, ip /* Restore sp */
#ifdef CONFIG_ARCH_FPU
/* Load callee-saved floating point registers. */
/* Load callee-saved floating point registers. */
vldmia r0!, {s16-s31} /* Restore FP context */
vldmia r0!, {s16-s31} /* Restore FP context */
/* Load the floating point control and status register. */
/* Load the floating point control and status register. */
ldr r2, [r0], #4 /* Fetch the floating point control and status register */
/* DSA: don't need to inc r0 */
vmsr fpscr, r2 /* Restore the FPCSR */
ldr r2, [r0], #4 /* Fetch the floating point control and status register */
/* DSA: don't need to inc r0 */
vmsr fpscr, r2 /* Restore the FPCSR */
#endif /* CONFIG_ARCH_FPU */
mov r0, r1 /* return val */
bx lr
mov r0, r1 /* return val */
b lr
.size longjmp, .-longjmp
.end
.size longjmp, .-longjmp
.end

View file

@ -1,5 +1,5 @@
/****************************************************************************
* arch/arm/src/armv8-m/gnu/arm_signal_handler.S
* arch/arm/src/armv8-m/arm_signal_handler.S
*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
@ -76,7 +76,7 @@ up_signal_handler:
/* Save some register */
push {lr} /* Save LR on the stack */
push {lr} /* Save LR on the stack */
/* Call the signal handler */
@ -84,7 +84,7 @@ up_signal_handler:
mov r0, r1 /* R0=signo */
mov r1, r2 /* R1=info */
mov r2, r3 /* R2=ucontext */
blx ip /* Call the signal handler */
blx ip /* Call the signal handler */
/* Restore the registers */

View file

@ -1,5 +1,5 @@
/****************************************************************************
* arch/arm/src/armv8-m/gnu/arm_switchcontext.S
* arch/arm/src/armv8-m/arm_switchcontext.S
*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
@ -71,8 +71,8 @@ arm_switchcontext:
mov r2, r1 /* R2: restoreregs */
mov r1, r0 /* R1: saveregs */
mov r0, #SYS_switch_context /* R0: context switch */
svc 0 /* Force synchronous SVCall (or Hard Fault) */
mov r0, #SYS_switch_context /* R0: context switch */
svc 0 /* Force synchronous SVCall (or Hard Fault) */
/* We will get here only after the return from the context switch */

View file

@ -1,5 +1,5 @@
/****************************************************************************
* arch/arm/src/armv8-m/gnu/arm_testset.S
* arch/arm/src/armv8-m/arm_testset.S
*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
@ -25,7 +25,7 @@
#include <nuttx/config.h>
#include <arch/spinlock.h>
.syntax unified
.syntax unified
.thumb
.file "arm_testset.S"
@ -83,19 +83,19 @@ up_testset:
/* Test if the spinlock is locked or not */
1:
ldrexb r2, [r0] /* Test if spinlock is locked or not */
ldrexb r2, [r0] /* Test if spinlock is locked or not */
cmp r2, r1 /* Already locked? */
beq 2f /* If already locked, return SP_LOCKED */
beq 2f /* If already locked, return SP_LOCKED */
/* Not locked ... attempt to lock it */
strexb r2, r1, [r0] /* Attempt to set the locked state */
strexb r2, r1, [r0] /* Attempt to set the locked state */
cmp     r2, r1                 /* r2 will be 1 if strexb failed */
beq 1b /* Failed to lock... try again */
beq 1b /* Failed to lock... try again */
/* Lock acquired -- return SP_UNLOCKED */
dmb /* Required before accessing protected resource */
dmb /* Required before accessing protected resource */
mov r0, #SP_UNLOCKED
bx lr

View file

@ -1,5 +1,5 @@
/****************************************************************************
* arch/arm/src/armv8-m/gnu/vfork.S
* arch/arm/src/armv8-m/vfork.S
*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
@ -47,20 +47,21 @@
* Name: vfork
*
* Description:
* The vfork() function has the same effect as fork(), except that the behavior is
* undefined if the process created by vfork() either modifies any data other than
* a variable of type pid_t used to store the return value from vfork(), or returns
* from the function in which vfork() was called, or calls any other function before
* successfully calling _exit() or one of the exec family of functions.
* The vfork() function has the same effect as fork(), except that the
* behavior is undefined if the process created by vfork() either modifies
* any data other than a variable of type pid_t used to store the return
* value from vfork(), or returns from the function in which vfork() was
* called, or calls any other function before successfully calling _exit()
* or one of the exec family of functions.
*
* This thin layer implements vfork by simply calling up_vfork() with the vfork()
* context as an argument. The overall sequence is:
* This thin layer implements vfork by simply calling up_vfork() with the
* vfork() context as an argument. The overall sequence is:
*
* 1) User code calls vfork(). vfork() collects context information and
 *      transfers control to up_vfork().
 *   2) up_vfork() then calls nxtask_setup_vfork().
* 3) nxtask_setup_vfork() allocates and configures the child task's TCB. This
* consists of:
* 3) nxtask_setup_vfork() allocates and configures the child task's TCB.
* This consists of:
* - Allocation of the child task's TCB.
* - Initialization of file descriptors and streams
* - Configuration of environment variables
@ -77,10 +78,10 @@
* None
*
* Returned Value:
* Upon successful completion, vfork() returns 0 to the child process and returns
* the process ID of the child process to the parent process. Otherwise, -1 is
* returned to the parent, no child process is created, and errno is set to
* indicate the error.
* Upon successful completion, vfork() returns 0 to the child process and
* returns the process ID of the child process to the parent process.
* Otherwise, -1 is returned to the parent, no child process is created,
* and errno is set to indicate the error.
*
****************************************************************************/
@ -90,7 +91,7 @@
vfork:
/* Create a stack frame */
mov r0, sp /* Save the value of the stack on entry */
mov r0, sp /* Save the value of the stack on entry */
sub sp, sp, #VFORK_SIZEOF /* Allocate the structure on the stack */
/* CPU registers */

View file

@ -1,5 +1,5 @@
/**************************************************************************
* c5471/c5471_lowputc.S
* arch/arm/src/c5471/c5471_lowputc.S
*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
@ -25,8 +25,6 @@
#include <nuttx/config.h>
#include "chip.h"
#include "arm_arch.h"
#include "arm_internal.h"
/**************************************************************************
* Pre-processor Definitions
@ -89,25 +87,25 @@ arm_lowputc:
* register.
*/
str r0, [r2, #UART_THR_OFFS]
str r0, [r2, #UART_THR_OFFS]
/* Wait for the transmit holding register (THR) to be
* emptied. This is determined when bit 6 of the LSR
* is set.
*/
2: ldr r1, [r2, #UART_LSR_OFFS]
tst r1, #0x00000020
beq 2b
2: ldr r1, [r2, #UART_LSR_OFFS]
tst r1, #0x00000020
beq 2b
/* If the character that we just sent was a linefeed,
* then send a carriage return as well.
*/
teq r0, #'\n'
moveq r0, #'\r'
beq 1b
teq r0, #'\n'
moveq r0, #'\r'
beq 1b
/* And return */
mov pc, lr
mov pc, lr

View file

@ -27,7 +27,6 @@
#include "arm.h"
#include "chip.h"
#include "arm_arch.h"
/****************************************************************************
* Pre-processor Definitions
@ -77,14 +76,14 @@ g_aborttmp:
arm_vectorirq:
/* On entry, we are in IRQ mode. We are free to use
* the IRQ mode r13 and r14.
*
*
*/
ldr r13, .Lirqtmp
sub lr, lr, #4
str lr, [r13] @ save lr_IRQ
str lr, [r13] /* Save lr_IRQ */
mrs lr, spsr
str lr, [r13, #4] @ save spsr_IRQ
str lr, [r13, #4] /* Save spsr_IRQ */
/* Then switch back to SVC mode */
@ -126,15 +125,15 @@ arm_vectorirq:
*/
#else
ldr r6, =SRC_IRQ_REG
ldr r6, [r6] /* Get source IRQ reg */
mov r0, #0 /* Assume IRQ0_IRQ set */
ldr r6, [r6] /* Get source IRQ reg */
mov r0, #0 /* Assume IRQ0_IRQ set */
.Lmorebits:
tst r6, #1 /* Is IRQ set? */
bne .Lhaveirq /* Yes... we have the IRQ */
add r0, r0, #1 /* Setup next IRQ */
mov r6, r6, lsr #1 /* Shift right one */
cmp r0, #16 /* Only 16 valid bits */
bcc .Lmorebits /* Keep until we have looked
tst r6, #1 /* Is IRQ set? */
bne .Lhaveirq /* Yes... we have the IRQ */
add r0, r0, #1 /* Setup next IRQ */
mov r6, r6, lsr #1 /* Shift right one */
cmp r0, #16 /* Only 16 valid bits */
bcc .Lmorebits /* Keep until we have looked
* at all bits */
b .Lnoirqset /* If we get here, there is
* no pending interrupt */

View file

@ -25,8 +25,6 @@
#include <nuttx/config.h>
#include "chip.h"
#include "arm_internal.h"
#include "arm_arch.h"
/**************************************************************************
* Pre-processor Definitions
@ -104,10 +102,10 @@ arm_lowputc:
* then send a carriage return as well.
*/
teq r0, #'\n'
moveq r0, #'\r'
beq 1b
teq r0, #'\n'
moveq r0, #'\r'
beq 1b
/* And return */
mov pc, lr
mov pc, lr

View file

@ -25,8 +25,6 @@
#include <nuttx/config.h>
#include "arm.h"
#include "arm_internal.h"
#include "arm_arch.h"
/********************************************************************
* Pre-processor Definitions
@ -63,7 +61,7 @@ up_restart:
* space.
*/
mksection r0, r4 /* r0=phys. base section */
mksection r0, r4 /* r0=phys. base section */
ldr r1, .LCmmuflags /* FLGS=MMU_MEMFLAGS */
add r3, r1, r0 /* r3=flags + base */
str r3, [r4, r0, lsr #18] /* identity mapping */
@ -82,7 +80,7 @@ up_phyrestart:
mov r0, #0
mcr p15, 0, r0, c7, c7 /* Invalidate I,D caches */
mcr p15, 0, r0, c7, c10, 4 /* Drain write buffer */
mcr p15, 0, r0, c7, c10, 4 /* Drain write buffer */
mcr p15, 0, r0, c8, c7 /* Invalidate I,D TLBs */
/* Clear bits in control register (see start.h): Disable,

View file

@ -25,8 +25,6 @@
#include <nuttx/config.h>
#include "chip.h"
#include "arm_internal.h"
#include "arm_arch.h"
/**************************************************************************
* Pre-processor Definitions
@ -95,9 +93,9 @@ arm_lowputc:
* then send a carriage return as well.
*/
teq r0, #'\n'
moveq r0, #'\r'
beq 1b
teq r0, #'\n'
moveq r0, #'\r'
beq 1b
/* Wait for the transmit register to be emptied. When the bit is
* non-zero, the TX Buffer FIFO is empty.
@ -109,5 +107,5 @@ arm_lowputc:
/* Then return */
mov pc, lr
mov pc, lr
.end

View file

@ -31,8 +31,6 @@
#include "lpc214x_pll.h"
#include "lpc214x_apb.h"
#include "lpc214x_pinsel.h"
#include "arm_internal.h"
#include "arm_arch.h"
/****************************************************************************
* Pre-processor Definitions
@ -457,7 +455,7 @@ _vector_table:
ldr pc, .Lirqhandler /* 0x18: IRQ */
ldr pc, .Lfiqhandler /* 0x1c: FIQ */
.globl __start
.globl __start
.globl arm_vectorundefinsn
.globl arm_vectorswi
.globl arm_vectorprefetch
@ -466,7 +464,7 @@ _vector_table:
.globl arm_vectorfiq
.Lresethandler:
.long __start
.long __start
.Lundefinedhandler:
.long arm_vectorundefinsn
.Lswihandler:
@ -491,7 +489,7 @@ _vector_table:
*
****************************************************************************/
.global __start
.global __start
.type __start, #function
__start:
@ -531,7 +529,7 @@ __start:
/* Configure Fast GPIO Port */
configfastport r0, r1
configfastport r0, r1
/* Configure the uart so that we can get debug output as soon
* as possible. Modifies r0, r1, r2, and r14.
@ -543,7 +541,7 @@ __start:
/* Setup system stack (and get the BSS range) */
adr r0, LC0
ldmia r0, {r4, r5, sp}
ldmia r0, {r4, r5, sp}
/* Clear system BSS section */

View file

@ -25,8 +25,6 @@
#include <nuttx/config.h>
#include "chip.h"
#include "arm_arch.h"
#include "arm_internal.h"
#include "lpc214x_pinsel.h"
#include "lpc214x_uart.h"
@ -141,7 +139,7 @@ arm_lowputc:
/* And return */
mov pc, lr
mov pc, lr
.size arm_lowputc, . - arm_lowputc
/* This performs basic initialization of the UART. This can be called very

View file

@ -47,7 +47,6 @@
#include "arm.h"
#include "lpc2378.h"
#include "arm_arch.h"
#include "lpc23xx_uart.h"
#include "lpc23xx_scb.h"
#include "lpc23xx_pinsel.h"
@ -95,11 +94,11 @@ _vector_table:
ldr pc, .Lswihandler /* 0x08: Software interrupt */
ldr pc, .Lprefetchaborthandler /* 0x0c: Prefetch abort */
ldr pc, .Ldataaborthandler /* 0x10: Data abort */
.long 0xB8A06F58 /* 0x14: Vector checksum */
.long 0xB8A06F58 /* 0x14: Vector checksum */
ldr pc, .Lirqhandler /* 0x18: IRQ */
ldr pc, .Lfiqhandler /* 0x1c: FIQ */
.globl __start
.globl __start
.globl arm_vectorundefinsn
.globl arm_vectorswi
.globl arm_vectorprefetch
@ -108,7 +107,7 @@ _vector_table:
.globl arm_vectorfiq
.Lresethandler:
.long __start
.long __start
.Lundefinedhandler:
.long arm_vectorundefinsn
.Lswihandler:
@ -132,7 +131,7 @@ _vector_table:
* below.
*/
.text
.globl __start
.globl __start
.type __start, #function
__start:
@ -158,7 +157,7 @@ __start:
/* Setup system stack (and get the BSS range) */
adr r0, LC0
ldmia r0, {r4, r5, sp}
ldmia r0, {r4, r5, sp}
/* Clear system BSS section (Initialize with 0) */

View file

@ -43,8 +43,6 @@
**************************************************************************/
#include <nuttx/config.h>
#include "arm_internal.h"
#include "arm_arch.h"
#include "lpc23xx_pinsel.h"
#include "lpc23xx_scb.h"
#include "lpc23xx_uart.h"
@ -52,6 +50,7 @@
/**************************************************************************
* Pre-processor Definitions
**************************************************************************/
@ //-- Pins
@ PINSEL0 |= (0x01<<4) | //-- P0.2 TXD0
@ (0x01<<6); //-- P0.3 RXD0
@ -142,6 +141,7 @@
FCR_RX_FIFO_RESET | FCR_FIFO_ENABLE)
@#define MULVAL (12 << 4)
@#define DIVADDVAL 3
/**************************************************************************
* Private Types
**************************************************************************/
@ -192,7 +192,7 @@ arm_lowputc:
/* And return */
mov pc, lr
mov pc, lr
.size arm_lowputc, . - arm_lowputc
/* This performs basic initialization of the UART. This can be called very
@ -216,14 +216,15 @@ up_lowsetup:
str r1, [r0]
/* Power Up Uart0 */
ldr r0, =UARTxPCLKSEL /* PCLKSEL0 address */
ldr r1, [r0]
ldr r2, =(~PCLKSEL_MASK)
and r1, r2
ldr r2, =(U0_PCLKSEL)
orr r1, r2
str r1, [r0]
ldr r0, =UARTxPCLKSEL /* PCLKSEL0 address */
ldr r1, [r0]
ldr r2, =(~PCLKSEL_MASK)
and r1, r2
ldr r2, =(U0_PCLKSEL)
orr r1, r2
str r1, [r0]
/* Configure parity, data bits, stop bits and set DLAB=1 */
@ -233,19 +234,19 @@ up_lowsetup:
/* Set the BAUD divisor */
mov r1, #((MULVAL << 4) | DIVADDVAL)
strb r1, [r0, #UART_FDR_OFFSET]
mov r1, #((MULVAL << 4) | DIVADDVAL)
strb r1, [r0, #UART_FDR_OFFSET]
mov r1, #DLMVAL
strb r1, [r0, #UART_DLM_OFFSET]
mov r1, #DLMVAL
strb r1, [r0, #UART_DLM_OFFSET]
mov r1, #DLLVAL
strb r1, [r0, #UART_DLL_OFFSET]
mov r1, #DLLVAL
strb r1, [r0, #UART_DLL_OFFSET]
/* Clear DLAB and Set format 8N1 */
mov r1, #LCR_VALUE
strb r1, [r0, #UART_LCR_OFFSET]
mov r1, #LCR_VALUE
strb r1, [r0, #UART_LCR_OFFSET]
/* Configure the FIFOs */

View file

@ -35,7 +35,7 @@
/* Place a branch to the real head at the entry point */
.section .text.start
.section .text.start
b __start
/* Exception Vectors like they are needed for the exception vector
@ -43,7 +43,7 @@
* linked to appear at 0x80001c
*/
.section .text.exceptions
.section .text.exceptions
_undef_instr:
b arm_vectorundefinsn
_sw_interr:

View file

@ -40,8 +40,6 @@
#include <nuttx/config.h>
#include "chip.h"
#include "arm_arch.h"
#include "arm_internal.h"
/**************************************************************************
* Pre-processor Definitions
@ -100,25 +98,25 @@ arm_lowputc:
* register.
*/
strb r0, [r2, #UART_THR]
strb r0, [r2, #UART_THR]
/* Wait for the transmit holding register (THR) to be
* emptied. This is determined when bit 6 of the LSR
* is set.
*/
2: ldrb r1, [r2, #UART_LSR]
tst r1, #UART_LSR_THRE
beq 2b
2: ldrb r1, [r2, #UART_LSR]
tst r1, #UART_LSR_THRE
beq 2b
/* If the character that we just sent was a linefeed,
* then send a carriage return as well.
*/
teq r0, #'\n'
moveq r0, #'\r'
beq 1b
teq r0, #'\n'
moveq r0, #'\r'
beq 1b
/* And return */
mov pc, lr
mov pc, lr

View file

@ -23,12 +23,9 @@
****************************************************************************/
#include <nuttx/config.h> /* NuttX configuration settings */
#include <arch/board/board.h> /* Board-specific settings */
#include "arm.h" /* ARM-specific settings */
#include "chip.h" /* Chip-specific settings */
#include "arm_internal.h"
#include "arm_arch.h"
#include "arm.h" /* ARM-specific settings */
#include "chip.h" /* Chip-specific settings */
/****************************************************************************
* Pre-processor Definitions
@ -45,17 +42,17 @@
****************************************************************************/
.globl str71x_prccuinit /* Clock initialization */
.globl up_lowsetup /* Early initialization of UART */
.globl up_lowsetup /* Early initialization of UART */
#ifdef USE_EARLYSERIALINIT
.globl arm_earlyserialinit /* Early initialization of serial driver */
#endif
#ifdef CONFIG_ARCH_LEDS
.globl board_autoled_initialize /* Boot LED setup */
.globl board_autoled_initialize /* Boot LED setup */
#endif
#ifdef CONFIG_DEBUG_FEATURES
.globl arm_lowputc /* Low-level debug output */
.globl arm_lowputc /* Low-level debug output */
#endif
.globl nx_start /* NuttX entry point */
.globl nx_start /* NuttX entry point */
/****************************************************************************
* Macros
@ -104,7 +101,7 @@
# define EXTMEM_GPIO_BITSET 0x0000000f /* P2.0-3 */
#endif
ldr \base, =STR71X_GPIO_BASE ; Configure P2.0 to P2.3/7 in AF_PP mode
ldr \base, =STR71X_GPIO_BASE /* Configure P2.0 to P2.3/7 in AF_PP mode */
ldr \value, [\base, #STR71X_GPIO_PC0_OFFSET]
orr \value, \value, #EXTMEM_GPIO_BITSET
str \value, [\base, #STR71X_GPIO_PC0_OFFSET]
@ -346,12 +343,12 @@ eicloop:
/* Disable all APB1 peripherals */
ldr \value, =STR71X_APB1_APB1ALL
strh \value, [\base1, #STR71X_APB_CKDIS_OFFSET]
strh \value, [\base1, #STR71X_APB_CKDIS_OFFSET]
/* Disable all(or most) APB2 peripherals */
ldr \value, =(STR71X_APB2_APB2ALL & ~STR71X_APB2_EIC)
strh \value, [\base2, #STR71X_APB_CKDIS_OFFSET]
strh \value, [\base2, #STR71X_APB_CKDIS_OFFSET]
/* Allow EMI and USB */
@ -361,7 +358,7 @@ eicloop:
#else
ldr \value, =STR71X_RCCUPER_EMI
#endif
strh \value, [\base1, #STR71X_RCCU_PER_OFFSET]
strh \value, [\base1, #STR71X_RCCU_PER_OFFSET]
#endif
.endm
@ -381,7 +378,7 @@ eicloop:
/* Read the PCU BOOTCR register */
ldr \base, =STR71X_PCU_BASE
ldrh \value, [\base, #STR71X_PCU_BOOTCR_OFFSET]
ldrh \value, [\base, #STR71X_PCU_BOOTCR_OFFSET]
/* Mask out the old boot mode bits and set the boot mode to FLASH */
@ -390,7 +387,7 @@ eicloop:
/* Save the modified BOOTCR register */
strh \value, [\base, #STR71X_PCU_BOOTCR_OFFSET]
strh \value, [\base, #STR71X_PCU_BOOTCR_OFFSET]
.endm
/****************************************************************************
@ -413,14 +410,14 @@ eicloop:
.globl _vector_table
.type _vector_table, %function
_vector_table:
ldr pc, .Lresethandler /* 0x00: Reset */
ldr pc, .Lresethandler /* 0x00: Reset */
ldr pc, .Lundefinedhandler /* 0x04: Undefined instruction */
ldr pc, .Lswihandler /* 0x08: Software interrupt */
ldr pc, .Lswihandler /* 0x08: Software interrupt */
ldr pc, .Lprefetchaborthandler /* 0x0c: Prefetch abort */
ldr pc, .Ldataaborthandler /* 0x10: Data abort */
.long 0 /* 0x14: Reserved vector */
ldr pc, .Lirqhandler /* 0x18: IRQ */
ldr pc, .Lfiqhandler /* 0x1c: FIQ */
.long 0 /* 0x14: Reserved vector */
ldr pc, .Lirqhandler /* 0x18: IRQ */
ldr pc, .Lfiqhandler /* 0x1c: FIQ */
.globl __start
.globl arm_vectorundefinsn
@ -478,11 +475,11 @@ __flashstart:
/* Initialize the external memory interface (EMI) */
emiinit r0, r1
emiinit r0, r1
/* Initialize the enhanced interrupt controller (EIC) */
eicinit r0, r1, r2, r3
eicinit r0, r1, r2, r3
/* Disable all peripherals except EIC */
@ -490,27 +487,27 @@ __flashstart:
/* Map memory appropriately for configuration */
remap r0, r1
remap r0, r1
/* Setup system stack (and get the BSS range) */
adr r0, LC0
ldmia r0, {r4, r5, sp}
ldmia r0, {r4, r5, sp}
/* Clear system BSS section */
mov r0, #0
1: cmp r4, r5
strcc r0, [r4], #4
strcc r0, [r4], #4
bcc 1b
/* Copy system .data sections from FLASH to new home in RAM. */
adr r3, LC2
ldmia r3, {r0, r1, r2}
ldmia r3, {r0, r1, r2}
2: ldmia r0!, {r3 - r10}
stmia r1!, {r3 - r10}
2: ldmia r0!, {r3 - r10}
stmia r1!, {r3 - r10}
cmp r1, r2
blt 2b
@ -523,7 +520,7 @@ __flashstart:
*/
bl up_lowsetup
showprogress 'A'
showprogress 'A'
/* Perform early serial initialization */
@ -532,7 +529,7 @@ __flashstart:
bl arm_earlyserialinit
#endif
showprogress 'B'
showprogress 'B'
/* Call C++ constructors */
@ -543,16 +540,16 @@ ctor_loop:
cmp r0, r1
beq ctor_end
ldr r2, [r0], #4
stmfd sp!, {r0-r1}
stmfd sp!, {r0-r1}
mov lr, pc
mov pc, r2
ldmfd sp!, {r0-r1}
ldmfd sp!, {r0-r1}
b ctor_loop
ctor_end:
showprogress 'C'
showprogress 'C'
#endif
showprogress '\n'
showprogress '\n'
/* Initialize onboard LEDs */
@ -573,10 +570,10 @@ dtor_loop:
cmp r0, r1
beq dtor_end
ldr r2, [r0], #4
stmfd sp!, {r0-r1}
stmfd sp!, {r0-r1}
mov lr, pc
mov pc, r2
ldmfd sp!, {r0-r1}
ldmfd sp!, {r0-r1}
b dtor_loop
dtor_end:
#endif