nuttx/atomic.h:Fix missing type declarations at compile time

Summary:
  1. Adjust the conditions that select which atomic header file is included
  2. Add a fallback definition for _Atomic/_Bool when it is missing
  3. Add a NuttX stdatomic implementation: when the toolchain does not support atomics, fall back to lib/stdatomic

Signed-off-by: chenrun1 <chenrun1@xiaomi.com>
This commit is contained in:
chenrun1 2024-07-29 21:20:05 +08:00 committed by Xiang Xiao
parent 667e92390b
commit 8e1a042eef
10 changed files with 519 additions and 272 deletions

View file

@ -325,7 +325,6 @@ config ARCH_CHIP_RP2040
select ARCH_HAVE_TESTSET
select ARCH_HAVE_I2CRESET
select ARM_HAVE_WFE_SEV
select LIBC_ARCH_ATOMIC
select ARCH_HAVE_PWM_MULTICHAN
select ARCH_BOARD_COMMON
---help---
@ -638,7 +637,6 @@ config ARCH_CHIP_CXD56XX
config ARCH_CHIP_PHY62XX
bool "Phyplus PHY62XX BLE"
select ARCH_CORTEXM0
select LIBC_ARCH_ATOMIC
---help---
Phyplus PHY62XX architectures (ARM Cortex-M0).
@ -646,7 +644,6 @@ config ARCH_CHIP_TLSR82
bool "Telink TLSR82XX"
select ARCH_ARMV6M
select ARCH_HAVE_RESET
select LIBC_ARCH_ATOMIC
---help---
Telink tlsr82xx architectures (Customed armv6m)

View file

@ -73,7 +73,6 @@ config ARCH_CHIP_ESP32C3
select ARCH_VECNOTIRQ
select ARCH_HAVE_MPU
select ARCH_HAVE_RESET
select LIBC_ARCH_ATOMIC
select LIBC_ARCH_MEMCPY
select LIBC_ARCH_MEMCHR
select LIBC_ARCH_MEMCMP

View file

@ -22,7 +22,6 @@ config ARCH_CHIP_ESP32
select ARCH_HAVE_TEXT_HEAP
select ARCH_VECNOTIRQ
select LIBC_PREVENT_STRING_KERNEL
select LIBC_ARCH_ATOMIC
select LIBC_ARCH_MEMCPY if BUILD_FLAT
select LIBC_ARCH_MEMCHR if BUILD_FLAT
select LIBC_ARCH_MEMCMP if BUILD_FLAT
@ -55,7 +54,6 @@ config ARCH_CHIP_ESP32S2
select ARCH_HAVE_RESET
select ARCH_HAVE_TEXT_HEAP
select ARCH_VECNOTIRQ
select LIBC_ARCH_ATOMIC
select LIBC_ARCH_MEMCPY
select LIBC_ARCH_MEMCHR
select LIBC_ARCH_MEMCMP

View file

@ -25,22 +25,59 @@
* Included Files
****************************************************************************/
#if !defined(__cplusplus) || defined(__clang__)
# include <stdatomic.h>
#elif defined(__has_include) && __has_include(<atomic>)
#ifdef __has_include
# if defined(__cplusplus) && __has_include(<atomic>)

/* C++ with <atomic> available: import the std:: atomic types and
 * operations into the global namespace so that C-style <stdatomic.h>
 * code compiles unchanged in C++ translation units.
 */

extern "C++"
{
# include <atomic>

# define ATOMIC_VAR_INIT(value) (value)

using std::atomic_bool;
using std::atomic_char;
using std::atomic_schar;
using std::atomic_uchar;
using std::atomic_short;
using std::atomic_ushort;
using std::atomic_int;
using std::atomic_uint;
using std::atomic_long;
using std::atomic_ulong;
using std::atomic_llong;
using std::atomic_ullong;

/* The *_explicit operations below take a memory-order argument, so the
 * memory_order constants must be visible in the global namespace too.
 */

using std::memory_order;
using std::memory_order_relaxed;
using std::memory_order_consume;
using std::memory_order_acquire;
using std::memory_order_release;
using std::memory_order_acq_rel;
using std::memory_order_seq_cst;

using std::atomic_load;
using std::atomic_load_explicit;
using std::atomic_store;
using std::atomic_store_explicit;
using std::atomic_exchange;
using std::atomic_exchange_explicit;
using std::atomic_compare_exchange_strong;
using std::atomic_compare_exchange_strong_explicit;
using std::atomic_compare_exchange_weak;
using std::atomic_compare_exchange_weak_explicit;
using std::atomic_fetch_add;
using std::atomic_fetch_add_explicit;
using std::atomic_fetch_sub;
using std::atomic_fetch_sub_explicit;
using std::atomic_fetch_and;
using std::atomic_fetch_and_explicit;
using std::atomic_fetch_or;
using std::atomic_fetch_or_explicit;
using std::atomic_fetch_xor;
using std::atomic_fetch_xor_explicit;
}
# elif __has_include(<stdatomic.h>)

/* Toolchain provides <stdatomic.h>.  Old g++ has no _Atomic keyword, so
 * define it away when compiling C++ with a non-clang compiler.
 */

#  if !defined(__clang__) && defined(__cplusplus)
#   define _Atomic
#  endif
#  include <stdbool.h>
#  include <stdatomic.h>
# else

/* No toolchain atomic support: use the NuttX library implementation. */

#  include <nuttx/lib/stdatomic.h>
# endif
#else

/* Compiler too old for __has_include: assume no toolchain atomics. */

# include <nuttx/lib/stdatomic.h>
#endif
/****************************************************************************

View file

@ -0,0 +1,240 @@
/****************************************************************************
 * include/nuttx/lib/stdatomic.h
 *
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.  The
 * ASF licenses this file to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance with the
 * License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
 * License for the specific language governing permissions and limitations
 * under the License.
 *
 ****************************************************************************/

#ifndef __INCLUDE_NUTTX_LIB_STDATOMIC_H
#define __INCLUDE_NUTTX_LIB_STDATOMIC_H

/****************************************************************************
 * Included Files
 ****************************************************************************/

#include <stddef.h>  /* wchar_t, required by atomic_wchar_t below */
#include <stdint.h>
#include <stdbool.h>

/****************************************************************************
 * Pre-processor Definitions
 ****************************************************************************/

/* Memory-order constants matching the GCC/Clang __ATOMIC_* builtins.  Each
 * guard must test the SAME name that is defined so that a toolchain-provided
 * definition is never redefined.
 */

#ifndef __ATOMIC_RELAXED
#  define __ATOMIC_RELAXED 0
#endif

#ifndef __ATOMIC_CONSUME
#  define __ATOMIC_CONSUME 1
#endif

#ifndef __ATOMIC_ACQUIRE
#  define __ATOMIC_ACQUIRE 2
#endif

#ifndef __ATOMIC_RELEASE
#  define __ATOMIC_RELEASE 3
#endif

#ifndef __ATOMIC_ACQ_REL
#  define __ATOMIC_ACQ_REL 4
#endif

#ifndef __ATOMIC_SEQ_CST
#  define __ATOMIC_SEQ_CST 5
#endif

#define ATOMIC_VAR_INIT(value) (value)

/* Each operation dispatches on sizeof(*obj) to the 1/2/4/8-byte library
 * routine declared below.  NOTE(review): the non-explicit forms pass
 * __ATOMIC_RELAXED, whereas C11 specifies seq_cst; the library
 * implementation is lock-based and fully serialized, so the ordering
 * argument is effectively advisory -- confirm before relying on it.
 */

#define atomic_store_n(obj, val, type) \
  (sizeof(*(obj)) == 1 ? __atomic_store_1(obj, val, type) : \
   sizeof(*(obj)) == 2 ? __atomic_store_2(obj, val, type) : \
   sizeof(*(obj)) == 4 ? __atomic_store_4(obj, val, type) : \
                         __atomic_store_8(obj, val, type))

#define atomic_store(obj, val) atomic_store_n(obj, val, __ATOMIC_RELAXED)
#define atomic_store_explicit(obj, val, type) atomic_store_n(obj, val, type)
#define atomic_init(obj, val) atomic_store(obj, val)

#define atomic_load_n(obj, type) \
  (sizeof(*(obj)) == 1 ? __atomic_load_1(obj, type) : \
   sizeof(*(obj)) == 2 ? __atomic_load_2(obj, type) : \
   sizeof(*(obj)) == 4 ? __atomic_load_4(obj, type) : \
                         __atomic_load_8(obj, type))

#define atomic_load(obj) atomic_load_n(obj, __ATOMIC_RELAXED)
#define atomic_load_explicit(obj, type) atomic_load_n(obj, type)

#define atomic_exchange_n(obj, val, type) \
  (sizeof(*(obj)) == 1 ? __atomic_exchange_1(obj, val, type) : \
   sizeof(*(obj)) == 2 ? __atomic_exchange_2(obj, val, type) : \
   sizeof(*(obj)) == 4 ? __atomic_exchange_4(obj, val, type) : \
                         __atomic_exchange_8(obj, val, type))

#define atomic_exchange(obj, val) atomic_exchange_n(obj, val, __ATOMIC_RELAXED)
#define atomic_exchange_explicit(obj, val, type) atomic_exchange_n(obj, val, type)

#define atomic_compare_exchange_n(obj, expected, desired, weak, success, failure) \
  (sizeof(*(obj)) == 1 ? __atomic_compare_exchange_1(obj, expected, desired, weak, success, failure) : \
   sizeof(*(obj)) == 2 ? __atomic_compare_exchange_2(obj, expected, desired, weak, success, failure) : \
   sizeof(*(obj)) == 4 ? __atomic_compare_exchange_4(obj, expected, desired, weak, success, failure) : \
                         __atomic_compare_exchange_8(obj, expected, desired, weak, success, failure))

#define atomic_compare_exchange_strong(obj, expected, desired) \
  atomic_compare_exchange_n(obj, expected, desired, false, __ATOMIC_RELAXED, __ATOMIC_RELAXED)
#define atomic_compare_exchange_strong_explicit(obj, expected, desired, success, failure) \
  atomic_compare_exchange_n(obj, expected, desired, false, success, failure)
#define atomic_compare_exchange_weak(obj, expected, desired) \
  atomic_compare_exchange_n(obj, expected, desired, true, __ATOMIC_RELAXED, __ATOMIC_RELAXED)
#define atomic_compare_exchange_weak_explicit(obj, expected, desired, success, failure) \
  atomic_compare_exchange_n(obj, expected, desired, true, success, failure)

#define atomic_fetch_or_n(obj, val, type) \
  (sizeof(*(obj)) == 1 ? __atomic_fetch_or_1(obj, val, type) : \
   sizeof(*(obj)) == 2 ? __atomic_fetch_or_2(obj, val, type) : \
   sizeof(*(obj)) == 4 ? __atomic_fetch_or_4(obj, val, type) : \
                         __atomic_fetch_or_8(obj, val, type))

#define atomic_fetch_or(obj, val) atomic_fetch_or_n(obj, val, __ATOMIC_RELAXED)
#define atomic_fetch_or_explicit(obj, val, type) atomic_fetch_or_n(obj, val, type)

#define atomic_fetch_and_n(obj, val, type) \
  (sizeof(*(obj)) == 1 ? __atomic_fetch_and_1(obj, val, type) : \
   sizeof(*(obj)) == 2 ? __atomic_fetch_and_2(obj, val, type) : \
   sizeof(*(obj)) == 4 ? __atomic_fetch_and_4(obj, val, type) : \
                         __atomic_fetch_and_8(obj, val, type))

#define atomic_fetch_and(obj, val) atomic_fetch_and_n(obj, val, __ATOMIC_RELAXED)
#define atomic_fetch_and_explicit(obj, val, type) atomic_fetch_and_n(obj, val, type)

#define atomic_fetch_xor_n(obj, val, type) \
  (sizeof(*(obj)) == 1 ? __atomic_fetch_xor_1(obj, val, type) : \
   sizeof(*(obj)) == 2 ? __atomic_fetch_xor_2(obj, val, type) : \
   sizeof(*(obj)) == 4 ? __atomic_fetch_xor_4(obj, val, type) : \
                         __atomic_fetch_xor_8(obj, val, type))

#define atomic_fetch_xor(obj, val) atomic_fetch_xor_n(obj, val, __ATOMIC_RELAXED)
#define atomic_fetch_xor_explicit(obj, val, type) atomic_fetch_xor_n(obj, val, type)

#define atomic_fetch_add_n(obj, val, type) \
  (sizeof(*(obj)) == 1 ? __atomic_fetch_add_1(obj, val, type) : \
   sizeof(*(obj)) == 2 ? __atomic_fetch_add_2(obj, val, type) : \
   sizeof(*(obj)) == 4 ? __atomic_fetch_add_4(obj, val, type) : \
                         __atomic_fetch_add_8(obj, val, type))

#define atomic_fetch_add(obj, val) atomic_fetch_add_n(obj, val, __ATOMIC_RELAXED)
#define atomic_fetch_add_explicit(obj, val, type) atomic_fetch_add_n(obj, val, type)

#define atomic_fetch_sub_n(obj, val, type) \
  (sizeof(*(obj)) == 1 ? __atomic_fetch_sub_1(obj, val, type) : \
   sizeof(*(obj)) == 2 ? __atomic_fetch_sub_2(obj, val, type) : \
   sizeof(*(obj)) == 4 ? __atomic_fetch_sub_4(obj, val, type) : \
                         __atomic_fetch_sub_8(obj, val, type))

#define atomic_fetch_sub(obj, val) atomic_fetch_sub_n(obj, val, __ATOMIC_RELAXED)
#define atomic_fetch_sub_explicit(obj, val, type) atomic_fetch_sub_n(obj, val, type)

/****************************************************************************
 * Public Types
 ****************************************************************************/

/* Fallback atomic typedefs: plain volatile scalars.  Atomicity is provided
 * by the locked library routines above, not by the types themselves.
 */

typedef volatile bool atomic_bool;
typedef volatile char atomic_char;
typedef volatile signed char atomic_schar;
typedef volatile unsigned char atomic_uchar;
typedef volatile short atomic_short;
typedef volatile unsigned short atomic_ushort;
typedef volatile int atomic_int;
typedef volatile unsigned int atomic_uint;
typedef volatile long atomic_long;
typedef volatile unsigned long atomic_ulong;
typedef volatile long long atomic_llong;
typedef volatile unsigned long long atomic_ullong;
typedef volatile wchar_t atomic_wchar_t;

/****************************************************************************
 * Public Function Prototypes
 ****************************************************************************/

/* Sized atomic primitives implemented in libs/libc/machine/arch_atomic.c
 * (weak symbols, so toolchain/libatomic versions take precedence).
 */

void __atomic_store_1(FAR volatile void *ptr, uint8_t value, int memorder);
void __atomic_store_2(FAR volatile void *ptr, uint16_t value, int memorder);
void __atomic_store_4(FAR volatile void *ptr, uint32_t value, int memorder);
void __atomic_store_8(FAR volatile void *ptr, uint64_t value, int memorder);
uint8_t __atomic_load_1(FAR const volatile void *ptr, int memorder);
uint16_t __atomic_load_2(FAR const volatile void *ptr, int memorder);
uint32_t __atomic_load_4(FAR const volatile void *ptr, int memorder);
uint64_t __atomic_load_8(FAR const volatile void *ptr, int memorder);
uint8_t __atomic_exchange_1(FAR volatile void *ptr, uint8_t value,
                            int memorder);
uint16_t __atomic_exchange_2(FAR volatile void *ptr, uint16_t value,
                             int memorder);
uint32_t __atomic_exchange_4(FAR volatile void *ptr, uint32_t value,
                             int memorder);
uint64_t __atomic_exchange_8(FAR volatile void *ptr, uint64_t value,
                             int memorder);
bool __atomic_compare_exchange_1(FAR volatile void *mem, FAR void *expect,
                                 uint8_t desired, bool weak, int success,
                                 int failure);
bool __atomic_compare_exchange_2(FAR volatile void *mem, FAR void *expect,
                                 uint16_t desired, bool weak, int success,
                                 int failure);
bool __atomic_compare_exchange_4(FAR volatile void *mem, FAR void *expect,
                                 uint32_t desired, bool weak, int success,
                                 int failure);
bool __atomic_compare_exchange_8(FAR volatile void *mem, FAR void *expect,
                                 uint64_t desired, bool weak, int success,
                                 int failure);
uint8_t __atomic_fetch_add_1(FAR volatile void *ptr, uint8_t value,
                             int memorder);
uint16_t __atomic_fetch_add_2(FAR volatile void *ptr, uint16_t value,
                              int memorder);
uint32_t __atomic_fetch_add_4(FAR volatile void *ptr, uint32_t value,
                              int memorder);
uint64_t __atomic_fetch_add_8(FAR volatile void *ptr, uint64_t value,
                              int memorder);
uint8_t __atomic_fetch_sub_1(FAR volatile void *ptr, uint8_t value,
                             int memorder);
uint16_t __atomic_fetch_sub_2(FAR volatile void *ptr, uint16_t value,
                              int memorder);
uint32_t __atomic_fetch_sub_4(FAR volatile void *ptr, uint32_t value,
                              int memorder);
uint64_t __atomic_fetch_sub_8(FAR volatile void *ptr, uint64_t value,
                              int memorder);
uint8_t __atomic_fetch_and_1(FAR volatile void *ptr, uint8_t value,
                             int memorder);
uint16_t __atomic_fetch_and_2(FAR volatile void *ptr, uint16_t value,
                              int memorder);
uint32_t __atomic_fetch_and_4(FAR volatile void *ptr, uint32_t value,
                              int memorder);
uint64_t __atomic_fetch_and_8(FAR volatile void *ptr, uint64_t value,
                              int memorder);
uint8_t __atomic_fetch_or_1(FAR volatile void *ptr, uint8_t value,
                            int memorder);
uint16_t __atomic_fetch_or_2(FAR volatile void *ptr, uint16_t value,
                             int memorder);
uint32_t __atomic_fetch_or_4(FAR volatile void *ptr, uint32_t value,
                             int memorder);
uint64_t __atomic_fetch_or_8(FAR volatile void *ptr, uint64_t value,
                             int memorder);
uint8_t __atomic_fetch_xor_1(FAR volatile void *ptr, uint8_t value,
                             int memorder);
uint16_t __atomic_fetch_xor_2(FAR volatile void *ptr, uint16_t value,
                              int memorder);
uint32_t __atomic_fetch_xor_4(FAR volatile void *ptr, uint32_t value,
                              int memorder);
uint64_t __atomic_fetch_xor_8(FAR volatile void *ptr, uint64_t value,
                              int memorder);

#endif /* __INCLUDE_NUTTX_LIB_STDATOMIC_H */

View file

@ -78,6 +78,8 @@
# define false (bool)0
# define __bool_true_false_are_defined 1
# else
# define _Bool uint8_t
# endif /* __cplusplus */
# endif /* CONFIG_ARCH_STDBOOL_H */

View file

@ -20,6 +20,4 @@
add_subdirectory(${CONFIG_ARCH})
if(CONFIG_LIBC_ARCH_ATOMIC)
target_sources(c PRIVATE arch_atomic.c)
endif()
target_sources(c PRIVATE arch_atomic.c)

View file

@ -44,10 +44,6 @@ config ARCH_ROMGETC
# Default settings for C library functions that may be replaced with
# architecture-specific versions.
config LIBC_ARCH_ATOMIC
bool
default n
config LIBC_ARCH_MEMCHR
bool
default n

View file

@ -18,9 +18,7 @@
#
############################################################################
ifeq ($(CONFIG_LIBC_ARCH_ATOMIC),y)
CSRCS += arch_atomic.c
endif
CSRCS += arch_atomic.c
ifeq ($(CONFIG_ARCH_ARM),y)
include $(TOPDIR)/libs/libc/machine/arm/Make.defs

View file

@ -34,9 +34,8 @@
#define STORE(n, type) \
\
void __atomic_store_ ## n (FAR volatile void *ptr, \
type value, \
int memorder) \
void weak_function __atomic_store_##n (FAR volatile void *ptr, \
type value, int memorder) \
{ \
irqstate_t irqstate = spin_lock_irqsave(NULL); \
\
@ -47,7 +46,7 @@
#define LOAD(n, type) \
\
type __atomic_load_ ## n (FAR const volatile void *ptr, \
type weak_function __atomic_load_##n (FAR const volatile void *ptr, \
int memorder) \
{ \
irqstate_t irqstate = spin_lock_irqsave(NULL); \
@ -60,9 +59,8 @@
#define EXCHANGE(n, type) \
\
type __atomic_exchange_ ## n (FAR volatile void *ptr, \
type value, \
int memorder) \
type weak_function __atomic_exchange_##n (FAR volatile void *ptr, \
type value, int memorder) \
{ \
irqstate_t irqstate = spin_lock_irqsave(NULL); \
FAR type *tmp = (FAR type *)ptr; \
@ -76,13 +74,10 @@
#define CMP_EXCHANGE(n, type) \
\
bool __atomic_compare_exchange_ ## n ( \
FAR volatile void *mem, \
bool weak_function __atomic_compare_exchange_##n (FAR volatile void *mem, \
FAR void *expect, \
type desired, \
bool weak, \
int success, \
int failure) \
type desired, bool weak, \
int success, int failure) \
{ \
bool ret = false; \
irqstate_t irqstate = spin_lock_irqsave(NULL); \
@ -105,9 +100,8 @@
#define FETCH_ADD(n, type) \
\
type __atomic_fetch_add_ ## n (FAR volatile void *ptr, \
type value, \
int memorder) \
type weak_function __atomic_fetch_add_##n (FAR volatile void *ptr, \
type value, int memorder) \
{ \
irqstate_t irqstate = spin_lock_irqsave(NULL); \
FAR type *tmp = (FAR type *)ptr; \
@ -121,9 +115,8 @@
#define FETCH_SUB(n, type) \
\
type __atomic_fetch_sub_ ## n (FAR volatile void *ptr, \
type value, \
int memorder) \
type weak_function __atomic_fetch_sub_##n (FAR volatile void *ptr, \
type value, int memorder) \
{ \
irqstate_t irqstate = spin_lock_irqsave(NULL); \
FAR type *tmp = (FAR type *)ptr; \
@ -137,9 +130,8 @@
#define FETCH_AND(n, type) \
\
type __atomic_fetch_and_ ## n (FAR volatile void *ptr, \
type value, \
int memorder) \
type weak_function __atomic_fetch_and_##n (FAR volatile void *ptr, \
type value, int memorder) \
{ \
irqstate_t irqstate = spin_lock_irqsave(NULL); \
FAR type *tmp = (FAR type *)ptr; \
@ -153,9 +145,8 @@
#define FETCH_OR(n, type) \
\
type __atomic_fetch_or_ ## n (FAR volatile void *ptr, \
type value, \
int memorder) \
type weak_function __atomic_fetch_or_##n (FAR volatile void *ptr, \
type value, int memorder) \
{ \
irqstate_t irqstate = spin_lock_irqsave(NULL); \
FAR type *tmp = (FAR type *)ptr; \
@ -169,9 +160,8 @@
#define FETCH_XOR(n, type) \
\
type __atomic_fetch_xor_ ## n (FAR volatile void *ptr, \
type value, \
int memorder) \
type weak_function __atomic_fetch_xor_##n (FAR volatile void *ptr, \
type value, int memorder) \
{ \
irqstate_t irqstate = spin_lock_irqsave(NULL); \
FAR type *tmp = (FAR type *)ptr; \
@ -185,8 +175,7 @@
#define SYNC_ADD_FETCH(n, type) \
\
type __sync_add_and_fetch_ ## n ( \
FAR volatile void *ptr, \
type weak_function __sync_add_and_fetch_##n (FAR volatile void *ptr, \
type value) \
{ \
irqstate_t irqstate = spin_lock_irqsave(NULL); \
@ -200,8 +189,7 @@
#define SYNC_SUB_FETCH(n, type) \
\
type __sync_sub_and_fetch_ ## n ( \
FAR volatile void *ptr, \
type weak_function __sync_sub_and_fetch_##n (FAR volatile void *ptr, \
type value) \
{ \
irqstate_t irqstate = spin_lock_irqsave(NULL); \
@ -215,8 +203,7 @@
#define SYNC_OR_FETCH(n, type) \
\
type __sync_or_and_fetch_ ## n ( \
FAR volatile void *ptr, \
type weak_function __sync_or_and_fetch_##n (FAR volatile void *ptr, \
type value) \
{ \
irqstate_t irqstate = spin_lock_irqsave(NULL); \
@ -230,8 +217,7 @@
#define SYNC_AND_FETCH(n, type) \
\
type __sync_and_and_fetch_ ## n ( \
FAR volatile void *ptr, \
type weak_function __sync_and_and_fetch_##n (FAR volatile void *ptr, \
type value) \
{ \
irqstate_t irqstate = spin_lock_irqsave(NULL); \
@ -245,8 +231,7 @@
#define SYNC_XOR_FETCH(n, type) \
\
type __sync_xor_and_fetch_ ## n ( \
FAR volatile void *ptr, \
type weak_function __sync_xor_and_fetch_##n (FAR volatile void *ptr, \
type value) \
{ \
irqstate_t irqstate = spin_lock_irqsave(NULL); \
@ -260,8 +245,7 @@
#define SYNC_NAND_FETCH(n, type) \
\
type __sync_nand_and_fetch_ ## n ( \
FAR volatile void *ptr, \
type weak_function __sync_nand_and_fetch_##n (FAR volatile void *ptr, \
type value) \
{ \
irqstate_t irqstate = spin_lock_irqsave(NULL); \
@ -275,8 +259,7 @@
#define SYNC_BOOL_CMP_SWAP(n, type) \
\
bool __sync_bool_compare_and_swap_ ## n ( \
FAR volatile void *ptr, \
bool weak_function __sync_bool_compare_and_swap_##n (FAR volatile void *ptr, \
type oldvalue, \
type newvalue) \
{ \
@ -296,8 +279,7 @@
#define SYNC_VAL_CMP_SWAP(n, type) \
\
type __sync_val_compare_and_swap_ ## n ( \
FAR volatile void *ptr, \
type weak_function __sync_val_compare_and_swap_##n (FAR volatile void *ptr, \
type oldvalue, \
type newvalue) \
{ \