nuttx/atomic.h: Fix missing type declarations at compile time

Summary:
  1. Rework the conditions that select which atomic header file is
     included.
  2. Add fallback definitions for _Atomic and _Bool when the toolchain
     does not provide them.
  3. Add a NuttX stdatomic implementation: when the toolchain does not
     support atomics, fall back to <nuttx/lib/stdatomic.h>, backed by
     libs/libc/machine/arch_atomic.c.

Signed-off-by: chenrun1 <chenrun1@xiaomi.com>

Authored by chenrun1 on 2024-07-29 21:20:05 +08:00, committed by Xiang Xiao
parent 667e92390b
commit 8e1a042eef
10 changed files with 519 additions and 272 deletions

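Whichever branch is selected, consumers see a single header and a single API. A minimal consumer sketch, assuming only the names introduced or re-exported by this commit (the g_refcount counter and take_ref() helper are hypothetical, not part of the change):

/* Hypothetical consumer: compiles against whichever backend
 * <nuttx/atomic.h> picked: <atomic>, the toolchain's <stdatomic.h>,
 * or the new <nuttx/lib/stdatomic.h> fallback.
 */

#include <nuttx/atomic.h>

static atomic_int g_refcount = ATOMIC_VAR_INIT(0);

int take_ref(void)
{
  /* atomic_fetch_add() returns the value before the increment */

  return atomic_fetch_add(&g_refcount, 1) + 1;
}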
@@ -325,7 +325,6 @@ config ARCH_CHIP_RP2040
     select ARCH_HAVE_TESTSET
     select ARCH_HAVE_I2CRESET
     select ARM_HAVE_WFE_SEV
-    select LIBC_ARCH_ATOMIC
     select ARCH_HAVE_PWM_MULTICHAN
     select ARCH_BOARD_COMMON
     ---help---

@@ -638,7 +637,6 @@ config ARCH_CHIP_CXD56XX
 config ARCH_CHIP_PHY62XX
     bool "Phyplus PHY62XX BLE"
     select ARCH_CORTEXM0
-    select LIBC_ARCH_ATOMIC
     ---help---
         Phyplus PHY62XX architectures (ARM Cortex-M0).

@@ -646,7 +644,6 @@ config ARCH_CHIP_TLSR82
     bool "Telink TLSR82XX"
     select ARCH_ARMV6M
     select ARCH_HAVE_RESET
-    select LIBC_ARCH_ATOMIC
     ---help---
         Telink tlsr82xx architectures (Customed armv6m)

@@ -73,7 +73,6 @@ config ARCH_CHIP_ESP32C3
     select ARCH_VECNOTIRQ
     select ARCH_HAVE_MPU
     select ARCH_HAVE_RESET
-    select LIBC_ARCH_ATOMIC
     select LIBC_ARCH_MEMCPY
     select LIBC_ARCH_MEMCHR
     select LIBC_ARCH_MEMCMP

@@ -22,7 +22,6 @@ config ARCH_CHIP_ESP32
     select ARCH_HAVE_TEXT_HEAP
     select ARCH_VECNOTIRQ
     select LIBC_PREVENT_STRING_KERNEL
-    select LIBC_ARCH_ATOMIC
     select LIBC_ARCH_MEMCPY if BUILD_FLAT
     select LIBC_ARCH_MEMCHR if BUILD_FLAT
     select LIBC_ARCH_MEMCMP if BUILD_FLAT

@@ -55,7 +54,6 @@ config ARCH_CHIP_ESP32S2
     select ARCH_HAVE_RESET
     select ARCH_HAVE_TEXT_HEAP
     select ARCH_VECNOTIRQ
-    select LIBC_ARCH_ATOMIC
     select LIBC_ARCH_MEMCPY
     select LIBC_ARCH_MEMCHR
     select LIBC_ARCH_MEMCMP

@@ -25,22 +25,59 @@
  * Included Files
  ****************************************************************************/

-#if !defined(__cplusplus) || defined(__clang__)
-#  include <stdatomic.h>
-#elif defined(__has_include) && __has_include(<atomic>)
+#ifdef __has_include
+#  if defined(__cplusplus) && __has_include(<atomic>)
 extern "C++"
 {
-#  include <atomic>
+#    include <atomic>
+
+#    define ATOMIC_VAR_INIT(value) (value)

+  using std::atomic_bool;
+  using std::atomic_char;
+  using std::atomic_schar;
+  using std::atomic_uchar;
+  using std::atomic_short;
+  using std::atomic_ushort;
   using std::atomic_int;
   using std::atomic_uint;
-  using std::atomic_ushort;
+  using std::atomic_long;
+  using std::atomic_ulong;
+  using std::atomic_llong;
+  using std::atomic_ullong;
   using std::atomic_load;
+  using std::atomic_load_explicit;
   using std::atomic_store;
-  using std::atomic_fetch_add;
-  using std::atomic_fetch_sub;
+  using std::atomic_store_explicit;
+  using std::atomic_exchange;
+  using std::atomic_exchange_explicit;
   using std::atomic_compare_exchange_strong;
-
-#  define ATOMIC_VAR_INIT(value) (value)
+  using std::atomic_compare_exchange_strong_explicit;
+  using std::atomic_compare_exchange_weak;
+  using std::atomic_compare_exchange_weak_explicit;
+  using std::atomic_fetch_add;
+  using std::atomic_fetch_add_explicit;
+  using std::atomic_fetch_sub;
+  using std::atomic_fetch_sub_explicit;
+  using std::atomic_fetch_and;
+  using std::atomic_fetch_and_explicit;
+  using std::atomic_fetch_or;
+  using std::atomic_fetch_or_explicit;
+  using std::atomic_fetch_xor;
+  using std::atomic_fetch_xor_explicit;
 }
+#  elif __has_include(<stdatomic.h>)
+#    if !(__clang__) && defined(__cplusplus)
+#      define _Atomic
+#    endif
+#    include <stdbool.h>
+#    include <stdatomic.h>
+#  else
+#    include <nuttx/lib/stdatomic.h>
+#  endif
+#else
+#  include <nuttx/lib/stdatomic.h>
 #endif

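In the C++ branch, the extern "C++" block plus the using-declarations let C-style atomic code build unchanged in a C++ translation unit. A small sketch, assuming a toolchain where __has_include(<atomic>) holds (the bump() function is hypothetical):

// Hypothetical C++ consumer: <nuttx/atomic.h> resolves to <atomic> here,
// and the using-declarations above expose the C-style names.

#include <nuttx/atomic.h>

static atomic_uint g_count = ATOMIC_VAR_INIT(0);

extern "C" unsigned int bump(void)
{
  // std::atomic_fetch_add via the using-declaration; returns the old value
  return atomic_fetch_add(&g_count, 1U) + 1U;
}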
@@ -0,0 +1,240 @@
/****************************************************************************
 * include/nuttx/lib/stdatomic.h
 *
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership. The
 * ASF licenses this file to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance with the
 * License. You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations
 * under the License.
 *
 ****************************************************************************/

#ifndef __INCLUDE_NUTTX_LIB_STDATOMIC_H
#define __INCLUDE_NUTTX_LIB_STDATOMIC_H

/****************************************************************************
 * Included Files
 ****************************************************************************/

#include <stdint.h>
#include <stdbool.h>

/****************************************************************************
 * Pre-processor Definitions
 ****************************************************************************/

#ifndef __ATOMIC_RELAXED
#  define __ATOMIC_RELAXED 0
#endif

#ifndef __ATOMIC_CONSUME
#  define __ATOMIC_CONSUME 1
#endif

#ifndef __ATOMIC_ACQUIRE
#  define __ATOMIC_ACQUIRE 2
#endif

#ifndef __ATOMIC_RELEASE
#  define __ATOMIC_RELEASE 3
#endif

#ifndef __ATOMIC_ACQ_REL
#  define __ATOMIC_ACQ_REL 4
#endif

#ifndef __ATOMIC_SEQ_CST
#  define __ATOMIC_SEQ_CST 5
#endif

#define ATOMIC_VAR_INIT(value) (value)

#define atomic_store_n(obj, val, type) \
  (sizeof(*(obj)) == 1 ? __atomic_store_1(obj, val, type) : \
   sizeof(*(obj)) == 2 ? __atomic_store_2(obj, val, type) : \
   sizeof(*(obj)) == 4 ? __atomic_store_4(obj, val, type) : \
                         __atomic_store_8(obj, val, type))

#define atomic_store(obj, val) atomic_store_n(obj, val, __ATOMIC_RELAXED)
#define atomic_store_explicit(obj, val, type) atomic_store_n(obj, val, type)
#define atomic_init(obj, val) atomic_store(obj, val)

#define atomic_load_n(obj, type) \
  (sizeof(*(obj)) == 1 ? __atomic_load_1(obj, type) : \
   sizeof(*(obj)) == 2 ? __atomic_load_2(obj, type) : \
   sizeof(*(obj)) == 4 ? __atomic_load_4(obj, type) : \
                         __atomic_load_8(obj, type))

#define atomic_load(obj) atomic_load_n(obj, __ATOMIC_RELAXED)
#define atomic_load_explicit(obj, type) atomic_load_n(obj, type)

#define atomic_exchange_n(obj, val, type) \
  (sizeof(*(obj)) == 1 ? __atomic_exchange_1(obj, val, type) : \
   sizeof(*(obj)) == 2 ? __atomic_exchange_2(obj, val, type) : \
   sizeof(*(obj)) == 4 ? __atomic_exchange_4(obj, val, type) : \
                         __atomic_exchange_8(obj, val, type))

#define atomic_exchange(obj, val) atomic_exchange_n(obj, val, __ATOMIC_RELAXED)
#define atomic_exchange_explicit(obj, val, type) atomic_exchange_n(obj, val, type)

#define atomic_compare_exchange_n(obj, expected, desired, weak, success, failure) \
  (sizeof(*(obj)) == 1 ? __atomic_compare_exchange_1(obj, expected, desired, weak, success, failure) : \
   sizeof(*(obj)) == 2 ? __atomic_compare_exchange_2(obj, expected, desired, weak, success, failure) : \
   sizeof(*(obj)) == 4 ? __atomic_compare_exchange_4(obj, expected, desired, weak, success, failure) : \
                         __atomic_compare_exchange_8(obj, expected, desired, weak, success, failure))

#define atomic_compare_exchange_strong(obj, expected, desired) \
  atomic_compare_exchange_n(obj, expected, desired, false, __ATOMIC_RELAXED, __ATOMIC_RELAXED)
#define atomic_compare_exchange_strong_explicit(obj, expected, desired, success, failure) \
  atomic_compare_exchange_n(obj, expected, desired, false, success, failure)
#define atomic_compare_exchange_weak(obj, expected, desired) \
  atomic_compare_exchange_n(obj, expected, desired, true, __ATOMIC_RELAXED, __ATOMIC_RELAXED)
#define atomic_compare_exchange_weak_explicit(obj, expected, desired, success, failure) \
  atomic_compare_exchange_n(obj, expected, desired, true, success, failure)

#define atomic_fetch_or_n(obj, val, type) \
  (sizeof(*(obj)) == 1 ? __atomic_fetch_or_1(obj, val, type) : \
   sizeof(*(obj)) == 2 ? __atomic_fetch_or_2(obj, val, type) : \
   sizeof(*(obj)) == 4 ? __atomic_fetch_or_4(obj, val, type) : \
                         __atomic_fetch_or_8(obj, val, type))

#define atomic_fetch_or(obj, val) atomic_fetch_or_n(obj, val, __ATOMIC_RELAXED)
#define atomic_fetch_or_explicit(obj, val, type) atomic_fetch_or_n(obj, val, type)

#define atomic_fetch_and_n(obj, val, type) \
  (sizeof(*(obj)) == 1 ? __atomic_fetch_and_1(obj, val, type) : \
   sizeof(*(obj)) == 2 ? __atomic_fetch_and_2(obj, val, type) : \
   sizeof(*(obj)) == 4 ? __atomic_fetch_and_4(obj, val, type) : \
                         __atomic_fetch_and_8(obj, val, type))

#define atomic_fetch_and(obj, val) atomic_fetch_and_n(obj, val, __ATOMIC_RELAXED)
#define atomic_fetch_and_explicit(obj, val, type) atomic_fetch_and_n(obj, val, type)

#define atomic_fetch_xor_n(obj, val, type) \
  (sizeof(*(obj)) == 1 ? __atomic_fetch_xor_1(obj, val, type) : \
   sizeof(*(obj)) == 2 ? __atomic_fetch_xor_2(obj, val, type) : \
   sizeof(*(obj)) == 4 ? __atomic_fetch_xor_4(obj, val, type) : \
                         __atomic_fetch_xor_8(obj, val, type))

#define atomic_fetch_xor(obj, val) atomic_fetch_xor_n(obj, val, __ATOMIC_RELAXED)
#define atomic_fetch_xor_explicit(obj, val, type) atomic_fetch_xor_n(obj, val, type)

#define atomic_fetch_add_n(obj, val, type) \
  (sizeof(*(obj)) == 1 ? __atomic_fetch_add_1(obj, val, type) : \
   sizeof(*(obj)) == 2 ? __atomic_fetch_add_2(obj, val, type) : \
   sizeof(*(obj)) == 4 ? __atomic_fetch_add_4(obj, val, type) : \
                         __atomic_fetch_add_8(obj, val, type))

#define atomic_fetch_add(obj, val) atomic_fetch_add_n(obj, val, __ATOMIC_RELAXED)
#define atomic_fetch_add_explicit(obj, val, type) atomic_fetch_add_n(obj, val, type)

#define atomic_fetch_sub_n(obj, val, type) \
  (sizeof(*(obj)) == 1 ? __atomic_fetch_sub_1(obj, val, type) : \
   sizeof(*(obj)) == 2 ? __atomic_fetch_sub_2(obj, val, type) : \
   sizeof(*(obj)) == 4 ? __atomic_fetch_sub_4(obj, val, type) : \
                         __atomic_fetch_sub_8(obj, val, type))

#define atomic_fetch_sub(obj, val) atomic_fetch_sub_n(obj, val, __ATOMIC_RELAXED)
#define atomic_fetch_sub_explicit(obj, val, type) atomic_fetch_sub_n(obj, val, type)

/****************************************************************************
 * Public Types
 ****************************************************************************/

typedef volatile bool atomic_bool;
typedef volatile char atomic_char;
typedef volatile signed char atomic_schar;
typedef volatile unsigned char atomic_uchar;
typedef volatile short atomic_short;
typedef volatile unsigned short atomic_ushort;
typedef volatile int atomic_int;
typedef volatile unsigned int atomic_uint;
typedef volatile long atomic_long;
typedef volatile unsigned long atomic_ulong;
typedef volatile long long atomic_llong;
typedef volatile unsigned long long atomic_ullong;
typedef volatile wchar_t atomic_wchar_t;

/****************************************************************************
 * Public Function Prototypes
 ****************************************************************************/

void __atomic_store_1(FAR volatile void *ptr, uint8_t value, int memorder);
void __atomic_store_2(FAR volatile void *ptr, uint16_t value, int memorder);
void __atomic_store_4(FAR volatile void *ptr, uint32_t value, int memorder);
void __atomic_store_8(FAR volatile void *ptr, uint64_t value, int memorder);
uint8_t __atomic_load_1(FAR const volatile void *ptr, int memorder);
uint16_t __atomic_load_2(FAR const volatile void *ptr, int memorder);
uint32_t __atomic_load_4(FAR const volatile void *ptr, int memorder);
uint64_t __atomic_load_8(FAR const volatile void *ptr, int memorder);
uint8_t __atomic_exchange_1(FAR volatile void *ptr, uint8_t value,
                            int memorder);
uint16_t __atomic_exchange_2(FAR volatile void *ptr, uint16_t value,
                             int memorder);
uint32_t __atomic_exchange_4(FAR volatile void *ptr, uint32_t value,
                             int memorder);
uint64_t __atomic_exchange_8(FAR volatile void *ptr, uint64_t value,
                             int memorder);
bool __atomic_compare_exchange_1(FAR volatile void *mem, FAR void *expect,
                                 uint8_t desired, bool weak, int success,
                                 int failure);
bool __atomic_compare_exchange_2(FAR volatile void *mem, FAR void *expect,
                                 uint16_t desired, bool weak, int success,
                                 int failure);
bool __atomic_compare_exchange_4(FAR volatile void *mem, FAR void *expect,
                                 uint32_t desired, bool weak, int success,
                                 int failure);
bool __atomic_compare_exchange_8(FAR volatile void *mem, FAR void *expect,
                                 uint64_t desired, bool weak, int success,
                                 int failure);
uint8_t __atomic_fetch_add_1(FAR volatile void *ptr, uint8_t value,
                             int memorder);
uint16_t __atomic_fetch_add_2(FAR volatile void *ptr, uint16_t value,
                              int memorder);
uint32_t __atomic_fetch_add_4(FAR volatile void *ptr, uint32_t value,
                              int memorder);
uint64_t __atomic_fetch_add_8(FAR volatile void *ptr, uint64_t value,
                              int memorder);
uint8_t __atomic_fetch_sub_1(FAR volatile void *ptr, uint8_t value,
                             int memorder);
uint16_t __atomic_fetch_sub_2(FAR volatile void *ptr, uint16_t value,
                              int memorder);
uint32_t __atomic_fetch_sub_4(FAR volatile void *ptr, uint32_t value,
                              int memorder);
uint64_t __atomic_fetch_sub_8(FAR volatile void *ptr, uint64_t value,
                              int memorder);
uint8_t __atomic_fetch_and_1(FAR volatile void *ptr, uint8_t value,
                             int memorder);
uint16_t __atomic_fetch_and_2(FAR volatile void *ptr, uint16_t value,
                              int memorder);
uint32_t __atomic_fetch_and_4(FAR volatile void *ptr, uint32_t value,
                              int memorder);
uint64_t __atomic_fetch_and_8(FAR volatile void *ptr, uint64_t value,
                              int memorder);
uint8_t __atomic_fetch_or_1(FAR volatile void *ptr, uint8_t value,
                            int memorder);
uint16_t __atomic_fetch_or_2(FAR volatile void *ptr, uint16_t value,
                             int memorder);
uint32_t __atomic_fetch_or_4(FAR volatile void *ptr, uint32_t value,
                             int memorder);
uint64_t __atomic_fetch_or_8(FAR volatile void *ptr, uint64_t value,
                             int memorder);
uint8_t __atomic_fetch_xor_1(FAR volatile void *ptr, uint8_t value,
                             int memorder);
uint16_t __atomic_fetch_xor_2(FAR volatile void *ptr, uint16_t value,
                              int memorder);
uint32_t __atomic_fetch_xor_4(FAR volatile void *ptr, uint32_t value,
                              int memorder);
uint64_t __atomic_fetch_xor_8(FAR volatile void *ptr, uint64_t value,
                              int memorder);

#endif /* __INCLUDE_NUTTX_LIB_STDATOMIC_H */

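The sizeof-based dispatch sends each generic call to the fixed-width helper that matches the operand, so the fallback stays type-generic without compiler _Atomic support. A sketch of how one call resolves (g_events and note_event() are illustrative only):

#include <nuttx/lib/stdatomic.h>

static atomic_ushort g_events;   /* volatile unsigned short under this header */

void note_event(void)
{
  /* sizeof(g_events) == 2, so this expands through atomic_fetch_add_n()
   * to __atomic_fetch_add_2(&g_events, 1, __ATOMIC_RELAXED), one of the
   * helpers implemented in libs/libc/machine/arch_atomic.c.
   */

  atomic_fetch_add(&g_events, 1);
}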
@@ -78,6 +78,8 @@
 #  define false (bool)0
 #  define __bool_true_false_are_defined 1
+#  else
+#    define _Bool uint8_t
 #  endif /* __cplusplus */
 #endif /* CONFIG_ARCH_STDBOOL_H */

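This pairs with the #define _Atomic in the <stdatomic.h> branch of nuttx/atomic.h: when a C <stdatomic.h> is pulled into a C++ translation unit, declarations that spell _Bool (for example a typedef of _Atomic _Bool) only compile if _Bool exists. A sketch of the effect (g_ready is illustrative only):

// Sketch: C++ TU with CONFIG_ARCH_STDBOOL_H unset. _Bool is not a C++
// keyword; with this change <stdbool.h> maps it to uint8_t, so C-style
// spellings keep compiling.

#include <stdint.h>
#include <stdbool.h>

_Bool g_ready = 0;   // effectively: uint8_t g_ready = 0;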
@@ -20,6 +20,4 @@
 add_subdirectory(${CONFIG_ARCH})

-if(CONFIG_LIBC_ARCH_ATOMIC)
-  target_sources(c PRIVATE arch_atomic.c)
-endif()
+target_sources(c PRIVATE arch_atomic.c)

@@ -44,10 +44,6 @@ config ARCH_ROMGETC
 # Default settings for C library functions that may be replaced with
 # architecture-specific versions.

-config LIBC_ARCH_ATOMIC
-    bool
-    default n
-
 config LIBC_ARCH_MEMCHR
     bool
     default n

@@ -18,9 +18,7 @@
 #
 ############################################################################

-ifeq ($(CONFIG_LIBC_ARCH_ATOMIC),y)
-CSRCS += arch_atomic.c
-endif
+CSRCS += arch_atomic.c

 ifeq ($(CONFIG_ARCH_ARM),y)
 include $(TOPDIR)/libs/libc/machine/arm/Make.defs

@@ -32,286 +32,268 @@
  * Pre-processor Definitions
  ****************************************************************************/

 #define STORE(n, type) \
 \
-  void __atomic_store_ ## n (FAR volatile void *ptr, \
-                             type value, \
-                             int memorder) \
+  void weak_function __atomic_store_##n (FAR volatile void *ptr, \
+                                         type value, int memorder) \
   { \
     irqstate_t irqstate = spin_lock_irqsave(NULL); \
 \
     *(FAR type *)ptr = value; \
 \
     spin_unlock_irqrestore(NULL, irqstate); \
   }

 #define LOAD(n, type) \
 \
-  type __atomic_load_ ## n (FAR const volatile void *ptr, \
-                            int memorder) \
+  type weak_function __atomic_load_##n (FAR const volatile void *ptr, \
+                                        int memorder) \
   { \
     irqstate_t irqstate = spin_lock_irqsave(NULL); \
 \
     type ret = *(FAR type *)ptr; \
 \
     spin_unlock_irqrestore(NULL, irqstate); \
     return ret; \
   }

 #define EXCHANGE(n, type) \
 \
-  type __atomic_exchange_ ## n (FAR volatile void *ptr, \
-                                type value, \
-                                int memorder) \
+  type weak_function __atomic_exchange_##n (FAR volatile void *ptr, \
+                                            type value, int memorder) \
   { \
     irqstate_t irqstate = spin_lock_irqsave(NULL); \
     FAR type *tmp = (FAR type *)ptr; \
 \
     type ret = *tmp; \
     *tmp = value; \
 \
     spin_unlock_irqrestore(NULL, irqstate); \
     return ret; \
   }

 #define CMP_EXCHANGE(n, type) \
 \
-  bool __atomic_compare_exchange_ ## n ( \
-    FAR volatile void *mem, \
-    FAR void *expect, \
-    type desired, \
-    bool weak, \
-    int success, \
-    int failure) \
+  bool weak_function __atomic_compare_exchange_##n (FAR volatile void *mem, \
+                                                    FAR void *expect, \
+                                                    type desired, bool weak, \
+                                                    int success, int failure) \
   { \
     bool ret = false; \
     irqstate_t irqstate = spin_lock_irqsave(NULL); \
     FAR type *tmpmem = (FAR type *)mem; \
     FAR type *tmpexp = (FAR type *)expect; \
 \
     if (*tmpmem == *tmpexp) \
       { \
         ret = true; \
         *tmpmem = desired; \
       } \
     else \
       { \
         *tmpexp = *tmpmem; \
       } \
 \
     spin_unlock_irqrestore(NULL, irqstate); \
     return ret; \
   }

 #define FETCH_ADD(n, type) \
 \
-  type __atomic_fetch_add_ ## n (FAR volatile void *ptr, \
-                                 type value, \
-                                 int memorder) \
+  type weak_function __atomic_fetch_add_##n (FAR volatile void *ptr, \
+                                             type value, int memorder) \
   { \
     irqstate_t irqstate = spin_lock_irqsave(NULL); \
     FAR type *tmp = (FAR type *)ptr; \
     type ret = *tmp; \
 \
     *tmp = *tmp + value; \
 \
     spin_unlock_irqrestore(NULL, irqstate); \
     return ret; \
   }

 #define FETCH_SUB(n, type) \
 \
-  type __atomic_fetch_sub_ ## n (FAR volatile void *ptr, \
-                                 type value, \
-                                 int memorder) \
+  type weak_function __atomic_fetch_sub_##n (FAR volatile void *ptr, \
+                                             type value, int memorder) \
   { \
     irqstate_t irqstate = spin_lock_irqsave(NULL); \
     FAR type *tmp = (FAR type *)ptr; \
     type ret = *tmp; \
 \
     *tmp = *tmp - value; \
 \
     spin_unlock_irqrestore(NULL, irqstate); \
     return ret; \
   }

 #define FETCH_AND(n, type) \
 \
-  type __atomic_fetch_and_ ## n (FAR volatile void *ptr, \
-                                 type value, \
-                                 int memorder) \
+  type weak_function __atomic_fetch_and_##n (FAR volatile void *ptr, \
+                                             type value, int memorder) \
   { \
     irqstate_t irqstate = spin_lock_irqsave(NULL); \
     FAR type *tmp = (FAR type *)ptr; \
     type ret = *tmp; \
 \
     *tmp = *tmp & value; \
 \
     spin_unlock_irqrestore(NULL, irqstate); \
     return ret; \
   }

 #define FETCH_OR(n, type) \
 \
-  type __atomic_fetch_or_ ## n (FAR volatile void *ptr, \
-                                type value, \
-                                int memorder) \
+  type weak_function __atomic_fetch_or_##n (FAR volatile void *ptr, \
+                                            type value, int memorder) \
   { \
     irqstate_t irqstate = spin_lock_irqsave(NULL); \
     FAR type *tmp = (FAR type *)ptr; \
     type ret = *tmp; \
 \
     *tmp = *tmp | value; \
 \
     spin_unlock_irqrestore(NULL, irqstate); \
     return ret; \
   }

 #define FETCH_XOR(n, type) \
 \
-  type __atomic_fetch_xor_ ## n (FAR volatile void *ptr, \
-                                 type value, \
-                                 int memorder) \
+  type weak_function __atomic_fetch_xor_##n (FAR volatile void *ptr, \
+                                             type value, int memorder) \
   { \
     irqstate_t irqstate = spin_lock_irqsave(NULL); \
     FAR type *tmp = (FAR type *)ptr; \
     type ret = *tmp; \
 \
     *tmp = *tmp ^ value; \
 \
     spin_unlock_irqrestore(NULL, irqstate); \
     return ret; \
   }

 #define SYNC_ADD_FETCH(n, type) \
 \
-  type __sync_add_and_fetch_ ## n ( \
-    FAR volatile void *ptr, \
-    type value) \
+  type weak_function __sync_add_and_fetch_##n (FAR volatile void *ptr, \
+                                               type value) \
   { \
     irqstate_t irqstate = spin_lock_irqsave(NULL); \
     FAR type *tmp = (FAR type *)ptr; \
 \
     *tmp = *tmp + value; \
 \
     spin_unlock_irqrestore(NULL, irqstate); \
     return *tmp; \
   }

 #define SYNC_SUB_FETCH(n, type) \
 \
-  type __sync_sub_and_fetch_ ## n ( \
-    FAR volatile void *ptr, \
-    type value) \
+  type weak_function __sync_sub_and_fetch_##n (FAR volatile void *ptr, \
+                                               type value) \
   { \
     irqstate_t irqstate = spin_lock_irqsave(NULL); \
     FAR type *tmp = (FAR type *)ptr; \
 \
     *tmp = *tmp - value; \
 \
     spin_unlock_irqrestore(NULL, irqstate); \
     return *tmp; \
   }

 #define SYNC_OR_FETCH(n, type) \
 \
-  type __sync_or_and_fetch_ ## n ( \
-    FAR volatile void *ptr, \
-    type value) \
+  type weak_function __sync_or_and_fetch_##n (FAR volatile void *ptr, \
+                                              type value) \
   { \
     irqstate_t irqstate = spin_lock_irqsave(NULL); \
     FAR type *tmp = (FAR type *)ptr; \
 \
     *tmp = *tmp | value; \
 \
     spin_unlock_irqrestore(NULL, irqstate); \
     return *tmp; \
   }

 #define SYNC_AND_FETCH(n, type) \
 \
-  type __sync_and_and_fetch_ ## n ( \
-    FAR volatile void *ptr, \
-    type value) \
+  type weak_function __sync_and_and_fetch_##n (FAR volatile void *ptr, \
+                                               type value) \
   { \
     irqstate_t irqstate = spin_lock_irqsave(NULL); \
     FAR type *tmp = (FAR type *)ptr; \
 \
     *tmp = *tmp & value; \
 \
     spin_unlock_irqrestore(NULL, irqstate); \
     return *tmp; \
   }

 #define SYNC_XOR_FETCH(n, type) \
 \
-  type __sync_xor_and_fetch_ ## n ( \
-    FAR volatile void *ptr, \
-    type value) \
+  type weak_function __sync_xor_and_fetch_##n (FAR volatile void *ptr, \
+                                               type value) \
   { \
     irqstate_t irqstate = spin_lock_irqsave(NULL); \
     FAR type *tmp = (FAR type *)ptr; \
 \
     *tmp = *tmp ^ value; \
 \
     spin_unlock_irqrestore(NULL, irqstate); \
     return *tmp; \
   }

 #define SYNC_NAND_FETCH(n, type) \
 \
-  type __sync_nand_and_fetch_ ## n ( \
-    FAR volatile void *ptr, \
-    type value) \
+  type weak_function __sync_nand_and_fetch_##n (FAR volatile void *ptr, \
+                                                type value) \
   { \
     irqstate_t irqstate = spin_lock_irqsave(NULL); \
     FAR type *tmp = (FAR type *)ptr; \
 \
     *tmp = ~(*tmp & value); \
 \
     spin_unlock_irqrestore(NULL, irqstate); \
     return *tmp; \
   }

 #define SYNC_BOOL_CMP_SWAP(n, type) \
 \
-  bool __sync_bool_compare_and_swap_ ## n ( \
-    FAR volatile void *ptr, \
-    type oldvalue, \
-    type newvalue) \
+  bool weak_function __sync_bool_compare_and_swap_##n (FAR volatile void *ptr, \
+                                                       type oldvalue, \
+                                                       type newvalue) \
   { \
     bool ret = false; \
     irqstate_t irqstate = spin_lock_irqsave(NULL); \
     FAR type *tmp = (FAR type *)ptr; \
 \
     if (*tmp == oldvalue) \
       { \
         ret = true; \
         *tmp = newvalue; \
       } \
 \
     spin_unlock_irqrestore(NULL, irqstate); \
     return ret; \
   }

 #define SYNC_VAL_CMP_SWAP(n, type) \
 \
-  type __sync_val_compare_and_swap_ ## n ( \
-    FAR volatile void *ptr, \
-    type oldvalue, \
-    type newvalue) \
+  type weak_function __sync_val_compare_and_swap_##n (FAR volatile void *ptr, \
+                                                      type oldvalue, \
+                                                      type newvalue) \
   { \
     irqstate_t irqstate = spin_lock_irqsave(NULL); \
     FAR type *tmp = (FAR type *)ptr; \
     type ret = *tmp; \
 \
     if (*tmp == oldvalue) \
       { \
         *tmp = newvalue; \
       } \
 \
     spin_unlock_irqrestore(NULL, irqstate); \
     return ret; \
   }
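Each generator macro is then presumably instantiated once per operand width; marking the results weak_function lets an architecture override any emulated routine with a strong, lock-free definition. A sketch of the assumed instantiation pattern, with widths taken from the __atomic_*_1/2/4/8 prototypes in the new header (the instantiations themselves are not shown in this excerpt):

/* Assumed instantiations, not visible in this excerpt: */

STORE(1, uint8_t)
STORE(2, uint16_t)
STORE(4, uint32_t)
STORE(8, uint64_t)

LOAD(1, uint8_t)
LOAD(2, uint16_t)
LOAD(4, uint32_t)
LOAD(8, uint64_t)

/* ...and likewise for EXCHANGE, CMP_EXCHANGE, and the FETCH_* and
 * SYNC_* families.
 */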