#
# For a description of the syntax of this configuration file,
# see the file kconfig-language.txt in the NuttX tools repository.
#

menuconfig RPTUN
	bool "Remote Proc Tunnel Driver Support"
	default n
	select RPMSG
	---help---
		The RPTUN driver is used for communication between multiple cores.
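		For example, a board defconfig fragment enabling the driver with
		the defaults from this file might look like (illustrative only):
		  CONFIG_RPTUN=y
		  CONFIG_RPTUN_PRIORITY=224
		  CONFIG_RPTUN_STACKSIZE=4096
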
if RPTUN

config RPTUN_SECURE
	bool "rptun secure support"
	default n
	---help---
		This is an rptun driver for communication between the secure (TEE)
		and non-secure (REE) environments. With this driver, the REE and
		TEE can communicate with each other using the native rpmsg API or
		the various rpmsg services already implemented in NuttX.

config RPTUN_IVSHMEM
	bool "rptun ivshmem support"
	default n
	depends on PCI_IVSHMEM
	---help---
		This is an rptun driver based on PCI ivshmem.

if RPTUN_IVSHMEM

config RPTUN_IVSHMEM_NAME
	string "rptun ivshmem name"
	---help---
		Use this config to customize the rptun ivshmem cpu names and roles,
		using ";" to separate the entries.
		For example, if RPTUN_IVSHMEM_NAME = "0:cpu1:m;1:cpu2:s" and two
		ivshmem devices are passed to qemu, we will get two rptun ivshmem
		drivers with ivshmem device match ids [0, 1], remote cpu names
		["cpu1", "cpu2"] and roles ["master", "slave"].
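		A minimal defconfig sketch for that example might look like this
		(the cpu names and roles are illustrative):
		  CONFIG_RPTUN_IVSHMEM=y
		  CONFIG_RPTUN_IVSHMEM_NAME="0:cpu1:m;1:cpu2:s"
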
config RPTUN_IVSHMEM_BUFFSIZE
	int "rptun ivshmem rpmsg buffer size"
	default 2048
	---help---
		The rpmsg buffer size in the resource table. The RX and TX buffer
		sizes are the same for now.

config RPTUN_IVSHMEM_BUFFNUM
	int "rptun ivshmem rpmsg buffer number"
	default 8
	---help---
		The rpmsg buffer number in the resource table. The RX and TX
		buffer numbers are the same for now.
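		As a rough sizing sketch with the defaults, 8 RX plus 8 TX buffers
		of 2048 bytes each need about 2 * 8 * 2048 = 32768 bytes of shared
		memory, plus vring and resource table overhead.
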
endif # RPTUN_IVSHMEM

config RPTUN_PRIORITY
	int "rptun thread priority"
	default 224

config RPTUN_STACKSIZE
	int "rptun stack size"
	default 4096

config RPTUN_LOADER
	bool "rptun loader support"
	default n

config RPTUN_PM
	bool "rptun power management"
	depends on PM
	default n
	---help---
		The TX/RX buffers may be supplied and powered by each CPU. When
		one CPU is in deep sleep, its buffers enter RAM-retention mode
		and cannot be accessed by the other CPU. This option keeps the
		system out of deep sleep while rptun communication is in progress.

config RPTUN_PM_AUTORELAX
	bool "rptun pm autorelax"
	depends on RPTUN_PM
	default y
	---help---
		Use a watchdog timer to automatically relax the power management
		state.

endif # RPTUN